diff --git a/.flake8 b/.flake8
new file mode 100644
index 0000000000..4f67c422f8
--- /dev/null
+++ b/.flake8
@@ -0,0 +1,49 @@
+[flake8]
+# References:
+# https://flake8.readthedocs.io/en/latest/user/configuration.html
+# https://flake8.readthedocs.io/en/latest/user/error-codes.html
+# https://pycodestyle.readthedocs.io/en/latest/intro.html#error-codes
+
+max-line-length = 80
+max-complexity = 50
+select = C,E,F,W,B,B950
+ignore =
+ # E203: whitespace before ':'
+ E203,
+ # E226: missing whitespace around arithmetic operator
+ E226,
+ # E231: missing whitespace after ',', ';', or ':'
+ E231,
+ # E402: module level import not at top of file
+ E402,
+ # E501: line too long
+ E501,
+ # E731: do not assign a lambda expression, use a def
+ E731,
+ # W503: line break before binary operator
+ W503,
+ # W504: line break after binary operator
+ W504,
+exclude =
+ #
+ # ignore the following directories
+ #
+ .eggs,
+ build,
+ docs/src/sphinxext/*,
+ tools/*,
+ benchmarks/*,
+ #
+ # ignore auto-generated files
+ #
+ _ff_cross_references.py,
+ std_names.py,
+ um_cf_map.py,
+ #
+ # ignore third-party files
+ #
+ gitwash_dumper.py,
+ #
+ # convenience imports
+ #
+ lib/iris/common/__init__.py
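flake8 discovers a `.flake8` file at the repository root automatically, so the settings above apply to plain local runs as well as to the pre-commit hook. A minimal sketch of a local invocation (the target path is only an example):

```
# Lint the package sources using the select/ignore/exclude lists above.
flake8 lib/iris

# Summarise how often each remaining error code occurs.
flake8 --statistics lib/iris
```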
diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs
new file mode 100644
index 0000000000..f6cae020f3
--- /dev/null
+++ b/.git-blame-ignore-revs
@@ -0,0 +1,20 @@
+# Format: numpy array format (#5235)
+c18dcd8dafef0cc7bbbf80dfce66f76a46ce59c5
+
+# style: flake8 (#3755)
+7c86bc0168684345dc475457b1a77dadc77ce9bb
+
+# style: black (#3518)
+ffcfad475e0593e1e40895453cf1df154e5f6f2c
+
+# style: isort (#4174)
+15bbcc5ac3d539cb6e820148b66e7cf55d91c5d2
+
+# style: blacken-docs (#4205)
+1572e180243e492d8ff76fa8cdefb82ef6f90415
+
+# style: sort-all (#4353)
+64705dbc40881233aae45f051d96049150369e53
+
+# style: codespell (#5186)
+417aa6bbd9b10d25cad7def54d47ef4d718bc38d
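These revisions only disappear from blame output if git is pointed at the file. A minimal sketch of wiring it up in a local clone (the blamed path is just an example):

```
# One-off per clone: make git blame skip the bulk style commits listed above.
git config blame.ignoreRevsFile .git-blame-ignore-revs

# Or pass the file explicitly for a single invocation.
git blame --ignore-revs-file .git-blame-ignore-revs lib/iris/cube.py
```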
diff --git a/.git_archival.txt b/.git_archival.txt
new file mode 100644
index 0000000000..3994ec0a83
--- /dev/null
+++ b/.git_archival.txt
@@ -0,0 +1,4 @@
+node: $Format:%H$
+node-date: $Format:%cI$
+describe-name: $Format:%(describe:tags=true)$
+ref-names: $Format:%D$
diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000000..82bf71c1c5
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1 @@
+.git_archival.txt export-subst
\ No newline at end of file
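Together these two files let setuptools-scm work out a version for source archives that lack git metadata: the `export-subst` attribute makes `git archive` replace the `$Format:...$` placeholders in `.git_archival.txt` with the commit hash, date and tag description. A rough local check, assuming setuptools-scm is installed:

```
# Produce an archive with the placeholders substituted and inspect the result.
git archive --format=tar --prefix=iris/ HEAD | tar -x
cat iris/.git_archival.txt

# In a normal clone the version is derived from git metadata instead.
python -m setuptools_scm
```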
diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md
index 425a427357..594c45a1ee 100644
--- a/.github/CONTRIBUTING.md
+++ b/.github/CONTRIBUTING.md
@@ -1,36 +1,13 @@
-How to contribute
-=================
+# Contributing to Iris
-We want Iris to be driven by the community - your contributions are
-invaluable to us! This page lists the guidelines for contributors which
-will help ease the process of getting your hard work accepted into Iris,
-and shared back to the world.
+Want to contribute to Iris, but don't know where to start? 🤔
-Getting started
----------------
+We recommend that you first check out our advice to [First Time Contributors](https://github.com/SciTools/iris/issues/4133),
+which has some solid suggestions on where to begin.
-1. If you've not already got one, sign up for a
- [GitHub account](https://github.com/signup/free).
-1. Fork the Iris repository, create your new fix/feature branch, and
- start commiting code.
- - Our
- [development guide](http://scitools.org.uk/iris/docs/latest/developers_guide/gitwash/git_development.html)
- has more detail.
-1. Make sure you've added appropriate tests, and that *all* the tests
- pass.
+Otherwise, head over to our [Developers Guide on Getting Involved](https://scitools-iris.readthedocs.io/en/stable/developers_guide/contributing_getting_involved.html)
+for all the information you need to start on your Iris development journey.
+Still need help or advice?
-Submitting changes
-------------------
-
-1. Read and sign the Contributor Licence Agreement (CLA).
- - See our [governance page](http://scitools.org.uk/governance.html)
- for the CLA and what to do with it.
-1. Push your branch to your fork of Iris.
-1. Submit your pull request.
-1. Chillax.
-
-
-If in doubt, please
-[contact us](https://groups.google.com/forum/?fromgroups=#!forum/scitools-iris)
-on our Google Group, and we'll be happy to help you.
+Then join us over on our [Iris GitHub Discussions](https://github.com/SciTools/iris/discussions). We'd love to hear from you!
\ No newline at end of file
diff --git a/.github/ISSUE_TEMPLATE/bug-report.md b/.github/ISSUE_TEMPLATE/bug-report.md
new file mode 100644
index 0000000000..134b6ff8da
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug-report.md
@@ -0,0 +1,39 @@
+---
+name: "\U0001F41B Bug Report"
+about: Submit a bug report to help us improve Iris
+title: ''
+labels: 'Type: Bug'
+assignees: ''
+
+---
+
+## 🐛 Bug Report
+
+
+## How To Reproduce
+Steps to reproduce the behaviour:
+
+1.
+2.
+3.
+
+## Expected behaviour
+
+
+## Screenshots
+
+
+## Environment
+ - OS & Version: [e.g., Ubuntu 20.04 LTS]
+ - Iris Version: [e.g., From the command line run `python -c "import iris; print(iris.__version__)"`]
+
+## Additional context
+
+
+Click to expand this section...
+
+```
+Please add additional verbose information in this section, e.g., code, output, tracebacks, screenshots, etc.
+```
+
+
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
new file mode 100644
index 0000000000..84af305034
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1,9 @@
+# reference: https://docs.github.com/en/github/building-a-strong-community/configuring-issue-templates-for-your-repository#configuring-the-template-chooser
+blank_issues_enabled: false
+contact_links:
+ - name: 💬 Iris GitHub Discussions
+ url: https://github.com/SciTools/iris/discussions
+ about: Engage with the Iris community to discuss your issue
+ - name: ❓ Usage Question
+ url: https://github.com/SciTools/iris/discussions/categories/q-a
+ about: Raise a question about how to use Iris in the Q&A section of Discussions
diff --git a/.github/ISSUE_TEMPLATE/documentation.md b/.github/ISSUE_TEMPLATE/documentation.md
new file mode 100644
index 0000000000..01eb2a6734
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/documentation.md
@@ -0,0 +1,12 @@
+---
+name: "\U0001F4DA Documentation"
+about: Report an issue with the Iris documentation
+title: ''
+labels: 'Type: Documentation'
+assignees: ''
+
+---
+
+## 📚 Documentation
+
+
diff --git a/.github/ISSUE_TEMPLATE/feature-request.md b/.github/ISSUE_TEMPLATE/feature-request.md
new file mode 100644
index 0000000000..2f66321405
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature-request.md
@@ -0,0 +1,25 @@
+---
+name: "✨ Feature Request"
+about: Submit a request for a new feature in Iris
+title: ''
+assignees: ''
+
+---
+
+## ✨ Feature Request
+
+
+## Motivation
+
+
+
+## Additional context
+
+
+Click to expand this section...
+
+```
+Please add additional verbose information in this section, e.g., references, screenshots, listings, etc.
+```
+
+
diff --git a/.github/ISSUE_TEMPLATE/issue.md b/.github/ISSUE_TEMPLATE/issue.md
new file mode 100644
index 0000000000..63de163743
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/issue.md
@@ -0,0 +1,10 @@
+---
+name: "\U0001F4F0 Custom Issue"
+about: Submit a generic issue to help us improve Iris
+title: ''
+assignees: ''
+
+---
+
+## 📰 Custom Issue
+
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 0000000000..82ba80c4ff
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,20 @@
+# Reference:
+# - https://docs.github.com/en/code-security/supply-chain-security/keeping-your-dependencies-updated-automatically/keeping-your-actions-up-to-date-with-dependabot
+# - https://docs.github.com/en/code-security/supply-chain-security/keeping-your-dependencies-updated-automatically/configuration-options-for-dependency-updates
+
+version: 2
+updates:
+
+ - package-ecosystem: "github-actions"
+ directories:
+ - "/"
+ - "/.github/workflows/composite/*"
+ schedule:
+ # Check later in the week - the upstream dependabot check in `workflows` deliberately runs early in the week,
+ # allowing time for the `workflows` update to be merged and released first.
+ interval: "weekly"
+ day: "thursday"
+ time: "01:00"
+ timezone: "Europe/London"
+ labels:
+ - "Bot"
diff --git a/.github/deploy_key.scitools-docs.enc b/.github/deploy_key.scitools-docs.enc
deleted file mode 100644
index 165a7c1970..0000000000
--- a/.github/deploy_key.scitools-docs.enc
+++ /dev/null
@@ -1 +0,0 @@
-gAAAAABZSMeGIlHxHu4oCV_h8shbCRf1qJYoLO9Z0q9uKRDTlytoigzlvfxhN-9WMjc3Js1f1Zg55PfEpTOpL82p6QHF-gqW0k0qGjanO3lnQzM6EzIu3KyJPrVrL-O6edwoPMYKqwsNO3VQHNuEspsFKY0TbjnTPHc45SPU5LjEGX4c_SADSDcLDJm2rbrU2eVkT-gFHy_-ZzK0Di83WlDc79YzIkVe5BAn5PbWv3O9BROR4fJzecbjmWRT_rp1cqI_gaUpVcwTdRK3II9YnazBtW4h2WbCeTcySLD7N4o9K0P71SR6gG_XFbpML3Haf5IUdRi0qPBuvJ_4YVnnuJo6mhiIOJfUEcNj_bbLOYVzPmKyQMHvrPf_lK5JhdX6MUvqluhqHuc0i_z_j1O2y32lB7b1iiY6eE_BsNlXJHlOX1GiXkX0nZLI48p-D22jya44WshWSnVcoalcCDkdbvdFbpOscwXDR3nB-PCOmRUF_d1BlMbp1if-VP0yt3tJ_5yyCrqSRWwFusaibQTF6yoImetl7Am95hh2FjFDNkalHqtarnUv86w-26v1ukcTIjJ0iHzNbCK1m0VMkvE6uDeqRgIZnVKON5cesmM3YbulRrHpaOiSly_sMhLhfg5jTxAuOa319AQGoHEOcRLRUYdw2TQkDEiHGiUh_U4-nC7GTGDGcXyeBIa4ciuC2Qi0QXf9qyEGoIRcU8BP34LDNdtovJoZOBDzhr5Ajnu7yA3GB3TD_kiZrgm6agFuu7a51OMfjezhwGzUJ4X-empPctwm9woOJmPCTFqCvxB2VwVV0L6yngsTooyAHCi5st_AG-p5FIT3VZGx7EgCd68ze9XlRoACoe9XOdSFklbaSMGRbJlvKCPAA0zj4__PfIhlD8Cxwwjq_VXlSr_QxygIGZJlhkT46P9TroolgdipaBp1aQ3_PKHfgw5Y9ZqBKCZF5DOJejqUbfVKUp2JdqoX3yQBD0ByQFdfCuLvoiYcM2ofKdIMvel3Jwn0Nx4NYR2qg3h7FYti0jdrNlC89gnL4tKsf0DAGxZ1UYmqQMWJ3-GKCKrlKyeaHYB2djPRGP8VeoRZh_UorSNHU56KSztK_hTP6P0nFymRJRUSRBMKTaTfJf1aBlk9zJHSe9hOKwxyUNkwcTftGn5P0WNcnaTk3ecTVe-1QJKbPWwMBDzqQtTCsCizgN4UdQsmy4iMYq-LT2TC-JXXo0CPTNDybUj92wSa7KeKTvKnbN8DMZbGRdgy5BOSGw4hMIoIFSB-6tnBIvTntNfMT9ac9e9jKm47Q4qXpaeF3AsvBqxkMRQLaYVppPng6cA49VjJQDZ0gTdPKSSKZkApfeeQL0LLCGwzQ4C52TWK2NJSQ3pvRYI1F0taDQWopIiwFfox-OSYnOJECHkHjxaxhHQzVb3w47xKKZNXbLb-LV7QI-kGuKLfoqO1lq94cw1H-EVrXaGJcDDLjK2jRgdVfDyPsHMcW1oUDJqu8gQ6fCXYPbqJzdmFNFsc1hywHWCU7crV61D2QubwzbLRnP8053MvsMnbdhWtwocTlvvdG-qW6CiEA9Eanfpf0RW1W9oh6yQJ__0vS9UWswqq5ahkkpHY9LTE0US4L3xbFOrq7HgbA2jelTdPVfxo3BfUHuL8oKpFDTzgZi07gNmkhIZfpuXj2KFnm9XM31AsY6V2rXL0xSx-9rvi4FP0LK6V5vQ8OKI8aRPCDyzLUv2xnayMW4yaYg3GHD5yo7pIOswKc6GOEmetPnay3j0dVN3hfpkpfJWhss3vjZ2Zl0NmjJ7OuS25tjUGLy82A1yFSpL8mKRkHZJuMDZbd_Or6gaPVoVT_Otbkh-6pMZuDeOHOUfgey0Z374jCjRpyQ9k-Fpw8ykow8iIIQ088kC5CeQy6jRhD7mO3iR4-U1XKDJQNlNg1z_JYyDrwykp7FFN2sQn7RRYHIXx2iMrEDXdrdTrujMFN6omC13yDuXJukAgZb6zBBUTlonxRUBjUJWt2P-1sRRTsG8mr9EaE5K-xhR5Ust_37L3svNQ0vwLtPLIpWGZHhD8P_dYNR2RL4679xyzI8A7wLY82wFBHrcghAd4UtLJH9ul6IuS_CaVo-gbfowNRaQ0Zw7WHZGIXpZWEx1_zck6qDEaCY8TpQeciBWpH5uJDSYqdLdMwigdQEGzAJ1DHSWsyTrmOR7Lhwi9WqOzfWe4ahxAkAUH_Jdr_i-nGfl_x3OgQdHM7jWVMXDcXEmR0bkw-s0EKXCn20q2bxDkm5SUWkYtWAZ2aZRgo4wHOqGBcP99xZ25mq9uxtNOkLBF81lnVbn_4BAZBNnnKwwj4SafeIW4KR1ZOpnEI47sGUR6NhEk9VtJsv0zeZIv8VjRbNLh3QCxkNMue60SjJ48kjotZSX1RQJN0xwPftiABBf8MX9tyZe8emQvPeIcdQTSQPnYEUx22xZGeeJTNrZ9soQyP6mrkkRihp6o9tG7HT9QEVLGM19wAigwAAMMXGqdGzWwpar30JtJU94gAmIlwFUJqeO_fdJKFspnUyJ6gt5_oHsKNEV7Uz5EJwGpa94tlPJXjvZpu-wWQfu8U0trTU2mTCA0bmZIDID-Xk4vCW_SD4OVnsvWyga4QHSg3AqVTjnjlapAjsYcFjiOo2C_U3besloprpyuAwpTdn7zdfMHIJO0ckBFnXlk8XB3kT0YGrCpBvW6gYMXlnePVcr3wJehCvMg1Q9Dc5fVQUqt65zcjbgiudfzFGtTe9T4f1IttoAtrJgTN4W1mtbZzSK864I_ngaX5YWgZSinjkbocCCFEJDcbiXMnV7OWOZefqW6VZu4BZKEKlN9k2kH3UCECCK3uRAQIPn_48DgaVnAff2-fMADltiosSPJ_a3057acJP0cf-1QsJuV7r3zdzL3shgrMRjpSsSTCYdMhZ6disFGcJg7hJJvtH1FieZ76jps5FYi5lE8Ua9yBKlG4dCGuUBnikvpfy2FLMLFNn-iXLflu2oiBbcLvn_ReZUnFIR6KgGRN8xKEBaXATQVtb2E678GtQptK8PHP2DoAtbsIXUDn60YH04D9pEck8NnmWYAz7sWbiL6OKdaO7jQep4mt3CgkyFC0NCKP9zCbVNtmfHRVmHtckjgfHF-tK_v59KeAuwWPtm7ow2BjynAK42IGR9nWtQFRUZIboaND8UF76YGKFF7kOf_XTvoNrVTCRkD6b8KJy2IFfdoHP6WET9QLvwDSXgYLPlCX9z7aQ_lc57u5d_dGO-7NZ_Qbs69ByyIvQoztVBjw6fa7EzSwccqPfMQL_fiecNCng-r4gHaH6TlgSbfqQOISHxTtvmbym1no560ZsHfnQfuL6BCI8s6OoygxhOnQhaDqyOUVBut_x3VR_DKFMyUazXYNgLbRsdITaAvR-0gIx5TAX9n3A4HwHuiBZCtwRYaiJnW8FX9lk1Y_g5UHL2OC3rsNFui3aBLzAFhx58lALxnxhlUItuHHK9BgexnR2yCj2nOWLoWQzfFaf2_fpjEh_QBHTqUxdQZ8ighg_8lh6hmLbW4PcUxKX71RF
mikLyS3-idlzsiEomNlPNaVllRF21vE6dR-nZ6xsxzTvNB4wumP2irQ9mFBTN1WpiLMyNoEEucA2I848YHUfkZrjTG_dcCQNp7H_2gKdIsZ135lUEG6lYfhLMHTmP5uYxxx3Pipjp6wF2GFCsZPIlIPsgrhbSxqkWg1EOViHtpw6ypFKn7wQHHfnrnHkFWnrKbMARVBjJUB-FhK4b6qLU_k_MTMipemneMUFXlj3EkEhKM18MIHGkIOkwG5QtPYcjUAf_2sZlxSMVnh6sQ8kVwF6lfk_l8jhoO93HUTntZUSv7GrE3s80yJgII4Qw37AdgcJiAkoPn1-17HfSsAy6uRh5-OvrCtkDqQxfuJSyn_4pRMh6hZT7N9pI5limMXXn2nHnxU93UT3qU-smA8q0ECfvK3JwoaYy_llSx0wSBvpmxjLQ302sFYM5FVZ9zRbHuLCCZShVopiyMDLHVJe_1g9Ou1KL-h6RVZgg3Ttyb5m2KDfoHEVLeZkW81YLCsyo7uNb6SVRM-615TIVGT6Eq7oJ6wO2LMDKjEpHKFiOFpY2fpR8noM81UqgLddYfl_lei7RVjaNO98otqE4iSNtpgJgyhAx4CdYm__yQRSXhckR4K7yAhM9Kh5BLbQQnf2_0WS1sWTmNMZZNMfOSqmTCRVwcYvg4TDGOA-vZARbZW1M7npVMldV_SbvgcEZD6InY9c40eheRqS0YD2W2HEZIiNeLRw0y5WBcYuJIpXhI3ViTXx-frJnv0Mo9uwmuLbJmWFcn6RdIVcU68_oPZZlZD4Vm7SjikbuZKF1BF3lXamTTDIBcWiDLwuNDv2lUkURDCWa5WJsfUCfTAJ6PTe8=
\ No newline at end of file
diff --git a/.github/labeler.yml b/.github/labeler.yml
new file mode 100644
index 0000000000..14e9a95d30
--- /dev/null
+++ b/.github/labeler.yml
@@ -0,0 +1,9 @@
+# benchmark_this automatically triggers the benchmark workflow when added by
+# a user. No triggering happens when GitHub Actions adds the label (this
+# avoids security vulnerabilities), so alternative triggers for the below
+# files are therefore included in workflows/benchmarks_run.yml. Automatic
+# labelling is still included here to make it easier to search pull requests,
+# and to reinforce the culture of using this label.
+benchmark_this:
+- changed-files:
+ - any-glob-to-any-file: ['requirements/locks/*.lock', "setup.py"]
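As noted above, only a human-applied `benchmark_this` label triggers the benchmark workflow. One way to apply it, assuming the GitHub CLI and a placeholder PR number:

```
# Label a pull request by hand, which in turn triggers the benchmark run.
gh pr edit 1234 --add-label benchmark_this
```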
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
new file mode 100644
index 0000000000..34bc59182c
--- /dev/null
+++ b/.github/pull_request_template.md
@@ -0,0 +1,14 @@
+## 🚀 Pull Request
+
+### Description
+
+
+
+
+---
+[Consult the Iris pull request checklist](https://scitools-iris.readthedocs.io/en/latest/developers_guide/contributing_pull_request_checklist.html)
+
+---
+Add any of the below labels to trigger actions on this PR:
+
+- https://github.com/SciTools/iris/labels/benchmark_this
diff --git a/.github/workflows/benchmarks_report.yml b/.github/workflows/benchmarks_report.yml
new file mode 100644
index 0000000000..93a2bc1a77
--- /dev/null
+++ b/.github/workflows/benchmarks_report.yml
@@ -0,0 +1,83 @@
+# Post any reports generated by benchmarks_run.yml .
+# Separated for security:
+# https://securitylab.github.com/research/github-actions-preventing-pwn-requests/
+
+name: benchmarks-report
+run-name: Report benchmark results
+
+on:
+ workflow_run:
+ workflows: [benchmarks-run]
+ types:
+ - completed
+
+jobs:
+ download:
+ runs-on: ubuntu-latest
+ outputs:
+ reports_exist: ${{ steps.unzip.outputs.reports_exist }}
+ steps:
+ - name: Download artifact
+ id: download-artifact
+ # https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#using-data-from-the-triggering-workflow
+ uses: actions/github-script@v7
+ with:
+ script: |
+ let allArtifacts = await github.rest.actions.listWorkflowRunArtifacts({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ run_id: context.payload.workflow_run.id,
+ });
+ let matchArtifact = allArtifacts.data.artifacts.filter((artifact) => {
+ return artifact.name == "benchmark_reports"
+ })[0];
+ if (typeof matchArtifact != 'undefined') {
+ let download = await github.rest.actions.downloadArtifact({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ artifact_id: matchArtifact.id,
+ archive_format: 'zip',
+ });
+ let fs = require('fs');
+ fs.writeFileSync(`${process.env.GITHUB_WORKSPACE}/benchmark_reports.zip`, Buffer.from(download.data));
+ };
+
+ - name: Unzip artifact
+ id: unzip
+ run: |
+ if test -f "benchmark_reports.zip"; then
+ reports_exist=1
+ unzip benchmark_reports.zip -d benchmark_reports
+ else
+ reports_exist=0
+ fi
+ echo "reports_exist=$reports_exist" >> "$GITHUB_OUTPUT"
+
+ - name: Store artifact
+ uses: actions/upload-artifact@v4
+ with:
+ name: benchmark_reports
+ path: benchmark_reports
+
+ post_reports:
+ runs-on: ubuntu-latest
+ needs: download
+ if: needs.download.outputs.reports_exist == 1
+ steps:
+ - name: Checkout repo
+ uses: actions/checkout@v4
+
+ - name: Download artifact
+ uses: actions/download-artifact@v4
+ with:
+ name: benchmark_reports
+ path: .github/workflows/benchmark_reports
+
+ - name: Set up Python
+ # benchmarks/bm_runner.py only needs builtins to run.
+ uses: actions/setup-python@v5
+
+ - name: Post reports
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ run: python benchmarks/bm_runner.py _gh_post
diff --git a/.github/workflows/benchmarks_run.yml b/.github/workflows/benchmarks_run.yml
new file mode 100644
index 0000000000..287735c335
--- /dev/null
+++ b/.github/workflows/benchmarks_run.yml
@@ -0,0 +1,157 @@
+# Use ASV to check for performance regressions, either:
+# - In the last 24 hours' commits.
+# - Introduced by this pull request.
+
+name: benchmarks-run
+run-name: Run benchmarks
+
+on:
+ schedule:
+ # Runs every day at 23:00.
+ - cron: "0 23 * * *"
+ workflow_dispatch:
+ inputs:
+ first_commit:
+ description: "First commit to benchmark (see bm_runner.py > Overnight)."
+ required: false
+ type: string
+ pull_request:
+ # Add the `labeled` type to the default list.
+ types: [labeled, opened, synchronize, reopened]
+
+jobs:
+ pre-checks:
+ runs-on: ubuntu-latest
+ if: github.repository == 'SciTools/iris'
+ outputs:
+ overnight: ${{ steps.overnight.outputs.check }}
+ branch: ${{ steps.branch.outputs.check }}
+ steps:
+ - uses: actions/checkout@v4
+ with:
+ fetch-depth: 2
+ - id: files-changed
+ uses: marceloprado/has-changed-path@df1b7a3161b8fb9fd8c90403c66a9e66dfde50cb
+ with:
+ # SEE ALSO .github/labeler.yml .
+ paths: requirements/locks/*.lock setup.py
+ - id: overnight
+ if: github.event_name != 'pull_request'
+ run: echo "check=true" >> "$GITHUB_OUTPUT"
+ - id: branch
+ if: >
+ github.event_name == 'pull_request'
+ &&
+ (
+ steps.files-changed.outputs.changed == 'true'
+ ||
+ github.event.label.name == 'benchmark_this'
+ )
+ run: echo "check=true" >> "$GITHUB_OUTPUT"
+
+
+ benchmark:
+ runs-on: ubuntu-latest
+ needs: pre-checks
+ if: >
+ needs.pre-checks.outputs.overnight == 'true' ||
+ needs.pre-checks.outputs.branch == 'true'
+
+ env:
+ IRIS_TEST_DATA_LOC_PATH: benchmarks
+ IRIS_TEST_DATA_PATH: benchmarks/iris-test-data
+ IRIS_TEST_DATA_VERSION: "2.28"
+ # Lets us manually bump the cache to rebuild
+ ENV_CACHE_BUILD: "0"
+ TEST_DATA_CACHE_BUILD: "2"
+
+ steps:
+ # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
+ - uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+
+ - name: Install Nox
+ run: |
+ pip install nox
+
+ - name: Cache environment directories
+ id: cache-env-dir
+ uses: actions/cache@v4
+ with:
+ path: |
+ .nox
+ benchmarks/.asv/env
+ $CONDA/pkgs
+ key: ${{ runner.os }}-${{ hashFiles('requirements/') }}-${{ env.ENV_CACHE_BUILD }}
+
+ - name: Cache test data directory
+ id: cache-test-data
+ uses: actions/cache@v4
+ with:
+ path: |
+ ${{ env.IRIS_TEST_DATA_PATH }}
+ key:
+ test-data-${{ env.IRIS_TEST_DATA_VERSION }}-${{ env.TEST_DATA_CACHE_BUILD }}
+
+ - name: Fetch the test data
+ if: steps.cache-test-data.outputs.cache-hit != 'true'
+ run: |
+ wget --quiet https://github.com/SciTools/iris-test-data/archive/v${IRIS_TEST_DATA_VERSION}.zip -O iris-test-data.zip
+ unzip -q iris-test-data.zip
+ mkdir --parents ${GITHUB_WORKSPACE}/${IRIS_TEST_DATA_LOC_PATH}
+ mv iris-test-data-${IRIS_TEST_DATA_VERSION} ${GITHUB_WORKSPACE}/${IRIS_TEST_DATA_PATH}
+
+ - name: Set test data var
+ run: |
+ echo "OVERRIDE_TEST_DATA_REPOSITORY=${GITHUB_WORKSPACE}/${IRIS_TEST_DATA_PATH}/test_data" >> $GITHUB_ENV
+
+ - name: Benchmark this pull request
+ if: needs.pre-checks.outputs.branch == 'true'
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ PR_NUMBER: ${{ github.event.number }}
+ run: |
+ nox -s benchmarks -- branch origin/${{ github.base_ref }}
+
+ - name: Run overnight benchmarks
+ id: overnight
+ if: needs.pre-checks.outputs.overnight == 'true'
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ run: |
+ first_commit=${{ inputs.first_commit }}
+ if [ "$first_commit" == "" ]
+ then
+ first_commit=$(git log --after="$(date -d "1 day ago" +"%Y-%m-%d") 23:00:00" --pretty=format:"%h" | tail -n 1)
+ fi
+
+ if [ "$first_commit" != "" ]
+ then
+ nox -s benchmarks -- overnight $first_commit
+ fi
+
+ - name: Warn of failure
+ if: >
+ failure() &&
+ steps.overnight.outcome == 'failure'
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ run: |
+ title="Overnight benchmark workflow failed: \`${{ github.run_id }}\`"
+ body="Generated by GHA run [\`${{github.run_id}}\`](https://github.com/${{github.repository}}/actions/runs/${{github.run_id}})"
+ gh issue create --title "$title" --body "$body" --label "Bot" --label "Type: Performance" --repo $GITHUB_REPOSITORY
+
+ - name: Upload any benchmark reports
+ if: success() || steps.overnight.outcome == 'failure'
+ uses: actions/upload-artifact@v4
+ with:
+ name: benchmark_reports
+ path: .github/workflows/benchmark_reports
+
+ - name: Archive asv results
+ if: ${{ always() }}
+ uses: actions/upload-artifact@v4
+ with:
+ name: asv-raw-results
+ path: benchmarks/.asv/results
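The `workflow_dispatch` trigger above means an overnight-style run can also be started by hand. A hedged sketch using the GitHub CLI, with a placeholder commit SHA:

```
# Start a manual benchmark run from a chosen first commit.
gh workflow run benchmarks-run -f first_commit=abc1234
```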
diff --git a/.github/workflows/ci-citation.yml b/.github/workflows/ci-citation.yml
new file mode 100644
index 0000000000..99cced758b
--- /dev/null
+++ b/.github/workflows/ci-citation.yml
@@ -0,0 +1,30 @@
+name: ci-citation
+
+on:
+ pull_request:
+ paths:
+ - "CITATION.cff"
+
+ push:
+ paths:
+ - "CITATION.cff"
+
+ workflow_dispatch:
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: true
+
+jobs:
+ validate:
+ name: "validate"
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+
+ - name: "check CITATION.cff"
+ uses: citation-file-format/cffconvert-github-action@4cf11baa70a673bfdf9dad0acc7ee33b3f4b6084
+ with:
+ args: "--validate"
diff --git a/.github/workflows/ci-manifest.yml b/.github/workflows/ci-manifest.yml
new file mode 100644
index 0000000000..18b7fb224d
--- /dev/null
+++ b/.github/workflows/ci-manifest.yml
@@ -0,0 +1,26 @@
+# Reference
+# - https://github.com/actions/checkout
+
+name: ci-manifest
+
+on:
+ pull_request:
+ branches:
+ - "*"
+
+ push:
+ branches-ignore:
+ - "auto-update-lockfiles"
+ - "pre-commit-ci-update-config"
+ - "dependabot/*"
+
+ workflow_dispatch:
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: true
+
+jobs:
+ manifest:
+ name: "check-manifest"
+ uses: scitools/workflows/.github/workflows/ci-manifest.yml@2024.10.1
diff --git a/.github/workflows/ci-tests.yml b/.github/workflows/ci-tests.yml
new file mode 100644
index 0000000000..4b21e73384
--- /dev/null
+++ b/.github/workflows/ci-tests.yml
@@ -0,0 +1,148 @@
+# reference:
+# - https://github.com/actions/cache
+# - https://github.com/actions/checkout
+# - https://github.com/marketplace/actions/setup-miniconda
+
+name: ci-tests
+
+on:
+ push:
+ branches:
+ - "main"
+ - "v*x"
+ tags:
+ - "v*"
+ pull_request:
+ branches:
+ - "*"
+ workflow_dispatch:
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: true
+
+jobs:
+ tests:
+ name: "${{ matrix.session }} (py${{ matrix.python-version }} ${{ matrix.os }})"
+
+ runs-on: ${{ matrix.os }}
+
+ defaults:
+ run:
+ shell: bash -l {0}
+
+ strategy:
+ fail-fast: false
+ matrix:
+ os: ["ubuntu-latest"]
+ python-version: ["3.12"]
+ session: ["doctest", "gallery", "linkcheck"]
+ include:
+ - os: "ubuntu-latest"
+ python-version: "3.12"
+ session: "tests"
+ coverage: "--coverage"
+ - os: "ubuntu-latest"
+ python-version: "3.11"
+ session: "tests"
+ - os: "ubuntu-latest"
+ python-version: "3.10"
+ session: "tests"
+
+ env:
+ # NOTE: IRIS_TEST_DATA_VERSION is also set in benchmarks_run.yml
+ IRIS_TEST_DATA_VERSION: "2.28"
+ ENV_NAME: "ci-tests"
+
+ steps:
+ - name: "checkout"
+ uses: actions/checkout@v4
+
+ - name: "environment configure"
+ env:
+ # Maximum cache period (in weeks) before forcing a cache refresh.
+ CACHE_WEEKS: 2
+ run: |
+ echo "CACHE_PERIOD=$(date +%Y).$(expr $(date +%U) / ${CACHE_WEEKS})" >> ${GITHUB_ENV}
+ echo "LOCK_FILE=requirements/locks/py$(echo ${{ matrix.python-version }} | tr -d '.')-linux-64.lock" >> ${GITHUB_ENV}
+
+ - name: "data cache"
+ uses: ./.github/workflows/composite/iris-data-cache
+ with:
+ cache_build: 6
+ env_name: ${{ env.ENV_NAME }}
+ version: ${{ env.IRIS_TEST_DATA_VERSION }}
+
+ - name: "conda package cache"
+ uses: ./.github/workflows/composite/conda-pkg-cache
+ with:
+ cache_build: 6
+ cache_period: ${{ env.CACHE_PERIOD }}
+ env_name: ${{ env.ENV_NAME }}
+
+ - name: "conda install"
+ uses: conda-incubator/setup-miniconda@v3
+ with:
+ miniforge-version: latest
+ channels: conda-forge
+ activate-environment: ${{ env.ENV_NAME }}
+ auto-update-conda: false
+
+ - name: "conda environment cache"
+ uses: ./.github/workflows/composite/conda-env-cache
+ with:
+ cache_build: 6
+ cache_period: ${{ env.CACHE_PERIOD }}
+ env_name: ${{ env.ENV_NAME }}
+ install_packages: "cartopy nox pip"
+
+ - name: "conda info"
+ run: |
+ conda info
+ conda list
+
+ - name: "cartopy cache"
+ uses: ./.github/workflows/composite/cartopy-cache
+ with:
+ cache_build: 6
+ cache_period: ${{ env.CACHE_PERIOD }}
+ env_name: ${{ env.ENV_NAME }}
+
+ - name: "nox cache"
+ uses: ./.github/workflows/composite/nox-cache
+ with:
+ cache_build: 6
+ env_name: ${{ env.ENV_NAME }}
+ lock_file: ${{ env.LOCK_FILE }}
+
+ # TODO: drop use of site.cfg and explicit use of mplrc
+ - name: "iris configure"
+ env:
+ SITE_CFG: lib/iris/etc/site.cfg
+ MPL_RC: ${HOME}/.config/matplotlib/matplotlibrc
+ run: |
+ mkdir -p $(dirname ${SITE_CFG})
+ echo ${SITE_CFG}
+ echo "[Resources]" >> ${SITE_CFG}
+ echo "test_data_dir = ${HOME}/iris-test-data/test_data" >> ${SITE_CFG}
+ echo "doc_dir = ${GITHUB_WORKSPACE}/docs" >> ${SITE_CFG}
+ cat ${SITE_CFG}
+ mkdir -p $(dirname ${MPL_RC})
+ echo ${MPL_RC}
+ echo "backend : agg" >> ${MPL_RC}
+ echo "image.cmap : viridis" >> ${MPL_RC}
+ cat ${MPL_RC}
+
+ - name: "iris ${{ matrix.session }}"
+ env:
+ PY_VER: ${{ matrix.python-version }}
+ # Force coloured output on GitHub Actions.
+ PY_COLORS: "1"
+ run: |
+ nox --session ${{ matrix.session }} -- --verbose ${{ matrix.coverage }}
+
+ - name: "upload coverage report"
+ if: ${{ matrix.coverage }}
+ uses: codecov/codecov-action@v4
+ with:
+ token: ${{ secrets.CODECOV_TOKEN }}
\ No newline at end of file
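Each matrix entry maps onto a nox session, so a failing CI job can usually be reproduced locally. A minimal sketch, assuming nox is installed and that the project noxfile reads PY_VER as this workflow does:

```
# Run the unit tests with coverage, mirroring the py312 "tests" job.
PY_VER=3.12 nox --session tests -- --verbose --coverage

# Build and check the documentation examples, mirroring the "doctest" job.
PY_VER=3.12 nox --session doctest -- --verbose
```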
diff --git a/.github/workflows/ci-wheels.yml b/.github/workflows/ci-wheels.yml
new file mode 100644
index 0000000000..9c53673481
--- /dev/null
+++ b/.github/workflows/ci-wheels.yml
@@ -0,0 +1,165 @@
+# Reference:
+# - https://github.com/actions/checkout
+# - https://github.com/actions/download-artifact
+# - https://github.com/actions/upload-artifact
+# - https://github.com/pypa/build
+# - https://github.com/pypa/gh-action-pypi-publish
+# - https://test.pypi.org/help/#apitoken
+
+name: ci-wheels
+
+on:
+ pull_request:
+
+ push:
+ tags:
+ - "v*"
+ branches-ignore:
+ - "auto-update-lockfiles"
+ - "pre-commit-ci-update-config"
+ - "dependabot/*"
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: true
+
+jobs:
+ build:
+ name: "build sdist & wheel"
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+
+ - name: "building"
+ shell: bash
+ run: |
+ pipx run build
+
+ - uses: actions/upload-artifact@v4
+ with:
+ name: pypi-artifacts
+ path: ${{ github.workspace }}/dist/*
+
+ test-wheel:
+ needs: build
+ name: "test wheel (py${{ matrix.python-version }})"
+ runs-on: ubuntu-latest
+ defaults:
+ run:
+ shell: bash -l {0}
+ strategy:
+ fail-fast: false
+ matrix:
+ python-version: ["3.10", "3.11", "3.12"]
+ session: ["wheel"]
+ env:
+ ENV_NAME: "ci-wheels"
+ steps:
+ - uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+
+ - uses: actions/download-artifact@v4
+ with:
+ name: pypi-artifacts
+ path: ${{ github.workspace }}/dist
+
+ - name: "environment configure"
+ env:
+ # Maximum cache period (in weeks) before forcing a cache refresh.
+ CACHE_WEEKS: 2
+ run: |
+ echo "CACHE_PERIOD=$(date +%Y).$(expr $(date +%U) / ${CACHE_WEEKS})" >> ${GITHUB_ENV}
+ echo "LOCK_FILE=requirements/locks/py$(echo ${{ matrix.python-version }} | tr -d '.')-linux-64.lock" >> ${GITHUB_ENV}
+
+ - name: "conda package cache"
+ uses: ./.github/workflows/composite/conda-pkg-cache
+ with:
+ cache_build: 0
+ cache_period: ${{ env.CACHE_PERIOD }}
+ env_name: ${{ env.ENV_NAME }}
+
+ - name: "conda install"
+ uses: conda-incubator/setup-miniconda@v3
+ with:
+ miniforge-version: latest
+ channels: conda-forge,defaults
+ activate-environment: ${{ env.ENV_NAME }}
+ auto-update-conda: false
+ use-only-tar-bz2: true
+
+ - name: "conda environment cache"
+ uses: ./.github/workflows/composite/conda-env-cache
+ with:
+ cache_build: 0
+ cache_period: ${{ env.CACHE_PERIOD }}
+ env_name: ${{ env.ENV_NAME }}
+ install_packages: "nox pip"
+
+ - name: "nox cache"
+ uses: ./.github/workflows/composite/nox-cache
+ with:
+ cache_build: 1
+ env_name: ${{ env.ENV_NAME }}
+ lock_file: ${{ env.LOCK_FILE }}
+
+ - name: "nox install and test wheel"
+ env:
+ PY_VER: ${{ matrix.python-version }}
+ run: |
+ nox --session ${{ matrix.session }} -- --verbose
+
+ show-artifacts:
+ needs: build
+ name: "show artifacts"
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/download-artifact@v4
+ with:
+ name: pypi-artifacts
+ path: ${{ github.workspace }}/dist
+
+ - shell: bash
+ run: |
+ ls -l ${{ github.workspace }}/dist
+
+ publish-artifacts-test-pypi:
+ needs: test-wheel
+ name: "publish to test.pypi"
+ runs-on: ubuntu-latest
+ # upload to Test PyPI for every commit on the main branch,
+ # but only when running under the SciTools organisation
+ if: github.event_name == 'push' && github.event.ref == 'refs/heads/main' && github.repository_owner == 'SciTools'
+ steps:
+ - uses: actions/download-artifact@v4
+ with:
+ name: pypi-artifacts
+ path: ${{ github.workspace }}/dist
+
+ - uses: pypa/gh-action-pypi-publish@release/v1
+ with:
+ user: __token__
+ password: ${{ secrets.TEST_PYPI_API_TOKEN }}
+ repository_url: https://test.pypi.org/legacy/
+ skip_existing: true
+ print_hash: true
+
+ publish-artifacts-pypi:
+ needs: test-wheel
+ name: "publish to pypi"
+ runs-on: ubuntu-latest
+ # upload to PyPI for every tag starting with 'v'
+ if: github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/v') && github.repository_owner == 'SciTools'
+ steps:
+ - uses: actions/download-artifact@v4
+ with:
+ name: pypi-artifacts
+ path: ${{ github.workspace }}/dist
+
+ - uses: pypa/gh-action-pypi-publish@release/v1
+ with:
+ user: __token__
+ password: ${{ secrets.PYPI_API_TOKEN }}
+ print_hash: true
diff --git a/.github/workflows/composite/cartopy-cache/action.yml b/.github/workflows/composite/cartopy-cache/action.yml
new file mode 100644
index 0000000000..d42e5c36cb
--- /dev/null
+++ b/.github/workflows/composite/cartopy-cache/action.yml
@@ -0,0 +1,41 @@
+name: "cartopy-cache"
+description: "create and cache cartopy assets"
+
+#
+# Assumes the environment contains the following variables:
+# - CONDA
+#
+inputs:
+ cache_build:
+ description: "conda environment cache build number"
+ required: false
+ default: "0"
+ cache_period:
+ description: "conda environment cache timestamp"
+ required: true
+ env_name:
+ description: "environment name"
+ required: true
+
+runs:
+ using: "composite"
+ steps:
+ - uses: actions/cache@v4
+ id: cartopy-cache
+ with:
+ path: ~/.local/share/cartopy
+ key: ${{ runner.os }}-cartopy-${{ inputs.env_name }}-p${{ inputs.cache_period }}-b${{ inputs.cache_build }}
+
+ - if: steps.cartopy-cache.outputs.cache-hit != 'true'
+ env:
+ CARTOPY_SHARE_DIR: ~/.local/share/cartopy
+ CARTOPY_FEATURE: https://raw.githubusercontent.com/SciTools/cartopy/v0.20.0/tools/cartopy_feature_download.py
+ shell: bash
+ run: |
+ # Need to explicitly activate the environment within the composite action.
+ source ${{ env.CONDA }}/etc/profile.d/conda.sh >/dev/null 2>&1
+ conda activate ${{ inputs.env_name }}
+ wget --quiet ${CARTOPY_FEATURE}
+ mkdir -p ${CARTOPY_SHARE_DIR}
+ # Requires a pre-installed version of cartopy within the environment.
+ python cartopy_feature_download.py physical --output ${CARTOPY_SHARE_DIR} --no-warn
diff --git a/.github/workflows/composite/conda-env-cache/action.yml b/.github/workflows/composite/conda-env-cache/action.yml
new file mode 100644
index 0000000000..15eaaec63c
--- /dev/null
+++ b/.github/workflows/composite/conda-env-cache/action.yml
@@ -0,0 +1,35 @@
+name: "conda-env-cache"
+description: "create and cache the conda environment"
+
+#
+# Assumes the environment contains the following variables:
+# - CONDA
+#
+inputs:
+ cache_build:
+ description: "conda environment cache build number"
+ required: false
+ default: "0"
+ cache_period:
+ description: "conda environment cache timestamp"
+ required: true
+ env_name:
+ description: "environment name"
+ required: true
+ install_packages:
+ description: "conda packages to install into environment"
+ required: true
+
+runs:
+ using: "composite"
+ steps:
+ - uses: actions/cache@v4
+ id: conda-env-cache
+ with:
+ path: ${{ env.CONDA }}/envs/${{ inputs.env_name }}
+ key: ${{ runner.os }}-conda-env-${{ inputs.env_name }}-p${{ inputs.cache_period }}-b${{ inputs.cache_build }}
+
+ - if: steps.conda-env-cache.outputs.cache-hit != 'true'
+ shell: bash
+ run: |
+ conda install --quiet --name ${{ inputs.env_name }} ${{ inputs.install_packages }}
diff --git a/.github/workflows/composite/conda-pkg-cache/action.yml b/.github/workflows/composite/conda-pkg-cache/action.yml
new file mode 100644
index 0000000000..48c4470e44
--- /dev/null
+++ b/.github/workflows/composite/conda-pkg-cache/action.yml
@@ -0,0 +1,22 @@
+name: "conda-pkg-cache"
+description: "cache the conda environment packages"
+
+inputs:
+ cache_build:
+ description: "conda environment cache build number"
+ required: false
+ default: "0"
+ cache_period:
+ description: "conda environment cache timestamp"
+ required: true
+ env_name:
+ description: "environment name"
+ required: true
+
+runs:
+ using: "composite"
+ steps:
+ - uses: actions/cache@v4
+ with:
+ path: ~/conda_pkgs_dir
+ key: ${{ runner.os }}-conda-pkgs-${{ inputs.env_name }}-p${{ inputs.cache_period }}-b${{ inputs.cache_build }}
diff --git a/.github/workflows/composite/iris-data-cache/action.yml b/.github/workflows/composite/iris-data-cache/action.yml
new file mode 100644
index 0000000000..7ba7acb2cc
--- /dev/null
+++ b/.github/workflows/composite/iris-data-cache/action.yml
@@ -0,0 +1,30 @@
+name: "iris-data-cache"
+description: "create and cache the iris test data"
+
+inputs:
+ cache_build:
+ description: "data cache build number"
+ required: false
+ default: "0"
+ env_name:
+ description: "environment name"
+ required: true
+ version:
+ description: "iris test data version"
+ required: true
+
+runs:
+ using: "composite"
+ steps:
+ - uses: actions/cache@v4
+ id: data-cache
+ with:
+ path: ~/iris-test-data
+ key: ${{ runner.os }}-iris-test-data-${{ inputs.env_name }}-v${{ inputs.version }}-b${{ inputs.cache_build }}
+
+ - if: steps.data-cache.outputs.cache-hit != 'true'
+ shell: bash
+ run: |
+ wget --quiet https://github.com/SciTools/iris-test-data/archive/v${{ inputs.version }}.zip -O iris-test-data.zip
+ unzip -q iris-test-data.zip
+ mv iris-test-data-${{ inputs.version }} ~/iris-test-data
diff --git a/.github/workflows/composite/nox-cache/action.yml b/.github/workflows/composite/nox-cache/action.yml
new file mode 100644
index 0000000000..00387331e7
--- /dev/null
+++ b/.github/workflows/composite/nox-cache/action.yml
@@ -0,0 +1,22 @@
+name: "nox cache"
+description: "cache the nox test environments"
+
+inputs:
+ cache_build:
+ description: "nox cache build number"
+ required: false
+ default: "0"
+ env_name:
+ description: "environment name"
+ required: true
+ lock_file:
+ description: "conda-lock environment requirements filename"
+ required: true
+
+runs:
+ using: "composite"
+ steps:
+ - uses: actions/cache@v4
+ with:
+ path: ${{ github.workspace }}/.nox
+ key: ${{ runner.os }}-nox-${{ inputs.env_name }}-s${{ matrix.session }}-py${{ matrix.python-version }}-b${{ inputs.cache_build }}-${{ hashFiles(inputs.lock_file) }}
diff --git a/.github/workflows/labeler.yml b/.github/workflows/labeler.yml
new file mode 100644
index 0000000000..7914ec2531
--- /dev/null
+++ b/.github/workflows/labeler.yml
@@ -0,0 +1,15 @@
+# Reference
+# - https://github.com/actions/labeler
+
+name: "Pull Request Labeler"
+on:
+- pull_request_target
+
+jobs:
+ labeler:
+ permissions:
+ contents: read
+ pull-requests: write
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/labeler@v5
\ No newline at end of file
diff --git a/.github/workflows/refresh-lockfiles.yml b/.github/workflows/refresh-lockfiles.yml
new file mode 100644
index 0000000000..3ebb056433
--- /dev/null
+++ b/.github/workflows/refresh-lockfiles.yml
@@ -0,0 +1,18 @@
+# Updates the environment lock files. See the called workflow in the
+# scitools/workflows repo for more details.
+
+name: Refresh Lockfiles
+
+
+on:
+ workflow_dispatch:
+ schedule:
+ # Run once a week on a Saturday night
+ # N.B. "should" be quoted, according to
+ # https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#onschedule
+ - cron: "1 0 * * 6"
+
+jobs:
+ refresh_lockfiles:
+ uses: scitools/workflows/.github/workflows/refresh-lockfiles.yml@2024.10.1
+ secrets: inherit
diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml
new file mode 100644
index 0000000000..3df5aa3a18
--- /dev/null
+++ b/.github/workflows/stale.yml
@@ -0,0 +1,83 @@
+# See https://github.com/actions/stale
+
+name: Stale issues and pull-requests
+
+on:
+ schedule:
+ # Run once a day
+ # N.B. "should" be quoted, according to
+ # https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#onschedule
+ - cron: "0 0 * * *"
+
+jobs:
+ stale:
+ if: "github.repository == 'SciTools/iris'"
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/stale@v9
+ with:
+ repo-token: ${{ secrets.GITHUB_TOKEN }}
+
+ # Idle number of days before marking issues/prs stale.
+ days-before-stale: 500
+
+ # Idle number of days before closing stale issues/prs.
+ days-before-close: 28
+
+ # Comment on the staled issues.
+ stale-issue-message: |
+ In order to maintain a backlog of relevant issues, we automatically label them as stale after 500 days of inactivity.
+
+ If this issue is still important to you, then please comment on this issue and the stale label will be removed.
+
+ Otherwise this issue will be automatically closed in 28 days' time.
+
+ # Comment on the staled prs.
+ stale-pr-message: |
+ In order to maintain a backlog of relevant PRs, we automatically label them as stale after 500 days of inactivity.
+
+ If this PR is still important to you, then please comment on this PR and the stale label will be removed.
+
+ Otherwise this PR will be automatically closed in 28 days' time.
+
+ # Comment on the staled issues while closed.
+ close-issue-message: |
+ This stale issue has been automatically closed due to a lack of community activity.
+
+ If you still care about this issue, then please either:
+ * Re-open this issue, if you have sufficient permissions, or
+ * Add a comment stating that this is still relevant and someone will re-open it on your behalf.
+
+ # Comment on the staled prs while closed.
+ close-pr-message: |
+ This stale PR has been automatically closed due to a lack of community activity.
+
+ If you still care about this PR, then please either:
+ * Re-open this PR, if you have sufficient permissions, or
+ * Add a comment pinging `@SciTools/iris-devs` who will re-open on your behalf.
+
+ # Label to apply on staled issues.
+ stale-issue-label: Stale
+
+ # Label to apply on staled prs.
+ stale-pr-label: Stale
+
+ # Labels on issues exempted from stale.
+ exempt-issue-labels:
+ "Status: Blocked,Status: Decision Required,Peloton 🚴♂️,Good First Issue, Dragon 🐉, Dragon Sub-Task 🦎, Release: Major"
+
+ # Labels on prs exempted from stale.
+ exempt-pr-labels:
+ "Status: Blocked,Status: Decision Required,Peloton 🚴♂️,Good First Issue, Dragon 🐉, Dragon Sub-Task 🦎, Release: Major"
+
+ # Max number of operations per run.
+ operations-per-run: 300
+
+ # Remove stale label from issues/prs on updates/comments.
+ remove-stale-when-updated: true
+
+ # Order to get issues/PRs.
+ ascending: true
+
+ # Exempt all issues/prs with milestones from stale.
+ exempt-all-milestones: true
diff --git a/.gitignore b/.gitignore
index f0420cbc22..1b132cbd38 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,5 +1,8 @@
*.py[co]
+# setuptools-scm
+_version.py
+
# Environment file which should be autogenerated
*conda_requirements.txt*
@@ -15,6 +18,7 @@ var
sdist
develop-eggs
.installed.cfg
+.nox
# Installer logs
pip-log.txt
@@ -25,13 +29,17 @@ pip-cache
.tox
.pytest_cache
+# asv data, environments, results
+.asv
+benchmarks/.data
+.github/workflows/benchmark_reports
+
#Translations
*.mo
# Created by Iris build
*.so
lib/iris/etc/site.cfg
-lib/iris/fileformats/_pyke_rules/compiled_krb/
lib/iris/std_names.py
# Iris test result files
@@ -51,18 +59,23 @@ lib/iris/tests/results/imagerepo.lock
/.idea
*.cover
+# vscode files
+.vscode
+
# Auto generated documentation files
-docs/iris/src/_static/random_image.js
-docs/iris/src/_templates/gallery.html
-docs/iris/src/examples/
-docs/iris/src/iris/
-docs/iris/src/matplotlibrc
+docs/src/_build/*
+docs/src/generated
+docs/src/sg_execution_times.rst
# Example test results
-docs/iris/iris_image_test_output/
+docs/iris_image_test_output/
# Created by editiors
*~
\#*
\.\#*
*.swp
+.ipynb_checkpoints
+
+# Files generated during test runs.
+lib/iris/tests/results/**/*.dot
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000000..053e4f839a
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,78 @@
+# See https://pre-commit.com for more information
+# See https://pre-commit.com/hooks.html for more hooks
+
+files: |
+ (?x)(
+ noxfile\.py|
+ setup\.py|
+ docs\/.+\.py|
+ lib\/.+\.py|
+ benchmarks\/.+\.py
+ )
+minimum_pre_commit_version: 1.21.0
+
+repos:
+- repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v5.0.0
+ hooks:
+ # Prevent giant files from being committed.
+ - id: check-added-large-files
+ # Check whether files parse as valid Python.
+ - id: check-ast
+ # Check for file name conflicts on case-insensitive filesystems.
+ - id: check-case-conflict
+ # Check for files that contain merge conflict strings.
+ - id: check-merge-conflict
+ # Check for debugger imports and py37+ `breakpoint()` calls in Python source.
+ - id: debug-statements
+ # Don't commit to main branch.
+ - id: no-commit-to-branch
+
+- repo: https://github.com/astral-sh/ruff-pre-commit
+ rev: "v0.7.0"
+ hooks:
+ - id: ruff
+ types: [file, python]
+ args: [--fix, --show-fixes]
+ - id: ruff-format
+ types: [file, python]
+
+- repo: https://github.com/codespell-project/codespell
+ rev: "v2.3.0"
+ hooks:
+ - id: codespell
+ types_or: [asciidoc, python, markdown, rst]
+ additional_dependencies: [tomli]
+
+- repo: https://github.com/PyCQA/flake8
+ rev: 7.1.1
+ hooks:
+ - id: flake8
+ types: [file, python]
+
+- repo: https://github.com/asottile/blacken-docs
+ rev: 1.19.0
+ hooks:
+ - id: blacken-docs
+ types: [file, rst]
+
+- repo: https://github.com/aio-libs/sort-all
+ rev: v1.2.0
+ hooks:
+ - id: sort-all
+ types: [file, python]
+
+- repo: https://github.com/pre-commit/mirrors-mypy
+ rev: 'v1.12.1'
+ hooks:
+ - id: mypy
+ additional_dependencies:
+ - 'types-requests'
+ exclude: 'noxfile\.py|docs/src/conf\.py'
+
+- repo: https://github.com/numpy/numpydoc
+ rev: v1.8.0
+ hooks:
+ - id: numpydoc-validation
+ exclude: "^lib/iris/tests/|docs/gallery_code/"
+ types: [file, python]
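Once this file is in place the hooks are normally wired up once per clone, and can also be run on demand. A minimal sketch:

```
# Install the git hook so the checks run on every commit.
pre-commit install

# Run every configured hook against all matching files.
pre-commit run --all-files

# Run a single hook, e.g. only ruff.
pre-commit run ruff --all-files
```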
diff --git a/.readthedocs.yml b/.readthedocs.yml
new file mode 100644
index 0000000000..d82bd513ca
--- /dev/null
+++ b/.readthedocs.yml
@@ -0,0 +1,31 @@
+version: 2
+
+build:
+ os: "ubuntu-22.04"
+ tools:
+ python: "mambaforge-22.9"
+ jobs:
+ post_checkout:
+ # The SciTools/iris repository is shallow i.e., has a .git/shallow,
+ # therefore complete the repository with a full history in order
+ # to allow setuptools-scm to correctly auto-discover the version.
+ - git fetch --unshallow
+ - git fetch --all
+ # Need to stash the local changes that Read the Docs makes so that
+ # setuptools_scm can generate the correct Iris version.
+ pre_install:
+ - git stash
+ post_install:
+ - git stash pop
+
+conda:
+ environment: requirements/readthedocs.yml
+
+sphinx:
+ configuration: docs/src/conf.py
+ fail_on_warning: false
+
+python:
+ install:
+ - method: pip
+ path: .
diff --git a/.ruff.toml b/.ruff.toml
new file mode 100644
index 0000000000..5d78ecdb57
--- /dev/null
+++ b/.ruff.toml
@@ -0,0 +1,176 @@
+extend = "pyproject.toml"
+
+lint.ignore = [
+ # NOTE: To find a rule code to fix, run:
+ # ruff --select="ALL" --statistics lib/iris/
+
+ # Pyflakes (F)
+ # https://docs.astral.sh/ruff/rules/#pyflakes-f
+ "F",
+
+ # pycodestyle (E, W)
+ # https://docs.astral.sh/ruff/rules/#pycodestyle-e-w
+ "E",
+
+ # mccabe (C90)
+ # https://docs.astral.sh/ruff/rules/#mccabe-c90
+ "C90",
+
+ # pep8-naming (N)
+ # https://docs.astral.sh/ruff/rules/#pep8-naming-n
+ "N",
+
+ # pydocstyle (D)
+ # https://docs.astral.sh/ruff/rules/#pydocstyle-d
+ # (D-1) Permanent
+ "D105", # Missing docstring in magic method
+ # (D-2) Temporary, to be removed when we are more compliant. Rare cases move to (1).
+ "D101", # Missing docstring in public class
+ "D102", # Missing docstring in public method
+ # (D-3) Temporary, before an initial review, either fix occurrences or move to (2).
+ "D103", # Missing docstring in public function
+
+ # pyupgrade (UP)
+ # https://docs.astral.sh/ruff/rules/#pyupgrade-up
+ "UP",
+
+ # flake8-annotations (ANN)
+ # https://docs.astral.sh/ruff/rules/#flake8-annotations-ann
+ "ANN",
+
+ # flake8-bandit (S)
+ # https://docs.astral.sh/ruff/rules/#flake8-bandit-s
+ "S",
+
+ # flake8-blind-except (BLE)
+ # https://docs.astral.sh/ruff/rules/#flake8-blind-except-ble
+ "BLE",
+
+ # flake8-boolean-trap (FBT)
+ # https://docs.astral.sh/ruff/rules/#flake8-boolean-trap-fbt
+ "FBT",
+
+ # flake8-bugbear (B)
+ # https://docs.astral.sh/ruff/rules/#flake8-bugbear-b
+ "B",
+
+ # flake8-builtins (A)
+ # https://docs.astral.sh/ruff/rules/#flake8-builtins-a
+ "A",
+
+ # flake8-comprehensions (C4)
+ # https://docs.astral.sh/ruff/rules/#flake8-comprehensions-c4
+ "C4",
+
+ # flake8-datetimez (DTZ)
+ # https://docs.astral.sh/ruff/rules/#flake8-datetimez-dtz
+ "DTZ",
+
+ # flake8-errmsg (EM)
+ # https://docs.astral.sh/ruff/rules/#flake8-errmsg-em
+ "EM",
+
+ # flake8-future-annotations (FA)
+ # https://docs.astral.sh/ruff/rules/#flake8-future-annotations-fa
+ "FA",
+
+ # flake8-logging-format (G)
+ # https://docs.astral.sh/ruff/rules/#flake8-logging-format-g
+ "G",
+
+ # flake8-no-pep420 (INP)
+ # https://docs.astral.sh/ruff/rules/#flake8-no-pep420-inp
+ "INP",
+
+ # flake8-pie (PIE)
+ # https://docs.astral.sh/ruff/rules/#flake8-pie-pie
+ "PIE",
+
+ # flake8-print (T20)
+ # https://docs.astral.sh/ruff/rules/#flake8-print-t20
+ "T20",
+
+ # flake8-pyi (PYI)
+ # https://docs.astral.sh/ruff/rules/#flake8-pyi-pyi
+ "PYI",
+
+ # flake8-pytest-style (PT)
+ # https://docs.astral.sh/ruff/rules/#flake8-pytest-style-pt
+ "PT",
+
+ # flake8-raise (RSE)
+ # https://docs.astral.sh/ruff/rules/#flake8-raise-rse
+ "RSE",
+
+ # flake8-return (RET)
+ # https://docs.astral.sh/ruff/rules/#flake8-return-ret
+ "RET",
+
+ # flake8-self (SLF)
+ # https://docs.astral.sh/ruff/rules/#flake8-self-slf
+ "SLF",
+
+ # flake8-slots (SLOT)
+ # https://docs.astral.sh/ruff/rules/#flake8-slots-slot
+ "SLOT",
+
+ # flake8-simplify (SIM)
+ # https://docs.astral.sh/ruff/rules/#flake8-simplify-sim
+ "SIM",
+
+ # flake8-tidy-imports (TID)
+ # https://docs.astral.sh/ruff/rules/#flake8-tidy-imports-tid
+ "TID",
+
+ # flake8-type-checking (TCH)
+ # https://docs.astral.sh/ruff/rules/#flake8-type-checking-tch
+ "TCH",
+
+ # flake8-unused-arguments (ARG)
+ # https://docs.astral.sh/ruff/rules/#flake8-unused-arguments-arg
+ "ARG",
+
+ # flake8-use-pathlib (PTH)
+ # https://docs.astral.sh/ruff/rules/#flake8-use-pathlib-pth
+ "PTH",
+
+ # flake8-todos (TD)
+ # https://docs.astral.sh/ruff/rules/#flake8-todos-td
+ "TD",
+
+ # flake8-fixme (FIX)
+ # https://docs.astral.sh/ruff/rules/#flake8-fixme-fix
+ "FIX",
+
+ # eradicate (ERA)
+ # https://docs.astral.sh/ruff/rules/#eradicate-era
+ "ERA",
+
+ # pandas-vet (PD)
+ # https://docs.astral.sh/ruff/rules/#pandas-vet-pd
+ "PD",
+
+ # pygrep-hooks (PGH)
+ # https://docs.astral.sh/ruff/rules/#pygrep-hooks-pgh
+ "PGH",
+
+ # Pylint (PL)
+ # https://docs.astral.sh/ruff/rules/#pylint-pl
+ "PL",
+
+ # tryceratops (TRY)
+ # https://docs.astral.sh/ruff/rules/#tryceratops-try
+ "TRY",
+
+ # flynt (FLY)
+ # https://docs.astral.sh/ruff/rules/#flynt-fly
+ "FLY",
+
+ # Perflint (PERF)
+ # https://docs.astral.sh/ruff/rules/#perflint-perf
+ "PERF",
+
+ # Ruff-specific rules (RUF)
+ # https://docs.astral.sh/ruff/rules/#ruff-specific-rules-ruf
+ "RUF",
+]
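Because of the `extend = "pyproject.toml"` line, this file layers the ignore list above on top of whatever ruff settings already live in pyproject.toml, and ruff picks the combined configuration up automatically from the repository root. A quick sketch of a local run (paths are examples):

```
# Lint the package with the combined pyproject.toml + .ruff.toml settings.
ruff check lib/iris

# The note at the top of this file suggests a rule-by-rule survey:
ruff check --select ALL --statistics lib/iris
```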
diff --git a/.stickler.yml b/.stickler.yml
deleted file mode 100644
index 31d097914e..0000000000
--- a/.stickler.yml
+++ /dev/null
@@ -1,6 +0,0 @@
-linters:
- flake8:
-
-files:
- ignore:
- - 'lib/iris/fileformats/um_cf_map.py'
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index 32e596e1c1..0000000000
--- a/.travis.yml
+++ /dev/null
@@ -1,168 +0,0 @@
-# Please update the test data git references below if appropriate.
-#
-# Note: Contrary to the travis documentation,
-# http://about.travis-ci.org/docs/user/languages/python/#Travis-CI-Uses-Isolated-virtualenvs
-# we will use conda to give us a much faster setup time.
-
-
-language: minimal
-dist: xenial
-
-env:
- global:
- # The decryption key for the encrypted .github/deploy_key.scitools-docs.enc.
- - secure: "N9/qBUT5CqfC7KQBDy5mIWZcGNuUJk3e/qmKJpotWYV+zwOI4GghJsRce6nFnlRiwl65l5oBEcvf3+sBvUfbZqh7U0MdHpw2tHhr2FSCmMB3bkvARZblh9M37f4da9G9VmRkqnyBM5G5TImXtoq4dusvNWKvLW0qETciaipq7ws="
- matrix:
- - PYTHON_VERSION=3.6 TEST_TARGET=default TEST_MINIMAL=true
- - PYTHON_VERSION=3.6 TEST_TARGET=default
- - PYTHON_VERSION=3.6 TEST_TARGET=example
-
- - PYTHON_VERSION=3.7 TEST_TARGET=default TEST_MINIMAL=true
- - PYTHON_VERSION=3.7 TEST_TARGET=default
- - PYTHON_VERSION=3.7 TEST_TARGET=example
- - PYTHON_VERSION=3.7 TEST_TARGET=doctest PUSH_BUILT_DOCS=true
-
-git:
- # We need a deep clone so that we can compute the age of the files using their git history.
- depth: 10000
-
-install:
- - >
- export IRIS_TEST_DATA_REF="1696ac3a823a06b95f430670f285ee97671d2cf2";
- export IRIS_TEST_DATA_SUFFIX=$(echo "${IRIS_TEST_DATA_REF}" | sed "s/^v//");
-
- # Install miniconda
- # -----------------
- - >
- echo 'Installing miniconda';
- export CONDA_BASE="https://repo.continuum.io/miniconda/Miniconda";
- wget --quiet ${CONDA_BASE}3-latest-Linux-x86_64.sh -O miniconda.sh;
- bash miniconda.sh -b -p ${HOME}/miniconda;
- export PATH="${HOME}/miniconda/bin:${PATH}";
-
- # Create the basic testing environment
- # ------------------------------------
- # Explicitly add defaults channel, see https://github.com/conda/conda/issues/2675
- - >
- echo 'Configure conda and create an environment';
- conda config --set always_yes yes --set changeps1 no;
- conda config --set show_channel_urls True;
- conda config --add channels conda-forge;
- conda update --quiet conda;
- ENV_NAME='test-environment';
- conda create --quiet -n ${ENV_NAME} python=${PYTHON_VERSION} pip;
- source activate ${ENV_NAME};
-
- # Customise the testing environment
- # ---------------------------------
- - >
- echo 'Install Iris dependencies';
- CONDA_REQS_GROUPS="test";
- if [[ "${TEST_MINIMAL}" != true ]]; then
- CONDA_REQS_GROUPS="${CONDA_REQS_GROUPS} all";
- fi;
- if [[ "${TEST_TARGET}" == 'doctest' ]]; then
- CONDA_REQS_GROUPS="${CONDA_REQS_GROUPS} docs";
- fi;
- CONDA_REQS_FILE="conda-requirements.txt";
- python requirements/gen_conda_requirements.py --groups ${CONDA_REQS_GROUPS} > ${CONDA_REQS_FILE};
- cat ${CONDA_REQS_FILE};
- conda install --quiet -n ${ENV_NAME} --file ${CONDA_REQS_FILE};
-
- - PREFIX="${HOME}/miniconda/envs/${ENV_NAME}"
-
- # Output debug info
- - >
- conda list -n ${ENV_NAME};
- conda list -n ${ENV_NAME} --explicit;
- conda info -a;
-
-# Pre-load Natural Earth data to avoid multiple, overlapping downloads.
-# i.e. There should be no DownloadWarning reports in the log.
- - python -c 'import cartopy; cartopy.io.shapereader.natural_earth()'
-
-# iris test data
- - >
- if [[ "${TEST_MINIMAL}" != true ]]; then
- wget --quiet -O iris-test-data.zip https://github.com/SciTools/iris-test-data/archive/${IRIS_TEST_DATA_REF}.zip;
- unzip -q iris-test-data.zip;
- mv "iris-test-data-${IRIS_TEST_DATA_SUFFIX}" iris-test-data;
- fi
-
-# set config paths
- - >
- SITE_CFG="lib/iris/etc/site.cfg";
- echo "[Resources]" > ${SITE_CFG};
- echo "test_data_dir = $(pwd)/iris-test-data/test_data" >> ${SITE_CFG};
- echo "doc_dir = $(pwd)/docs/iris" >> ${SITE_CFG};
- echo "[System]" >> ${SITE_CFG};
- echo "udunits2_path = ${PREFIX}/lib/libudunits2.so" >> ${SITE_CFG};
-
- - python setup.py --quiet install
-
- # TODO : remove when iris doesn't do an integration test requiring iris-grib.
-# TODO: uncomment and address the 5 failures and 10 errors in iris-grib.
-# - if [[ "${TEST_MINIMAL}" != true ]]; then
-# conda install --quiet -n ${ENV_NAME} python-eccodes;
-# conda install --quiet -n ${ENV_NAME} --no-deps iris-grib;
-# fi
-
-script:
- # Capture install-dir: As a test command must be last for get Travis to check
- # the RC, so it's best to start each operation with an absolute cd.
- - INSTALL_DIR=$(pwd)
-
- - >
- if [[ ${TEST_TARGET} == 'default' ]]; then
- export IRIS_REPO_DIR=${INSTALL_DIR};
- python -m iris.tests.runner --default-tests --system-tests --print-failed-images;
- fi
-
- - if [[ ${TEST_TARGET} == 'example' ]]; then
- python -m iris.tests.runner --example-tests --print-failed-images;
- fi
-
- # A call to check "whatsnew" contributions are valid, because the Iris test
- # for it needs a *developer* install to be able to find the docs.
- - if [[ ${TEST_TARGET} == 'doctest' ]]; then
- cd ${INSTALL_DIR}/docs/iris/src/whatsnew;
- python aggregate_directory.py --checkonly;
- fi
-
- # When pushing built docs, attempt to make a preliminary whatsnew by calling
- # 'aggregate_directory.py', before the build.
- - >
- if [[ ${PUSH_BUILT_DOCS} == 'true' ]]; then
- cd ${INSTALL_DIR}/docs/iris/src/whatsnew;
- WHATSNEW=$(ls -d contributions_* 2>/dev/null);
- if [[ "$WHATSNEW" != "" ]]; then
- python aggregate_directory.py --unreleased;
- fi;
- fi
-
- # Build the docs.
- - >
- if [[ ${TEST_TARGET} == 'doctest' ]]; then
- MPL_RC_DIR="${HOME}/.config/matplotlib";
- mkdir -p ${MPL_RC_DIR};
- echo 'backend : agg' > ${MPL_RC_DIR}/matplotlibrc;
- echo 'image.cmap : viridis' >> ${MPL_RC_DIR}/matplotlibrc;
- cd ${INSTALL_DIR}/docs/iris;
- make clean html && make doctest;
- fi
-
- # Split the organisation out of the slug. See https://stackoverflow.com/a/5257398/741316 for description.
- - ORG=(${TRAVIS_REPO_SLUG//\// })
-
- # When we merge a change to SciTools/iris, we can push docs to github pages.
- # At present, only the Python 3.7 "doctest" job does this.
- # Results appear at https://scitools-docs.github.io/iris/<>/index.html
- - if [[ ${ORG} == "SciTools" && ${TRAVIS_EVENT_TYPE} == 'push' && ${PUSH_BUILT_DOCS} == 'true' ]]; then
- cd ${INSTALL_DIR};
- pip install doctr;
- doctr deploy --deploy-repo SciTools-docs/iris --built-docs docs/iris/build/html
- --key-path .github/deploy_key.scitools-docs.enc
- --no-require-master
- ${TRAVIS_BRANCH:-${TRAVIS_TAG}};
- fi
-
diff --git a/CHANGES b/CHANGES
index 2364de84a4..b3916a97b6 100644
--- a/CHANGES
+++ b/CHANGES
@@ -1,5 +1,5 @@
This file is no longer updated and is provided for historical purposes only.
-Please see docs/iris/src/whatsnew/ for a changelog.
+Please see docs/src/whatsnew/ for a changelog.
Release 1.4 (14 June 2013)
@@ -373,7 +373,7 @@ Features added
Incompatible changes
--------------------
-* The Iris data model is now fully aligned with the `CF data model `_ .
+* The Iris data model is now fully aligned with the `CF data model `_ .
Iris remains file-format independent, as is the underlying CF data model.
* Cube merging has been re-written for the new CF data model with the benefit that some corner cases are now better handled.
Some users may find that their cubes, once merged, now have a smaller total shape and more intelligent handling of dimension coordinate picking.
@@ -433,7 +433,7 @@ Features added
given cubes (see :func:`iris.iterate.izip`).
* Cell methods will now appear in the printout of a cube.
* Supporting software dependency versions have been updated. Of particular note is matplotlib which has gone from version 1.0.1
- up to `1.1.0 `_ . This may have a small impact on
+ up to `1.1.0 `_ . This may have a small impact on
some plot element placements.
Incompatible changes
diff --git a/CITATION.cff b/CITATION.cff
new file mode 100644
index 0000000000..c3fcdd26d5
--- /dev/null
+++ b/CITATION.cff
@@ -0,0 +1,23 @@
+cff-version: 1.2.0
+message: "If Iris played an important part in your research then please add us to your reference list by using the references below."
+title: "Iris"
+keywords:
+ - "cf-metadata"
+ - "data-analysis"
+ - "earth-science"
+ - "grib"
+ - "netcdf"
+ - "meteorology"
+ - "oceanography"
+ - "space-weather"
+ - "ugrid"
+ - "visualisation"
+authors:
+ - name: "Iris contributors"
+abstract: "A powerful, format-agnostic, and community-driven Python package for analysing and visualising Earth science data"
+license: "BSD-3-Clause"
+license-url: "https://spdx.org/licenses/BSD-3-Clause.html"
+doi: "10.5281/zenodo.595182"
+url: "http://scitools.org.uk/"
+repository-code: "https://github.com/SciTools/iris"
+type: "software"
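(CITATION.cff, added above, is plain YAML following the Citation File Format, so it can be read with ordinary YAML tooling. The sketch below is hedged — it assumes PyYAML is available and uses only the fields defined in the file above — and turns the metadata into a one-line citation string.)

    # Sketch: read CITATION.cff (plain YAML) and print a simple citation line.
    # Assumes PyYAML ("import yaml") is installed.
    import yaml

    with open("CITATION.cff") as fh:
        cff = yaml.safe_load(fh)

    authors = ", ".join(a.get("name", "") for a in cff.get("authors", []))
    print(f"{authors}. {cff['title']}. doi:{cff['doi']}. {cff['url']}")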
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..bb040d21c5
--- /dev/null
+++ b/CODE_OF_CONDUCT.md
@@ -0,0 +1,133 @@
+
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+We as members, contributors, and leaders pledge to make participation in our
+community a harassment-free experience for everyone, regardless of age, body
+size, visible or invisible disability, ethnicity, sex characteristics, gender
+identity and expression, level of experience, education, socio-economic status,
+nationality, personal appearance, race, caste, color, religion, or sexual
+identity and orientation.
+
+We pledge to act and interact in ways that contribute to an open, welcoming,
+diverse, inclusive, and healthy community.
+
+## Our Standards
+
+Examples of behavior that contributes to a positive environment for our
+community include:
+
+* Demonstrating empathy and kindness toward other people
+* Being respectful of differing opinions, viewpoints, and experiences
+* Giving and gracefully accepting constructive feedback
+* Accepting responsibility and apologizing to those affected by our mistakes,
+ and learning from the experience
+* Focusing on what is best not just for us as individuals, but for the overall
+ community
+
+Examples of unacceptable behavior include:
+
+* The use of sexualized language or imagery, and sexual attention or advances of
+ any kind
+* Trolling, insulting or derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or email address,
+ without their explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Enforcement Responsibilities
+
+Community leaders are responsible for clarifying and enforcing our standards of
+acceptable behavior and will take appropriate and fair corrective action in
+response to any behavior that they deem inappropriate, threatening, offensive,
+or harmful.
+
+Community leaders have the right and responsibility to remove, edit, or reject
+comments, commits, code, wiki edits, issues, and other contributions that are
+not aligned to this Code of Conduct, and will communicate reasons for moderation
+decisions when appropriate.
+
+## Scope
+
+This Code of Conduct applies within all community spaces, and also applies when
+an individual is officially representing the community in public spaces.
+Examples of representing our community include using an official e-mail address,
+posting via an official social media account, or acting as an appointed
+representative at an online or offline event.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported to the community leaders responsible for enforcement at
+scitools.pub@gmail.com.
+All complaints will be reviewed and investigated promptly and fairly.
+
+All community leaders are obligated to respect the privacy and security of the
+reporter of any incident.
+
+## Enforcement Guidelines
+
+Community leaders will follow these Community Impact Guidelines in determining
+the consequences for any action they deem in violation of this Code of Conduct:
+
+### 1. Correction
+
+**Community Impact**: Use of inappropriate language or other behavior deemed
+unprofessional or unwelcome in the community.
+
+**Consequence**: A private, written warning from community leaders, providing
+clarity around the nature of the violation and an explanation of why the
+behavior was inappropriate. A public apology may be requested.
+
+### 2. Warning
+
+**Community Impact**: A violation through a single incident or series of
+actions.
+
+**Consequence**: A warning with consequences for continued behavior. No
+interaction with the people involved, including unsolicited interaction with
+those enforcing the Code of Conduct, for a specified period of time. This
+includes avoiding interactions in community spaces as well as external channels
+like social media. Violating these terms may lead to a temporary or permanent
+ban.
+
+### 3. Temporary Ban
+
+**Community Impact**: A serious violation of community standards, including
+sustained inappropriate behavior.
+
+**Consequence**: A temporary ban from any sort of interaction or public
+communication with the community for a specified period of time. No public or
+private interaction with the people involved, including unsolicited interaction
+with those enforcing the Code of Conduct, is allowed during this period.
+Violating these terms may lead to a permanent ban.
+
+### 4. Permanent Ban
+
+**Community Impact**: Demonstrating a pattern of violation of community
+standards, including sustained inappropriate behavior, harassment of an
+individual, or aggression toward or disparagement of classes of individuals.
+
+**Consequence**: A permanent ban from any sort of public interaction within the
+community.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage],
+version 2.1, available at
+[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].
+
+Community Impact Guidelines were inspired by
+[Mozilla's code of conduct enforcement ladder][Mozilla CoC].
+
+For answers to common questions about this code of conduct, see the FAQ at
+[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at
+[https://www.contributor-covenant.org/translations][translations].
+
+[homepage]: https://www.contributor-covenant.org
+[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
+[Mozilla CoC]: https://github.com/mozilla/diversity
+[FAQ]: https://www.contributor-covenant.org/faq
+[translations]: https://www.contributor-covenant.org/translations
diff --git a/COPYING b/COPYING
deleted file mode 100644
index 94a9ed024d..0000000000
--- a/COPYING
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc.
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
-
- Copyright (C)
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see .
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- Copyright (C)
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-.
diff --git a/COPYING.LESSER b/COPYING.LESSER
deleted file mode 100644
index 65c5ca88a6..0000000000
--- a/COPYING.LESSER
+++ /dev/null
@@ -1,165 +0,0 @@
- GNU LESSER GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc.
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
-
- This version of the GNU Lesser General Public License incorporates
-the terms and conditions of version 3 of the GNU General Public
-License, supplemented by the additional permissions listed below.
-
- 0. Additional Definitions.
-
- As used herein, "this License" refers to version 3 of the GNU Lesser
-General Public License, and the "GNU GPL" refers to version 3 of the GNU
-General Public License.
-
- "The Library" refers to a covered work governed by this License,
-other than an Application or a Combined Work as defined below.
-
- An "Application" is any work that makes use of an interface provided
-by the Library, but which is not otherwise based on the Library.
-Defining a subclass of a class defined by the Library is deemed a mode
-of using an interface provided by the Library.
-
- A "Combined Work" is a work produced by combining or linking an
-Application with the Library. The particular version of the Library
-with which the Combined Work was made is also called the "Linked
-Version".
-
- The "Minimal Corresponding Source" for a Combined Work means the
-Corresponding Source for the Combined Work, excluding any source code
-for portions of the Combined Work that, considered in isolation, are
-based on the Application, and not on the Linked Version.
-
- The "Corresponding Application Code" for a Combined Work means the
-object code and/or source code for the Application, including any data
-and utility programs needed for reproducing the Combined Work from the
-Application, but excluding the System Libraries of the Combined Work.
-
- 1. Exception to Section 3 of the GNU GPL.
-
- You may convey a covered work under sections 3 and 4 of this License
-without being bound by section 3 of the GNU GPL.
-
- 2. Conveying Modified Versions.
-
- If you modify a copy of the Library, and, in your modifications, a
-facility refers to a function or data to be supplied by an Application
-that uses the facility (other than as an argument passed when the
-facility is invoked), then you may convey a copy of the modified
-version:
-
- a) under this License, provided that you make a good faith effort to
- ensure that, in the event an Application does not supply the
- function or data, the facility still operates, and performs
- whatever part of its purpose remains meaningful, or
-
- b) under the GNU GPL, with none of the additional permissions of
- this License applicable to that copy.
-
- 3. Object Code Incorporating Material from Library Header Files.
-
- The object code form of an Application may incorporate material from
-a header file that is part of the Library. You may convey such object
-code under terms of your choice, provided that, if the incorporated
-material is not limited to numerical parameters, data structure
-layouts and accessors, or small macros, inline functions and templates
-(ten or fewer lines in length), you do both of the following:
-
- a) Give prominent notice with each copy of the object code that the
- Library is used in it and that the Library and its use are
- covered by this License.
-
- b) Accompany the object code with a copy of the GNU GPL and this license
- document.
-
- 4. Combined Works.
-
- You may convey a Combined Work under terms of your choice that,
-taken together, effectively do not restrict modification of the
-portions of the Library contained in the Combined Work and reverse
-engineering for debugging such modifications, if you also do each of
-the following:
-
- a) Give prominent notice with each copy of the Combined Work that
- the Library is used in it and that the Library and its use are
- covered by this License.
-
- b) Accompany the Combined Work with a copy of the GNU GPL and this license
- document.
-
- c) For a Combined Work that displays copyright notices during
- execution, include the copyright notice for the Library among
- these notices, as well as a reference directing the user to the
- copies of the GNU GPL and this license document.
-
- d) Do one of the following:
-
- 0) Convey the Minimal Corresponding Source under the terms of this
- License, and the Corresponding Application Code in a form
- suitable for, and under terms that permit, the user to
- recombine or relink the Application with a modified version of
- the Linked Version to produce a modified Combined Work, in the
- manner specified by section 6 of the GNU GPL for conveying
- Corresponding Source.
-
- 1) Use a suitable shared library mechanism for linking with the
- Library. A suitable mechanism is one that (a) uses at run time
- a copy of the Library already present on the user's computer
- system, and (b) will operate properly with a modified version
- of the Library that is interface-compatible with the Linked
- Version.
-
- e) Provide Installation Information, but only if you would otherwise
- be required to provide such information under section 6 of the
- GNU GPL, and only to the extent that such information is
- necessary to install and execute a modified version of the
- Combined Work produced by recombining or relinking the
- Application with a modified version of the Linked Version. (If
- you use option 4d0, the Installation Information must accompany
- the Minimal Corresponding Source and Corresponding Application
- Code. If you use option 4d1, you must provide the Installation
- Information in the manner specified by section 6 of the GNU GPL
- for conveying Corresponding Source.)
-
- 5. Combined Libraries.
-
- You may place library facilities that are a work based on the
-Library side by side in a single library together with other library
-facilities that are not Applications and are not covered by this
-License, and convey such a combined library under terms of your
-choice, if you do both of the following:
-
- a) Accompany the combined library with a copy of the same work based
- on the Library, uncombined with any other library facilities,
- conveyed under the terms of this License.
-
- b) Give prominent notice with the combined library that part of it
- is a work based on the Library, and explaining where to find the
- accompanying uncombined form of the same work.
-
- 6. Revised Versions of the GNU Lesser General Public License.
-
- The Free Software Foundation may publish revised and/or new versions
-of the GNU Lesser General Public License from time to time. Such new
-versions will be similar in spirit to the present version, but may
-differ in detail to address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Library as you received it specifies that a certain numbered version
-of the GNU Lesser General Public License "or any later version"
-applies to it, you have the option of following the terms and
-conditions either of that published version or of any later version
-published by the Free Software Foundation. If the Library as you
-received it does not specify a version number of the GNU Lesser
-General Public License, you may choose any version of the GNU Lesser
-General Public License ever published by the Free Software Foundation.
-
- If the Library as you received it specifies that a proxy can decide
-whether future versions of the GNU Lesser General Public License shall
-apply, that proxy's public statement of acceptance of any version is
-permanent authorization for you to choose that version for the
-Library.
diff --git a/INSTALL b/INSTALL
deleted file mode 100644
index 9296f97a29..0000000000
--- a/INSTALL
+++ /dev/null
@@ -1,95 +0,0 @@
-You can either install Iris using the conda package manager or from source.
-
-Installing using conda
-----------------------
-
-Iris is available using conda for the following platforms:
- * Linux 64-bit,
- * Mac OSX 64-bit, and
- * Windows 32-bit and 64-bit.
-
-To install Iris using conda, you must first download and install conda,
-for example from http://conda.pydata.org/miniconda.html.
-
-Once conda is installed, you can install Iris using conda on any platform with
-the following command::
-
- conda install -c conda-forge iris
-
-If you wish to run any of the code examples
-(see http://scitools.org.uk/iris/docs/latest/examples/index.html) you will also
-need the Iris sample data. This can also be installed using conda::
-
- conda install -c conda-forge iris-sample-data
-
-Further documentation on using conda and the features it provides can be found
-at http://conda.pydata.org/docs/intro.html.
-
-
-Installing from source
-----------------------
-
-The latest Iris source release is available from
-https://github.com/SciTools/iris.
-
-Iris makes use of a range of other libraries and python modules. These
-dependencies must be in place before you can successfully install
-Iris. Once you have satisfied the requirements detailed in the
-``requirements`` directory, go to the root of the Iris source tree and run::
-
- pip install .
-
-
-In-place build - an alternative for developers
-==============================================
-We are very keen to encourage contributions to Iris. For this type of
-development activity an in-place build can be useful. Once you've cloned
-the Iris git repository you can perform an in-place build with::
-
- pip install -e .
-
-
-Generating conda requirements
-'''''''''''''''''''''''''''''
-
-Requirements for Iris are stored in the ``requirements`` directory in the root of the source repository.
-It is possible to generate a requirements file suitable for use with conda::
-
- python requirements/gen_conda_requirements.py > conda_requirements.txt
-
-This may then be installed with::
-
- conda create -n my_iris_env --file conda_requirements.txt
-
-Alternatively, a full requirements file that includes all optional dependencies can be produced with::
-
- python requirements/gen_conda_requirements.py --groups all > conda_requirements.txt
-
-
-Running the tests
-'''''''''''''''''
-
-In order to run the tests, you will need to use the `test` and `docs` groups (we include the `docs` group so that you can run the pull request tests locally).
-Hence the commands change to::
-
- python requirements/gen_conda_requirements.py --groups test docs > conda_requirements.txt
- conda create -n my_iris_env -c conda-forge --file conda_requirements.txt
- conda activate my_iris_env # or whatever other name you gave it
- pip install -e .
-
-The tests can then be run with::
-
- python setup.py test
-
-
-Custom site configuration
-=========================
-The default site configuration values can be overridden by creating the file
-``iris/etc/site.cfg``. For example, the following snippet can be used to
-specify a non-standard location for your dot executable::
-
- [System]
- dot_path = /usr/bin/dot
-
-An example configuration file is available in ``iris/etc/site.cfg.template``.
-See :py:func:`iris.config` for further configuration options.
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000000..2d1d23e16c
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,29 @@
+BSD 3-Clause License
+
+Copyright (c) 2010, Met Office.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/MANIFEST.in b/MANIFEST.in
index efd0534863..e594303d8f 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,21 +1,33 @@
-# Top-level files
-include CHANGES COPYING COPYING.LESSER INSTALL
+prune .github
+prune benchmarks
+prune docs
+prune etc
+recursive-include lib *.cdl *.cml *.json *.md *.py *.template *.txt *.xml
+prune requirements
+recursive-include requirements *.txt
+prune tools
+exclude .flake8
+exclude .git-blame-ignore-revs
+exclude .git_archival.txt
+exclude .gitattributes
+exclude .gitignore
+exclude .mailmap
+exclude .pre-commit-config.yaml
+exclude .readthedocs.yml
+exclude .ruff.toml
+exclude CHANGES
+exclude CODE_OF_CONDUCT.md
+exclude codecov.yml
+include COPYING
+include COPYING.LESSER
+include CITATION.cff
+include LICENSE
+exclude Makefile
+exclude noxfile.py
-# Files from setup.py package_data that are not automatically added to source distributions
-recursive-include lib/iris/tests/results *.cml *.cdl *.txt *.xml *.json
-recursive-include lib/iris/etc *
-include lib/iris/fileformats/_pyke_rules/*.k?b
-include lib/iris/tests/stock*.npz
-
-include requirements/*.txt
-
-# File required to build docs
-recursive-include docs Makefile *.js *.png *.py *.rst
-prune docs/iris/build
-
-# Files required to build std_names module
-include tools/generate_std_names.py
+# files required to build iris.std_names module
include etc/cf-standard-name-table.xml
+include tools/generate_std_names.py
-global-exclude *.pyc
+global-exclude *.py[cod]
global-exclude __pycache__
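(The reworked MANIFEST.in above prunes most of the repository and then explicitly re-includes what should ship in a source distribution. A quick sanity check is to inspect a built sdist; the sketch below assumes one already exists under dist/ — for example from "python -m build --sdist" — and uses only the standard library. The filenames checked are taken from the include rules above.)

    # Sketch: confirm that files named by the MANIFEST.in rules end up in the sdist.
    # Assumes a previously built sdist tarball in dist/ (an assumption, not part of
    # the change above).
    import glob
    import tarfile

    sdist = sorted(glob.glob("dist/*.tar.gz"))[-1]
    with tarfile.open(sdist) as tar:
        names = tar.getnames()

    for fragment in ("LICENSE", "CITATION.cff", "etc/cf-standard-name-table.xml"):
        found = any(name.endswith(fragment) for name in names)
        print(f"{fragment}: {'found' if found else 'MISSING'}")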
diff --git a/Makefile b/Makefile
new file mode 100755
index 0000000000..0bb56edbf9
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,2 @@
+lockfiles:
+ python tools/update_lockfiles.py -o requirements/locks requirements/py*.yml
\ No newline at end of file
diff --git a/README.md b/README.md
index fb8660f2ad..7eec86c6da 100644
--- a/README.md
+++ b/README.md
@@ -1,120 +1,49 @@
- Iris is a powerful, format-agnostic, community-driven Python library for
+ Iris is a powerful, format-agnostic, community-driven Python package for
analysing and visualising Earth science data
+For documentation see the latest developer version or the most recent released stable version.
Table of contents
-
-[](TOC)
-
-+ [Overview](#overview)
-+ [Documentation](#documentation)
-+ [Installation](#installation)
-+ [Copyright and licence](#copyright-and-licence)
-+ [Get in touch](#get-in-touch)
-+ [Contributing](#contributing)
-
-[](TOC)
-
-# Overview
-
-Iris implements a data model based on the [CF conventions](http://cfconventions.org/)
-giving you a powerful, format-agnostic interface for working with your data.
-It excels when working with multi-dimensional Earth Science data, where tabular
-representations become unwieldy and inefficient.
-
-[CF Standard names](http://cfconventions.org/standard-names.html),
-[units](https://github.com/SciTools/cf_units), and coordinate metadata
-are built into Iris, giving you a rich and expressive interface for maintaining
-an accurate representation of your data. Its treatment of data and
- associated metadata as first-class objects includes:
-
- * a visualisation interface based on [matplotlib](https://matplotlib.org/) and
- [cartopy](https://scitools.org.uk/cartopy/docs/latest/),
- * unit conversion,
- * subsetting and extraction,
- * merge and concatenate,
- * aggregations and reductions (including min, max, mean and weighted averages),
- * interpolation and regridding (including nearest-neighbor, linear and area-weighted), and
- * operator overloads (``+``, ``-``, ``*``, ``/``, etc.)
-
-A number of file formats are recognised by Iris, including CF-compliant NetCDF, GRIB,
-and PP, and it has a plugin architecture to allow other formats to be added seamlessly.
-
-Building upon [NumPy](http://www.numpy.org/) and [dask](https://dask.pydata.org/en/latest/),
-Iris scales from efficient single-machine workflows right through to multi-core clusters and HPC.
-Interoperability with packages from the wider scientific Python ecosystem comes from Iris'
-use of standard NumPy/dask arrays as its underlying data storage.
+## [#ShowYourStripes](https://showyourstripes.info/s/globe)
-# Documentation
-
- The documentation for *stable released versions* of Iris, including a user guide, example code, and gallery.
-
- The documentation for the *latest development version* of Iris.
-
-
-# Installation
-
-The easiest way to install Iris is with [conda](https://conda.io/miniconda.html):
-
- conda install -c conda-forge iris
-
-Detailed instructions, including information on installing from source,
-are available in [INSTALL](INSTALL).
-
-# Get in touch
-
- * Report bugs, or suggest new features using an Issue or Pull Request on [Github](https://github.com/SciTools/iris). You can also comment on existing Issues and Pull Requests.
- * For discussions from a user perspective you could join our [SciTools Users Google Group](https://groups.google.com/forum/#!forum/scitools-iris).
- * For those involved in developing Iris we also have an [Iris Developers Google Group](https://groups.google.com/forum/#!forum/scitools-iris-dev).
- * [StackOverflow](https://stackoverflow.com/questions/tagged/python-iris) For "How do I?".
+
-# Copyright and licence
+**Graphics and Lead Scientist**: [Ed Hawkins](https://www.met.reading.ac.uk/~ed/home/index.php), National Centre for Atmospheric Science, University of Reading.
-Iris may be freely distributed, modified and used commercially under the terms
-of its [GNU LGPLv3 license](COPYING.LESSER).
+**Data**: Berkeley Earth, NOAA, UK Met Office, MeteoSwiss, DWD, SMHI, UoR, Meteo France & ZAMG.
-# Contributing
-Information on how to contribute can be found in the [Iris developer guide](https://scitools.org.uk/iris/docs/latest/developers_guide/index.html).
+
-(C) British Crown Copyright 2010 - 2019, Met Office
diff --git a/benchmarks/README.md b/benchmarks/README.md
new file mode 100644
index 0000000000..911d5f7833
--- /dev/null
+++ b/benchmarks/README.md
@@ -0,0 +1,163 @@
+# Iris Performance Benchmarking
+
+Iris uses an [Airspeed Velocity](https://github.com/airspeed-velocity/asv)
+(ASV) setup to benchmark performance. This is primarily designed to check for
+performance shifts between commits using statistical analysis, but can also
+be easily repurposed for manual comparative and scalability analyses.
+
+The benchmarks are automatically run overnight
+[by a GitHub Action](../.github/workflows/benchmark.yml), with any notable
+shifts in performance being flagged in a new GitHub issue.
+
+## Running benchmarks
+
+On GitHub: a Pull Request can be benchmarked by adding the
+[`benchmark_this`](https://github.com/SciTools/iris/labels/benchmark_this)
+label to the PR (to run a second time: just remove and re-add the label).
+Note that a benchmark run could take an hour or more to complete.
+This runs a comparison between the PR branch's ``HEAD`` and its merge-base with
+the PR's base branch, thus showing performance differences introduced
+by the PR. (This run is managed by
+[the aforementioned GitHub Action](../.github/workflows/benchmark.yml)).
+
+To run locally: the **benchmark runner** provides conveniences for
+common benchmark setup and run tasks, including replicating the automated
+overnight run locally. This is accessed via the Nox `benchmarks` session - see
+`nox -s benchmarks -- --help` for detail (_see also:
+[bm_runner.py](./bm_runner.py)_). Alternatively you can directly run `asv ...`
+commands from this directory (you will still need Nox installed - see
+[Benchmark environments](#benchmark-environments)).
+
+A significant portion of benchmark run time is environment management. Run-time
+can be reduced by placing the benchmark environment on the same file system as
+your
+[Conda package cache](https://conda.io/projects/conda/en/latest/user-guide/configuration/use-condarc.html#specify-pkg-directories),
+if it is not already. You can achieve this by either:
+
+- Temporarily reconfiguring `ENV_PARENT` in `delegated_env_commands`
+ in [asv.conf.json](asv.conf.json) to reference a location on the same file
+ system as the Conda package cache.
+- Using an alternative Conda package cache location during the benchmark run,
+ e.g. via the `$CONDA_PKGS_DIRS` environment variable.
+- Moving your Iris repo to the same file system as the Conda package cache.
+
+### Environment variables
+
+* `OVERRIDE_TEST_DATA_REPOSITORY` - required - some benchmarks use
+`iris-test-data` content, and your local `site.cfg` is not available for
+benchmark scripts. The benchmark runner defers to any value already set in
+the shell, but will otherwise download `iris-test-data` and set the variable
+accordingly.
+* `DATA_GEN_PYTHON` - required - path to a Python executable that can be
+used to generate benchmark test objects/files; see
+[Data generation](#data-generation). The benchmark runner sets this
+automatically, but will defer to any value already set in the shell. Note that
+[Mule](https://github.com/metomi/mule) will be automatically installed into
+this environment, and sometimes
+[iris-test-data](https://github.com/SciTools/iris-test-data) (see
+`OVERRIDE_TEST_DATA_REPOSITORY`).
+* `BENCHMARK_DATA` - optional - path to a directory for benchmark synthetic
+test data, which the benchmark scripts will create if it doesn't already
+exist. Defaults to `benchmarks/.data/` (under the repo root) if not set. Note that some of
+the generated files, especially in the 'SPerf' suite, are many GB in size so
+plan accordingly.
+* `ON_DEMAND_BENCHMARKS` - optional - when set (to any value): benchmarks
+decorated with `@on_demand_benchmark` are included in the ASV run. Usually
+coupled with the ASV `--bench` argument to only run the benchmark(s) of
+interest. Is set during the benchmark runner `cperf` and `sperf` sub-commands.
+* `ASV_COMMIT_ENVS` - optional - instruct the
+[delegated environment management](#benchmark-environments) to create a
+dedicated environment for each commit being benchmarked when set (to any
+value). This means that benchmarking commits with different environment
+requirements will not be delayed by repeated environment setup - especially
+relevant given the [benchmark runner](bm_runner.py)'s use of
+[--interleave-rounds](https://asv.readthedocs.io/en/stable/commands.html?highlight=interleave-rounds#asv-run),
+or any time you know you will repeatedly benchmark the same commit. **NOTE:**
+Iris environments are large so this option can consume a lot of disk space.
+
+## Writing benchmarks
+
+[See the ASV docs](https://asv.readthedocs.io/) for full detail.
+
+### What benchmarks to write
+
+It is not possible to maintain a full suite of 'unit style' benchmarks:
+
+* Benchmarks take longer to run than tests.
+* Small benchmarks are more vulnerable to noise - they report a lot of false
+positive regressions.
+
+We therefore recommend writing benchmarks representing scripts or single
+operations that are likely to be run at the user level.
+
+The drawback of this approach: a reported regression is less likely to reveal
+the root cause (e.g. if a commit caused a regression in coordinate-creation
+time, but the only benchmark covering this was for file-loading). Be prepared
+for manual investigations; and consider committing any useful benchmarks as
+[on-demand benchmarks](#on-demand-benchmarks) for future developers to use.
+
+### Data generation
+**Important:** be sure not to use the benchmarking environment to generate any
+test objects/files, as this environment changes with each commit being
+benchmarked, creating inconsistent benchmark 'conditions'. The
+[generate_data](./benchmarks/generate_data/__init__.py) module offers a
+solution; read more detail there.
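+
+As a minimal sketch of the pattern used there (the generating function and
+file name are illustrative only - they are not part of the Iris suite):
+
+```python
+from .generate_data import BENCHMARK_DATA, REUSE_DATA, run_function_elsewhere
+
+
+def fetch_realistic_4d_file():
+    """Create (or reuse) a benchmark NetCDF file in the data-gen environment."""
+
+    def _external(save_path_):
+        # Runs under DATA_GEN_PYTHON in a subprocess, so must be completely
+        # self-contained, including its own imports.
+        import iris
+        from iris.tests import stock
+
+        iris.save(stock.realistic_4d(), save_path_)
+
+    save_path = BENCHMARK_DATA / "realistic_4d.nc"
+    if not REUSE_DATA or not save_path.is_file():
+        run_function_elsewhere(_external, save_path_=str(save_path))
+    return save_path
+```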
+
+### ASV re-run behaviour
+
+Note that ASV re-runs a benchmark multiple times between calls to its `setup()` routine.
+This is a problem for benchmarking certain Iris operations such as data
+realisation, since the data will no longer be lazy after the first run.
+Consider writing extra steps to restore objects' original state _within_ the
+benchmark itself.
+
+If adding steps to the benchmark will skew the result too much then re-running
+can be disabled by setting an attribute on the benchmark: `number = 1`. To
+maintain result accuracy this should be accompanied by increasing the number of
+repeats _between_ `setup()` calls using the `repeat` attribute.
+`warmup_time = 0` is also advisable since ASV performs independent re-runs to
+estimate run-time, and these will still be subject to the original problem.
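+
+A minimal sketch, with the attribute values taken from the
+`disable_repeat_between_setup` decorator in
+[benchmarks init](./benchmarks/__init__.py) (the class and its helper
+function are illustrative only):
+
+```python
+class RealisationBenchmark:
+    # Run the timed routine only once per setup() call - the data would no
+    # longer be lazy on a second run.
+    number = 1
+    # Compensate by repeating more: at least 5 repeats, up to 30 repeats or
+    # 20 seconds, whichever comes first (setup() runs between repeats).
+    repeat = (5, 30, 20.0)
+    # Skip ASV's warmup runs, which would also realise the data.
+    warmup_time = 0.0
+
+    def setup(self):
+        self.cube = make_lazy_cube()  # hypothetical helper
+
+    def time_realise_data(self):
+        self.cube.data
+```
+
+Alternatively, apply the ready-made `disable_repeat_between_setup` decorator,
+which sets exactly these attributes.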
+
+### Custom benchmarks
+
+Iris benchmarking implements custom benchmark types, such as a `tracemalloc`
+benchmark to measure memory growth. See [custom_bms/](./custom_bms) for more
+detail.
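+
+For example, a method named with the ``tracemalloc_`` prefix is picked up by
+the custom memory-measurement benchmark type (a sketch - the cube helper is
+illustrative only):
+
+```python
+import iris
+
+
+class NetcdfSaveMemory:
+    def setup(self):
+        self.cube = make_test_cube()  # hypothetical helper
+
+    def tracemalloc_save_netcdf(self):
+        # Memory growth of this call is what gets measured.
+        iris.save(self.cube, "tmp.nc")
+```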
+
+### Scaling / non-Scaling Performance Differences
+
+**(We no longer advocate the approach below for benchmarks run during CI,
+given the limited available runtime and the risk of false positives. It
+remains useful for manual investigations.)**
+
+When comparing performance between commits/file-type/whatever it can be helpful
+to know if the differences exist in scaling or non-scaling parts of the Iris
+functionality in question. This can be done using a size parameter, setting
+one value to be as small as possible (e.g. a scalar `Cube`), and the other to
+be significantly larger (e.g. a 1000x1000 `Cube`). Performance differences
+might only be seen for the larger value, or the smaller, or both, getting you
+closer to the root cause.
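+
+A sketch of the parametrisation (names and sizes are illustrative only):
+
+```python
+class ScalingExample:
+    # One case as small as possible, one significantly larger.
+    params = [1, 1000]
+    param_names = ["cube side length"]
+
+    def setup(self, side_length):
+        self.cube = make_square_cube(side_length)  # hypothetical helper
+
+    def time_operation(self, side_length):
+        operation_under_test(self.cube)  # hypothetical operation
+```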
+
+### On-demand benchmarks
+
+Some benchmarks provide useful insight but are inappropriate to be included in
+a benchmark run by default, e.g. those with long run-times or requiring a local
+file. These benchmarks should be decorated with `@on_demand_benchmark`
+(see [benchmarks init](./benchmarks/__init__.py)), which
+sets the benchmark to only be included in a run when the `ON_DEMAND_BENCHMARKS`
+environment variable is set. Examples include the CPerf and SPerf benchmark
+suites for the UK Met Office NG-VAT project.
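+
+A sketch, for a module living directly in the [benchmarks](./benchmarks)
+directory (the benchmark body is illustrative only):
+
+```python
+from . import on_demand_benchmark
+
+
+@on_demand_benchmark
+class LocalFileLoad:
+    """Only collected when the ON_DEMAND_BENCHMARKS env var is set."""
+
+    def time_load(self):
+        load_my_local_file()  # hypothetical - depends on a local file
+```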
+
+## Benchmark environments
+
+We have disabled ASV's standard environment management, instead using an
+environment built using the same Nox scripts as Iris' test environments. This
+is done using ASV's plugin architecture - see
+[asv_delegated_conda.py](asv_delegated_conda.py) and the extra config items in
+[asv.conf.json](asv.conf.json).
+
+(ASV is written to control the environment(s) that benchmarks are run in -
+minimising external factors and also allowing it to compare between a matrix
+of dependencies (each in a separate environment). We have chosen to sacrifice
+these features in favour of testing each commit with its intended dependencies,
+controlled by Nox + lock-files).
diff --git a/benchmarks/asv.conf.json b/benchmarks/asv.conf.json
new file mode 100644
index 0000000000..2857c90ad7
--- /dev/null
+++ b/benchmarks/asv.conf.json
@@ -0,0 +1,64 @@
+{
+ "version": 1,
+ "project": "scitools-iris",
+ "project_url": "https://github.com/SciTools/iris",
+ "repo": "..",
+ "environment_type": "delegated",
+ "show_commit_url": "https://github.com/scitools/iris/commit/",
+ "branches": ["upstream/main"],
+
+ "benchmark_dir": "./benchmarks",
+ "env_dir": ".asv/env",
+ "results_dir": ".asv/results",
+ "html_dir": ".asv/html",
+ "plugins": [".asv_delegated"],
+
+ "delegated_env_commands_comment": [
+ "The command(s) that create/update an environment correctly for the",
+ "checked-out commit. Command(s) format follows `build_command`:",
+ " https://asv.readthedocs.io/en/stable/asv.conf.json.html#build-command-install-command-uninstall-command",
+
+ "The commit key indicates the earliest commit where the command(s)",
+ "will work.",
+
+ "Differences from `build_command`:",
+ " * See: https://asv.readthedocs.io/en/stable/asv.conf.json.html#build-command-install-command-uninstall-command",
+ " * Env vars limited to those set outside build time.",
+ " (e.g. `{conf_dir}` available but `{build_dir}` not)",
+ " * Run in the same environment as the ASV install itself.",
+
+ "Mandatory format for the first 'command' within each commit:",
+ " * `ENV_PARENT=path/to/parent/directory/of/env-directory`",
+ " * Can contain env vars (e.g. `{conf_dir}`)",
+ " * `ENV_PARENT` available as `{env_parent}` in subsequent commands",
+ " * The environment will be detected as the most recently updated",
+ " environment in `{env_parent}`."
+
+ ],
+ "delegated_env_commands": {
+ "c8a663a0": [
+ "ENV_PARENT={conf_dir}/.asv/env/nox312",
+ "PY_VER=3.12 nox --envdir={env_parent} --session=tests --install-only --no-error-on-external-run --verbose"
+ ],
+ "d58fca7e": [
+ "ENV_PARENT={conf_dir}/.asv/env/nox311",
+ "PY_VER=3.11 nox --envdir={env_parent} --session=tests --install-only --no-error-on-external-run --verbose"
+ ],
+ "44fae030": [
+ "ENV_PARENT={conf_dir}/.asv/env/nox310",
+ "PY_VER=3.10 nox --envdir={env_parent} --session=tests --install-only --no-error-on-external-run --verbose"
+ ]
+ },
+
+ "command_comment": [
+ "We know that the Nox command takes care of installation in each",
+ "environment, and in the case of Iris no specialised uninstall or",
+ "build commands are needed to get it working.",
+
+ "We do however need to install the custom benchmarks for them to be",
+ "usable."
+ ],
+ "install_command": [],
+ "uninstall_command": [],
+ "build_command": ["python {conf_dir}/custom_bms/install.py"]
+}
diff --git a/benchmarks/asv_delegated.py b/benchmarks/asv_delegated.py
new file mode 100644
index 0000000000..fa5312d392
--- /dev/null
+++ b/benchmarks/asv_delegated.py
@@ -0,0 +1,350 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+"""ASV plug-in providing an alternative :class:`asv.environments.Environment` subclass.
+
+Preps an environment via custom user scripts, then uses that as the
+benchmarking environment.
+
+"""
+
+from contextlib import contextmanager, suppress
+from os import environ
+from os.path import getmtime
+from pathlib import Path
+import sys
+
+from asv import util as asv_util
+from asv.console import log
+from asv.environment import Environment, EnvironmentUnavailable
+from asv.repo import Repo
+from asv.util import ProcessError
+
+
+class EnvPrepCommands:
+ """A container for the environment preparation commands for a given commit.
+
+ Designed to read a value from the `delegated_env_commands` in the ASV
+ config, and validate that the command(s) are structured correctly.
+ """
+
+ ENV_PARENT_VAR = "ENV_PARENT"
+ env_parent: Path
+ commands: list[str]
+
+ def __init__(self, environment: Environment, raw_commands: tuple[str]):
+ env_var = self.ENV_PARENT_VAR
+ raw_commands_list = list(raw_commands)
+
+ (first_command,) = environment._interpolate_commands(raw_commands_list[0])
+ env: dict
+ command, env, return_codes, cwd = first_command
+
+ valid = command == []
+ valid = valid and return_codes == {0}
+ valid = valid and cwd is None
+ valid = valid and list(env.keys()) == [env_var]
+ if not valid:
+ message = (
+ "First command MUST ONLY "
+ f"define the {env_var} env var, with no command e.g: "
+ f"`{env_var}=foo/`. Got: \n {raw_commands_list[0]}"
+ )
+ raise ValueError(message)
+
+ self.env_parent = Path(env[env_var]).resolve()
+ self.commands = raw_commands_list[1:]
+
+
+class CommitFinder(dict[str, EnvPrepCommands]):
+ """A specialised dict for finding the appropriate env prep script for a commit."""
+
+ def __call__(self, repo: Repo, commit_hash: str):
+ """Return the latest env prep script that is earlier than the given commit."""
+
+ def validate_commit(commit: str, is_lookup: bool) -> None:
+ try:
+ _ = repo.get_date(commit)
+ except ProcessError:
+ if is_lookup:
+ message_start = "Lookup commit"
+ else:
+ message_start = "Requested commit"
+ repo_path = getattr(repo, "_path", "unknown")
+ message = f"{message_start}: {commit} not found in repo: {repo_path}"
+ raise KeyError(message)
+
+ for lookup in self.keys():
+ validate_commit(lookup, is_lookup=True)
+ validate_commit(commit_hash, is_lookup=False)
+
+ def parent_distance(parent_hash: str) -> int:
+ range_spec = repo.get_range_spec(parent_hash, commit_hash)
+ parents = repo.get_hashes_from_range(range_spec)
+
+ if parent_hash[:8] == commit_hash[:8]:
+ distance = 0
+ elif len(parents) == 0:
+ distance = -1
+ else:
+ distance = len(parents)
+ return distance
+
+ parentage = {commit: parent_distance(commit) for commit in self.keys()}
+ parentage = {k: v for k, v in parentage.items() if v >= 0}
+ if len(parentage) == 0:
+ message = f"No env prep script available for commit: {commit_hash} ."
+ raise KeyError(message)
+ else:
+ parentage = dict(sorted(parentage.items(), key=lambda item: item[1]))
+ commit = next(iter(parentage))
+ content = self[commit]
+ return content
+
+
+class Delegated(Environment):
+ """Manage a benchmark environment using custom user scripts, run at each commit.
+
+ Ignores user input variations - ``matrix`` / ``pythons`` /
+    ``exclude``, since the environment is being managed outside ASV.
+
+ A vanilla :class:`asv.environment.Environment` is created for containing
+ the expected ASV configuration files and checked-out project. The actual
+ 'functional' environment is created/updated using the command(s) specified
+ in the config ``delegated_env_commands``, then the location is recorded via
+ a symlink within the ASV environment. The symlink is used as the
+ environment path used for any executable calls (e.g.
+ ``python my_script.py``).
+
+ """
+
+ tool_name = "delegated"
+ """Required by ASV as a unique identifier of the environment type."""
+
+ DELEGATED_LINK_NAME = "delegated_env"
+ """The name of the symlink to the delegated environment."""
+
+ COMMIT_ENVS_VAR = "ASV_COMMIT_ENVS"
+ """Env var that instructs a dedicated environment be created per commit."""
+
+ def __init__(self, conf, python, requirements, tagged_env_vars):
+ """Get a 'delegated' environment based on the given ASV config object.
+
+ Parameters
+ ----------
+ conf : dict
+ ASV configuration object.
+
+ python : str
+ Ignored - environment management is delegated. The value is always
+ ``DELEGATED``.
+
+ requirements : dict (str -> str)
+ Ignored - environment management is delegated. The value is always
+ an empty dict.
+
+ tagged_env_vars : dict (tag, key) -> value
+ Ignored - environment management is delegated. The value is always
+ an empty dict.
+
+ Raises
+ ------
+ EnvironmentUnavailable
+ The original environment or delegated environment cannot be created.
+
+ """
+ ignored = []
+ if python:
+ ignored.append(f"{python=}")
+ if requirements:
+ ignored.append(f"{requirements=}")
+ if tagged_env_vars:
+ ignored.append(f"{tagged_env_vars=}")
+ message = (
+ f"Ignoring ASV setting(s): {', '.join(ignored)}. Benchmark "
+ "environment management is delegated to third party script(s)."
+ )
+ log.warning(message)
+ self._python = "DELEGATED"
+ self._requirements = {}
+ self._tagged_env_vars = {}
+ super().__init__(
+ conf,
+ self._python,
+ self._requirements,
+ self._tagged_env_vars,
+ )
+
+ self._path_undelegated = Path(self._path)
+ """Preserves the 'true' path of the environment so that self._path can
+ be safely modified and restored."""
+
+ env_commands = getattr(conf, "delegated_env_commands")
+ try:
+ env_prep_commands = {
+ commit: EnvPrepCommands(self, commands)
+ for commit, commands in env_commands.items()
+ }
+ except ValueError as err:
+ message = f"Problem handling `delegated_env_commands`:\n{err}"
+ log.error(message)
+ raise EnvironmentUnavailable(message)
+ self._env_prep_lookup = CommitFinder(**env_prep_commands)
+ """An object that can be called downstream to get the appropriate
+ env prep script for a given repo and commit."""
+
+ @property
+ def _path_delegated(self) -> Path:
+ """The path of the symlink to the delegated environment."""
+ return self._path_undelegated / self.DELEGATED_LINK_NAME
+
+ @property
+ def _delegated_found(self) -> bool:
+ """Whether self._path_delegated successfully resolves to a directory."""
+ resolved = None
+ with suppress(FileNotFoundError):
+ resolved = self._path_delegated.resolve(strict=True)
+ result = resolved is not None and resolved.is_dir()
+ return result
+
+ def _symlink_to_delegated(self, delegated_env_path: Path) -> None:
+ """Create the symlink to the delegated environment."""
+ self._path_delegated.unlink(missing_ok=True)
+ self._path_delegated.parent.mkdir(parents=True, exist_ok=True)
+ self._path_delegated.symlink_to(delegated_env_path, target_is_directory=True)
+ assert self._delegated_found
+
+ def _setup(self):
+ """Temporarily try to set the user's active env as the delegated env.
+
+ Environment prep will be run anyway once ASV starts checking out
+ commits, but this step tries to provide a usable environment (with
+ python, etc.) at the moment that ASV expects it.
+
+ """
+ current_env = Path(sys.executable).parents[1]
+ message = (
+ "Temporarily using user's active environment as benchmarking "
+ f"environment: {current_env} . "
+ )
+ try:
+ self._symlink_to_delegated(current_env)
+ _ = self.find_executable("python")
+ except Exception:
+ message = (
+ f"Delegated environment {self.name} not yet set up (unable to "
+ "determine current environment)."
+ )
+ self._path_delegated.unlink(missing_ok=True)
+
+ message += "Correct environment will be set up at the first commit checkout."
+ log.warning(message)
+
+ def _prep_env(self, repo: Repo, commit_hash: str) -> None:
+ """Prepare the delegated environment for the given commit hash."""
+ message = (
+ f"Running delegated environment management for: {self.name} "
+ f"at commit: {commit_hash[:8]}"
+ )
+ log.info(message)
+
+ env_prep: EnvPrepCommands
+ try:
+ env_prep = self._env_prep_lookup(repo, commit_hash)
+ except KeyError as err:
+ message = f"Problem finding env prep commands: {err}"
+ log.error(message)
+ raise EnvironmentUnavailable(message)
+
+ new_env_per_commit = self.COMMIT_ENVS_VAR in environ
+ if new_env_per_commit:
+ env_parent = env_prep.env_parent / commit_hash[:8]
+ else:
+ env_parent = env_prep.env_parent
+
+ # See :meth:`Environment._interpolate_commands`.
+ # All ASV-namespaced env vars are available in the below format when
+ # interpolating commands:
+ # ASV_FOO_BAR = {foo_bar}
+ # We want the env parent path to be one of those available.
+ global_key = f"ASV_{EnvPrepCommands.ENV_PARENT_VAR}"
+ self._global_env_vars[global_key] = str(env_parent)
+
+ # The project checkout.
+ build_dir = Path(self._build_root) / self._repo_subdir
+
+ # Run the script(s) for delegated environment creation/updating.
+ # (An adaptation of :meth:`Environment._interpolate_and_run_commands`).
+ for command, env, return_codes, cwd in self._interpolate_commands(
+ env_prep.commands
+ ):
+ local_envs = dict(environ)
+ local_envs.update(env)
+ if cwd is None:
+ cwd = str(build_dir)
+ _ = asv_util.check_output(
+ command,
+ timeout=self._install_timeout,
+ cwd=cwd,
+ env=local_envs,
+ valid_return_codes=return_codes,
+ )
+
+ # Find the environment created/updated by running env_prep.commands.
+ # The most recently updated directory in env_parent.
+ delegated_env_path = sorted(
+ env_parent.glob("*"),
+ key=getmtime,
+ reverse=True,
+ )[0]
+ # Record the environment's path via a symlink within this environment.
+ self._symlink_to_delegated(delegated_env_path)
+
+ message = f"Environment {self.name} updated to spec at {commit_hash[:8]}"
+ log.info(message)
+
+ def checkout_project(self, repo: Repo, commit_hash: str) -> None:
+ """Check out the working tree of the project at given commit hash."""
+ super().checkout_project(repo, commit_hash)
+ self._prep_env(repo, commit_hash)
+
+ @contextmanager
+ def _delegate_path(self):
+ """Context manager to use the delegated env path as this env's path."""
+ if not self._delegated_found:
+ message = f"Delegated environment not found at: {self._path_delegated}"
+ log.error(message)
+ raise EnvironmentUnavailable(message)
+
+ try:
+ self._path = str(self._path_delegated)
+ yield
+ finally:
+ self._path = str(self._path_undelegated)
+
+ def find_executable(self, executable):
+ """Find an executable (e.g. python, pip) in the DELEGATED environment.
+
+ Raises
+ ------
+ OSError
+ If the executable is not found in the environment.
+ """
+ if not self._delegated_found:
+ # Required during environment setup. OSError expected if executable
+ # not found.
+ raise OSError
+
+ with self._delegate_path():
+ return super().find_executable(executable)
+
+ def run_executable(self, executable, args, **kwargs):
+ """Run a given executable (e.g. python, pip) in the DELEGATED environment."""
+ with self._delegate_path():
+ return super().run_executable(executable, args, **kwargs)
+
+ def run(self, args, **kwargs):
+ # This is not a specialisation - just implementing the abstract method.
+ log.debug(f"Running '{' '.join(args)}' in {self.name}")
+ return self.run_executable("python", args, **kwargs)
diff --git a/benchmarks/benchmarks/__init__.py b/benchmarks/benchmarks/__init__.py
new file mode 100644
index 0000000000..30a991a879
--- /dev/null
+++ b/benchmarks/benchmarks/__init__.py
@@ -0,0 +1,55 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+"""Common code for benchmarks."""
+
+from os import environ
+import tracemalloc
+
+import numpy as np
+
+
+def disable_repeat_between_setup(benchmark_object):
+ """Benchmark where object persistence would be inappropriate (decorator).
+
+ E.g:
+
+ * Benchmarking data realisation
+ * Benchmarking Cube coord addition
+
+ Can be applied to benchmark classes/methods/functions.
+
+ https://asv.readthedocs.io/en/stable/benchmarks.html#timing-benchmarks
+
+ """
+ # Prevent repeat runs between setup() runs - object(s) will persist after 1st.
+ benchmark_object.number = 1
+ # Compensate for reduced certainty by increasing number of repeats.
+ # (setup() is run between each repeat).
+ # Minimum 5 repeats, run up to 30 repeats / 20 secs whichever comes first.
+ benchmark_object.repeat = (5, 30, 20.0)
+ # ASV uses warmup to estimate benchmark time before planning the real run.
+ # Prevent this, since object(s) will persist after first warmup run,
+ # which would give ASV misleading info (warmups ignore ``number``).
+ benchmark_object.warmup_time = 0.0
+
+ return benchmark_object
+
+
+def on_demand_benchmark(benchmark_object):
+ """Disable these benchmark(s) unless ON_DEMAND_BENCHARKS env var is set.
+
+ This is a decorator.
+
+ For benchmarks that, for whatever reason, should not be run by default.
+ E.g:
+
+ * Require a local file
+ * Used for scalability analysis instead of commit monitoring.
+
+ Can be applied to benchmark classes/methods/functions.
+
+ """
+ if "ON_DEMAND_BENCHMARKS" in environ:
+ return benchmark_object
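+    # Otherwise fall through and return None: the decorated object is then
+    # bound to None in its module, so ASV does not collect it.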
diff --git a/benchmarks/benchmarks/aggregate_collapse.py b/benchmarks/benchmarks/aggregate_collapse.py
new file mode 100644
index 0000000000..4d5d2923bc
--- /dev/null
+++ b/benchmarks/benchmarks/aggregate_collapse.py
@@ -0,0 +1,212 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+"""Benchmarks relating to :meth:`iris.cube.CubeList.merge` and ``concatenate``."""
+
+import warnings
+
+import numpy as np
+
+from iris import analysis, coords, cube
+from iris.warnings import IrisVagueMetadataWarning
+
+from .generate_data.stock import realistic_4d_w_everything
+
+
+class AggregationMixin:
+ params = [[False, True]]
+ param_names = ["Lazy operations"]
+
+ def setup(self, lazy_run: bool):
+ warnings.filterwarnings("ignore", message="Ignoring a datum")
+ warnings.filterwarnings("ignore", category=IrisVagueMetadataWarning)
+ cube = realistic_4d_w_everything(lazy=lazy_run)
+
+ for cm in cube.cell_measures():
+ cube.remove_cell_measure(cm)
+ for av in cube.ancillary_variables():
+ cube.remove_ancillary_variable(av)
+
+ agg_mln_data = np.arange(0, 70, 10)
+ agg_mln_repeat = np.repeat(agg_mln_data, 10)
+
+ cube = cube[..., :10, :10]
+
+ self.mln_aux = "aggregatable"
+ self.mln = "model_level_number"
+ agg_mln_coord = coords.AuxCoord(points=agg_mln_repeat, long_name=self.mln_aux)
+
+ if lazy_run:
+ agg_mln_coord.points = agg_mln_coord.lazy_points()
+ cube.add_aux_coord(agg_mln_coord, 1)
+ self.cube = cube
+
+
+class Aggregation(AggregationMixin):
+ def time_aggregated_by_MEAN(self, _):
+ _ = self.cube.aggregated_by(self.mln_aux, analysis.MEAN).data
+
+ def time_aggregated_by_COUNT(self, _):
+ _ = self.cube.aggregated_by(
+ self.mln_aux, analysis.COUNT, function=lambda values: values > 280
+ ).data
+
+ def time_aggregated_by_GMEAN(self, _):
+ _ = self.cube.aggregated_by(self.mln_aux, analysis.GMEAN).data
+
+ def time_aggregated_by_HMEAN(self, _):
+ _ = self.cube.aggregated_by(self.mln_aux, analysis.HMEAN).data
+
+ def time_aggregated_by_MAX_RUN(self, _):
+ _ = self.cube.aggregated_by(
+ self.mln_aux, analysis.MAX_RUN, function=lambda values: values > 280
+ ).data
+
+ def time_aggregated_by_MAX(self, _):
+ _ = self.cube.aggregated_by(self.mln_aux, analysis.MAX).data
+
+ def time_aggregated_by_MEDIAN(self, _):
+ _ = self.cube.aggregated_by(self.mln_aux, analysis.MEDIAN).data
+
+ def time_aggregated_by_MIN(self, _):
+ _ = self.cube.aggregated_by(self.mln_aux, analysis.MIN).data
+
+ def time_aggregated_by_PEAK(self, _):
+ _ = self.cube.aggregated_by(self.mln_aux, analysis.PEAK).data
+
+ def time_aggregated_by_PERCENTILE(self, _):
+ _ = self.cube.aggregated_by(
+ self.mln_aux, analysis.PERCENTILE, percent=[10, 50, 90]
+ ).data
+
+ def time_aggregated_by_FAST_PERCENTILE(self, _):
+ _ = self.cube.aggregated_by(
+ self.mln_aux,
+ analysis.PERCENTILE,
+ mdtol=0,
+ percent=[10, 50, 90],
+ fast_percentile_method=True,
+ ).data
+
+ def time_aggregated_by_PROPORTION(self, _):
+ _ = self.cube.aggregated_by(
+ self.mln_aux,
+ analysis.PROPORTION,
+ function=lambda values: values > 280,
+ ).data
+
+ def time_aggregated_by_STD_DEV(self, _):
+ _ = self.cube.aggregated_by(self.mln_aux, analysis.STD_DEV).data
+
+ def time_aggregated_by_VARIANCE(self, _):
+ _ = self.cube.aggregated_by(self.mln_aux, analysis.VARIANCE).data
+
+ def time_aggregated_by_RMS(self, _):
+ _ = self.cube.aggregated_by(self.mln_aux, analysis.RMS).data
+
+ def time_collapsed_by_MEAN(self, _):
+ _ = self.cube.collapsed(self.mln, analysis.MEAN).data
+
+ def time_collapsed_by_COUNT(self, _):
+ _ = self.cube.collapsed(
+ self.mln, analysis.COUNT, function=lambda values: values > 280
+ ).data
+
+ def time_collapsed_by_GMEAN(self, _):
+ _ = self.cube.collapsed(self.mln, analysis.GMEAN).data
+
+ def time_collapsed_by_HMEAN(self, _):
+ _ = self.cube.collapsed(self.mln, analysis.HMEAN).data
+
+ def time_collapsed_by_MAX_RUN(self, _):
+ _ = self.cube.collapsed(
+ self.mln, analysis.MAX_RUN, function=lambda values: values > 280
+ ).data
+
+ def time_collapsed_by_MAX(self, _):
+ _ = self.cube.collapsed(self.mln, analysis.MAX).data
+
+ def time_collapsed_by_MEDIAN(self, _):
+ _ = self.cube.collapsed(self.mln, analysis.MEDIAN).data
+
+ def time_collapsed_by_MIN(self, _):
+ _ = self.cube.collapsed(self.mln, analysis.MIN).data
+
+ def time_collapsed_by_PEAK(self, _):
+ _ = self.cube.collapsed(self.mln, analysis.PEAK).data
+
+ def time_collapsed_by_PERCENTILE(self, _):
+ _ = self.cube.collapsed(
+ self.mln, analysis.PERCENTILE, percent=[10, 50, 90]
+ ).data
+
+ def time_collapsed_by_FAST_PERCENTILE(self, _):
+ _ = self.cube.collapsed(
+ self.mln,
+ analysis.PERCENTILE,
+ mdtol=0,
+ percent=[10, 50, 90],
+ fast_percentile_method=True,
+ ).data
+
+ def time_collapsed_by_PROPORTION(self, _):
+ _ = self.cube.collapsed(
+ self.mln, analysis.PROPORTION, function=lambda values: values > 280
+ ).data
+
+ def time_collapsed_by_STD_DEV(self, _):
+ _ = self.cube.collapsed(self.mln, analysis.STD_DEV).data
+
+ def time_collapsed_by_VARIANCE(self, _):
+ _ = self.cube.collapsed(self.mln, analysis.VARIANCE).data
+
+ def time_collapsed_by_RMS(self, _):
+ _ = self.cube.collapsed(self.mln, analysis.RMS).data
+
+
+class WeightedAggregation(AggregationMixin):
+ def setup(self, lazy_run):
+ super().setup(lazy_run)
+
+ weights = np.linspace(0, 1, 70)
+ weights = np.broadcast_to(weights, self.cube.shape[:2])
+ weights = np.broadcast_to(weights.T, self.cube.shape[::-1])
+ weights = weights.T
+
+ self.weights = weights
+
+ ## currently has problems with indexing weights
+ # def time_w_aggregated_by_WPERCENTILE(self, _):
+ # _ = self.cube.aggregated_by(
+ # self.mln_aux, analysis.WPERCENTILE, weights=self.weights, percent=[10, 50, 90]
+ # ).data
+
+ def time_w_aggregated_by_SUM(self, _):
+ _ = self.cube.aggregated_by(
+ self.mln_aux, analysis.SUM, weights=self.weights
+ ).data
+
+ def time_w_aggregated_by_RMS(self, _):
+ _ = self.cube.aggregated_by(
+ self.mln_aux, analysis.RMS, weights=self.weights
+ ).data
+
+ def time_w_aggregated_by_MEAN(self, _):
+ _ = self.cube.aggregated_by(
+ self.mln_aux, analysis.MEAN, weights=self.weights
+ ).data
+
+ def time_w_collapsed_by_WPERCENTILE(self, _):
+ _ = self.cube.collapsed(
+ self.mln, analysis.WPERCENTILE, weights=self.weights, percent=[10, 50, 90]
+ ).data
+
+ def time_w_collapsed_by_SUM(self, _):
+ _ = self.cube.collapsed(self.mln, analysis.SUM, weights=self.weights).data
+
+ def time_w_collapsed_by_RMS(self, _):
+ _ = self.cube.collapsed(self.mln, analysis.RMS, weights=self.weights).data
+
+ def time_w_collapsed_by_MEAN(self, _):
+ _ = self.cube.collapsed(self.mln, analysis.MEAN, weights=self.weights).data
diff --git a/benchmarks/benchmarks/cperf/__init__.py b/benchmarks/benchmarks/cperf/__init__.py
new file mode 100644
index 0000000000..05a086bc44
--- /dev/null
+++ b/benchmarks/benchmarks/cperf/__init__.py
@@ -0,0 +1,92 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+"""Benchmarks for the CPerf scheme of the UK Met Office's NG-VAT project.
+
+CPerf = comparing performance working with data in UM versus LFRic formats.
+
+Files available from the UK Met Office:
+ moo ls moose:/adhoc/projects/avd/asv/data_for_nightly_tests/
+"""
+
+import numpy as np
+
+from iris import load_cube
+
+from ..generate_data import BENCHMARK_DATA
+from ..generate_data.ugrid import make_cubesphere_testfile
+
+# The data of the core test UM files has dtype=np.float32 shape=(1920, 2560)
+_UM_DIMS_YX = (1920, 2560)
+# The closest cubesphere size in terms of datapoints is sqrt(1920*2560 / 6)
+# This gives ~= 905, i.e. "C905"
+_N_CUBESPHERE_UM_EQUIVALENT = int(np.sqrt(np.prod(_UM_DIMS_YX) / 6))
+
+
+class SingleDiagnosticMixin:
+ """For use in any benchmark classes that work on a single diagnostic file."""
+
+ params = [
+ ["LFRic", "UM", "UM_lbpack0", "UM_netcdf"],
+ [False, True],
+ [False, True],
+ ]
+ param_names = ["file type", "height dim (len 71)", "time dim (len 3)"]
+
+ def setup(self, file_type, three_d, three_times):
+ if file_type == "LFRic":
+ # Generate an appropriate synthetic LFRic file.
+ if three_times:
+ n_times = 3
+ else:
+ n_times = 1
+
+ # Use a cubesphere size ~equivalent to our UM test data.
+ cells_per_panel_edge = _N_CUBESPHERE_UM_EQUIVALENT
+ create_kwargs = dict(c_size=cells_per_panel_edge, n_times=n_times)
+
+ if three_d:
+ create_kwargs["n_levels"] = 71
+
+ # Will reuse a file if already present.
+ file_path = make_cubesphere_testfile(**create_kwargs)
+
+ else:
+ # Locate the appropriate UM file.
+ if three_times:
+ # pa/pb003 files
+ numeric = "003"
+ else:
+ # pa/pb000 files
+ numeric = "000"
+
+ if three_d:
+ # theta diagnostic, N1280 file w/ 71 levels (1920, 2560, 71)
+ file_name = f"umglaa_pb{numeric}-theta"
+ else:
+ # surface_temp diagnostic, N1280 file (1920, 2560)
+ file_name = f"umglaa_pa{numeric}-surfacetemp"
+
+ file_suffices = {
+ "UM": "", # packed FF (WGDOS lbpack = 1)
+ "UM_lbpack0": ".uncompressed", # unpacked FF (lbpack = 0)
+ "UM_netcdf": ".nc", # UM file -> Iris -> NetCDF file
+ }
+ suffix = file_suffices[file_type]
+
+ file_path = (BENCHMARK_DATA / file_name).with_suffix(suffix)
+ if not file_path.exists():
+ message = "\n".join(
+ [
+ f"Expected local file not found: {file_path}",
+ "Available from the UK Met Office.",
+ ]
+ )
+ raise FileNotFoundError(message)
+
+ self.file_path = file_path
+ self.file_type = file_type
+
+ def load(self):
+ return load_cube(str(self.file_path))
diff --git a/benchmarks/benchmarks/cperf/equality.py b/benchmarks/benchmarks/cperf/equality.py
new file mode 100644
index 0000000000..ffe61ef938
--- /dev/null
+++ b/benchmarks/benchmarks/cperf/equality.py
@@ -0,0 +1,55 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+"""Equality benchmarks for the CPerf scheme of the UK Met Office's NG-VAT project."""
+
+from .. import on_demand_benchmark
+from . import SingleDiagnosticMixin
+
+
+class EqualityMixin(SingleDiagnosticMixin):
+ r"""Use :class:`SingleDiagnosticMixin` as the realistic case.
+
+ Uses :class:`SingleDiagnosticMixin` as the realistic case will be comparing
+ :class:`~iris.cube.Cube`\\ s that have been loaded from file.
+
+ """
+
+ # Cut down the parent parameters.
+ params = [["LFRic", "UM"]]
+
+ def setup(self, file_type, three_d=False, three_times=False):
+ super().setup(file_type, three_d, three_times)
+ self.cube = self.load()
+ self.other_cube = self.load()
+
+
+@on_demand_benchmark
+class CubeEquality(EqualityMixin):
+ r"""Benchmark time & memory costs of comparing LFRic & UM :class:`~iris.cube.Cube`\\ s."""
+
+ def _comparison(self):
+ _ = self.cube == self.other_cube
+
+ def peakmem_eq(self, file_type):
+ self._comparison()
+
+ def time_eq(self, file_type):
+ self._comparison()
+
+
+@on_demand_benchmark
+class MeshEquality(EqualityMixin):
+ """Provides extra context for :class:`CubeEquality`."""
+
+ params = [["LFRic"]]
+
+ def _comparison(self):
+ _ = self.cube.mesh == self.other_cube.mesh
+
+ def peakmem_eq(self, file_type):
+ self._comparison()
+
+ def time_eq(self, file_type):
+ self._comparison()
diff --git a/benchmarks/benchmarks/cperf/load.py b/benchmarks/benchmarks/cperf/load.py
new file mode 100644
index 0000000000..07c2de9e79
--- /dev/null
+++ b/benchmarks/benchmarks/cperf/load.py
@@ -0,0 +1,55 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+"""File loading benchmarks for the CPerf scheme of the UK Met Office's NG-VAT project."""
+
+from .. import on_demand_benchmark
+from . import SingleDiagnosticMixin
+
+
+@on_demand_benchmark
+class SingleDiagnosticLoad(SingleDiagnosticMixin):
+ def time_load(self, _, __, ___):
+ """Perform a 'real world comparison'.
+
+ * UM coords are always realised (DimCoords).
+ * LFRic coords are not realised by default (MeshCoords).
+
+ """
+ cube = self.load()
+ assert cube.has_lazy_data()
+ # UM files load lon/lat as DimCoords, which are always realised.
+ expecting_lazy_coords = self.file_type == "LFRic"
+ for coord_name in "longitude", "latitude":
+ coord = cube.coord(coord_name)
+ assert coord.has_lazy_points() == expecting_lazy_coords
+ assert coord.has_lazy_bounds() == expecting_lazy_coords
+
+ def time_load_w_realised_coords(self, _, __, ___):
+ """Valuable extra comparison where both UM and LFRic coords are realised."""
+ cube = self.load()
+ for coord_name in "longitude", "latitude":
+ coord = cube.coord(coord_name)
+ # Don't touch actual points/bounds objects - permanent
+ # realisation plays badly with ASV's re-run strategy.
+ if coord.has_lazy_points():
+ coord.core_points().compute()
+ if coord.has_lazy_bounds():
+ coord.core_bounds().compute()
+
+
+@on_demand_benchmark
+class SingleDiagnosticRealise(SingleDiagnosticMixin):
+ # The larger files take a long time to realise.
+ timeout = 600.0
+
+ def setup(self, file_type, three_d, three_times):
+ super().setup(file_type, three_d, three_times)
+ self.loaded_cube = self.load()
+
+ def time_realise(self, _, __, ___):
+ # Don't touch loaded_cube.data - permanent realisation plays badly with
+ # ASV's re-run strategy.
+ assert self.loaded_cube.has_lazy_data()
+ self.loaded_cube.core_data().compute()
diff --git a/benchmarks/benchmarks/cperf/save.py b/benchmarks/benchmarks/cperf/save.py
new file mode 100644
index 0000000000..6dcd0b3bcf
--- /dev/null
+++ b/benchmarks/benchmarks/cperf/save.py
@@ -0,0 +1,40 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+"""File saving benchmarks for the CPerf scheme of the UK Met Office's NG-VAT project."""
+
+from iris import save
+
+from .. import on_demand_benchmark
+from ..generate_data.ugrid import make_cube_like_2d_cubesphere, make_cube_like_umfield
+from . import _N_CUBESPHERE_UM_EQUIVALENT, _UM_DIMS_YX
+
+
+@on_demand_benchmark
+class NetcdfSave:
+ """Benchmark time and memory costs of saving ~large-ish data cubes to netcdf.
+
+ Parametrised by file type.
+
+ """
+
+ params = ["LFRic", "UM"]
+ param_names = ["data type"]
+
+ def setup(self, data_type):
+ if data_type == "LFRic":
+ self.cube = make_cube_like_2d_cubesphere(
+ n_cube=_N_CUBESPHERE_UM_EQUIVALENT, with_mesh=True
+ )
+ else:
+ self.cube = make_cube_like_umfield(_UM_DIMS_YX)
+
+ def _save_data(self, cube):
+ save(cube, "tmp.nc")
+
+ def time_save_data_netcdf(self, data_type):
+ self._save_data(self.cube)
+
+ def tracemalloc_save_data_netcdf(self, data_type):
+ self._save_data(self.cube)
diff --git a/benchmarks/benchmarks/cube.py b/benchmarks/benchmarks/cube.py
new file mode 100644
index 0000000000..0b6829ee2d
--- /dev/null
+++ b/benchmarks/benchmarks/cube.py
@@ -0,0 +1,116 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+"""Cube benchmark tests."""
+
+from collections.abc import Iterable
+
+from iris import coords
+from iris.cube import Cube
+
+from .generate_data.stock import realistic_4d_w_everything
+
+
+class CubeCreation:
+ params = [[False, True], ["instantiate", "construct"]]
+ param_names = ["Cube has mesh", "Cube creation strategy"]
+
+ cube_kwargs: dict
+
+ def setup(self, w_mesh: bool, _) -> None:
+ # Loaded as two cubes due to the hybrid height.
+ source_cube = realistic_4d_w_everything(w_mesh=w_mesh)
+
+ def get_coords_and_dims(
+ coords_iter: Iterable[coords._DimensionalMetadata],
+ ) -> list[tuple[coords._DimensionalMetadata, tuple[int, ...]]]:
+ return [(c, c.cube_dims(source_cube)) for c in coords_iter]
+
+ self.cube_kwargs = dict(
+ data=source_cube.data,
+ standard_name=source_cube.standard_name,
+ long_name=source_cube.long_name,
+ var_name=source_cube.var_name,
+ units=source_cube.units,
+ attributes=source_cube.attributes,
+ cell_methods=source_cube.cell_methods,
+ dim_coords_and_dims=get_coords_and_dims(source_cube.dim_coords),
+ aux_coords_and_dims=get_coords_and_dims(source_cube.aux_coords),
+ aux_factories=source_cube.aux_factories,
+ cell_measures_and_dims=get_coords_and_dims(source_cube.cell_measures()),
+ ancillary_variables_and_dims=get_coords_and_dims(
+ source_cube.ancillary_variables()
+ ),
+ )
+
+ def time_create(self, _, cube_creation_strategy: str) -> None:
+ if cube_creation_strategy == "instantiate":
+ _ = Cube(**self.cube_kwargs)
+
+ elif cube_creation_strategy == "construct":
+ new_cube = Cube(data=self.cube_kwargs["data"])
+ new_cube.standard_name = self.cube_kwargs["standard_name"]
+ new_cube.long_name = self.cube_kwargs["long_name"]
+ new_cube.var_name = self.cube_kwargs["var_name"]
+ new_cube.units = self.cube_kwargs["units"]
+ new_cube.attributes = self.cube_kwargs["attributes"]
+ new_cube.cell_methods = self.cube_kwargs["cell_methods"]
+ for coord, dims in self.cube_kwargs["dim_coords_and_dims"]:
+ assert isinstance(coord, coords.DimCoord) # Type hint to help linters.
+ new_cube.add_dim_coord(coord, dims)
+ for coord, dims in self.cube_kwargs["aux_coords_and_dims"]:
+ new_cube.add_aux_coord(coord, dims)
+ for aux_factory in self.cube_kwargs["aux_factories"]:
+ new_cube.add_aux_factory(aux_factory)
+ for cell_measure, dims in self.cube_kwargs["cell_measures_and_dims"]:
+ new_cube.add_cell_measure(cell_measure, dims)
+ for ancillary_variable, dims in self.cube_kwargs[
+ "ancillary_variables_and_dims"
+ ]:
+ new_cube.add_ancillary_variable(ancillary_variable, dims)
+
+ else:
+ message = f"Unknown cube creation strategy: {cube_creation_strategy}"
+ raise NotImplementedError(message)
+
+
+class CubeEquality:
+ params = [
+ [False, True],
+ [False, True],
+ ["metadata_inequality", "coord_inequality", "data_inequality", "all_equal"],
+ ]
+ param_names = ["Cubes are lazy", "Cubes have meshes", "Scenario"]
+
+ cube_1: Cube
+ cube_2: Cube
+ coord_name = "surface_altitude"
+
+ def setup(self, lazy: bool, w_mesh: bool, scenario: str) -> None:
+ self.cube_1 = realistic_4d_w_everything(w_mesh=w_mesh, lazy=lazy)
+ # Using Cube.copy() produces different results due to sharing of the
+ # Mesh instance.
+ self.cube_2 = realistic_4d_w_everything(w_mesh=w_mesh, lazy=lazy)
+
+ match scenario:
+ case "metadata_inequality":
+ self.cube_2.long_name = "different"
+ case "coord_inequality":
+ coord = self.cube_2.coord(self.coord_name)
+ coord.points = coord.core_points() * 2
+ case "data_inequality":
+ self.cube_2.data = self.cube_2.core_data() * 2
+ case "all_equal":
+ pass
+ case _:
+ message = f"Unknown scenario: {scenario}"
+ raise NotImplementedError(message)
+
+ def time_equality(self, lazy: bool, __, ___) -> None:
+ _ = self.cube_1 == self.cube_2
+ if lazy:
+ for cube in (self.cube_1, self.cube_2):
+ # Confirm that this benchmark is safe for repetition.
+ assert cube.coord(self.coord_name).has_lazy_points()
+ assert cube.has_lazy_data()
diff --git a/benchmarks/benchmarks/generate_data/__init__.py b/benchmarks/benchmarks/generate_data/__init__.py
new file mode 100644
index 0000000000..bb53e26b2f
--- /dev/null
+++ b/benchmarks/benchmarks/generate_data/__init__.py
@@ -0,0 +1,116 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+"""Scripts for generating supporting data for benchmarking.
+
+Data generated using Iris should use :func:`run_function_elsewhere`, which
+means that data is generated using a fixed version of Iris and a fixed
+environment, rather than those that get changed when the benchmarking run
+checks out a new commit.
+
+Downstream use of data generated 'elsewhere' requires saving; usually in a
+NetCDF file. Could also use pickling but there is a potential risk if the
+benchmark sequence runs over two different Python versions.
+
+"""
+
+from contextlib import contextmanager
+from inspect import getsource
+from os import environ
+from pathlib import Path
+from subprocess import CalledProcessError, check_output, run
+from textwrap import dedent
+from warnings import warn
+
+from iris._lazy_data import as_concrete_data
+from iris.fileformats import netcdf
+
+#: Python executable used by :func:`run_function_elsewhere`, set via env
+#: variable of same name. Must be path of Python within an environment that
+#: includes Iris (including dependencies and test modules) and Mule.
+try:
+ DATA_GEN_PYTHON = environ["DATA_GEN_PYTHON"]
+ _ = check_output([DATA_GEN_PYTHON, "-c", "a = True"])
+except KeyError:
+ error = "Env variable DATA_GEN_PYTHON not defined."
+ raise KeyError(error)
+except (CalledProcessError, FileNotFoundError, PermissionError):
+ error = "Env variable DATA_GEN_PYTHON not a runnable python executable path."
+ raise ValueError(error)
+
+# The default location of data files used in benchmarks. Used by CI.
+default_data_dir = (Path(__file__).parents[2] / ".data").resolve()
+# Optionally override the default data location with environment variable.
+BENCHMARK_DATA = Path(environ.get("BENCHMARK_DATA", default_data_dir))
+if BENCHMARK_DATA == default_data_dir:
+ BENCHMARK_DATA.mkdir(exist_ok=True)
+ message = (
+ f"No BENCHMARK_DATA env var, defaulting to {BENCHMARK_DATA}. "
+ "Note that some benchmark files are GB in size."
+ )
+ warn(message)
+elif not BENCHMARK_DATA.is_dir():
+ message = f"Not a directory: {BENCHMARK_DATA} ."
+ raise ValueError(message)
+
+# Manual flag to allow the rebuilding of synthetic data.
+# False forces a benchmark run to re-make all the data files.
+REUSE_DATA = True
+
+
+def run_function_elsewhere(func_to_run, *args, **kwargs):
+ """Run a given function using the :const:`DATA_GEN_PYTHON` executable.
+
+ This structure allows the function to be written natively.
+
+ Parameters
+ ----------
+ func_to_run : FunctionType
+ The function object to be run.
+ NOTE: the function must be completely self-contained, i.e. perform all
+ its own imports (within the target :const:`DATA_GEN_PYTHON`
+ environment).
+ *args : tuple, optional
+ Function call arguments. Must all be expressible as simple literals,
+ i.e. the ``repr`` must be a valid literal expression.
+ **kwargs: dict, optional
+ Function call keyword arguments. All values must be expressible as
+ simple literals (see ``*args``).
+
+ Returns
+ -------
+ str
+ The ``stdout`` from the run.
+
+ """
+ func_string = dedent(getsource(func_to_run))
+ func_string = func_string.replace("@staticmethod\n", "")
+ func_call_term_strings = [repr(arg) for arg in args]
+ func_call_term_strings += [f"{name}={repr(val)}" for name, val in kwargs.items()]
+ func_call_string = (
+ f"{func_to_run.__name__}(" + ",".join(func_call_term_strings) + ")"
+ )
+ python_string = "\n".join([func_string, func_call_string])
+ result = run(
+ [DATA_GEN_PYTHON, "-c", python_string], capture_output=True, check=True
+ )
+ return result.stdout
+
+
+@contextmanager
+def load_realised():
+ """Force NetCDF loading with realised arrays.
+
+    Data is passed between the data generation and benchmarking environments
+    via file loading, but some benchmarks are only meaningful if they start
+    with realised arrays.
+ """
+ from iris.fileformats.netcdf.loader import _get_cf_var_data as pre_patched
+
+ def patched(cf_var, filename):
+ return as_concrete_data(pre_patched(cf_var, filename))
+
+ netcdf._get_cf_var_data = patched
+ yield netcdf
+ netcdf._get_cf_var_data = pre_patched
diff --git a/benchmarks/benchmarks/generate_data/stock.py b/benchmarks/benchmarks/generate_data/stock.py
new file mode 100644
index 0000000000..04698e8ff5
--- /dev/null
+++ b/benchmarks/benchmarks/generate_data/stock.py
@@ -0,0 +1,183 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+"""Wrappers for using :mod:`iris.tests.stock` methods for benchmarking.
+
+See :mod:`benchmarks.generate_data` for an explanation of this structure.
+"""
+
+from contextlib import nullcontext
+from hashlib import sha256
+import json
+from pathlib import Path
+
+import iris
+from iris import cube
+from iris.mesh import load_mesh
+
+from . import BENCHMARK_DATA, REUSE_DATA, load_realised, run_function_elsewhere
+
+
+def hash_args(*args, **kwargs):
+ """Convert arguments into a short hash - for preserving args in filenames."""
+ arg_string = str(args)
+ kwarg_string = json.dumps(kwargs)
+ full_string = arg_string + kwarg_string
+ return sha256(full_string.encode()).hexdigest()[:10]
+
+
+def _create_file__xios_common(func_name, **kwargs):
+ def _external(func_name_, temp_file_dir, **kwargs_):
+ from iris.tests.stock import netcdf
+
+ func = getattr(netcdf, func_name_)
+ print(func(temp_file_dir, **kwargs_), end="")
+
+ args_hash = hash_args(**kwargs)
+ save_path = (BENCHMARK_DATA / f"{func_name}_{args_hash}").with_suffix(".nc")
+ if not REUSE_DATA or not save_path.is_file():
+ # The xios functions take control of save location so need to move to
+ # a more specific name that allows reuse.
+ actual_path = run_function_elsewhere(
+ _external,
+ func_name_=func_name,
+ temp_file_dir=str(BENCHMARK_DATA),
+ **kwargs,
+ )
+ Path(actual_path.decode()).replace(save_path)
+ return save_path
+
+
+def create_file__xios_2d_face_half_levels(
+ temp_file_dir, dataset_name, n_faces=866, n_times=1
+):
+ """Create file wrapper for :meth:`iris.tests.stock.netcdf.create_file__xios_2d_face_half_levels`.
+
+    This wrapper takes control of ``temp_file_dir`` - the argument is ignored
+    and ``BENCHMARK_DATA`` is used instead.
+
+ todo: is create_file__xios_2d_face_half_levels still appropriate now we can
+ properly save Mesh Cubes?
+ """
+ return _create_file__xios_common(
+ func_name="create_file__xios_2d_face_half_levels",
+ dataset_name=dataset_name,
+ n_faces=n_faces,
+ n_times=n_times,
+ )
+
+
+def create_file__xios_3d_face_half_levels(
+ temp_file_dir, dataset_name, n_faces=866, n_times=1, n_levels=38
+):
+ """Create file wrapper for :meth:`iris.tests.stock.netcdf.create_file__xios_3d_face_half_levels`.
+
+    This wrapper takes control of ``temp_file_dir`` - the argument is ignored
+    and ``BENCHMARK_DATA`` is used instead.
+
+ todo: is create_file__xios_3d_face_half_levels still appropriate now we can
+ properly save Mesh Cubes?
+ """
+ return _create_file__xios_common(
+ func_name="create_file__xios_3d_face_half_levels",
+ dataset_name=dataset_name,
+ n_faces=n_faces,
+ n_times=n_times,
+ n_levels=n_levels,
+ )
+
+
+def sample_mesh(n_nodes=None, n_faces=None, n_edges=None, lazy_values=False):
+ """Sample mesh wrapper for :meth:iris.tests.stock.mesh.sample_mesh`."""
+
+ def _external(*args, **kwargs):
+ from iris.mesh import save_mesh
+ from iris.tests.stock.mesh import sample_mesh
+
+ save_path_ = kwargs.pop("save_path")
+ # Always saving, so laziness is irrelevant. Use lazy to save time.
+ kwargs["lazy_values"] = True
+ new_mesh = sample_mesh(*args, **kwargs)
+ save_mesh(new_mesh, save_path_)
+
+ arg_list = [n_nodes, n_faces, n_edges]
+ args_hash = hash_args(*arg_list)
+ save_path = (BENCHMARK_DATA / f"sample_mesh_{args_hash}").with_suffix(".nc")
+ if not REUSE_DATA or not save_path.is_file():
+ _ = run_function_elsewhere(_external, *arg_list, save_path=str(save_path))
+ if not lazy_values:
+ # Realise everything.
+ with load_realised():
+ mesh = load_mesh(str(save_path))
+ else:
+ mesh = load_mesh(str(save_path))
+ return mesh
+
+
+def sample_meshcoord(sample_mesh_kwargs=None, location="face", axis="x"):
+ """Sample meshcoord wrapper for :meth:`iris.tests.stock.mesh.sample_meshcoord`.
+
+ Parameters deviate from the original as cannot pass a
+    :class:`iris.mesh.Mesh` to the separate Python instance - must
+ instead generate the Mesh as well.
+
+ MeshCoords cannot be saved to file, so the _external method saves the
+ MeshCoord's Mesh, then the original Python instance loads in that Mesh and
+ regenerates the MeshCoord from there.
+ """
+
+ def _external(sample_mesh_kwargs_, save_path_):
+ from iris.mesh import save_mesh
+ from iris.tests.stock.mesh import sample_mesh, sample_meshcoord
+
+ if sample_mesh_kwargs_:
+ input_mesh = sample_mesh(**sample_mesh_kwargs_)
+ else:
+ input_mesh = None
+ # Don't parse the location or axis arguments - only saving the Mesh at
+ # this stage.
+ new_meshcoord = sample_meshcoord(mesh=input_mesh)
+ save_mesh(new_meshcoord.mesh, save_path_)
+
+    args_hash = hash_args(**(sample_mesh_kwargs or {}))
+ save_path = (BENCHMARK_DATA / f"sample_mesh_coord_{args_hash}").with_suffix(".nc")
+ if not REUSE_DATA or not save_path.is_file():
+ _ = run_function_elsewhere(
+ _external,
+ sample_mesh_kwargs_=sample_mesh_kwargs,
+ save_path_=str(save_path),
+ )
+ with load_realised():
+ source_mesh = load_mesh(str(save_path))
+ # Regenerate MeshCoord from its Mesh, which we saved.
+ return source_mesh.to_MeshCoord(location=location, axis=axis)
+
+
+def realistic_4d_w_everything(w_mesh=False, lazy=False) -> iris.cube.Cube:
+ """Run :func:`iris.tests.stock.realistic_4d_w_everything` in ``DATA_GEN_PYTHON``.
+
+ Parameters
+ ----------
+ w_mesh : bool
+ See :func:`iris.tests.stock.realistic_4d_w_everything` for details.
+ lazy : bool
+ If True, the Cube will be returned with all arrays as they would
+ normally be loaded from file (i.e. most will still be lazy Dask
+ arrays). If False, all arrays will be realised NumPy arrays.
+
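+    Examples
+    --------
+    A minimal sketch of intended use (requires the benchmark data-generation
+    environment)::
+
+        cube = realistic_4d_w_everything(w_mesh=True, lazy=True)
+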
+ """
+
+ def _external(w_mesh_: str, save_path_: str):
+ import iris
+ from iris.tests.stock import realistic_4d_w_everything
+
+ cube = realistic_4d_w_everything(w_mesh=bool(w_mesh_))
+ iris.save(cube, save_path_)
+
+ save_path = (BENCHMARK_DATA / f"realistic_4d_w_everything_{w_mesh}").with_suffix(
+ ".nc"
+ )
+ if not REUSE_DATA or not save_path.is_file():
+ _ = run_function_elsewhere(_external, w_mesh_=w_mesh, save_path_=str(save_path))
+ context = nullcontext() if lazy else load_realised()
+ with context:
+ return iris.load_cube(save_path, "air_potential_temperature")
diff --git a/benchmarks/benchmarks/generate_data/ugrid.py b/benchmarks/benchmarks/generate_data/ugrid.py
new file mode 100644
index 0000000000..2cef4752ee
--- /dev/null
+++ b/benchmarks/benchmarks/generate_data/ugrid.py
@@ -0,0 +1,190 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+"""Scripts for generating supporting data for UGRID-related benchmarking."""
+
+from iris import load_cube as iris_loadcube
+
+from . import BENCHMARK_DATA, REUSE_DATA, load_realised, run_function_elsewhere
+from .stock import (
+ create_file__xios_2d_face_half_levels,
+ create_file__xios_3d_face_half_levels,
+)
+
+
+def generate_cube_like_2d_cubesphere(n_cube: int, with_mesh: bool, output_path: str):
+ """Construct and save to file an LFRIc cubesphere-like cube.
+
+ Construct and save to file an LFRIc cubesphere-like cube for a given
+ cubesphere size, *or* a simpler structured (UM-like) cube of equivalent
+ size.
+
+ NOTE: this function is *NEVER* called from within this actual package.
+ Instead, it is to be called via benchmarks.remote_data_generation,
+ so that it can use up-to-date facilities, independent of the ASV controlled
+ environment which contains the "Iris commit under test".
+
+ This means:
+
+ * it must be completely self-contained : i.e. it includes all its
+ own imports, and saves results to an output file.
+
+ """
+ from iris import save
+ from iris.tests.stock.mesh import sample_mesh, sample_mesh_cube
+
+ n_face_nodes = n_cube * n_cube
+ n_faces = 6 * n_face_nodes
+
+ # Set n_nodes=n_faces and n_edges=2*n_faces
+ # : Not exact, but similar to a 'real' cubesphere.
+ n_nodes = n_faces
+ n_edges = 2 * n_faces
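+    # (For a closed quad mesh each face has 4 edges shared between 2 faces, so
+    # E = 2F, and Euler's formula V - E + F = 2 gives V = F + 2, i.e. V ~ F.)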
+ if with_mesh:
+ mesh = sample_mesh(
+ n_nodes=n_nodes, n_faces=n_faces, n_edges=n_edges, lazy_values=True
+ )
+ cube = sample_mesh_cube(mesh=mesh, n_z=1)
+ else:
+ cube = sample_mesh_cube(nomesh_faces=n_faces, n_z=1)
+
+ # Strip off the 'extra' aux-coord mapping the mesh, which sample-cube adds
+ # but which we don't want.
+ cube.remove_coord("mesh_face_aux")
+
+ # Save the result to a named file.
+ save(cube, output_path)
+
+
+def make_cube_like_2d_cubesphere(n_cube: int, with_mesh: bool):
+ """Generate an LFRIc cubesphere-like cube.
+
+ Generate an LFRIc cubesphere-like cube for a given cubesphere size,
+ *or* a simpler structured (UM-like) cube of equivalent size.
+
+ All the cube data, coords and mesh content are LAZY, and produced without
+ allocating large real arrays (to allow peak-memory testing).
+
+ NOTE: the actual cube generation is done in a stable Iris environment via
+ benchmarks.remote_data_generation, so it is all channeled via cached netcdf
+ files in our common testdata directory.
+
+ """
+ identifying_filename = f"cube_like_2d_cubesphere_C{n_cube}_Mesh={with_mesh}.nc"
+ filepath = BENCHMARK_DATA / identifying_filename
+ if not filepath.exists():
+ # Create the required testfile, by running the generation code remotely
+ # in a 'fixed' python environment.
+ run_function_elsewhere(
+ generate_cube_like_2d_cubesphere,
+ n_cube,
+ with_mesh=with_mesh,
+ output_path=str(filepath),
+ )
+
+ # File now *should* definitely exist: content is simply the desired cube.
+ cube = iris_loadcube(str(filepath))
+
+ # Ensure correct laziness.
+ _ = cube.data
+ for coord in cube.coords(mesh_coords=False):
+ assert not coord.has_lazy_points()
+ assert not coord.has_lazy_bounds()
+ if cube.mesh:
+ for coord in cube.mesh.coords():
+ assert coord.has_lazy_points()
+ for conn in cube.mesh.connectivities():
+ assert conn.has_lazy_indices()
+
+ return cube
+
+
+def make_cube_like_umfield(xy_dims):
+ """Create a "UM-like" cube with lazy content, for save performance testing.
+
+ Roughly equivalent to a single current UM cube, to be compared with
+ a "make_cube_like_2d_cubesphere(n_cube=_N_CUBESPHERE_UM_EQUIVALENT)"
+ (see below).
+
+ Note: probably a bit over-simplified, as there is no time coord, but that
+ is probably equally true of our LFRic-style synthetic data.
+
+ Parameters
+ ----------
+ xy_dims : 2-tuple
+ Set the horizontal dimensions = n-lats, n-lons.
+
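+    Examples
+    --------
+    Illustrative sketch only (the cube is generated externally and cached)::
+
+        cube = make_cube_like_umfield((960, 1280))
+        # cube.shape == (1, 960, 1280) : one level of n-lats x n-lons.
+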
+ """
+
+ def _external(xy_dims_, save_path_):
+ from dask import array as da
+ import numpy as np
+
+ from iris import save
+ from iris.coords import DimCoord
+ from iris.cube import Cube
+
+ nz, ny, nx = (1,) + xy_dims_
+
+ # Base data : Note this is float32 not float64 like LFRic/XIOS outputs.
+ lazy_data = da.zeros((nz, ny, nx), dtype=np.float32)
+ cube = Cube(lazy_data, long_name="structured_phenom")
+
+ # Add simple dim coords also.
+ z_dimco = DimCoord(np.arange(nz), long_name="level", units=1)
+ y_dimco = DimCoord(
+ np.linspace(-90.0, 90.0, ny),
+ standard_name="latitude",
+ units="degrees",
+ )
+ x_dimco = DimCoord(
+ np.linspace(-180.0, 180.0, nx),
+ standard_name="longitude",
+ units="degrees",
+ )
+ for idim, co in enumerate([z_dimco, y_dimco, x_dimco]):
+ cube.add_dim_coord(co, idim)
+
+ save(cube, save_path_)
+
+ save_path = (BENCHMARK_DATA / f"make_cube_like_umfield_{xy_dims}").with_suffix(
+ ".nc"
+ )
+ if not REUSE_DATA or not save_path.is_file():
+ _ = run_function_elsewhere(_external, xy_dims, str(save_path))
+ with load_realised():
+ cube = iris_loadcube(str(save_path))
+
+ return cube
+
+
+def make_cubesphere_testfile(c_size, n_levels=0, n_times=1):
+ """Build a C cubesphere testfile in a given directory.
+
+ Build a C cubesphere testfile in a given directory, with a standard naming.
+ If n_levels > 0 specified: 3d file with the specified number of levels.
+ Return the file path.
+
+ TODO: is create_file__xios... still appropriate now we can properly save Mesh Cubes?
+
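+    For example (a sketch; file location and caching are handled by the
+    ``generate_data`` package)::
+
+        path = make_cubesphere_testfile(c_size=100, n_levels=10)
+        # A 3D file: 6 * 100**2 = 60000 faces, 10 levels, 1 time step.
+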
+ """
+ n_faces = 6 * c_size * c_size
+ stem_name = f"mesh_cubesphere_C{c_size}_t{n_times}"
+ kwargs = dict(
+ temp_file_dir=None,
+ dataset_name=stem_name, # N.B. function adds the ".nc" extension
+ n_times=n_times,
+ n_faces=n_faces,
+ )
+
+ three_d = n_levels > 0
+ if three_d:
+ kwargs["n_levels"] = n_levels
+ kwargs["dataset_name"] += f"_{n_levels}levels"
+ func = create_file__xios_3d_face_half_levels
+ else:
+ func = create_file__xios_2d_face_half_levels
+
+ file_path = func(**kwargs)
+ return file_path
diff --git a/benchmarks/benchmarks/generate_data/um_files.py b/benchmarks/benchmarks/generate_data/um_files.py
new file mode 100644
index 0000000000..40bf83e79c
--- /dev/null
+++ b/benchmarks/benchmarks/generate_data/um_files.py
@@ -0,0 +1,188 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+"""Generate FF, PP and NetCDF files based on a minimal synthetic FF file.
+
+NOTE: uses the Mule package, so depends on an environment with Mule installed.
+"""
+
+
+def _create_um_files(
+ len_x: int, len_y: int, len_z: int, len_t: int, compress, save_paths: dict
+) -> None:
+ """Generate an FF object of given shape and compression, save to FF/PP/NetCDF.
+
+ This is run externally
+ (:func:`benchmarks.generate_data.run_function_elsewhere`), so all imports
+ are self-contained and input parameters are simple types.
+ """
+ from copy import deepcopy
+ from datetime import datetime
+ from tempfile import NamedTemporaryFile
+
+ from mule import ArrayDataProvider, Field3, FieldsFile
+ from mule.pp import fields_to_pp_file
+ import numpy as np
+
+ from iris import load_cube
+ from iris import save as save_cube
+
+ template = {
+ "fixed_length_header": {"dataset_type": 3, "grid_staggering": 3},
+ "integer_constants": {
+ "num_p_levels": len_z,
+ "num_cols": len_x,
+ "num_rows": len_y,
+ },
+ "real_constants": {},
+ "level_dependent_constants": {"dims": (len_z + 1, None)},
+ }
+ new_ff = FieldsFile.from_template(deepcopy(template))
+
+ data_array = np.arange(len_x * len_y).reshape(len_x, len_y)
+ array_provider = ArrayDataProvider(data_array)
+
+ def add_field(level_: int, time_step_: int) -> None:
+ """Add a minimal field to the new :class:`~mule.FieldsFile`.
+
+ Includes the minimum information to allow Mule saving and Iris
+ loading, as well as incrementation for vertical levels and time
+ steps to allow generation of z and t dimensions.
+ """
+ new_field = Field3.empty()
+ # To correspond to the header-release 3 class used.
+ new_field.lbrel = 3
+ # Mule uses the first element of the lookup to test for
+ # unpopulated fields (and skips them), so the first element should
+ # be set to something. The year will do.
+ new_field.raw[1] = datetime.now().year
+
+ # Horizontal.
+ new_field.lbcode = 1
+ new_field.lbnpt = len_x
+ new_field.lbrow = len_y
+ new_field.bdx = new_ff.real_constants.col_spacing
+ new_field.bdy = new_ff.real_constants.row_spacing
+ new_field.bzx = new_ff.real_constants.start_lon - 0.5 * new_field.bdx
+ new_field.bzy = new_ff.real_constants.start_lat - 0.5 * new_field.bdy
+
+ # Hemisphere.
+ new_field.lbhem = 32
+ # Processing.
+ new_field.lbproc = 0
+
+ # Vertical.
+        # Simulate hybrid height values, with sequences similar to those in a
+        # theta file.
+ new_field.lbvc = 65
+ if level_ == 0:
+ new_field.lblev = 9999
+ else:
+ new_field.lblev = level_
+
+ level_1 = level_ + 1
+ six_rec = 20 / 3
+ three_rec = six_rec / 2
+
+ new_field.blev = level_1**2 * six_rec - six_rec
+ new_field.brsvd1 = level_1**2 * six_rec + (six_rec * level_1) - three_rec
+
+ brsvd2_simulated = np.linspace(0.995, 0, len_z)
+ shift = min(len_z, 2)
+ bhrlev_simulated = np.concatenate([np.ones(shift), brsvd2_simulated[:-shift]])
+ new_field.brsvd2 = brsvd2_simulated[level_]
+ new_field.bhrlev = bhrlev_simulated[level_]
+
+ # Time.
+ new_field.lbtim = 11
+
+ new_field.lbyr = time_step_
+ for attr_name in ["lbmon", "lbdat", "lbhr", "lbmin", "lbsec"]:
+ setattr(new_field, attr_name, 0)
+
+ new_field.lbyrd = time_step_ + 1
+ for attr_name in ["lbmond", "lbdatd", "lbhrd", "lbmind", "lbsecd"]:
+ setattr(new_field, attr_name, 0)
+
+ # Data and packing.
+ new_field.lbuser1 = 1
+ new_field.lbpack = int(compress)
+ new_field.bacc = 0
+ new_field.bmdi = -1
+ new_field.lbext = 0
+ new_field.set_data_provider(array_provider)
+
+ new_ff.fields.append(new_field)
+
+ for time_step in range(len_t):
+ for level in range(len_z):
+ add_field(level, time_step + 1)
+
+ ff_path = save_paths.get("FF", None)
+ pp_path = save_paths.get("PP", None)
+ nc_path = save_paths.get("NetCDF", None)
+
+ if ff_path:
+ new_ff.to_file(ff_path)
+ if pp_path:
+ fields_to_pp_file(str(pp_path), new_ff.fields)
+ if nc_path:
+ temp_ff_path = None
+ # Need an Iris Cube from the FF content.
+ if ff_path:
+ # Use the existing file.
+ ff_cube = load_cube(ff_path)
+ else:
+ # Make a temporary file.
+ temp_ff_path = NamedTemporaryFile()
+ new_ff.to_file(temp_ff_path.name)
+ ff_cube = load_cube(temp_ff_path.name)
+
+ save_cube(ff_cube, nc_path, zlib=compress)
+ if temp_ff_path:
+ temp_ff_path.close()
+
+
+FILE_EXTENSIONS = {"FF": "", "PP": ".pp", "NetCDF": ".nc"}
+
+
+def create_um_files(
+ len_x: int,
+ len_y: int,
+ len_z: int,
+ len_t: int,
+ compress: bool,
+ file_types: list,
+) -> dict:
+ """Generate FF-based FF / PP / NetCDF files with specified shape and compression.
+
+ All files representing a given shape are saved in a dedicated directory. A
+ dictionary of the saved paths is returned.
+
+ If the required files exist, they are re-used, unless
+ :const:`benchmarks.REUSE_DATA` is ``False``.
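+
+    For example (a sketch; the actual directory depends on ``BENCHMARK_DATA``)::
+
+        paths = create_um_files(50, 50, 2, 1, False, ["PP", "NetCDF"])
+        # paths == {"PP": ".../UM_50_50_2_1/False.pp",
+        #           "NetCDF": ".../UM_50_50_2_1/False.nc"}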
+ """
+    # Self-contained imports, to avoid linting confusion with _create_um_files().
+ from . import BENCHMARK_DATA, REUSE_DATA, run_function_elsewhere
+
+ save_name_sections = ["UM", len_x, len_y, len_z, len_t]
+ save_name = "_".join(str(section) for section in save_name_sections)
+ save_dir = BENCHMARK_DATA / save_name
+ if not save_dir.is_dir():
+ save_dir.mkdir(parents=True)
+
+ save_paths = {}
+ files_exist = True
+ for file_type in file_types:
+ file_ext = FILE_EXTENSIONS[file_type]
+ save_path = (save_dir / f"{compress}").with_suffix(file_ext)
+ files_exist = files_exist and save_path.is_file()
+ save_paths[file_type] = str(save_path)
+
+ if not REUSE_DATA or not files_exist:
+ _ = run_function_elsewhere(
+ _create_um_files, len_x, len_y, len_z, len_t, compress, save_paths
+ )
+
+ return save_paths
diff --git a/benchmarks/benchmarks/import_iris.py b/benchmarks/benchmarks/import_iris.py
new file mode 100644
index 0000000000..ff5f19e421
--- /dev/null
+++ b/benchmarks/benchmarks/import_iris.py
@@ -0,0 +1,278 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+
+"""Import iris benchmarking."""
+
+from importlib import import_module, reload
+
+################
+# Prepare info for reset_colormaps:
+
+# Import and capture colormaps.
+from matplotlib import colormaps # isort:skip
+
+_COLORMAPS_ORIG = set(colormaps)
+
+# Import iris.palette, which modifies colormaps.
+import iris.palette
+
+# Derive which colormaps have been added by iris.palette.
+_COLORMAPS_MOD = set(colormaps)
+COLORMAPS_EXTRA = _COLORMAPS_MOD - _COLORMAPS_ORIG
+
+# Touch iris.palette to prevent linters complaining.
+_ = iris.palette
+
+################
+
+
+class Iris:
+ @staticmethod
+ def _import(module_name, reset_colormaps=False):
+ """Have experimented with adding sleep() commands into the imported modules.
+
+ The results reveal:
+
+ ASV avoids invoking `import x` if nothing gets called in the
+ benchmark (some imports were timed, but only those where calls
+ happened during import).
+
+ Using reload() is not identical to importing, but does produce
+ results that are very close to expected import times, so this is fine
+ for monitoring for regressions.
+ It is also ideal for accurate repetitions, without the need to mess
+ with the ASV `number` attribute etc, since cached imports are not used
+ and the repetitions are therefore no faster than the first run.
+ """
+ mod = import_module(module_name)
+
+ if reset_colormaps:
+ # Needed because reload() will attempt to register new colormaps a
+ # second time, which errors by default.
+ for cm_name in COLORMAPS_EXTRA:
+ colormaps.unregister(cm_name)
+
+ reload(mod)
+
+ def time_iris(self):
+ self._import("iris")
+
+ def time__concatenate(self):
+ self._import("iris._concatenate")
+
+ def time__constraints(self):
+ self._import("iris._constraints")
+
+ def time__data_manager(self):
+ self._import("iris._data_manager")
+
+ def time__deprecation(self):
+ self._import("iris._deprecation")
+
+ def time__lazy_data(self):
+ self._import("iris._lazy_data")
+
+ def time__merge(self):
+ self._import("iris._merge")
+
+ def time__representation(self):
+ self._import("iris._representation")
+
+ def time_analysis(self):
+ self._import("iris.analysis")
+
+ def time_analysis__area_weighted(self):
+ self._import("iris.analysis._area_weighted")
+
+ def time_analysis__grid_angles(self):
+ self._import("iris.analysis._grid_angles")
+
+ def time_analysis__interpolation(self):
+ self._import("iris.analysis._interpolation")
+
+ def time_analysis__regrid(self):
+ self._import("iris.analysis._regrid")
+
+ def time_analysis__scipy_interpolate(self):
+ self._import("iris.analysis._scipy_interpolate")
+
+ def time_analysis_calculus(self):
+ self._import("iris.analysis.calculus")
+
+ def time_analysis_cartography(self):
+ self._import("iris.analysis.cartography")
+
+    def time_analysis_geometry(self):
+ self._import("iris.analysis.geometry")
+
+ def time_analysis_maths(self):
+ self._import("iris.analysis.maths")
+
+ def time_analysis_stats(self):
+ self._import("iris.analysis.stats")
+
+ def time_analysis_trajectory(self):
+ self._import("iris.analysis.trajectory")
+
+ def time_aux_factory(self):
+ self._import("iris.aux_factory")
+
+ def time_common(self):
+ self._import("iris.common")
+
+ def time_common_lenient(self):
+ self._import("iris.common.lenient")
+
+ def time_common_metadata(self):
+ self._import("iris.common.metadata")
+
+ def time_common_mixin(self):
+ self._import("iris.common.mixin")
+
+ def time_common_resolve(self):
+ self._import("iris.common.resolve")
+
+ def time_config(self):
+ self._import("iris.config")
+
+ def time_coord_categorisation(self):
+ self._import("iris.coord_categorisation")
+
+ def time_coord_systems(self):
+ self._import("iris.coord_systems")
+
+ def time_coords(self):
+ self._import("iris.coords")
+
+ def time_cube(self):
+ self._import("iris.cube")
+
+ def time_exceptions(self):
+ self._import("iris.exceptions")
+
+ def time_experimental(self):
+ self._import("iris.experimental")
+
+ def time_fileformats(self):
+ self._import("iris.fileformats")
+
+ def time_fileformats__ff(self):
+ self._import("iris.fileformats._ff")
+
+ def time_fileformats__ff_cross_references(self):
+ self._import("iris.fileformats._ff_cross_references")
+
+ def time_fileformats__pp_lbproc_pairs(self):
+ self._import("iris.fileformats._pp_lbproc_pairs")
+
+ def time_fileformats_structured_array_identification(self):
+ self._import("iris.fileformats._structured_array_identification")
+
+ def time_fileformats_abf(self):
+ self._import("iris.fileformats.abf")
+
+ def time_fileformats_cf(self):
+ self._import("iris.fileformats.cf")
+
+ def time_fileformats_dot(self):
+ self._import("iris.fileformats.dot")
+
+ def time_fileformats_name(self):
+ self._import("iris.fileformats.name")
+
+ def time_fileformats_name_loaders(self):
+ self._import("iris.fileformats.name_loaders")
+
+ def time_fileformats_netcdf(self):
+ self._import("iris.fileformats.netcdf")
+
+ def time_fileformats_nimrod(self):
+ self._import("iris.fileformats.nimrod")
+
+ def time_fileformats_nimrod_load_rules(self):
+ self._import("iris.fileformats.nimrod_load_rules")
+
+ def time_fileformats_pp(self):
+ self._import("iris.fileformats.pp")
+
+ def time_fileformats_pp_load_rules(self):
+ self._import("iris.fileformats.pp_load_rules")
+
+ def time_fileformats_pp_save_rules(self):
+ self._import("iris.fileformats.pp_save_rules")
+
+ def time_fileformats_rules(self):
+ self._import("iris.fileformats.rules")
+
+ def time_fileformats_um(self):
+ self._import("iris.fileformats.um")
+
+ def time_fileformats_um__fast_load(self):
+ self._import("iris.fileformats.um._fast_load")
+
+ def time_fileformats_um__fast_load_structured_fields(self):
+ self._import("iris.fileformats.um._fast_load_structured_fields")
+
+ def time_fileformats_um__ff_replacement(self):
+ self._import("iris.fileformats.um._ff_replacement")
+
+ def time_fileformats_um__optimal_array_structuring(self):
+ self._import("iris.fileformats.um._optimal_array_structuring")
+
+ def time_fileformats_um_cf_map(self):
+ self._import("iris.fileformats.um_cf_map")
+
+ def time_io(self):
+ self._import("iris.io")
+
+ def time_io_format_picker(self):
+ self._import("iris.io.format_picker")
+
+ def time_iterate(self):
+ self._import("iris.iterate")
+
+ def time_palette(self):
+ self._import("iris.palette", reset_colormaps=True)
+
+ def time_plot(self):
+ self._import("iris.plot")
+
+ def time_quickplot(self):
+ self._import("iris.quickplot")
+
+ def time_std_names(self):
+ self._import("iris.std_names")
+
+ def time_symbols(self):
+ self._import("iris.symbols")
+
+ def time_tests(self):
+ self._import("iris.tests")
+
+ def time_time(self):
+ self._import("iris.time")
+
+ def time_util(self):
+ self._import("iris.util")
+
+ # third-party imports
+
+ def time_third_party_cartopy(self):
+ self._import("cartopy")
+
+ def time_third_party_cf_units(self):
+ self._import("cf_units")
+
+ def time_third_party_cftime(self):
+ self._import("cftime")
+
+ def time_third_party_matplotlib(self):
+ self._import("matplotlib")
+
+ def time_third_party_numpy(self):
+ self._import("numpy")
+
+ def time_third_party_scipy(self):
+ self._import("scipy")
diff --git a/benchmarks/benchmarks/iterate.py b/benchmarks/benchmarks/iterate.py
new file mode 100644
index 0000000000..664bcf8ba2
--- /dev/null
+++ b/benchmarks/benchmarks/iterate.py
@@ -0,0 +1,26 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+"""Iterate benchmark tests."""
+
+import numpy as np
+
+from iris import coords, cube, iterate
+
+
+class IZip:
+ def setup(self):
+ data_2d = np.zeros((1000,) * 2)
+ data_1d = data_2d[0]
+ local_cube = cube.Cube(data_2d)
+ coord_a = coords.AuxCoord(points=data_1d, long_name="a")
+ coord_b = coords.AuxCoord(points=data_1d, long_name="b")
+        # Use a tuple (not a generator) so repeat timings all see the names.
+        self.coord_names = tuple(coord.long_name for coord in (coord_a, coord_b))
+
+ local_cube.add_aux_coord(coord_a, 0)
+ local_cube.add_aux_coord(coord_b, 1)
+ self.cube = local_cube
+
+ def time_izip(self):
+ iterate.izip(self.cube, coords=self.coord_names)
diff --git a/benchmarks/benchmarks/load/__init__.py b/benchmarks/benchmarks/load/__init__.py
new file mode 100644
index 0000000000..a4dfb40d19
--- /dev/null
+++ b/benchmarks/benchmarks/load/__init__.py
@@ -0,0 +1,169 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+"""File loading benchmark tests."""
+
+from iris import AttributeConstraint, Constraint, load, load_cube
+from iris.cube import Cube
+from iris.fileformats.um import structured_um_loading
+
+from ..generate_data import BENCHMARK_DATA, REUSE_DATA, run_function_elsewhere
+from ..generate_data.um_files import create_um_files
+
+
+class LoadAndRealise:
+ # For data generation
+ timeout = 600.0
+ params = (
+ [(50, 50, 2), (1280, 960, 5), (2, 2, 1000)],
+ [False, True],
+ ["FF", "PP", "NetCDF"],
+ )
+ param_names = ["xyz", "compressed", "file_format"]
+
+ def setup_cache(self) -> dict:
+ file_type_args = self.params[2]
+ file_path_dict: dict[tuple[int, int, int], dict[bool, dict[str, str]]] = {}
+ for xyz in self.params[0]:
+ file_path_dict[xyz] = {}
+ x, y, z = xyz
+ for compress in self.params[1]:
+ file_path_dict[xyz][compress] = create_um_files(
+ x, y, z, 1, compress, file_type_args
+ )
+ return file_path_dict
+
+ def setup(
+ self,
+ file_path_dict: dict,
+ xyz: tuple,
+ compress: bool,
+ file_format: str,
+ ) -> None:
+ self.file_path = file_path_dict[xyz][compress][file_format]
+ self.cube = self.load()
+
+ def load(self) -> Cube:
+ return load_cube(self.file_path)
+
+ def time_load(self, _, __, ___, ____) -> None:
+ _ = self.load()
+
+ def time_realise(self, _, __, ___, ____) -> None:
+ # Don't touch cube.data - permanent realisation plays badly with ASV's
+ # re-run strategy.
+ assert self.cube.has_lazy_data()
+ self.cube.core_data().compute()
+
+
+class STASHConstraint:
+ # xyz sizes mimic LoadAndRealise to maximise file reuse.
+ params = ([(2, 2, 2), (1280, 960, 5), (2, 2, 1000)], ["FF", "PP"])
+ param_names = ["xyz", "file_format"]
+
+ def setup_cache(self) -> dict:
+ file_type_args = self.params[1]
+ file_path_dict = {}
+ for xyz in self.params[0]:
+ x, y, z = xyz
+ file_path_dict[xyz] = create_um_files(x, y, z, 1, False, file_type_args)
+ return file_path_dict
+
+ def setup(self, file_path_dict: dict, xyz: tuple, file_format: str) -> None:
+ self.file_path = file_path_dict[xyz][file_format]
+
+ def time_stash_constraint(self, _, __, ___) -> None:
+ _ = load_cube(self.file_path, AttributeConstraint(STASH="m??s??i901"))
+
+
+class TimeConstraint:
+ params = ([3, 20], ["FF", "PP", "NetCDF"])
+ param_names = ["time_dim_len", "file_format"]
+
+ def setup_cache(self) -> dict:
+ file_type_args = self.params[1]
+ file_path_dict = {}
+ for time_dim_len in self.params[0]:
+ file_path_dict[time_dim_len] = create_um_files(
+ 20, 20, 5, time_dim_len, False, file_type_args
+ )
+ return file_path_dict
+
+ def setup(self, file_path_dict: dict, time_dim_len: int, file_format: str) -> None:
+ self.file_path = file_path_dict[time_dim_len][file_format]
+ self.time_constr = Constraint(time=lambda cell: cell.point.year < 3)
+
+ def time_time_constraint(self, _, __, ___) -> None:
+ _ = load_cube(self.file_path, self.time_constr)
+
+
+class ManyVars:
+ FILE_PATH = BENCHMARK_DATA / "many_var_file.nc"
+
+ @staticmethod
+ def _create_file(save_path: str) -> None:
+ """Run externally - everything must be self-contained."""
+ import numpy as np
+
+ from iris import save
+ from iris.coords import AuxCoord
+ from iris.cube import Cube
+
+ data_len = 8
+ data = np.arange(data_len)
+ cube = Cube(data, units="unknown")
+ extra_vars = 80
+ names = ["coord_" + str(i) for i in range(extra_vars)]
+ for name in names:
+ coord = AuxCoord(data, long_name=name, units="unknown")
+ cube.add_aux_coord(coord, 0)
+ save(cube, save_path)
+
+ def setup_cache(self) -> None:
+ if not REUSE_DATA or not self.FILE_PATH.is_file():
+ # See :mod:`benchmarks.generate_data` docstring for full explanation.
+ _ = run_function_elsewhere(
+ self._create_file,
+ str(self.FILE_PATH),
+ )
+
+ def time_many_var_load(self) -> None:
+ _ = load(str(self.FILE_PATH))
+
+
+class StructuredFF:
+ """Test structured loading of a large-ish fieldsfile.
+
+ Structured load of the larger size should show benefit over standard load,
+ avoiding the cost of merging.
+ """
+
+ params = ([(2, 2, 2), (1280, 960, 5), (2, 2, 1000)], [False, True])
+ param_names = ["xyz", "structured_loading"]
+
+ def setup_cache(self) -> dict:
+ file_path_dict = {}
+ for xyz in self.params[0]:
+ x, y, z = xyz
+ file_path_dict[xyz] = create_um_files(x, y, z, 1, False, ["FF"])
+ return file_path_dict
+
+ def setup(self, file_path_dict, xyz, structured_load):
+ self.file_path = file_path_dict[xyz]["FF"]
+ self.structured_load = structured_load
+
+ def load(self):
+ """Load the whole file (in fact there is only 1 cube)."""
+
+ def _load():
+ _ = load(self.file_path)
+
+ if self.structured_load:
+ with structured_um_loading():
+ _load()
+ else:
+ _load()
+
+ def time_structured_load(self, _, __, ___):
+ self.load()
diff --git a/benchmarks/benchmarks/load/ugrid.py b/benchmarks/benchmarks/load/ugrid.py
new file mode 100644
index 0000000000..5ad0086ef3
--- /dev/null
+++ b/benchmarks/benchmarks/load/ugrid.py
@@ -0,0 +1,115 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+"""Mesh data loading benchmark tests."""
+
+from iris import load_cube as iris_load_cube
+from iris.mesh import load_mesh as iris_load_mesh
+
+from ..generate_data.stock import create_file__xios_2d_face_half_levels
+
+
+def synthetic_data(**kwargs):
+ # Ensure all uses of the synthetic data function use the common directory.
+ # File location is controlled by :mod:`generate_data`, hence temp_file_dir=None.
+ return create_file__xios_2d_face_half_levels(temp_file_dir=None, **kwargs)
+
+
+def load_cube(*args, **kwargs):
+ return iris_load_cube(*args, **kwargs)
+
+
+def load_mesh(*args, **kwargs):
+ return iris_load_mesh(*args, **kwargs)
+
+
+class BasicLoading:
+ params = [1, int(2e5)]
+ param_names = ["number of faces"]
+
+ def setup_common(self, **kwargs):
+ self.data_path = synthetic_data(**kwargs)
+
+ def setup(self, *args):
+ self.setup_common(dataset_name="Loading", n_faces=args[0])
+
+ def time_load_file(self, *args):
+ _ = load_cube(str(self.data_path))
+
+ def time_load_mesh(self, *args):
+ _ = load_mesh(str(self.data_path))
+
+
+class BasicLoadingTime(BasicLoading):
+ """Same as BasicLoading, but scaling over a time series - an unlimited dimension."""
+
+ # NOTE iris#4834 - careful how big the time dimension is (time dimension
+ # is UNLIMITED).
+
+ param_names = ["number of time steps"]
+
+ def setup(self, *args):
+ self.setup_common(dataset_name="Loading", n_faces=1, n_times=args[0])
+
+
+class DataRealisation:
+ # Prevent repeat runs between setup() runs - data won't be lazy after 1st.
+ number = 1
+ # Compensate for reduced certainty by increasing number of repeats.
+ repeat = (10, 10, 10.0)
+ # Prevent ASV running its warmup, which ignores `number` and would
+ # therefore get a false idea of typical run time since the data would stop
+ # being lazy.
+ warmup_time = 0.0
+ timeout = 300.0
+
+ params = [int(1e4), int(2e5)]
+ param_names = ["number of faces"]
+
+ def setup_common(self, **kwargs):
+ data_path = synthetic_data(**kwargs)
+ self.cube = load_cube(str(data_path))
+
+ def setup(self, *args):
+ self.setup_common(dataset_name="Realisation", n_faces=args[0])
+
+ def time_realise_data(self, *args):
+ assert self.cube.has_lazy_data()
+ _ = self.cube.data[0]
+
+
+class DataRealisationTime(DataRealisation):
+ """Same as DataRealisation, but scaling over a time series - an unlimited dimension."""
+
+ param_names = ["number of time steps"]
+
+ def setup(self, *args):
+ self.setup_common(dataset_name="Realisation", n_faces=1, n_times=args[0])
+
+
+class Callback:
+ params = [1, int(2e5)]
+ param_names = ["number of faces"]
+
+ def setup_common(self, **kwargs):
+ def callback(cube, field, filename):
+ return cube[::2]
+
+ self.data_path = synthetic_data(**kwargs)
+ self.callback = callback
+
+ def setup(self, *args):
+ self.setup_common(dataset_name="Loading", n_faces=args[0])
+
+ def time_load_file_callback(self, *args):
+ _ = load_cube(str(self.data_path), callback=self.callback)
+
+
+class CallbackTime(Callback):
+ """Same as Callback, but scaling over a time series - an unlimited dimension."""
+
+ param_names = ["number of time steps"]
+
+ def setup(self, *args):
+ self.setup_common(dataset_name="Loading", n_faces=1, n_times=args[0])
diff --git a/benchmarks/benchmarks/merge_concat.py b/benchmarks/benchmarks/merge_concat.py
new file mode 100644
index 0000000000..2d3738683a
--- /dev/null
+++ b/benchmarks/benchmarks/merge_concat.py
@@ -0,0 +1,72 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+"""Benchmarks relating to :meth:`iris.cube.CubeList.merge` and ``concatenate``."""
+
+import warnings
+
+import numpy as np
+
+from iris.cube import CubeList
+from iris.warnings import IrisVagueMetadataWarning
+
+from .generate_data.stock import realistic_4d_w_everything
+
+
+class Merge:
+ # TODO: Improve coverage.
+
+ cube_list: CubeList
+
+ def setup(self):
+ source_cube = realistic_4d_w_everything()
+
+ # Merge does not yet fully support cell measures and ancillary variables.
+ for cm in source_cube.cell_measures():
+ source_cube.remove_cell_measure(cm)
+ for av in source_cube.ancillary_variables():
+ source_cube.remove_ancillary_variable(av)
+
+ second_cube = source_cube.copy()
+ scalar_coord = second_cube.coords(dimensions=[])[0]
+ scalar_coord.points = scalar_coord.points + 1
+ self.cube_list = CubeList([source_cube, second_cube])
+
+ def time_merge(self):
+ _ = self.cube_list.merge_cube()
+
+ def tracemalloc_merge(self):
+ _ = self.cube_list.merge_cube()
+
+ tracemalloc_merge.number = 3 # type: ignore[attr-defined]
+
+
+class Concatenate:
+ # TODO: Improve coverage.
+
+ cube_list: CubeList
+
+ params = [[False, True]]
+ param_names = ["Lazy operations"]
+
+ def setup(self, lazy_run: bool):
+ warnings.filterwarnings("ignore", message="Ignoring a datum")
+ warnings.filterwarnings("ignore", category=IrisVagueMetadataWarning)
+ source_cube = realistic_4d_w_everything(lazy=lazy_run)
+ self.cube_list = CubeList([source_cube])
+ for _ in range(24):
+ next_cube = self.cube_list[-1].copy()
+ first_dim_coord = next_cube.coord(dimensions=0, dim_coords=True)
+ first_dim_coord.points = (
+ first_dim_coord.points + np.ptp(first_dim_coord.points) + 1
+ )
+ self.cube_list.append(next_cube)
+
+ def time_concatenate(self, _):
+ _ = self.cube_list.concatenate_cube()
+
+ def tracemalloc_concatenate(self, _):
+ _ = self.cube_list.concatenate_cube()
+
+ tracemalloc_concatenate.number = 3 # type: ignore[attr-defined]
diff --git a/benchmarks/benchmarks/mesh/__init__.py b/benchmarks/benchmarks/mesh/__init__.py
new file mode 100644
index 0000000000..9cc76ce0aa
--- /dev/null
+++ b/benchmarks/benchmarks/mesh/__init__.py
@@ -0,0 +1,5 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+"""Benchmark tests for the iris.mesh module."""
diff --git a/benchmarks/benchmarks/mesh/utils/__init__.py b/benchmarks/benchmarks/mesh/utils/__init__.py
new file mode 100644
index 0000000000..e20973c0a7
--- /dev/null
+++ b/benchmarks/benchmarks/mesh/utils/__init__.py
@@ -0,0 +1,5 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+"""Benchmark tests for the iris.mesh.utils module."""
diff --git a/benchmarks/benchmarks/mesh/utils/regions_combine.py b/benchmarks/benchmarks/mesh/utils/regions_combine.py
new file mode 100644
index 0000000000..a61deea56d
--- /dev/null
+++ b/benchmarks/benchmarks/mesh/utils/regions_combine.py
@@ -0,0 +1,227 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+"""Benchmarks stages of operation.
+
+Benchmarks stages of operation of the function
+:func:`iris.mesh.utils.recombine_submeshes`.
+
+"""
+
+import os
+
+import dask.array as da
+import numpy as np
+
+from iris import load, load_cube, save
+from iris.mesh.utils import recombine_submeshes
+
+from ...generate_data.ugrid import make_cube_like_2d_cubesphere
+
+
+class MixinCombineRegions:
+ # Characterise time taken + memory-allocated, for various stages of combine
+ # operations on cubesphere-like test data.
+ params = [50, 500]
+ param_names = ["cubesphere-N"]
+
+ def _parametrised_cache_filename(self, n_cubesphere, content_name):
+ return f"cube_C{n_cubesphere}_{content_name}.nc"
+
+ def _make_region_cubes(self, full_mesh_cube):
+ """Make a fixed number of region cubes from a full meshcube."""
+ # Divide the cube into regions.
+ n_faces = full_mesh_cube.shape[-1]
+ # Start with a simple list of face indices
+ # first extend to multiple of 5
+ n_faces_5s = 5 * ((n_faces + 1) // 5)
+ i_faces = np.arange(n_faces_5s, dtype=int)
+ # reshape (5N,) to (N, 5)
+ i_faces = i_faces.reshape((n_faces_5s // 5, 5))
+ # reorder [2, 3, 4, 0, 1] within each block of 5
+ i_faces = np.concatenate([i_faces[:, 2:], i_faces[:, :2]], axis=1)
+ # flatten to get [2 3 4 0 1 (-) 8 9 10 6 7 (-) 13 14 15 11 12 ...]
+ i_faces = i_faces.flatten()
+ # reduce back to original length, wrap any overflows into valid range
+ i_faces = i_faces[:n_faces] % n_faces
+
+        # Divide into regions -- always slightly uneven, since 7 doesn't
+        # divide the face count exactly.
+ n_regions = 7
+ n_facesperregion = n_faces // n_regions
+ i_face_regions = (i_faces // n_facesperregion) % n_regions
+ region_inds = [
+ np.where(i_face_regions == i_region)[0] for i_region in range(n_regions)
+ ]
+ # NOTE: this produces 7 regions, with near-adjacent value ranges but
+ # with some points "moved" to an adjacent region.
+ # Also, region-0 is bigger (because of not dividing by 7).
+
+ # Finally, make region cubes with these indices.
+ region_cubes = [full_mesh_cube[..., inds] for inds in region_inds]
+ return region_cubes
+
+ def setup_cache(self):
+ """Cache all the necessary source data on disk."""
+ # Control dask, to minimise memory usage + allow largest data.
+ self.fix_dask_settings()
+
+ for n_cubesphere in self.params:
+ # Do for each parameter, since "setup_cache" is NOT parametrised
+ mesh_cube = make_cube_like_2d_cubesphere(
+ n_cube=n_cubesphere, with_mesh=True
+ )
+ # Save to files which include the parameter in the names.
+ save(
+ mesh_cube,
+ self._parametrised_cache_filename(n_cubesphere, "meshcube"),
+ )
+ region_cubes = self._make_region_cubes(mesh_cube)
+ save(
+ region_cubes,
+ self._parametrised_cache_filename(n_cubesphere, "regioncubes"),
+ )
+
+ def setup(self, n_cubesphere, imaginary_data=True, create_result_cube=True):
+ """Combine tests "standard" setup operation.
+
+ Load the source cubes (full-mesh + region) from disk.
+ These are specific to the cubesize parameter.
+ The data is cached on disk rather than calculated, to avoid any
+ pre-loading of the process memory allocation.
+
+ If 'imaginary_data' is set (default), the region cubes data is replaced
+ with lazy data in the form of a da.zeros(). Otherwise, the region data
+ is lazy data from the files.
+
+ If 'create_result_cube' is set, create "self.combined_cube" containing
+ the (still lazy) result.
+
+ NOTE: various test classes override + extend this.
+
+ """
+ # Load source cubes (full-mesh and regions)
+ self.full_mesh_cube = load_cube(
+ self._parametrised_cache_filename(n_cubesphere, "meshcube")
+ )
+ self.region_cubes = load(
+ self._parametrised_cache_filename(n_cubesphere, "regioncubes")
+ )
+
+ # Remove all var-names from loaded cubes, which can otherwise cause
+ # problems. Also implement 'imaginary' data.
+ for cube in self.region_cubes + [self.full_mesh_cube]:
+ cube.var_name = None
+ for coord in cube.coords():
+ coord.var_name = None
+ if imaginary_data:
+ # Replace cube data (lazy file data) with 'imaginary' data.
+ # This has the same lazy-array attributes, but is allocated by
+ # creating chunks on demand instead of loading from file.
+ data = cube.lazy_data()
+ data = da.zeros(data.shape, dtype=data.dtype, chunks=data.chunksize)
+ cube.data = data
+
+ if create_result_cube:
+ self.recombined_cube = self.recombine()
+
+ # Fix dask usage mode for all the subsequent performance tests.
+ self.fix_dask_settings()
+
+ def fix_dask_settings(self):
+ """Fix "standard" dask behaviour for time+space testing.
+
+ Currently this is single-threaded mode, with known chunksize,
+ which is optimised for space saving so we can test largest data.
+
+ """
+ import dask.config as dcfg
+
+        # Use single-threaded, to avoid process-switching costs and minimise memory usage.
+        # N.B. generally may be slower, but uses less memory.
+        dcfg.set(scheduler="single-threaded")
+        # Configure iris._lazy_data.as_lazy_data to aim for 128 MiB chunks.
+        dcfg.set({"array.chunk-size": "128Mib"})
+
+ def recombine(self):
+ # A handy general shorthand for the main "combine" operation.
+ result = recombine_submeshes(
+ self.full_mesh_cube,
+ self.region_cubes,
+ index_coord_name="i_mesh_face",
+ )
+ return result
+
+
+class CombineRegionsCreateCube(MixinCombineRegions):
+ """Time+memory costs of creating a combined-regions cube.
+
+ The result is lazy, and we don't do the actual calculation.
+
+ """
+
+ def setup(self, n_cubesphere):
+ # In this case only, do *not* create the result cube.
+ # That is the operation we want to test.
+ super().setup(n_cubesphere, create_result_cube=False)
+
+ def time_create_combined_cube(self, n_cubesphere):
+ self.recombine()
+
+ def tracemalloc_create_combined_cube(self, n_cubesphere):
+ self.recombine()
+
+
+class CombineRegionsComputeRealData(MixinCombineRegions):
+ """Time+memory costs of computing combined-regions data."""
+
+ def time_compute_data(self, n_cubesphere):
+ _ = self.recombined_cube.data
+
+ def tracemalloc_compute_data(self, n_cubesphere):
+ _ = self.recombined_cube.data
+
+
+class CombineRegionsSaveData(MixinCombineRegions):
+ """Test saving *only*.
+
+ Test saving *only*, having replaced the input cube data with 'imaginary'
+ array data, so that input data is not loaded from disk during the save
+ operation.
+
+    """
+
+ def time_save(self, n_cubesphere):
+ # Save to disk, which must compute data + stream it to file.
+ save(self.recombined_cube, "tmp.nc")
+
+ def tracemalloc_save(self, n_cubesphere):
+ save(self.recombined_cube, "tmp.nc")
+
+ def track_filesize_saved(self, n_cubesphere):
+ save(self.recombined_cube, "tmp.nc")
+ return os.path.getsize("tmp.nc") * 1.0e-6
+
+
+CombineRegionsSaveData.track_filesize_saved.unit = "Mb" # type: ignore[attr-defined]
+
+
+class CombineRegionsFileStreamedCalc(MixinCombineRegions):
+ """Test the whole cost of file-to-file streaming.
+
+ Uses the combined cube which is based on lazy data loading from the region
+ cubes on disk.
+ """
+
+ def setup(self, n_cubesphere):
+ # In this case only, do *not* replace the loaded regions data with
+ # 'imaginary' data, as we want to test file-to-file calculation+save.
+ super().setup(n_cubesphere, imaginary_data=False)
+
+ def time_stream_file2file(self, n_cubesphere):
+ # Save to disk, which must compute data + stream it to file.
+ save(self.recombined_cube, "tmp.nc")
+
+ def tracemalloc_stream_file2file(self, n_cubesphere):
+ save(self.recombined_cube, "tmp.nc")
diff --git a/benchmarks/benchmarks/plot.py b/benchmarks/benchmarks/plot.py
new file mode 100644
index 0000000000..e8fbb5372d
--- /dev/null
+++ b/benchmarks/benchmarks/plot.py
@@ -0,0 +1,34 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+"""Plot benchmark tests."""
+
+import matplotlib as mpl
+import numpy as np
+
+from iris import coords, cube, plot
+
+mpl.use("agg")
+
+
+class AuxSort:
+ def setup(self):
+ # Manufacture data from which contours can be derived.
+ # Should generate 10 distinct contours, regardless of dim size.
+ dim_size = 200
+ repeat_number = int(dim_size / 10)
+ repeat_range = range(int((dim_size**2) / repeat_number))
+ data = np.repeat(repeat_range, repeat_number)
+ data = data.reshape((dim_size,) * 2)
+
+ # These benchmarks are from a user perspective, so setting up a
+ # user-level case that will prompt the calling of aux_coords.sort in plot.py.
+ dim_coord = coords.DimCoord(np.arange(dim_size))
+ local_cube = cube.Cube(data)
+ local_cube.add_aux_coord(dim_coord, 0)
+ self.cube = local_cube
+
+ def time_aux_sort(self):
+ # Contour plot arbitrarily picked. Known to prompt aux_coords.sort.
+ plot.contour(self.cube)
diff --git a/benchmarks/benchmarks/regridding.py b/benchmarks/benchmarks/regridding.py
new file mode 100644
index 0000000000..e227da0ec6
--- /dev/null
+++ b/benchmarks/benchmarks/regridding.py
@@ -0,0 +1,119 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+"""Regridding benchmark test."""
+
+# import iris tests first so that some things can be initialised before
+# importing anything else
+from iris import tests # isort:skip
+
+import numpy as np
+
+import iris
+from iris.analysis import AreaWeighted, PointInCell
+from iris.coords import AuxCoord
+
+
+class HorizontalChunkedRegridding:
+ def setup(self) -> None:
+ # Prepare a cube and a template
+
+ cube_file_path = tests.get_data_path(["NetCDF", "regrid", "regrid_xyt.nc"])
+ self.cube = iris.load_cube(cube_file_path)
+
+ # Prepare a tougher cube and chunk it
+ chunked_cube_file_path = tests.get_data_path(
+ ["NetCDF", "regrid", "regrid_xyt.nc"]
+ )
+ self.chunked_cube = iris.load_cube(chunked_cube_file_path)
+
+ # Chunked data makes the regridder run repeatedly
+ self.cube.data = self.cube.lazy_data().rechunk((1, -1, -1))
+
+ template_file_path = tests.get_data_path(
+ ["NetCDF", "regrid", "regrid_template_global_latlon.nc"]
+ )
+ self.template_cube = iris.load_cube(template_file_path)
+
+ # Prepare a regridding scheme
+ self.scheme_area_w = AreaWeighted()
+
+ def time_regrid_area_w(self) -> None:
+ # Regrid the cube onto the template.
+ out = self.cube.regrid(self.template_cube, self.scheme_area_w)
+ # Realise the data
+ out.data
+
+ def time_regrid_area_w_new_grid(self) -> None:
+ # Regrid the chunked cube
+ out = self.chunked_cube.regrid(self.template_cube, self.scheme_area_w)
+ # Realise data
+ out.data
+
+ def tracemalloc_regrid_area_w(self) -> None:
+ # Regrid the chunked cube
+ out = self.cube.regrid(self.template_cube, self.scheme_area_w)
+ # Realise data
+ out.data
+
+ tracemalloc_regrid_area_w.number = 3 # type: ignore[attr-defined]
+
+ def tracemalloc_regrid_area_w_new_grid(self) -> None:
+ # Regrid the chunked cube
+ out = self.chunked_cube.regrid(self.template_cube, self.scheme_area_w)
+ # Realise data
+ out.data
+
+ tracemalloc_regrid_area_w_new_grid.number = 3 # type: ignore[attr-defined]
+
+
+class CurvilinearRegridding:
+ def setup(self) -> None:
+ # Prepare a cube and a template
+
+ cube_file_path = tests.get_data_path(["NetCDF", "regrid", "regrid_xyt.nc"])
+ self.cube = iris.load_cube(cube_file_path)
+
+ # Make the source cube curvilinear
+ x_coord = self.cube.coord("longitude")
+ y_coord = self.cube.coord("latitude")
+ xx, yy = np.meshgrid(x_coord.points, y_coord.points)
+ self.cube.remove_coord(x_coord)
+ self.cube.remove_coord(y_coord)
+ x_coord_2d = AuxCoord(
+ xx,
+ standard_name=x_coord.standard_name,
+ units=x_coord.units,
+ coord_system=x_coord.coord_system,
+ )
+ y_coord_2d = AuxCoord(
+ yy,
+ standard_name=y_coord.standard_name,
+ units=y_coord.units,
+ coord_system=y_coord.coord_system,
+ )
+ self.cube.add_aux_coord(x_coord_2d, (1, 2))
+ self.cube.add_aux_coord(y_coord_2d, (1, 2))
+
+ template_file_path = tests.get_data_path(
+ ["NetCDF", "regrid", "regrid_template_global_latlon.nc"]
+ )
+ self.template_cube = iris.load_cube(template_file_path)
+
+ # Prepare a regridding scheme
+ self.scheme_pic = PointInCell()
+
+ def time_regrid_pic(self) -> None:
+ # Regrid the cube onto the template.
+ out = self.cube.regrid(self.template_cube, self.scheme_pic)
+ # Realise the data
+ out.data
+
+ def tracemalloc_regrid_pic(self) -> None:
+ # Regrid the cube onto the template.
+ out = self.cube.regrid(self.template_cube, self.scheme_pic)
+ # Realise the data
+ out.data
+
+ tracemalloc_regrid_pic.number = 3 # type: ignore[attr-defined]
diff --git a/benchmarks/benchmarks/save.py b/benchmarks/benchmarks/save.py
new file mode 100644
index 0000000000..4bac1b1450
--- /dev/null
+++ b/benchmarks/benchmarks/save.py
@@ -0,0 +1,43 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+"""File saving benchmarks."""
+
+from iris import save
+from iris.mesh import save_mesh
+
+from .generate_data.ugrid import make_cube_like_2d_cubesphere
+
+
+class NetcdfSave:
+ params = [[50, 600], [False, True]]
+ param_names = ["cubesphere-N", "is_unstructured"]
+
+ def setup(self, n_cubesphere, is_unstructured):
+ self.cube = make_cube_like_2d_cubesphere(
+ n_cube=n_cubesphere, with_mesh=is_unstructured
+ )
+
+ def _save_data(self, cube, do_copy=True):
+ if do_copy:
+            # Copy the cube, to avoid distorting the results by changing it,
+            # because we know that older Iris code realises lazy coords.
+ cube = cube.copy()
+ save(cube, "tmp.nc")
+
+ def _save_mesh(self, cube):
+ # In this case, we are happy that the mesh is *not* modified
+ save_mesh(cube.mesh, "mesh.nc")
+
+ def time_netcdf_save_cube(self, n_cubesphere, is_unstructured):
+ self._save_data(self.cube)
+
+ def time_netcdf_save_mesh(self, n_cubesphere, is_unstructured):
+ if is_unstructured:
+ self._save_mesh(self.cube)
+
+ def tracemalloc_netcdf_save(self, n_cubesphere, is_unstructured):
+ # Don't need to copy the cube here since track_ benchmarks don't
+ # do repeats between self.setup() calls.
+ self._save_data(self.cube, do_copy=False)
diff --git a/benchmarks/benchmarks/sperf/__init__.py b/benchmarks/benchmarks/sperf/__init__.py
new file mode 100644
index 0000000000..2b8b508fd5
--- /dev/null
+++ b/benchmarks/benchmarks/sperf/__init__.py
@@ -0,0 +1,38 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+"""Benchmarks for the SPerf scheme of the UK Met Office's NG-VAT project.
+
+SPerf = assessing performance against a series of increasingly large LFRic
+datasets.
+"""
+
+from iris import load_cube
+
+from ..generate_data.ugrid import make_cubesphere_testfile
+
+
+class FileMixin:
+ """For use in any benchmark classes that work on a file."""
+
+ # Allows time for large file generation.
+ timeout = 3600.0
+ # Largest file with these params: ~90GB.
+ # Total disk space: ~410GB.
+ params = [
+ [12, 384, 640, 960, 1280, 1668],
+ [1, 36, 72],
+ [1, 3, 10],
+ ]
+ param_names = ["cubesphere_C", "N levels", "N time steps"]
+    # cubesphere_C: notation refers to the number of cells along each panel edge.
+    # e.g. C1 is 6 faces, 8 nodes
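+    # e.g. C1668 is 6 * 1668**2 ~= 16.7 million faces per level.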
+
+ def setup(self, c_size, n_levels, n_times):
+ self.file_path = make_cubesphere_testfile(
+ c_size=c_size, n_levels=n_levels, n_times=n_times
+ )
+
+ def load_cube(self):
+ return load_cube(str(self.file_path))
diff --git a/benchmarks/benchmarks/sperf/combine_regions.py b/benchmarks/benchmarks/sperf/combine_regions.py
new file mode 100644
index 0000000000..591b7bb9be
--- /dev/null
+++ b/benchmarks/benchmarks/sperf/combine_regions.py
@@ -0,0 +1,234 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+"""Region combine benchmarks for the SPerf scheme of the UK Met Office's NG-VAT project."""
+
+import os.path
+
+from dask import array as da
+import numpy as np
+
+from iris import load, load_cube, save
+from iris.mesh.utils import recombine_submeshes
+
+from .. import on_demand_benchmark
+from ..generate_data.ugrid import BENCHMARK_DATA, make_cube_like_2d_cubesphere
+
+
+class Mixin:
+ # Characterise time taken + memory-allocated, for various stages of combine
+ # operations on cubesphere-like test data.
+ timeout = 300.0
+ params = [100, 200, 300, 500, 1000, 1668]
+ param_names = ["cubesphere_C"]
+ # Fix result units for the tracking benchmarks.
+ unit = "Mb"
+ temp_save_path = BENCHMARK_DATA / "tmp.nc"
+
+ def _parametrised_cache_filename(self, n_cubesphere, content_name):
+ return BENCHMARK_DATA / f"cube_C{n_cubesphere}_{content_name}.nc"
+
+ def _make_region_cubes(self, full_mesh_cube):
+ """Make a fixed number of region cubes from a full meshcube."""
+ # Divide the cube into regions.
+ n_faces = full_mesh_cube.shape[-1]
+ # Start with a simple list of face indices
+ # first extend to multiple of 5
+ n_faces_5s = 5 * ((n_faces + 1) // 5)
+ i_faces = np.arange(n_faces_5s, dtype=int)
+ # reshape (5N,) to (N, 5)
+ i_faces = i_faces.reshape((n_faces_5s // 5, 5))
+ # reorder [2, 3, 4, 0, 1] within each block of 5
+ i_faces = np.concatenate([i_faces[:, 2:], i_faces[:, :2]], axis=1)
+ # flatten to get [2 3 4 0 1 (-) 8 9 10 6 7 (-) 13 14 15 11 12 ...]
+ i_faces = i_faces.flatten()
+ # reduce back to original length, wrap any overflows into valid range
+ i_faces = i_faces[:n_faces] % n_faces
+
+        # Divide into regions -- always slightly uneven, since 7 doesn't
+        # divide the face count exactly.
+ n_regions = 7
+ n_facesperregion = n_faces // n_regions
+ i_face_regions = (i_faces // n_facesperregion) % n_regions
+ region_inds = [
+ np.where(i_face_regions == i_region)[0] for i_region in range(n_regions)
+ ]
+ # NOTE: this produces 7 regions, with near-adjacent value ranges but
+ # with some points "moved" to an adjacent region.
+ # Also, region-0 is bigger (because of not dividing by 7).
+
+ # Finally, make region cubes with these indices.
+ region_cubes = [full_mesh_cube[..., inds] for inds in region_inds]
+ return region_cubes
+
+ def setup_cache(self):
+ """Cache all the necessary source data on disk."""
+ # Control dask, to minimise memory usage + allow largest data.
+ self.fix_dask_settings()
+
+ for n_cubesphere in self.params:
+ # Do for each parameter, since "setup_cache" is NOT parametrised
+ mesh_cube = make_cube_like_2d_cubesphere(
+ n_cube=n_cubesphere, with_mesh=True
+ )
+ # Save to files which include the parameter in the names.
+ save(
+ mesh_cube,
+ self._parametrised_cache_filename(n_cubesphere, "meshcube"),
+ )
+ region_cubes = self._make_region_cubes(mesh_cube)
+ save(
+ region_cubes,
+ self._parametrised_cache_filename(n_cubesphere, "regioncubes"),
+ )
+
+ def setup(self, n_cubesphere, imaginary_data=True, create_result_cube=True):
+ """Combine tests "standard" setup operation.
+
+ Load the source cubes (full-mesh + region) from disk.
+ These are specific to the cubesize parameter.
+ The data is cached on disk rather than calculated, to avoid any
+ pre-loading of the process memory allocation.
+
+ If 'imaginary_data' is set (default), the region cubes data is replaced
+ with lazy data in the form of a da.zeros(). Otherwise, the region data
+ is lazy data from the files.
+
+ If 'create_result_cube' is set, create "self.combined_cube" containing
+ the (still lazy) result.
+
+ NOTE: various test classes override + extend this.
+
+ """
+ # Load source cubes (full-mesh and regions)
+ self.full_mesh_cube = load_cube(
+ self._parametrised_cache_filename(n_cubesphere, "meshcube")
+ )
+ self.region_cubes = load(
+ self._parametrised_cache_filename(n_cubesphere, "regioncubes")
+ )
+
+ # Remove all var-names from loaded cubes, which can otherwise cause
+ # problems. Also implement 'imaginary' data.
+ for cube in self.region_cubes + [self.full_mesh_cube]:
+ cube.var_name = None
+ for coord in cube.coords():
+ coord.var_name = None
+ if imaginary_data:
+ # Replace cube data (lazy file data) with 'imaginary' data.
+ # This has the same lazy-array attributes, but is allocated by
+ # creating chunks on demand instead of loading from file.
+ data = cube.lazy_data()
+ data = da.zeros(data.shape, dtype=data.dtype, chunks=data.chunksize)
+ cube.data = data
+
+ if create_result_cube:
+ self.recombined_cube = self.recombine()
+
+ # Fix dask usage mode for all the subsequent performance tests.
+ self.fix_dask_settings()
+
+ def teardown(self, _):
+ self.temp_save_path.unlink(missing_ok=True)
+
+ def fix_dask_settings(self):
+ """Fix "standard" dask behaviour for time+space testing.
+
+ Currently this is single-threaded mode, with known chunksize,
+ which is optimised for space saving so we can test largest data.
+
+ """
+ import dask.config as dcfg
+
+        # Use single-threaded, to avoid process-switching costs and minimise memory usage.
+        # N.B. generally may be slower, but uses less memory.
+        dcfg.set(scheduler="single-threaded")
+        # Configure iris._lazy_data.as_lazy_data to aim for 128 MiB chunks.
+        dcfg.set({"array.chunk-size": "128Mib"})
+
+ def recombine(self):
+ # A handy general shorthand for the main "combine" operation.
+ result = recombine_submeshes(
+ self.full_mesh_cube,
+ self.region_cubes,
+ index_coord_name="i_mesh_face",
+ )
+ return result
+
+ def save_recombined_cube(self):
+ save(self.recombined_cube, self.temp_save_path)
+
+
+@on_demand_benchmark
+class CreateCube(Mixin):
+ """Time+memory costs of creating a combined-regions cube.
+
+ The result is lazy, and we don't do the actual calculation.
+
+ """
+
+ def setup(self, n_cubesphere, imaginary_data=True, create_result_cube=False):
+ # In this case only, do *not* create the result cube.
+ # That is the operation we want to test.
+ super().setup(n_cubesphere, imaginary_data, create_result_cube)
+
+ def time_create_combined_cube(self, n_cubesphere):
+ self.recombine()
+
+ def tracemalloc_create_combined_cube(self, n_cubesphere):
+ self.recombine()
+
+
+@on_demand_benchmark
+class ComputeRealData(Mixin):
+ """Time+memory costs of computing combined-regions data."""
+
+ def time_compute_data(self, n_cubesphere):
+ _ = self.recombined_cube.data
+
+ def tracemalloc_compute_data(self, n_cubesphere):
+ _ = self.recombined_cube.data
+
+
+@on_demand_benchmark
+class SaveData(Mixin):
+ """Test saving *only*.
+
+ Test saving *only*, having replaced the input cube data with 'imaginary'
+ array data, so that input data is not loaded from disk during the save
+ operation.
+
+ """
+
+ def time_save(self, n_cubesphere):
+ # Save to disk, which must compute data + stream it to file.
+ self.save_recombined_cube()
+
+ def tracemalloc_save(self, n_cubesphere):
+ self.save_recombined_cube()
+
+ def track_filesize_saved(self, n_cubesphere):
+ self.save_recombined_cube()
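+ # Report the size of the saved file, converted from bytes to MB.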
+ return self.temp_save_path.stat().st_size * 1.0e-6
+
+
+@on_demand_benchmark
+class FileStreamedCalc(Mixin):
+ """Test the whole cost of file-to-file streaming.
+
+ Uses the combined cube which is based on lazy data loading from the region
+ cubes on disk.
+
+ """
+
+ def setup(self, n_cubesphere, imaginary_data=False, create_result_cube=True):
+ # In this case only, do *not* replace the loaded regions data with
+ # 'imaginary' data, as we want to test file-to-file calculation+save.
+ super().setup(n_cubesphere, imaginary_data, create_result_cube)
+
+ def time_stream_file2file(self, n_cubesphere):
+ # Save to disk, which must compute data + stream it to file.
+ self.save_recombined_cube()
+
+ def tracemalloc_stream_file2file(self, n_cubesphere):
+ self.save_recombined_cube()
diff --git a/benchmarks/benchmarks/sperf/equality.py b/benchmarks/benchmarks/sperf/equality.py
new file mode 100644
index 0000000000..ddee90cd28
--- /dev/null
+++ b/benchmarks/benchmarks/sperf/equality.py
@@ -0,0 +1,35 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+"""Equality benchmarks for the SPerf scheme of the UK Met Office's NG-VAT project."""
+
+from .. import on_demand_benchmark
+from . import FileMixin
+
+
+@on_demand_benchmark
+class CubeEquality(FileMixin):
+ r"""Benchmark time and memory costs.
+
+ Benchmark time and memory costs of comparing :class:`~iris.cube.Cube`\\ s
+ with attached :class:`~iris.mesh.MeshXY`\\ es.
+
+ Uses :class:`FileMixin` as the realistic case will be comparing
+ :class:`~iris.cube.Cube`\\ s that have been loaded from file.
+
+ """
+
+ # Cut down parent parameters.
+ params = [FileMixin.params[0]]
+
+ def setup(self, c_size, n_levels=1, n_times=1):
+ super().setup(c_size, n_levels, n_times)
+ self.cube = self.load_cube()
+ self.other_cube = self.load_cube()
+
+ def peakmem_eq(self, n_cube):
+ _ = self.cube == self.other_cube
+
+ def time_eq(self, n_cube):
+ _ = self.cube == self.other_cube
diff --git a/benchmarks/benchmarks/sperf/load.py b/benchmarks/benchmarks/sperf/load.py
new file mode 100644
index 0000000000..d304a30c82
--- /dev/null
+++ b/benchmarks/benchmarks/sperf/load.py
@@ -0,0 +1,27 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+"""File loading benchmarks for the SPerf scheme of the UK Met Office's NG-VAT project."""
+
+from .. import on_demand_benchmark
+from . import FileMixin
+
+
+@on_demand_benchmark
+class Load(FileMixin):
+ def time_load_cube(self, _, __, ___):
+ _ = self.load_cube()
+
+
+@on_demand_benchmark
+class Realise(FileMixin):
+ def setup(self, c_size, n_levels, n_times):
+ super().setup(c_size, n_levels, n_times)
+ self.loaded_cube = self.load_cube()
+
+ def time_realise_cube(self, _, __, ___):
+ # Don't touch loaded_cube.data - permanent realisation plays badly with
+ # ASV's re-run strategy.
+ assert self.loaded_cube.has_lazy_data()
+ self.loaded_cube.core_data().compute()
diff --git a/benchmarks/benchmarks/sperf/save.py b/benchmarks/benchmarks/sperf/save.py
new file mode 100644
index 0000000000..a715ec2424
--- /dev/null
+++ b/benchmarks/benchmarks/sperf/save.py
@@ -0,0 +1,50 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+"""File saving benchmarks for the SPerf scheme of the UK Met Office's NG-VAT project."""
+
+import os.path
+
+from iris import save
+from iris.mesh import save_mesh
+
+from .. import on_demand_benchmark
+from ..generate_data.ugrid import make_cube_like_2d_cubesphere
+
+
+@on_demand_benchmark
+class NetcdfSave:
+ """Benchmark time and memory costs of saving ~large-ish data cubes to netcdf."""
+
+ params = [[1, 100, 200, 300, 500, 1000, 1668], [False, True]]
+ param_names = ["cubesphere_C", "is_unstructured"]
+ # Fix result units for the tracking benchmarks.
+ unit = "Mb"
+
+ def setup(self, n_cubesphere, is_unstructured):
+ self.cube = make_cube_like_2d_cubesphere(
+ n_cube=n_cubesphere, with_mesh=is_unstructured
+ )
+
+ def _save_cube(self, cube):
+ save(cube, "tmp.nc")
+
+ def _save_mesh(self, cube):
+ save_mesh(cube.mesh, "mesh.nc")
+
+ def time_save_cube(self, n_cubesphere, is_unstructured):
+ self._save_cube(self.cube)
+
+ def tracemalloc_save_cube(self, n_cubesphere, is_unstructured):
+ self._save_cube(self.cube)
+
+ def time_save_mesh(self, n_cubesphere, is_unstructured):
+ if is_unstructured:
+ self._save_mesh(self.cube)
+
+ # The filesizes make a good reference point for the 'addedmem' memory
+ # usage results.
+ def track_filesize_save_cube(self, n_cubesphere, is_unstructured):
+ self._save_cube(self.cube)
+ return os.path.getsize("tmp.nc") * 1.0e-6
diff --git a/benchmarks/benchmarks/stats.py b/benchmarks/benchmarks/stats.py
new file mode 100644
index 0000000000..fbab12cd4b
--- /dev/null
+++ b/benchmarks/benchmarks/stats.py
@@ -0,0 +1,52 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+"""Stats benchmark tests."""
+
+import iris
+from iris.analysis.stats import pearsonr
+import iris.tests
+
+
+class PearsonR:
+ def setup(self):
+ cube_temp = iris.load_cube(
+ iris.tests.get_data_path(
+ ("NetCDF", "global", "xyt", "SMALL_total_column_co2.nc")
+ )
+ )
+
+ # Make data non-lazy.
+ cube_temp.data
+
+ self.cube_a = cube_temp[:6]
+ self.cube_b = cube_temp[20:26]
+ self.cube_b.replace_coord(self.cube_a.coord("time"))
+ for name in ["latitude", "longitude"]:
+ self.cube_b.coord(name).guess_bounds()
+ self.weights = iris.analysis.cartography.area_weights(self.cube_b)
+
+ def time_real(self):
+ pearsonr(self.cube_a, self.cube_b, weights=self.weights)
+
+ def tracemalloc_real(self):
+ pearsonr(self.cube_a, self.cube_b, weights=self.weights)
+
+ tracemalloc_real.number = 3 # type: ignore[attr-defined]
+
+ def time_lazy(self):
+ for cube in self.cube_a, self.cube_b:
+ cube.data = cube.lazy_data()
+
+ result = pearsonr(self.cube_a, self.cube_b, weights=self.weights)
+ result.data
+
+ def tracemalloc_lazy(self):
+ for cube in self.cube_a, self.cube_b:
+ cube.data = cube.lazy_data()
+
+ result = pearsonr(self.cube_a, self.cube_b, weights=self.weights)
+ result.data
+
+ tracemalloc_lazy.number = 3 # type: ignore[attr-defined]
diff --git a/benchmarks/benchmarks/trajectory.py b/benchmarks/benchmarks/trajectory.py
new file mode 100644
index 0000000000..77825ef2f2
--- /dev/null
+++ b/benchmarks/benchmarks/trajectory.py
@@ -0,0 +1,56 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+"""Trajectory benchmark test."""
+
+# import iris tests first so that some things can be initialised before
+# importing anything else
+from iris import tests # isort:skip
+
+import numpy as np
+
+import iris
+from iris.analysis.trajectory import interpolate
+
+
+class TrajectoryInterpolation:
+ def setup(self) -> None:
+ # Prepare a cube and a template
+
+ cube_file_path = tests.get_data_path(["NetCDF", "regrid", "regrid_xyt.nc"])
+ self.cube = iris.load_cube(cube_file_path)
+
+ trajectory = np.array([np.array((-50 + i, -50 + i)) for i in range(100)])
+ self.sample_points = [
+ ("longitude", trajectory[:, 0]),
+ ("latitude", trajectory[:, 1]),
+ ]
+
+ def time_trajectory_linear(self) -> None:
+ # Regrid the cube onto the template.
+ out_cube = interpolate(self.cube, self.sample_points, method="linear")
+ # Realise the data
+ out_cube.data
+
+ def tracemalloc_trajectory_linear(self) -> None:
+ # Regrid the cube onto the template.
+ out_cube = interpolate(self.cube, self.sample_points, method="linear")
+ # Realise the data
+ out_cube.data
+
+ tracemalloc_trajectory_linear.number = 3 # type: ignore[attr-defined]
+
+ def time_trajectory_nearest(self) -> None:
+ # Regrid the cube onto the template.
+ out_cube = interpolate(self.cube, self.sample_points, method="nearest")
+ # Realise the data
+ out_cube.data
+
+ def tracemalloc_trajectory_nearest(self) -> None:
+ # Regrid the cube onto the template.
+ out_cube = interpolate(self.cube, self.sample_points, method="nearest")
+ # Realise the data
+ out_cube.data
+
+ tracemalloc_trajectory_nearest.number = 3 # type: ignore[attr-defined]
diff --git a/benchmarks/benchmarks/unit_style/__init__disabled.py b/benchmarks/benchmarks/unit_style/__init__disabled.py
new file mode 100644
index 0000000000..d7f84c2b91
--- /dev/null
+++ b/benchmarks/benchmarks/unit_style/__init__disabled.py
@@ -0,0 +1,16 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+"""Small-scope benchmarks that can help with performance investigations.
+
+By renaming ``__init__.py`` these are all disabled by default:
+
+- They bloat benchmark run-time.
+- They are too vulnerable to 'noise' due to their small scope - small objects,
+ short operations - they report a lot of false positive regressions.
+- We rely on the wider-scope integration-style benchmarks to flag performance
+ changes, upon which we expect to do some manual investigation - these
+ smaller benchmarks can be run then.
+
+"""
diff --git a/benchmarks/benchmarks/unit_style/aux_factory.py b/benchmarks/benchmarks/unit_style/aux_factory.py
new file mode 100644
index 0000000000..329a2b0bda
--- /dev/null
+++ b/benchmarks/benchmarks/unit_style/aux_factory.py
@@ -0,0 +1,52 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+"""Small-scope AuxFactory benchmark tests."""
+
+import numpy as np
+
+from iris import aux_factory, coords
+
+
+class FactoryCommon:
+ # TODO: once https://github.com/airspeed-velocity/asv/pull/828 is released:
+ # * make class an ABC
+ # * remove NotImplementedError
+ # * combine setup_common into setup
+ """Run a generalised suite of benchmarks for any factory.
+
+ A base class running a generalised suite of benchmarks for any factory.
+ Factory to be specified in a subclass.
+
+ ASV will run the benchmarks within this class for any subclasses.
+
+ Should only be instantiated within subclasses, but cannot enforce this
+ since ASV cannot handle classes that include abstract methods.
+ """
+
+ def setup(self):
+ """Prevent ASV instantiating (must therefore override setup() in any subclasses.)."""
+ raise NotImplementedError
+
+ def setup_common(self):
+ """Shared setup code that can be called by subclasses."""
+ self.factory = self.create()
+
+ def time_create(self):
+ """Create an instance of the benchmarked factory.
+
+ Create method is specified in the subclass.
+ """
+ self.create()
+
+
+class HybridHeightFactory(FactoryCommon):
+ def setup(self):
+ data_1d = np.zeros(1000)
+ self.coord = coords.AuxCoord(points=data_1d, units="m")
+
+ self.setup_common()
+
+ def create(self):
+ return aux_factory.HybridHeightFactory(delta=self.coord)
diff --git a/benchmarks/benchmarks/unit_style/coords.py b/benchmarks/benchmarks/unit_style/coords.py
new file mode 100644
index 0000000000..704746f190
--- /dev/null
+++ b/benchmarks/benchmarks/unit_style/coords.py
@@ -0,0 +1,129 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+"""Small-scope Coord benchmark tests."""
+
+import numpy as np
+
+from iris import coords
+
+from .. import disable_repeat_between_setup
+
+
+def setup():
+ """General variables needed by multiple benchmark classes."""
+ global data_1d
+
+ data_1d = np.zeros(1000)
+
+
+class CoordCommon:
+ # TODO: once https://github.com/airspeed-velocity/asv/pull/828 is released:
+ # * make class an ABC
+ # * remove NotImplementedError
+ # * combine setup_common into setup
+ """Run a generalised suite of benchmarks for any coord.
+
+ A base class running a generalised suite of benchmarks for any coord.
+ Coord to be specified in a subclass.
+
+ ASV will run the benchmarks within this class for any subclasses.
+
+ Should only be instantiated within subclasses, but cannot enforce this
+ since ASV cannot handle classes that include abstract methods.
+ """
+
+ def setup(self):
+ """Prevent ASV instantiating (must therefore override setup() in any subclasses.)."""
+ raise NotImplementedError
+
+ def setup_common(self):
+ """Shared setup code that can be called by subclasses."""
+ self.component = self.create()
+
+ def time_create(self):
+ """Create an instance of the benchmarked factory.
+
+ Create method is specified in the subclass.
+ """
+ self.create()
+
+
+class DimCoord(CoordCommon):
+ def setup(self):
+ point_values = np.arange(1000)
+ bounds = np.array([point_values - 1, point_values + 1]).transpose()
+
+ self.create_kwargs = {
+ "points": point_values,
+ "bounds": bounds,
+ "units": "days since 1970-01-01",
+ "climatological": True,
+ }
+
+ self.setup_common()
+
+ def create(self):
+ return coords.DimCoord(**self.create_kwargs)
+
+ def time_regular(self):
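+ # Benchmark building a regularly-spaced DimCoord from a zeroth point, step and count.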
+ coords.DimCoord.from_regular(0, 1, 1000)
+
+
+class AuxCoord(CoordCommon):
+ def setup(self):
+ bounds = np.array([data_1d - 1, data_1d + 1]).transpose()
+
+ self.create_kwargs = {
+ "points": data_1d,
+ "bounds": bounds,
+ "units": "days since 1970-01-01",
+ "climatological": True,
+ }
+
+ self.setup_common()
+
+ def create(self):
+ return coords.AuxCoord(**self.create_kwargs)
+
+ def time_points(self):
+ _ = self.component.points
+
+ def time_bounds(self):
+ _ = self.component.bounds
+
+
+@disable_repeat_between_setup
+class AuxCoordLazy(AuxCoord):
+ """Lazy equivalent of :class:`AuxCoord`."""
+
+ def setup(self):
+ super().setup()
+ self.create_kwargs["points"] = self.component.lazy_points()
+ self.create_kwargs["bounds"] = self.component.lazy_bounds()
+ self.setup_common()
+
+
+class CellMeasure(CoordCommon):
+ def setup(self):
+ self.setup_common()
+
+ def create(self):
+ return coords.CellMeasure(data_1d)
+
+
+class CellMethod(CoordCommon):
+ def setup(self):
+ self.setup_common()
+
+ def create(self):
+ return coords.CellMethod("test")
+
+
+class AncillaryVariable(CoordCommon):
+ def setup(self):
+ self.setup_common()
+
+ def create(self):
+ return coords.AncillaryVariable(data_1d)
diff --git a/benchmarks/benchmarks/unit_style/cube.py b/benchmarks/benchmarks/unit_style/cube.py
new file mode 100644
index 0000000000..780418aa14
--- /dev/null
+++ b/benchmarks/benchmarks/unit_style/cube.py
@@ -0,0 +1,252 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+"""Small-scope Cube benchmark tests."""
+
+import numpy as np
+
+from iris import analysis, aux_factory, coords, cube
+
+from .. import disable_repeat_between_setup
+from ..generate_data.stock import sample_meshcoord
+
+
+def setup(*params):
+ """General variables needed by multiple benchmark classes."""
+ global data_1d
+ global data_2d
+ global general_cube
+
+ data_2d = np.zeros((1000,) * 2)
+ data_1d = data_2d[0]
+ general_cube = cube.Cube(data_2d)
+
+
+class ComponentCommon:
+ # TODO: once https://github.com/airspeed-velocity/asv/pull/828 is released:
+ # * make class an ABC
+ # * remove NotImplementedError
+ # * combine setup_common into setup
+ """Run a generalised suite of benchmarks for cubes.
+
+ A base class running a generalised suite of benchmarks for cubes that
+ include a specified component (e.g. Coord, CellMeasure etc.). Component to
+ be specified in a subclass.
+
+ ASV will run the benchmarks within this class for any subclasses.
+
+ Should only be instantiated within subclasses, but cannot enforce this
+ since ASV cannot handle classes that include abstract methods.
+ """
+
+ def setup(self):
+ """Prevent ASV instantiating (must therefore override setup() in any subclasses.)."""
+ raise NotImplementedError
+
+ def create(self):
+ """Create a cube (generic).
+
+ cube_kwargs allow dynamic inclusion of different components;
+ specified in subclasses.
+ """
+ return cube.Cube(data=data_2d, **self.cube_kwargs)
+
+ def setup_common(self):
+ """Shared setup code that can be called by subclasses."""
+ self.cube = self.create()
+
+ def time_create(self):
+ """Create a cube that includes an instance of the benchmarked component."""
+ self.create()
+
+ def time_add(self):
+ """Add an instance of the benchmarked component to an existing cube."""
+ # Unable to create the copy during setup since this needs to be re-done
+ # for every repeat of the test (some components disallow duplicates).
+ general_cube_copy = general_cube.copy(data=data_2d)
+ self.add_method(general_cube_copy, *self.add_args)
+
+
+class Cube:
+ def time_basic(self):
+ cube.Cube(data_2d)
+
+ def time_rename(self):
+ general_cube.name = "air_temperature"
+
+
+class AuxCoord(ComponentCommon):
+ def setup(self):
+ self.coord_name = "test"
+ coord_bounds = np.array([data_1d - 1, data_1d + 1]).transpose()
+ aux_coord = coords.AuxCoord(
+ long_name=self.coord_name,
+ points=data_1d,
+ bounds=coord_bounds,
+ units="days since 1970-01-01",
+ climatological=True,
+ )
+
+ # Variables needed by the ComponentCommon base class.
+ self.cube_kwargs = {"aux_coords_and_dims": [(aux_coord, 0)]}
+ self.add_method = cube.Cube.add_aux_coord
+ self.add_args = (aux_coord, (0))
+
+ self.setup_common()
+
+ def time_return_coords(self):
+ self.cube.coords()
+
+ def time_return_coord_dims(self):
+ self.cube.coord_dims(self.coord_name)
+
+
+class AuxFactory(ComponentCommon):
+ def setup(self):
+ coord = coords.AuxCoord(points=data_1d, units="m")
+ self.hybrid_factory = aux_factory.HybridHeightFactory(delta=coord)
+
+ # Variables needed by the ComponentCommon base class.
+ self.cube_kwargs = {
+ "aux_coords_and_dims": [(coord, 0)],
+ "aux_factories": [self.hybrid_factory],
+ }
+
+ self.setup_common()
+
+ # Variables needed by the overridden time_add benchmark in this subclass.
+ cube_w_coord = self.cube.copy()
+ [cube_w_coord.remove_aux_factory(i) for i in cube_w_coord.aux_factories]
+ self.cube_w_coord = cube_w_coord
+
+ def time_add(self):
+ # Requires override from super().time_add because the cube needs an
+ # additional coord.
+ self.cube_w_coord.add_aux_factory(self.hybrid_factory)
+
+
+class CellMeasure(ComponentCommon):
+ def setup(self):
+ cell_measure = coords.CellMeasure(data_1d)
+
+ # Variables needed by the ComponentCommon base class.
+ self.cube_kwargs = {"cell_measures_and_dims": [(cell_measure, 0)]}
+ self.add_method = cube.Cube.add_cell_measure
+ self.add_args = (cell_measure, 0)
+
+ self.setup_common()
+
+
+class CellMethod(ComponentCommon):
+ def setup(self):
+ cell_method = coords.CellMethod("test")
+
+ # Variables needed by the ComponentCommon base class.
+ self.cube_kwargs = {"cell_methods": [cell_method]}
+ self.add_method = cube.Cube.add_cell_method
+ self.add_args = [cell_method]
+
+ self.setup_common()
+
+
+class AncillaryVariable(ComponentCommon):
+ def setup(self):
+ ancillary_variable = coords.AncillaryVariable(data_1d)
+
+ # Variables needed by the ComponentCommon base class.
+ self.cube_kwargs = {"ancillary_variables_and_dims": [(ancillary_variable, 0)]}
+ self.add_method = cube.Cube.add_ancillary_variable
+ self.add_args = (ancillary_variable, 0)
+
+ self.setup_common()
+
+
+class MeshCoord:
+ params = [
+ 6, # minimal cube-sphere
+ int(1e6), # realistic cube-sphere size
+ 1000, # To match size in :class:`AuxCoord`
+ ]
+ param_names = ["number of faces"]
+
+ def setup(self, n_faces):
+ mesh_kwargs = dict(n_nodes=n_faces + 2, n_edges=n_faces * 2, n_faces=n_faces)
+
+ self.mesh_coord = sample_meshcoord(sample_mesh_kwargs=mesh_kwargs)
+ self.data = np.zeros(n_faces)
+ self.cube_blank = cube.Cube(data=self.data)
+ self.cube = self.create()
+
+ def create(self):
+ return cube.Cube(data=self.data, aux_coords_and_dims=[(self.mesh_coord, 0)])
+
+ def time_create(self, n_faces):
+ _ = self.create()
+
+ @disable_repeat_between_setup
+ def time_add(self, n_faces):
+ self.cube_blank.add_aux_coord(self.mesh_coord, 0)
+
+ @disable_repeat_between_setup
+ def time_remove(self, n_faces):
+ self.cube.remove_coord(self.mesh_coord)
+
+
+class Merge:
+ def setup(self):
+ self.cube_list = cube.CubeList()
+ for i in np.arange(2):
+ i_cube = general_cube.copy()
+ i_coord = coords.AuxCoord([i])
+ i_cube.add_aux_coord(i_coord)
+ self.cube_list.append(i_cube)
+
+ def time_merge(self):
+ self.cube_list.merge()
+
+
+class Concatenate:
+ def setup(self):
+ dim_size = 1000
+ self.cube_list = cube.CubeList()
+ for i in np.arange(dim_size * 2, step=dim_size):
+ i_cube = general_cube.copy()
+ i_coord = coords.DimCoord(np.arange(dim_size) + (i * dim_size))
+ i_cube.add_dim_coord(i_coord, 0)
+ self.cube_list.append(i_cube)
+
+ def time_concatenate(self):
+ self.cube_list.concatenate()
+
+
+class Equality:
+ def setup(self):
+ self.cube_a = general_cube.copy()
+ self.cube_b = general_cube.copy()
+
+ aux_coord = coords.AuxCoord(data_1d)
+ self.cube_a.add_aux_coord(aux_coord, 0)
+ self.cube_b.add_aux_coord(aux_coord, 1)
+
+ def time_equality(self):
+ self.cube_a == self.cube_b
+
+
+class Aggregation:
+ def setup(self):
+ repeat_number = 10
+ repeat_range = range(int(1000 / repeat_number))
+ array_repeat = np.repeat(repeat_range, repeat_number)
+ array_unique = np.arange(len(array_repeat))
+
+ coord_repeat = coords.AuxCoord(points=array_repeat, long_name="repeat")
+ coord_unique = coords.DimCoord(points=array_unique, long_name="unique")
+
+ local_cube = general_cube.copy()
+ local_cube.add_aux_coord(coord_repeat, 0)
+ local_cube.add_dim_coord(coord_unique, 0)
+ self.cube = local_cube
+
+ def time_aggregated_by(self):
+ self.cube.aggregated_by("repeat", analysis.MEAN)
diff --git a/benchmarks/benchmarks/unit_style/mesh.py b/benchmarks/benchmarks/unit_style/mesh.py
new file mode 100644
index 0000000000..ed3aad1428
--- /dev/null
+++ b/benchmarks/benchmarks/unit_style/mesh.py
@@ -0,0 +1,187 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+"""Benchmark tests for the iris.mesh module."""
+
+from copy import deepcopy
+
+import numpy as np
+
+from iris import mesh
+
+from .. import disable_repeat_between_setup
+from ..generate_data.stock import sample_mesh
+
+
+class UGridCommon:
+ """Run a generalised suite of benchmarks for any mesh object.
+
+ A base class running a generalised suite of benchmarks for any mesh object.
+ Object to be specified in a subclass.
+
+ ASV will run the benchmarks within this class for any subclasses.
+
+ ASV will not benchmark this class as setup() triggers a NotImplementedError.
+ (ASV has not yet released ABC/abstractmethod support - asv#838).
+
+ """
+
+ params = [
+ 6, # minimal cube-sphere
+ int(1e6), # realistic cube-sphere size
+ ]
+ param_names = ["number of faces"]
+
+ def setup(self, *params):
+ self.object = self.create()
+
+ def create(self):
+ raise NotImplementedError
+
+ def time_create(self, *params):
+ """Create an instance of the benchmarked object.
+
+ create() method is specified in the subclass.
+ """
+ self.create()
+
+
+class Connectivity(UGridCommon):
+ def setup(self, n_faces):
+ self.array = np.zeros([n_faces, 3], dtype=int)
+ super().setup(n_faces)
+
+ def create(self):
+ return mesh.Connectivity(indices=self.array, cf_role="face_node_connectivity")
+
+ def time_indices(self, n_faces):
+ _ = self.object.indices
+
+ def time_location_lengths(self, n_faces):
+ # Proofed against the Connectivity name change (633ed17).
+ if getattr(self.object, "src_lengths", False):
+ meth = self.object.src_lengths
+ else:
+ meth = self.object.location_lengths
+ _ = meth()
+
+ def time_validate_indices(self, n_faces):
+ self.object.validate_indices()
+
+
+@disable_repeat_between_setup
+class ConnectivityLazy(Connectivity):
+ """Lazy equivalent of :class:`Connectivity`."""
+
+ def setup(self, n_faces):
+ super().setup(n_faces)
+ self.array = self.object.lazy_indices()
+ self.object = self.create()
+
+
+class MeshXY(UGridCommon):
+ def setup(self, n_faces, lazy=False):
+ ####
+ # Steal everything from the sample mesh for benchmarking creation of a
+ # brand new mesh.
+ source_mesh = sample_mesh(
+ n_nodes=n_faces + 2,
+ n_edges=n_faces * 2,
+ n_faces=n_faces,
+ lazy_values=lazy,
+ )
+
+ def get_coords_and_axes(location):
+ return [
+ (source_mesh.coord(axis=axis, location=location), axis)
+ for axis in ("x", "y")
+ ]
+
+ self.mesh_kwargs = dict(
+ topology_dimension=source_mesh.topology_dimension,
+ node_coords_and_axes=get_coords_and_axes("node"),
+ connectivities=source_mesh.connectivities(),
+ edge_coords_and_axes=get_coords_and_axes("edge"),
+ face_coords_and_axes=get_coords_and_axes("face"),
+ )
+ ####
+
+ super().setup(n_faces)
+
+ self.face_node = self.object.face_node_connectivity
+ self.node_x = self.object.node_coords.node_x
+ # Kwargs for reuse in search and remove methods.
+ self.connectivities_kwarg = dict(cf_role="edge_node_connectivity")
+ self.coords_kwarg = dict(location="face")
+
+ # TODO: an opportunity for speeding up runtime if needed, since
+ # eq_object is not needed for all benchmarks. Just don't generate it
+ # within a benchmark - the execution time is large enough that it
+ # could be a significant portion of the benchmark - makes regressions
+ # smaller and could even pick up regressions in copying instead!
+ self.eq_object = deepcopy(self.object)
+
+ def create(self):
+ return mesh.MeshXY(**self.mesh_kwargs)
+
+ def time_add_connectivities(self, n_faces):
+ self.object.add_connectivities(self.face_node)
+
+ def time_add_coords(self, n_faces):
+ self.object.add_coords(node_x=self.node_x)
+
+ def time_connectivities(self, n_faces):
+ _ = self.object.connectivities(**self.connectivities_kwarg)
+
+ def time_coords(self, n_faces):
+ _ = self.object.coords(**self.coords_kwarg)
+
+ def time_eq(self, n_faces):
+ _ = self.object == self.eq_object
+
+ def time_remove_connectivities(self, n_faces):
+ self.object.remove_connectivities(**self.connectivities_kwarg)
+
+ def time_remove_coords(self, n_faces):
+ self.object.remove_coords(**self.coords_kwarg)
+
+
+@disable_repeat_between_setup
+class MeshXYLazy(MeshXY):
+ """Lazy equivalent of :class:`MeshXY`."""
+
+ def setup(self, n_faces, lazy=True):
+ super().setup(n_faces, lazy=lazy)
+
+
+class MeshCoord(UGridCommon):
+ # Add extra parameter value to match AuxCoord benchmarking.
+ params = UGridCommon.params + [1000]
+
+ def setup(self, n_faces, lazy=False):
+ self.mesh = sample_mesh(
+ n_nodes=n_faces + 2,
+ n_edges=n_faces * 2,
+ n_faces=n_faces,
+ lazy_values=lazy,
+ )
+
+ super().setup(n_faces)
+
+ def create(self):
+ return mesh.MeshCoord(mesh=self.mesh, location="face", axis="x")
+
+ def time_points(self, n_faces):
+ _ = self.object.points
+
+ def time_bounds(self, n_faces):
+ _ = self.object.bounds
+
+
+@disable_repeat_between_setup
+class MeshCoordLazy(MeshCoord):
+ """Lazy equivalent of :class:`MeshCoord`."""
+
+ def setup(self, n_faces, lazy=True):
+ super().setup(n_faces, lazy=lazy)
diff --git a/benchmarks/benchmarks/unit_style/metadata_manager_factory.py b/benchmarks/benchmarks/unit_style/metadata_manager_factory.py
new file mode 100644
index 0000000000..0af055fa82
--- /dev/null
+++ b/benchmarks/benchmarks/unit_style/metadata_manager_factory.py
@@ -0,0 +1,83 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+"""Small-scope metadata manager factory benchmark tests."""
+
+from iris.common import (
+ AncillaryVariableMetadata,
+ BaseMetadata,
+ CellMeasureMetadata,
+ CoordMetadata,
+ CubeMetadata,
+ DimCoordMetadata,
+ metadata_manager_factory,
+)
+
+
+class MetadataManagerFactory__create:
+ params = [1, 10, 100]
+
+ def time_AncillaryVariableMetadata(self, n):
+ [metadata_manager_factory(AncillaryVariableMetadata) for _ in range(n)]
+
+ def time_BaseMetadata(self, n):
+ [metadata_manager_factory(BaseMetadata) for _ in range(n)]
+
+ def time_CellMeasureMetadata(self, n):
+ [metadata_manager_factory(CellMeasureMetadata) for _ in range(n)]
+
+ def time_CoordMetadata(self, n):
+ [metadata_manager_factory(CoordMetadata) for _ in range(n)]
+
+ def time_CubeMetadata(self, n):
+ [metadata_manager_factory(CubeMetadata) for _ in range(n)]
+
+ def time_DimCoordMetadata(self, n):
+ [metadata_manager_factory(DimCoordMetadata) for _ in range(n)]
+
+
+class MetadataManagerFactory:
+ def setup(self):
+ self.ancillary = metadata_manager_factory(AncillaryVariableMetadata)
+ self.base = metadata_manager_factory(BaseMetadata)
+ self.cell = metadata_manager_factory(CellMeasureMetadata)
+ self.coord = metadata_manager_factory(CoordMetadata)
+ self.cube = metadata_manager_factory(CubeMetadata)
+ self.dim = metadata_manager_factory(DimCoordMetadata)
+
+ def time_AncillaryVariableMetadata_fields(self):
+ self.ancillary.fields
+
+ def time_AncillaryVariableMetadata_values(self):
+ self.ancillary.values
+
+ def time_BaseMetadata_fields(self):
+ self.base.fields
+
+ def time_BaseMetadata_values(self):
+ self.base.values
+
+ def time_CellMeasuresMetadata_fields(self):
+ self.cell.fields
+
+ def time_CellMeasuresMetadata_values(self):
+ self.cell.values
+
+ def time_CoordMetadata_fields(self):
+ self.coord.fields
+
+ def time_CoordMetadata_values(self):
+ self.coord.values
+
+ def time_CubeMetadata_fields(self):
+ self.cube.fields
+
+ def time_CubeMetadata_values(self):
+ self.cube.values
+
+ def time_DimCoordMetadata_fields(self):
+ self.dim.fields
+
+ def time_DimCoordMetadata_values(self):
+ self.dim.values
diff --git a/benchmarks/benchmarks/unit_style/mixin.py b/benchmarks/benchmarks/unit_style/mixin.py
new file mode 100644
index 0000000000..92de5e7ad9
--- /dev/null
+++ b/benchmarks/benchmarks/unit_style/mixin.py
@@ -0,0 +1,78 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+"""Small-scope CFVariableMixin benchmark tests."""
+
+import numpy as np
+
+from iris import coords
+from iris.common.metadata import AncillaryVariableMetadata
+
+LONG_NAME = "air temperature"
+STANDARD_NAME = "air_temperature"
+VAR_NAME = "air_temp"
+UNITS = "degrees"
+ATTRIBUTES = dict(a=1)
+DICT = dict(
+ standard_name=STANDARD_NAME,
+ long_name=LONG_NAME,
+ var_name=VAR_NAME,
+ units=UNITS,
+ attributes=ATTRIBUTES,
+)
+METADATA = AncillaryVariableMetadata(**DICT)
+TUPLE = tuple(DICT.values())
+
+
+class CFVariableMixin:
+ def setup(self):
+ data_1d = np.zeros(1000)
+
+ # These benchmarks are from a user perspective, so using a user-level
+ # subclass of CFVariableMixin to test behaviour. AncillaryVariable is
+ # the simplest so using that.
+ self.cfm_proxy = coords.AncillaryVariable(data_1d)
+ self.cfm_proxy.long_name = "test"
+
+ def time_get_long_name(self):
+ self.cfm_proxy.long_name
+
+ def time_set_long_name(self):
+ self.cfm_proxy.long_name = LONG_NAME
+
+ def time_get_standard_name(self):
+ self.cfm_proxy.standard_name
+
+ def time_set_standard_name(self):
+ self.cfm_proxy.standard_name = STANDARD_NAME
+
+ def time_get_var_name(self):
+ self.cfm_proxy.var_name
+
+ def time_set_var_name(self):
+ self.cfm_proxy.var_name = VAR_NAME
+
+ def time_get_units(self):
+ self.cfm_proxy.units
+
+ def time_set_units(self):
+ self.cfm_proxy.units = UNITS
+
+ def time_get_attributes(self):
+ self.cfm_proxy.attributes
+
+ def time_set_attributes(self):
+ self.cfm_proxy.attributes = ATTRIBUTES
+
+ def time_get_metadata(self):
+ self.cfm_proxy.metadata
+
+ def time_set_metadata__dict(self):
+ self.cfm_proxy.metadata = DICT
+
+ def time_set_metadata__tuple(self):
+ self.cfm_proxy.metadata = TUPLE
+
+ def time_set_metadata__metadata(self):
+ self.cfm_proxy.metadata = METADATA
diff --git a/benchmarks/bm_runner.py b/benchmarks/bm_runner.py
new file mode 100644
index 0000000000..afc08ff6fa
--- /dev/null
+++ b/benchmarks/bm_runner.py
@@ -0,0 +1,660 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+"""Argparse conveniences for executing common types of benchmark runs."""
+
+from abc import ABC, abstractmethod
+import argparse
+from datetime import datetime
+from importlib import import_module
+from os import environ
+from pathlib import Path
+import re
+import shlex
+import subprocess
+from tempfile import NamedTemporaryFile
+from textwrap import dedent
+from typing import Literal, Protocol
+
+# The threshold beyond which shifts are 'notable'. See `asv compare`` docs
+# for more.
+COMPARE_FACTOR = 1.2
+
+BENCHMARKS_DIR = Path(__file__).parent
+ROOT_DIR = BENCHMARKS_DIR.parent
+# Storage location for reports used in GitHub actions.
+GH_REPORT_DIR = ROOT_DIR.joinpath(".github", "workflows", "benchmark_reports")
+
+# Common ASV arguments for all run_types except `custom`.
+ASV_HARNESS = "run {posargs} --attribute rounds=4 --interleave-rounds --show-stderr"
+
+
+def echo(echo_string: str):
+ # Use subprocess for printing to reduce chance of printing out of sequence
+ # with the subsequent calls.
+ subprocess.run(["echo", f"BM_RUNNER DEBUG: {echo_string}"])
+
+
+def _subprocess_runner(args, asv=False, **kwargs):
+ # Avoid permanent modifications if the same arguments are used more than once.
+ args = args.copy()
+ kwargs = kwargs.copy()
+ if asv:
+ args.insert(0, "asv")
+ kwargs["cwd"] = BENCHMARKS_DIR
+ echo(" ".join(args))
+ kwargs.setdefault("check", True)
+ return subprocess.run(args, **kwargs)
+
+
+def _subprocess_runner_capture(args, **kwargs) -> str:
+ result = _subprocess_runner(args, capture_output=True, **kwargs)
+ return result.stdout.decode().rstrip()
+
+
+def _check_requirements(package: str) -> None:
+ try:
+ import_module(package)
+ except ImportError as exc:
+ message = (
+ f"No {package} install detected. Benchmarks can only "
+ f"be run in an environment including {package}."
+ )
+ raise Exception(message) from exc
+
+
+def _prep_data_gen_env() -> None:
+ """Create or access a separate, unchanging environment for generating test data."""
+ python_version = "3.12"
+ data_gen_var = "DATA_GEN_PYTHON"
+ if data_gen_var in environ:
+ echo("Using existing data generation environment.")
+ else:
+ echo("Setting up the data generation environment ...")
+ # Get Nox to build an environment for the `tests` session, but don't
+ # run the session. Will reuse a cached environment if appropriate.
+ _subprocess_runner(
+ [
+ "nox",
+ f"--noxfile={ROOT_DIR / 'noxfile.py'}",
+ "--session=tests",
+ "--install-only",
+ f"--python={python_version}",
+ ]
+ )
+ # Find the environment built above, set it to be the data generation
+ # environment.
+ data_gen_python = next(
+ (ROOT_DIR / ".nox").rglob(f"tests*/bin/python{python_version}")
+ ).resolve()
+ environ[data_gen_var] = str(data_gen_python)
+
+ def clone_resource(name: str, clone_source: str) -> Path:
+ resource_dir = data_gen_python.parents[1] / "resources"
+ resource_dir.mkdir(exist_ok=True)
+ clone_dir = resource_dir / name
+ if not clone_dir.is_dir():
+ _subprocess_runner(["git", "clone", clone_source, str(clone_dir)])
+ return clone_dir
+
+ echo("Installing Mule into data generation environment ...")
+ mule_dir = clone_resource("mule", "https://github.com/metomi/mule.git")
+ _subprocess_runner(
+ [
+ str(data_gen_python),
+ "-m",
+ "pip",
+ "install",
+ str(mule_dir / "mule"),
+ ]
+ )
+
+ test_data_var = "OVERRIDE_TEST_DATA_REPOSITORY"
+ if test_data_var not in environ:
+ echo("Installing iris-test-data into data generation environment ...")
+ test_data_dir = clone_resource(
+ "iris-test-data", "https://github.com/SciTools/iris-test-data.git"
+ )
+ environ[test_data_var] = str(test_data_dir / "test_data")
+
+ echo("Data generation environment ready.")
+
+
+def _setup_common() -> None:
+ _check_requirements("asv")
+ _check_requirements("nox")
+
+ _prep_data_gen_env()
+
+ echo("Setting up ASV ...")
+ _subprocess_runner(["machine", "--yes"], asv=True)
+
+ echo("Setup complete.")
+
+
+def _asv_compare(*commits: str, overnight_mode: bool = False) -> None:
+ """Run through a list of commits comparing each one to the next."""
+ commits = tuple(commit[:8] for commit in commits)
+ for i in range(len(commits) - 1):
+ before = commits[i]
+ after = commits[i + 1]
+ asv_command = shlex.split(
+ f"compare {before} {after} --factor={COMPARE_FACTOR} --split"
+ )
+
+ comparison = _subprocess_runner_capture(asv_command, asv=True)
+ echo(comparison)
+ shifts = _subprocess_runner_capture([*asv_command, "--only-changed"], asv=True)
+
+ if shifts or (not overnight_mode):
+ # For the overnight run: only post if there are shifts.
+ _gh_create_reports(after, comparison, shifts)
+
+
+def _gh_create_reports(commit_sha: str, results_full: str, results_shifts: str) -> None:
+ """If running under GitHub Actions: record the results in report(s).
+
+ Posting the reports is done by :func:`_gh_post_reports`, which must be run
+ within a separate action to comply with GHA's security limitations.
+ """
+ if "GITHUB_ACTIONS" not in environ:
+ # Only run when within GHA.
+ return
+
+ pr_number = environ.get("PR_NUMBER", None)
+ on_pull_request = pr_number is not None
+ run_id = environ["GITHUB_RUN_ID"]
+ repo = environ["GITHUB_REPOSITORY"]
+ gha_run_link = f"[`{run_id}`](https://github.com/{repo}/actions/runs/{run_id})"
+
+ GH_REPORT_DIR.mkdir(exist_ok=True)
+ commit_dir = GH_REPORT_DIR / commit_sha
+ commit_dir.mkdir()
+ command_path = commit_dir / "command.txt"
+ body_path = commit_dir / "body.txt"
+
+ performance_report = dedent(
+ (
+ """
+ # :stopwatch: Performance Benchmark Report: {commit_sha}
+
+ <details>
+ <summary>Performance shifts</summary>
+
+ ```
+ {results_shifts}
+ ```
+
+ </details>
+
+ <details>
+ <summary>Full benchmark results</summary>
+
+ ```
+ {results_full}
+ ```
+
+ </details>
+
+ Generated by GHA run {gha_run_link}
+ """
+ )
+ )
+ performance_report = performance_report.format(
+ commit_sha=commit_sha,
+ results_shifts=results_shifts,
+ results_full=results_full,
+ gha_run_link=gha_run_link,
+ )
+
+ if on_pull_request:
+ # Command to post the report as a comment on the active PR.
+ body_path.write_text(performance_report)
+ command = (
+ f"gh pr comment {pr_number} "
+ f"--body-file {body_path.absolute()} "
+ f"--repo {repo}"
+ )
+ command_path.write_text(command)
+
+ else:
+ # Command to post the report as new issue.
+ commit_msg = _subprocess_runner_capture(
+ f"git log {commit_sha}^! --oneline".split(" ")
+ )
+ # Intended for benchmarking commits on trunk - should include a PR
+ # number due to our squash policy.
+ pr_tag_match = re.search("#[0-9]*", commit_msg)
+
+ assignee = ""
+ pr_tag = "pull request number unavailable"
+ if pr_tag_match is not None:
+ pr_tag = pr_tag_match.group(0)
+
+ for login_type in ("author", "mergedBy"):
+ gh_query = f'.["{login_type}"]["login"]'
+ commandlist = shlex.split(
+ f"gh pr view {pr_tag[1:]} "
+ f"--json {login_type} -q '{gh_query}' "
+ f"--repo {repo}"
+ )
+ login = _subprocess_runner_capture(commandlist)
+
+ commandlist = [
+ "curl",
+ "-s",
+ f"https://api.github.com/users/{login}",
+ ]
+ login_info = _subprocess_runner_capture(commandlist)
+ is_user = '"type": "User"' in login_info
+ if is_user:
+ assignee = login
+ break
+
+ title = f"Performance Shift(s): `{commit_sha}`"
+ body = dedent(
+ (
+ f"""
+ Benchmark comparison has identified performance shifts at:
+
+ * commit {commit_sha} ({pr_tag}).
+
+
+ Please review the report below and
+ take corrective/congratulatory action as appropriate
+ :slightly_smiling_face:
+
+ """
+ )
+ )
+ body += performance_report
+ body_path.write_text(body)
+
+ command = (
+ "gh issue create "
+ f'--title "{title}" '
+ f"--body-file {body_path.absolute()} "
+ '--label "Bot" '
+ '--label "Type: Performance" '
+ f"--repo {repo}"
+ )
+ if assignee:
+ command += f" --assignee {assignee}"
+ command_path.write_text(command)
+
+
+def _gh_post_reports() -> None:
+ """If running under GitHub Actions: post pre-prepared benchmark reports.
+
+ Reports are prepared by :func:`_gh_create_reports`, which must be run
+ within a separate action to comply with GHA's security limitations.
+ """
+ if "GITHUB_ACTIONS" not in environ:
+ # Only run when within GHA.
+ return
+
+ commit_dirs = [x for x in GH_REPORT_DIR.iterdir() if x.is_dir()]
+ for commit_dir in commit_dirs:
+ command_path = commit_dir / "command.txt"
+ command = command_path.read_text()
+
+ # Security: only accept certain commands to run.
+ assert command.startswith(("gh issue create", "gh pr comment"))
+
+ _subprocess_runner(shlex.split(command))
+
+
+class _SubParserGenerator(ABC):
+ """Convenience for holding all the necessary argparse info in 1 place."""
+
+ name: str = NotImplemented
+ description: str = NotImplemented
+ epilog: str = NotImplemented
+
+ class _SubParsersType(Protocol):
+ """Duck typing since argparse._SubParsersAction is private."""
+
+ def add_parser(self, name, **kwargs) -> argparse.ArgumentParser: ...
+
+ def __init__(self, subparsers: _SubParsersType) -> None:
+ self.subparser = subparsers.add_parser(
+ self.name,
+ description=self.description,
+ epilog=self.epilog,
+ formatter_class=argparse.RawTextHelpFormatter,
+ )
+ self.add_arguments()
+ self.add_asv_arguments()
+ self.subparser.set_defaults(func=self.func)
+
+ @abstractmethod
+ def add_arguments(self) -> None:
+ """All custom self.subparser.add_argument() calls."""
+ _ = NotImplemented
+
+ def add_asv_arguments(self) -> None:
+ self.subparser.add_argument(
+ "asv_args",
+ nargs=argparse.REMAINDER,
+ help="Any number of arguments to pass down to the ASV benchmark command.",
+ )
+
+ @staticmethod
+ @abstractmethod
+ def func(args: argparse.Namespace):
+ """Return when the subparser is parsed.
+
+ `func` is then called, performing the user's selected sub-command.
+
+ """
+ _ = args
+ return NotImplemented
+
+
+class Overnight(_SubParserGenerator):
+ name = "overnight"
+ description = (
+ "Benchmarks all commits between the input **first_commit** to ``HEAD``, "
+ "comparing each to its parent for performance shifts. If running on "
+ "GitHub Actions: performance shift(s) will be reported in a new issue.\n"
+ "Designed for checking the previous 24 hours' commits, typically in a "
+ "scheduled script.\n"
+ "Uses `asv run`."
+ )
+ epilog = (
+ "e.g. python bm_runner.py overnight a1b23d4\n"
+ "e.g. python bm_runner.py overnight a1b23d4 --bench=regridding"
+ )
+
+ def add_arguments(self) -> None:
+ self.subparser.add_argument(
+ "first_commit",
+ type=str,
+ help="The first commit in the benchmarking commit sequence.",
+ )
+
+ @staticmethod
+ def func(args: argparse.Namespace) -> None:
+ _setup_common()
+
+ commit_range = f"{args.first_commit}^^.."
+ # git rev-list --first-parent is the command ASV uses.
+ git_command = shlex.split(f"git rev-list --first-parent {commit_range}")
+ commit_string = _subprocess_runner_capture(git_command)
+ commit_list = commit_string.split("\n")
+
+ asv_command = shlex.split(ASV_HARNESS.format(posargs=commit_range))
+ try:
+ _subprocess_runner([*asv_command, *args.asv_args], asv=True)
+ finally:
+ # Designed for long running - want to compare/post any valid
+ # results even if some are broken.
+ _asv_compare(*reversed(commit_list), overnight_mode=True)
+
+
+class Branch(_SubParserGenerator):
+ name = "branch"
+ description = (
+ "Performs the same operations as ``overnight``, but always on two commits "
+ "only - ``HEAD``, and ``HEAD``'s merge-base with the input "
+ "**base_branch**. If running on GitHub Actions: HEAD will be GitHub's "
+ "merge commit and merge-base will be the merge target. Performance "
+ "comparisons will be posted in a comment on the relevant pull request.\n"
+ "Designed "
+ "for testing if the active branch's changes cause performance shifts - "
+ "anticipating what would be caught by ``overnight`` once merged.\n\n"
+ "**For maximum accuracy, avoid using the machine that is running this "
+ "session. Run time could be >1 hour for the full benchmark suite.**\n"
+ "Uses `asv run`."
+ )
+ epilog = (
+ "e.g. python bm_runner.py branch upstream/main\n"
+ "e.g. python bm_runner.py branch upstream/main --bench=regridding"
+ )
+
+ def add_arguments(self) -> None:
+ self.subparser.add_argument(
+ "base_branch",
+ type=str,
+ help="A branch that has the merge-base with ``HEAD`` - ``HEAD`` will be benchmarked against that merge-base.",
+ )
+
+ @staticmethod
+ def func(args: argparse.Namespace) -> None:
+ _setup_common()
+
+ git_command = shlex.split("git rev-parse HEAD")
+ head_sha = _subprocess_runner_capture(git_command)[:8]
+
+ git_command = shlex.split(f"git merge-base {head_sha} {args.base_branch}")
+ merge_base = _subprocess_runner_capture(git_command)[:8]
+
+ with NamedTemporaryFile("w") as hashfile:
+ hashfile.writelines([merge_base, "\n", head_sha])
+ hashfile.flush()
+ commit_range = f"HASHFILE:{hashfile.name}"
+ asv_command = shlex.split(ASV_HARNESS.format(posargs=commit_range))
+ _subprocess_runner([*asv_command, *args.asv_args], asv=True)
+
+ _asv_compare(merge_base, head_sha)
+
+
+class _CSPerf(_SubParserGenerator, ABC):
+ """Common code used by both CPerf and SPerf."""
+
+ description = (
+ "Run the on-demand {} suite of benchmarks (part of the UK Met "
+ "Office NG-VAT project) for the ``HEAD`` of ``upstream/main`` only, "
+ "and publish the results to the input **publish_dir**, within a "
+ "unique subdirectory for this run.\n"
+ "Uses `asv run`."
+ )
+ epilog = (
+ "e.g. python bm_runner.py {0} my_publish_dir\n"
+ "e.g. python bm_runner.py {0} my_publish_dir --bench=regridding"
+ )
+
+ def add_arguments(self) -> None:
+ self.subparser.add_argument(
+ "publish_dir",
+ type=str,
+ help="HTML results will be published to a sub-dir in this dir.",
+ )
+
+ @staticmethod
+ def csperf(args: argparse.Namespace, run_type: Literal["cperf", "sperf"]) -> None:
+ _setup_common()
+
+ publish_dir = Path(args.publish_dir)
+ if not publish_dir.is_dir():
+ message = f"Input 'publish directory' is not a directory: {publish_dir}"
+ raise NotADirectoryError(message)
+ publish_subdir = (
+ publish_dir / f"{run_type}_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
+ )
+ publish_subdir.mkdir()
+
+ # Activate on demand benchmarks (C/SPerf are deactivated for
+ # 'standard' runs).
+ environ["ON_DEMAND_BENCHMARKS"] = "True"
+ commit_range = "upstream/main^!"
+
+ asv_command_str = (
+ ASV_HARNESS.format(posargs=commit_range) + f" --bench={run_type}"
+ )
+
+ # Only do a single round.
+ asv_command = shlex.split(re.sub(r"rounds=\d", "rounds=1", asv_command_str))
+ try:
+ _subprocess_runner([*asv_command, *args.asv_args], asv=True)
+ except subprocess.CalledProcessError as err:
+ # C/SPerf benchmarks are much bigger than the CI ones:
+ # Don't fail the whole run if memory blows on 1 benchmark.
+ # ASV produces return code of 2 if the run includes crashes.
+ if err.returncode != 2:
+ raise
+
+ asv_command = shlex.split(f"publish {commit_range} --html-dir={publish_subdir}")
+ _subprocess_runner(asv_command, asv=True)
+
+ # Print completion message.
+ location = BENCHMARKS_DIR / ".asv"
+ echo(
+ f'New ASV results for "{run_type}".\n'
+ f'See "{publish_subdir}",'
+ f'\n or JSON files under "{location / "results"}".'
+ )
+
+
+class CPerf(_CSPerf):
+ name = "cperf"
+ description = _CSPerf.description.format("CPerf")
+ epilog = _CSPerf.epilog.format("cperf")
+
+ @staticmethod
+ def func(args: argparse.Namespace) -> None:
+ _CSPerf.csperf(args, "cperf")
+
+
+class SPerf(_CSPerf):
+ name = "sperf"
+ description = _CSPerf.description.format("SPerf")
+ epilog = _CSPerf.epilog.format("sperf")
+
+ @staticmethod
+ def func(args: argparse.Namespace) -> None:
+ _CSPerf.csperf(args, "sperf")
+
+
+class Custom(_SubParserGenerator):
+ name = "custom"
+ description = (
+ "Run ASV with the input **ASV sub-command**, without any preset "
+ "arguments - must all be supplied by the user. So just like running "
+ "ASV manually, with the convenience of re-using the runner's "
+ "scripted setup steps."
+ )
+ epilog = "e.g. python bm_runner.py custom continuous a1b23d4 HEAD --quick"
+
+ def add_arguments(self) -> None:
+ self.subparser.add_argument(
+ "asv_sub_command",
+ type=str,
+ help="The ASV command to run.",
+ )
+
+ @staticmethod
+ def func(args: argparse.Namespace) -> None:
+ _setup_common()
+ _subprocess_runner([args.asv_sub_command, *args.asv_args], asv=True)
+
+
+class TrialRun(_SubParserGenerator):
+ name = "trialrun"
+ description = (
+ "Fast trial-run a given benchmark, to check it works : "
+ "in a provided or latest-lockfile environment, "
+ "with no repeats for accuracy of measurement."
+ )
+ epilog = (
+ "e.g. python bm_runner.py trialrun "
+ "MyBenchmarks.time_calc ${DATA_GEN_PYTHON}"
+ "\n\nNOTE: 'runpath' also replaces $DATA_GEN_PYTHON during the run."
+ )
+
+ def add_arguments(self) -> None:
+ self.subparser.add_argument(
+ "benchmark",
+ type=str,
+ help=(
+ "A benchmark name, possibly including wildcards, "
+ "as supported by the ASV '--bench' argument."
+ ),
+ )
+ self.subparser.add_argument(
+ "runpath",
+ type=str,
+ help=(
+ "A path to an existing python executable, "
+ "to completely bypass environment building."
+ ),
+ )
+
+ @staticmethod
+ def func(args: argparse.Namespace) -> None:
+ if args.runpath:
+ # Shortcut creation of a data-gen environment
+ # - which is also the trial-run env.
+ python_path = Path(args.runpath).resolve()
+ environ["DATA_GEN_PYTHON"] = str(python_path)
+ _setup_common()
+ # get path of data-gen environment, setup by previous call
+ python_path = Path(environ["DATA_GEN_PYTHON"])
+ # allow 'on-demand' benchmarks
+ environ["ON_DEMAND_BENCHMARKS"] = "1"
+ asv_command = [
+ "run",
+ "--bench",
+ args.benchmark,
+ # no repeats ('--quick') - fast, at the cost of timing accuracy
+ "--quick",
+ "--show-stderr",
+ # do not build a unique env : run test in data-gen environment
+ "--environment",
+ f"existing:{python_path}",
+ ] + args.asv_args
+ _subprocess_runner(asv_command, asv=True)
+
+
+class GhPost(_SubParserGenerator):
+ name = "_gh_post"
+ description = (
+ "Used by GitHub Actions to post benchmark reports that were prepared "
+ "during previous actions. Separated to comply with GitHub's security "
+ "requirements."
+ )
+ epilog = "Sole acceptable syntax: python bm_runner.py _gh_post"
+
+ @staticmethod
+ def func(args: argparse.Namespace) -> None:
+ _gh_post_reports()
+
+ # No arguments permitted for this subclass:
+
+ def add_arguments(self) -> None:
+ pass
+
+ def add_asv_arguments(self) -> None:
+ pass
+
+
+def main():
+ parser = argparse.ArgumentParser(
+ description="Run the Iris performance benchmarks (using Airspeed Velocity).",
+ epilog=(
+ "More help is available within each sub-command."
+ "\n\nNOTE(1): a separate python environment is created to "
+ "construct test files.\n Set $DATA_GEN_PYTHON to avoid the cost "
+ "of this."
+ "\nNOTE(2): iris-test-data is downloaded and cached within the "
+ "data generation environment.\n Set "
+ "$OVERRIDE_TEST_DATA_REPOSITORY to avoid the cost of this."
+ "\nNOTE(3): test data is cached within the "
+ "benchmarks code directory, and uses a lot of disk space "
+ "of disk space (Gb).\n Set $BENCHMARK_DATA to specify where this "
+ "space can be safely allocated."
+ ),
+ formatter_class=argparse.RawTextHelpFormatter,
+ )
+ subparsers = parser.add_subparsers(required=True)
+
+ for gen in (Overnight, Branch, CPerf, SPerf, Custom, TrialRun, GhPost):
+ _ = gen(subparsers).subparser
+
+ parsed = parser.parse_args()
+ parsed.func(parsed)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/benchmarks/custom_bms/README.md b/benchmarks/custom_bms/README.md
new file mode 100644
index 0000000000..eea85d74fe
--- /dev/null
+++ b/benchmarks/custom_bms/README.md
@@ -0,0 +1,11 @@
+# Iris custom benchmarks
+
+To be recognised by ASV, these benchmarks must be packaged and installed in
+line with the
+[ASV guidelines](https://asv.readthedocs.io/projects/asv-runner/en/latest/development/benchmark_plugins.html).
+This is achieved using the custom build in [install.py](./install.py).
+
+Installation is into the environment where the benchmarks are run (i.e. not
+the environment containing ASV + Nox, but the one built to the same
+specifications as the Tests environment). This is done via `build_command`
+in [asv.conf.json](../asv.conf.json).
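+
+As a rough illustration only (the authoritative entry lives in
+[asv.conf.json](../asv.conf.json) and may differ), a `build_command` that also
+installs these custom benchmarks could look something like:
+
+```json
+"build_command": [
+    "python -m pip wheel --no-deps --wheel-dir {build_cache_dir} {build_dir}",
+    "python {build_dir}/benchmarks/custom_bms/install.py"
+],
+```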
diff --git a/benchmarks/custom_bms/install.py b/benchmarks/custom_bms/install.py
new file mode 100644
index 0000000000..59d27a0b43
--- /dev/null
+++ b/benchmarks/custom_bms/install.py
@@ -0,0 +1,55 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+"""Install Iris' custom benchmarks for detection by ASV.
+
+See the requirements for being detected as an ASV plugin:
+https://asv.readthedocs.io/projects/asv-runner/en/latest/development/benchmark_plugins.html
+"""
+
+from pathlib import Path
+import shutil
+from subprocess import run
+from tempfile import TemporaryDirectory
+
+this_dir = Path(__file__).parent
+
+
+def package_files(new_dir: Path) -> None:
+ """Package Iris' custom benchmarks for detection by ASV.
+
+ Parameters
+ ----------
+ new_dir : Path
+ The directory to package the custom benchmarks in.
+ """
+ asv_bench_iris = new_dir / "asv_bench_iris"
+ benchmarks = asv_bench_iris / "benchmarks"
+ benchmarks.mkdir(parents=True)
+ (asv_bench_iris / "__init__.py").touch()
+
+ for py_file in this_dir.glob("*.py"):
+ if py_file != Path(__file__):
+ shutil.copy2(py_file, benchmarks)
+
+ # Create this on the fly, as having multiple pyproject.toml files in 1
+ # project causes problems.
+ py_project = new_dir / "pyproject.toml"
+ py_project.write_text(
+ """
+ [project]
+ name = "asv_bench_iris"
+ version = "0.1"
+ """
+ )
+
+
+def main():
+ with TemporaryDirectory() as temp_dir:
+ package_files(Path(temp_dir))
+ run(["python", "-m", "pip", "install", temp_dir])
+
+
+if __name__ == "__main__":
+ main()
diff --git a/benchmarks/custom_bms/tracemallocbench.py b/benchmarks/custom_bms/tracemallocbench.py
new file mode 100644
index 0000000000..486c67aeb9
--- /dev/null
+++ b/benchmarks/custom_bms/tracemallocbench.py
@@ -0,0 +1,196 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+
+"""Benchmark for growth in process resident memory, repeating for accuracy.
+
+Uses a modified version of the repeat logic in
+:class:`asv_runner.benchmarks.time.TimeBenchmark`.
+"""
+
+import re
+from timeit import Timer
+import tracemalloc
+from typing import Callable
+
+from asv_runner.benchmarks.time import TimeBenchmark, wall_timer
+
+
+class TracemallocBenchmark(TimeBenchmark):
+ """Benchmark for growth in process resident memory, repeating for accuracy.
+
+ Obviously limited as to what it actually measures: relies on the current
+ process not having significant unused (de-allocated) memory when the
+ tested code block runs, and is only reliable when the code allocates a
+ significant amount of new memory.
+
+ Benchmark operations prefixed with ``tracemalloc_`` or ``Tracemalloc`` will
+ use this benchmark class.
+
+ Inherits behaviour from :class:`asv_runner.benchmarks.time.TimeBenchmark`,
+ with modifications for memory measurement. See the below Attributes section
+ and https://asv.readthedocs.io/en/stable/writing_benchmarks.html#timing-benchmarks.
+
+ Attributes
+ ----------
+ Mostly identical to :class:`asv_runner.benchmarks.time.TimeBenchmark`. See
+ https://asv.readthedocs.io/en/stable/benchmarks.html#timing-benchmarks
+ Make sure to use the inherited ``repeat`` attribute if greater accuracy
+ is needed. Below are the attributes where inherited behaviour is
+ overridden.
+
+ number : int
+ The number of times the benchmarked operation will be called per
+ ``repeat``. Memory growth is measured after ALL calls -
+ i.e. `number` should make no difference to the result if the operation
+ has perfect garbage collection. The parent class's intelligent
+ modification of `number` is NOT inherited. A minimum value of ``1`` is
+ enforced.
+ warmup_time, sample_time, min_run_count, timer
+ Not used.
+ type : str = "tracemalloc"
+ The name of this benchmark type.
+ unit : str = "bytes"
+ The units of the measured metric (i.e. the growth in memory).
+
+ """
+
+ name_regex = re.compile("^(Tracemalloc[A-Z_].+)|(tracemalloc_.+)$")
+
+ param: tuple
+
+ def __init__(self, name: str, func: Callable, attr_sources: list) -> None:
+ """Initialize a new instance of `TracemallocBenchmark`.
+
+ Parameters
+ ----------
+ name : str
+ The name of the benchmark.
+ func : callable
+ The function to benchmark.
+ attr_sources : list
+ A list of objects from which to draw attributes.
+ """
+ super().__init__(name, func, attr_sources)
+ self.type = "tracemalloc"
+ self.unit = "bytes"
+
+ def _load_vars(self):
+ """Load benchmark variables from attribute sources.
+
+ Downstream handling of ``number`` is not the same as in the parent, so we
+ need to make sure it is at least 1.
+ """
+ super()._load_vars()
+ self.number = max(1, self.number)
+
+ def run(self, *param: tuple) -> dict:
+ """Run the benchmark with the given parameters.
+
+ Downstream handling of ``param`` is not the same as in the parent, so we
+ need to store it now.
+
+ Parameters
+ ----------
+ *param : tuple
+ The parameters to pass to the benchmark function.
+
+ Returns
+ -------
+ dict
+ A dictionary with the benchmark results. It contains the samples
+ taken, and "the number of times the function was called in each
+ sample" - for this benchmark that is always ``1`` to avoid the
+ parent class incorrectly modifying the results.
+ """
+ self.param = param
+ return super().run(*param)
+
+ def benchmark_timing(
+ self,
+ timer: Timer,
+ min_repeat: int,
+ max_repeat: int,
+ max_time: float,
+ warmup_time: float,
+ number: int,
+ min_run_count: int,
+ ) -> tuple[list[int], int]:
+ """Benchmark the timing of the function execution.
+
+ Heavily modified from the parent method
+ - Directly performs setup and measurement (parent used timeit).
+ - `number` used differently (see Parameters).
+ - No warmup phase.
+
+ Parameters
+ ----------
+ timer : timeit.Timer
+ Not used.
+ min_repeat : int
+ The minimum number of times to repeat the function execution.
+ max_repeat : int
+ The maximum number of times to repeat the function execution.
+ max_time : float
+ The maximum total time to spend on the benchmarking.
+ warmup_time : float
+ Not used.
+ number : int
+ The number of times the benchmarked operation will be called per
+ repeat. Memory growth is measured after ALL calls - i.e. `number`
+ should make no difference to the result if the operation
+ has perfect garbage collection. The parent class's intelligent
+ modification of `number` is NOT inherited.
+ min_run_count : int
+ Not used.
+
+ Returns
+ -------
+ list
+ A list of the measured memory growths, in bytes.
+ int = 1
+ Part of the inherited return signature. Must be 1 to avoid
+ the parent incorrectly modifying the results.
+ """
+ start_time = wall_timer()
+ samples: list[int] = []
+
+ def too_slow(num_samples) -> bool:
+ """Stop taking samples if limits exceeded.
+
+ Parameters
+ ----------
+ num_samples : int
+ The number of samples taken so far.
+
+ Returns
+ -------
+ bool
+ True if the benchmark should stop, False otherwise.
+ """
+ if num_samples < min_repeat:
+ return False
+ return wall_timer() > start_time + max_time
+
+ # Collect samples
+ while len(samples) < max_repeat:
+ self.redo_setup()
+ tracemalloc.start()
+ for _ in range(number):
+ __ = self.func(*self.param)
+ _, peak_mem_bytes = tracemalloc.get_traced_memory()
+ tracemalloc.stop()
+
+ samples.append(peak_mem_bytes)
+
+ if too_slow(len(samples)):
+ break
+
+ # ``number`` is not used in the same way as in the parent class. Must
+ # be returned as 1 to avoid parent incorrectly modifying the results.
+ return samples, 1
+
+
+# https://asv.readthedocs.io/projects/asv-runner/en/latest/development/benchmark_plugins.html
+export_as_benchmark = [TracemallocBenchmark]
diff --git a/codecov.yml b/codecov.yml
new file mode 100644
index 0000000000..a0efbb9997
--- /dev/null
+++ b/codecov.yml
@@ -0,0 +1,9 @@
+coverage:
+ # see https://docs.codecov.com/docs/commit-status
+ status:
+ project:
+ default:
+ target: auto
+ # coverage can drop by up to the threshold % while still posting success
+ threshold: 3%
+ patch: off
diff --git a/docs/Makefile b/docs/Makefile
new file mode 100644
index 0000000000..b6f52f58f9
--- /dev/null
+++ b/docs/Makefile
@@ -0,0 +1,51 @@
+SUBDIRS = src
+
+help:
+ @for i in $(SUBDIRS); do \
+ echo "make help in $$i..."; \
+ (cd $$i; $(MAKE) $(MFLAGS) $(MYMAKEFLAGS) help); done
+
+html:
+ @for i in $(SUBDIRS); do \
+ echo "make html in $$i..."; \
+ (cd $$i; $(MAKE) $(MFLAGS) $(MYMAKEFLAGS) html); done
+
+html-noplot:
+ @for i in $(SUBDIRS); do \
+ echo "make html-noplot in $$i..."; \
+ (cd $$i; $(MAKE) $(MFLAGS) $(MYMAKEFLAGS) html-noplot); done
+
+html-noapi:
+ @for i in $(SUBDIRS); do \
+ echo "make html-noapi in $$i..."; \
+ (cd $$i; $(MAKE) $(MFLAGS) $(MYMAKEFLAGS) html-noapi); done
+
+html-quick:
+ @for i in $(SUBDIRS); do \
+ echo "make html-quick in $$i..."; \
+ (cd $$i; $(MAKE) $(MFLAGS) $(MYMAKEFLAGS) html-quick); done
+
+clean:
+ @for i in $(SUBDIRS); do \
+ echo "Clearing in $$i..."; \
+ (cd $$i; $(MAKE) $(MFLAGS) $(MYMAKEFLAGS) clean); done
+
+doctest:
+ @for i in $(SUBDIRS); do \
+ echo "Running doctest in $$i..."; \
+ (cd $$i; $(MAKE) $(MFLAGS) $(MYMAKEFLAGS) doctest); done
+
+linkcheck:
+ @for i in $(SUBDIRS); do \
+ echo "Running linkcheck in $$i..."; \
+ (cd $$i; $(MAKE) $(MFLAGS) $(MYMAKEFLAGS) linkcheck); done
+
+show:
+ @for i in $(SUBDIRS); do \
+ echo "Running show in $$i..."; \
+ (cd $$i; $(MAKE) $(MFLAGS) $(MYMAKEFLAGS) show); done
+
+livehtml:
+ @for i in $(SUBDIRS); do \
+ echo "Running show in $$i..."; \
+ (cd $$i; $(MAKE) $(MFLAGS) $(MYMAKEFLAGS) livehtml); done
\ No newline at end of file
diff --git a/docs/gallery_code/README.rst b/docs/gallery_code/README.rst
new file mode 100644
index 0000000000..85bf0552b4
--- /dev/null
+++ b/docs/gallery_code/README.rst
@@ -0,0 +1,28 @@
+.. _gallery_index:
+
+Gallery
+=======
+
+The gallery is divided into sections as described below. All entries
+show the code used to produce the example plot. Additionally there are links
+to download the code directly as source or as part of a
+`jupyter notebook <https://jupyter.org/>`_; these links are at the bottom
+of the page.
+
+To view the jupyter notebook locally and experiment with the code, you will
+need an environment set up with the appropriate dependencies; see
+:ref:`installing_iris` for instructions.
+Ensure that ``iris-sample-data`` is installed, as it is used in the gallery.
+Additionally, ensure that ``jupyter`` is installed. The command to install
+both is::
+
+ conda install -c conda-forge iris-sample-data jupyter
+
+Once you have downloaded the notebooks (bottom of each gallery page),
+you may start the jupyter notebook via::
+
+ jupyter notebook
+
+If you wish to contribute to the gallery see the
+:ref:`contributing.documentation.gallery` section of the
+:ref:`contributing.documentation_full`.
diff --git a/docs/gallery_code/general/README.rst b/docs/gallery_code/general/README.rst
new file mode 100644
index 0000000000..3a48e7cd8e
--- /dev/null
+++ b/docs/gallery_code/general/README.rst
@@ -0,0 +1,3 @@
+General
+-------
+
diff --git a/docs/gallery_code/general/plot_SOI_filtering.py b/docs/gallery_code/general/plot_SOI_filtering.py
new file mode 100644
index 0000000000..4b256c894c
--- /dev/null
+++ b/docs/gallery_code/general/plot_SOI_filtering.py
@@ -0,0 +1,108 @@
+"""
+Applying a Filter to a Time-Series
+==================================
+
+This example demonstrates low pass filtering a time-series by applying a
+weighted running mean over the time dimension.
+
+The time-series used is the Darwin-only Southern Oscillation Index (SOI),
+which is filtered using two different Lanczos filters: one to filter out
+time-scales of less than 2 years and one to filter out time-scales of
+less than 7 years.
+
+References
+----------
+ Duchon C. E. (1979) Lanczos Filtering in One and Two Dimensions.
+ Journal of Applied Meteorology, Vol 18, pp 1016-1022.
+
+ Trenberth K. E. (1984) Signal Versus Noise in the Southern Oscillation.
+ Monthly Weather Review, Vol 112, pp 326-332
+
+""" # noqa: D205, D212, D400
+
+import matplotlib.pyplot as plt
+import numpy as np
+
+import iris
+import iris.plot as iplt
+
+
+def low_pass_weights(window, cutoff):
+ """Calculate weights for a low pass Lanczos filter.
+
+ Parameters
+ ----------
+ window : int
+ The length of the filter window.
+ cutoff : float
+ The cutoff frequency in inverse time steps.
+
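+ Examples
+ --------
+ For instance, the 2-year (24-month) filter weights used in ``main()`` below
+ are computed with a 121-point window::
+
+     wgts24 = low_pass_weights(121, 1.0 / 24.0)
+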
+ """
+ order = ((window - 1) // 2) + 1
+ nwts = 2 * order + 1
+ w = np.zeros([nwts])
+ n = nwts // 2
+ w[n] = 2 * cutoff
+ k = np.arange(1.0, n)
+ sigma = np.sin(np.pi * k / n) * n / (np.pi * k)
+ firstfactor = np.sin(2.0 * np.pi * cutoff * k) / (np.pi * k)
+ w[n - 1 : 0 : -1] = firstfactor * sigma
+ w[n + 1 : -1] = firstfactor * sigma
+ return w[1:-1]
+
+
+def main():
+ # Load the monthly-valued Southern Oscillation Index (SOI) time-series.
+ fname = iris.sample_data_path("SOI_Darwin.nc")
+ soi = iris.load_cube(fname)
+
+ # Window length for filters.
+ window = 121
+
+ # Construct 2-year (24-month) and 7-year (84-month) low pass filters
+ # for the SOI data which is monthly.
+ wgts24 = low_pass_weights(window, 1.0 / 24.0)
+ wgts84 = low_pass_weights(window, 1.0 / 84.0)
+
+ # Apply each filter using the rolling_window method used with the weights
+ # keyword argument. A weighted sum is required because the magnitude of
+ # the weights are just as important as their relative sizes.
+ soi24 = soi.rolling_window("time", iris.analysis.SUM, len(wgts24), weights=wgts24)
+ soi84 = soi.rolling_window("time", iris.analysis.SUM, len(wgts84), weights=wgts84)
+
+ # Plot the SOI time series and both filtered versions.
+ plt.figure(figsize=(9, 4))
+ iplt.plot(
+ soi,
+ color="0.7",
+ linewidth=1.0,
+ linestyle="-",
+ alpha=1.0,
+ label="no filter",
+ )
+ iplt.plot(
+ soi24,
+ color="b",
+ linewidth=2.0,
+ linestyle="-",
+ alpha=0.7,
+ label="2-year filter",
+ )
+ iplt.plot(
+ soi84,
+ color="r",
+ linewidth=2.0,
+ linestyle="-",
+ alpha=0.7,
+ label="7-year filter",
+ )
+ plt.ylim([-4, 4])
+ plt.title("Southern Oscillation Index (Darwin Only)")
+ plt.xlabel("Time")
+ plt.ylabel("SOI")
+ plt.legend(fontsize=10)
+ iplt.show()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/docs/gallery_code/general/plot_anomaly_log_colouring.py b/docs/gallery_code/general/plot_anomaly_log_colouring.py
new file mode 100644
index 0000000000..cd11161041
--- /dev/null
+++ b/docs/gallery_code/general/plot_anomaly_log_colouring.py
@@ -0,0 +1,111 @@
+"""
+Colouring Anomaly Data With Logarithmic Scaling
+===============================================
+
+In this example, we need to plot anomaly data where the values have a
+"logarithmic" significance -- i.e. we want to give approximately equal ranges
+of colour between data values of, say, 1 and 10 as between 10 and 100.
+
+As the data range also contains zero, that obviously does not suit a simple
+logarithmic interpretation. However, values of less than a certain absolute
+magnitude may be considered "not significant", so we put these into a separate
+"zero band" which is plotted in white.
+
+To do this, we create a custom value mapping function (normalization) using
+the matplotlib Norm class :obj:`matplotlib.colors.SymLogNorm`.
+We use this to make a cell-filled pseudocolour plot with a colourbar.
+
+.. note::
+
+ By "pseudocolour", we mean that each data point is drawn as a "cell"
+ region on the plot, coloured according to its data value.
+ This is provided in Iris by the functions :meth:`iris.plot.pcolor` and
+ :meth:`iris.plot.pcolormesh`, which call the underlying matplotlib
+ functions of the same names (i.e., :obj:`matplotlib.pyplot.pcolor`
+ and :obj:`matplotlib.pyplot.pcolormesh`).
+ See also: https://en.wikipedia.org/wiki/False_color#Pseudocolor.
+
+""" # noqa: D205, D212, D400
+
+import cartopy.crs as ccrs
+import matplotlib.colors as mcols
+import matplotlib.pyplot as plt
+
+import iris
+import iris.coord_categorisation
+import iris.plot as iplt
+
+
+def main():
+ # Load a sample air temperatures sequence.
+ file_path = iris.sample_data_path("E1_north_america.nc")
+ temperatures = iris.load_cube(file_path)
+
+ # Create a year-number coordinate from the time information.
+ iris.coord_categorisation.add_year(temperatures, "time")
+
+ # Create a sample anomaly field for one chosen year, by extracting that
+ # year and subtracting the time mean.
+ sample_year = 1982
+ year_temperature = temperatures.extract(iris.Constraint(year=sample_year))
+ time_mean = temperatures.collapsed("time", iris.analysis.MEAN)
+ anomaly = year_temperature - time_mean
+
+ # Construct a plot title string explaining which years are involved.
+ years = temperatures.coord("year").points
+ plot_title = "Temperature anomaly"
+ plot_title += "\n{} differences from {}-{} average.".format(
+ sample_year, years[0], years[-1]
+ )
+
+ # Define scaling levels for the logarithmic colouring.
+ minimum_log_level = 0.1
+ maximum_scale_level = 3.0
+
+ # Use a standard colour map which varies blue-white-red.
+ # For suitable options, see the 'Diverging colormaps' section in:
+ # https://matplotlib.org/stable/gallery/color/colormap_reference.html
+ anom_cmap = "bwr"
+
+ # Create a 'logarithmic' data normalization.
+ anom_norm = mcols.SymLogNorm(
+ linthresh=minimum_log_level,
+ linscale=0.01,
+ vmin=-maximum_scale_level,
+ vmax=maximum_scale_level,
+ )
+ # Setting "linthresh=minimum_log_level" makes its non-logarithmic
+ # data range equal to our 'zero band'.
+ # Setting "linscale=0.01" maps the whole zero band to the middle colour value
+ # (i.e., 0.5), which is the neutral point of a "diverging" style colormap.
+
+ # Create an Axes, specifying the map projection.
+ plt.axes(projection=ccrs.LambertConformal())
+
+ # Make a pseudocolour plot using this colour scheme.
+ mesh = iplt.pcolormesh(anomaly, cmap=anom_cmap, norm=anom_norm)
+
+ # Add a colourbar, with extensions to show handling of out-of-range values.
+ bar = plt.colorbar(mesh, orientation="horizontal", extend="both")
+
+ # Set some suitable fixed "logarithmic" colourbar tick positions.
+ tick_levels = [-3, -1, -0.3, 0.0, 0.3, 1, 3]
+ bar.set_ticks(tick_levels)
+
+ # Modify the tick labels so that the centre one shows "+/-".
+ tick_levels[3] = r"$\pm${:g}".format(minimum_log_level)
+ bar.set_ticklabels(tick_levels)
+
+ # Label the colourbar to show the units.
+ bar.set_label("[{}, log scale]".format(anomaly.units))
+
+ # Add coastlines and a title.
+ plt.gca().coastlines()
+ plt.title(plot_title)
+
+ # Display the result.
+ iplt.show()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/docs/gallery_code/general/plot_coriolis.py b/docs/gallery_code/general/plot_coriolis.py
new file mode 100644
index 0000000000..905108abfd
--- /dev/null
+++ b/docs/gallery_code/general/plot_coriolis.py
@@ -0,0 +1,84 @@
+"""
+Deriving the Coriolis Frequency Over the Globe
+==============================================
+
+This code computes the Coriolis frequency and stores it in a cube with
+associated metadata. It then plots the Coriolis frequency on an orthographic
+projection.
+
+""" # noqa: D205, D212, D400
+
+import cartopy.crs as ccrs
+import matplotlib.pyplot as plt
+import numpy as np
+
+import iris
+from iris.coord_systems import GeogCS
+import iris.plot as iplt
+
+
+def main():
+ # Start with arrays for latitudes and longitudes, with a given number of
+ # coordinates in the arrays.
+ coordinate_points = 200
+ longitudes = np.linspace(-180.0, 180.0, coordinate_points)
+ latitudes = np.linspace(-90.0, 90.0, coordinate_points)
+ lon2d, lat2d = np.meshgrid(longitudes, latitudes)
+
+ # Omega is the Earth's rotation rate, expressed in radians per second
+ omega = 7.29e-5
+
+ # The data for our cube is the Coriolis frequency,
+ # `f = 2 * omega * sin(phi)`, which is computed for each grid point over
+ # the globe from the 2-dimensional latitude array.
+ data = 2.0 * omega * np.sin(np.deg2rad(lat2d))
+
+ # We now need to define a coordinate system for the plot.
+ # Here we'll use GeogCS; 6371229 is the radius of the Earth in metres.
+ cs = GeogCS(6371229)
+
+ # The Iris coords module turns the latitude list into a coordinate array.
+ # Coords then applies an appropriate standard name and unit to it.
+ lat_coord = iris.coords.DimCoord(
+ latitudes, standard_name="latitude", units="degrees", coord_system=cs
+ )
+
+ # The above process is repeated for the longitude coordinates.
+ lon_coord = iris.coords.DimCoord(
+ longitudes, standard_name="longitude", units="degrees", coord_system=cs
+ )
+
+ # Now we add bounds to our latitude and longitude coordinates.
+ # We want simple, contiguous bounds for our regularly-spaced coordinate
+ # points so we use the guess_bounds() method of the coordinate. For more
+ # complex coordinates, we could derive and set the bounds manually.
+ lat_coord.guess_bounds()
+ lon_coord.guess_bounds()
+
+ # Now we input our data array into the cube.
+ new_cube = iris.cube.Cube(
+ data,
+ standard_name="coriolis_parameter",
+ units="s-1",
+ dim_coords_and_dims=[(lat_coord, 0), (lon_coord, 1)],
+ )
+
+ # Now let's plot our cube, along with coastlines, a title and an
+ # appropriately-labelled colour bar:
+ ax = plt.axes(projection=ccrs.Orthographic())
+ ax.coastlines(resolution="10m")
+ mesh = iplt.pcolormesh(new_cube, cmap="seismic")
+ tick_levels = [-0.00012, -0.00006, 0.0, 0.00006, 0.00012]
+ plt.colorbar(
+ mesh,
+ orientation="horizontal",
+ label="s-1",
+ ticks=tick_levels,
+ format="%.1e",
+ )
+ plt.title("Coriolis frequency")
+ plt.show()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/docs/gallery_code/general/plot_cross_section.py b/docs/gallery_code/general/plot_cross_section.py
new file mode 100644
index 0000000000..8e5bee85ed
--- /dev/null
+++ b/docs/gallery_code/general/plot_cross_section.py
@@ -0,0 +1,42 @@
+"""
+Cross Section Plots
+===================
+
+This example demonstrates contour plots of a cross-sectioned multi-dimensional
+cube which features a hybrid height vertical coordinate system.
+
+""" # noqa: D205, D212, D400
+
+import matplotlib.pyplot as plt
+
+import iris
+import iris.plot as iplt
+import iris.quickplot as qplt
+
+
+def main():
+ # Load some test data.
+ fname = iris.sample_data_path("hybrid_height.nc")
+ theta = iris.load_cube(fname, "air_potential_temperature")
+
+ # Extract a single height vs longitude cross-section. N.B. This could
+ # easily be changed to extract a specific slice, or even to loop over *all*
+ # cross section slices.
+ cross_section = next(theta.slices(["grid_longitude", "model_level_number"]))
+
+ qplt.contourf(cross_section, coords=["grid_longitude", "altitude"], cmap="RdBu_r")
+ iplt.show()
+
+ # Now do the equivalent plot, only against model level
+ plt.figure()
+
+ qplt.contourf(
+ cross_section,
+ coords=["grid_longitude", "model_level_number"],
+ cmap="RdBu_r",
+ )
+ iplt.show()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/docs/gallery_code/general/plot_custom_aggregation.py b/docs/gallery_code/general/plot_custom_aggregation.py
new file mode 100644
index 0000000000..65fadfb473
--- /dev/null
+++ b/docs/gallery_code/general/plot_custom_aggregation.py
@@ -0,0 +1,96 @@
+"""
+Calculating a Custom Statistic
+==============================
+
+This example shows how to define and use a custom
+:class:`iris.analysis.Aggregator`, that provides a new statistical operator for
+use with cube aggregation functions such as :meth:`~iris.cube.Cube.collapsed`,
+:meth:`~iris.cube.Cube.aggregated_by` or
+:meth:`~iris.cube.Cube.rolling_window`.
+
+In this case, we have a 240-year sequence of yearly average surface temperature
+over North America, and we want to calculate the number of years in which the
+temperature exceeds a certain value for a spell of 5 years or more.
+
+""" # noqa: D205, D212, D400
+
+import matplotlib.pyplot as plt
+import numpy as np
+
+import iris
+from iris.analysis import Aggregator
+import iris.plot as iplt
+import iris.quickplot as qplt
+from iris.util import rolling_window
+
+
+# Define a function to perform the custom statistical operation.
+# Note: in order to meet the requirements of iris.analysis.Aggregator, it must
+# do the calculation over an arbitrary (given) data axis.
+def count_spells(data, threshold, axis, spell_length):
+ """Calculate the number of points in a sequence.
+
+ Function to calculate the number of points in a sequence where the value
+ has exceeded a threshold value for at least a certain number of timepoints.
+
+ Generalised to operate on multiple time sequences arranged on a specific
+ axis of a multidimensional array.
+
+ Parameters
+ ----------
+ data : array
+ Raw data to be compared with value threshold.
+ threshold : float
+ Threshold point for 'significant' datapoints.
+ axis : int
+ Number of the array dimension mapping the time sequences.
+ (Can also be negative, e.g. '-1' means last dimension).
+ spell_length : int
+ Number of consecutive times at which value > threshold to "count".
+
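+ Examples
+ --------
+ Typically wrapped in a custom :class:`iris.analysis.Aggregator`, as in
+ ``main()`` below::
+
+     SPELL_COUNT = Aggregator(
+         "spell_count", count_spells, units_func=lambda units, **kwargs: 1
+     )
+     warm_periods = cube.collapsed(
+         "time", SPELL_COUNT, threshold=280.0, spell_length=5
+     )
+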
+ """
+ if axis < 0:
+ # just cope with negative axis numbers
+ axis += data.ndim
+ # Threshold the data to find the 'significant' points.
+ data_hits = data > threshold
+ # Make an array with data values "windowed" along the time axis.
+ hit_windows = rolling_window(data_hits, window=spell_length, axis=axis)
+ # Find the windows "full of True-s" (along the added 'window axis').
+ full_windows = np.all(hit_windows, axis=axis + 1)
+ # Count points fulfilling the condition (along the time axis).
+ spell_point_counts = np.sum(full_windows, axis=axis, dtype=int)
+ return spell_point_counts
+
+
+def main():
+ # Load the whole time-sequence as a single cube.
+ file_path = iris.sample_data_path("E1_north_america.nc")
+ cube = iris.load_cube(file_path)
+
+ # Make an aggregator from the user function.
+ SPELL_COUNT = Aggregator(
+ "spell_count", count_spells, units_func=lambda units, **kwargs: 1
+ )
+
+ # Define the parameters of the test.
+ threshold_temperature = 280.0
+ spell_years = 5
+
+ # Calculate the statistic.
+ warm_periods = cube.collapsed(
+ "time",
+ SPELL_COUNT,
+ threshold=threshold_temperature,
+ spell_length=spell_years,
+ )
+ warm_periods.rename("Number of 5-year warm spells in 240 years")
+
+ # Plot the results.
+ qplt.contourf(warm_periods, cmap="RdYlBu_r")
+ plt.gca().coastlines()
+ iplt.show()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/docs/gallery_code/general/plot_custom_file_loading.py b/docs/gallery_code/general/plot_custom_file_loading.py
new file mode 100644
index 0000000000..06de887614
--- /dev/null
+++ b/docs/gallery_code/general/plot_custom_file_loading.py
@@ -0,0 +1,339 @@
+"""
+Loading a Cube From a Custom File Format
+========================================
+
+This example shows how a custom text file can be loaded using the standard Iris
+load mechanism.
+
+The first stage in the process is to define an Iris :class:`FormatSpecification
+<iris.io.format_picker.FormatSpecification>` for the file format. To create a
+format specification we need to define the following:
+
+* **format_name** - Some text that describes the format specification we are
+ creating.
+* **file_element** - FileElement object describing the element which identifies
+ this FormatSpecification.
+
+ Possible values are:
+
+ * ``iris.io.format_picker.MagicNumber(n, o)``
+ The n bytes from the file at offset o.
+
+ * ``iris.io.format_picker.FileExtension()``
+ The file extension.
+
+ * ``iris.io.format_picker.LeadingLine()``
+ The first line of the file.
+
+* **file_element_value** - The value that the file_element should take if a file
+ matches this FormatSpecification.
+* **handler** (optional) - A generator function that will be called when the file
+ specification has been identified. This function is provided by the user and
+ provides the means to parse the whole file. If no handler function is
+ provided, then identification is still possible without any handling.
+
+ The handler function must define the following arguments:
+
+ * list of filenames to process
+ * callback function - An optional function to filter/alter the Iris cubes
+ returned
+
+ The handler function must be defined as a generator which yields each cube
+ as it is produced.
+
+* **priority** (optional) - Integer giving a priority for considering this
+ specification, where a higher priority means earlier consideration.
+
+In the following example, the function :func:`load_NAME_III` has been defined
+to handle the loading of the raw data from the custom file format. This
+function is called from :func:`NAME_to_cube` which uses this data to create and
+yield Iris cubes.
+
+In the ``main()`` function the filenames are loaded via the ``iris.load_cube``
+function which automatically invokes the ``FormatSpecification`` we defined.
+The cube returned from the load function is then used to produce a plot.
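+
+For orientation, the general shape of such a specification is sketched below
+(the values here are placeholders; the real specification for this example is
+created and registered with ``iris.fileformats.FORMAT_AGENT`` near the bottom
+of this module)::
+
+    spec = format_picker.FormatSpecification(
+        "My format",
+        format_picker.LeadingLine(),
+        lambda line: line.startswith(b"MY FORMAT"),
+        my_handler_function,
+        priority=6,
+    )
+    iris.fileformats.FORMAT_AGENT.add_spec(spec)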
+
+""" # noqa: D205, D212, D400
+
+import datetime
+
+from cf_units import CALENDAR_STANDARD, Unit
+import matplotlib.pyplot as plt
+import numpy as np
+
+import iris
+import iris.coord_systems as icoord_systems
+import iris.coords as icoords
+import iris.fileformats
+import iris.io.format_picker as format_picker
+import iris.plot as iplt
+
+UTC_format = "%H%M%Z %d/%m/%Y"
+
+FLOAT_HEADERS = [
+ "X grid origin",
+ "Y grid origin",
+ "X grid resolution",
+ "Y grid resolution",
+]
+INT_HEADERS = ["X grid size", "Y grid size", "Number of fields"]
+DATE_HEADERS = ["Run time", "Start of release", "End of release"]
+COLUMN_NAMES = [
+ "species_category",
+ "species",
+ "cell_measure",
+ "quantity",
+ "unit",
+ "z_level",
+ "time",
+]
+
+
+def load_NAME_III(filename):
+ """Load the Met Office's NAME III grid output files.
+
+ Loads the Met Office's NAME III grid output files returning headers, column
+ definitions and data arrays as 3 separate lists.
+
+ """
+ # Loading a file gives a generator of lines which can be progressed using
+ # the next() function. This will come in handy as we wish to progress
+ # through the file line by line.
+ with open(filename) as file_handle:
+ # Define a dictionary which can hold the header metadata for this file.
+ headers = {}
+
+ # Skip the NAME header of the file which looks something like
+ # 'NAME III (version X.X.X)'.
+ next(file_handle)
+
+ # Read the next 16 lines of header information, putting the form
+ # "header name: header value" into a dictionary.
+ for _ in range(16):
+ header_name, header_value = next(file_handle).split(":")
+
+ # Strip off any spurious space characters in the header name and
+ # value.
+ header_name = header_name.strip()
+ header_value = header_value.strip()
+
+ # Cast some headers into floats or integers if they match a given
+ # header name.
+ if header_name in FLOAT_HEADERS:
+ header_value = float(header_value)
+ elif header_name in INT_HEADERS:
+ header_value = int(header_value)
+ elif header_name in DATE_HEADERS:
+ # convert the time to python datetimes
+ header_value = datetime.datetime.strptime(header_value, UTC_format)
+
+ headers[header_name] = header_value
+
+ # Skip the next blank line in the file.
+ next(file_handle)
+
+ # Read the next 7 lines of column definitions.
+ column_headings = {}
+ for column_header_name in COLUMN_NAMES:
+ column_headings[column_header_name] = [
+ col.strip() for col in next(file_handle).split(",")
+ ][:-1]
+
+ # Convert the time to python datetimes.
+ new_time_column_header = []
+ for i, t in enumerate(column_headings["time"]):
+ # The first 4 columns aren't time at all, so don't convert them to
+ # datetimes.
+ if i >= 4:
+ t = datetime.datetime.strptime(t, UTC_format)
+ new_time_column_header.append(t)
+ column_headings["time"] = new_time_column_header
+
+ # Skip the blank line after the column headers.
+ next(file_handle)
+
+ # Make a list of data arrays to hold the data for each column.
+ data_shape = (headers["Y grid size"], headers["X grid size"])
+ data_arrays = [
+ np.zeros(data_shape, dtype=np.float32)
+ for i in range(headers["Number of fields"])
+ ]
+
+ # Iterate over the remaining lines which represent the data in a column
+ # form.
+ for line in file_handle:
+ # Split the line by comma, removing the last empty column caused by
+ # the trailing comma.
+ vals = line.split(",")[:-1]
+
+ # Cast the x and y grid positions to floats and convert them to
+ # zero-based indices (the numbers are 1-based grid positions, where
+ # 0.5 represents half a grid point).
+ x = int(float(vals[0]) - 1.5)
+ y = int(float(vals[1]) - 1.5)
+
+ # Populate the data arrays (i.e. all columns but the leading 4).
+ for i, data_array in enumerate(data_arrays):
+ data_array[y, x] = float(vals[i + 4])
+
+ return headers, column_headings, data_arrays
+
+
+def NAME_to_cube(filenames, callback):
+ """Return a generator of cubes given a list of filenames and a callback."""
+ for filename in filenames:
+ header, column_headings, data_arrays = load_NAME_III(filename)
+
+ for i, data_array in enumerate(data_arrays):
+ # turn the dictionary of column headers with a list of header
+ # information for each field into a dictionary of headers for just
+ # this field. Ignore the first 4 columns of grid position (data was
+ # located with the data array).
+ field_headings = dict((k, v[i + 4]) for k, v in column_headings.items())
+
+ # make a cube
+ cube = iris.cube.Cube(data_array)
+
+ # define the name and unit
+ name = "%s %s" % (
+ field_headings["species"],
+ field_headings["quantity"],
+ )
+ name = name.upper().replace(" ", "_")
+ cube.rename(name)
+ # Some units are badly encoded in the file; fix this by putting a
+ # space in between. (If "gs" is not found, the string is returned
+ # unchanged.)
+ cube.units = field_headings["unit"].replace("gs", "g s")
+
+ # define and add the singular coordinates of the field (flight
+ # level, time etc.)
+ cube.add_aux_coord(
+ icoords.AuxCoord(
+ field_headings["z_level"],
+ long_name="flight_level",
+ units="1",
+ )
+ )
+
+ # define the time unit and use it to serialise the datetime for the
+ # time coordinate
+ time_unit = Unit("hours since epoch", calendar=CALENDAR_STANDARD)
+ time_coord = icoords.AuxCoord(
+ time_unit.date2num(field_headings["time"]),
+ standard_name="time",
+ units=time_unit,
+ )
+ cube.add_aux_coord(time_coord)
+
+ # build a coordinate system which can be referenced by latitude and
+ # longitude coordinates
+ lat_lon_coord_system = icoord_systems.GeogCS(6371229)
+
+ # build regular latitude and longitude coordinates which have
+ # bounds
+ start = header["X grid origin"] + header["X grid resolution"]
+ step = header["X grid resolution"]
+ count = header["X grid size"]
+ pts = start + np.arange(count, dtype=np.float32) * step
+ lon_coord = icoords.DimCoord(
+ pts,
+ standard_name="longitude",
+ units="degrees",
+ coord_system=lat_lon_coord_system,
+ )
+ lon_coord.guess_bounds()
+
+ start = header["Y grid origin"] + header["Y grid resolution"]
+ step = header["Y grid resolution"]
+ count = header["Y grid size"]
+ pts = start + np.arange(count, dtype=np.float32) * step
+ lat_coord = icoords.DimCoord(
+ pts,
+ standard_name="latitude",
+ units="degrees",
+ coord_system=lat_lon_coord_system,
+ )
+ lat_coord.guess_bounds()
+
+ # add the latitude and longitude coordinates to the cube, with
+ # mappings to data dimensions
+ cube.add_dim_coord(lat_coord, 0)
+ cube.add_dim_coord(lon_coord, 1)
+
+ # implement standard iris callback capability. Although callbacks
+ # are not used in this example, the standard mechanism for a custom
+ # loader to implement a callback is shown:
+ cube = iris.io.run_callback(
+ callback, cube, [header, field_headings, data_array], filename
+ )
+
+ # yield the cube created (the loop will continue when the next()
+ # element is requested)
+ yield cube
+
+
+# Create a format_picker specification of the NAME file format, giving it a
+# priority greater than the built-in NAME loader.
+_NAME_III_spec = format_picker.FormatSpecification(
+ "Name III",
+ format_picker.LeadingLine(),
+ lambda line: line.startswith(b"NAME III"),
+ NAME_to_cube,
+ priority=6,
+)
+
+# Register the NAME loader with iris
+iris.fileformats.FORMAT_AGENT.add_spec(_NAME_III_spec)
+
+
+# ---------------------------------------------
+# | Using the new loader |
+# ---------------------------------------------
+
+
+def main():
+ fname = iris.sample_data_path("NAME_output.txt")
+
+ boundary_volc_ash_constraint = iris.Constraint(
+ "VOLCANIC_ASH_AIR_CONCENTRATION", flight_level="From FL000 - FL200"
+ )
+
+ # Callback shown as None to illustrate where a cube-level callback function
+ # would be used if required
+ cube = iris.load_cube(fname, boundary_volc_ash_constraint, callback=None)
+
+ # draw contour levels for the data (the top level is just a catch-all)
+ levels = (0.0002, 0.002, 0.004, 1e10)
+ cs = iplt.contourf(
+ cube,
+ levels=levels,
+ colors=("#80ffff", "#939598", "#e00404"),
+ )
+
+ # draw a black outline at the lowest contour to highlight affected areas
+ iplt.contour(cube, levels=(levels[0], 100), colors="black")
+
+ # set an extent and a background image for the map
+ ax = plt.gca()
+ ax.set_extent((-90, 20, 20, 75))
+ ax.stock_img("ne_shaded")
+
+ # make a legend, with custom labels, for the coloured contour set
+ artists, _ = cs.legend_elements()
+ labels = [
+ r"$%s < x \leq %s$" % (levels[0], levels[1]),
+ r"$%s < x \leq %s$" % (levels[1], levels[2]),
+ r"$x > %s$" % levels[2],
+ ]
+ ax.legend(artists, labels, title="Ash concentration / g m-3", loc="upper left")
+
+ time = cube.coord("time")
+ time_date = time.units.num2date(time.points[0]).strftime(UTC_format)
+ plt.title("Volcanic ash concentration forecast\nvalid at %s" % time_date)
+
+ iplt.show()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/docs/gallery_code/general/plot_global_map.py b/docs/gallery_code/general/plot_global_map.py
new file mode 100644
index 0000000000..60ac200a43
--- /dev/null
+++ b/docs/gallery_code/general/plot_global_map.py
@@ -0,0 +1,37 @@
+"""
+Quickplot of a 2D Cube on a Map
+===============================
+
+This example demonstrates a contour plot of global air temperature. The plot
+title and the labels for the axes are automatically derived from the metadata.
+
+""" # noqa: D205, D212, D400
+
+import cartopy.crs as ccrs
+import matplotlib.pyplot as plt
+
+import iris
+import iris.plot as iplt
+import iris.quickplot as qplt
+
+
+def main():
+ fname = iris.sample_data_path("air_temp.pp")
+ temperature = iris.load_cube(fname)
+
+ # Plot #1: contourf with axes longitude from -180 to 180
+ plt.figure(figsize=(12, 5))
+ plt.subplot(121)
+ qplt.contourf(temperature, 15)
+ plt.gca().coastlines()
+
+ # Plot #2: contourf with axes longitude from 0 to 360
+ proj = ccrs.PlateCarree(central_longitude=-180.0)
+ plt.subplot(122, projection=proj)
+ qplt.contourf(temperature, 15)
+ plt.gca().coastlines()
+ iplt.show()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/docs/gallery_code/general/plot_inset.py b/docs/gallery_code/general/plot_inset.py
new file mode 100644
index 0000000000..5edd375743
--- /dev/null
+++ b/docs/gallery_code/general/plot_inset.py
@@ -0,0 +1,69 @@
+"""
+Test Data Showing Inset Plots
+=============================
+
+This example demonstrates the use of a single 3D data cube with time, latitude
+and longitude dimensions to plot a temperature series for a single latitude
+coordinate, with an inset plot of the data region.
+
+""" # noqa: D205, D212, D400
+
+import cartopy.crs as ccrs
+import matplotlib.pyplot as plt
+import numpy as np
+
+import iris
+import iris.plot as iplt
+import iris.quickplot as qplt
+
+
+def main():
+ cube1 = iris.load_cube(iris.sample_data_path("ostia_monthly.nc"))
+ # Slice into cube to retrieve data for the inset map showing the
+ # data region
+ region = cube1[-1, :, :]
+ # Average over latitude to reduce cube to 1 dimension
+ plot_line = region.collapsed("latitude", iris.analysis.MEAN)
+
+ # Open a window for plotting
+ fig = plt.figure()
+ # Add a single subplot (axes). Could also use "ax_main = plt.subplot()"
+ ax_main = fig.add_subplot(1, 1, 1)
+ # Produce a quick plot of the 1D cube
+ qplt.plot(plot_line)
+
+ # Set x limits to match the data
+ ax_main.set_xlim(0, plot_line.coord("longitude").points.max())
+ # Adjust the y limits so that the inset map won't clash with main plot
+ ax_main.set_ylim(294, 310)
+ ax_main.set_title("Meridional Mean Temperature")
+ # Add grid lines
+ ax_main.grid()
+
+ # Add a second set of axes specifying the fractional coordinates within
+ # the figure with bottom left corner at x=0.55, y=0.58 with width
+ # 0.3 and height 0.25.
+ # Also specify the projection
+ ax_sub = fig.add_axes(
+ [0.55, 0.58, 0.3, 0.25],
+ projection=ccrs.Mollweide(central_longitude=180),
+ )
+
+ # Use iris.plot (iplt) here so colour bar properties can be specified
+ # Also use a sequential colour scheme to reduce confusion for those with
+ # colour-blindness
+ iplt.pcolormesh(region, cmap="Blues")
+ # Manually set the orientation and tick marks on your colour bar
+ ticklist = np.linspace(np.min(region.data), np.max(region.data), 4)
+ plt.colorbar(orientation="horizontal", ticks=ticklist)
+ ax_sub.set_title("Data Region")
+ # Add coastlines
+ ax_sub.coastlines()
+ # request to show entire map, using the colour mesh on the data region only
+ ax_sub.set_global()
+
+ qplt.show()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/docs/gallery_code/general/plot_lineplot_with_legend.py b/docs/gallery_code/general/plot_lineplot_with_legend.py
new file mode 100644
index 0000000000..d1b3acd912
--- /dev/null
+++ b/docs/gallery_code/general/plot_lineplot_with_legend.py
@@ -0,0 +1,48 @@
+"""
+Multi-Line Temperature Profile Plot
+===================================
+
+""" # noqa: D205, D212, D400
+
+import matplotlib.pyplot as plt
+
+import iris
+import iris.plot as iplt
+import iris.quickplot as qplt
+
+
+def main():
+ fname = iris.sample_data_path("air_temp.pp")
+
+ # Load exactly one cube from the given file.
+ temperature = iris.load_cube(fname)
+
+ # We only want a small number of latitudes, so filter some out
+ # using "extract".
+ temperature = temperature.extract(
+ iris.Constraint(latitude=lambda cell: 68 <= cell < 78)
+ )
+
+ for cube in temperature.slices("longitude"):
+ # Create a string label to identify this cube (i.e. latitude: value).
+ cube_label = "latitude: %s" % cube.coord("latitude").points[0]
+
+ # Plot the cube, and associate it with a label.
+ qplt.plot(cube, label=cube_label)
+
+ # Add the legend with 2 columns.
+ plt.legend(ncol=2)
+
+ # Put a grid on the plot.
+ plt.grid(True)
+
+ # Tell matplotlib not to extend the plot axes range to nicely
+ # rounded numbers.
+ plt.axis("tight")
+
+ # Finally, show it.
+ iplt.show()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/docs/gallery_code/general/plot_polar_stereo.py b/docs/gallery_code/general/plot_polar_stereo.py
new file mode 100644
index 0000000000..99abbd0ae0
--- /dev/null
+++ b/docs/gallery_code/general/plot_polar_stereo.py
@@ -0,0 +1,28 @@
+"""
+Example of a Polar Stereographic Plot
+=====================================
+
+Demonstrates plotting data that are defined on a polar stereographic
+projection.
+
+""" # noqa: D205, D212, D400
+
+import matplotlib.pyplot as plt
+
+import iris
+import iris.plot as iplt
+import iris.quickplot as qplt
+
+
+def main():
+ file_path = iris.sample_data_path("toa_brightness_stereographic.nc")
+ cube = iris.load_cube(file_path)
+ qplt.contourf(cube)
+ ax = plt.gca()
+ ax.coastlines()
+ ax.gridlines()
+ iplt.show()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/docs/gallery_code/general/plot_polynomial_fit.py b/docs/gallery_code/general/plot_polynomial_fit.py
new file mode 100644
index 0000000000..37cc4e283b
--- /dev/null
+++ b/docs/gallery_code/general/plot_polynomial_fit.py
@@ -0,0 +1,54 @@
+"""
+Fitting a Polynomial
+====================
+
+This example demonstrates computing a polynomial fit to 1D data from an Iris
+cube, adding the fit to the cube's metadata, and plotting both the 1D data and
+the fit.
+
+""" # noqa: D205, D212, D400
+
+import matplotlib.pyplot as plt
+import numpy as np
+
+import iris
+import iris.quickplot as qplt
+
+
+def main():
+ # Load some test data.
+ fname = iris.sample_data_path("A1B_north_america.nc")
+ cube = iris.load_cube(fname)
+
+ # Extract a single time series at a latitude and longitude point.
+ location = next(cube.slices(["time"]))
+
+ # Calculate a polynomial fit to the data at this time series.
+ x_points = location.coord("time").points
+ y_points = location.data
+ degree = 2
+
+ p = np.polyfit(x_points, y_points, degree)
+ y_fitted = np.polyval(p, x_points)
+
+ # Add the polynomial fit values to the time series to take
+ # full advantage of Iris plotting functionality.
+ long_name = "degree_{}_polynomial_fit_of_{}".format(degree, cube.name())
+ fit = iris.coords.AuxCoord(y_fitted, long_name=long_name, units=location.units)
+ location.add_aux_coord(fit, 0)
+
+ qplt.plot(location.coord("time"), location, label="data")
+ qplt.plot(
+ location.coord("time"),
+ location.coord(long_name),
+ "g-",
+ label="polynomial fit",
+ )
+ plt.legend(loc="best")
+ plt.title("Trend of US air temperature over time")
+
+ qplt.show()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/docs/gallery_code/general/plot_projections_and_annotations.py b/docs/gallery_code/general/plot_projections_and_annotations.py
new file mode 100644
index 0000000000..6e8ba5a5af
--- /dev/null
+++ b/docs/gallery_code/general/plot_projections_and_annotations.py
@@ -0,0 +1,140 @@
+"""
+Plotting in Different Projections
+=================================
+
+This example shows how to overlay data and graphics in different projections,
+demonstrating various features of Iris, Cartopy and matplotlib.
+
+We wish to overlay two datasets, defined on different rotated-pole grids.
+To display both together, we make a pseudocoloured plot of the first, overlaid
+with contour lines from the second.
+We also add some lines and text annotations drawn in various projections.
+
+We plot these over a specified region, in two different map projections.
+
+""" # noqa: D205, D212, D400
+
+import cartopy.crs as ccrs
+import matplotlib.pyplot as plt
+import numpy as np
+
+import iris
+import iris.plot as iplt
+
+# Define a Cartopy 'ordinary' lat-lon coordinate reference system.
+crs_latlon = ccrs.PlateCarree()
+
+
+def make_plot(projection_name, projection_crs):
+ # Create a matplotlib Figure.
+ plt.figure()
+
+ # Add a matplotlib Axes, specifying the required display projection.
+ # NOTE: specifying 'projection' (a "cartopy.crs.Projection") makes the
+ # resulting Axes a "cartopy.mpl.geoaxes.GeoAxes", which supports plotting
+ # in different coordinate systems.
+ ax = plt.axes(projection=projection_crs)
+
+ # Set display limits to include a set region of latitude * longitude.
+ # (Note: Cartopy-specific).
+ ax.set_extent((-80.0, 20.0, 10.0, 80.0), crs=crs_latlon)
+
+ # Add coastlines and meridians/parallels (Cartopy-specific).
+ ax.coastlines(linewidth=0.75, color="navy")
+ ax.gridlines(crs=crs_latlon, linestyle="-")
+
+ # Plot the first dataset as a pseudocolour filled plot.
+ maindata_filepath = iris.sample_data_path("rotated_pole.nc")
+ main_data = iris.load_cube(maindata_filepath)
+ # NOTE: iplt.pcolormesh calls "pyplot.pcolormesh", passing in a coordinate
+ # system with the 'transform' keyword: This enables the Axes (a cartopy
+ # GeoAxes) to reproject the plot into the display projection.
+ iplt.pcolormesh(main_data, cmap="RdBu_r")
+
+ # Overplot the other dataset (which has a different grid), as contours.
+ overlay_filepath = iris.sample_data_path("space_weather.nc")
+ overlay_data = iris.load_cube(overlay_filepath, "total electron content")
+ # NOTE: as above, "iris.plot.contour" calls "pyplot.contour" with a
+ # 'transform' keyword, enabling Cartopy reprojection.
+ iplt.contour(overlay_data, 20, linewidths=2.0, colors="darkgreen", linestyles="-")
+
+ # Draw a high resolution margin line, inset from the pcolormesh border.
+ # First calculate rectangle corners, 7% in from each corner of the data.
+ x_coord, y_coord = main_data.coord(axis="x"), main_data.coord(axis="y")
+ x_start, x_end = np.min(x_coord.points), np.max(x_coord.points)
+ y_start, y_end = np.min(y_coord.points), np.max(y_coord.points)
+ margin = 0.07
+ margin_fractions = np.array([margin, 1.0 - margin])
+ x_lower, x_upper = x_start + (x_end - x_start) * margin_fractions
+ y_lower, y_upper = y_start + (y_end - y_start) * margin_fractions
+ steps = np.linspace(0, 1)
+ zeros, ones = np.zeros(steps.size), np.ones(steps.size)
+ x_delta, y_delta = (x_upper - x_lower), (y_upper - y_lower)
+ x_points = x_lower + x_delta * np.concatenate((steps, ones, steps[::-1], zeros))
+ y_points = y_lower + y_delta * np.concatenate((zeros, steps, ones, steps[::-1]))
+ # Get the Iris coordinate system of the X coordinate (Y should be the same).
+ cs_data1 = x_coord.coord_system
+ # Construct an equivalent Cartopy coordinate reference system ("crs").
+ crs_data1 = cs_data1.as_cartopy_crs()
+ # Draw the rectangle in this crs, with matplotlib "pyplot.plot".
+ # NOTE: the 'transform' keyword specifies a non-display coordinate system
+ # for the plot points (as used by the "iris.plot" functions).
+ plt.plot(
+ x_points,
+ y_points,
+ transform=crs_data1,
+ linewidth=2.0,
+ color="white",
+ linestyle="--",
+ )
+
+ # Mark some particular places with a small circle and a name label...
+ # Define some test points with latitude and longitude coordinates.
+ city_data = [
+ ("London", 51.5072, 0.1275),
+ ("Halifax, NS", 44.67, -63.61),
+ ("Reykjavik", 64.1333, -21.9333),
+ ]
+ # Place a single marker point and a text annotation at each place.
+ for name, lat, lon in city_data:
+ plt.plot(
+ lon,
+ lat,
+ marker="o",
+ markersize=7.0,
+ markeredgewidth=2.5,
+ markerfacecolor="black",
+ markeredgecolor="white",
+ transform=crs_latlon,
+ )
+ # NOTE: the "plt.annotate call" does not have a "transform=" keyword,
+ # so for this one we transform the coordinates with a Cartopy call.
+ at_x, at_y = ax.projection.transform_point(lon, lat, src_crs=crs_latlon)
+ plt.annotate(
+ name,
+ xy=(at_x, at_y),
+ xytext=(30, 20),
+ textcoords="offset points",
+ color="black",
+ backgroundcolor="white",
+ size="large",
+ arrowprops=dict(arrowstyle="->", color="white", linewidth=2.5),
+ )
+
+ # Add a title, and display.
+ plt.title(
+ "A pseudocolour plot on the {} projection,\nwith overlaid contours.".format(
+ projection_name
+ )
+ )
+ iplt.show()
+
+
+def main():
+ # Demonstrate with two different display projections.
+ make_plot("Equidistant Cylindrical", ccrs.PlateCarree())
+ make_plot("North Polar Stereographic", ccrs.NorthPolarStereo())
+
+
+if __name__ == "__main__":
+ main()
diff --git a/docs/gallery_code/general/plot_rotated_pole_mapping.py b/docs/gallery_code/general/plot_rotated_pole_mapping.py
new file mode 100644
index 0000000000..e9e3656184
--- /dev/null
+++ b/docs/gallery_code/general/plot_rotated_pole_mapping.py
@@ -0,0 +1,65 @@
+"""
+Rotated Pole Mapping
+====================
+
+This example uses several visualisation methods to achieve an array of
+differing images, including:
+
+* Visualisation of point-based data
+* Contouring of point-based data
+* Block plot of contiguous bounded data
+* Non-native projection and a Natural Earth shaded relief image underlay
+
+""" # noqa: D205, D212, D400
+
+import cartopy.crs as ccrs
+import matplotlib.pyplot as plt
+
+import iris
+import iris.analysis.cartography
+import iris.plot as iplt
+import iris.quickplot as qplt
+
+
+def main():
+ # Load some test data.
+ fname = iris.sample_data_path("rotated_pole.nc")
+ air_pressure = iris.load_cube(fname)
+
+ # Plot #1: Point plot showing data values & a colorbar
+ plt.figure()
+ points = qplt.points(air_pressure, c=air_pressure.data)
+ cb = plt.colorbar(points, orientation="horizontal")
+ cb.set_label(air_pressure.units)
+ plt.gca().coastlines()
+ iplt.show()
+
+ # Plot #2: Contourf of the point based data
+ plt.figure()
+ qplt.contourf(air_pressure, 15)
+ plt.gca().coastlines()
+ iplt.show()
+
+ # Plot #3: Contourf overlaid by coloured point data
+ plt.figure()
+ qplt.contourf(air_pressure)
+ iplt.points(air_pressure, c=air_pressure.data)
+ plt.gca().coastlines()
+ iplt.show()
+
+ # For the purposes of this example, add some bounds to the latitude
+ # and longitude
+ air_pressure.coord("grid_latitude").guess_bounds()
+ air_pressure.coord("grid_longitude").guess_bounds()
+
+ # Plot #4: Block plot
+ plt.figure()
+ plt.axes(projection=ccrs.PlateCarree())
+ iplt.pcolormesh(air_pressure)
+ plt.gca().stock_img()
+ plt.gca().coastlines()
+ iplt.show()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/docs/gallery_code/general/plot_zonal_means.py b/docs/gallery_code/general/plot_zonal_means.py
new file mode 100644
index 0000000000..d4ec1eb1fc
--- /dev/null
+++ b/docs/gallery_code/general/plot_zonal_means.py
@@ -0,0 +1,91 @@
+"""
+Zonal Mean Diagram of Air Temperature
+=====================================
+
+This example demonstrates aligning a linear plot and a cartographic plot
+using Matplotlib.
+
+""" # noqa: D205, D212, D400
+
+import cartopy.crs as ccrs
+import matplotlib.pyplot as plt
+from mpl_toolkits.axes_grid1 import make_axes_locatable
+import numpy as np
+
+import iris
+from iris.analysis import MEAN
+import iris.plot as iplt
+import iris.quickplot as qplt
+
+
+def main():
+ # Loads air_temp.pp and "collapses" longitude into a single, average value.
+ fname = iris.sample_data_path("air_temp.pp")
+ temperature = iris.load_cube(fname)
+ collapsed_temp = temperature.collapsed("longitude", MEAN)
+
+ # Set the y-axis limits to -90 and 90, with a tick every 15 degrees.
+ start, stop, step = -90, 90, 15
+ yticks = np.arange(start, stop + step, step)
+ ylim = [start, stop]
+
+ # Plot "temperature" on a cartographic plot and set the ticks and titles
+ # on the axes.
+ fig = plt.figure(figsize=[12, 4])
+
+ ax1 = fig.add_subplot(111, projection=ccrs.PlateCarree())
+ im = iplt.contourf(temperature, cmap="RdYlBu_r")
+ ax1.coastlines()
+ ax1.gridlines()
+ ax1.set_xticks([-180, -90, 0, 90, 180])
+ ax1.set_yticks(yticks)
+ ax1.set_title("Air Temperature")
+ ax1.set_ylabel(f"Latitude / {temperature.coord('latitude').units}")
+ ax1.set_xlabel(f"Longitude / {temperature.coord('longitude').units}")
+ ax1.set_ylim(*ylim)
+
+ # Create a Matplotlib AxesDivider object to allow alignment of other
+ # Axes objects.
+ divider = make_axes_locatable(ax1)
+
+ # Gives the air temperature bar size, colour and a title.
+ ax2 = divider.new_vertical(
+ size="5%", pad=0.5, axes_class=plt.Axes, pack_start=True
+ ) # creates 2nd axis
+ fig.add_axes(ax2)
+ cbar = plt.colorbar(
+ im, cax=ax2, orientation="horizontal"
+ ) # puts colour bar on second axis
+ cbar.ax.set_xlabel(f"{temperature.units}") # labels colour bar
+
+ # Plot "collapsed_temp" on the mean graph and set the ticks and titles
+ # on the axes.
+ ax3 = divider.new_horizontal(
+ size="30%", pad=0.4, axes_class=plt.Axes
+ ) # create 3rd axis
+ fig.add_axes(ax3)
+ qplt.plot(
+ collapsed_temp, collapsed_temp.coord("latitude")
+ ) # plots temperature collapsed over longitude against latitude
+ ax3.axhline(0, color="k", linewidth=0.5)
+
+ # Creates zonal mean details
+ ax3.set_title("Zonal Mean")
+ ax3.yaxis.set_label_position("right")
+ ax3.yaxis.tick_right()
+ ax3.set_yticks(yticks)
+ ax3.grid()
+
+ # Round the x-axis limits of the third axes outward to the nearest 20.
+ data_max = collapsed_temp.data.max()
+ x_max = data_max - data_max % -20
+ data_min = collapsed_temp.data.min()
+ x_min = data_min - data_min % 20
+ ax3.set_xlim(x_min, x_max)
+ ax3.set_ylim(*ylim)
+
+ plt.show()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/docs/gallery_code/meteorology/README.rst b/docs/gallery_code/meteorology/README.rst
new file mode 100644
index 0000000000..e8e902b498
--- /dev/null
+++ b/docs/gallery_code/meteorology/README.rst
@@ -0,0 +1,3 @@
+Meteorology
+-----------
+
diff --git a/docs/gallery_code/meteorology/plot_COP_1d.py b/docs/gallery_code/meteorology/plot_COP_1d.py
new file mode 100644
index 0000000000..84addd140a
--- /dev/null
+++ b/docs/gallery_code/meteorology/plot_COP_1d.py
@@ -0,0 +1,112 @@
+"""
+Global Average Annual Temperature Plot
+======================================
+
+Produces a time-series plot of North American temperature forecasts for 2
+different emission scenarios. Constraining data to a limited spatial area also
+features in this example.
+
+The data used comes from the HadGEM2-AO model simulations for the A1B and E1
+scenarios, both of which were derived using the IMAGE Integrated Assessment
+Model (Johns et al. 2011; Lowe et al. 2009).
+
+References
+----------
+ Johns T.C., et al. (2011) Climate change under aggressive mitigation: the
+ ENSEMBLES multi-model experiment. Climate Dynamics, Vol 37, No. 9-10,
+ doi:10.1007/s00382-011-1005-5.
+
+ Lowe J.A., C.D. Hewitt, D.P. Van Vuuren, T.C. Johns, E. Stehfest, J-F.
+ Royer, and P. van der Linden, 2009. New Study For Climate Modeling,
+ Analyses, and Scenarios. Eos Trans. AGU, Vol 90, No. 21,
+ doi:10.1029/2009EO210001.
+
+.. seealso::
+
+ Further details on the aggregation functionality being used in this example
+ can be found in :ref:`cube-statistics`.
+
+""" # noqa: D205, D212, D400
+
+import matplotlib.pyplot as plt
+import numpy as np
+
+import iris
+import iris.analysis.cartography
+import iris.plot as iplt
+import iris.quickplot as qplt
+
+
+def main():
+ # Load data into three Cubes, one for each set of NetCDF files.
+ e1 = iris.load_cube(iris.sample_data_path("E1_north_america.nc"))
+
+ a1b = iris.load_cube(iris.sample_data_path("A1B_north_america.nc"))
+
+ # Load the global pre-industrial mean temperature, and limit the domain
+ # to the same North American region covered by e1 and a1b.
+ north_america = iris.Constraint(
+ longitude=lambda v: 225 <= v <= 315, latitude=lambda v: 15 <= v <= 60
+ )
+ pre_industrial = iris.load_cube(
+ iris.sample_data_path("pre-industrial.pp"), north_america
+ )
+
+ # Generate area-weights array. As e1 and a1b are on the same grid we can
+ # do this just once and reuse. This method requires bounds on lat/lon
+ # coords, so let's add some in sensible locations using the "guess_bounds"
+ # method.
+ e1.coord("latitude").guess_bounds()
+ e1.coord("longitude").guess_bounds()
+ e1_grid_areas = iris.analysis.cartography.area_weights(e1)
+ pre_industrial.coord("latitude").guess_bounds()
+ pre_industrial.coord("longitude").guess_bounds()
+ pre_grid_areas = iris.analysis.cartography.area_weights(pre_industrial)
+
+ # Perform the area-weighted mean for each of the datasets using the
+ # computed grid-box areas.
+ pre_industrial_mean = pre_industrial.collapsed(
+ ["latitude", "longitude"], iris.analysis.MEAN, weights=pre_grid_areas
+ )
+ e1_mean = e1.collapsed(
+ ["latitude", "longitude"], iris.analysis.MEAN, weights=e1_grid_areas
+ )
+ a1b_mean = a1b.collapsed(
+ ["latitude", "longitude"], iris.analysis.MEAN, weights=e1_grid_areas
+ )
+
+ # Plot the datasets
+ qplt.plot(e1_mean, label="E1 scenario", lw=1.5, color="blue")
+ qplt.plot(a1b_mean, label="A1B-Image scenario", lw=1.5, color="red")
+
+ # Draw a horizontal line showing the pre-industrial mean
+ plt.axhline(
+ y=pre_industrial_mean.data,
+ color="gray",
+ linestyle="dashed",
+ label="pre-industrial",
+ lw=1.5,
+ )
+
+ # Constrain the period 1860-1999 and extract the observed data from a1b
+ constraint = iris.Constraint(time=lambda cell: 1860 <= cell.point.year <= 1999)
+ observed = a1b_mean.extract(constraint)
+
+ # Assert that this data set is the same as the e1 scenario:
+ # they share data up to the 1999 cut off.
+ assert np.all(np.isclose(observed.data, e1_mean.extract(constraint).data))
+
+ # Plot the observed data
+ qplt.plot(observed, label="observed", color="black", lw=1.5)
+
+ # Add a legend and title
+ plt.legend(loc="upper left")
+ plt.title("North American mean air temperature", fontsize=18)
+
+ plt.xlabel("Time / year")
+ plt.grid()
+ iplt.show()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/docs/gallery_code/meteorology/plot_COP_maps.py b/docs/gallery_code/meteorology/plot_COP_maps.py
new file mode 100644
index 0000000000..1c5e865a8f
--- /dev/null
+++ b/docs/gallery_code/meteorology/plot_COP_maps.py
@@ -0,0 +1,193 @@
+"""
+Global Average Annual Temperature Maps
+======================================
+
+Produces maps of global temperature forecasts from the A1B and E1 scenarios.
+
+The data used comes from the HadGEM2-AO model simulations for the A1B and E1
+scenarios, both of which were derived using the IMAGE Integrated Assessment
+Model (Johns et al. 2011; Lowe et al. 2009).
+
+References
+----------
+ Johns T.C., et al. (2011) Climate change under aggressive mitigation: the
+ ENSEMBLES multi-model experiment. Climate Dynamics, Vol 37, No. 9-10,
+ doi:10.1007/s00382-011-1005-5.
+
+ Lowe J.A., C.D. Hewitt, D.P. Van Vuuren, T.C. Johns, E. Stehfest, J-F.
+ Royer, and P. van der Linden, 2009. New Study For Climate Modeling,
+ Analyses, and Scenarios. Eos Trans. AGU, Vol 90, No. 21,
+ doi:10.1029/2009EO210001.
+
+""" # noqa: D205, D212, D400
+
+import os.path
+
+import matplotlib.pyplot as plt
+import numpy as np
+
+import iris
+import iris.coords as coords
+import iris.plot as iplt
+
+
+def cop_metadata_callback(cube, field, filename):
+ """Add an "Experiment" coordinate which comes from the filename."""
+ # Extract the experiment name (such as A1B or E1) from the filename (in
+ # this case it is just the start of the file name, before the first ".").
+ fname = os.path.basename(filename) # filename without path.
+ experiment_label = fname.split(".")[0]
+
+ # Create a coordinate with the experiment label in it...
+ exp_coord = coords.AuxCoord(
+ experiment_label, long_name="Experiment", units="no_unit"
+ )
+
+ # ...and add it to the cube.
+ cube.add_aux_coord(exp_coord)
+
+
+def main():
+ # Load E1 and A1B scenarios using the callback to update the metadata.
+ scenario_files = [
+ iris.sample_data_path(fname) for fname in ["E1.2098.pp", "A1B.2098.pp"]
+ ]
+ scenarios = iris.load(scenario_files, callback=cop_metadata_callback)
+
+ # Load the preindustrial reference data.
+ preindustrial = iris.load_cube(iris.sample_data_path("pre-industrial.pp"))
+
+ # Define evenly spaced contour levels: -2.5, -1.5, ... 15.5, 16.5 with the
+ # specific colours.
+ levels = np.arange(20) - 2.5
+ red = (
+ np.array(
+ [
+ 0,
+ 0,
+ 221,
+ 239,
+ 229,
+ 217,
+ 239,
+ 234,
+ 228,
+ 222,
+ 205,
+ 196,
+ 161,
+ 137,
+ 116,
+ 89,
+ 77,
+ 60,
+ 51,
+ ]
+ )
+ / 256.0
+ )
+ green = (
+ np.array(
+ [
+ 16,
+ 217,
+ 242,
+ 243,
+ 235,
+ 225,
+ 190,
+ 160,
+ 128,
+ 87,
+ 72,
+ 59,
+ 33,
+ 21,
+ 29,
+ 30,
+ 30,
+ 29,
+ 26,
+ ]
+ )
+ / 256.0
+ )
+ blue = (
+ np.array(
+ [
+ 255,
+ 255,
+ 243,
+ 169,
+ 99,
+ 51,
+ 63,
+ 37,
+ 39,
+ 21,
+ 27,
+ 23,
+ 22,
+ 26,
+ 29,
+ 28,
+ 27,
+ 25,
+ 22,
+ ]
+ )
+ / 256.0
+ )
+
+ # Put those colours into an array which can be passed to contourf as the
+ # specific colours for each level.
+ colors = np.stack([red, green, blue], axis=1)
+
+ # Make a wider than normal figure to house two maps side-by-side.
+ fig, ax_array = plt.subplots(1, 2, figsize=(12, 5))
+
+ # Loop over our scenarios to make a plot for each.
+ for ax, experiment, label in zip(ax_array, ["E1", "A1B"], ["E1", "A1B-Image"]):
+ exp_cube = scenarios.extract_cube(iris.Constraint(Experiment=experiment))
+ time_coord = exp_cube.coord("time")
+
+        # Calculate the difference from the preindustrial control run.
+ exp_anom_cube = exp_cube - preindustrial
+
+ # Plot this anomaly.
+ plt.sca(ax)
+ ax.set_title(f"HadGEM2 {label} Scenario", fontsize=10)
+ contour_result = iplt.contourf(
+ exp_anom_cube, levels, colors=colors, extend="both"
+ )
+ plt.gca().coastlines()
+
+ # Now add a colour bar which spans the two plots. Here we pass Figure.axes
+ # which is a list of all (two) axes currently on the figure. Note that
+ # these are different to the contents of ax_array, because those were
+ # standard Matplotlib Axes that Iris automatically replaced with Cartopy
+ # GeoAxes.
+ cbar = plt.colorbar(
+ contour_result, ax=fig.axes, aspect=60, orientation="horizontal"
+ )
+
+ # Label the colour bar and add ticks.
+ cbar.set_label(preindustrial.units)
+ cbar.ax.tick_params(length=0)
+
+ # Get the time datetime from the coordinate.
+ time = time_coord.units.num2date(time_coord.points[0])
+ # Set a title for the entire figure, using the year from the datetime
+ # object. Also, set the y value for the title so that it is not tight to
+ # the top of the plot.
+ fig.suptitle(
+ f"Annual Temperature Predictions for {time.year}",
+ y=0.9,
+ fontsize=18,
+ )
+
+ iplt.show()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/docs/gallery_code/meteorology/plot_TEC.py b/docs/gallery_code/meteorology/plot_TEC.py
new file mode 100644
index 0000000000..e6269eaf9b
--- /dev/null
+++ b/docs/gallery_code/meteorology/plot_TEC.py
@@ -0,0 +1,42 @@
+"""
+Ionosphere Space Weather
+========================
+
+This space weather example plots a filled contour of rotated pole point
+data with a shaded relief image underlay. The plot shows aggregated
+vertical electron content in the ionosphere.
+
+The plot exhibits an interesting outline effect due to excluding data
+values below a certain threshold.
+
+""" # noqa: D205, D212, D400
+
+import matplotlib.pyplot as plt
+import numpy.ma as ma
+
+import iris
+import iris.plot as iplt
+import iris.quickplot as qplt
+
+
+def main():
+ # Load the "total electron content" cube.
+ filename = iris.sample_data_path("space_weather.nc")
+ cube = iris.load_cube(filename, "total electron content")
+
+ # Explicitly mask negative electron content.
+ cube.data = ma.masked_less(cube.data, 0)
+
+ # Plot the cube using one hundred colour levels.
+ qplt.contourf(cube, 100)
+ plt.title("Total Electron Content")
+ plt.xlabel("longitude / degrees")
+ plt.ylabel("latitude / degrees")
+ plt.gca().stock_img()
+ plt.gca().coastlines()
+
+ iplt.show()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/docs/gallery_code/meteorology/plot_deriving_phenomena.py b/docs/gallery_code/meteorology/plot_deriving_phenomena.py
new file mode 100644
index 0000000000..81a05be9b9
--- /dev/null
+++ b/docs/gallery_code/meteorology/plot_deriving_phenomena.py
@@ -0,0 +1,92 @@
+"""
+Deriving Exner Pressure and Air Temperature
+===========================================
+
+This example shows some processing of cubes in order to derive further related
+cubes; in this case the derived cubes are Exner pressure and air temperature,
+which are calculated by combining air pressure and air potential temperature.
+Finally, the two new cubes are presented side-by-side in a plot.
+
+""" # noqa: D205, D212, D400
+
+import matplotlib.pyplot as plt
+import matplotlib.ticker
+
+import iris
+import iris.coords as coords
+import iris.iterate
+import iris.plot as iplt
+import iris.quickplot as qplt
+
+
+def limit_colorbar_ticks(contour_object):
+ """Limit colobar number of ticks.
+
+ Take a contour object which has an associated colorbar and limits the
+ number of ticks on the colorbar to 4.
+
+ """
+ colorbar = contour_object.colorbar
+ colorbar.locator = matplotlib.ticker.MaxNLocator(4)
+ colorbar.update_ticks()
+
+
+def main():
+ fname = iris.sample_data_path("colpex.pp")
+
+ # The list of phenomena of interest
+ phenomena = ["air_potential_temperature", "air_pressure"]
+
+ # Define the constraint on standard name and model level
+ constraints = [
+ iris.Constraint(phenom, model_level_number=1) for phenom in phenomena
+ ]
+
+ air_potential_temperature, air_pressure = iris.load_cubes(fname, constraints)
+
+ # Define a coordinate which represents 1000 hPa
+ p0 = coords.AuxCoord(1000, long_name="P0", units="hPa")
+ # Convert reference pressure 'p0' into the same units as 'air_pressure'
+ p0.convert_units(air_pressure.units)
+
+ # Calculate Exner pressure
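+    # (The exponent is R / c_p for dry air: the gas constant R = 287.05
+    # J kg-1 K-1 and the specific heat at constant pressure c_p = 1005.0
+    # J kg-1 K-1, giving approximately 0.286.)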
+ exner_pressure = (air_pressure / p0) ** (287.05 / 1005.0)
+    # Set the name (the unit is dimensionless)
+ exner_pressure.rename("exner_pressure")
+
+ # Calculate air_temp
+ air_temperature = exner_pressure * air_potential_temperature
+ # Set the name (the unit is K)
+ air_temperature.rename("air_temperature")
+
+ # Now create an iterator which will give us lat lon slices of
+ # exner pressure and air temperature in the form
+ # (exner_slice, air_temp_slice).
+ lat_lon_slice_pairs = iris.iterate.izip(
+ exner_pressure,
+ air_temperature,
+ coords=["grid_latitude", "grid_longitude"],
+ )
+
+ # For the purposes of this example, we only want to demonstrate the first
+ # plot.
+ lat_lon_slice_pairs = [next(lat_lon_slice_pairs)]
+
+ plt.figure(figsize=(8, 4))
+ for exner_slice, air_temp_slice in lat_lon_slice_pairs:
+ plt.subplot(121)
+ cont = qplt.contourf(exner_slice)
+
+ # The default colorbar has a few too many ticks on it, causing text to
+ # overlap. Therefore, limit the number of ticks.
+ limit_colorbar_ticks(cont)
+
+ plt.subplot(122)
+ cont = qplt.contourf(air_temp_slice)
+ limit_colorbar_ticks(cont)
+ iplt.show()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/docs/gallery_code/meteorology/plot_hovmoller.py b/docs/gallery_code/meteorology/plot_hovmoller.py
new file mode 100644
index 0000000000..829b370d78
--- /dev/null
+++ b/docs/gallery_code/meteorology/plot_hovmoller.py
@@ -0,0 +1,51 @@
+"""
+Hovmoller Diagram of Monthly Surface Temperature
+================================================
+
+This example demonstrates the creation of a Hovmoller diagram with fine control
+over plot ticks and labels. The data comes from the Met Office OSTIA project
+and has been pre-processed to calculate the monthly mean sea surface
+temperature.
+
+""" # noqa: D205, D212, D400
+
+import matplotlib.dates as mdates
+import matplotlib.pyplot as plt
+
+import iris
+import iris.plot as iplt
+import iris.quickplot as qplt
+
+
+def main():
+    # Load a single cube of surface temperature between +/- 5 latitude
+ fname = iris.sample_data_path("ostia_monthly.nc")
+ cube = iris.load_cube(
+ fname,
+ iris.Constraint("surface_temperature", latitude=lambda v: -5 < v < 5),
+ )
+
+ # Take the mean over latitude
+ cube = cube.collapsed("latitude", iris.analysis.MEAN)
+
+    # Now that we have our data in a nice way, let's create the plot:
+    # a filled contour plot with 20 levels.
+ qplt.contourf(cube, 20)
+
+ # Put a custom label on the y axis
+ plt.ylabel("Time / years")
+
+ # Stop matplotlib providing clever axes range padding
+ plt.axis("tight")
+
+ # As we are plotting annual variability, put years as the y ticks
+ plt.gca().yaxis.set_major_locator(mdates.YearLocator())
+
+ # And format the ticks to just show the year
+ plt.gca().yaxis.set_major_formatter(mdates.DateFormatter("%Y"))
+
+ iplt.show()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/docs/gallery_code/meteorology/plot_lagged_ensemble.py b/docs/gallery_code/meteorology/plot_lagged_ensemble.py
new file mode 100644
index 0000000000..7c34572136
--- /dev/null
+++ b/docs/gallery_code/meteorology/plot_lagged_ensemble.py
@@ -0,0 +1,155 @@
+"""
+Seasonal Ensemble Model Plots
+=============================
+
+This example demonstrates the loading of a lagged ensemble dataset from the
+GloSea4 model, which is then used to produce two types of plot:
+
+* The first shows the "postage stamp" style plot: an array of 14 images, one
+  for each ensemble member, with a shared colorbar. (The missing image in this
+  example represents ensemble member number 6, which was a failed run.)
+
+* The second plot shows the data limited to a region of interest, in this case
+  a region defined for forecasting ENSO (El Nino-Southern Oscillation). For
+  the purposes of this example, the ensemble mean has been subtracted from
+  each ensemble member to give a surface temperature anomaly. In practice a
+  better approach would be to subtract the climatological mean, calibrated to
+  the model, from each ensemble member.
+
+""" # noqa: D205, D212, D400
+
+import matplotlib.pyplot as plt
+import matplotlib.ticker
+import numpy as np
+
+import iris
+import iris.plot as iplt
+
+
+def realization_metadata(cube, field, fname):
+ """Modify the cube's metadata to add a "realization" coordinate.
+
+ A function which modifies the cube's metadata to add a "realization"
+ (ensemble member) coordinate from the filename if one doesn't already exist
+ in the cube.
+
+ """
+ # Add an ensemble member coordinate if one doesn't already exist.
+ if not cube.coords("realization"):
+ # The ensemble member is encoded in the filename as *_???.pp where ???
+ # is the ensemble member.
+ realization_number = fname[-6:-3]
+ realization_coord = iris.coords.AuxCoord(
+ np.int32(realization_number), "realization", units="1"
+ )
+ cube.add_aux_coord(realization_coord)
+
+
+def main():
+ # Create a constraint to extract surface temperature cubes which have a
+ # "realization" coordinate.
+ constraint = iris.Constraint("surface_temperature", realization=lambda value: True)
+ # Use this to load our ensemble. The callback ensures all our members
+ # have the "realization" coordinate and therefore they will all be loaded.
+ surface_temp = iris.load_cube(
+ iris.sample_data_path("GloSea4", "ensemble_???.pp"),
+ constraint,
+ callback=realization_metadata,
+ )
+
+ # -------------------------------------------------------------------------
+ # Plot #1: Ensemble postage stamps
+ # -------------------------------------------------------------------------
+
+ # For the purposes of this example, take the last time element of the cube.
+ # First get hold of the last time by slicing the coordinate.
+ last_time_coord = surface_temp.coord("time")[-1]
+ last_timestep = surface_temp.subset(last_time_coord)
+
+ # Find the maximum and minimum across the dataset.
+ data_min = np.min(last_timestep.data)
+ data_max = np.max(last_timestep.data)
+
+ # Create a wider than normal figure to support our many plots.
+ plt.figure(figsize=(12, 6), dpi=100)
+
+ # Also manually adjust the spacings which are used when creating subplots.
+ plt.gcf().subplots_adjust(
+ hspace=0.05,
+ wspace=0.05,
+ top=0.95,
+ bottom=0.05,
+ left=0.075,
+ right=0.925,
+ )
+
+ # Iterate over all possible latitude longitude slices.
+ for cube in last_timestep.slices(["latitude", "longitude"]):
+ # Get the ensemble member number from the ensemble coordinate.
+ ens_member = cube.coord("realization").points[0]
+
+ # Plot the data in a 4x4 grid, with each plot's position in the grid
+ # being determined by ensemble member number. The special case for the
+ # 13th ensemble member is to have the plot at the bottom right.
+ if ens_member == 13:
+ plt.subplot(4, 4, 16)
+ else:
+ plt.subplot(4, 4, ens_member + 1)
+
+ # Plot with 50 evenly spaced contour levels (49 intervals).
+ cf = iplt.contourf(cube, 49, vmin=data_min, vmax=data_max)
+
+ # Add coastlines.
+ plt.gca().coastlines()
+
+ # Make an axes to put the shared colorbar in.
+ colorbar_axes = plt.gcf().add_axes([0.35, 0.1, 0.3, 0.05])
+ colorbar = plt.colorbar(cf, colorbar_axes, orientation="horizontal")
+ colorbar.set_label(last_timestep.units)
+
+ # Limit the colorbar to 8 tick marks.
+ colorbar.locator = matplotlib.ticker.MaxNLocator(8)
+ colorbar.update_ticks()
+
+ # Get the time for the entire plot.
+ time = last_time_coord.units.num2date(last_time_coord.bounds[0, 0])
+
+ # Set a global title for the postage stamps with the date formatted by
+ # "monthname year".
+ time_string = time.strftime("%B %Y")
+ plt.suptitle(f"Surface temperature ensemble forecasts for {time_string}")
+
+ iplt.show()
+
+ # -------------------------------------------------------------------------
+ # Plot #2: ENSO plumes
+ # -------------------------------------------------------------------------
+
+    # Nino 3.4 lies between 170W and 120W, 5N and 5S, so use the intersection
+ # method to restrict to this region.
+ nino_cube = surface_temp.intersection(latitude=[-5, 5], longitude=[-170, -120])
+
+ # Calculate the horizontal mean for the nino region.
+ mean = nino_cube.collapsed(["latitude", "longitude"], iris.analysis.MEAN)
+
+ # Calculate the ensemble mean of the horizontal mean.
+ ensemble_mean = mean.collapsed("realization", iris.analysis.MEAN)
+
+ # Take the ensemble mean from each ensemble member.
+ mean -= ensemble_mean
+
+ plt.figure()
+
+ for ensemble_member in mean.slices(["time"]):
+ # Draw each ensemble member as a dashed line in black.
+ iplt.plot(ensemble_member, "--k")
+
+ plt.title("Mean temperature anomaly for ENSO 3.4 region")
+ plt.xlabel("Time")
+ plt.ylabel("Temperature anomaly / K")
+
+ iplt.show()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/docs/gallery_code/meteorology/plot_wind_barbs.py b/docs/gallery_code/meteorology/plot_wind_barbs.py
new file mode 100644
index 0000000000..f11c9a7b50
--- /dev/null
+++ b/docs/gallery_code/meteorology/plot_wind_barbs.py
@@ -0,0 +1,61 @@
+"""
+Plotting Wind Direction Using Barbs
+===================================
+
+This example demonstrates using barbs to plot wind speed contours and wind
+direction barbs from wind vector component input data. The vector components
+are co-located in space in this case.
+
+The magnitude of the wind in the original data is low and so doesn't illustrate
+the full range of barbs. The wind is scaled to simulate a storm that better
+illustrates the range of barbs that are available.
+
+""" # noqa: D205, D212, D400
+
+import matplotlib.pyplot as plt
+
+import iris
+import iris.plot as iplt
+import iris.quickplot as qplt
+
+
+def main():
+ # Load the u and v components of wind from a pp file
+ infile = iris.sample_data_path("wind_speed_lake_victoria.pp")
+
+ uwind = iris.load_cube(infile, "x_wind")
+ vwind = iris.load_cube(infile, "y_wind")
+
+ uwind.convert_units("knot")
+ vwind.convert_units("knot")
+
+ # To illustrate the full range of barbs, scale the wind speed up to pretend
+ # that a storm is passing over
+ magnitude = (uwind**2 + vwind**2) ** 0.5
+ magnitude.convert_units("knot")
+ max_speed = magnitude.collapsed(("latitude", "longitude"), iris.analysis.MAX).data
+ max_desired = 65
+
+ uwind = uwind / max_speed * max_desired
+ vwind = vwind / max_speed * max_desired
+
+ # Create a cube containing the wind speed
+ windspeed = (uwind**2 + vwind**2) ** 0.5
+ windspeed.rename("windspeed")
+ windspeed.convert_units("knot")
+
+ plt.figure()
+
+ # Plot the wind speed as a contour plot
+ qplt.contourf(windspeed)
+
+ # Add wind barbs except for the outermost values which overhang the edge
+ # of the plot if left
+ iplt.barbs(uwind[1:-1, 1:-1], vwind[1:-1, 1:-1], pivot="middle", length=6)
+
+ plt.title("Wind speed during a simulated storm")
+ qplt.show()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/docs/gallery_code/meteorology/plot_wind_speed.py b/docs/gallery_code/meteorology/plot_wind_speed.py
new file mode 100644
index 0000000000..5310ad937d
--- /dev/null
+++ b/docs/gallery_code/meteorology/plot_wind_speed.py
@@ -0,0 +1,62 @@
+"""
+Plotting Wind Direction Using Quiver
+====================================
+
+This example demonstrates using quiver to plot wind speed contours and wind
+direction arrows from wind vector component input data. The vector components
+are co-located in space in this case.
+
+For the second plot, the data used for the arrows is normalised to produce
+arrows with a uniform size on the plot.
+
+""" # noqa: D205, D212, D400
+
+import cartopy.feature as cfeat
+import matplotlib.pyplot as plt
+
+import iris
+import iris.plot as iplt
+import iris.quickplot as qplt
+
+
+def main():
+ # Load the u and v components of wind from a pp file.
+ infile = iris.sample_data_path("wind_speed_lake_victoria.pp")
+
+ uwind = iris.load_cube(infile, "x_wind")
+ vwind = iris.load_cube(infile, "y_wind")
+
+ # Create a cube containing the wind speed.
+ windspeed = (uwind**2 + vwind**2) ** 0.5
+ windspeed.rename("windspeed")
+
+ # Plot the wind speed as a contour plot.
+ qplt.contourf(windspeed, 20)
+
+ # Show the lake on the current axes.
+ lakes = cfeat.NaturalEarthFeature("physical", "lakes", "50m", facecolor="none")
+ plt.gca().add_feature(lakes)
+
+ # Add arrows to show the wind vectors.
+ iplt.quiver(uwind, vwind, pivot="middle")
+
+ plt.title("Wind speed over Lake Victoria")
+ qplt.show()
+
+ # Normalise the data for uniform arrow size.
+ u_norm = uwind / windspeed
+ v_norm = vwind / windspeed
+
+ # Make a new figure for the normalised plot.
+ plt.figure()
+
+ qplt.contourf(windspeed, 20)
+ plt.gca().add_feature(lakes)
+ iplt.quiver(u_norm, v_norm, pivot="middle")
+
+ plt.title("Wind speed over Lake Victoria")
+ qplt.show()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/docs/gallery_code/oceanography/README.rst b/docs/gallery_code/oceanography/README.rst
new file mode 100644
index 0000000000..0f3adf906b
--- /dev/null
+++ b/docs/gallery_code/oceanography/README.rst
@@ -0,0 +1,3 @@
+Oceanography
+------------
+
diff --git a/docs/gallery_code/oceanography/plot_atlantic_profiles.py b/docs/gallery_code/oceanography/plot_atlantic_profiles.py
new file mode 100644
index 0000000000..a43fb7f8cb
--- /dev/null
+++ b/docs/gallery_code/oceanography/plot_atlantic_profiles.py
@@ -0,0 +1,92 @@
+"""
+Oceanographic Profiles and T-S Diagrams
+=======================================
+
+This example demonstrates how to plot vertical profiles of different
+variables in the same axes, and how to make a scatter plot of two
+variables. There is an oceanographic theme but the same techniques are
+equally applicable to atmospheric or other kinds of data.
+
+The data used are profiles of potential temperature and salinity in the
+Equatorial and South Atlantic, output from an ocean model.
+
+The y-axis of the first plot produced will be automatically inverted due to the
+presence of the attribute positive=down on the depth coordinate. This means
+depth values intuitively increase downward on the y-axis.
+
+""" # noqa: D205, D212, D400
+
+import matplotlib.pyplot as plt
+
+import iris
+import iris.iterate
+import iris.plot as iplt
+
+
+def main():
+ # Load the gridded temperature and salinity data.
+ fname = iris.sample_data_path("atlantic_profiles.nc")
+ cubes = iris.load(fname)
+ (theta,) = cubes.extract("sea_water_potential_temperature")
+ (salinity,) = cubes.extract("sea_water_practical_salinity")
+
+ # Extract profiles of temperature and salinity from a particular point in
+ # the southern portion of the domain, and limit the depth of the profile
+ # to 1000m.
+ lon_cons = iris.Constraint(longitude=330.5)
+ lat_cons = iris.Constraint(latitude=lambda lat: -10 < lat < -9)
+ depth_cons = iris.Constraint(depth=lambda d: d <= 1000)
+ theta_1000m = theta.extract(depth_cons & lon_cons & lat_cons)
+ salinity_1000m = salinity.extract(depth_cons & lon_cons & lat_cons)
+
+ # Plot these profiles on the same set of axes. Depth is automatically
+ # recognised as a vertical coordinate and placed on the y-axis.
+ # The first plot is in the default axes. We'll use the same color for the
+ # curve and its axes/tick labels.
+ plt.figure(figsize=(5, 6))
+ temperature_color = (0.3, 0.4, 0.5)
+ ax1 = plt.gca()
+ iplt.plot(
+ theta_1000m,
+ linewidth=2,
+ color=temperature_color,
+ alpha=0.75,
+ )
+ ax1.set_xlabel("Potential Temperature / K", color=temperature_color)
+ ax1.set_ylabel("Depth / m")
+ for ticklabel in ax1.get_xticklabels():
+ ticklabel.set_color(temperature_color)
+
+ # To plot salinity in the same axes we use twiny(). We'll use a different
+ # color to identify salinity.
+ salinity_color = (0.6, 0.1, 0.15)
+ ax2 = plt.gca().twiny()
+ iplt.plot(
+ salinity_1000m,
+ linewidth=2,
+ color=salinity_color,
+ alpha=0.75,
+ )
+ ax2.set_xlabel("Salinity / PSU", color=salinity_color)
+ for ticklabel in ax2.get_xticklabels():
+ ticklabel.set_color(salinity_color)
+ plt.tight_layout()
+ iplt.show()
+
+ # Now plot a T-S diagram using scatter. We'll use all the profiles here,
+ # and each point will be coloured according to its depth.
+ plt.figure(figsize=(6, 6))
+ depth_values = theta.coord("depth").points
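+    # iris.iterate.izip yields matching depth-profile slices of salinity and
+    # potential temperature, one pair per horizontal location.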
+ for s, t in iris.iterate.izip(salinity, theta, coords="depth"):
+ iplt.scatter(s, t, c=depth_values, marker="+", cmap="RdYlBu_r")
+ ax = plt.gca()
+ ax.set_xlabel("Salinity / PSU")
+ ax.set_ylabel("Potential Temperature / K")
+ cb = plt.colorbar(orientation="horizontal")
+ cb.set_label("Depth / m")
+ plt.tight_layout()
+ iplt.show()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/docs/gallery_code/oceanography/plot_load_nemo.py b/docs/gallery_code/oceanography/plot_load_nemo.py
new file mode 100644
index 0000000000..aac89fec0e
--- /dev/null
+++ b/docs/gallery_code/oceanography/plot_load_nemo.py
@@ -0,0 +1,61 @@
+"""
+Load a Time Series of Data From the NEMO Model
+==============================================
+
+This example demonstrates how to load multiple files containing data output by
+the NEMO model and combine them into a time series in a single cube. The
+different time dimensions in these files can prevent Iris from concatenating
+them without the intervention shown here.
+
+""" # noqa: D205, D212, D400
+
+import matplotlib.pyplot as plt
+
+import iris
+import iris.plot as iplt
+import iris.quickplot as qplt
+from iris.util import equalise_attributes, promote_aux_coord_to_dim_coord
+
+
+def main():
+ # Load the three files of sample NEMO data.
+ fname = iris.sample_data_path("NEMO/nemo_1m_*.nc")
+ cubes = iris.load(fname)
+
+ # Some attributes are unique to each file and must be removed to allow
+ # concatenation.
+ equalise_attributes(cubes)
+
+ # The cubes still cannot be concatenated because their dimension coordinate
+ # is "time_counter", which has the same value for each cube. concatenate
+ # needs distinct values in order to create a new DimCoord for the output
+ # cube. Here, each cube has a "time" auxiliary coordinate, and these do
+ # have distinct values, so we can promote them to allow concatenation.
+ for cube in cubes:
+ promote_aux_coord_to_dim_coord(cube, "time")
+
+ # The cubes can now be concatenated into a single time series.
+ cube = cubes.concatenate_cube()
+
+ # Generate a time series plot of a single point
+ plt.figure()
+ y_point_index = 100
+ x_point_index = 100
+ qplt.plot(cube[:, y_point_index, x_point_index], "o-")
+
+ # Include the point's position in the plot's title
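+    # ("\u00b0" is the unicode escape for the degree symbol.)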
+ lat_point = cube.coord("latitude").points[y_point_index, x_point_index]
+ lat_string = "{:.3f}\u00b0 {}".format(
+ abs(lat_point), "N" if lat_point > 0.0 else "S"
+ )
+ lon_point = cube.coord("longitude").points[y_point_index, x_point_index]
+ lon_string = "{:.3f}\u00b0 {}".format(
+ abs(lon_point), "E" if lon_point > 0.0 else "W"
+ )
+ plt.title("{} at {} {}".format(cube.long_name.capitalize(), lat_string, lon_string))
+
+ iplt.show()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/docs/gallery_code/oceanography/plot_orca_projection.py b/docs/gallery_code/oceanography/plot_orca_projection.py
new file mode 100644
index 0000000000..bb68056cb3
--- /dev/null
+++ b/docs/gallery_code/oceanography/plot_orca_projection.py
@@ -0,0 +1,59 @@
+"""
+Tri-Polar Grid Projected Plotting
+=================================
+
+This example demonstrates cell plots of data on the semi-structured ORCA2 model
+grid.
+
+First, the data is projected into the PlateCarree coordinate reference system.
+
+Second, four pcolormesh plots are created from this projected dataset,
+using different projections for the output image.
+
+""" # noqa: D205, D212, D400
+
+import cartopy.crs as ccrs
+import matplotlib.pyplot as plt
+
+import iris
+import iris.analysis.cartography
+import iris.plot as iplt
+import iris.quickplot as qplt
+
+
+def main():
+ # Load data
+ filepath = iris.sample_data_path("orca2_votemper.nc")
+ cube = iris.load_cube(filepath)
+
+ # Choose plot projections
+ projections = {}
+ projections["Mollweide"] = ccrs.Mollweide()
+ projections["PlateCarree"] = ccrs.PlateCarree()
+ projections["NorthPolarStereo"] = ccrs.NorthPolarStereo()
+ projections["Orthographic"] = ccrs.Orthographic(
+ central_longitude=-90, central_latitude=45
+ )
+
+ pcarree = projections["PlateCarree"]
+ # Transform cube to target projection
+ new_cube, extent = iris.analysis.cartography.project(cube, pcarree, nx=400, ny=200)
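+    # (The second value returned, "extent", is not needed for these plots.)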
+
+ # Plot data in each projection
+ for name in sorted(projections):
+ fig = plt.figure()
+ fig.suptitle("ORCA2 Data Projected to {}".format(name))
+ # Set up axes and title
+ ax = plt.subplot(projection=projections[name])
+ # Set limits
+ ax.set_global()
+        # Plot with Iris quickplot pcolormesh
+ qplt.pcolormesh(new_cube)
+ # Draw coastlines
+ ax.coastlines()
+
+ iplt.show()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/docs/gallery_tests/__init__.py b/docs/gallery_tests/__init__.py
new file mode 100644
index 0000000000..9468138e04
--- /dev/null
+++ b/docs/gallery_tests/__init__.py
@@ -0,0 +1,6 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+
+"""Gallery Tests."""
diff --git a/docs/gallery_tests/conftest.py b/docs/gallery_tests/conftest.py
new file mode 100644
index 0000000000..564a2892a2
--- /dev/null
+++ b/docs/gallery_tests/conftest.py
@@ -0,0 +1,67 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+
+"""Pytest fixtures for the gallery tests."""
+
+import pathlib
+
+import matplotlib.pyplot as plt
+import pytest
+
+import iris
+
+CURRENT_DIR = pathlib.Path(__file__).resolve()
+GALLERY_DIR = CURRENT_DIR.parents[1] / "gallery_code"
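+# GALLERY_DIR points at docs/gallery_code, the sibling of this test directory.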
+
+
+@pytest.fixture
+def image_setup_teardown():
+ """Perform setup and teardown fixture.
+
+ Ensures all figures are closed before and after test to prevent one test
+ polluting another if it fails with a figure unclosed.
+
+ """
+ plt.close("all")
+ yield
+ plt.close("all")
+
+
+@pytest.fixture
+def import_patches(monkeypatch):
+ """Replace plt.show() with a function that does nothing, also add to sys.path.
+
+ Replace plt.show() with a function that does nothing, also add all the
+ gallery examples to sys.path.
+
+ """
+
+ def no_show():
+ pass
+
+ monkeypatch.setattr(plt, "show", no_show)
+
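+    # Prepend each gallery example directory to sys.path so that the tests can
+    # import the plot scripts by their bare module names.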
+ for example_dir in GALLERY_DIR.iterdir():
+ if example_dir.is_dir():
+ monkeypatch.syspath_prepend(example_dir)
+
+ yield
+
+
+@pytest.fixture
+def iris_future_defaults():
+ """Create a fixture which resets all the iris.FUTURE settings to the defaults.
+
+ Create a fixture which resets all the iris.FUTURE settings to the defaults,
+ as otherwise changes made in one test can affect subsequent ones.
+
+ """
+ # Run with all default settings in iris.FUTURE.
+ default_future_kwargs = iris.Future().__dict__.copy()
+ for dead_option in iris.Future.deprecated_options:
+ # Avoid a warning when setting these !
+ del default_future_kwargs[dead_option]
+ with iris.FUTURE.context(**default_future_kwargs):
+ yield
diff --git a/docs/gallery_tests/test_gallery_examples.py b/docs/gallery_tests/test_gallery_examples.py
new file mode 100644
index 0000000000..39e8fe0507
--- /dev/null
+++ b/docs/gallery_tests/test_gallery_examples.py
@@ -0,0 +1,43 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+
+"""Test all the gallery examples."""
+
+import importlib
+
+import matplotlib.pyplot as plt
+import pytest
+
+from iris.tests import _RESULT_PATH
+from iris.tests.graphics import check_graphic
+
+from .conftest import GALLERY_DIR
+
+
+def gallery_examples():
+ """Entry point for generator to yield all current gallery examples."""
+ for example_file in GALLERY_DIR.glob("*/plot*.py"):
+ yield example_file.stem
+
+
+@pytest.mark.filterwarnings("error::iris.IrisDeprecation")
+@pytest.mark.parametrize("example", gallery_examples())
+def test_plot_example(
+ example,
+ image_setup_teardown,
+ import_patches,
+ iris_future_defaults,
+):
+ """Test that all figures from example code match KGO."""
+ module = importlib.import_module(example)
+
+ # Run example.
+ module.main()
+ # Loop through open figures and set each to be the current figure so check_graphic
+ # will find it.
+ for fig_num in plt.get_fignums():
+ plt.figure(fig_num)
+ image_id = f"gallery_tests.test_{example}.{fig_num - 1}"
+ check_graphic(image_id, _RESULT_PATH)
diff --git a/docs/iris/Makefile b/docs/iris/Makefile
deleted file mode 100644
index 1a66b03805..0000000000
--- a/docs/iris/Makefile
+++ /dev/null
@@ -1,47 +0,0 @@
-SUBDIRS = src
-
-html:
- @for i in $(SUBDIRS); do \
- echo "make html in $$i..."; \
- (cd $$i; $(MAKE) $(MFLAGS) $(MYMAKEFLAGS) html); done
-
-pdf:
- @for i in $(SUBDIRS); do\
- echo "make latex in $$i.."; \
- (cd $$i; $(MAKE) $(MFLAGS) $(MYMAKEFLAGS) latex); done
- echo "\def\sphinxdocclass{MO_report}" > build/latex/docs.tex
- echo "\documentclass[letterpaper,10pt,english]{MO_report}" >> build/latex/docs.tex
- tail -n +4 build/latex/Iris.tex >> build/latex/docs.tex
- sed 's/\\tableofcontents/\\tableofcontents\n\\pagenumbering\{arabic\}/' build/latex/docs.tex > build/latex/docs2.tex
- sed 's/subsection{/section{/' build/latex/docs2.tex > build/latex/documentation.tex
- (cd build/latex; pdflatex -interaction=scrollmode documentation.tex)
- # call latex again to get page numbers right...
- (cd build/latex; pdflatex -interaction=scrollmode documentation.tex);
-
-all:
- @for i in $(SUBDIRS); do \
- echo "make all in $$i..."; \
- (cd $$i; $(MAKE) $(MFLAGS) $(MYMAKEFLAGS) all); done
-install:
- @for i in $(SUBDIRS); do \
- echo "Installing in $$i..."; \
- (cd $$i; $(MAKE) $(MFLAGS) $(MYMAKEFLAGS) install); done
-build:
- @for i in $(SUBDIRS); do \
- echo "Clearing in $$i..."; \
- (cd $$i; $(MAKE) $(MFLAGS) $(MYMAKEFLAGS) build); done
-clean:
- @for i in $(SUBDIRS); do \
- echo "Clearing in $$i..."; \
- (cd $$i; $(MAKE) $(MFLAGS) $(MYMAKEFLAGS) clean); done
-
-doctest:
- @for i in $(SUBDIRS); do \
- echo "Running doctest in $$i..."; \
- (cd $$i; $(MAKE) $(MFLAGS) $(MYMAKEFLAGS) doctest); done
-
-extest:
- @echo
- @echo "Running \"example_code/graphics\" tests..."
- @echo
- python -m unittest discover -v -t .
diff --git a/docs/iris/example_code/General/SOI_filtering.py b/docs/iris/example_code/General/SOI_filtering.py
deleted file mode 100644
index a488b5865e..0000000000
--- a/docs/iris/example_code/General/SOI_filtering.py
+++ /dev/null
@@ -1,96 +0,0 @@
-"""
-Applying a filter to a time-series
-==================================
-
-This example demonstrates low pass filtering a time-series by applying a
-weighted running mean over the time dimension.
-
-The time-series used is the Darwin-only Southern Oscillation index (SOI),
-which is filtered using two different Lanczos filters, one to filter out
-time-scales of less than two years and one to filter out time-scales of
-less than 7 years.
-
-References
-----------
-
- Duchon C. E. (1979) Lanczos Filtering in One and Two Dimensions.
- Journal of Applied Meteorology, Vol 18, pp 1016-1022.
-
- Trenberth K. E. (1984) Signal Versus Noise in the Southern Oscillation.
- Monthly Weather Review, Vol 112, pp 326-332
-
-"""
-import numpy as np
-import matplotlib.pyplot as plt
-import iris
-import iris.plot as iplt
-
-
-def low_pass_weights(window, cutoff):
- """Calculate weights for a low pass Lanczos filter.
-
- Args:
-
- window: int
- The length of the filter window.
-
- cutoff: float
- The cutoff frequency in inverse time steps.
-
- """
- order = ((window - 1) // 2) + 1
- nwts = 2 * order + 1
- w = np.zeros([nwts])
- n = nwts // 2
- w[n] = 2 * cutoff
- k = np.arange(1., n)
- sigma = np.sin(np.pi * k / n) * n / (np.pi * k)
- firstfactor = np.sin(2. * np.pi * cutoff * k) / (np.pi * k)
- w[n-1:0:-1] = firstfactor * sigma
- w[n+1:-1] = firstfactor * sigma
- return w[1:-1]
-
-
-def main():
- # Load the monthly-valued Southern Oscillation Index (SOI) time-series.
- fname = iris.sample_data_path('SOI_Darwin.nc')
- soi = iris.load_cube(fname)
-
- # Window length for filters.
- window = 121
-
- # Construct 2-year (24-month) and 7-year (84-month) low pass filters
- # for the SOI data which is monthly.
- wgts24 = low_pass_weights(window, 1. / 24.)
- wgts84 = low_pass_weights(window, 1. / 84.)
-
- # Apply each filter using the rolling_window method used with the weights
- # keyword argument. A weighted sum is required because the magnitude of
- # the weights are just as important as their relative sizes.
- soi24 = soi.rolling_window('time',
- iris.analysis.SUM,
- len(wgts24),
- weights=wgts24)
- soi84 = soi.rolling_window('time',
- iris.analysis.SUM,
- len(wgts84),
- weights=wgts84)
-
- # Plot the SOI time series and both filtered versions.
- plt.figure(figsize=(9, 4))
- iplt.plot(soi, color='0.7', linewidth=1., linestyle='-',
- alpha=1., label='no filter')
- iplt.plot(soi24, color='b', linewidth=2., linestyle='-',
- alpha=.7, label='2-year filter')
- iplt.plot(soi84, color='r', linewidth=2., linestyle='-',
- alpha=.7, label='7-year filter')
- plt.ylim([-4, 4])
- plt.title('Southern Oscillation Index (Darwin Only)')
- plt.xlabel('Time')
- plt.ylabel('SOI')
- plt.legend(fontsize=10)
- iplt.show()
-
-
-if __name__ == '__main__':
- main()
diff --git a/docs/iris/example_code/General/__init__.py b/docs/iris/example_code/General/__init__.py
deleted file mode 100644
index f67741cf37..0000000000
--- a/docs/iris/example_code/General/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-"""
-General visualisation examples
-==============================
-"""
diff --git a/docs/iris/example_code/General/anomaly_log_colouring.py b/docs/iris/example_code/General/anomaly_log_colouring.py
deleted file mode 100644
index d3f71b6ddc..0000000000
--- a/docs/iris/example_code/General/anomaly_log_colouring.py
+++ /dev/null
@@ -1,107 +0,0 @@
-"""
-Colouring anomaly data with logarithmic scaling
-===============================================
-
-In this example, we need to plot anomaly data where the values have a
-"logarithmic" significance -- i.e. we want to give approximately equal ranges
-of colour between data values of, say, 1 and 10 as between 10 and 100.
-
-As the data range also contains zero, that obviously does not suit a simple
-logarithmic interpretation. However, values of less than a certain absolute
-magnitude may be considered "not significant", so we put these into a separate
-"zero band" which is plotted in white.
-
-To do this, we create a custom value mapping function (normalization) using
-the matplotlib Norm class `matplotlib.colours.SymLogNorm
-`_.
-We use this to make a cell-filled pseudocolour plot with a colorbar.
-
-NOTE: By "pseudocolour", we mean that each data point is drawn as a "cell"
-region on the plot, coloured according to its data value.
-This is provided in Iris by the functions :meth:`iris.plot.pcolor` and
-:meth:`iris.plot.pcolormesh`, which call the underlying matplotlib
-functions of the same names (i.e. `matplotlib.pyplot.pcolor
-`_
-and `matplotlib.pyplot.pcolormesh
-`_).
-See also: http://en.wikipedia.org/wiki/False_color#Pseudocolor.
-
-"""
-import cartopy.crs as ccrs
-import iris
-import iris.coord_categorisation
-import iris.plot as iplt
-import matplotlib.pyplot as plt
-import matplotlib.colors as mcols
-
-
-def main():
- # Load a sample air temperatures sequence.
- file_path = iris.sample_data_path('E1_north_america.nc')
- temperatures = iris.load_cube(file_path)
-
- # Create a year-number coordinate from the time information.
- iris.coord_categorisation.add_year(temperatures, 'time')
-
- # Create a sample anomaly field for one chosen year, by extracting that
- # year and subtracting the time mean.
- sample_year = 1982
- year_temperature = temperatures.extract(iris.Constraint(year=sample_year))
- time_mean = temperatures.collapsed('time', iris.analysis.MEAN)
- anomaly = year_temperature - time_mean
-
- # Construct a plot title string explaining which years are involved.
- years = temperatures.coord('year').points
- plot_title = 'Temperature anomaly'
- plot_title += '\n{} differences from {}-{} average.'.format(
- sample_year, years[0], years[-1])
-
- # Define scaling levels for the logarithmic colouring.
- minimum_log_level = 0.1
- maximum_scale_level = 3.0
-
- # Use a standard colour map which varies blue-white-red.
- # For suitable options, see the 'Diverging colormaps' section in:
- # http://matplotlib.org/examples/color/colormaps_reference.html
- anom_cmap = 'bwr'
-
- # Create a 'logarithmic' data normalization.
- anom_norm = mcols.SymLogNorm(linthresh=minimum_log_level,
- linscale=0,
- vmin=-maximum_scale_level,
- vmax=maximum_scale_level)
- # Setting "linthresh=minimum_log_level" makes its non-logarithmic
- # data range equal to our 'zero band'.
- # Setting "linscale=0" maps the whole zero band to the middle colour value
- # (i.e. 0.5), which is the neutral point of a "diverging" style colormap.
-
- # Create an Axes, specifying the map projection.
- plt.axes(projection=ccrs.LambertConformal())
-
- # Make a pseudocolour plot using this colour scheme.
- mesh = iplt.pcolormesh(anomaly, cmap=anom_cmap, norm=anom_norm)
-
- # Add a colourbar, with extensions to show handling of out-of-range values.
- bar = plt.colorbar(mesh, orientation='horizontal', extend='both')
-
- # Set some suitable fixed "logarithmic" colourbar tick positions.
- tick_levels = [-3, -1, -0.3, 0.0, 0.3, 1, 3]
- bar.set_ticks(tick_levels)
-
- # Modify the tick labels so that the centre one shows "+/-".
- tick_levels[3] = r'$\pm${:g}'.format(minimum_log_level)
- bar.set_ticklabels(tick_levels)
-
- # Label the colourbar to show the units.
- bar.set_label('[{}, log scale]'.format(anomaly.units))
-
- # Add coastlines and a title.
- plt.gca().coastlines()
- plt.title(plot_title)
-
- # Display the result.
- iplt.show()
-
-
-if __name__ == '__main__':
- main()
diff --git a/docs/iris/example_code/General/coriolis_plot.py b/docs/iris/example_code/General/coriolis_plot.py
deleted file mode 100644
index 273faf6470..0000000000
--- a/docs/iris/example_code/General/coriolis_plot.py
+++ /dev/null
@@ -1,79 +0,0 @@
-"""
-Deriving the Coriolis frequency over the globe
-==============================================
-
-This code computes the Coriolis frequency and stores it in a cube with
-associated metadata. It then plots the Coriolis frequency on an orthographic
-projection.
-
-"""
-
-import cartopy.crs as ccrs
-import iris
-from iris.coord_systems import GeogCS
-import iris.plot as iplt
-import matplotlib.pyplot as plt
-import numpy as np
-
-
-def main():
- # Start with arrays for latitudes and longitudes, with a given number of
- # coordinates in the arrays.
- coordinate_points = 200
- longitudes = np.linspace(-180.0, 180.0, coordinate_points)
- latitudes = np.linspace(-90.0, 90.0, coordinate_points)
- lon2d, lat2d = np.meshgrid(longitudes, latitudes)
-
- # Omega is the Earth's rotation rate, expressed in radians per second
- omega = 7.29e-5
-
- # The data for our cube is the Coriolis frequency,
- # `f = 2 * omega * sin(phi)`, which is computed for each grid point over
- # the globe from the 2-dimensional latitude array.
- data = 2. * omega * np.sin(np.deg2rad(lat2d))
-
- # We now need to define a coordinate system for the plot.
- # Here we'll use GeogCS; 6371229 is the radius of the Earth in metres.
- cs = GeogCS(6371229)
-
- # The Iris coords module turns the latitude list into a coordinate array.
- # Coords then applies an appropriate standard name and unit to it.
- lat_coord = iris.coords.DimCoord(latitudes,
- standard_name='latitude',
- units='degrees',
- coord_system=cs)
-
- # The above process is repeated for the longitude coordinates.
- lon_coord = iris.coords.DimCoord(longitudes,
- standard_name='longitude',
- units='degrees',
- coord_system=cs)
-
- # Now we add bounds to our latitude and longitude coordinates.
- # We want simple, contiguous bounds for our regularly-spaced coordinate
- # points so we use the guess_bounds() method of the coordinate. For more
- # complex coordinates, we could derive and set the bounds manually.
- lat_coord.guess_bounds()
- lon_coord.guess_bounds()
-
- # Now we input our data array into the cube.
- new_cube = iris.cube.Cube(data,
- standard_name='coriolis_parameter',
- units='s-1',
- dim_coords_and_dims=[(lat_coord, 0),
- (lon_coord, 1)])
-
- # Now let's plot our cube, along with coastlines, a title and an
- # appropriately-labelled colour bar:
- ax = plt.axes(projection=ccrs.Orthographic())
- ax.coastlines(resolution='10m')
- mesh = iplt.pcolormesh(new_cube, cmap='seismic')
- tick_levels = [-0.00012, -0.00006, 0.0, 0.00006, 0.00012]
- plt.colorbar(mesh, orientation='horizontal', label='s-1',
- ticks=tick_levels, format='%.1e')
- plt.title('Coriolis frequency')
- plt.show()
-
-
-if __name__ == '__main__':
- main()
diff --git a/docs/iris/example_code/General/cross_section.py b/docs/iris/example_code/General/cross_section.py
deleted file mode 100644
index e0d05fb230..0000000000
--- a/docs/iris/example_code/General/cross_section.py
+++ /dev/null
@@ -1,42 +0,0 @@
-"""
-Cross section plots
-===================
-
-This example demonstrates contour plots of a cross-sectioned multi-dimensional
-cube which features a hybrid height vertical coordinate system.
-
-"""
-
-import matplotlib.pyplot as plt
-
-import iris
-import iris.plot as iplt
-import iris.quickplot as qplt
-
-
-def main():
- # Load some test data.
- fname = iris.sample_data_path('hybrid_height.nc')
- theta = iris.load_cube(fname, 'air_potential_temperature')
-
- # Extract a single height vs longitude cross-section. N.B. This could
- # easily be changed to extract a specific slice, or even to loop over *all*
- # cross section slices.
- cross_section = next(theta.slices(['grid_longitude',
- 'model_level_number']))
-
- qplt.contourf(cross_section, coords=['grid_longitude', 'altitude'],
- cmap='RdBu_r')
- iplt.show()
-
- # Now do the equivalent plot, only against model level
- plt.figure()
-
- qplt.contourf(cross_section,
- coords=['grid_longitude', 'model_level_number'],
- cmap='RdBu_r')
- iplt.show()
-
-
-if __name__ == '__main__':
- main()
diff --git a/docs/iris/example_code/General/custom_aggregation.py b/docs/iris/example_code/General/custom_aggregation.py
deleted file mode 100644
index d8df506469..0000000000
--- a/docs/iris/example_code/General/custom_aggregation.py
+++ /dev/null
@@ -1,94 +0,0 @@
-"""
-Calculating a custom statistic
-==============================
-
-This example shows how to define and use a custom
-:class:`iris.analysis.Aggregator`, that provides a new statistical operator for
-use with cube aggregation functions such as :meth:`~iris.cube.Cube.collapsed`,
-:meth:`~iris.cube.Cube.aggregated_by` or
-:meth:`~iris.cube.Cube.rolling_window`.
-
-In this case, we have a 240-year sequence of yearly average surface temperature
-over North America, and we want to calculate in how many years these exceed a
-certain temperature over a spell of 5 years or more.
-
-"""
-import matplotlib.pyplot as plt
-import numpy as np
-
-import iris
-from iris.analysis import Aggregator
-import iris.plot as iplt
-import iris.quickplot as qplt
-from iris.util import rolling_window
-
-
-# Define a function to perform the custom statistical operation.
-# Note: in order to meet the requirements of iris.analysis.Aggregator, it must
-# do the calculation over an arbitrary (given) data axis.
-def count_spells(data, threshold, axis, spell_length):
- """
- Function to calculate the number of points in a sequence where the value
- has exceeded a threshold value for at least a certain number of timepoints.
-
- Generalised to operate on multiple time sequences arranged on a specific
- axis of a multidimensional array.
-
- Args:
-
- * data (array):
- raw data to be compared with value threshold.
-
- * threshold (float):
- threshold point for 'significant' datapoints.
-
- * axis (int):
- number of the array dimension mapping the time sequences.
- (Can also be negative, e.g. '-1' means last dimension)
-
- * spell_length (int):
- number of consecutive times at which value > threshold to "count".
-
- """
- if axis < 0:
- # just cope with negative axis numbers
- axis += data.ndim
- # Threshold the data to find the 'significant' points.
- data_hits = data > threshold
- # Make an array with data values "windowed" along the time axis.
- hit_windows = rolling_window(data_hits, window=spell_length, axis=axis)
- # Find the windows "full of True-s" (along the added 'window axis').
- full_windows = np.all(hit_windows, axis=axis+1)
- # Count points fulfilling the condition (along the time axis).
- spell_point_counts = np.sum(full_windows, axis=axis, dtype=int)
- return spell_point_counts
-
-
-def main():
- # Load the whole time-sequence as a single cube.
- file_path = iris.sample_data_path('E1_north_america.nc')
- cube = iris.load_cube(file_path)
-
- # Make an aggregator from the user function.
- SPELL_COUNT = Aggregator('spell_count',
- count_spells,
- units_func=lambda units: 1)
-
- # Define the parameters of the test.
- threshold_temperature = 280.0
- spell_years = 5
-
- # Calculate the statistic.
- warm_periods = cube.collapsed('time', SPELL_COUNT,
- threshold=threshold_temperature,
- spell_length=spell_years)
- warm_periods.rename('Number of 5-year warm spells in 240 years')
-
- # Plot the results.
- qplt.contourf(warm_periods, cmap='RdYlBu_r')
- plt.gca().coastlines()
- iplt.show()
-
-
-if __name__ == '__main__':
- main()
diff --git a/docs/iris/example_code/General/custom_file_loading.py b/docs/iris/example_code/General/custom_file_loading.py
deleted file mode 100644
index d6fddf8464..0000000000
--- a/docs/iris/example_code/General/custom_file_loading.py
+++ /dev/null
@@ -1,317 +0,0 @@
-"""
-Loading a cube from a custom file format
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-This example shows how a custom text file can be loaded using the standard Iris
-load mechanism.
-
-The first stage in the process is to define an Iris :class:`FormatSpecification
-` for the file format. To create a
-format specification we need to define the following:
-
-* format_name - Some text that describes the format specification we are
- creating
-* file_element - FileElement object describing the element which identifies
- this FormatSpecification.
-
- Possible values are:
-
- ``iris.io.format_picker.MagicNumber(n, o)``
- The n bytes from the file at offset o.
-
- ``iris.io.format_picker.FileExtension()``
- The file's extension.
-
- ``iris.io.format_picker.LeadingLine()``
- The first line of the file.
-
-* file_element_value - The value that the file_element should take if a file
- matches this FormatSpecification
-* handler (optional) - A generator function that will be called when the file
- specification has been identified. This function is provided by the user and
- provides the means to parse the whole file. If no handler function is
- provided, then identification is still possible without any handling.
-
- The handler function must define the following arguments:
-
- * list of filenames to process
- * callback function - An optional function to filter/alter the Iris cubes
- returned
-
- The handler function must be defined as generator which yields each cube as
- they are produced.
-
-* priority (optional) - Integer giving a priority for considering this
- specification where higher priority means sooner consideration
-
-In the following example, the function :func:`load_NAME_III` has been defined
-to handle the loading of the raw data from the custom file format. This
-function is called from :func:`NAME_to_cube` which uses this data to create and
-yield Iris cubes.
-
-In the ``main()`` function the filenames are loaded via the ``iris.load_cube``
-function which automatically invokes the ``FormatSpecification`` we defined.
-The cube returned from the load function is then used to produce a plot.
-
-"""
-import datetime
-
-import matplotlib.pyplot as plt
-import numpy as np
-
-from cf_units import Unit, CALENDAR_GREGORIAN
-
-import iris
-import iris.coords as icoords
-import iris.coord_systems as icoord_systems
-import iris.fileformats
-import iris.io.format_picker as format_picker
-import iris.plot as iplt
-
-
-UTC_format = '%H%M%Z %d/%m/%Y'
-
-FLOAT_HEADERS = ['X grid origin', 'Y grid origin',
- 'X grid resolution', 'Y grid resolution']
-INT_HEADERS = ['X grid size', 'Y grid size', 'Number of fields']
-DATE_HEADERS = ['Run time', 'Start of release', 'End of release']
-COLUMN_NAMES = ['species_category', 'species', 'cell_measure', 'quantity',
- 'unit', 'z_level', 'time']
-
-
-def load_NAME_III(filename):
- """
- Loads the Met Office's NAME III grid output files returning headers, column
- definitions and data arrays as 3 separate lists.
-
- """
-
- # Loading a file gives a generator of lines which can be progressed using
- # the next() function. This will come in handy as we wish to progress
- # through the file line by line.
- with open(filename) as file_handle:
- # Define a dictionary which can hold the header metadata for this file.
- headers = {}
-
- # Skip the NAME header of the file which looks something like
- # 'NAME III (version X.X.X)'.
- next(file_handle)
-
- # Read the next 16 lines of header information, putting the form
- # "header name: header value" into a dictionary.
- for _ in range(16):
- header_name, header_value = next(file_handle).split(':')
-
- # Strip off any spurious space characters in the header name and
- # value.
- header_name = header_name.strip()
- header_value = header_value.strip()
-
- # Cast some headers into floats or integers if they match a given
- # header name.
- if header_name in FLOAT_HEADERS:
- header_value = float(header_value)
- elif header_name in INT_HEADERS:
- header_value = int(header_value)
- elif header_name in DATE_HEADERS:
- # convert the time to python datetimes
- header_value = datetime.datetime.strptime(header_value,
- UTC_format)
-
- headers[header_name] = header_value
-
- # Skip the next blank line in the file.
- next(file_handle)
-
- # Read the next 7 lines of column definitions.
- column_headings = {}
- for column_header_name in COLUMN_NAMES:
- column_headings[column_header_name] = [
- col.strip() for col in next(file_handle).split(',')
- ][:-1]
-
- # Convert the time to python datetimes.
- new_time_column_header = []
- for i, t in enumerate(column_headings['time']):
- # The first 4 columns aren't time at all, so don't convert them to
- # datetimes.
- if i >= 4:
- t = datetime.datetime.strptime(t, UTC_format)
- new_time_column_header.append(t)
- column_headings['time'] = new_time_column_header
-
- # Skip the blank line after the column headers.
- next(file_handle)
-
- # Make a list of data arrays to hold the data for each column.
- data_shape = (headers['Y grid size'], headers['X grid size'])
- data_arrays = [np.zeros(data_shape, dtype=np.float32)
- for i in range(headers['Number of fields'])]
-
- # Iterate over the remaining lines which represent the data in a column
- # form.
- for line in file_handle:
- # Split the line by comma, removing the last empty column caused by
- # the trailing comma.
- vals = line.split(',')[:-1]
-
- # Cast the x and y grid positions to floats and convert them to
- # zero based indices (the numbers are 1 based grid positions where
- # 0.5 represents half a grid point.)
- x = int(float(vals[0]) - 1.5)
- y = int(float(vals[1]) - 1.5)
-
- # Populate the data arrays (i.e. all columns but the leading 4).
- for i, data_array in enumerate(data_arrays):
- data_array[y, x] = float(vals[i + 4])
-
- return headers, column_headings, data_arrays
-
-
-def NAME_to_cube(filenames, callback):
- """
- Returns a generator of cubes given a list of filenames and a callback.
- """
-
- for filename in filenames:
- header, column_headings, data_arrays = load_NAME_III(filename)
-
- for i, data_array in enumerate(data_arrays):
- # turn the dictionary of column headers with a list of header
- # information for each field into a dictionary of headers for just
- # this field. Ignore the first 4 columns of grid position (data was
- # located with the data array).
- field_headings = dict((k, v[i + 4])
- for k, v in column_headings.items())
-
- # make an cube
- cube = iris.cube.Cube(data_array)
-
- # define the name and unit
- name = ('%s %s' % (field_headings['species'],
- field_headings['quantity']))
- name = name.upper().replace(' ', '_')
- cube.rename(name)
- # Some units are badly encoded in the file, fix this by putting a
- # space in between. (if gs is not found, then the string will be
- # returned unchanged)
- cube.units = field_headings['unit'].replace('gs', 'g s')
-
- # define and add the singular coordinates of the field (flight
- # level, time etc.)
- cube.add_aux_coord(icoords.AuxCoord(field_headings['z_level'],
- long_name='flight_level',
- units='1'))
-
- # define the time unit and use it to serialise the datetime for the
- # time coordinate
- time_unit = Unit('hours since epoch', calendar=CALENDAR_GREGORIAN)
- time_coord = icoords.AuxCoord(
- time_unit.date2num(field_headings['time']),
- standard_name='time',
- units=time_unit)
- cube.add_aux_coord(time_coord)
-
- # build a coordinate system which can be referenced by latitude and
- # longitude coordinates
- lat_lon_coord_system = icoord_systems.GeogCS(6371229)
-
- # build regular latitude and longitude coordinates which have
- # bounds
- start = header['X grid origin'] + header['X grid resolution']
- step = header['X grid resolution']
- count = header['X grid size']
- pts = start + np.arange(count, dtype=np.float32) * step
- lon_coord = icoords.DimCoord(pts, standard_name='longitude',
- units='degrees',
- coord_system=lat_lon_coord_system)
- lon_coord.guess_bounds()
-
- start = header['Y grid origin'] + header['Y grid resolution']
- step = header['Y grid resolution']
- count = header['Y grid size']
- pts = start + np.arange(count, dtype=np.float32) * step
- lat_coord = icoords.DimCoord(pts, standard_name='latitude',
- units='degrees',
- coord_system=lat_lon_coord_system)
- lat_coord.guess_bounds()
-
- # add the latitude and longitude coordinates to the cube, with
- # mappings to data dimensions
- cube.add_dim_coord(lat_coord, 0)
- cube.add_dim_coord(lon_coord, 1)
-
- # implement standard iris callback capability. Although callbacks
- # are not used in this example, the standard mechanism for a custom
- # loader to implement a callback is shown:
- cube = iris.io.run_callback(callback, cube,
- [header, field_headings, data_array],
- filename)
-
- # yield the cube created (the loop will continue when the next()
- # element is requested)
- yield cube
-
-
-# Create a format_picker specification of the NAME file format, giving it a
-# priority greater than the built-in NAME loader.
-_NAME_III_spec = format_picker.FormatSpecification(
- 'Name III',
- format_picker.LeadingLine(),
- lambda line: line.startswith(b"NAME III"),
- NAME_to_cube,
- priority=6)
-
-# Register the NAME loader with iris
-iris.fileformats.FORMAT_AGENT.add_spec(_NAME_III_spec)
-
-
-# ---------------------------------------------
-# | Using the new loader |
-# ---------------------------------------------
-
-def main():
- fname = iris.sample_data_path('NAME_output.txt')
-
- boundary_volc_ash_constraint = iris.Constraint(
- 'VOLCANIC_ASH_AIR_CONCENTRATION',
- flight_level='From FL000 - FL200')
-
- # Callback shown as None to illustrate where a cube-level callback function
- # would be used if required
- cube = iris.load_cube(fname, boundary_volc_ash_constraint, callback=None)
-
- # draw contour levels for the data (the top level is just a catch-all)
- levels = (0.0002, 0.002, 0.004, 1e10)
- cs = iplt.contourf(cube, levels=levels,
- colors=('#80ffff', '#939598', '#e00404'),
- )
-
- # draw a black outline at the lowest contour to highlight affected areas
- iplt.contour(cube, levels=(levels[0], 100),
- colors='black')
-
- # set an extent and a background image for the map
- ax = plt.gca()
- ax.set_extent((-90, 20, 20, 75))
- ax.stock_img('ne_shaded')
-
- # make a legend, with custom labels, for the coloured contour set
- artists, _ = cs.legend_elements()
- labels = [
- r'$%s < x \leq %s$' % (levels[0], levels[1]),
- r'$%s < x \leq %s$' % (levels[1], levels[2]),
- r'$x > %s$' % levels[2]
- ]
- ax.legend(artists, labels, title='Ash concentration / g m-3',
- loc='upper left')
-
- time = cube.coord('time')
- time_date = time.units.num2date(time.points[0]).strftime(UTC_format)
- plt.title('Volcanic ash concentration forecast\nvalid at %s' % time_date)
-
- iplt.show()
-
-
-if __name__ == '__main__':
- main()
diff --git a/docs/iris/example_code/General/global_map.py b/docs/iris/example_code/General/global_map.py
deleted file mode 100644
index 4ed8b97443..0000000000
--- a/docs/iris/example_code/General/global_map.py
+++ /dev/null
@@ -1,35 +0,0 @@
-"""
-Quickplot of a 2d cube on a map
-===============================
-
-This example demonstrates a contour plot of global air temperature. The plot
-title and the labels for the axes are automatically derived from the metadata.
-
-"""
-import cartopy.crs as ccrs
-import matplotlib.pyplot as plt
-
-import iris
-import iris.plot as iplt
-import iris.quickplot as qplt
-
-
-def main():
- fname = iris.sample_data_path('air_temp.pp')
- temperature = iris.load_cube(fname)
-
- # Plot #1: contourf with axes longitude from -180 to 180
- plt.figure(figsize=(12, 5))
- plt.subplot(121)
- qplt.contourf(temperature, 15)
- plt.gca().coastlines()
-
- # Plot #2: contourf with axes longitude from 0 to 360
- proj = ccrs.PlateCarree(central_longitude=-180.0)
- plt.subplot(122, projection=proj)
- qplt.contourf(temperature, 15)
- plt.gca().coastlines()
- iplt.show()
-
-if __name__ == '__main__':
- main()
diff --git a/docs/iris/example_code/General/inset_plot.py b/docs/iris/example_code/General/inset_plot.py
deleted file mode 100644
index f2ae2d1155..0000000000
--- a/docs/iris/example_code/General/inset_plot.py
+++ /dev/null
@@ -1,66 +0,0 @@
-"""
-Test Data Showing Inset Plots
-=============================
-
-This example demonstrates the use of a single 3D data cube with time, latitude
-and longitude dimensions to plot a temperature series for a single latitude
-coordinate, with an inset plot of the data region.
-
-"""
-
-import matplotlib.pyplot as plt
-import numpy as np
-import iris
-import cartopy.crs as ccrs
-import iris.quickplot as qplt
-import iris.plot as iplt
-
-
-def main():
- cube1 = iris.load_cube(iris.sample_data_path('ostia_monthly.nc'))
- # Slice into cube to retrieve data for the inset map showing the
- # data region
- region = cube1[-1, :, :]
- # Average over latitude to reduce cube to 1 dimension
- plot_line = region.collapsed('latitude', iris.analysis.MEAN)
-
- # Open a window for plotting
- fig = plt.figure()
- # Add a single subplot (axes). Could also use "ax_main = plt.subplot()"
- ax_main = fig.add_subplot(1, 1, 1)
- # Produce a quick plot of the 1D cube
- qplt.plot(plot_line)
-
- # Set x limits to match the data
- ax_main.set_xlim(0, plot_line.coord('longitude').points.max())
- # Adjust the y limits so that the inset map won't clash with the main plot
- ax_main.set_ylim(294, 310)
- ax_main.set_title('Meridional Mean Temperature')
- # Add grid lines
- ax_main.grid()
-
- # Add a second set of axes specifying the fractional coordinates within
- # the figure with bottom left corner at x=0.55, y=0.58 with width
- # 0.3 and height 0.25.
- # Also specify the projection
- ax_sub = fig.add_axes([0.55, 0.58, 0.3, 0.25],
- projection=ccrs.Mollweide(central_longitude=180))
-
- # Use iris.plot (iplt) here so colour bar properties can be specified
- # Also use a sequential colour scheme to reduce confusion for those with
- # colour-blindness
- iplt.pcolormesh(region, cmap='Blues')
- # Manually set the orientation and tick marks on your colour bar
- ticklist = np.linspace(np.min(region.data), np.max(region.data), 4)
- plt.colorbar(orientation='horizontal', ticks=ticklist)
- ax_sub.set_title('Data Region')
- # Add coastlines
- ax_sub.coastlines()
- # Request to show the entire map, using the colour mesh on the data region only
- ax_sub.set_global()
-
- qplt.show()
-
-
-if __name__ == '__main__':
- main()
diff --git a/docs/iris/example_code/General/lineplot_with_legend.py b/docs/iris/example_code/General/lineplot_with_legend.py
deleted file mode 100644
index 4ce80a9569..0000000000
--- a/docs/iris/example_code/General/lineplot_with_legend.py
+++ /dev/null
@@ -1,47 +0,0 @@
-"""
-Multi-line temperature profile plot
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-"""
-import matplotlib.pyplot as plt
-
-import iris
-import iris.plot as iplt
-import iris.quickplot as qplt
-
-
-def main():
- fname = iris.sample_data_path('air_temp.pp')
-
- # Load exactly one cube from the given file.
- temperature = iris.load_cube(fname)
-
- # We only want a small number of latitudes, so filter some out
- # using "extract".
- temperature = temperature.extract(
- iris.Constraint(latitude=lambda cell: 68 <= cell < 78))
-
- for cube in temperature.slices('longitude'):
-
- # Create a string label to identify this cube (i.e. latitude: value).
- cube_label = 'latitude: %s' % cube.coord('latitude').points[0]
-
- # Plot the cube, and associate it with a label.
- qplt.plot(cube, label=cube_label)
-
- # Add the legend with 2 columns.
- plt.legend(ncol=2)
-
- # Put a grid on the plot.
- plt.grid(True)
-
- # Tell matplotlib not to extend the plot axes range to nicely
- # rounded numbers.
- plt.axis('tight')
-
- # Finally, show it.
- iplt.show()
-
-
-if __name__ == '__main__':
- main()
diff --git a/docs/iris/example_code/General/orca_projection.py b/docs/iris/example_code/General/orca_projection.py
deleted file mode 100644
index fb44221221..0000000000
--- a/docs/iris/example_code/General/orca_projection.py
+++ /dev/null
@@ -1,58 +0,0 @@
-"""
-Tri-Polar Grid Projected Plotting
-=================================
-
-This example demonstrates cell plots of data on the semi-structured ORCA2 model
-grid.
-
-First, the data is projected into the PlateCarree coordinate reference system.
-
-Second, four pcolormesh plots are created from this projected dataset,
-using different projections for the output image.
-
-"""
-
-import matplotlib.pyplot as plt
-
-import cartopy.crs as ccrs
-import iris
-import iris.analysis.cartography
-import iris.plot as iplt
-import iris.quickplot as qplt
-
-
-def main():
- # Load data
- filepath = iris.sample_data_path('orca2_votemper.nc')
- cube = iris.load_cube(filepath)
-
- # Choose plot projections
- projections = {}
- projections['Mollweide'] = ccrs.Mollweide()
- projections['PlateCarree'] = ccrs.PlateCarree()
- projections['NorthPolarStereo'] = ccrs.NorthPolarStereo()
- projections['Orthographic'] = ccrs.Orthographic(central_longitude=-90,
- central_latitude=45)
-
- pcarree = projections['PlateCarree']
- # Transform cube to target projection
- new_cube, extent = iris.analysis.cartography.project(cube, pcarree,
- nx=400, ny=200)
-
- # Plot data in each projection
- for name in sorted(projections):
- fig = plt.figure()
- fig.suptitle('ORCA2 Data Projected to {}'.format(name))
- # Set up axes and title
- ax = plt.subplot(projection=projections[name])
- # Set limits
- ax.set_global()
- # plot with Iris quickplot pcolormesh
- qplt.pcolormesh(new_cube)
- # Draw coastlines
- ax.coastlines()
-
- iplt.show()
-
-if __name__ == '__main__':
- main()
diff --git a/docs/iris/example_code/General/polar_stereo.py b/docs/iris/example_code/General/polar_stereo.py
deleted file mode 100644
index f1cefdc903..0000000000
--- a/docs/iris/example_code/General/polar_stereo.py
+++ /dev/null
@@ -1,28 +0,0 @@
-"""
-Example of a polar stereographic plot
-=====================================
-
-Demonstrates plotting data that are defined on a polar stereographic
-projection.
-
-"""
-
-import matplotlib.pyplot as plt
-
-import iris
-import iris.plot as iplt
-import iris.quickplot as qplt
-
-
-def main():
- file_path = iris.sample_data_path('polar_stereo.grib2')
- cube = iris.load_cube(file_path)
- qplt.contourf(cube)
- ax = plt.gca()
- ax.coastlines()
- ax.gridlines()
- iplt.show()
-
-
-if __name__ == '__main__':
- main()
diff --git a/docs/iris/example_code/General/polynomial_fit.py b/docs/iris/example_code/General/polynomial_fit.py
deleted file mode 100644
index 84f3265dd1..0000000000
--- a/docs/iris/example_code/General/polynomial_fit.py
+++ /dev/null
@@ -1,52 +0,0 @@
-"""
-Fitting a polynomial
-====================
-
-This example demonstrates computing a polynomial fit to 1D data from an Iris
-cube, adding the fit to the cube's metadata, and plotting both the 1D data and
-the fit.
-
-"""
-
-import matplotlib.pyplot as plt
-import numpy as np
-
-import iris
-import iris.quickplot as qplt
-
-
-def main():
- # Load some test data.
- fname = iris.sample_data_path('A1B_north_america.nc')
- cube = iris.load_cube(fname)
-
- # Extract a single time series at a latitude and longitude point.
- location = next(cube.slices(['time']))
-
- # Calculate a polynomial fit to the data at this time series.
- x_points = location.coord('time').points
- y_points = location.data
- degree = 2
-
- p = np.polyfit(x_points, y_points, degree)
- y_fitted = np.polyval(p, x_points)
-
- # Add the polynomial fit values to the time series to take
- # full advantage of Iris plotting functionality.
- long_name = 'degree_{}_polynomial_fit_of_{}'.format(degree, cube.name())
- fit = iris.coords.AuxCoord(y_fitted, long_name=long_name,
- units=location.units)
- location.add_aux_coord(fit, 0)
-
- qplt.plot(location.coord('time'), location, label='data')
- qplt.plot(location.coord('time'),
- location.coord(long_name),
- 'g-', label='polynomial fit')
- plt.legend(loc='best')
- plt.title('Trend of US air temperature over time')
-
- qplt.show()
-
-
-if __name__ == '__main__':
- main()
diff --git a/docs/iris/example_code/General/projections_and_annotations.py b/docs/iris/example_code/General/projections_and_annotations.py
deleted file mode 100644
index b4cb8b1eb7..0000000000
--- a/docs/iris/example_code/General/projections_and_annotations.py
+++ /dev/null
@@ -1,115 +0,0 @@
-"""
-Plotting in different projections
-=================================
-
-This example shows how to overlay data and graphics in different projections,
-demonstrating various features of Iris, Cartopy and matplotlib.
-
-We wish to overlay two datasets, defined on different rotated-pole grids.
-To display both together, we make a pseudocoloured plot of the first, overlaid
-with contour lines from the second.
-We also add some lines and text annotations drawn in various projections.
-
-We plot these over a specified region, in two different map projections.
-
-"""
-import cartopy.crs as ccrs
-import iris
-import iris.plot as iplt
-import numpy as np
-import matplotlib.pyplot as plt
-
-
-# Define a Cartopy 'ordinary' lat-lon coordinate reference system.
-crs_latlon = ccrs.PlateCarree()
-
-
-def make_plot(projection_name, projection_crs):
-
- # Create a matplotlib Figure.
- plt.figure()
-
- # Add a matplotlib Axes, specifying the required display projection.
- # NOTE: specifying 'projection' (a "cartopy.crs.Projection") makes the
- # resulting Axes a "cartopy.mpl.geoaxes.GeoAxes", which supports plotting
- # in different coordinate systems.
- ax = plt.axes(projection=projection_crs)
-
- # Set display limits to include a set region of latitude * longitude.
- # (Note: Cartopy-specific).
- ax.set_extent((-80.0, 20.0, 10.0, 80.0), crs=crs_latlon)
-
- # Add coastlines and meridians/parallels (Cartopy-specific).
- ax.coastlines(linewidth=0.75, color='navy')
- ax.gridlines(crs=crs_latlon, linestyle='-')
-
- # Plot the first dataset as a pseudocolour filled plot.
- maindata_filepath = iris.sample_data_path('rotated_pole.nc')
- main_data = iris.load_cube(maindata_filepath)
- # NOTE: iplt.pcolormesh calls "pyplot.pcolormesh", passing in a coordinate
- # system with the 'transform' keyword: This enables the Axes (a cartopy
- # GeoAxes) to reproject the plot into the display projection.
- iplt.pcolormesh(main_data, cmap='RdBu_r')
-
- # Overplot the other dataset (which has a different grid), as contours.
- overlay_filepath = iris.sample_data_path('space_weather.nc')
- overlay_data = iris.load_cube(overlay_filepath, 'total electron content')
- # NOTE: as above, "iris.plot.contour" calls "pyplot.contour" with a
- # 'transform' keyword, enabling Cartopy reprojection.
- iplt.contour(overlay_data, 20,
- linewidths=2.0, colors='darkgreen', linestyles='-')
-
- # Draw a margin line, some way in from the border of the 'main' data...
- # First calculate rectangle corners, 7% in from each corner of the data.
- x_coord, y_coord = main_data.coord(axis='x'), main_data.coord(axis='y')
- x_start, x_end = np.min(x_coord.points), np.max(x_coord.points)
- y_start, y_end = np.min(y_coord.points), np.max(y_coord.points)
- margin = 0.07
- margin_fractions = np.array([margin, 1.0 - margin])
- x_lower, x_upper = x_start + (x_end - x_start) * margin_fractions
- y_lower, y_upper = y_start + (y_end - y_start) * margin_fractions
- box_x_points = x_lower + (x_upper - x_lower) * np.array([0, 1, 1, 0, 0])
- box_y_points = y_lower + (y_upper - y_lower) * np.array([0, 0, 1, 1, 0])
- # Get the Iris coordinate system of the X coordinate (Y should be the same).
- cs_data1 = x_coord.coord_system
- # Construct an equivalent Cartopy coordinate reference system ("crs").
- crs_data1 = cs_data1.as_cartopy_crs()
- # Draw the rectangle in this crs, with matplotlib "pyplot.plot".
- # NOTE: the 'transform' keyword specifies a non-display coordinate system
- # for the plot points (as used by the "iris.plot" functions).
- plt.plot(box_x_points, box_y_points, transform=crs_data1,
- linewidth=2.0, color='white', linestyle='--')
-
- # Mark some particular places with a small circle and a name label...
- # Define some test points with latitude and longitude coordinates.
- city_data = [('London', 51.5072, 0.1275),
- ('Halifax, NS', 44.67, -63.61),
- ('Reykjavik', 64.1333, -21.9333)]
- # Place a single marker point and a text annotation at each place.
- for name, lat, lon in city_data:
- plt.plot(lon, lat, marker='o', markersize=7.0, markeredgewidth=2.5,
- markerfacecolor='black', markeredgecolor='white',
- transform=crs_latlon)
- # NOTE: the "plt.annotate call" does not have a "transform=" keyword,
- # so for this one we transform the coordinates with a Cartopy call.
- at_x, at_y = ax.projection.transform_point(lon, lat,
- src_crs=crs_latlon)
- plt.annotate(
- name, xy=(at_x, at_y), xytext=(30, 20), textcoords='offset points',
- color='black', backgroundcolor='white', size='large',
- arrowprops=dict(arrowstyle='->', color='white', linewidth=2.5))
-
- # Add a title, and display.
- plt.title('A pseudocolour plot on the {} projection,\n'
- 'with overlaid contours.'.format(projection_name))
- iplt.show()
-
-
-def main():
- # Demonstrate with two different display projections.
- make_plot('Equidistant Cylindrical', ccrs.PlateCarree())
- make_plot('North Polar Stereographic', ccrs.NorthPolarStereo())
-
-
-if __name__ == '__main__':
- main()
diff --git a/docs/iris/example_code/General/rotated_pole_mapping.py b/docs/iris/example_code/General/rotated_pole_mapping.py
deleted file mode 100644
index e175e6fe74..0000000000
--- a/docs/iris/example_code/General/rotated_pole_mapping.py
+++ /dev/null
@@ -1,64 +0,0 @@
-"""
-Rotated pole mapping
-=====================
-
-This example uses several visualisation methods to achieve an array of
-differing images, including:
-
- * Visualisation of point based data
- * Contouring of point based data
- * Block plot of contiguous bounded data
- * Non native projection and a Natural Earth shaded relief image underlay
-
-"""
-import cartopy.crs as ccrs
-import matplotlib.pyplot as plt
-
-import iris
-import iris.plot as iplt
-import iris.quickplot as qplt
-import iris.analysis.cartography
-
-
-def main():
- # Load some test data.
- fname = iris.sample_data_path('rotated_pole.nc')
- air_pressure = iris.load_cube(fname)
-
- # Plot #1: Point plot showing data values & a colorbar
- plt.figure()
- points = qplt.points(air_pressure, c=air_pressure.data)
- cb = plt.colorbar(points, orientation='horizontal')
- cb.set_label(air_pressure.units)
- plt.gca().coastlines()
- iplt.show()
-
- # Plot #2: Contourf of the point based data
- plt.figure()
- qplt.contourf(air_pressure, 15)
- plt.gca().coastlines()
- iplt.show()
-
- # Plot #3: Contourf overlaid by coloured point data
- plt.figure()
- qplt.contourf(air_pressure)
- iplt.points(air_pressure, c=air_pressure.data)
- plt.gca().coastlines()
- iplt.show()
-
- # For the purposes of this example, add some bounds to the latitude
- # and longitude
- air_pressure.coord('grid_latitude').guess_bounds()
- air_pressure.coord('grid_longitude').guess_bounds()
-
- # Plot #4: Block plot
- plt.figure()
- plt.axes(projection=ccrs.PlateCarree())
- iplt.pcolormesh(air_pressure)
- plt.gca().stock_img()
- plt.gca().coastlines()
- iplt.show()
-
-
-if __name__ == '__main__':
- main()
diff --git a/docs/iris/example_code/Meteorology/COP_1d_plot.py b/docs/iris/example_code/Meteorology/COP_1d_plot.py
deleted file mode 100644
index 53bff28b7e..0000000000
--- a/docs/iris/example_code/Meteorology/COP_1d_plot.py
+++ /dev/null
@@ -1,108 +0,0 @@
-"""
-Global average annual temperature plot
-======================================
-
-Produces a time-series plot of North American temperature forecasts for 2
-different emission scenarios. Constraining data to a limited spatial area also
-features in this example.
-
-The data used comes from the HadGEM2-AO model simulations for the A1B and E1
-scenarios, both of which were derived using the IMAGE Integrated Assessment
-Model (Johns et al. 2011; Lowe et al. 2009).
-
-References
-----------
-
- Johns T.C., et al. (2011) Climate change under aggressive mitigation: the
- ENSEMBLES multi-model experiment. Climate Dynamics, Vol 37, No. 9-10,
- doi:10.1007/s00382-011-1005-5.
-
- Lowe J.A., C.D. Hewitt, D.P. Van Vuuren, T.C. Johns, E. Stehfest, J-F.
- Royer, and P. van der Linden, 2009. New Study For Climate Modeling,
- Analyses, and Scenarios. Eos Trans. AGU, Vol 90, No. 21,
- doi:10.1029/2009EO210001.
-
-.. seealso::
-
- Further details on the aggregation functionality being used in this example
- can be found in :ref:`cube-statistics`.
-
-"""
-import numpy as np
-import matplotlib.pyplot as plt
-import iris
-import iris.plot as iplt
-import iris.quickplot as qplt
-
-import iris.analysis.cartography
-
-
-def main():
- # Load data into three Cubes, one for each set of NetCDF files.
- e1 = iris.load_cube(iris.sample_data_path('E1_north_america.nc'))
-
- a1b = iris.load_cube(iris.sample_data_path('A1B_north_america.nc'))
-
- # Load the global pre-industrial mean temperature, and limit the domain
- # to the same North American region as e1 and a1b.
- north_america = iris.Constraint(longitude=lambda v: 225 <= v <= 315,
- latitude=lambda v: 15 <= v <= 60)
- pre_industrial = iris.load_cube(iris.sample_data_path('pre-industrial.pp'),
- north_america)
-
- # Generate area-weights array. As e1 and a1b are on the same grid we can
- # do this just once and re-use. This method requires bounds on lat/lon
- # coords, so let's add some in sensible locations using the "guess_bounds"
- # method.
- e1.coord('latitude').guess_bounds()
- e1.coord('longitude').guess_bounds()
- e1_grid_areas = iris.analysis.cartography.area_weights(e1)
- pre_industrial.coord('latitude').guess_bounds()
- pre_industrial.coord('longitude').guess_bounds()
- pre_grid_areas = iris.analysis.cartography.area_weights(pre_industrial)
-
- # Perform the area-weighted mean for each of the datasets using the
- # computed grid-box areas.
- pre_industrial_mean = pre_industrial.collapsed(['latitude', 'longitude'],
- iris.analysis.MEAN,
- weights=pre_grid_areas)
- e1_mean = e1.collapsed(['latitude', 'longitude'],
- iris.analysis.MEAN,
- weights=e1_grid_areas)
- a1b_mean = a1b.collapsed(['latitude', 'longitude'],
- iris.analysis.MEAN,
- weights=e1_grid_areas)
-
- # Plot the datasets
- qplt.plot(e1_mean, label='E1 scenario', lw=1.5, color='blue')
- qplt.plot(a1b_mean, label='A1B-Image scenario', lw=1.5, color='red')
-
- # Draw a horizontal line showing the pre-industrial mean
- plt.axhline(y=pre_industrial_mean.data, color='gray', linestyle='dashed',
- label='pre-industrial', lw=1.5)
-
- # Constrain the period 1860-1999 and extract the observed data from a1b
- constraint = iris.Constraint(time=lambda
- cell: 1860 <= cell.point.year <= 1999)
- observed = a1b_mean.extract(constraint)
- # Assert that this data set is the same as the e1 scenario:
- # they share data up to the 1999 cut off.
- assert np.all(np.isclose(observed.data,
- e1_mean.extract(constraint).data))
-
- # Plot the observed data
- qplt.plot(observed, label='observed', color='black', lw=1.5)
-
- # Add a legend and title
- plt.legend(loc="upper left")
- plt.title('North American mean air temperature', fontsize=18)
-
- plt.xlabel('Time / year')
-
- plt.grid()
-
- iplt.show()
-
-
-if __name__ == '__main__':
- main()
diff --git a/docs/iris/example_code/Meteorology/COP_maps.py b/docs/iris/example_code/Meteorology/COP_maps.py
deleted file mode 100644
index aa5049feb9..0000000000
--- a/docs/iris/example_code/Meteorology/COP_maps.py
+++ /dev/null
@@ -1,149 +0,0 @@
-"""
-Global average annual temperature maps
-======================================
-
-Produces maps of global temperature forecasts from the A1B and E1 scenarios.
-
-The data used comes from the HadGEM2-AO model simulations for the A1B and E1
-scenarios, both of which were derived using the IMAGE Integrated Assessment
-Model (Johns et al. 2011; Lowe et al. 2009).
-
-References
-----------
-
- Johns T.C., et al. (2011) Climate change under aggressive mitigation: the
- ENSEMBLES multi-model experiment. Climate Dynamics, Vol 37, No. 9-10,
- doi:10.1007/s00382-011-1005-5.
-
- Lowe J.A., C.D. Hewitt, D.P. Van Vuuren, T.C. Johns, E. Stehfest, J-F.
- Royer, and P. van der Linden, 2009. New Study For Climate Modeling,
- Analyses, and Scenarios. Eos Trans. AGU, Vol 90, No. 21,
- doi:10.1029/2009EO210001.
-
-"""
-from six.moves import zip
-
-import os.path
-import matplotlib.pyplot as plt
-import numpy as np
-
-import iris
-import iris.coords as coords
-import iris.plot as iplt
-
-
-def cop_metadata_callback(cube, field, filename):
- """
- A function which adds an "Experiment" coordinate which comes from the
- filename.
- """
-
- # Extract the experiment name (such as a1b or e1) from the filename (in
- # this case it is just the parent folder's name)
- containing_folder = os.path.dirname(filename)
- experiment_label = os.path.basename(containing_folder)
-
- # Create a coordinate with the experiment label in it
- exp_coord = coords.AuxCoord(experiment_label, long_name='Experiment',
- units='no_unit')
-
- # and add it to the cube
- cube.add_aux_coord(exp_coord)
-
-
-def main():
- # Load e1 and a1 using the callback to update the metadata
- e1 = iris.load_cube(iris.sample_data_path('E1.2098.pp'),
- callback=cop_metadata_callback)
- a1b = iris.load_cube(iris.sample_data_path('A1B.2098.pp'),
- callback=cop_metadata_callback)
-
- # Load the global average data and add an 'Experiment' coord to it
- global_avg = iris.load_cube(iris.sample_data_path('pre-industrial.pp'))
-
- # Define evenly spaced contour levels: -2.5, -1.5, ... 15.5, 16.5 with the
- # specific colours
- levels = np.arange(20) - 2.5
- red = np.array([0, 0, 221, 239, 229, 217, 239, 234, 228, 222, 205, 196,
- 161, 137, 116, 89, 77, 60, 51]) / 256.
- green = np.array([16, 217, 242, 243, 235, 225, 190, 160, 128, 87, 72, 59,
- 33, 21, 29, 30, 30, 29, 26]) / 256.
- blue = np.array([255, 255, 243, 169, 99, 51, 63, 37, 39, 21, 27, 23, 22,
- 26, 29, 28, 27, 25, 22]) / 256.
-
- # Put those colours into an array which can be passed to contourf as the
- # specific colours for each level
- colors = np.array([red, green, blue]).T
-
- # Subtract the global average from each scenario (done within the loop below)
-
- # Iterate over each latitude longitude slice for both e1 and a1b scenarios
- # simultaneously
- for e1_slice, a1b_slice in zip(e1.slices(['latitude', 'longitude']),
- a1b.slices(['latitude', 'longitude'])):
-
- time_coord = a1b_slice.coord('time')
-
- # Calculate the difference from the mean
- delta_e1 = e1_slice - global_avg
- delta_a1b = a1b_slice - global_avg
-
- # Make a wider than normal figure to house two maps side-by-side
- fig = plt.figure(figsize=(12, 5))
-
- # Get the time datetime from the coordinate
- time = time_coord.units.num2date(time_coord.points[0])
- # Set a title for the entire figure, giving the time in a nice format
- # of "MonthName Year". Also, set the y value for the title so that it
- # does not sit too close to the top of the plot.
- fig.suptitle(
- 'Annual Temperature Predictions for ' + time.strftime("%Y"),
- y=0.9,
- fontsize=18)
-
- # Add the first subplot showing the E1 scenario
- plt.subplot(121)
- plt.title('HadGEM2 E1 Scenario', fontsize=10)
- iplt.contourf(delta_e1, levels, colors=colors, extend='both')
- plt.gca().coastlines()
- # get the current axes' subplot for use later on
- plt1_ax = plt.gca()
-
- # Add the second subplot showing the A1B scenario
- plt.subplot(122)
- plt.title('HadGEM2 A1B-Image Scenario', fontsize=10)
- contour_result = iplt.contourf(delta_a1b, levels, colors=colors,
- extend='both')
- plt.gca().coastlines()
- # get the current axes' subplot for use later on
- plt2_ax = plt.gca()
-
- # Now add a colour bar whose leftmost point is the same as the leftmost
- # point of the left-hand plot and whose rightmost point is the rightmost
- # point of the right-hand plot.
-
- # Get the positions of the 2nd plot and the left position of the 1st
- # plot
- left, bottom, width, height = plt2_ax.get_position().bounds
- first_plot_left = plt1_ax.get_position().bounds[0]
-
- # The width of the colour bar is now simple to calculate
- width = left - first_plot_left + width
-
- # Add axes to the figure, to place the colour bar
- colorbar_axes = fig.add_axes([first_plot_left, bottom + 0.07,
- width, 0.03])
-
- # Add the colour bar
- cbar = plt.colorbar(contour_result, colorbar_axes,
- orientation='horizontal')
-
- # Label the colour bar and add ticks
- cbar.set_label(e1_slice.units)
- cbar.ax.tick_params(length=0)
-
- iplt.show()
-
-
-if __name__ == '__main__':
- main()
diff --git a/docs/iris/example_code/Meteorology/TEC.py b/docs/iris/example_code/Meteorology/TEC.py
deleted file mode 100644
index 43f69fcea0..0000000000
--- a/docs/iris/example_code/Meteorology/TEC.py
+++ /dev/null
@@ -1,41 +0,0 @@
-"""
-Ionosphere space weather
-========================
-
-This space weather example plots a filled contour of rotated pole point
-data with a shaded relief image underlay. The plot shows aggregated
-vertical electron content in the ionosphere.
-
-The plot exhibits an interesting outline effect due to excluding data
-values below a certain threshold.
-
-"""
-
-import matplotlib.pyplot as plt
-import numpy.ma as ma
-
-import iris
-import iris.plot as iplt
-import iris.quickplot as qplt
-
-
-def main():
- # Load the "total electron content" cube.
- filename = iris.sample_data_path('space_weather.nc')
- cube = iris.load_cube(filename, 'total electron content')
-
- # Explicitly mask negative electron content.
- cube.data = ma.masked_less(cube.data, 0)
-
- # Plot the cube using one hundred colour levels.
- qplt.contourf(cube, 100)
- plt.title('Total Electron Content')
- plt.xlabel('longitude / degrees')
- plt.ylabel('latitude / degrees')
- plt.gca().stock_img()
- plt.gca().coastlines()
- iplt.show()
-
-
-if __name__ == '__main__':
- main()
diff --git a/docs/iris/example_code/Meteorology/__init__.py b/docs/iris/example_code/Meteorology/__init__.py
deleted file mode 100644
index 39c05d08c6..0000000000
--- a/docs/iris/example_code/Meteorology/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-"""
-Meteorology visualisation examples
-==================================
-"""
diff --git a/docs/iris/example_code/Meteorology/deriving_phenomena.py b/docs/iris/example_code/Meteorology/deriving_phenomena.py
deleted file mode 100644
index d34a2c8b78..0000000000
--- a/docs/iris/example_code/Meteorology/deriving_phenomena.py
+++ /dev/null
@@ -1,96 +0,0 @@
-"""
-Deriving Exner Pressure and Air Temperature
-===========================================
-
-This example shows some processing of cubes in order to derive further related
-cubes; in this case the derived cubes are Exner pressure and air temperature
-which are calculated by combining air pressure, air potential temperature and
-specific humidity. Finally, the two new cubes are presented side-by-side in a
-plot.
-
-"""
-import matplotlib.pyplot as plt
-import matplotlib.ticker
-
-import iris
-import iris.coords as coords
-import iris.iterate
-import iris.plot as iplt
-import iris.quickplot as qplt
-
-
-def limit_colorbar_ticks(contour_object):
- """
- Takes a contour object which has an associated colorbar and limits the
- number of ticks on the colorbar to 4.
-
- """
- # Under Matplotlib v1.2.x the colorbar attribute of a contour object is
- # a tuple containing the colorbar and an axes object, whereas under
- # Matplotlib v1.3.x it is simply the colorbar.
- try:
- colorbar = contour_object.colorbar[0]
- except (AttributeError, TypeError):
- colorbar = contour_object.colorbar
-
- colorbar.locator = matplotlib.ticker.MaxNLocator(4)
- colorbar.update_ticks()
-
-
-def main():
- fname = iris.sample_data_path('colpex.pp')
-
- # The list of phenomena of interest
- phenomena = ['air_potential_temperature', 'air_pressure']
-
- # Define the constraint on standard name and model level
- constraints = [iris.Constraint(phenom, model_level_number=1) for
- phenom in phenomena]
-
- air_potential_temperature, air_pressure = iris.load_cubes(fname,
- constraints)
-
- # Define a coordinate which represents 1000 hPa
- p0 = coords.AuxCoord(1000, long_name='P0', units='hPa')
- # Convert reference pressure 'p0' into the same units as 'air_pressure'
- p0.convert_units(air_pressure.units)
-
- # Calculate Exner pressure
- exner_pressure = (air_pressure / p0) ** (287.05 / 1005.0)
- # Set the name (the unit is dimensionless)
- exner_pressure.rename('exner_pressure')
-
- # Calculate air_temp
- air_temperature = exner_pressure * air_potential_temperature
- # Set the name (the unit is K)
- air_temperature.rename('air_temperature')
-
- # Now create an iterator which will give us lat lon slices of
- # exner pressure and air temperature in the form
- # (exner_slice, air_temp_slice).
- lat_lon_slice_pairs = iris.iterate.izip(exner_pressure,
- air_temperature,
- coords=['grid_latitude',
- 'grid_longitude'])
-
- # For the purposes of this example, we only want to demonstrate the first
- # plot.
- lat_lon_slice_pairs = [next(lat_lon_slice_pairs)]
-
- plt.figure(figsize=(8, 4))
- for exner_slice, air_temp_slice in lat_lon_slice_pairs:
- plt.subplot(121)
- cont = qplt.contourf(exner_slice)
-
- # The default colorbar has a few too many ticks on it, causing text to
- # overlap. Therefore, limit the number of ticks.
- limit_colorbar_ticks(cont)
-
- plt.subplot(122)
- cont = qplt.contourf(air_temp_slice)
- limit_colorbar_ticks(cont)
- iplt.show()
-
-
-if __name__ == '__main__':
- main()
diff --git a/docs/iris/example_code/Meteorology/hovmoller.py b/docs/iris/example_code/Meteorology/hovmoller.py
deleted file mode 100644
index 5d8b0852ac..0000000000
--- a/docs/iris/example_code/Meteorology/hovmoller.py
+++ /dev/null
@@ -1,49 +0,0 @@
-"""
-Hovmoller diagram of monthly surface temperature
-================================================
-
-This example demonstrates the creation of a Hovmoller diagram with fine control
-over plot ticks and labels. The data comes from the Met Office OSTIA project
-and has been pre-processed to calculate the monthly mean sea surface
-temperature.
-
-"""
-import matplotlib.pyplot as plt
-import matplotlib.dates as mdates
-
-import iris
-import iris.plot as iplt
-import iris.quickplot as qplt
-
-
-def main():
- # load a single cube of surface temperature between +/- 5 latitude
- fname = iris.sample_data_path('ostia_monthly.nc')
- cube = iris.load_cube(fname,
- iris.Constraint('surface_temperature',
- latitude=lambda v: -5 < v < 5))
-
- # Take the mean over latitude
- cube = cube.collapsed('latitude', iris.analysis.MEAN)
-
- # Now that the data is in a suitable form, let's create the plot:
- # a filled contour with 20 levels
- qplt.contourf(cube, 20)
-
- # Put a custom label on the y axis
- plt.ylabel('Time / years')
-
- # Stop matplotlib providing clever axes range padding
- plt.axis('tight')
-
- # As we are plotting annual variability, put years as the y ticks
- plt.gca().yaxis.set_major_locator(mdates.YearLocator())
-
- # And format the ticks to just show the year
- plt.gca().yaxis.set_major_formatter(mdates.DateFormatter('%Y'))
-
- iplt.show()
-
-
-if __name__ == '__main__':
- main()
diff --git a/docs/iris/example_code/Meteorology/lagged_ensemble.py b/docs/iris/example_code/Meteorology/lagged_ensemble.py
deleted file mode 100644
index 7898d26392..0000000000
--- a/docs/iris/example_code/Meteorology/lagged_ensemble.py
+++ /dev/null
@@ -1,157 +0,0 @@
-"""
-Seasonal ensemble model plots
-=============================
-
-This example demonstrates the loading of a lagged ensemble dataset from the
-GloSea4 model, which is then used to produce two types of plot:
-
- * The first shows the "postage stamp" style image with an array of 14 images,
- one for each ensemble member, with a shared colorbar. (The missing image in
- this example represents ensemble member number 6, which was a failed run.)
-
- * The second plot shows the data limited to a region of interest, in this case
- a region defined for forecasting ENSO (El Nino-Southern Oscillation), which,
- for the purposes of this example, has had the ensemble mean subtracted from
- each ensemble member to give an anomaly surface temperature. In practice a
- better approach would be to take the climatological mean, calibrated to the
- model, from each ensemble member.
-
-"""
-import matplotlib.pyplot as plt
-import numpy as np
-
-import iris
-import iris.plot as iplt
-
-
-def realization_metadata(cube, field, fname):
- """
- A function which modifies the cube's metadata to add a "realization"
- (ensemble member) coordinate from the filename if one doesn't already exist
- in the cube.
-
- """
- # add an ensemble member coordinate if one doesn't already exist
- if not cube.coords('realization'):
- # the ensemble member is encoded in the filename as *_???.pp where ???
- # is the ensemble member
- realization_number = fname[-6:-3]
-
- import iris.coords
- realization_coord = iris.coords.AuxCoord(np.int32(realization_number),
- 'realization')
- cube.add_aux_coord(realization_coord)
-
-
-def main():
- # extract surface temperature cubes which have an ensemble member
- # coordinate, adding appropriate lagged ensemble metadata
- surface_temp = iris.load_cube(
- iris.sample_data_path('GloSea4', 'ensemble_???.pp'),
- iris.Constraint('surface_temperature', realization=lambda value: True),
- callback=realization_metadata,
- )
-
- # -------------------------------------------------------------------------
- # Plot #1: Ensemble postage stamps
- # -------------------------------------------------------------------------
-
- # for the purposes of this example, take the last time element of the cube
- last_timestep = surface_temp[:, -1, :, :]
-
- # Make 50 evenly spaced levels which span the dataset
- contour_levels = np.linspace(np.min(last_timestep.data),
- np.max(last_timestep.data),
- 50)
-
- # Create a wider than normal figure to support our many plots
- plt.figure(figsize=(12, 6), dpi=100)
-
- # Also manually adjust the spacings which are used when creating subplots
- plt.gcf().subplots_adjust(hspace=0.05, wspace=0.05, top=0.95, bottom=0.05,
- left=0.075, right=0.925)
-
- # iterate over all possible latitude longitude slices
- for cube in last_timestep.slices(['latitude', 'longitude']):
-
- # get the ensemble member number from the ensemble coordinate
- ens_member = cube.coord('realization').points[0]
-
- # Plot the data in a 4x4 grid, with each plot's position in the grid
- # determined by the ensemble member number. The special case for the
- # 13th ensemble member is to place its plot at the bottom right.
- if ens_member == 13:
- plt.subplot(4, 4, 16)
- else:
- plt.subplot(4, 4, ens_member+1)
-
- cf = iplt.contourf(cube, contour_levels)
-
- # add coastlines
- plt.gca().coastlines()
-
- # make an axes to put the shared colorbar in
- colorbar_axes = plt.gcf().add_axes([0.35, 0.1, 0.3, 0.05])
- colorbar = plt.colorbar(cf, colorbar_axes, orientation='horizontal')
- colorbar.set_label('%s' % last_timestep.units)
-
- # limit the colorbar to 8 tick marks
- import matplotlib.ticker
- colorbar.locator = matplotlib.ticker.MaxNLocator(8)
- colorbar.update_ticks()
-
- # get the time for the entire plot
- time_coord = last_timestep.coord('time')
- time = time_coord.units.num2date(time_coord.bounds[0, 0])
-
- # Set a global title for the postage stamps, with the date formatted as
- # "MonthName Year"
- plt.suptitle('Surface temperature ensemble forecasts for %s' % (
- time.strftime('%B %Y'), ))
-
- iplt.show()
-
- # -------------------------------------------------------------------------
- # Plot #2: ENSO plumes
- # -------------------------------------------------------------------------
-
- # Nino 3.4 lies between 170W and 120W, 5N and 5S, so define a constraint
- # which matches this
- nino_3_4_constraint = iris.Constraint(
- longitude=lambda v: -170+360 <= v <= -120+360,
- latitude=lambda v: -5 <= v <= 5)
-
- nino_cube = surface_temp.extract(nino_3_4_constraint)
-
- # Subsetting a circular longitude coordinate always results in a circular
- # coordinate, so set the coordinate to be non-circular
- nino_cube.coord('longitude').circular = False
-
- # Calculate the horizontal mean for the nino region
- mean = nino_cube.collapsed(['latitude', 'longitude'], iris.analysis.MEAN)
-
- # Calculate the ensemble mean of the horizontal mean. To do this, remove
- # the "forecast_period" and "forecast_reference_time" coordinates which
- # span both "relalization" and "time".
- mean.remove_coord("forecast_reference_time")
- mean.remove_coord("forecast_period")
- ensemble_mean = mean.collapsed('realization', iris.analysis.MEAN)
-
- # take the ensemble mean from each ensemble member
- mean -= ensemble_mean.data
-
- plt.figure()
-
- for ensemble_member in mean.slices(['time']):
- # draw each ensemble member as a dashed line in black
- iplt.plot(ensemble_member, '--k')
-
- plt.title('Mean temperature anomaly for ENSO 3.4 region')
- plt.xlabel('Time')
- plt.ylabel('Temperature anomaly / K')
-
- iplt.show()
-
-
-if __name__ == '__main__':
- main()
diff --git a/docs/iris/example_code/Meteorology/wind_speed.py b/docs/iris/example_code/Meteorology/wind_speed.py
deleted file mode 100644
index 231defb25b..0000000000
--- a/docs/iris/example_code/Meteorology/wind_speed.py
+++ /dev/null
@@ -1,85 +0,0 @@
-"""
-Plotting wind direction using quiver
-===========================================================
-
-This example demonstrates using quiver to plot wind speed contours and wind
-direction arrows from wind vector component input data. The vector components
-are co-located in space in this case.
-
-For the second plot, the data used for the arrows is normalised to produce
-arrows with a uniform size on the plot.
-
-"""
-
-import matplotlib.pyplot as plt
-import numpy as np
-
-import iris
-import iris.coord_categorisation
-import iris.quickplot as qplt
-
-import cartopy
-import cartopy.feature as cfeat
-import cartopy.crs as ccrs
-
-
-def main():
- # Load the u and v components of wind from a pp file
- infile = iris.sample_data_path('wind_speed_lake_victoria.pp')
-
- uwind = iris.load_cube(infile, 'x_wind')
- vwind = iris.load_cube(infile, 'y_wind')
-
- ulon = uwind.coord('longitude')
- vlon = vwind.coord('longitude')
-
- # The longitude points go from 180 to 540, so subtract 360 from them
- ulon.points = ulon.points - 360.0
- vlon.points = vlon.points - 360.0
-
- # Create a cube containing the wind speed
- windspeed = (uwind ** 2 + vwind ** 2) ** 0.5
- windspeed.rename('windspeed')
-
- x = ulon.points
- y = uwind.coord('latitude').points
- u = uwind.data
- v = vwind.data
-
- # Set up axes to show the lake
- lakes = cfeat.NaturalEarthFeature('physical', 'lakes', '50m',
- facecolor='none')
-
- plt.figure()
- ax = plt.axes(projection=ccrs.PlateCarree())
- ax.add_feature(lakes)
-
- # Get the coordinate reference system used by the data
- transform = ulon.coord_system.as_cartopy_projection()
-
- # Plot the wind speed as a contour plot
- qplt.contourf(windspeed, 20)
-
- # Add arrows to show the wind vectors
- plt.quiver(x, y, u, v, pivot='middle', transform=transform)
-
- plt.title("Wind speed over Lake Victoria")
- qplt.show()
-
- # Normalise the data for uniform arrow size
- u_norm = u / np.sqrt(u ** 2.0 + v ** 2.0)
- v_norm = v / np.sqrt(u ** 2.0 + v ** 2.0)
-
- plt.figure()
- ax = plt.axes(projection=ccrs.PlateCarree())
- ax.add_feature(lakes)
-
- qplt.contourf(windspeed, 20)
-
- plt.quiver(x, y, u_norm, v_norm, pivot='middle', transform=transform)
-
- plt.title("Wind speed over Lake Victoria")
- qplt.show()
-
-if __name__ == '__main__':
- main()
diff --git a/docs/iris/example_code/Oceanography/__init__.py b/docs/iris/example_code/Oceanography/__init__.py
deleted file mode 100644
index afac828a05..0000000000
--- a/docs/iris/example_code/Oceanography/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-"""
-Oceanography visualisation examples
-===================================
-"""
diff --git a/docs/iris/example_code/Oceanography/atlantic_profiles.py b/docs/iris/example_code/Oceanography/atlantic_profiles.py
deleted file mode 100644
index b3d76ea632..0000000000
--- a/docs/iris/example_code/Oceanography/atlantic_profiles.py
+++ /dev/null
@@ -1,82 +0,0 @@
-"""
-Oceanographic profiles and T-S diagrams
-=======================================
-
-This example demonstrates how to plot vertical profiles of different
-variables in the same axes, and how to make a scatter plot of two
-variables. There is an oceanographic theme but the same techniques are
-equally applicable to atmospheric or other kinds of data.
-
-The data used are profiles of potential temperature and salinity in the
-Equatorial and South Atlantic, output from an ocean model.
-
-The y-axis of the first plot produced will be automatically inverted due to the
-presence of the attribute positive=down on the depth coordinate. This means
-depth values intuitively increase downward on the y-axis.
-
-"""
-import iris
-import iris.iterate
-import iris.plot as iplt
-import matplotlib.pyplot as plt
-
-
-def main():
- # Load the gridded temperature and salinity data.
- fname = iris.sample_data_path('atlantic_profiles.nc')
- cubes = iris.load(fname)
- theta, = cubes.extract('sea_water_potential_temperature')
- salinity, = cubes.extract('sea_water_practical_salinity')
-
- # Extract profiles of temperature and salinity from a particular point in
- # the southern portion of the domain, and limit the depth of the profile
- # to 1000m.
- lon_cons = iris.Constraint(longitude=330.5)
- lat_cons = iris.Constraint(latitude=lambda l: -10 < l < -9)
- depth_cons = iris.Constraint(depth=lambda d: d <= 1000)
- theta_1000m = theta.extract(depth_cons & lon_cons & lat_cons)
- salinity_1000m = salinity.extract(depth_cons & lon_cons & lat_cons)
-
- # Plot these profiles on the same set of axes. In each case we call plot
- # with two arguments, the cube followed by the depth coordinate. Putting
- # them in this order places the depth coordinate on the y-axis.
- # The first plot is in the default axes. We'll use the same color for the
- # curve and its axes/tick labels.
- plt.figure(figsize=(5, 6))
- temperature_color = (.3, .4, .5)
- ax1 = plt.gca()
- iplt.plot(theta_1000m, theta_1000m.coord('depth'), linewidth=2,
- color=temperature_color, alpha=.75)
- ax1.set_xlabel('Potential Temperature / K', color=temperature_color)
- ax1.set_ylabel('Depth / m')
- for ticklabel in ax1.get_xticklabels():
- ticklabel.set_color(temperature_color)
- # To plot salinity in the same axes we use twiny(). We'll use a different
- # color to identify salinity.
- salinity_color = (.6, .1, .15)
- ax2 = plt.gca().twiny()
- iplt.plot(salinity_1000m, salinity_1000m.coord('depth'), linewidth=2,
- color=salinity_color, alpha=.75)
- ax2.set_xlabel('Salinity / PSU', color=salinity_color)
- for ticklabel in ax2.get_xticklabels():
- ticklabel.set_color(salinity_color)
- plt.tight_layout()
- iplt.show()
-
- # Now plot a T-S diagram using scatter. We'll use all the profiles here,
- # and each point will be coloured according to its depth.
- plt.figure(figsize=(6, 6))
- depth_values = theta.coord('depth').points
- for s, t in iris.iterate.izip(salinity, theta, coords='depth'):
- iplt.scatter(s, t, c=depth_values, marker='+', cmap='RdYlBu_r')
- ax = plt.gca()
- ax.set_xlabel('Salinity / PSU')
- ax.set_ylabel('Potential Temperature / K')
- cb = plt.colorbar(orientation='horizontal')
- cb.set_label('Depth / m')
- plt.tight_layout()
- iplt.show()
-
-
-if __name__ == '__main__':
- main()
diff --git a/docs/iris/example_code/Oceanography/load_nemo.py b/docs/iris/example_code/Oceanography/load_nemo.py
deleted file mode 100644
index a76da68248..0000000000
--- a/docs/iris/example_code/Oceanography/load_nemo.py
+++ /dev/null
@@ -1,59 +0,0 @@
-"""
-Load a time series of data from the NEMO model
-==============================================
-
-This example demonstrates how to load multiple files containing data output by
-the NEMO model and combine them into a time series in a single cube. The
-different time dimensions in these files can prevent Iris from concatenating
-them without the intervention shown here.
-"""
-from __future__ import unicode_literals
-
-import iris
-import iris.plot as iplt
-import iris.quickplot as qplt
-import matplotlib.pyplot as plt
-from iris.util import promote_aux_coord_to_dim_coord
-
-
-def main():
- # Load the three files of sample NEMO data.
- fname = iris.sample_data_path('NEMO/nemo_1m_*.nc')
- cubes = iris.load(fname)
-
- # Some attributes are unique to each file and must be blanked
- # to allow concatenation.
- differing_attrs = ['file_name', 'name', 'timeStamp', 'TimeStamp']
- for cube in cubes:
- for attribute in differing_attrs:
- cube.attributes[attribute] = ''
-
- # The cubes still cannot be concatenated because their time dimension is
- # time_counter rather than time. time needs to be promoted to allow
- # concatenation.
- for cube in cubes:
- promote_aux_coord_to_dim_coord(cube, 'time')
-
- # The cubes can now be concatenated into a single time series.
- cube = cubes.concatenate_cube()
-
- # Generate a time series plot of a single point
- plt.figure()
- y_point_index = 100
- x_point_index = 100
- qplt.plot(cube[:, y_point_index, x_point_index], 'o-')
-
- # Include the point's position in the plot's title
- lat_point = cube.coord('latitude').points[y_point_index, x_point_index]
- lat_string = '{:.3f}\u00B0 {}'.format(abs(lat_point),
- 'N' if lat_point > 0. else 'S')
- lon_point = cube.coord('longitude').points[y_point_index, x_point_index]
- lon_string = '{:.3f}\u00B0 {}'.format(abs(lon_point),
- 'E' if lon_point > 0. else 'W')
- plt.title('{} at {} {}'.format(cube.long_name.capitalize(),
- lat_string, lon_string))
- iplt.show()
-
-
-if __name__ == '__main__':
- main()
diff --git a/docs/iris/example_tests/__init__.py b/docs/iris/example_tests/__init__.py
deleted file mode 100644
index 174361337f..0000000000
--- a/docs/iris/example_tests/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# (C) British Crown Copyright 2010 - 2015, Met Office
-#
-# This file is part of Iris.
-#
-# Iris is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Lesser General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Iris is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
diff --git a/docs/iris/example_tests/extest_util.py b/docs/iris/example_tests/extest_util.py
deleted file mode 100644
index 3d9017b6ee..0000000000
--- a/docs/iris/example_tests/extest_util.py
+++ /dev/null
@@ -1,97 +0,0 @@
-# (C) British Crown Copyright 2010 - 2016, Met Office
-#
-# This file is part of Iris.
-#
-# Iris is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Lesser General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Iris is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-
-"""
-Provides context managers which are fundamental to the ability
-to run the example tests.
-
-"""
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-
-import contextlib
-import os.path
-import warnings
-import sys
-
-import matplotlib.pyplot as plt
-
-import iris
-from iris._deprecation import IrisDeprecation
-import iris.plot as iplt
-import iris.quickplot as qplt
-
-
-EXAMPLE_DIRECTORY = os.path.join(os.path.dirname(os.path.dirname(__file__)),
- 'example_code')
-EXAMPLE_DIRECTORIES = [os.path.join(EXAMPLE_DIRECTORY, the_dir)
- for the_dir in os.listdir(EXAMPLE_DIRECTORY)]
-
-
-@contextlib.contextmanager
-def add_examples_to_path():
- """
- Creates a context manager which can be used to add the iris examples
- to the PYTHONPATH. The examples are only importable throughout the lifetime
- of this context manager.
-
- """
- orig_sys_path = sys.path
- sys.path = sys.path[:]
- sys.path += EXAMPLE_DIRECTORIES
- yield
- sys.path = orig_sys_path
-
-
-@contextlib.contextmanager
-def show_replaced_by_check_graphic(test_case):
- """
- Creates a context manager which can be used to replace the functionality
- of matplotlib.pyplot.show with a function which calls the check_graphic
- method on the given test_case (iris.tests.IrisTest.check_graphic).
-
- """
- def replacement_show():
- # form a closure on test_case and tolerance
- test_case.check_graphic()
-
- orig_show = plt.show
- plt.show = iplt.show = qplt.show = replacement_show
- yield
- plt.show = iplt.show = qplt.show = orig_show
-
-
-@contextlib.contextmanager
-def fail_any_deprecation_warnings():
- """
- Create a context in which any deprecation warning will cause an error.
-
- The context also resets all the iris.FUTURE settings to the defaults, as
- otherwise changes made in one test can affect subsequent ones.
-
- """
- with warnings.catch_warnings():
- # Detect and error all and any Iris deprecation warnings.
- warnings.simplefilter("error", IrisDeprecation)
- # Run with all default settings in iris.FUTURE.
- default_future_kwargs = iris.Future().__dict__.copy()
- for dead_option in iris.Future.deprecated_options:
- # Avoid a warning when setting these !
- del default_future_kwargs[dead_option]
- with iris.FUTURE.context(**default_future_kwargs):
- yield
diff --git a/docs/iris/example_tests/test_COP_1d_plot.py b/docs/iris/example_tests/test_COP_1d_plot.py
deleted file mode 100644
index f356423b25..0000000000
--- a/docs/iris/example_tests/test_COP_1d_plot.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# (C) British Crown Copyright 2010 - 2016, Met Office
-#
-# This file is part of Iris.
-#
-# Iris is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Lesser General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Iris is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-
-# Import Iris tests first so that some things can be initialised before
-# importing anything else.
-import iris.tests as tests
-
-from .extest_util import (add_examples_to_path,
- show_replaced_by_check_graphic,
- fail_any_deprecation_warnings)
-
-
-class TestCOP1DPlot(tests.GraphicsTest):
- """Test the COP_1d_plot example code."""
- def test_COP_1d_plot(self):
- with fail_any_deprecation_warnings():
- with add_examples_to_path():
- import COP_1d_plot
- with show_replaced_by_check_graphic(self):
- COP_1d_plot.main()
-
-
-if __name__ == '__main__':
- tests.main()
diff --git a/docs/iris/example_tests/test_COP_maps.py b/docs/iris/example_tests/test_COP_maps.py
deleted file mode 100644
index 10dcbcc816..0000000000
--- a/docs/iris/example_tests/test_COP_maps.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# (C) British Crown Copyright 2010 - 2016, Met Office
-#
-# This file is part of Iris.
-#
-# Iris is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Lesser General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Iris is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-
-# Import Iris tests first so that some things can be initialised before
-# importing anything else.
-import iris.tests as tests
-
-from .extest_util import (add_examples_to_path,
- show_replaced_by_check_graphic,
- fail_any_deprecation_warnings)
-
-
-class TestCOPMaps(tests.GraphicsTest):
- """Test the COP_maps example code."""
- def test_cop_maps(self):
- with fail_any_deprecation_warnings():
- with add_examples_to_path():
- import COP_maps
- with show_replaced_by_check_graphic(self):
- COP_maps.main()
-
-
-if __name__ == '__main__':
- tests.main()
diff --git a/docs/iris/example_tests/test_SOI_filtering.py b/docs/iris/example_tests/test_SOI_filtering.py
deleted file mode 100644
index ea2c48920e..0000000000
--- a/docs/iris/example_tests/test_SOI_filtering.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# (C) British Crown Copyright 2012 - 2016, Met Office
-#
-# This file is part of Iris.
-#
-# Iris is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Lesser General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Iris is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-
-# Import Iris tests first so that some things can be initialised before
-# importing anything else.
-import iris.tests as tests
-
-from .extest_util import (add_examples_to_path,
- show_replaced_by_check_graphic,
- fail_any_deprecation_warnings)
-
-
-class TestSOIFiltering(tests.GraphicsTest):
- """Test the SOI_filtering example code."""
- def test_soi_filtering(self):
- with fail_any_deprecation_warnings():
- with add_examples_to_path():
- import SOI_filtering
- with show_replaced_by_check_graphic(self):
- SOI_filtering.main()
-
-
-if __name__ == '__main__':
- tests.main()
diff --git a/docs/iris/example_tests/test_TEC.py b/docs/iris/example_tests/test_TEC.py
deleted file mode 100644
index e6e27c46bd..0000000000
--- a/docs/iris/example_tests/test_TEC.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# (C) British Crown Copyright 2010 - 2016, Met Office
-#
-# This file is part of Iris.
-#
-# Iris is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Lesser General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Iris is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-
-# Import Iris tests first so that some things can be initialised before
-# importing anything else.
-import iris.tests as tests
-
-from .extest_util import (add_examples_to_path,
- show_replaced_by_check_graphic,
- fail_any_deprecation_warnings)
-
-
-class TestTEC(tests.GraphicsTest):
- """Test the TEC example code."""
- def test_TEC(self):
- with fail_any_deprecation_warnings():
- with add_examples_to_path():
- import TEC
- with show_replaced_by_check_graphic(self):
- TEC.main()
-
-
-if __name__ == '__main__':
- tests.main()
diff --git a/docs/iris/example_tests/test_anomaly_log_colouring.py b/docs/iris/example_tests/test_anomaly_log_colouring.py
deleted file mode 100644
index faa040c0e6..0000000000
--- a/docs/iris/example_tests/test_anomaly_log_colouring.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# (C) British Crown Copyright 2014 - 2016, Met Office
-#
-# This file is part of Iris.
-#
-# Iris is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Lesser General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Iris is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-
-# Import Iris tests first so that some things can be initialised before
-# importing anything else.
-import iris.tests as tests
-
-from .extest_util import (add_examples_to_path,
- show_replaced_by_check_graphic,
- fail_any_deprecation_warnings)
-
-
-class TestAnomalyLogColouring(tests.GraphicsTest):
- """Test the anomaly colouring example code."""
- def test_anomaly_log_colouring(self):
- with fail_any_deprecation_warnings():
- with add_examples_to_path():
- import anomaly_log_colouring
- with show_replaced_by_check_graphic(self):
- anomaly_log_colouring.main()
-
-
-if __name__ == '__main__':
- tests.main()
diff --git a/docs/iris/example_tests/test_atlantic_profiles.py b/docs/iris/example_tests/test_atlantic_profiles.py
deleted file mode 100644
index 63d6b7b754..0000000000
--- a/docs/iris/example_tests/test_atlantic_profiles.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# (C) British Crown Copyright 2013 - 2016, Met Office
-#
-# This file is part of Iris.
-#
-# Iris is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Lesser General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Iris is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-
-# Import Iris tests first so that some things can be initialised before
-# importing anything else.
-import iris.tests as tests
-
-from .extest_util import (add_examples_to_path,
- show_replaced_by_check_graphic,
- fail_any_deprecation_warnings)
-
-
-class TestAtlanticProfiles(tests.GraphicsTest):
- """Test the atlantic_profiles example code."""
- def test_atlantic_profiles(self):
- with fail_any_deprecation_warnings():
- with add_examples_to_path():
- import atlantic_profiles
- with show_replaced_by_check_graphic(self):
- atlantic_profiles.main()
-
-
-if __name__ == '__main__':
- tests.main()
diff --git a/docs/iris/example_tests/test_coriolis_plot.py b/docs/iris/example_tests/test_coriolis_plot.py
deleted file mode 100644
index 71a2334488..0000000000
--- a/docs/iris/example_tests/test_coriolis_plot.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# (C) British Crown Copyright 2016, Met Office
-#
-# This file is part of Iris.
-#
-# Iris is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Lesser General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Iris is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-
-# Import Iris tests first so that some things can be initialised before
-# importing anything else.
-
-import iris.tests as tests
-
-from . import extest_util
-
-with extest_util.add_examples_to_path():
- import coriolis_plot
-
-
-class TestCoriolisPlot(tests.GraphicsTest):
- """Test the Coriolis Plot example code."""
- def test_coriolis_plot(self):
- with extest_util.show_replaced_by_check_graphic(self):
- coriolis_plot.main()
-
-
-if __name__ == '__main__':
- tests.main()
diff --git a/docs/iris/example_tests/test_cross_section.py b/docs/iris/example_tests/test_cross_section.py
deleted file mode 100644
index ad62f51b01..0000000000
--- a/docs/iris/example_tests/test_cross_section.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# (C) British Crown Copyright 2010 - 2016, Met Office
-#
-# This file is part of Iris.
-#
-# Iris is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Lesser General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Iris is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-
-# Import Iris tests first so that some things can be initialised before
-# importing anything else.
-import iris.tests as tests
-
-from .extest_util import (add_examples_to_path,
- show_replaced_by_check_graphic,
- fail_any_deprecation_warnings)
-
-
-class TestCrossSection(tests.GraphicsTest):
- """Test the cross_section example code."""
- def test_cross_section(self):
- with fail_any_deprecation_warnings():
- with add_examples_to_path():
- import cross_section
- with show_replaced_by_check_graphic(self):
- cross_section.main()
-
-
-if __name__ == '__main__':
- tests.main()
diff --git a/docs/iris/example_tests/test_custom_aggregation.py b/docs/iris/example_tests/test_custom_aggregation.py
deleted file mode 100644
index 319078a3a4..0000000000
--- a/docs/iris/example_tests/test_custom_aggregation.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# (C) British Crown Copyright 2013 - 2016, Met Office
-#
-# This file is part of Iris.
-#
-# Iris is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Lesser General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Iris is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-
-# Import Iris tests first so that some things can be initialised before
-# importing anything else.
-import iris.tests as tests
-
-from .extest_util import (add_examples_to_path,
- show_replaced_by_check_graphic,
- fail_any_deprecation_warnings)
-
-
-class TestCustomAggregation(tests.GraphicsTest):
- """Test the custom aggregation example code."""
- def test_custom_aggregation(self):
- with fail_any_deprecation_warnings():
- with add_examples_to_path():
- import custom_aggregation
- with show_replaced_by_check_graphic(self):
- custom_aggregation.main()
-
-
-if __name__ == '__main__':
- tests.main()
diff --git a/docs/iris/example_tests/test_custom_file_loading.py b/docs/iris/example_tests/test_custom_file_loading.py
deleted file mode 100644
index b0231d474e..0000000000
--- a/docs/iris/example_tests/test_custom_file_loading.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# (C) British Crown Copyright 2010 - 2016, Met Office
-#
-# This file is part of Iris.
-#
-# Iris is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Lesser General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Iris is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-
-# Import Iris tests first so that some things can be initialised before
-# importing anything else.
-import iris.tests as tests
-
-from .extest_util import (add_examples_to_path,
- show_replaced_by_check_graphic,
- fail_any_deprecation_warnings)
-
-
-class TestCustomFileLoading(tests.GraphicsTest):
- """Test the custom_file_loading example code."""
- def test_custom_file_loading(self):
- with fail_any_deprecation_warnings():
- with add_examples_to_path():
- import custom_file_loading
- with show_replaced_by_check_graphic(self):
- custom_file_loading.main()
-
-
-if __name__ == '__main__':
- tests.main()
diff --git a/docs/iris/example_tests/test_deriving_phenomena.py b/docs/iris/example_tests/test_deriving_phenomena.py
deleted file mode 100644
index bad47fe438..0000000000
--- a/docs/iris/example_tests/test_deriving_phenomena.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# (C) British Crown Copyright 2010 - 2016, Met Office
-#
-# This file is part of Iris.
-#
-# Iris is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Lesser General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Iris is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-
-# Import Iris tests first so that some things can be initialised before
-# importing anything else.
-import iris.tests as tests
-
-from .extest_util import (add_examples_to_path,
- show_replaced_by_check_graphic,
- fail_any_deprecation_warnings)
-
-
-class TestDerivingPhenomena(tests.GraphicsTest):
- """Test the deriving_phenomena example code."""
- def test_deriving_phenomena(self):
- with fail_any_deprecation_warnings():
- with add_examples_to_path():
- import deriving_phenomena
- with show_replaced_by_check_graphic(self):
- deriving_phenomena.main()
-
-
-if __name__ == '__main__':
- tests.main()
diff --git a/docs/iris/example_tests/test_global_map.py b/docs/iris/example_tests/test_global_map.py
deleted file mode 100644
index eee10d6187..0000000000
--- a/docs/iris/example_tests/test_global_map.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# (C) British Crown Copyright 2010 - 2016, Met Office
-#
-# This file is part of Iris.
-#
-# Iris is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Lesser General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Iris is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-
-# Import Iris tests first so that some things can be initialised before
-# importing anything else.
-import iris.tests as tests
-
-from .extest_util import (add_examples_to_path,
- show_replaced_by_check_graphic,
- fail_any_deprecation_warnings)
-
-
-class TestGlobalMap(tests.GraphicsTest):
- """Test the global_map example code."""
- def test_global_map(self):
- with fail_any_deprecation_warnings():
- with add_examples_to_path():
- import global_map
- with show_replaced_by_check_graphic(self):
- global_map.main()
-
-
-if __name__ == '__main__':
- tests.main()
diff --git a/docs/iris/example_tests/test_hovmoller.py b/docs/iris/example_tests/test_hovmoller.py
deleted file mode 100644
index ff0f37b1eb..0000000000
--- a/docs/iris/example_tests/test_hovmoller.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# (C) British Crown Copyright 2010 - 2016, Met Office
-#
-# This file is part of Iris.
-#
-# Iris is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Lesser General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Iris is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-
-# Import Iris tests first so that some things can be initialised before
-# importing anything else.
-import iris.tests as tests
-
-from .extest_util import (add_examples_to_path,
- show_replaced_by_check_graphic,
- fail_any_deprecation_warnings)
-
-
-class TestGlobalMap(tests.GraphicsTest):
- """Test the hovmoller example code."""
- def test_hovmoller(self):
- with fail_any_deprecation_warnings():
- with add_examples_to_path():
- import hovmoller
- with show_replaced_by_check_graphic(self):
- hovmoller.main()
-
-
-if __name__ == '__main__':
- tests.main()
diff --git a/docs/iris/example_tests/test_inset_plot.py b/docs/iris/example_tests/test_inset_plot.py
deleted file mode 100644
index 67c4eb9e66..0000000000
--- a/docs/iris/example_tests/test_inset_plot.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# (C) British Crown Copyright 2016, Met Office
-#
-# This file is part of Iris.
-#
-# Iris is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Lesser General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Iris is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-
-# Import Iris tests first so that some things can be initialised before
-# importing anything else.
-
-import iris.tests as tests
-
-from .extest_util import (add_examples_to_path,
- show_replaced_by_check_graphic,
- fail_any_deprecation_warnings)
-
-
-class TestInsetPlot(tests.GraphicsTest):
- """Test the inset plot example code."""
- def test_inset_plot(self):
- with fail_any_deprecation_warnings():
- with add_examples_to_path():
- import inset_plot
- with show_replaced_by_check_graphic(self):
- inset_plot.main()
-
-
-if __name__ == '__main__':
- tests.main()
diff --git a/docs/iris/example_tests/test_lagged_ensemble.py b/docs/iris/example_tests/test_lagged_ensemble.py
deleted file mode 100644
index faa7960dbb..0000000000
--- a/docs/iris/example_tests/test_lagged_ensemble.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# (C) British Crown Copyright 2010 - 2016, Met Office
-#
-# This file is part of Iris.
-#
-# Iris is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Lesser General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Iris is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-
-# Import Iris tests first so that some things can be initialised before
-# importing anything else.
-import iris.tests as tests
-
-from .extest_util import (add_examples_to_path,
- show_replaced_by_check_graphic,
- fail_any_deprecation_warnings)
-
-
-class TestLaggedEnsemble(tests.GraphicsTest):
- """Test the lagged ensemble example code."""
- def test_lagged_ensemble(self):
- with fail_any_deprecation_warnings():
- with add_examples_to_path():
- import lagged_ensemble
- with show_replaced_by_check_graphic(self):
- lagged_ensemble.main()
-
-
-if __name__ == '__main__':
- tests.main()
diff --git a/docs/iris/example_tests/test_lineplot_with_legend.py b/docs/iris/example_tests/test_lineplot_with_legend.py
deleted file mode 100644
index a7f9e54b6f..0000000000
--- a/docs/iris/example_tests/test_lineplot_with_legend.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# (C) British Crown Copyright 2010 - 2016, Met Office
-#
-# This file is part of Iris.
-#
-# Iris is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Lesser General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Iris is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-
-# Import Iris tests first so that some things can be initialised before
-# importing anything else.
-import iris.tests as tests
-
-from .extest_util import (add_examples_to_path,
- show_replaced_by_check_graphic,
- fail_any_deprecation_warnings)
-
-
-class TestLineplotWithLegend(tests.GraphicsTest):
- """Test the lineplot_with_legend example code."""
- def test_lineplot_with_legend(self):
- with fail_any_deprecation_warnings():
- with add_examples_to_path():
- import lineplot_with_legend
- with show_replaced_by_check_graphic(self):
- lineplot_with_legend.main()
-
-
-if __name__ == '__main__':
- tests.main()
diff --git a/docs/iris/example_tests/test_load_nemo.py b/docs/iris/example_tests/test_load_nemo.py
deleted file mode 100644
index 13785609e2..0000000000
--- a/docs/iris/example_tests/test_load_nemo.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# (C) British Crown Copyright 2019, Met Office
-#
-# This file is part of Iris.
-#
-# Iris is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Lesser General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Iris is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-
-# Import Iris tests first so that some things can be initialised before
-# importing anything else.
-import iris.tests as tests
-
-from .extest_util import (add_examples_to_path,
- show_replaced_by_check_graphic,
- fail_any_deprecation_warnings)
-
-
-class TestLoadNemo(tests.GraphicsTest):
- """Test the load_nemo example code."""
- def test_load_nemo(self):
- with fail_any_deprecation_warnings():
- with add_examples_to_path():
- import load_nemo
- with show_replaced_by_check_graphic(self):
- load_nemo.main()
-
-
-if __name__ == '__main__':
- tests.main()
diff --git a/docs/iris/example_tests/test_orca_projection.py b/docs/iris/example_tests/test_orca_projection.py
deleted file mode 100644
index 5d7b5e4114..0000000000
--- a/docs/iris/example_tests/test_orca_projection.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# (C) British Crown Copyright 2014 - 2016, Met Office
-#
-# This file is part of Iris.
-#
-# Iris is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Lesser General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Iris is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-
-# Import Iris tests first so that some things can be initialised before
-# importing anything else.
-import iris.tests as tests
-
-from .extest_util import (add_examples_to_path,
- show_replaced_by_check_graphic,
- fail_any_deprecation_warnings)
-
-
-class TestOrcaProjection(tests.GraphicsTest):
- """Test the orca projection example code."""
- def test_orca_projection(self):
- with fail_any_deprecation_warnings():
- with add_examples_to_path():
- import orca_projection
- with show_replaced_by_check_graphic(self):
- orca_projection.main()
-
-
-if __name__ == '__main__':
- tests.main()
diff --git a/docs/iris/example_tests/test_polar_stereo.py b/docs/iris/example_tests/test_polar_stereo.py
deleted file mode 100644
index c2bbc0e02b..0000000000
--- a/docs/iris/example_tests/test_polar_stereo.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# (C) British Crown Copyright 2013 - 2016, Met Office
-#
-# This file is part of Iris.
-#
-# Iris is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Lesser General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Iris is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-
-# Import Iris tests first so that some things can be initialised before
-# importing anything else.
-import iris.tests as tests
-
-from .extest_util import (add_examples_to_path,
- show_replaced_by_check_graphic,
- fail_any_deprecation_warnings)
-
-
-@tests.skip_grib
-class TestPolarStereo(tests.GraphicsTest):
- """Test the polar_stereo example code."""
- def test_polar_stereo(self):
- with fail_any_deprecation_warnings():
- with add_examples_to_path():
- import polar_stereo
- with show_replaced_by_check_graphic(self):
- polar_stereo.main()
-
-
-if __name__ == '__main__':
- tests.main()
diff --git a/docs/iris/example_tests/test_polynomial_fit.py b/docs/iris/example_tests/test_polynomial_fit.py
deleted file mode 100644
index c56834a961..0000000000
--- a/docs/iris/example_tests/test_polynomial_fit.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# (C) British Crown Copyright 2016, Met Office
-#
-# This file is part of Iris.
-#
-# Iris is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Lesser General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Iris is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-
-# Import Iris tests first so that some things can be initialised before
-# importing anything else.
-import iris.tests as tests
-
-from .extest_util import (add_examples_to_path,
- show_replaced_by_check_graphic,
- fail_any_deprecation_warnings)
-
-
-class TestPolynomialFit(tests.GraphicsTest):
- """Test the polynomial_fit example code."""
- def test_polynomial_fit(self):
- with fail_any_deprecation_warnings():
- with add_examples_to_path():
- import polynomial_fit
- with show_replaced_by_check_graphic(self):
- polynomial_fit.main()
-
-
-if __name__ == '__main__':
- tests.main()
diff --git a/docs/iris/example_tests/test_projections_and_annotations.py b/docs/iris/example_tests/test_projections_and_annotations.py
deleted file mode 100644
index adfcc45026..0000000000
--- a/docs/iris/example_tests/test_projections_and_annotations.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# (C) British Crown Copyright 2014 - 2016, Met Office
-#
-# This file is part of Iris.
-#
-# Iris is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Lesser General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Iris is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-
-# Import Iris tests first so that some things can be initialised before
-# importing anything else.
-import iris.tests as tests
-
-from .extest_util import (add_examples_to_path,
- show_replaced_by_check_graphic,
- fail_any_deprecation_warnings)
-
-
-class TestProjectionsAndAnnotations(tests.GraphicsTest):
- """Test the atlantic_profiles example code."""
- def test_projections_and_annotations(self):
- with fail_any_deprecation_warnings():
- with add_examples_to_path():
- import projections_and_annotations
- with show_replaced_by_check_graphic(self):
- projections_and_annotations.main()
-
-
-if __name__ == '__main__':
- tests.main()
diff --git a/docs/iris/example_tests/test_rotated_pole_mapping.py b/docs/iris/example_tests/test_rotated_pole_mapping.py
deleted file mode 100644
index 672ba50434..0000000000
--- a/docs/iris/example_tests/test_rotated_pole_mapping.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# (C) British Crown Copyright 2010 - 2016, Met Office
-#
-# This file is part of Iris.
-#
-# Iris is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Lesser General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Iris is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-
-# Import Iris tests first so that some things can be initialised before
-# importing anything else.
-import iris.tests as tests
-
-from .extest_util import (add_examples_to_path,
- show_replaced_by_check_graphic,
- fail_any_deprecation_warnings)
-
-
-class TestRotatedPoleMapping(tests.GraphicsTest):
- """Test the rotated_pole_mapping example code."""
- def test_rotated_pole_mapping(self):
- with fail_any_deprecation_warnings():
- with add_examples_to_path():
- import rotated_pole_mapping
- with show_replaced_by_check_graphic(self):
- rotated_pole_mapping.main()
-
-
-if __name__ == '__main__':
- tests.main()
diff --git a/docs/iris/example_tests/test_wind_speed.py b/docs/iris/example_tests/test_wind_speed.py
deleted file mode 100644
index dacf146c91..0000000000
--- a/docs/iris/example_tests/test_wind_speed.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# (C) British Crown Copyright 2010 - 2016, Met Office
-#
-# This file is part of Iris.
-#
-# Iris is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Lesser General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Iris is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-
-# Import Iris tests first so that some things can be initialised before
-# importing anything else.
-import iris.tests as tests
-
-from .extest_util import (add_examples_to_path,
- show_replaced_by_check_graphic,
- fail_any_deprecation_warnings)
-
-
-class TestWindSpeed(tests.GraphicsTest):
- """Test the wind_speed example code."""
- def test_wind_speed(self):
- with fail_any_deprecation_warnings():
- with add_examples_to_path():
- import wind_speed
- with show_replaced_by_check_graphic(self):
- wind_speed.main()
-
-
-if __name__ == '__main__':
- tests.main()
diff --git a/docs/iris/src/Makefile b/docs/iris/src/Makefile
deleted file mode 100644
index 53d224874d..0000000000
--- a/docs/iris/src/Makefile
+++ /dev/null
@@ -1,133 +0,0 @@
-# Makefile for Sphinx documentation
-#
-
-# You can set these variables from the command line.
-SPHINXOPTS =
-SPHINXBUILD = sphinx-build
-PAPER =
-BUILDDIR = ../build
-SRCDIR = ./
-
-# Internal variables.
-PAPEROPT_a4 = -D latex_paper_size=a4
-PAPEROPT_letter = -D latex_paper_size=letter
-ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
-
-.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest
-
-help:
- @echo "Please use \`make ' where is one of"
- @echo " html to make standalone HTML files"
- @echo " dirhtml to make HTML files named index.html in directories"
- @echo " singlehtml to make a single large HTML file"
- @echo " pickle to make pickle files"
- @echo " json to make JSON files"
- @echo " htmlhelp to make HTML files and a HTML help project"
- @echo " qthelp to make HTML files and a qthelp project"
- @echo " devhelp to make HTML files and a Devhelp project"
- @echo " epub to make an epub"
- @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
- @echo " latexpdf to make LaTeX files and run them through pdflatex"
- @echo " text to make text files"
- @echo " man to make manual pages"
- @echo " changes to make an overview of all changed/added/deprecated items"
- @echo " linkcheck to check all external links for integrity"
- @echo " doctest to run all doctests embedded in the documentation (if enabled)"
-
-clean:
- -rm -rf $(BUILDDIR)/*
- -rm -rf $(SRCDIR)/iris
- -rm -rf $(SRCDIR)/examples $(SRCDIR)/_templates/gallery.html $(SRCDIR)/_static/random_image.js $(SRCDIR)/_static/random.js
-
-html:
- $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
- @echo
- @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
-
-dirhtml:
- $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
- @echo
- @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
-
-singlehtml:
- $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
- @echo
- @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
-
-pickle:
- $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
- @echo
- @echo "Build finished; now you can process the pickle files."
-
-json:
- $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
- @echo
- @echo "Build finished; now you can process the JSON files."
-
-htmlhelp:
- $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
- @echo
- @echo "Build finished; now you can run HTML Help Workshop with the" \
- ".hhp project file in $(BUILDDIR)/htmlhelp."
-
-qthelp:
- $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
- @echo
- @echo "Build finished; now you can run "qcollectiongenerator" with the" \
- ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
- @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Iris.qhcp"
- @echo "To view the help file:"
- @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Iris.qhc"
-
-devhelp:
- $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
- @echo
- @echo "Build finished."
- @echo "To view the help file:"
- @echo "# mkdir -p $$HOME/.local/share/devhelp/Iris"
- @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Iris"
- @echo "# devhelp"
-
-epub:
- $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
- @echo
- @echo "Build finished. The epub file is in $(BUILDDIR)/epub."
-
-latex:
- $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
- @echo
- @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
- @echo "Run \`make' in that directory to run these through (pdf)latex" \
- "(use \`make latexpdf' here to do that automatically)."
-
-latexpdf: latex
- $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
- @echo "Running LaTeX files through pdflatex..."
- make -C $(BUILDDIR)/latex all-pdf
- @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
-
-text:
- $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
- @echo
- @echo "Build finished. The text files are in $(BUILDDIR)/text."
-
-man:
- $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
- @echo
- @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
-
-changes:
- $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
- @echo
- @echo "The overview file is in $(BUILDDIR)/changes."
-
-linkcheck:
- $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
- @echo
- @echo "Link check complete; look for any errors in the above output " \
- "or in $(BUILDDIR)/linkcheck/output.txt."
-
-doctest:
- $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
- @echo "Testing of doctests in the sources finished, look at the " \
- "results in $(BUILDDIR)/doctest/output.txt."
diff --git a/docs/iris/src/_static/Iris7_1_trim_100.png b/docs/iris/src/_static/Iris7_1_trim_100.png
deleted file mode 100644
index 330ee6e95d..0000000000
Binary files a/docs/iris/src/_static/Iris7_1_trim_100.png and /dev/null differ
diff --git a/docs/iris/src/_static/Iris7_1_trim_full.png b/docs/iris/src/_static/Iris7_1_trim_full.png
deleted file mode 100644
index ac219de136..0000000000
Binary files a/docs/iris/src/_static/Iris7_1_trim_full.png and /dev/null differ
diff --git a/docs/iris/src/_static/copybutton.js b/docs/iris/src/_static/copybutton.js
deleted file mode 100644
index 6800c3cb93..0000000000
--- a/docs/iris/src/_static/copybutton.js
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2013 PSF. Licensed under the PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
-// File originates from the cpython source found in Doc/tools/sphinxext/static/copybutton.js
-
-$(document).ready(function() {
- /* Add a [>>>] button on the top-right corner of code samples to hide
- * the >>> and ... prompts and the output and thus make the code
- * copyable. */
- var div = $('.highlight-python .highlight,' +
- '.highlight-python3 .highlight')
- var pre = div.find('pre');
-
- // get the styles from the current theme
- pre.parent().parent().css('position', 'relative');
- var hide_text = 'Hide the prompts and output';
- var show_text = 'Show the prompts and output';
- var border_width = pre.css('border-top-width');
- var border_style = pre.css('border-top-style');
- var border_color = pre.css('border-top-color');
- var button_styles = {
- 'cursor':'pointer', 'position': 'absolute', 'top': '0', 'right': '0',
- 'border-color': border_color, 'border-style': border_style,
- 'border-width': border_width, 'color': border_color, 'text-size': '75%',
- 'font-family': 'monospace', 'padding-left': '0.2em', 'padding-right': '0.2em'
- }
-
- // create and add the button to all the code blocks that contain >>>
- div.each(function(index) {
- var jthis = $(this);
- if (jthis.find('.gp').length > 0) {
- var button = $('<span class="copybutton">&gt;&gt;&gt;</span>');
- button.css(button_styles)
- button.attr('title', hide_text);
- jthis.prepend(button);
- }
- // tracebacks (.gt) contain bare text elements that need to be
- // wrapped in a span to work with .nextUntil() (see later)
- jthis.find('pre:has(.gt)').contents().filter(function() {
- return ((this.nodeType == 3) && (this.data.trim().length > 0));
- }).wrap('<span>');
- });
-
- // define the behavior of the button when it's clicked
- $('.copybutton').toggle(
- function() {
- var button = $(this);
- button.parent().find('.go, .gp, .gt').hide();
- button.next('pre').find('.gt').nextUntil('.gp, .go').css('visibility', 'hidden');
- button.css('text-decoration', 'line-through');
- button.attr('title', show_text);
- },
- function() {
- var button = $(this);
- button.parent().find('.go, .gp, .gt').show();
- button.next('pre').find('.gt').nextUntil('.gp, .go').css('visibility', 'visible');
- button.css('text-decoration', 'none');
- button.attr('title', hide_text);
- });
-});
-
diff --git a/docs/iris/src/_static/favicon-16x16.png b/docs/iris/src/_static/favicon-16x16.png
deleted file mode 100644
index ea64d21a55..0000000000
Binary files a/docs/iris/src/_static/favicon-16x16.png and /dev/null differ
diff --git a/docs/iris/src/_static/favicon-32x32.png b/docs/iris/src/_static/favicon-32x32.png
deleted file mode 100644
index 9270dd6a99..0000000000
Binary files a/docs/iris/src/_static/favicon-32x32.png and /dev/null differ
diff --git a/docs/iris/src/_static/jquery.cycle.all.latest.js b/docs/iris/src/_static/jquery.cycle.all.latest.js
deleted file mode 100644
index 75d7ab98f8..0000000000
--- a/docs/iris/src/_static/jquery.cycle.all.latest.js
+++ /dev/null
@@ -1,1331 +0,0 @@
-/*!
- * jQuery Cycle Plugin (with Transition Definitions)
- * Examples and documentation at: http://jquery.malsup.com/cycle/
- * Copyright (c) 2007-2010 M. Alsup
- * Version: 2.88 (08-JUN-2010)
- * Dual licensed under the MIT and GPL licenses.
- * http://jquery.malsup.com/license.html
- * Requires: jQuery v1.2.6 or later
- */
-;(function($) {
-
-var ver = '2.88';
-
-// if $.support is not defined (pre jQuery 1.3) add what I need
-if ($.support == undefined) {
- $.support = {
- opacity: !($.browser.msie)
- };
-}
-
-function debug(s) {
- if ($.fn.cycle.debug)
- log(s);
-}
-function log() {
- if (window.console && window.console.log)
- window.console.log('[cycle] ' + Array.prototype.join.call(arguments,' '));
-};
-
-// the options arg can be...
-// a number - indicates an immediate transition should occur to the given slide index
-// a string - 'pause', 'resume', 'toggle', 'next', 'prev', 'stop', 'destroy' or the name of a transition effect (ie, 'fade', 'zoom', etc)
-// an object - properties to control the slideshow
-//
-// the arg2 arg can be...
-// the name of an fx (only used in conjunction with a numeric value for 'options')
-// the value true (only used in first arg == 'resume') and indicates
-// that the resume should occur immediately (not wait for next timeout)
-
-$.fn.cycle = function(options, arg2) {
- var o = { s: this.selector, c: this.context };
-
- // in 1.3+ we can fix mistakes with the ready state
- if (this.length === 0 && options != 'stop') {
- if (!$.isReady && o.s) {
- log('DOM not ready, queuing slideshow');
- $(function() {
- $(o.s,o.c).cycle(options,arg2);
- });
- return this;
- }
- // is your DOM ready? http://docs.jquery.com/Tutorials:Introducing_$(document).ready()
- log('terminating; zero elements found by selector' + ($.isReady ? '' : ' (DOM not ready)'));
- return this;
- }
-
- // iterate the matched nodeset
- return this.each(function() {
- var opts = handleArguments(this, options, arg2);
- if (opts === false)
- return;
-
- opts.updateActivePagerLink = opts.updateActivePagerLink || $.fn.cycle.updateActivePagerLink;
-
- // stop existing slideshow for this container (if there is one)
- if (this.cycleTimeout)
- clearTimeout(this.cycleTimeout);
- this.cycleTimeout = this.cyclePause = 0;
-
- var $cont = $(this);
- var $slides = opts.slideExpr ? $(opts.slideExpr, this) : $cont.children();
- var els = $slides.get();
- if (els.length < 2) {
- log('terminating; too few slides: ' + els.length);
- return;
- }
-
- var opts2 = buildOptions($cont, $slides, els, opts, o);
- if (opts2 === false)
- return;
-
- var startTime = opts2.continuous ? 10 : getTimeout(els[opts2.currSlide], els[opts2.nextSlide], opts2, !opts2.rev);
-
- // if it's an auto slideshow, kick it off
- if (startTime) {
- startTime += (opts2.delay || 0);
- if (startTime < 10)
- startTime = 10;
- debug('first timeout: ' + startTime);
- this.cycleTimeout = setTimeout(function(){go(els,opts2,0,(!opts2.rev && !opts.backwards))}, startTime);
- }
- });
-};
-
-// process the args that were passed to the plugin fn
-function handleArguments(cont, options, arg2) {
- if (cont.cycleStop == undefined)
- cont.cycleStop = 0;
- if (options === undefined || options === null)
- options = {};
- if (options.constructor == String) {
- switch(options) {
- case 'destroy':
- case 'stop':
- var opts = $(cont).data('cycle.opts');
- if (!opts)
- return false;
- cont.cycleStop++; // callbacks look for change
- if (cont.cycleTimeout)
- clearTimeout(cont.cycleTimeout);
- cont.cycleTimeout = 0;
- $(cont).removeData('cycle.opts');
- if (options == 'destroy')
- destroy(opts);
- return false;
- case 'toggle':
- cont.cyclePause = (cont.cyclePause === 1) ? 0 : 1;
- checkInstantResume(cont.cyclePause, arg2, cont);
- return false;
- case 'pause':
- cont.cyclePause = 1;
- return false;
- case 'resume':
- cont.cyclePause = 0;
- checkInstantResume(false, arg2, cont);
- return false;
- case 'prev':
- case 'next':
- var opts = $(cont).data('cycle.opts');
- if (!opts) {
- log('options not found, "prev/next" ignored');
- return false;
- }
- $.fn.cycle[options](opts);
- return false;
- default:
- options = { fx: options };
- };
- return options;
- }
- else if (options.constructor == Number) {
- // go to the requested slide
- var num = options;
- options = $(cont).data('cycle.opts');
- if (!options) {
- log('options not found, can not advance slide');
- return false;
- }
- if (num < 0 || num >= options.elements.length) {
- log('invalid slide index: ' + num);
- return false;
- }
- options.nextSlide = num;
- if (cont.cycleTimeout) {
- clearTimeout(cont.cycleTimeout);
- cont.cycleTimeout = 0;
- }
- if (typeof arg2 == 'string')
- options.oneTimeFx = arg2;
- go(options.elements, options, 1, num >= options.currSlide);
- return false;
- }
- return options;
-
- function checkInstantResume(isPaused, arg2, cont) {
- if (!isPaused && arg2 === true) { // resume now!
- var options = $(cont).data('cycle.opts');
- if (!options) {
- log('options not found, can not resume');
- return false;
- }
- if (cont.cycleTimeout) {
- clearTimeout(cont.cycleTimeout);
- cont.cycleTimeout = 0;
- }
- go(options.elements, options, 1, (!opts.rev && !opts.backwards));
- }
- }
-};
-
-function removeFilter(el, opts) {
- if (!$.support.opacity && opts.cleartype && el.style.filter) {
- try { el.style.removeAttribute('filter'); }
- catch(smother) {} // handle old opera versions
- }
-};
-
-// unbind event handlers
-function destroy(opts) {
- if (opts.next)
- $(opts.next).unbind(opts.prevNextEvent);
- if (opts.prev)
- $(opts.prev).unbind(opts.prevNextEvent);
-
- if (opts.pager || opts.pagerAnchorBuilder)
- $.each(opts.pagerAnchors || [], function() {
- this.unbind().remove();
- });
- opts.pagerAnchors = null;
- if (opts.destroy) // callback
- opts.destroy(opts);
-};
-
-// one-time initialization
-function buildOptions($cont, $slides, els, options, o) {
- // support metadata plugin (v1.0 and v2.0)
- var opts = $.extend({}, $.fn.cycle.defaults, options || {}, $.metadata ? $cont.metadata() : $.meta ? $cont.data() : {});
- if (opts.autostop)
- opts.countdown = opts.autostopCount || els.length;
-
- var cont = $cont[0];
- $cont.data('cycle.opts', opts);
- opts.$cont = $cont;
- opts.stopCount = cont.cycleStop;
- opts.elements = els;
- opts.before = opts.before ? [opts.before] : [];
- opts.after = opts.after ? [opts.after] : [];
- opts.after.unshift(function(){ opts.busy=0; });
-
- // push some after callbacks
- if (!$.support.opacity && opts.cleartype)
- opts.after.push(function() { removeFilter(this, opts); });
- if (opts.continuous)
- opts.after.push(function() { go(els,opts,0,(!opts.rev && !opts.backwards)); });
-
- saveOriginalOpts(opts);
-
- // clearType corrections
- if (!$.support.opacity && opts.cleartype && !opts.cleartypeNoBg)
- clearTypeFix($slides);
-
- // container requires non-static position so that slides can be position within
- if ($cont.css('position') == 'static')
- $cont.css('position', 'relative');
- if (opts.width)
- $cont.width(opts.width);
- if (opts.height && opts.height != 'auto')
- $cont.height(opts.height);
-
- if (opts.startingSlide)
- opts.startingSlide = parseInt(opts.startingSlide);
- else if (opts.backwards)
- opts.startingSlide = els.length - 1;
-
- // if random, mix up the slide array
- if (opts.random) {
- opts.randomMap = [];
- for (var i = 0; i < els.length; i++)
- opts.randomMap.push(i);
- opts.randomMap.sort(function(a,b) {return Math.random() - 0.5;});
- opts.randomIndex = 1;
- opts.startingSlide = opts.randomMap[1];
- }
- else if (opts.startingSlide >= els.length)
- opts.startingSlide = 0; // catch bogus input
- opts.currSlide = opts.startingSlide || 0;
- var first = opts.startingSlide;
-
- // set position and zIndex on all the slides
- $slides.css({position: 'absolute', top:0, left:0}).hide().each(function(i) {
- var z;
- if (opts.backwards)
- z = first ? i <= first ? els.length + (i-first) : first-i : els.length-i;
- else
- z = first ? i >= first ? els.length - (i-first) : first-i : els.length-i;
- $(this).css('z-index', z)
- });
-
- // make sure first slide is visible
- $(els[first]).css('opacity',1).show(); // opacity bit needed to handle restart use case
- removeFilter(els[first], opts);
-
- // stretch slides
- if (opts.fit && opts.width)
- $slides.width(opts.width);
- if (opts.fit && opts.height && opts.height != 'auto')
- $slides.height(opts.height);
-
- // stretch container
- var reshape = opts.containerResize && !$cont.innerHeight();
- if (reshape) { // do this only if container has no size http://tinyurl.com/da2oa9
- var maxw = 0, maxh = 0;
- for(var j=0; j < els.length; j++) {
- var $e = $(els[j]), e = $e[0], w = $e.outerWidth(), h = $e.outerHeight();
- if (!w) w = e.offsetWidth || e.width || $e.attr('width')
- if (!h) h = e.offsetHeight || e.height || $e.attr('height');
- maxw = w > maxw ? w : maxw;
- maxh = h > maxh ? h : maxh;
- }
- if (maxw > 0 && maxh > 0)
- $cont.css({width:maxw+'px',height:maxh+'px'});
- }
-
- if (opts.pause)
- $cont.hover(function(){this.cyclePause++;},function(){this.cyclePause--;});
-
- if (supportMultiTransitions(opts) === false)
- return false;
-
- // apparently a lot of people use image slideshows without height/width attributes on the images.
- // Cycle 2.50+ requires the sizing info for every slide; this block tries to deal with that.
- var requeue = false;
- options.requeueAttempts = options.requeueAttempts || 0;
- $slides.each(function() {
- // try to get height/width of each slide
- var $el = $(this);
- this.cycleH = (opts.fit && opts.height) ? opts.height : ($el.height() || this.offsetHeight || this.height || $el.attr('height') || 0);
- this.cycleW = (opts.fit && opts.width) ? opts.width : ($el.width() || this.offsetWidth || this.width || $el.attr('width') || 0);
-
- if ( $el.is('img') ) {
- // sigh.. sniffing, hacking, shrugging... this crappy hack tries to account for what browsers do when
- // an image is being downloaded and the markup did not include sizing info (height/width attributes);
- // there seems to be some "default" sizes used in this situation
- var loadingIE = ($.browser.msie && this.cycleW == 28 && this.cycleH == 30 && !this.complete);
- var loadingFF = ($.browser.mozilla && this.cycleW == 34 && this.cycleH == 19 && !this.complete);
- var loadingOp = ($.browser.opera && ((this.cycleW == 42 && this.cycleH == 19) || (this.cycleW == 37 && this.cycleH == 17)) && !this.complete);
- var loadingOther = (this.cycleH == 0 && this.cycleW == 0 && !this.complete);
- // don't requeue for images that are still loading but have a valid size
- if (loadingIE || loadingFF || loadingOp || loadingOther) {
- if (o.s && opts.requeueOnImageNotLoaded && ++options.requeueAttempts < 100) { // track retry count so we don't loop forever
- log(options.requeueAttempts,' - img slide not loaded, requeuing slideshow: ', this.src, this.cycleW, this.cycleH);
- setTimeout(function() {$(o.s,o.c).cycle(options)}, opts.requeueTimeout);
- requeue = true;
- return false; // break each loop
- }
- else {
- log('could not determine size of image: '+this.src, this.cycleW, this.cycleH);
- }
- }
- }
- return true;
- });
-
- if (requeue)
- return false;
-
- opts.cssBefore = opts.cssBefore || {};
- opts.animIn = opts.animIn || {};
- opts.animOut = opts.animOut || {};
-
- $slides.not(':eq('+first+')').css(opts.cssBefore);
- if (opts.cssFirst)
- $($slides[first]).css(opts.cssFirst);
-
- if (opts.timeout) {
- opts.timeout = parseInt(opts.timeout);
- // ensure that timeout and speed settings are sane
- if (opts.speed.constructor == String)
- opts.speed = $.fx.speeds[opts.speed] || parseInt(opts.speed);
- if (!opts.sync)
- opts.speed = opts.speed / 2;
-
- var buffer = opts.fx == 'shuffle' ? 500 : 250;
- while((opts.timeout - opts.speed) < buffer) // sanitize timeout
- opts.timeout += opts.speed;
- }
- if (opts.easing)
- opts.easeIn = opts.easeOut = opts.easing;
- if (!opts.speedIn)
- opts.speedIn = opts.speed;
- if (!opts.speedOut)
- opts.speedOut = opts.speed;
-
- opts.slideCount = els.length;
- opts.currSlide = opts.lastSlide = first;
- if (opts.random) {
- if (++opts.randomIndex == els.length)
- opts.randomIndex = 0;
- opts.nextSlide = opts.randomMap[opts.randomIndex];
- }
- else if (opts.backwards)
- opts.nextSlide = opts.startingSlide == 0 ? (els.length-1) : opts.startingSlide-1;
- else
- opts.nextSlide = opts.startingSlide >= (els.length-1) ? 0 : opts.startingSlide+1;
-
- // run transition init fn
- if (!opts.multiFx) {
- var init = $.fn.cycle.transitions[opts.fx];
- if ($.isFunction(init))
- init($cont, $slides, opts);
- else if (opts.fx != 'custom' && !opts.multiFx) {
- log('unknown transition: ' + opts.fx,'; slideshow terminating');
- return false;
- }
- }
-
- // fire artificial events
- var e0 = $slides[first];
- if (opts.before.length)
- opts.before[0].apply(e0, [e0, e0, opts, true]);
- if (opts.after.length > 1)
- opts.after[1].apply(e0, [e0, e0, opts, true]);
-
- if (opts.next)
- $(opts.next).bind(opts.prevNextEvent,function(){return advance(opts,opts.rev?-1:1)});
- if (opts.prev)
- $(opts.prev).bind(opts.prevNextEvent,function(){return advance(opts,opts.rev?1:-1)});
- if (opts.pager || opts.pagerAnchorBuilder)
- buildPager(els,opts);
-
- exposeAddSlide(opts, els);
-
- return opts;
-};
-
-// save off original opts so we can restore after clearing state
-function saveOriginalOpts(opts) {
- opts.original = { before: [], after: [] };
- opts.original.cssBefore = $.extend({}, opts.cssBefore);
- opts.original.cssAfter = $.extend({}, opts.cssAfter);
- opts.original.animIn = $.extend({}, opts.animIn);
- opts.original.animOut = $.extend({}, opts.animOut);
- $.each(opts.before, function() { opts.original.before.push(this); });
- $.each(opts.after, function() { opts.original.after.push(this); });
-};
-
-function supportMultiTransitions(opts) {
- var i, tx, txs = $.fn.cycle.transitions;
- // look for multiple effects
- if (opts.fx.indexOf(',') > 0) {
- opts.multiFx = true;
- opts.fxs = opts.fx.replace(/\s*/g,'').split(',');
- // discard any bogus effect names
- for (i=0; i < opts.fxs.length; i++) {
- var fx = opts.fxs[i];
- tx = txs[fx];
- if (!tx || !txs.hasOwnProperty(fx) || !$.isFunction(tx)) {
- log('discarding unknown transition: ',fx);
- opts.fxs.splice(i,1);
- i--;
- }
- }
- // if we have an empty list then we threw everything away!
- if (!opts.fxs.length) {
- log('No valid transitions named; slideshow terminating.');
- return false;
- }
- }
- else if (opts.fx == 'all') { // auto-gen the list of transitions
- opts.multiFx = true;
- opts.fxs = [];
- for (p in txs) {
- tx = txs[p];
- if (txs.hasOwnProperty(p) && $.isFunction(tx))
- opts.fxs.push(p);
- }
- }
- if (opts.multiFx && opts.randomizeEffects) {
- // munge the fxs array to make effect selection random
- var r1 = Math.floor(Math.random() * 20) + 30;
- for (i = 0; i < r1; i++) {
- var r2 = Math.floor(Math.random() * opts.fxs.length);
- opts.fxs.push(opts.fxs.splice(r2,1)[0]);
- }
- debug('randomized fx sequence: ',opts.fxs);
- }
- return true;
-};
-
-// provide a mechanism for adding slides after the slideshow has started
-function exposeAddSlide(opts, els) {
- opts.addSlide = function(newSlide, prepend) {
- var $s = $(newSlide), s = $s[0];
- if (!opts.autostopCount)
- opts.countdown++;
- els[prepend?'unshift':'push'](s);
- if (opts.els)
- opts.els[prepend?'unshift':'push'](s); // shuffle needs this
- opts.slideCount = els.length;
-
- $s.css('position','absolute');
- $s[prepend?'prependTo':'appendTo'](opts.$cont);
-
- if (prepend) {
- opts.currSlide++;
- opts.nextSlide++;
- }
-
- if (!$.support.opacity && opts.cleartype && !opts.cleartypeNoBg)
- clearTypeFix($s);
-
- if (opts.fit && opts.width)
- $s.width(opts.width);
- if (opts.fit && opts.height && opts.height != 'auto')
- $slides.height(opts.height);
- s.cycleH = (opts.fit && opts.height) ? opts.height : $s.height();
- s.cycleW = (opts.fit && opts.width) ? opts.width : $s.width();
-
- $s.css(opts.cssBefore);
-
- if (opts.pager || opts.pagerAnchorBuilder)
- $.fn.cycle.createPagerAnchor(els.length-1, s, $(opts.pager), els, opts);
-
- if ($.isFunction(opts.onAddSlide))
- opts.onAddSlide($s);
- else
- $s.hide(); // default behavior
- };
-}
-
-// reset internal state; we do this on every pass in order to support multiple effects
-$.fn.cycle.resetState = function(opts, fx) {
- fx = fx || opts.fx;
- opts.before = []; opts.after = [];
- opts.cssBefore = $.extend({}, opts.original.cssBefore);
- opts.cssAfter = $.extend({}, opts.original.cssAfter);
- opts.animIn = $.extend({}, opts.original.animIn);
- opts.animOut = $.extend({}, opts.original.animOut);
- opts.fxFn = null;
- $.each(opts.original.before, function() { opts.before.push(this); });
- $.each(opts.original.after, function() { opts.after.push(this); });
-
- // re-init
- var init = $.fn.cycle.transitions[fx];
- if ($.isFunction(init))
- init(opts.$cont, $(opts.elements), opts);
-};
-
-// this is the main engine fn, it handles the timeouts, callbacks and slide index mgmt
-function go(els, opts, manual, fwd) {
- // opts.busy is true if we're in the middle of an animation
- if (manual && opts.busy && opts.manualTrump) {
-		// let manual transition requests trump active ones
- debug('manualTrump in go(), stopping active transition');
- $(els).stop(true,true);
- opts.busy = false;
- }
- // don't begin another timeout-based transition if there is one active
- if (opts.busy) {
- debug('transition active, ignoring new tx request');
- return;
- }
-
- var p = opts.$cont[0], curr = els[opts.currSlide], next = els[opts.nextSlide];
-
- // stop cycling if we have an outstanding stop request
- if (p.cycleStop != opts.stopCount || p.cycleTimeout === 0 && !manual)
- return;
-
- // check to see if we should stop cycling based on autostop options
- if (!manual && !p.cyclePause && !opts.bounce &&
- ((opts.autostop && (--opts.countdown <= 0)) ||
- (opts.nowrap && !opts.random && opts.nextSlide < opts.currSlide))) {
- if (opts.end)
- opts.end(opts);
- return;
- }
-
- // if slideshow is paused, only transition on a manual trigger
- var changed = false;
- if ((manual || !p.cyclePause) && (opts.nextSlide != opts.currSlide)) {
- changed = true;
- var fx = opts.fx;
- // keep trying to get the slide size if we don't have it yet
- curr.cycleH = curr.cycleH || $(curr).height();
- curr.cycleW = curr.cycleW || $(curr).width();
- next.cycleH = next.cycleH || $(next).height();
- next.cycleW = next.cycleW || $(next).width();
-
- // support multiple transition types
- if (opts.multiFx) {
- if (opts.lastFx == undefined || ++opts.lastFx >= opts.fxs.length)
- opts.lastFx = 0;
- fx = opts.fxs[opts.lastFx];
- opts.currFx = fx;
- }
-
- // one-time fx overrides apply to: $('div').cycle(3,'zoom');
- if (opts.oneTimeFx) {
- fx = opts.oneTimeFx;
- opts.oneTimeFx = null;
- }
-
- $.fn.cycle.resetState(opts, fx);
-
- // run the before callbacks
- if (opts.before.length)
- $.each(opts.before, function(i,o) {
- if (p.cycleStop != opts.stopCount) return;
- o.apply(next, [curr, next, opts, fwd]);
- });
-
-		// stage the after callbacks
- var after = function() {
- $.each(opts.after, function(i,o) {
- if (p.cycleStop != opts.stopCount) return;
- o.apply(next, [curr, next, opts, fwd]);
- });
- };
-
- debug('tx firing; currSlide: ' + opts.currSlide + '; nextSlide: ' + opts.nextSlide);
-
- // get ready to perform the transition
- opts.busy = 1;
- if (opts.fxFn) // fx function provided?
- opts.fxFn(curr, next, opts, after, fwd, manual && opts.fastOnEvent);
- else if ($.isFunction($.fn.cycle[opts.fx])) // fx plugin ?
- $.fn.cycle[opts.fx](curr, next, opts, after, fwd, manual && opts.fastOnEvent);
- else
- $.fn.cycle.custom(curr, next, opts, after, fwd, manual && opts.fastOnEvent);
- }
-
- if (changed || opts.nextSlide == opts.currSlide) {
- // calculate the next slide
- opts.lastSlide = opts.currSlide;
- if (opts.random) {
- opts.currSlide = opts.nextSlide;
- if (++opts.randomIndex == els.length)
- opts.randomIndex = 0;
- opts.nextSlide = opts.randomMap[opts.randomIndex];
- if (opts.nextSlide == opts.currSlide)
- opts.nextSlide = (opts.currSlide == opts.slideCount - 1) ? 0 : opts.currSlide + 1;
- }
- else if (opts.backwards) {
- var roll = (opts.nextSlide - 1) < 0;
- if (roll && opts.bounce) {
- opts.backwards = !opts.backwards;
- opts.nextSlide = 1;
- opts.currSlide = 0;
- }
- else {
- opts.nextSlide = roll ? (els.length-1) : opts.nextSlide-1;
- opts.currSlide = roll ? 0 : opts.nextSlide+1;
- }
- }
- else { // sequence
- var roll = (opts.nextSlide + 1) == els.length;
- if (roll && opts.bounce) {
- opts.backwards = !opts.backwards;
- opts.nextSlide = els.length-2;
- opts.currSlide = els.length-1;
- }
- else {
- opts.nextSlide = roll ? 0 : opts.nextSlide+1;
- opts.currSlide = roll ? els.length-1 : opts.nextSlide-1;
- }
- }
- }
- if (changed && opts.pager)
- opts.updateActivePagerLink(opts.pager, opts.currSlide, opts.activePagerClass);
-
- // stage the next transition
- var ms = 0;
- if (opts.timeout && !opts.continuous)
- ms = getTimeout(els[opts.currSlide], els[opts.nextSlide], opts, fwd);
- else if (opts.continuous && p.cyclePause) // continuous shows work off an after callback, not this timer logic
- ms = 10;
- if (ms > 0)
- p.cycleTimeout = setTimeout(function(){ go(els, opts, 0, (!opts.rev && !opts.backwards)) }, ms);
-};
-
-// invoked after transition
-$.fn.cycle.updateActivePagerLink = function(pager, currSlide, clsName) {
- $(pager).each(function() {
- $(this).children().removeClass(clsName).eq(currSlide).addClass(clsName);
- });
-};
-
-// calculate timeout value for current transition
-function getTimeout(curr, next, opts, fwd) {
- if (opts.timeoutFn) {
- // call user provided calc fn
- var t = opts.timeoutFn.call(curr,curr,next,opts,fwd);
- while ((t - opts.speed) < 250) // sanitize timeout
- t += opts.speed;
- debug('calculated timeout: ' + t + '; speed: ' + opts.speed);
- if (t !== false)
- return t;
- }
- return opts.timeout;
-};
-
-// expose next/prev function, caller must pass in state
-$.fn.cycle.next = function(opts) { advance(opts, opts.rev?-1:1); };
-$.fn.cycle.prev = function(opts) { advance(opts, opts.rev?1:-1);};
-
-// advance slide forward or back
-function advance(opts, val) {
- var els = opts.elements;
- var p = opts.$cont[0], timeout = p.cycleTimeout;
- if (timeout) {
- clearTimeout(timeout);
- p.cycleTimeout = 0;
- }
- if (opts.random && val < 0) {
-		// move back to the previously displayed slide
- opts.randomIndex--;
- if (--opts.randomIndex == -2)
- opts.randomIndex = els.length-2;
- else if (opts.randomIndex == -1)
- opts.randomIndex = els.length-1;
- opts.nextSlide = opts.randomMap[opts.randomIndex];
- }
- else if (opts.random) {
- opts.nextSlide = opts.randomMap[opts.randomIndex];
- }
- else {
- opts.nextSlide = opts.currSlide + val;
- if (opts.nextSlide < 0) {
- if (opts.nowrap) return false;
- opts.nextSlide = els.length - 1;
- }
- else if (opts.nextSlide >= els.length) {
- if (opts.nowrap) return false;
- opts.nextSlide = 0;
- }
- }
-
- var cb = opts.onPrevNextEvent || opts.prevNextClick; // prevNextClick is deprecated
- if ($.isFunction(cb))
- cb(val > 0, opts.nextSlide, els[opts.nextSlide]);
- go(els, opts, 1, val>=0);
- return false;
-};
-
-function buildPager(els, opts) {
- var $p = $(opts.pager);
- $.each(els, function(i,o) {
- $.fn.cycle.createPagerAnchor(i,o,$p,els,opts);
- });
- opts.updateActivePagerLink(opts.pager, opts.startingSlide, opts.activePagerClass);
-};
-
-$.fn.cycle.createPagerAnchor = function(i, el, $p, els, opts) {
- var a;
- if ($.isFunction(opts.pagerAnchorBuilder)) {
- a = opts.pagerAnchorBuilder(i,el);
- debug('pagerAnchorBuilder('+i+', el) returned: ' + a);
- }
- else
- a = ''+(i+1)+'';
-
- if (!a)
- return;
- var $a = $(a);
- // don't reparent if anchor is in the dom
- if ($a.parents('body').length === 0) {
- var arr = [];
- if ($p.length > 1) {
- $p.each(function() {
- var $clone = $a.clone(true);
- $(this).append($clone);
- arr.push($clone[0]);
- });
- $a = $(arr);
- }
- else {
- $a.appendTo($p);
- }
- }
-
- opts.pagerAnchors = opts.pagerAnchors || [];
- opts.pagerAnchors.push($a);
- $a.bind(opts.pagerEvent, function(e) {
- e.preventDefault();
- opts.nextSlide = i;
- var p = opts.$cont[0], timeout = p.cycleTimeout;
- if (timeout) {
- clearTimeout(timeout);
- p.cycleTimeout = 0;
- }
- var cb = opts.onPagerEvent || opts.pagerClick; // pagerClick is deprecated
- if ($.isFunction(cb))
- cb(opts.nextSlide, els[opts.nextSlide]);
- go(els,opts,1,opts.currSlide < i); // trigger the trans
-// return false; // <== allow bubble
- });
-
- if ( ! /^click/.test(opts.pagerEvent) && !opts.allowPagerClickBubble)
- $a.bind('click.cycle', function(){return false;}); // suppress click
-
- if (opts.pauseOnPagerHover)
- $a.hover(function() { opts.$cont[0].cyclePause++; }, function() { opts.$cont[0].cyclePause--; } );
-};
-
-// helper fn to calculate the number of slides between the current and the next
-$.fn.cycle.hopsFromLast = function(opts, fwd) {
- var hops, l = opts.lastSlide, c = opts.currSlide;
- if (fwd)
- hops = c > l ? c - l : opts.slideCount - l;
- else
- hops = c < l ? l - c : l + opts.slideCount - c;
- return hops;
-};
-
-// fix clearType problems in ie6 by setting an explicit bg color
-// (otherwise text slides look horrible during a fade transition)
-function clearTypeFix($slides) {
- debug('applying clearType background-color hack');
- function hex(s) {
- s = parseInt(s).toString(16);
- return s.length < 2 ? '0'+s : s;
- };
- function getBg(e) {
- for ( ; e && e.nodeName.toLowerCase() != 'html'; e = e.parentNode) {
- var v = $.css(e,'background-color');
- if (v.indexOf('rgb') >= 0 ) {
- var rgb = v.match(/\d+/g);
- return '#'+ hex(rgb[0]) + hex(rgb[1]) + hex(rgb[2]);
- }
- if (v && v != 'transparent')
- return v;
- }
- return '#ffffff';
- };
- $slides.each(function() { $(this).css('background-color', getBg(this)); });
-};
-
-// reset common props before the next transition
-$.fn.cycle.commonReset = function(curr,next,opts,w,h,rev) {
- $(opts.elements).not(curr).hide();
- opts.cssBefore.opacity = 1;
- opts.cssBefore.display = 'block';
- if (w !== false && next.cycleW > 0)
- opts.cssBefore.width = next.cycleW;
- if (h !== false && next.cycleH > 0)
- opts.cssBefore.height = next.cycleH;
- opts.cssAfter = opts.cssAfter || {};
- opts.cssAfter.display = 'none';
- $(curr).css('zIndex',opts.slideCount + (rev === true ? 1 : 0));
- $(next).css('zIndex',opts.slideCount + (rev === true ? 0 : 1));
-};
-
-// the actual fn for effecting a transition
-$.fn.cycle.custom = function(curr, next, opts, cb, fwd, speedOverride) {
- var $l = $(curr), $n = $(next);
- var speedIn = opts.speedIn, speedOut = opts.speedOut, easeIn = opts.easeIn, easeOut = opts.easeOut;
- $n.css(opts.cssBefore);
- if (speedOverride) {
- if (typeof speedOverride == 'number')
- speedIn = speedOut = speedOverride;
- else
- speedIn = speedOut = 1;
- easeIn = easeOut = null;
- }
- var fn = function() {$n.animate(opts.animIn, speedIn, easeIn, cb)};
- $l.animate(opts.animOut, speedOut, easeOut, function() {
- if (opts.cssAfter) $l.css(opts.cssAfter);
- if (!opts.sync) fn();
- });
- if (opts.sync) fn();
-};
-
-// transition definitions - only fade is defined here, transition pack defines the rest
-$.fn.cycle.transitions = {
- fade: function($cont, $slides, opts) {
- $slides.not(':eq('+opts.currSlide+')').css('opacity',0);
- opts.before.push(function(curr,next,opts) {
- $.fn.cycle.commonReset(curr,next,opts);
- opts.cssBefore.opacity = 0;
- });
- opts.animIn = { opacity: 1 };
- opts.animOut = { opacity: 0 };
- opts.cssBefore = { top: 0, left: 0 };
- }
-};
-
-$.fn.cycle.ver = function() { return ver; };
-
-// override these globally if you like (they are all optional)
-$.fn.cycle.defaults = {
- fx: 'fade', // name of transition effect (or comma separated names, ex: 'fade,scrollUp,shuffle')
- timeout: 4000, // milliseconds between slide transitions (0 to disable auto advance)
- timeoutFn: null, // callback for determining per-slide timeout value: function(currSlideElement, nextSlideElement, options, forwardFlag)
- continuous: 0, // true to start next transition immediately after current one completes
- speed: 1000, // speed of the transition (any valid fx speed value)
- speedIn: null, // speed of the 'in' transition
- speedOut: null, // speed of the 'out' transition
- next: null, // selector for element to use as event trigger for next slide
- prev: null, // selector for element to use as event trigger for previous slide
-// prevNextClick: null, // @deprecated; please use onPrevNextEvent instead
- onPrevNextEvent: null, // callback fn for prev/next events: function(isNext, zeroBasedSlideIndex, slideElement)
- prevNextEvent:'click.cycle',// event which drives the manual transition to the previous or next slide
- pager: null, // selector for element to use as pager container
- //pagerClick null, // @deprecated; please use onPagerEvent instead
- onPagerEvent: null, // callback fn for pager events: function(zeroBasedSlideIndex, slideElement)
- pagerEvent: 'click.cycle', // name of event which drives the pager navigation
- allowPagerClickBubble: false, // allows or prevents click event on pager anchors from bubbling
- pagerAnchorBuilder: null, // callback fn for building anchor links: function(index, DOMelement)
- before: null, // transition callback (scope set to element to be shown): function(currSlideElement, nextSlideElement, options, forwardFlag)
- after: null, // transition callback (scope set to element that was shown): function(currSlideElement, nextSlideElement, options, forwardFlag)
- end: null, // callback invoked when the slideshow terminates (use with autostop or nowrap options): function(options)
- easing: null, // easing method for both in and out transitions
- easeIn: null, // easing for "in" transition
- easeOut: null, // easing for "out" transition
- shuffle: null, // coords for shuffle animation, ex: { top:15, left: 200 }
- animIn: null, // properties that define how the slide animates in
- animOut: null, // properties that define how the slide animates out
- cssBefore: null, // properties that define the initial state of the slide before transitioning in
- cssAfter: null, // properties that defined the state of the slide after transitioning out
- fxFn: null, // function used to control the transition: function(currSlideElement, nextSlideElement, options, afterCalback, forwardFlag)
- height: 'auto', // container height
- startingSlide: 0, // zero-based index of the first slide to be displayed
- sync: 1, // true if in/out transitions should occur simultaneously
- random: 0, // true for random, false for sequence (not applicable to shuffle fx)
- fit: 0, // force slides to fit container
- containerResize: 1, // resize container to fit largest slide
- pause: 0, // true to enable "pause on hover"
- pauseOnPagerHover: 0, // true to pause when hovering over pager link
- autostop: 0, // true to end slideshow after X transitions (where X == slide count)
- autostopCount: 0, // number of transitions (optionally used with autostop to define X)
- delay: 0, // additional delay (in ms) for first transition (hint: can be negative)
- slideExpr: null, // expression for selecting slides (if something other than all children is required)
- cleartype: !$.support.opacity, // true if clearType corrections should be applied (for IE)
- cleartypeNoBg: false, // set to true to disable extra cleartype fixing (leave false to force background color setting on slides)
- nowrap: 0, // true to prevent slideshow from wrapping
- fastOnEvent: 0, // force fast transitions when triggered manually (via pager or prev/next); value == time in ms
- randomizeEffects: 1, // valid when multiple effects are used; true to make the effect sequence random
- rev: 0, // causes animations to transition in reverse
- manualTrump: true, // causes manual transition to stop an active transition instead of being ignored
- requeueOnImageNotLoaded: true, // requeue the slideshow if any image slides are not yet loaded
- requeueTimeout: 250, // ms delay for requeue
- activePagerClass: 'activeSlide', // class name used for the active pager link
- updateActivePagerLink: null, // callback fn invoked to update the active pager link (adds/removes activePagerClass style)
- backwards: false // true to start slideshow at last slide and move backwards through the stack
-};
-
-})(jQuery);
-
-
-/*!
- * jQuery Cycle Plugin Transition Definitions
- * This script is a plugin for the jQuery Cycle Plugin
- * Examples and documentation at: http://malsup.com/jquery/cycle/
- * Copyright (c) 2007-2010 M. Alsup
- * Version: 2.72
- * Dual licensed under the MIT and GPL licenses:
- * http://www.opensource.org/licenses/mit-license.php
- * http://www.gnu.org/licenses/gpl.html
- */
-(function($) {
-
-//
-// These functions define one-time slide initialization for the named
-// transitions. To save file size feel free to remove any of these that you
-// don't need.
-//
-$.fn.cycle.transitions.none = function($cont, $slides, opts) {
- opts.fxFn = function(curr,next,opts,after){
- $(next).show();
- $(curr).hide();
- after();
- };
-}
-
-// scrollUp/Down/Left/Right
-$.fn.cycle.transitions.scrollUp = function($cont, $slides, opts) {
- $cont.css('overflow','hidden');
- opts.before.push($.fn.cycle.commonReset);
- var h = $cont.height();
- opts.cssBefore ={ top: h, left: 0 };
- opts.cssFirst = { top: 0 };
- opts.animIn = { top: 0 };
- opts.animOut = { top: -h };
-};
-$.fn.cycle.transitions.scrollDown = function($cont, $slides, opts) {
- $cont.css('overflow','hidden');
- opts.before.push($.fn.cycle.commonReset);
- var h = $cont.height();
- opts.cssFirst = { top: 0 };
- opts.cssBefore= { top: -h, left: 0 };
- opts.animIn = { top: 0 };
- opts.animOut = { top: h };
-};
-$.fn.cycle.transitions.scrollLeft = function($cont, $slides, opts) {
- $cont.css('overflow','hidden');
- opts.before.push($.fn.cycle.commonReset);
- var w = $cont.width();
- opts.cssFirst = { left: 0 };
- opts.cssBefore= { left: w, top: 0 };
- opts.animIn = { left: 0 };
- opts.animOut = { left: 0-w };
-};
-$.fn.cycle.transitions.scrollRight = function($cont, $slides, opts) {
- $cont.css('overflow','hidden');
- opts.before.push($.fn.cycle.commonReset);
- var w = $cont.width();
- opts.cssFirst = { left: 0 };
- opts.cssBefore= { left: -w, top: 0 };
- opts.animIn = { left: 0 };
- opts.animOut = { left: w };
-};
-$.fn.cycle.transitions.scrollHorz = function($cont, $slides, opts) {
- $cont.css('overflow','hidden').width();
- opts.before.push(function(curr, next, opts, fwd) {
- $.fn.cycle.commonReset(curr,next,opts);
- opts.cssBefore.left = fwd ? (next.cycleW-1) : (1-next.cycleW);
- opts.animOut.left = fwd ? -curr.cycleW : curr.cycleW;
- });
- opts.cssFirst = { left: 0 };
- opts.cssBefore= { top: 0 };
- opts.animIn = { left: 0 };
- opts.animOut = { top: 0 };
-};
-$.fn.cycle.transitions.scrollVert = function($cont, $slides, opts) {
- $cont.css('overflow','hidden');
- opts.before.push(function(curr, next, opts, fwd) {
- $.fn.cycle.commonReset(curr,next,opts);
- opts.cssBefore.top = fwd ? (1-next.cycleH) : (next.cycleH-1);
- opts.animOut.top = fwd ? curr.cycleH : -curr.cycleH;
- });
- opts.cssFirst = { top: 0 };
- opts.cssBefore= { left: 0 };
- opts.animIn = { top: 0 };
- opts.animOut = { left: 0 };
-};
-
-// slideX/slideY
-$.fn.cycle.transitions.slideX = function($cont, $slides, opts) {
- opts.before.push(function(curr, next, opts) {
- $(opts.elements).not(curr).hide();
- $.fn.cycle.commonReset(curr,next,opts,false,true);
- opts.animIn.width = next.cycleW;
- });
- opts.cssBefore = { left: 0, top: 0, width: 0 };
- opts.animIn = { width: 'show' };
- opts.animOut = { width: 0 };
-};
-$.fn.cycle.transitions.slideY = function($cont, $slides, opts) {
- opts.before.push(function(curr, next, opts) {
- $(opts.elements).not(curr).hide();
- $.fn.cycle.commonReset(curr,next,opts,true,false);
- opts.animIn.height = next.cycleH;
- });
- opts.cssBefore = { left: 0, top: 0, height: 0 };
- opts.animIn = { height: 'show' };
- opts.animOut = { height: 0 };
-};
-
-// shuffle
-$.fn.cycle.transitions.shuffle = function($cont, $slides, opts) {
- var i, w = $cont.css('overflow', 'visible').width();
- $slides.css({left: 0, top: 0});
- opts.before.push(function(curr,next,opts) {
- $.fn.cycle.commonReset(curr,next,opts,true,true,true);
- });
- // only adjust speed once!
- if (!opts.speedAdjusted) {
- opts.speed = opts.speed / 2; // shuffle has 2 transitions
- opts.speedAdjusted = true;
- }
- opts.random = 0;
- opts.shuffle = opts.shuffle || {left:-w, top:15};
- opts.els = [];
- for (i=0; i < $slides.length; i++)
- opts.els.push($slides[i]);
-
- for (i=0; i < opts.currSlide; i++)
- opts.els.push(opts.els.shift());
-
- // custom transition fn (hat tip to Benjamin Sterling for this bit of sweetness!)
- opts.fxFn = function(curr, next, opts, cb, fwd) {
- var $el = fwd ? $(curr) : $(next);
- $(next).css(opts.cssBefore);
- var count = opts.slideCount;
- $el.animate(opts.shuffle, opts.speedIn, opts.easeIn, function() {
- var hops = $.fn.cycle.hopsFromLast(opts, fwd);
- for (var k=0; k < hops; k++)
- fwd ? opts.els.push(opts.els.shift()) : opts.els.unshift(opts.els.pop());
- if (fwd) {
- for (var i=0, len=opts.els.length; i < len; i++)
- $(opts.els[i]).css('z-index', len-i+count);
- }
- else {
- var z = $(curr).css('z-index');
- $el.css('z-index', parseInt(z)+1+count);
- }
- $el.animate({left:0, top:0}, opts.speedOut, opts.easeOut, function() {
- $(fwd ? this : curr).hide();
- if (cb) cb();
- });
- });
- };
- opts.cssBefore = { display: 'block', opacity: 1, top: 0, left: 0 };
-};
-
-// turnUp/Down/Left/Right
-$.fn.cycle.transitions.turnUp = function($cont, $slides, opts) {
- opts.before.push(function(curr, next, opts) {
- $.fn.cycle.commonReset(curr,next,opts,true,false);
- opts.cssBefore.top = next.cycleH;
- opts.animIn.height = next.cycleH;
- });
- opts.cssFirst = { top: 0 };
- opts.cssBefore = { left: 0, height: 0 };
- opts.animIn = { top: 0 };
- opts.animOut = { height: 0 };
-};
-$.fn.cycle.transitions.turnDown = function($cont, $slides, opts) {
- opts.before.push(function(curr, next, opts) {
- $.fn.cycle.commonReset(curr,next,opts,true,false);
- opts.animIn.height = next.cycleH;
- opts.animOut.top = curr.cycleH;
- });
- opts.cssFirst = { top: 0 };
- opts.cssBefore = { left: 0, top: 0, height: 0 };
- opts.animOut = { height: 0 };
-};
-$.fn.cycle.transitions.turnLeft = function($cont, $slides, opts) {
- opts.before.push(function(curr, next, opts) {
- $.fn.cycle.commonReset(curr,next,opts,false,true);
- opts.cssBefore.left = next.cycleW;
- opts.animIn.width = next.cycleW;
- });
- opts.cssBefore = { top: 0, width: 0 };
- opts.animIn = { left: 0 };
- opts.animOut = { width: 0 };
-};
-$.fn.cycle.transitions.turnRight = function($cont, $slides, opts) {
- opts.before.push(function(curr, next, opts) {
- $.fn.cycle.commonReset(curr,next,opts,false,true);
- opts.animIn.width = next.cycleW;
- opts.animOut.left = curr.cycleW;
- });
- opts.cssBefore = { top: 0, left: 0, width: 0 };
- opts.animIn = { left: 0 };
- opts.animOut = { width: 0 };
-};
-
-// zoom
-$.fn.cycle.transitions.zoom = function($cont, $slides, opts) {
- opts.before.push(function(curr, next, opts) {
- $.fn.cycle.commonReset(curr,next,opts,false,false,true);
- opts.cssBefore.top = next.cycleH/2;
- opts.cssBefore.left = next.cycleW/2;
- opts.animIn = { top: 0, left: 0, width: next.cycleW, height: next.cycleH };
- opts.animOut = { width: 0, height: 0, top: curr.cycleH/2, left: curr.cycleW/2 };
- });
- opts.cssFirst = { top:0, left: 0 };
- opts.cssBefore = { width: 0, height: 0 };
-};
-
-// fadeZoom
-$.fn.cycle.transitions.fadeZoom = function($cont, $slides, opts) {
- opts.before.push(function(curr, next, opts) {
- $.fn.cycle.commonReset(curr,next,opts,false,false);
- opts.cssBefore.left = next.cycleW/2;
- opts.cssBefore.top = next.cycleH/2;
- opts.animIn = { top: 0, left: 0, width: next.cycleW, height: next.cycleH };
- });
- opts.cssBefore = { width: 0, height: 0 };
- opts.animOut = { opacity: 0 };
-};
-
-// blindX
-$.fn.cycle.transitions.blindX = function($cont, $slides, opts) {
- var w = $cont.css('overflow','hidden').width();
- opts.before.push(function(curr, next, opts) {
- $.fn.cycle.commonReset(curr,next,opts);
- opts.animIn.width = next.cycleW;
- opts.animOut.left = curr.cycleW;
- });
- opts.cssBefore = { left: w, top: 0 };
- opts.animIn = { left: 0 };
- opts.animOut = { left: w };
-};
-// blindY
-$.fn.cycle.transitions.blindY = function($cont, $slides, opts) {
- var h = $cont.css('overflow','hidden').height();
- opts.before.push(function(curr, next, opts) {
- $.fn.cycle.commonReset(curr,next,opts);
- opts.animIn.height = next.cycleH;
- opts.animOut.top = curr.cycleH;
- });
- opts.cssBefore = { top: h, left: 0 };
- opts.animIn = { top: 0 };
- opts.animOut = { top: h };
-};
-// blindZ
-$.fn.cycle.transitions.blindZ = function($cont, $slides, opts) {
- var h = $cont.css('overflow','hidden').height();
- var w = $cont.width();
- opts.before.push(function(curr, next, opts) {
- $.fn.cycle.commonReset(curr,next,opts);
- opts.animIn.height = next.cycleH;
- opts.animOut.top = curr.cycleH;
- });
- opts.cssBefore = { top: h, left: w };
- opts.animIn = { top: 0, left: 0 };
- opts.animOut = { top: h, left: w };
-};
-
-// growX - grow horizontally from centered 0 width
-$.fn.cycle.transitions.growX = function($cont, $slides, opts) {
- opts.before.push(function(curr, next, opts) {
- $.fn.cycle.commonReset(curr,next,opts,false,true);
- opts.cssBefore.left = this.cycleW/2;
- opts.animIn = { left: 0, width: this.cycleW };
- opts.animOut = { left: 0 };
- });
- opts.cssBefore = { width: 0, top: 0 };
-};
-// growY - grow vertically from centered 0 height
-$.fn.cycle.transitions.growY = function($cont, $slides, opts) {
- opts.before.push(function(curr, next, opts) {
- $.fn.cycle.commonReset(curr,next,opts,true,false);
- opts.cssBefore.top = this.cycleH/2;
- opts.animIn = { top: 0, height: this.cycleH };
- opts.animOut = { top: 0 };
- });
- opts.cssBefore = { height: 0, left: 0 };
-};
-
-// curtainX - squeeze in both edges horizontally
-$.fn.cycle.transitions.curtainX = function($cont, $slides, opts) {
- opts.before.push(function(curr, next, opts) {
- $.fn.cycle.commonReset(curr,next,opts,false,true,true);
- opts.cssBefore.left = next.cycleW/2;
- opts.animIn = { left: 0, width: this.cycleW };
- opts.animOut = { left: curr.cycleW/2, width: 0 };
- });
- opts.cssBefore = { top: 0, width: 0 };
-};
-// curtainY - squeeze in both edges vertically
-$.fn.cycle.transitions.curtainY = function($cont, $slides, opts) {
- opts.before.push(function(curr, next, opts) {
- $.fn.cycle.commonReset(curr,next,opts,true,false,true);
- opts.cssBefore.top = next.cycleH/2;
- opts.animIn = { top: 0, height: next.cycleH };
- opts.animOut = { top: curr.cycleH/2, height: 0 };
- });
- opts.cssBefore = { left: 0, height: 0 };
-};
-
-// cover - curr slide covered by next slide
-$.fn.cycle.transitions.cover = function($cont, $slides, opts) {
- var d = opts.direction || 'left';
- var w = $cont.css('overflow','hidden').width();
- var h = $cont.height();
- opts.before.push(function(curr, next, opts) {
- $.fn.cycle.commonReset(curr,next,opts);
- if (d == 'right')
- opts.cssBefore.left = -w;
- else if (d == 'up')
- opts.cssBefore.top = h;
- else if (d == 'down')
- opts.cssBefore.top = -h;
- else
- opts.cssBefore.left = w;
- });
- opts.animIn = { left: 0, top: 0};
- opts.animOut = { opacity: 1 };
- opts.cssBefore = { top: 0, left: 0 };
-};
-
-// uncover - curr slide moves off next slide
-$.fn.cycle.transitions.uncover = function($cont, $slides, opts) {
- var d = opts.direction || 'left';
- var w = $cont.css('overflow','hidden').width();
- var h = $cont.height();
- opts.before.push(function(curr, next, opts) {
- $.fn.cycle.commonReset(curr,next,opts,true,true,true);
- if (d == 'right')
- opts.animOut.left = w;
- else if (d == 'up')
- opts.animOut.top = -h;
- else if (d == 'down')
- opts.animOut.top = h;
- else
- opts.animOut.left = -w;
- });
- opts.animIn = { left: 0, top: 0 };
- opts.animOut = { opacity: 1 };
- opts.cssBefore = { top: 0, left: 0 };
-};
-
-// toss - move top slide and fade away
-$.fn.cycle.transitions.toss = function($cont, $slides, opts) {
- var w = $cont.css('overflow','visible').width();
- var h = $cont.height();
- opts.before.push(function(curr, next, opts) {
- $.fn.cycle.commonReset(curr,next,opts,true,true,true);
- // provide default toss settings if animOut not provided
- if (!opts.animOut.left && !opts.animOut.top)
- opts.animOut = { left: w*2, top: -h/2, opacity: 0 };
- else
- opts.animOut.opacity = 0;
- });
- opts.cssBefore = { left: 0, top: 0 };
- opts.animIn = { left: 0 };
-};
-
-// wipe - clip animation
-$.fn.cycle.transitions.wipe = function($cont, $slides, opts) {
- var w = $cont.css('overflow','hidden').width();
- var h = $cont.height();
- opts.cssBefore = opts.cssBefore || {};
- var clip;
- if (opts.clip) {
- if (/l2r/.test(opts.clip))
- clip = 'rect(0px 0px '+h+'px 0px)';
- else if (/r2l/.test(opts.clip))
- clip = 'rect(0px '+w+'px '+h+'px '+w+'px)';
- else if (/t2b/.test(opts.clip))
- clip = 'rect(0px '+w+'px 0px 0px)';
- else if (/b2t/.test(opts.clip))
- clip = 'rect('+h+'px '+w+'px '+h+'px 0px)';
- else if (/zoom/.test(opts.clip)) {
- var top = parseInt(h/2);
- var left = parseInt(w/2);
- clip = 'rect('+top+'px '+left+'px '+top+'px '+left+'px)';
- }
- }
-
- opts.cssBefore.clip = opts.cssBefore.clip || clip || 'rect(0px 0px 0px 0px)';
-
- var d = opts.cssBefore.clip.match(/(\d+)/g);
- var t = parseInt(d[0]), r = parseInt(d[1]), b = parseInt(d[2]), l = parseInt(d[3]);
-
- opts.before.push(function(curr, next, opts) {
- if (curr == next) return;
- var $curr = $(curr), $next = $(next);
- $.fn.cycle.commonReset(curr,next,opts,true,true,false);
- opts.cssAfter.display = 'block';
-
- var step = 1, count = parseInt((opts.speedIn / 13)) - 1;
- (function f() {
- var tt = t ? t - parseInt(step * (t/count)) : 0;
- var ll = l ? l - parseInt(step * (l/count)) : 0;
- var bb = b < h ? b + parseInt(step * ((h-b)/count || 1)) : h;
- var rr = r < w ? r + parseInt(step * ((w-r)/count || 1)) : w;
- $next.css({ clip: 'rect('+tt+'px '+rr+'px '+bb+'px '+ll+'px)' });
- (step++ <= count) ? setTimeout(f, 13) : $curr.css('display', 'none');
- })();
- });
- opts.cssBefore = { display: 'block', opacity: 1, top: 0, left: 0 };
- opts.animIn = { left: 0 };
- opts.animOut = { left: 0 };
-};
-
-})(jQuery);
diff --git a/docs/iris/src/_static/logo_banner.png b/docs/iris/src/_static/logo_banner.png
deleted file mode 100644
index 4bec22f5dc..0000000000
Binary files a/docs/iris/src/_static/logo_banner.png and /dev/null differ
diff --git a/docs/iris/src/_static/style.css b/docs/iris/src/_static/style.css
deleted file mode 100644
index 69fa84394e..0000000000
--- a/docs/iris/src/_static/style.css
+++ /dev/null
@@ -1,99 +0,0 @@
-body {
- font-family: 'Noto Sans', sans-serif;
-}
-
-.sidebar { z-index: 10; }
-
-.highlight { background: none; }
-
-p.hr_p {
- overflow: hidden;
- text-align: center;
-}
-p.hr_p a {
- font-size: small;
- color: #1C86EE;
-}
-p.hr_p:before,
-p.hr_p:after {
- background-color: #abc;
- border: 1px solid #abc;
- content: "";
- display: inline-block;
- height: 1px;
- position: relative;
- vertical-align: middle;
- width: 50%;
-}
-p.hr_p:before {
- right: 0.5em;
- margin-left: -50%;
-}
-p.hr_p:after {
- left: 0.5em;
- margin-right: -50%;
-}
-
-.header-content {
- background-color: white;
- text-align: left;
- padding: 0px;
- height: 149px;
-}
-
-.header-content img {
- height: 100px;
- vertical-align: middle;
- float: left;
- margin: 20px 2em 0.8em 4%;
- padding: 0px;
-}
-
-.header-content .strapline {
- display: inline-block;
- width: calc(100% - 110px - 2em - 4%);
-}
-
-.strapline p {
- font-size: medium;
- font-family: 'Alike', serif;
- font-weight: bold;
- color: #444444;
- max-width: 52ch;
- margin-top: 0.25em;
-}
-
-.header-content h1 {
- font-size: 3.5rem;
- font-family: 'Alike', serif;
- margin-top: 40px;
- padding: 0px;
- color: #323232;
- padding-bottom: 0.2em;
-}
-
-.header-content h1 span.version {
- font-size: 1.5rem;
-}
-
-.github-forkme {
- position: absolute;
- top: 0;
- right: 80px;
- border: 0;
-}
-
-/* Take into account the resizing effect of the page (which has a minimum */
-/* width of 740px + 80px margins). */
-@media screen and (max-width: calc(740px + 80px + 80px)) {
- .github-forkme {
- right: calc(100% - 740px - 80px);
- }
-}
-
-@media screen and (max-width: calc(740px + 80px)) {
- .github-forkme {
- left: calc(740px + 80px - 149px);
- right: 0px;
- }
-}
diff --git a/docs/iris/src/_templates/index.html b/docs/iris/src/_templates/index.html
deleted file mode 100644
index c18f0268fa..0000000000
--- a/docs/iris/src/_templates/index.html
+++ /dev/null
@@ -1,146 +0,0 @@
-{% extends "layout.html" %}
-{% set title = 'Iris documentation homepage' %}
-{% block extrahead %}
-{{ super() }}
-
-
-
-
-
-
-
-
-{% endblock %}
-
-
-
-{% block body %}
-
-
-
-
-
- Iris implements a data model based on the CF conventions
- giving you a powerful, format-agnostic interface for working with your data.
- It excels when working with multi-dimensional Earth Science data, where tabular
- representations become unwieldy and inefficient.
-
-
- CF Standard names,
- units, and coordinate metadata
- are built into Iris, giving you a rich and expressive interface for maintaining
- an accurate representation of your data. Its treatment of data and
- associated metadata as first-class objects includes:
-
-     aggregations and reductions (including min, max, mean and weighted averages),
-
-     interpolation and regridding (including nearest-neighbor, linear and area-weighted), and
-
-     operator overloads (+, -, *, /, etc.).
-
-
- A number of file formats are recognised by Iris, including CF-compliant NetCDF, GRIB,
- and PP, and it has a plugin architecture to allow other formats to be added seamlessly.
-
-
- Building upon NumPy and
- dask,
- Iris scales from efficient single-machine workflows right through to multi-core
- clusters and HPC.
- Interoperability with packages from the wider scientific Python ecosystem comes from Iris'
- use of standard NumPy/dask arrays as its underlying data storage.
-
- A powerful, format-agnostic, community-driven Python library for analysing and
- visualising Earth science data.
-
-
-
-
-{{ super() }}
-{% endblock %}
-
-
-
-{% block footer %}
-
-
-
-
-
-{% endblock %}
diff --git a/docs/iris/src/conf.py b/docs/iris/src/conf.py
deleted file mode 100644
index 6cdfe634c4..0000000000
--- a/docs/iris/src/conf.py
+++ /dev/null
@@ -1,332 +0,0 @@
-# (C) British Crown Copyright 2010 - 2018, Met Office
-#
-# This file is part of Iris.
-#
-# Iris is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Lesser General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Iris is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-
-# -*- coding: utf-8 -*-
-#
-# Iris documentation build configuration file, created by
-# sphinx-quickstart on Tue May 25 13:26:23 2010.
-#
-# This file is execfile()d with the current directory set to its containing dir.
-#
-# Note that not all possible configuration values are present in this
-# autogenerated file.
-#
-# All configuration values have a default; values that are commented out
-# serve to show the default.
-
-import datetime
-import os
-import sys
-
-# If extensions (or modules to document with autodoc) are in another directory,
-# add these directories to sys.path here. If the directory is relative to the
-# documentation root, use os.path.abspath to make it absolute, like shown here.
-sys.path.append(os.path.abspath('sphinxext'))
-
-# add some sample files from the developers guide..
-sys.path.append(os.path.abspath(os.path.join('developers_guide')))
-
-
-# -- General configuration -----------------------------------------------------
-
-# Temporary value for use by LaTeX and 'man' output.
-# Deleted at the end of the module.
-_authors = ('Iris developers')
-
-# If your documentation needs a minimal Sphinx version, state it here.
-#needs_sphinx = '1.0'
-
-# Add any Sphinx extension module names here, as strings. They can be extensions
-# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = ['sphinx.ext.autodoc',
- 'sphinx.ext.autosummary',
- 'sphinx.ext.coverage',
- 'sphinx.ext.doctest',
- 'sphinx.ext.extlinks',
- 'sphinx.ext.graphviz',
- 'sphinx.ext.imgmath',
- 'sphinx.ext.intersphinx',
- 'matplotlib.sphinxext.mathmpl',
- 'matplotlib.sphinxext.only_directives',
- 'matplotlib.sphinxext.plot_directive',
-
- # better class documentation
- 'custom_class_autodoc',
-
- # Data instance __repr__ filter.
- 'custom_data_autodoc',
-
- 'gen_example_directory',
- 'generate_package_rst',
- 'gen_gallery',
-
- # Add labels to figures automatically
- 'auto_label_figures',
- ]
-
-# list of packages to document
-autopackage_name = ['iris']
-
-# Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
-
-# The suffix of source filenames.
-source_suffix = '.rst'
-
-# The encoding of source files.
-#source_encoding = 'utf-8-sig'
-
-# The master toctree document.
-master_doc = 'contents'
-
-# General information about the project.
-project = u'Iris'
-# define the copyright information for latex builds. Note, for html builds,
-# the copyright exists directly inside "_templates/layout.html"
-upper_copy_year = datetime.datetime.now().year
-copyright = u'British Crown Copyright 2010 - {}, Met Office'.format(upper_copy_year)
-
-# The version info for the project you're documenting, acts as replacement for
-# |version| and |release|, also used in various other places throughout the
-# built documents.
-#
-import iris
-# The short X.Y version.
-if iris.__version__ == 'dev':
- version = 'dev'
-else:
- # major.feature(.minor)-dev -> major.minor
- version = '.'.join(iris.__version__.split('-')[0].split('.')[:2])
-# The full version, including alpha/beta/rc tags.
-release = iris.__version__
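# Editor's note: a rough, hypothetical illustration of the slicing above
# (the version string is made up; it is not taken from any Iris release):
#
#     full_version = '2.1.0-dev'
#     short = '.'.join(full_version.split('-')[0].split('.')[:2])
#     # short == '2.1'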
-
-# The language for content autogenerated by Sphinx. Refer to documentation
-# for a list of supported languages.
-#language = None
-
-# There are two options for replacing |today|: either, you set today to some
-# non-false value, then it is used:
-#today = ''
-# Else, today_fmt is used as the format for a strftime call.
-#today_fmt = '%B %d, %Y'
-
-# List of patterns, relative to source directory, that match files and
-# directories to ignore when looking for source files.
-exclude_patterns = ['sphinxext', 'build']
-
-# The reST default role (used for this markup: `text`) to use for all documents.
-#default_role = None
-
-# If true, '()' will be appended to :func: etc. cross-reference text.
-#add_function_parentheses = True
-
-# If true, the current module name will be prepended to all description
-# unit titles (such as .. function::).
-#add_module_names = True
-
-# If true, sectionauthor and moduleauthor directives will be shown in the
-# output. They are ignored by default.
-#show_authors = False
-
-# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
-
-# Define the default highlight language. This also allows the >>> removal
-# javascript (copybutton.js) to function.
-highlight_language = 'default'
-
-# A list of ignored prefixes for module index sorting.
-modindex_common_prefix = ['iris']
-
-intersphinx_mapping = {
- 'cartopy': ('http://scitools.org.uk/cartopy/docs/latest/', None),
- 'iris-grib': ('http://iris-grib.readthedocs.io/en/latest/', None),
- 'matplotlib': ('http://matplotlib.org/', None),
- 'numpy': ('http://docs.scipy.org/doc/numpy/', None),
- 'python': ('http://docs.python.org/2.7', None),
- 'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
-}
-
-# -- Extlinks extension -------------------------------------------------------
-
-extlinks = {'issue': ('https://github.com/SciTools/iris/issues/%s',
- 'Issue #'),
- 'pull': ('https://github.com/SciTools/iris/pull/%s', 'PR #'),
- }
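# Editor's note: a rough illustration of how the extlinks mapping above is
# expanded; Sphinx performs this substitution itself and the issue number is
# made up:
#
#     url_template, caption = extlinks['issue']
#     url_template % '1234'   # -> 'https://github.com/SciTools/iris/issues/1234'
#     caption + '1234'        # -> 'Issue #1234'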
-
-# -- Doctest ------------------------------------------------------------------
-
-doctest_global_setup = 'import iris'
-
-# -- Autodoc ------------------------------------------------------------------
-
-autodoc_member_order = 'groupwise'
-autodoc_default_flags = ['show-inheritance']
-
-# include the __init__ method when documenting classes
-# document the init/new method at the top level of the class documentation rather than displaying the class docstring
-autoclass_content='init'
-
-# -- Options for HTML output ---------------------------------------------------
-
-# The theme to use for HTML and HTML Help pages. Major themes that come with
-# Sphinx are currently 'default' and 'sphinxdoc'.
-html_theme = 'default'
-html_theme = 'sphinxdoc'
-
-# Theme options are theme-specific and customize the look and feel of a theme
-# further. For a list of options available for each theme, see the
-# documentation.
-#html_theme_options = {}
-
-html_context = {'copyright_years': '2010 - {}'.format(upper_copy_year)}
-
-# Add any paths that contain custom themes here, relative to this directory.
-html_theme_path = []
-
-# The name for this set of Sphinx documents. If None, it defaults to
-# " v documentation".
-#html_title = None
-
-# A shorter title for the navigation bar. Default is the same as html_title.
-#html_short_title = None
-
-# The name of an image file (relative to this directory) to place at the top
-# of the sidebar.
-#html_logo = None
-
-# The name of an image file (within the static path) to use as favicon of the
-# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
-# pixels large.
-#html_favicon = None
-
-# Add any paths that contain custom static files (such as style sheets) here,
-# relative to this directory. They are copied after the builtin static files,
-# so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
-
-# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
-# using the given strftime format.
-#html_last_updated_fmt = '%b %d, %Y'
-
-# If true, SmartyPants will be used to convert quotes and dashes to
-# typographically correct entities.
-#html_use_smartypants = True
-
-# Custom sidebar templates, maps document names to template names.
-#html_sidebars = {}
-
-# Additional templates that should be rendered to pages, maps page names to
-# template names.
-html_additional_pages = {'index': 'index.html', 'gallery':'gallery.html'}
-
-# If false, no module index is generated.
-#html_domain_indices = True
-
-# If false, no index is generated.
-html_use_index = True
-
-# If true, the index is split into individual pages for each letter.
-#html_split_index = False
-
-# If true, links to the reST sources are added to the pages.
-html_show_sourcelink = True
-
-# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
-html_show_sphinx = False
-
-# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
-#html_show_copyright = True
-
-# If true, an OpenSearch description file will be output, and all pages will
-# contain a <link> tag referring to it. The value of this option must be the
-# base URL from which the finished HTML is served.
-#html_use_opensearch = ''
-
-# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = ''
-
-# Output file base name for HTML help builder.
-htmlhelp_basename = 'Irisdoc'
-
-html_use_modindex = False
-
-
-# -- Options for LaTeX output --------------------------------------------------
-
-# The paper size ('letter' or 'a4').
-#latex_paper_size = 'letter'
-
-# The font size ('10pt', '11pt' or '12pt').
-#latex_font_size = '10pt'
-
-# Grouping the document tree into LaTeX files. List of tuples
-# (source start file, target name, title, author, documentclass [howto/manual]).
-latex_documents = [
- ('contents', 'Iris.tex', u'Iris Documentation', ' \\and '.join(_authors), 'manual'),
-]
-
-# The name of an image file (relative to this directory) to place at the top of
-# the title page.
-#latex_logo = None
-
-# For "manual" documents, if this is true, then toplevel headings are parts,
-# not chapters.
-#latex_use_parts = False
-
-# If true, show page references after internal links.
-#latex_show_pagerefs = False
-
-# If true, show URL addresses after external links.
-#latex_show_urls = False
-
-# Additional stuff for the LaTeX preamble.
-#latex_preamble = ''
-
-# Documents to append as an appendix to all manuals.
-#latex_appendices = []
-
-# If false, no module index is generated.
-#latex_domain_indices = True
-latex_elements = {}
-latex_elements['docclass'] = 'MO_report'
-
-# -- Options for manual page output --------------------------------------------
-
-# One entry per manual page. List of tuples
-# (source start file, name, description, authors, manual section).
-man_pages = [
- ('index', 'iris', u'Iris Documentation', _authors, 1)
-]
-
-##########################
-# plot directive options #
-##########################
-
-plot_formats = [('png', 100),
- #('hires.png', 200), ('pdf', 250)
- ]
-
-
-
-
-
-# Delete the temporary value.
-del _authors
diff --git a/docs/iris/src/contents.rst b/docs/iris/src/contents.rst
deleted file mode 100644
index ecaf025a7a..0000000000
--- a/docs/iris/src/contents.rst
+++ /dev/null
@@ -1,32 +0,0 @@
-=====================================
-Iris documentation table of contents
-=====================================
-.. toctree::
- :maxdepth: 1
-
- installing.rst
-
-.. toctree::
- :maxdepth: 3
-
- userguide/index.rst
-
-.. toctree::
- :maxdepth: 1
- :hidden:
-
- iris/iris.rst
-
-.. toctree::
- :maxdepth: 2
-
- whatsnew/index.rst
-
-.. toctree::
- :maxdepth: 1
-
- examples/index.rst
- developers_guide/index.rst
- whitepapers/index.rst
- copyright.rst
-
diff --git a/docs/iris/src/copyright.rst b/docs/iris/src/copyright.rst
deleted file mode 100644
index ed611c5ba8..0000000000
--- a/docs/iris/src/copyright.rst
+++ /dev/null
@@ -1,51 +0,0 @@
-==========================================
-Iris copyright, licensing and contributors
-==========================================
-
-.. |copy| unicode:: U+000A9 .. COPYRIGHT SIGN
-
-Iris code
----------
-
-All Iris source code, unless explicitly stated, is |copy| ``British Crown copyright, 2014`` and
-is licensed under the **GNU Lesser General Public License** as published by the
-Free Software Foundation, either version 3 of the License, or (at your option) any later version.
-You should find all source files with the following header:
-
-.. admonition:: Code License
-
- |copy| British Crown Copyright 2010 - 2014, Met Office
-
- This file is part of Iris.
-
- Iris is free software: you can redistribute it and/or modify it under
- the terms of the GNU Lesser General Public License as published by the
- Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- Iris is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public License
-   along with Iris. If not, see `<http://www.gnu.org/licenses/>`_.
-
-
-Iris documentation and examples
--------------------------------
-
-All documentation, examples and sample data found on this website and in source repository
-are licensed under the UK's Open Government Licence:
-
-.. admonition:: Documentation, example and data license
-
- |copy| British Crown copyright, 2014.
-
- You may use and re-use the information featured on this website (not including logos) free of
- charge in any format or medium, under the terms of the
-   `Open Government Licence <http://www.nationalarchives.gov.uk/doc/open-government-licence/>`_.
- We encourage users to establish hypertext links to this website.
-
- Any email enquiries regarding the use and re-use of this information resource should be
- sent to: psi@nationalarchives.gsi.gov.uk.
diff --git a/docs/iris/src/developers_guide/deprecations.rst b/docs/iris/src/developers_guide/deprecations.rst
deleted file mode 100644
index c7a6888984..0000000000
--- a/docs/iris/src/developers_guide/deprecations.rst
+++ /dev/null
@@ -1,120 +0,0 @@
-.. _iris_development_deprecations:
-
-Deprecations
-************
-
-If you need to make a backwards-incompatible change to a public API
-[#public-api]_ that has been included in a release (e.g. deleting a
-method), then you must first deprecate the old behaviour in at least
-one release, before removing/updating it in the next
-`major release `_.
-
-
-Adding a deprecation
-====================
-
-.. _removing-a-public-api:
-
-Removing a public API
----------------------
-
-The simplest form of deprecation occurs when you need to remove a public
-API. The public API in question is deprecated for a period before it is
-removed to allow time for user code to be updated. Sometimes the
-deprecation is accompanied by the introduction of a new public API.
-
-Under these circumstances the following points apply:
-
- - Using the deprecated API must result in a concise deprecation warning which
- is an instance of :class:`iris.IrisDeprecation`.
- It is easiest to call
-   :func:`iris._deprecation.warn_deprecated`, which is a
-   simple wrapper to :func:`warnings.warn` with the signature
-   `warn_deprecated(message, **kwargs)` (a sketch follows this list).
- - Where possible, your deprecation warning should include advice on
- how to avoid using the deprecated API. For example, you might
- reference a preferred API, or more detailed documentation elsewhere.
- - You must update the docstring for the deprecated API to include a
- Sphinx deprecation directive:
-
-     :literal:`.. deprecated:: <VERSION>`
-
-     where you should replace `<VERSION>` with the major and minor version
- of Iris in which this API is first deprecated. For example: `1.8`.
-
- As with the deprecation warning, you should include advice on how to
- avoid using the deprecated API within the content of this directive.
- Feel free to include more detail in the updated docstring than in the
- deprecation warning.
- - You should check the documentation for references to the deprecated
- API and update them as appropriate.
-
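As a minimal sketch of the points above (the routine names are illustrative
only; only ``warn_deprecated`` is taken from the guidance in this list)::

    from iris._deprecation import warn_deprecated

    def old_routine(cube):
        """
        Do something to a cube (the old way).

        .. deprecated:: 1.8
            Use :func:`new_routine` instead.

        """
        warn_deprecated('old_routine is deprecated; please use '
                        'new_routine instead.')
        ...  # existing behaviour, retained until the next major release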
-Changing a default
-------------------
-
-When you need to change the default behaviour of a public API the
-situation is slightly more complex. The recommended solution is to use
-the :data:`iris.FUTURE` object. The :data:`iris.FUTURE` object provides
-boolean attributes that allow user code to control at run-time the
-default behaviour of corresponding public APIs. When a boolean attribute
-is set to `False` it causes the corresponding public API to use its
-deprecated default behaviour. When a boolean attribute is set to `True`
-it causes the corresponding public API to use its new default behaviour.
-
-The following points apply in addition to those for removing a public
-API:
-
- - You should add a new boolean attribute to :data:`iris.FUTURE` (by
- modifying :class:`iris.Future`) that controls the default behaviour
- of the public API that needs updating. The initial state of the new
- boolean attribute should be `False`. You should name the new boolean
- attribute to indicate that setting it to `True` will select the new
- default behaviour.
- - You should include a reference to this :data:`iris.FUTURE` flag in your
-   deprecation warning and corresponding Sphinx deprecation directive (a
-   sketch follows this list).
-
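A minimal sketch of such a run-time switch (the attribute name
``example_new_behaviour`` and the function are purely illustrative)::

    import iris
    from iris._deprecation import warn_deprecated

    def load_things(uri):
        if iris.FUTURE.example_new_behaviour:
            ...  # new default behaviour
        else:
            warn_deprecated('The old default behaviour is deprecated; set '
                            'iris.FUTURE.example_new_behaviour = True to '
                            'select the new behaviour.')
            ...  # deprecated default behaviour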
-
-Removing a deprecation
-======================
-
-When the time comes to make a new major release you should locate any
-deprecated APIs within the code that satisfy the one release
-minimum period described previously. Locating deprecated APIs can easily
-be done by searching for the Sphinx deprecation directives and/or
-deprecation warnings.
-
-Removing a public API
----------------------
-
-The deprecated API should be removed and any corresponding documentation
-and/or example code should be removed/updated as appropriate.
-
-.. _iris_developer_future:
-
-Changing a default
-------------------
-
- - You should update the initial state of the relevant boolean attribute
- of :data:`iris.FUTURE` to `True`.
- - You should deprecate setting the relevant boolean attribute of
- :class:`iris.Future` in the same way as described in
- :ref:`removing-a-public-api`.
-
-
-.. rubric:: Footnotes
-
-.. [#public-api] A name without a leading underscore in any of its
- components, with the exception of the :mod:`iris.experimental` and
- :mod:`iris.tests` packages.
-
- Example public names are:
- - `iris.this.`
- - `iris.this.that`
-
- Example private names are:
- - `iris._this`
- - `iris.this._that`
- - `iris._this.that`
- - `iris._this._that`
- - `iris.experimental.something`
- - `iris.tests.get_data_path`
diff --git a/docs/iris/src/developers_guide/documenting/__init__.py b/docs/iris/src/developers_guide/documenting/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/docs/iris/src/developers_guide/documenting/docstrings.rst b/docs/iris/src/developers_guide/documenting/docstrings.rst
deleted file mode 100644
index 4499f3fe34..0000000000
--- a/docs/iris/src/developers_guide/documenting/docstrings.rst
+++ /dev/null
@@ -1,77 +0,0 @@
-================
- Docstrings
-================
-
-
-Guiding principle: Every public object in the Iris package should have an appropriate docstring.
-
-This document has been influenced by the following PEPs,
- * Attribute Docstrings `PEP-224 <http://www.python.org/dev/peps/pep-0224>`_
- * Docstring Conventions `PEP-257 <http://www.python.org/dev/peps/pep-0257>`_
-
-
-For consistency, always use ``"""triple double quotes"""`` around docstrings. Use ``r"""raw triple double quotes"""`` if you use any backslashes in your docstrings. For Unicode docstrings, use ``u"""Unicode triple-quoted string"""``.
-
-All docstrings should be written in rST (reStructuredText) markup; an rST guide follows this page.
-
-There are two forms of docstrings: **single-line** and **multi-line** docstrings.
-
-
-Single-line docstrings
-======================
-The single line docstring of an object must state the *purpose* of that object, known as the *purpose section*. This terse overview must be on one line and ideally no longer than 90 characters.
-
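For example, a short utility might need nothing more than a purpose section
(the function itself is illustrative)::

    def cube_count(cubes):
        """Return the number of cubes in the given iterable."""
        return len(cubes)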
-
-Multi-line docstrings
-=====================
-Multi-line docstrings must consist of at least a purpose section akin to the single-line docstring, followed by a blank line and then any other content, as described below. The entire docstring should be indented to the same level as the quotes at the docstring's first line.
-
-
-Description
------------
-The multi-line docstring *description section* should expand on what was stated in the one line *purpose section*. The description section should try not to document *argument* and *keyword argument* details. Such information should be documented in the following *arguments and keywords section*.
-
-
-Sample multi-line docstring
----------------------------
-Here is a simple example of a standard docstring:
-
-.. literalinclude:: docstrings_sample_routine.py
-
-This would be rendered as:
-
- .. currentmodule:: documenting.docstrings_sample_routine
-
- .. automodule:: documenting.docstrings_sample_routine
- :members:
- :undoc-members:
-
-Additionally, a summary can be extracted automatically, which would result in:
-
- .. autosummary::
-
- documenting.docstrings_sample_routine.sample_routine
-
-
-Documenting classes
-===================
-The class constructor should be documented in the docstring for its ``__init__`` or ``__new__`` method. Methods should be documented by their own docstring, not in the class header itself.
-
-If a class subclasses another class and its behavior is mostly inherited from that class, its docstring should mention this and summarise the differences. Use the verb "override" to indicate that a subclass method replaces a superclass method and does not call the superclass method; use the verb "extend" to indicate that a subclass method calls the superclass method (in addition to its own behavior).
-
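-For example, with two hypothetical classes (purely illustrative, not Iris
-code), an "extend" relationship could be documented like this::
-
-    class Reader(object):
-        """Read records from a data source."""
-
-        def load(self):
-            """Load all records and return them as a list."""
-            return []
-
-
-    class CachingReader(Reader):
-        """
-        A :class:`Reader` that caches loaded records.
-
-        Behaviour is mostly inherited from :class:`Reader`; only loading
-        differs, as documented on the relevant methods.
-
-        """
-        def load(self):
-            """Extends :meth:`Reader.load` by caching its result."""
-            if not hasattr(self, '_cache'):
-                self._cache = super(CachingReader, self).load()
-            return self._cache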
-
-Attribute and Property docstrings
----------------------------------
-Here is a simple example of a class containing an attribute docstring and a property docstring:
-
-.. literalinclude:: docstrings_attribute.py
-
-This would be rendered as:
-
- .. currentmodule:: documenting.docstrings_attribute
-
- .. automodule:: documenting.docstrings_attribute
- :members:
- :undoc-members:
-
-.. note:: The purpose section of the property docstring **must** state whether the property is read-only.
diff --git a/docs/iris/src/developers_guide/documenting/docstrings_attribute.py b/docs/iris/src/developers_guide/documenting/docstrings_attribute.py
deleted file mode 100644
index 24e4eec5d1..0000000000
--- a/docs/iris/src/developers_guide/documenting/docstrings_attribute.py
+++ /dev/null
@@ -1,37 +0,0 @@
-class ExampleClass(object):
- """
- Class Summary
-
- """
- def __init__(self, arg1, arg2):
- """
- Purpose section description.
-
- Description section text.
-
- Args:
-
- * arg1 (int):
- First argument description.
- * arg2 (float):
- Second argument description.
-
- Returns:
- Boolean.
-
- """
- self.a = arg1
- 'Attribute arg1 docstring.'
- self.b = arg2
- 'Attribute arg2 docstring.'
-
- @property
- def square(self):
- """
- *(read-only)* Purpose section description.
-
- Returns:
- int.
-
- """
- return self.a*self.a
diff --git a/docs/iris/src/developers_guide/documenting/docstrings_sample_routine.py b/docs/iris/src/developers_guide/documenting/docstrings_sample_routine.py
deleted file mode 100644
index 92eec42d90..0000000000
--- a/docs/iris/src/developers_guide/documenting/docstrings_sample_routine.py
+++ /dev/null
@@ -1,26 +0,0 @@
-def sample_routine(arg1, arg2, kwarg1='foo', kwarg2=None):
- """
- Purpose section text goes here.
-
- Description section longer text goes here.
-
- Args:
-
- * arg1 (numpy.ndarray):
- First argument description.
- * arg2 (numpy.ndarray):
- Second argument description.
-
- Kwargs:
-
- * kwarg1 (string):
- The first keyword argument. This argument description
- can be multi-lined.
- * kwarg2 (Boolean or None):
- The second keyword argument.
-
- Returns:
- numpy.ndarray of arg1 * arg2
-
- """
- pass
diff --git a/docs/iris/src/developers_guide/documenting/index.rst b/docs/iris/src/developers_guide/documenting/index.rst
deleted file mode 100644
index b30a16b2a6..0000000000
--- a/docs/iris/src/developers_guide/documenting/index.rst
+++ /dev/null
@@ -1,10 +0,0 @@
-=======================
- Documentation in Iris
-=======================
-
-.. toctree::
- :maxdepth: 2
-
- docstrings.rst
- rest_guide.rst
- whats_new_contributions.rst
diff --git a/docs/iris/src/developers_guide/documenting/rest_guide.rst b/docs/iris/src/developers_guide/documenting/rest_guide.rst
deleted file mode 100644
index 8ce97a3c4a..0000000000
--- a/docs/iris/src/developers_guide/documenting/rest_guide.rst
+++ /dev/null
@@ -1,28 +0,0 @@
-===============
-reST quickstart
-===============
-
-
-reST (http://en.wikipedia.org/wiki/ReStructuredText) is a lightweight markup language intended to be highly readable in source format. This guide will cover some of the more frequently used advanced reST markup syntaxes; for the basics of reST, the following links may be useful:
-
- * http://sphinx.pocoo.org/rest.html
- * http://docs.geoserver.org/trunk/en/docguide/sphinx.html
- * http://packages.python.org/an_example_pypi_project/sphinx.html
-
-Reference documentation for reST can be found at http://docutils.sourceforge.net/rst.html.
-
-Creating links
---------------
-Basic links can be created with ```Text of the link <http://example.com>`_`` which will look like `Text of the link <http://example.com>`_
-
-
-Documents in the same project can be cross referenced with the syntax ``:doc:`document_name```. For example, to reference the "docstrings" page, ``:doc:`docstrings``` creates the following link: :doc:`docstrings`
-
-
-References can be created between sections by first making a "label" at the point you would like the link to target, ``.. _name_of_reference:``; the link itself can then be created with ``:ref:`name_of_reference``` (note the leading underscore on the label definition)
-
-
-Cross referencing other reference documentation can be achieved with the syntax ``:py:class:`zipfile.ZipFile``` which will result in links such as :py:class:`zipfile.ZipFile` and :py:class:`numpy.ndarray`.
-
-
-
diff --git a/docs/iris/src/developers_guide/documenting/whats_new_contributions.rst b/docs/iris/src/developers_guide/documenting/whats_new_contributions.rst
deleted file mode 100644
index 203a422457..0000000000
--- a/docs/iris/src/developers_guide/documenting/whats_new_contributions.rst
+++ /dev/null
@@ -1,123 +0,0 @@
-.. _whats_new_contributions:
-
-=================================
-Contributing a "What's New" entry
-=================================
-
-Iris has an aggregator for building a draft what's new document for each
-release. The draft what's new document is built from contributions by code authors.
-This means contributions to the what's new document are written by the
-developer most familiar with the change made.
-
-A contribution provides an entry in the what's new document, which describes a
-change that improved Iris in some way. This change may be a new feature in Iris
-or the fix for a bug introduced in a previous release. The contribution should
-be included as part of the Iris Pull Request that introduces the change.
-
-When a new release is prepared, the what's new contributions are combined into
-a draft what's new document for the release.
-
-
-Writing a Contribution
-======================
-
-As introduced above, a contribution is the description of a change to Iris
-which improved Iris in some way. As such, a single Iris Pull Request may
-contain multiple changes that are worth highlighting as contributions to the
-what's new document.
-
-Each contribution will ideally be written as a single concise bullet point.
-The content of the bullet point should highlight the change that has been made
-to Iris, targeting an Iris user as the audience.
-
-A contribution is a feature summary by the code author, which avoids the
-release developer having to personally review the change in detail :
-It is not in itself the final documentation content,
-so it does not have to be perfect or complete in every respect.
-
-
-Adding Contribution Files
-=========================
-
-Each release must have a directory called ``contributions_<release number>``,
-which should be created following the release of the current version of Iris. Each
-release directory must be placed in ``docs/iris/src/whatsnew/``.
-Contributions to the what's new must be written in markdown and placed into this
-directory in text files. The filename for each item should be structured as follows:
-
-``<category>_<date>_<summary>.txt``
-
-Category
---------
-The category must be one of the following:
-
-*newfeature*
- Features that are new or changed to add functionality.
-*bugfix*
- A bugfix.
-*incompatiblechange*
- A change that causes an incompatibility with prior versions of Iris.
-*deprecate*
- Deprecations of functionality.
-*docchange*
- Changes to documentation.
-
-Date
-----
-
-The date must be a hyphen-separated date in the format of:
-
- * a four digit year,
- * a three character month name, and
- * a two digit day.
-
-For example:
-
- * 2012-Jan-30
- * 2014-May-03
- * 2015-Feb-19
-
-Summary
--------
-
-The summary can be any remaining filename characters, and simply provides a
-short identifying description of the change.
-
-For example:
-
- * whats-new-aggregator
- * using_mo_pack
- * correction-to-bilinear-regrid
- * GRIB2_pdt11
-
-
-Complete Examples
------------------
-
-Some sample what's new contribution filenames:
-
- * bugfix_2015-Aug-18_partial_pp_constraints.txt
- * deprecate_2015-Nov-01_unit-module.txt
- * incompatiblechange_2015-Oct-12_GRIB_optional_Python3_unavailable.txt
- * newfeature_2015-Jul-03_pearsonr_rewrite.txt
-
-.. note::
- A test in the standard test suite ensures that all the contents of the
- latest contributions directory conform to this naming scheme.
-
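-For illustration, a minimal sketch of how such a check might look (this is
-not the actual test from the Iris suite, it simply restates the naming rule
-as a regular expression)::
-
-    import re
-
-    # <category>_<date>_<summary>.txt
-    CONTRIBUTION_PATTERN = re.compile(
-        r'(newfeature|bugfix|incompatiblechange|deprecate|docchange)'
-        r'_\d{4}-[A-Z][a-z]{2}-\d{2}'
-        r'_[\w-]+\.txt$')
-
-    def is_valid_contribution_filename(filename):
-        """Return True if the filename follows the naming scheme above."""
-        return CONTRIBUTION_PATTERN.match(filename) is not None
-
-    assert is_valid_contribution_filename(
-        'bugfix_2015-Aug-18_partial_pp_constraints.txt')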
-
-Compiling a Draft
-=================
-
-Compiling a draft from the supplied contributions should be done when preparing
-a release. Running ``docs/iris/src/whatsnew/aggregate_directory.py`` with the
-release number as the argument will create a draft what's new file named
-``<release number>.rst`` for the specified release, by aggregating the individual
-contributions from the relevant folder.
-Omitting the release number will build the latest version for which a
-contributions folder is present.
-This command fails if a file with the relevant name already exists.
-
-The resulting draft document is only a starting point, which the release
-developer will then edit to produce the final 'What's new in Iris x.x'
-documentation.
diff --git a/docs/iris/src/developers_guide/gitwash/LICENSE b/docs/iris/src/developers_guide/gitwash/LICENSE
deleted file mode 100644
index 0ea9a5957b..0000000000
--- a/docs/iris/src/developers_guide/gitwash/LICENSE
+++ /dev/null
@@ -1,34 +0,0 @@
-=========
- LICENSE
-=========
-
-We release the documents under the Creative Commons attribution license:
-http://creativecommons.org/licenses/by/3.0/
-
-We release the code under the simplified BSD license:
-
-Copyright (c) 2010, Matthew Brett
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-* Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-* Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
-IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
-TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
-PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
-TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
diff --git a/docs/iris/src/developers_guide/gitwash/branch_dropdown.png b/docs/iris/src/developers_guide/gitwash/branch_dropdown.png
deleted file mode 100644
index 1bb7a57773..0000000000
Binary files a/docs/iris/src/developers_guide/gitwash/branch_dropdown.png and /dev/null differ
diff --git a/docs/iris/src/developers_guide/gitwash/development_workflow.rst b/docs/iris/src/developers_guide/gitwash/development_workflow.rst
deleted file mode 100644
index 4da6b700ba..0000000000
--- a/docs/iris/src/developers_guide/gitwash/development_workflow.rst
+++ /dev/null
@@ -1,421 +0,0 @@
-.. _development-workflow:
-
-####################
-Development workflow
-####################
-
-You already have your own forked copy of the `iris`_ repository, by
-following :ref:`forking`. You have :ref:`set-up-fork`. You have configured
-git by following :ref:`configure-git`. Now you are ready for some real work.
-
-Workflow summary
-================
-
-In what follows we'll refer to the upstream iris ``master`` branch as
-"trunk".
-
-* Don't use your ``master`` branch for anything. Consider deleting it.
-* When you are starting a new set of changes, fetch any changes from trunk,
- and start a new *feature branch* from that.
-* Make a new branch for each separable set of changes |emdash| "one task, one
- branch" (`ipython git workflow`_).
-* Name your branch for the purpose of the changes - e.g.
- ``bugfix-for-issue-14`` or ``refactor-database-code``.
-* If you can possibly avoid it, avoid merging trunk or any other branches into
- your feature branch while you are working.
-* If you do find yourself merging from trunk, consider :ref:`rebase-on-trunk`
-* Ask on the `iris mailing list`_ if you get stuck.
-* Ask for code review!
-
-This way of working helps to keep work well organized, with readable history.
-This in turn makes it easier for project maintainers (that might be you) to see
-what you've done, and why you did it.
-
-See `linux git workflow`_ and `ipython git workflow`_ for some explanation.
-
-Consider deleting your master branch
-====================================
-
-It may sound strange, but deleting your own ``master`` branch can help reduce
-confusion about which branch you are on. See `deleting master on github`_ for
-details.
-
-.. _update-mirror-trunk:
-
-Update the mirror of trunk
-==========================
-
-First make sure you have done :ref:`linking-to-upstream`.
-
-From time to time you should fetch the upstream (trunk) changes from github::
-
- git fetch upstream
-
-This will pull down any commits you don't have, and set the remote branches to
-point to the right commit. For example, 'trunk' is the branch referred to by
-(remote/branchname) ``upstream/master`` - and if there have been commits since
-you last checked, ``upstream/master`` will change after you do the fetch.
-
-.. _make-feature-branch:
-
-Make a new feature branch
-=========================
-
-When you are ready to make some changes to the code, you should start a new
-branch. Branches that are for a collection of related edits are often called
-'feature branches'.
-
-Making a new branch for each set of related changes will make it easier for
-someone reviewing your branch to see what you are doing.
-
-Choose an informative name for the branch to remind yourself and the rest of us
-what the changes in the branch are for. For example ``add-ability-to-fly``, or
-``bugfix-for-issue-42``.
-
-::
-
- # Update the mirror of trunk
- git fetch upstream
- # Make new feature branch starting at current trunk
- git branch my-new-feature upstream/master
- git checkout my-new-feature
-
-Generally, you will want to keep your feature branches on your public github_
-fork of `iris`_. To do this, you `git push`_ this new branch up to your
-github repo. Generally (if you followed the instructions in these pages, and by
-default), git will have a link to your github repo, called ``origin``. You push
-up to your own repo on github with::
-
- git push origin my-new-feature
-
-In git >= 1.7 you can ensure that the link is correctly set by using the
-``--set-upstream`` option::
-
- git push --set-upstream origin my-new-feature
-
-From now on git will know that ``my-new-feature`` is related to the
-``my-new-feature`` branch in the github repo.
-
-.. _edit-flow:
-
-The editing workflow
-====================
-
-Overview
---------
-
-::
-
- # hack hack
- git add my_new_file
- git commit -am 'NF - some message'
- git push
-
-In more detail
---------------
-
-#. Make some changes
-#. See which files have changed with ``git status`` (see `git status`_).
- You'll see a listing like this one::
-
-     # On branch my-new-feature
-     # Changed but not updated:
-     #   (use "git add <file>..." to update what will be committed)
-     #   (use "git checkout -- <file>..." to discard changes in working directory)
-     #
-     #   modified:   README
-     #
-     # Untracked files:
-     #   (use "git add <file>..." to include in what will be committed)
-     #
-     #   INSTALL
-     no changes added to commit (use "git add" and/or "git commit -a")
-
-#. Check what the actual changes are with ``git diff`` (`git diff`_).
-#. Add any new files to version control ``git add new_file_name`` (see
- `git add`_).
-#. To commit all modified files into the local copy of your repo, do
- ``git commit -am 'A commit message'``. Note the ``-am`` options to
- ``commit``. The ``m`` flag just signals that you're going to type a
- message on the command line. The ``a`` flag |emdash| you can just take on
- faith |emdash| or see `why the -a flag?`_ |emdash| and the helpful use-case
- description in the `tangled working copy problem`_. The `git commit`_ manual
- page might also be useful.
-#. To push the changes up to your forked repo on github, do a ``git
- push`` (see `git push`_).
-
-Testing your changes
-====================
-
-Once you are happy with your changes, work through the :ref:`pr_check` and make sure
-your branch passes all the relevant tests.
-
-Ask for your changes to be reviewed or merged
-=============================================
-
-When you are ready to ask for someone to review your code and consider a merge:
-
-#. Go to the URL of your forked repo, say
- ``http://github.com/your-user-name/iris``.
-#. Use the 'Switch Branches' dropdown menu near the top left of the page to
- select the branch with your changes:
-
- .. image:: branch_dropdown.png
-
-#. Click on the 'Pull request' button:
-
- .. image:: pull_button.png
-
- Enter a title for the set of changes, and some explanation of what you've
- done. Say if there is anything you'd like particular attention for - like a
- complicated change or some code you are not happy with.
-
- If you don't think your request is ready to be merged, just say so in your
- pull request message. This is still a good way of getting some preliminary
- code review.
-
-Some other things you might want to do
-======================================
-
-Delete a branch on github
--------------------------
-
-::
-
- git checkout master
- # delete branch locally
- git branch -D my-unwanted-branch
- # delete branch on github
- git push origin :my-unwanted-branch
-
-(Note the colon ``:`` before ``my-unwanted-branch``. See also:
-http://github.com/guides/remove-a-remote-branch)
-
-Several people sharing a single repository
-------------------------------------------
-
-If you want to work on some stuff with other people, where you are all
-committing into the same repository, or even the same branch, then just
-share it via github.
-
-First fork iris into your account, as from :ref:`forking`.
-
-Then, go to your forked repository github page, say
-``http://github.com/your-user-name/iris``
-
-Click on the 'Admin' button, and add anyone else to the repo as a
-collaborator:
-
- .. image:: pull_button.png
-
-Now all those people can do::
-
-    git clone git@github.com:your-user-name/iris.git
-
-Remember that links starting with ``git@`` use the ssh protocol and are
-read-write; links starting with ``git://`` are read-only.
-
-Your collaborators can then commit directly into that repo with the
-usual::
-
- git commit -am 'ENH - much better code'
- git push origin master # pushes directly into your repo
-
-Explore your repository
------------------------
-
-To see a graphical representation of the repository branches and
-commits::
-
- gitk --all
-
-To see a linear list of commits for this branch::
-
- git log
-
-You can also look at the `network graph visualizer`_ for your github
-repo.
-
-Finally the :ref:`fancy-log` ``lg`` alias will give you a reasonable text-based
-graph of the repository.
-
-.. _rebase-on-trunk:
-
-Rebasing on trunk
------------------
-
-Let's say you thought of some work you'd like to do. You
-:ref:`update-mirror-trunk` and :ref:`make-feature-branch` called
-``cool-feature``. At this stage trunk is at some commit, let's call it E. Now
-you make some new commits on your ``cool-feature`` branch, let's call them A, B,
-C. Maybe your changes take a while, or you come back to them after a while. In
-the meantime, trunk has progressed from commit E to commit (say) G::
-
- A---B---C cool-feature
- /
- D---E---F---G trunk
-
-At this stage you consider merging trunk into your feature branch, and you
-remember that this here page sternly advises you not to do that, because the
-history will get messy. Most of the time you can just ask for a review, and not
-worry that trunk has got a little ahead. But sometimes, the changes in trunk
-might affect your changes, and you need to harmonize them. In this situation
-you may prefer to do a rebase.
-
-rebase takes your changes (A, B, C) and replays them as if they had been made to
-the current state of ``trunk``. In other words, in this case, it takes the
-changes represented by A, B, C and replays them on top of G. After the rebase,
-your history will look like this::
-
- A'--B'--C' cool-feature
- /
- D---E---F---G trunk
-
-See `rebase without tears`_ for more detail.
-
-To do a rebase on trunk::
-
- # Update the mirror of trunk
- git fetch upstream
- # go to the feature branch
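-As a rough, illustrative sketch of the image-hash comparison described above
-(this is not the actual ``check_graphic`` implementation; the function name
-and tolerance value are made up)::
-
-    import imagehash
-    from PIL import Image
-
-    def phash_matches(result_png, accepted_hashes, tolerance=2):
-        """Return True if the plot matches one of the accepted hashes.
-
-        `accepted_hashes` is an iterable of hash strings, e.g. as stored in
-        ``lib/iris/tests/results/imagerepo.json``.
-
-        """
-        result_hash = imagehash.phash(Image.open(result_png))
-        return any(
-            result_hash - imagehash.hex_to_hash(accepted) <= tolerance
-            for accepted in accepted_hashes)
-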
- git checkout cool-feature
- # make a backup in case you mess up
- git branch tmp cool-feature
- # rebase cool-feature onto trunk
- git rebase --onto upstream/master upstream/master cool-feature
-
-In this situation, where you are already on branch ``cool-feature``, the last
-command can be written more succinctly as::
-
- git rebase upstream/master
-
-When all looks good you can delete your backup branch::
-
- git branch -D tmp
-
-If it doesn't look good you may need to have a look at
-:ref:`recovering-from-mess-up`.
-
-If you have made changes to files that have also changed in trunk, this may
-generate merge conflicts that you need to resolve - see the `git rebase`_ man
-page for some instructions at the end of the "Description" section. There is
-some related help on merging in the git user manual - see `resolving a merge`_.
-
-.. _recovering-from-mess-up:
-
-Recovering from mess-ups
-------------------------
-
-Sometimes, you mess up merges or rebases. Luckily, in git it is
-relatively straightforward to recover from such mistakes.
-
-If you mess up during a rebase::
-
- git rebase --abort
-
-If you notice you messed up after the rebase::
-
- # reset branch back to the saved point
- git reset --hard tmp
-
-If you forgot to make a backup branch::
-
- # look at the reflog of the branch
- git reflog show cool-feature
-
- 8630830 cool-feature@{0}: commit: BUG: io: close file handles immediately
- 278dd2a cool-feature@{1}: rebase finished: refs/heads/my-feature-branch onto 11ee694744f2552d
- 26aa21a cool-feature@{2}: commit: BUG: lib: make seek_gzip_factory not leak gzip obj
- ...
-
- # reset the branch to where it was before the botched rebase
- git reset --hard cool-feature@{2}
-
-.. _rewriting-commit-history:
-
-Rewriting commit history
-------------------------
-
-.. note::
-
- Do this only for your own feature branches.
-
-There's an embarrassing typo in a commit you made? Or perhaps you made
-several false starts that you would like posterity not to see.
-
-This can be done via *interactive rebasing*.
-
-Suppose that the commit history looks like this::
-
- git log --oneline
- eadc391 Fix some remaining bugs
- a815645 Modify it so that it works
- 2dec1ac Fix a few bugs + disable
- 13d7934 First implementation
- 6ad92e5 * masked is now an instance of a new object, MaskedConstant
- 29001ed Add pre-nep for a copule of structured_array_extensions.
- ...
-
-and ``6ad92e5`` is the last commit in the ``cool-feature`` branch. Suppose we
-want to make the following changes:
-
-* Rewrite the commit message for ``13d7934`` to something more sensible.
-* Combine the commits ``2dec1ac``, ``a815645``, ``eadc391`` into a single one.
-
-We do as follows::
-
- # make a backup of the current state
- git branch tmp HEAD
- # interactive rebase
- git rebase -i 6ad92e5
-
-This will open an editor with the following text in it::
-
- pick 13d7934 First implementation
- pick 2dec1ac Fix a few bugs + disable
- pick a815645 Modify it so that it works
- pick eadc391 Fix some remaining bugs
-
- # Rebase 6ad92e5..eadc391 onto 6ad92e5
- #
- # Commands:
- # p, pick = use commit
- # r, reword = use commit, but edit the commit message
- # e, edit = use commit, but stop for amending
- # s, squash = use commit, but meld into previous commit
- # f, fixup = like "squash", but discard this commit's log message
- #
- # If you remove a line here THAT COMMIT WILL BE LOST.
- # However, if you remove everything, the rebase will be aborted.
- #
-
-To achieve what we want, we will make the following changes to it::
-
- r 13d7934 First implementation
- pick 2dec1ac Fix a few bugs + disable
- f a815645 Modify it so that it works
- f eadc391 Fix some remaining bugs
-
-This means that (i) we want to edit the commit message for
-``13d7934``, and (ii) collapse the last three commits into one. Now we
-save and quit the editor.
-
-Git will then immediately bring up an editor for editing the commit
-message. After revising it, we get the output::
-
- [detached HEAD 721fc64] FOO: First implementation
- 2 files changed, 199 insertions(+), 66 deletions(-)
- [detached HEAD 0f22701] Fix a few bugs + disable
- 1 files changed, 79 insertions(+), 61 deletions(-)
- Successfully rebased and updated refs/heads/my-feature-branch.
-
-and the history looks now like this::
-
- 0f22701 Fix a few bugs + disable
- 721fc64 ENH: Sophisticated feature
- 6ad92e5 * masked is now an instance of a new object, MaskedConstant
-
-If it went wrong, recovery is again possible as explained
-:ref:`above <recovering-from-mess-up>`.
-
-.. include:: links.inc
diff --git a/docs/iris/src/developers_guide/gitwash/forking_button.png b/docs/iris/src/developers_guide/gitwash/forking_button.png
deleted file mode 100644
index d0e04134d4..0000000000
Binary files a/docs/iris/src/developers_guide/gitwash/forking_button.png and /dev/null differ
diff --git a/docs/iris/src/developers_guide/gitwash/forking_hell.rst b/docs/iris/src/developers_guide/gitwash/forking_hell.rst
deleted file mode 100644
index 2b38c02736..0000000000
--- a/docs/iris/src/developers_guide/gitwash/forking_hell.rst
+++ /dev/null
@@ -1,33 +0,0 @@
-.. _forking:
-
-======================================================
-Making your own copy (fork) of iris
-======================================================
-
-You need to do this only once. The instructions here are very similar
-to the instructions at http://help.github.com/forking/ |emdash| please see
-that page for more detail. We're repeating some of it here just to give the
-specifics for the `iris`_ project, and to suggest some default names.
-
-Set up and configure a github account
-=====================================
-
-If you don't have a github account, go to the github page, and make one.
-
-You then need to configure your account to allow write access |emdash| see
-the ``Generating SSH keys`` help on `github help`_.
-
-Create your own forked copy of `iris`_
-======================================================
-
-#. Log into your github account.
-#. Go to the `iris`_ github home at `iris github`_.
-#. Click on the *fork* button:
-
- .. image:: forking_button.png
-
- Now, after a short pause and some 'Hardcore forking action', you
- should find yourself at the home page for your own forked copy of `iris`_.
-
-.. include:: links.inc
-
diff --git a/docs/iris/src/developers_guide/gitwash/git_development.rst b/docs/iris/src/developers_guide/gitwash/git_development.rst
deleted file mode 100644
index c5b910d863..0000000000
--- a/docs/iris/src/developers_guide/gitwash/git_development.rst
+++ /dev/null
@@ -1,16 +0,0 @@
-.. _git-development:
-
-=====================
- Git for development
-=====================
-
-Contents:
-
-.. toctree::
- :maxdepth: 2
-
- forking_hell
- set_up_fork
- configure_git
- development_workflow
- maintainer_workflow
diff --git a/docs/iris/src/developers_guide/gitwash/git_install.rst b/docs/iris/src/developers_guide/gitwash/git_install.rst
deleted file mode 100644
index 3be5149b90..0000000000
--- a/docs/iris/src/developers_guide/gitwash/git_install.rst
+++ /dev/null
@@ -1,26 +0,0 @@
-.. _install-git:
-
-=============
- Install git
-=============
-
-Overview
-========
-
-=================  ==============================
-Debian / Ubuntu    ``sudo apt-get install git``
-Fedora             ``sudo yum install git``
-Windows            Download and install msysGit_
-OS X               Use the git-osx-installer_
-=================  ==============================
-
-In detail
-=========
-
-See the git page for the most recent information.
-
-Have a look at the github install help pages available from `github help`_
-
-There are good instructions here: http://book.git-scm.com/2_installing_git.html
-
-.. include:: links.inc
diff --git a/docs/iris/src/developers_guide/gitwash/git_intro.rst b/docs/iris/src/developers_guide/gitwash/git_intro.rst
deleted file mode 100644
index 486e1c6c08..0000000000
--- a/docs/iris/src/developers_guide/gitwash/git_intro.rst
+++ /dev/null
@@ -1,18 +0,0 @@
-==============
- Introduction
-==============
-
-These pages describe a git_ and github_ workflow for the `iris`_
-project.
-
-There are several different workflows here, for different ways of
-working with *iris*.
-
-This is not a comprehensive git reference, it's just a workflow for our
-own project. It's tailored to the github hosting service. You may well
-find better or quicker ways of getting stuff done with git, but these
-should get you started.
-
-For general resources for learning git, see :ref:`git-resources`.
-
-.. include:: links.inc
diff --git a/docs/iris/src/developers_guide/gitwash/git_links.inc b/docs/iris/src/developers_guide/gitwash/git_links.inc
deleted file mode 100644
index 8e628ae19e..0000000000
--- a/docs/iris/src/developers_guide/gitwash/git_links.inc
+++ /dev/null
@@ -1,61 +0,0 @@
-.. This (-*- rst -*-) format file contains commonly used link targets
- and name substitutions. It may be included in many files,
- therefore it should only contain link targets and name
- substitutions. Try grepping for "^\.\. _" to find plausible
- candidates for this list.
-
-.. NOTE: reST targets are
- __not_case_sensitive__, so only one target definition is needed for
- nipy, NIPY, Nipy, etc...
-
-.. git stuff
-.. _git: http://git-scm.com/
-.. _github: http://github.com
-.. _github help: http://help.github.com
-.. _msysgit: http://code.google.com/p/msysgit/downloads/list
-.. _git-osx-installer: http://code.google.com/p/git-osx-installer/downloads/list
-.. _subversion: http://subversion.tigris.org/
-.. _git cheat sheet: http://github.com/guides/git-cheat-sheet
-.. _pro git book: http://progit.org/
-.. _git svn crash course: http://git-scm.com/course/svn.html
-.. _learn.github: http://learn.github.com/
-.. _network graph visualizer: http://github.com/blog/39-say-hello-to-the-network-graph-visualizer
-.. _git user manual: http://schacon.github.com/git/user-manual.html
-.. _git tutorial: http://schacon.github.com/git/gittutorial.html
-.. _git community book: http://book.git-scm.com/
-.. _git ready: http://www.gitready.com/
-.. _git casts: http://www.gitcasts.com/
-.. _Fernando's git page: http://www.fperez.org/py4science/git.html
-.. _git magic: http://www-cs-students.stanford.edu/~blynn/gitmagic/index.html
-.. _git concepts: http://www.eecs.harvard.edu/~cduan/technical/git/
-.. _git clone: http://schacon.github.com/git/git-clone.html
-.. _git checkout: http://schacon.github.com/git/git-checkout.html
-.. _git commit: http://schacon.github.com/git/git-commit.html
-.. _git push: http://schacon.github.com/git/git-push.html
-.. _git pull: http://schacon.github.com/git/git-pull.html
-.. _git add: http://schacon.github.com/git/git-add.html
-.. _git status: http://schacon.github.com/git/git-status.html
-.. _git diff: http://schacon.github.com/git/git-diff.html
-.. _git log: http://schacon.github.com/git/git-log.html
-.. _git branch: http://schacon.github.com/git/git-branch.html
-.. _git remote: http://schacon.github.com/git/git-remote.html
-.. _git rebase: http://schacon.github.com/git/git-rebase.html
-.. _git config: http://schacon.github.com/git/git-config.html
-.. _why the -a flag?: http://www.gitready.com/beginner/2009/01/18/the-staging-area.html
-.. _git staging area: http://www.gitready.com/beginner/2009/01/18/the-staging-area.html
-.. _tangled working copy problem: http://tomayko.com/writings/the-thing-about-git
-.. _git management: http://kerneltrap.org/Linux/Git_Management
-.. _linux git workflow: http://www.mail-archive.com/dri-devel@lists.sourceforge.net/msg39091.html
-.. _git parable: http://tom.preston-werner.com/2009/05/19/the-git-parable.html
-.. _git foundation: http://matthew-brett.github.com/pydagogue/foundation.html
-.. _deleting master on github: http://matthew-brett.github.com/pydagogue/gh_delete_master.html
-.. _rebase without tears: http://matthew-brett.github.com/pydagogue/rebase_without_tears.html
-.. _resolving a merge: http://schacon.github.com/git/user-manual.html#resolving-a-merge
-.. _ipython git workflow: http://mail.scipy.org/pipermail/ipython-dev/2010-October/006746.html
-
-.. other stuff
-.. _python: http://www.python.org
-
-.. |emdash| unicode:: U+02014
-
-.. vim: ft=rst
diff --git a/docs/iris/src/developers_guide/gitwash/git_resources.rst b/docs/iris/src/developers_guide/gitwash/git_resources.rst
deleted file mode 100644
index d18b0ef48b..0000000000
--- a/docs/iris/src/developers_guide/gitwash/git_resources.rst
+++ /dev/null
@@ -1,59 +0,0 @@
-.. _git-resources:
-
-=============
-git resources
-=============
-
-Tutorials and summaries
-=======================
-
-* `github help`_ has an excellent series of how-to guides.
-* `learn.github`_ has an excellent series of tutorials
-* The `pro git book`_ is a good in-depth book on git.
-* A `git cheat sheet`_ is a page giving summaries of common commands.
-* The `git user manual`_
-* The `git tutorial`_
-* The `git community book`_
-* `git ready`_ |emdash| a nice series of tutorials
-* `git casts`_ |emdash| video snippets giving git how-tos.
-* `git magic`_ |emdash| extended introduction with intermediate detail
-* The `git parable`_ is an easy read explaining the concepts behind git.
-* `git foundation`_ expands on the `git parable`_.
-* Fernando Perez' git page |emdash| `Fernando's git page`_ |emdash| many
- links and tips
-* A good but technical page on `git concepts`_
-* `git svn crash course`_: git for those of us used to subversion_
-
-Advanced git workflow
-=====================
-
-There are many ways of working with git; here are some posts on the
-rules of thumb that other projects have come up with:
-
-* Linus Torvalds on `git management`_
-* Linus Torvalds on `linux git workflow`_. Summary: use the git tools
- to make the history of your edits as clean as possible; merge from
- upstream edits as little as possible in branches where you are doing
- active development.
-
-Manual pages online
-===================
-
-You can get these on your own machine with (e.g) ``git help push`` or
-(same thing) ``git push --help``, but, for convenience, here are the
-online manual pages for some common commands:
-
-* `git add`_
-* `git branch`_
-* `git checkout`_
-* `git clone`_
-* `git commit`_
-* `git config`_
-* `git diff`_
-* `git log`_
-* `git pull`_
-* `git push`_
-* `git remote`_
-* `git status`_
-
-.. include:: links.inc
diff --git a/docs/iris/src/developers_guide/gitwash/index.rst b/docs/iris/src/developers_guide/gitwash/index.rst
deleted file mode 100644
index 35eee1944a..0000000000
--- a/docs/iris/src/developers_guide/gitwash/index.rst
+++ /dev/null
@@ -1,16 +0,0 @@
-.. _using-git:
-
-Working with *iris* source code
-================================================
-
-Contents:
-
-.. toctree::
- :maxdepth: 2
-
- git_intro
- git_install
- git_development
- git_resources
-
-
diff --git a/docs/iris/src/developers_guide/gitwash/known_projects.inc b/docs/iris/src/developers_guide/gitwash/known_projects.inc
deleted file mode 100644
index 1761d975aa..0000000000
--- a/docs/iris/src/developers_guide/gitwash/known_projects.inc
+++ /dev/null
@@ -1,41 +0,0 @@
-.. Known projects
-
-.. PROJECTNAME placeholders
-.. _PROJECTNAME: http://nipy.org
-.. _`PROJECTNAME github`: https://github.com/nipy
-.. _`PROJECTNAME mailing list`: https://mail.python.org/mailman/listinfo/neuroimaging
-
-.. numpy
-.. _numpy: http://www.numpy.org
-.. _`numpy github`: https://github.com/numpy/numpy
-.. _`numpy mailing list`: http://mail.scipy.org/mailman/listinfo/numpy-discussion
-
-.. scipy
-.. _scipy: https://www.scipy.org
-.. _`scipy github`: https://github.com/scipy/scipy
-.. _`scipy mailing list`: http://mail.scipy.org/mailman/listinfo/scipy-dev
-
-.. nipy
-.. _nipy: http://nipy.org/nipy
-.. _`nipy github`: https://github.com/nipy/nipy
-.. _`nipy mailing list`: https://mail.python.org/mailman/listinfo/neuroimaging
-
-.. ipython
-.. _ipython: https://ipython.org
-.. _`ipython github`: https://github.com/ipython/ipython
-.. _`ipython mailing list`: http://mail.scipy.org/mailman/listinfo/IPython-dev
-
-.. dipy
-.. _dipy: http://nipy.org/dipy
-.. _`dipy github`: https://github.com/Garyfallidis/dipy
-.. _`dipy mailing list`: https://mail.python.org/mailman/listinfo/neuroimaging
-
-.. nibabel
-.. _nibabel: http://nipy.org/nibabel
-.. _`nibabel github`: https://github.com/nipy/nibabel
-.. _`nibabel mailing list`: https://mail.python.org/mailman/listinfo/neuroimaging
-
-.. marsbar
-.. _marsbar: http://marsbar.sourceforge.net
-.. _`marsbar github`: https://github.com/matthew-brett/marsbar
-.. _`MarsBaR mailing list`: https://lists.sourceforge.net/lists/listinfo/marsbar-users
diff --git a/docs/iris/src/developers_guide/gitwash/links.inc b/docs/iris/src/developers_guide/gitwash/links.inc
deleted file mode 100644
index 20f4dcfffd..0000000000
--- a/docs/iris/src/developers_guide/gitwash/links.inc
+++ /dev/null
@@ -1,4 +0,0 @@
-.. compiling links file
-.. include:: known_projects.inc
-.. include:: this_project.inc
-.. include:: git_links.inc
diff --git a/docs/iris/src/developers_guide/gitwash/maintainer_workflow.rst b/docs/iris/src/developers_guide/gitwash/maintainer_workflow.rst
deleted file mode 100644
index b05be47611..0000000000
--- a/docs/iris/src/developers_guide/gitwash/maintainer_workflow.rst
+++ /dev/null
@@ -1,96 +0,0 @@
-.. _maintainer-workflow:
-
-###################
-Maintainer workflow
-###################
-
-This page is for maintainers |emdash| those of us who merge our own or other
-people's changes into the upstream repository.
-
-Since you're a maintainer, you are completely on top of the basic stuff
-in :ref:`development-workflow`.
-
-The instructions in :ref:`linking-to-upstream` add a remote that has read-only
-access to the upstream repo. Being a maintainer, you've got read-write access.
-
-It's good to have your upstream remote have a scary name, to remind you that
-it's a read-write remote::
-
- git remote add upstream-rw git@github.com:SciTools/iris.git
- git fetch upstream-rw
-
-*******************
-Integrating changes
-*******************
-
-Let's say you have some changes that need to go into trunk
-(``upstream-rw/master``).
-
-The changes are in some branch that you are currently on. For example, you are
-looking at someone's changes like this::
-
- git remote add someone git://github.com/someone/iris.git
- git fetch someone
- git branch cool-feature --track someone/cool-feature
- git checkout cool-feature
-
-So now you are on the branch with the changes to be incorporated upstream. The
-rest of this section assumes you are on this branch.
-
-A few commits
-=============
-
-If there are only a few commits, consider rebasing to upstream::
-
- # Fetch upstream changes
- git fetch upstream-rw
- # rebase
- git rebase upstream-rw/master
-
-Remember that, if you do a rebase, and push that, you'll have to close any
-github pull requests manually, because github will not be able to detect the
-changes have already been merged.
-
-A long series of commits
-========================
-
-If there are a longer series of related commits, consider a merge instead::
-
- git fetch upstream-rw
- git merge --no-ff upstream-rw/master
-
-The merge will be detected by github, and should close any related pull requests
-automatically.
-
-Note the ``--no-ff`` above. This forces git to make a merge commit, rather than
-doing a fast-forward, so that this set of commits branches off trunk and then
-rejoins the main history with a merge, rather than appearing to have been made directly
-on top of trunk.
-
-Check the history
-=================
-
-Now, in either case, you should check that the history is sensible and you have
-the right commits::
-
- git log --oneline --graph
- git log -p upstream-rw/master..
-
-The first line above just shows the history in a compact way, with a text
-representation of the history graph. The second line shows the log of commits
-excluding those that can be reached from trunk (``upstream-rw/master``), and
-including those that can be reached from current HEAD (implied with the ``..``
-at the end). So, it shows the commits unique to this branch compared to trunk.
-The ``-p`` option shows the diff for these commits in patch form.
-
-Push to trunk
-=============
-
-::
-
- git push upstream-rw my-new-feature:master
-
-This pushes the ``my-new-feature`` branch in this repository to the ``master``
-branch in the ``upstream-rw`` repository.
-
-.. include:: links.inc
diff --git a/docs/iris/src/developers_guide/gitwash/pull_button.png b/docs/iris/src/developers_guide/gitwash/pull_button.png
deleted file mode 100644
index e5031681b9..0000000000
Binary files a/docs/iris/src/developers_guide/gitwash/pull_button.png and /dev/null differ
diff --git a/docs/iris/src/developers_guide/gitwash/set_up_fork.rst b/docs/iris/src/developers_guide/gitwash/set_up_fork.rst
deleted file mode 100644
index 172cbb2051..0000000000
--- a/docs/iris/src/developers_guide/gitwash/set_up_fork.rst
+++ /dev/null
@@ -1,68 +0,0 @@
-.. _set-up-fork:
-
-==================
- Set up your fork
-==================
-
-First you follow the instructions for :ref:`forking`.
-
-Overview
-========
-
-::
-
- git clone git@github.com:your-user-name/iris.git
- cd iris
- git remote add upstream git://github.com/SciTools/iris.git
-
-In detail
-=========
-
-Clone your fork
----------------
-
-#. Clone your fork to the local computer with ``git clone
- git@github.com:your-user-name/iris.git``
-#. Investigate. Change directory to your new repo: ``cd iris``. Then
- ``git branch -a`` to show you all branches. You'll get something
- like::
-
- * master
- remotes/origin/master
-
- This tells you that you are currently on the ``master`` branch, and
- that you also have a ``remote`` connection to ``origin/master``.
-   What remote repository is ``remotes/origin``? Try ``git remote -v`` to
- see the URLs for the remote. They will point to your github fork.
-
- Now you want to connect to the upstream `iris github`_ repository, so
- you can merge in changes from trunk.
-
-.. _linking-to-upstream:
-
-Linking your repository to the upstream repo
---------------------------------------------
-
-::
-
- cd iris
- git remote add upstream git://github.com/SciTools/iris.git
-
-``upstream`` here is just the arbitrary name we're using to refer to the
-main `iris`_ repository at `iris github`_.
-
-Note that we've used ``git://`` for the URL rather than ``git@``. The
-``git://`` URL is read only. This means that we can't accidentally
-(or deliberately) write to the upstream repo, and we are only going to
-use it to merge into our own code.
-
-Just for your own satisfaction, show yourself that you now have a new
-'remote', with ``git remote -v show``, giving you something like::
-
- upstream git://github.com/SciTools/iris.git (fetch)
- upstream git://github.com/SciTools/iris.git (push)
- origin git@github.com:your-user-name/iris.git (fetch)
- origin git@github.com:your-user-name/iris.git (push)
-
-.. include:: links.inc
-
diff --git a/docs/iris/src/developers_guide/gitwash/this_project.inc b/docs/iris/src/developers_guide/gitwash/this_project.inc
deleted file mode 100644
index 38219bf4b4..0000000000
--- a/docs/iris/src/developers_guide/gitwash/this_project.inc
+++ /dev/null
@@ -1,5 +0,0 @@
-.. iris
-.. _`iris`: http://scitools.org.uk/iris
-.. _`iris github`: http://github.com/SciTools/iris
-
-.. _`iris mailing list`: https://groups.google.com/forum/#!forum/scitools-iris
diff --git a/docs/iris/src/developers_guide/gitwash_build.sh b/docs/iris/src/developers_guide/gitwash_build.sh
deleted file mode 100755
index e1c4cdb2af..0000000000
--- a/docs/iris/src/developers_guide/gitwash_build.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/usr/bin/env sh
-
-
-# Generate the gitwash sub-directory.
-echo
-echo "Building gitwash ..."
-echo
-python gitwash_dumper.py --repo-name=iris --github-user=SciTools --gitwash-url=https://github.com/matthew-brett/gitwash.git --project-url=http://scitools.org.uk/iris --project-ml-url=https://groups.google.com/forum/#!forum/scitools-iris ./ iris
diff --git a/docs/iris/src/developers_guide/gitwash_dumper.py b/docs/iris/src/developers_guide/gitwash_dumper.py
deleted file mode 100644
index 999a738fa6..0000000000
--- a/docs/iris/src/developers_guide/gitwash_dumper.py
+++ /dev/null
@@ -1,236 +0,0 @@
-#!/usr/bin/env python
-''' Checkout gitwash repo into directory and do search replace on name '''
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-
-import os
-from os.path import join as pjoin
-import shutil
-import sys
-import re
-import glob
-import fnmatch
-import tempfile
-from subprocess import call
-from optparse import OptionParser
-
-verbose = False
-
-
-def clone_repo(url, branch):
- cwd = os.getcwd()
- tmpdir = tempfile.mkdtemp()
- try:
- cmd = 'git clone %s %s' % (url, tmpdir)
- call(cmd, shell=True)
- os.chdir(tmpdir)
- cmd = 'git checkout %s' % branch
- call(cmd, shell=True)
- except:
- shutil.rmtree(tmpdir)
- raise
- finally:
- os.chdir(cwd)
- return tmpdir
-
-
-def cp_files(in_path, globs, out_path):
- try:
- os.makedirs(out_path)
- except OSError:
- pass
- out_fnames = []
- for in_glob in globs:
- in_glob_path = pjoin(in_path, in_glob)
- for in_fname in glob.glob(in_glob_path):
- out_fname = in_fname.replace(in_path, out_path)
- pth, _ = os.path.split(out_fname)
- if not os.path.isdir(pth):
- os.makedirs(pth)
- shutil.copyfile(in_fname, out_fname)
- out_fnames.append(out_fname)
- return out_fnames
-
-
-def filename_search_replace(sr_pairs, filename, backup=False):
- ''' Search and replace for expressions in files
-
- '''
- with open(filename, 'rt') as in_fh:
- in_txt = in_fh.read(-1)
- out_txt = in_txt[:]
- for in_exp, out_exp in sr_pairs:
- in_exp = re.compile(in_exp)
- out_txt = in_exp.sub(out_exp, out_txt)
- if in_txt == out_txt:
- return False
- with open(filename, 'wt') as out_fh:
- out_fh.write(out_txt)
- if backup:
- with open(filename + '.bak', 'wt') as bak_fh:
- bak_fh.write(in_txt)
- return True
-
-
-def copy_replace(replace_pairs,
- repo_path,
- out_path,
- cp_globs=('*',),
- rep_globs=('*',),
- renames = ()):
- out_fnames = cp_files(repo_path, cp_globs, out_path)
- renames = [(re.compile(in_exp), out_exp) for in_exp, out_exp in renames]
- fnames = []
- for rep_glob in rep_globs:
- fnames += fnmatch.filter(out_fnames, rep_glob)
- if verbose:
- print('\n'.join(fnames))
- for fname in fnames:
- filename_search_replace(replace_pairs, fname, False)
- for in_exp, out_exp in renames:
- new_fname, n = in_exp.subn(out_exp, fname)
- if n:
- os.rename(fname, new_fname)
- break
-
-
-def make_link_targets(proj_name,
- user_name,
- repo_name,
- known_link_fname,
- out_link_fname,
- url=None,
- ml_url=None):
- """ Check and make link targets
-
- If url is None or ml_url is None, check if there are links present for these
- in `known_link_fname`. If not, raise error. The check is:
-
- Look for a target `proj_name`.
- Look for a target `proj_name` + ' mailing list'
-
- Also, look for a target `proj_name` + 'github'. If this exists, don't write
- this target into the new file below.
-
- If we are writing any of the url, ml_url, or github address, then write new
- file with these links, of form:
-
- .. _`proj_name`
- .. _`proj_name`: url
- .. _`proj_name` mailing list: url
- """
- with open(known_link_fname, 'rt') as link_fh:
- link_contents = link_fh.readlines()
- have_url = not url is None
- have_ml_url = not ml_url is None
- have_gh_url = None
- for line in link_contents:
- if not have_url:
- match = re.match(r'..\s+_`%s`:\s+' % proj_name, line)
- if match:
- have_url = True
- if not have_ml_url:
- match = re.match(r'..\s+_`%s mailing list`:\s+' % proj_name, line)
- if match:
- have_ml_url = True
- if not have_gh_url:
- match = re.match(r'..\s+_`%s github`:\s+' % proj_name, line)
- if match:
- have_gh_url = True
- if not have_url or not have_ml_url:
- raise RuntimeError('Need command line or known project '
- 'and / or mailing list URLs')
- lines = []
- if not url is None:
- lines.append('.. _`%s`: %s\n' % (proj_name, url))
- if not have_gh_url:
- gh_url = 'http://github.com/%s/%s\n' % (user_name, repo_name)
- lines.append('.. _`%s github`: %s\n' % (proj_name, gh_url))
- if not ml_url is None:
- lines.append('.. _`%s mailing list`: %s\n' % (proj_name, ml_url))
- if len(lines) == 0:
- # Nothing to do
- return
- # A neat little header line
- lines = ['.. %s\n' % proj_name] + lines
- with open(out_link_fname, 'wt') as out_links:
- out_links.writelines(lines)
-
-
-USAGE = ''' <output_directory> <project_name>
-
-If not set with options, the repository name is the same as the <project name>
-
-If not set with options, the main github user is the same as the
-repository name.'''
-
-
-GITWASH_CENTRAL = 'git://github.com/matthew-brett/gitwash.git'
-GITWASH_BRANCH = 'master'
-
-
-def main():
- parser = OptionParser()
- parser.set_usage(parser.get_usage().strip() + USAGE)
- parser.add_option("--repo-name", dest="repo_name",
- help="repository name - e.g. nitime",
- metavar="REPO_NAME")
- parser.add_option("--github-user", dest="main_gh_user",
- help="github username for main repo - e.g fperez",
- metavar="MAIN_GH_USER")
- parser.add_option("--gitwash-url", dest="gitwash_url",
- help="URL to gitwash repository - default %s"
- % GITWASH_CENTRAL,
- default=GITWASH_CENTRAL,
- metavar="GITWASH_URL")
- parser.add_option("--gitwash-branch", dest="gitwash_branch",
- help="branch in gitwash repository - default %s"
- % GITWASH_BRANCH,
- default=GITWASH_BRANCH,
- metavar="GITWASH_BRANCH")
- parser.add_option("--source-suffix", dest="source_suffix",
- help="suffix of ReST source files - default '.rst'",
- default='.rst',
- metavar="SOURCE_SUFFIX")
- parser.add_option("--project-url", dest="project_url",
- help="URL for project web pages",
- default=None,
- metavar="PROJECT_URL")
- parser.add_option("--project-ml-url", dest="project_ml_url",
- help="URL for project mailing list",
- default=None,
- metavar="PROJECT_ML_URL")
- (options, args) = parser.parse_args()
- if len(args) < 2:
- parser.print_help()
- sys.exit()
- out_path, project_name = args
- if options.repo_name is None:
- options.repo_name = project_name
- if options.main_gh_user is None:
- options.main_gh_user = options.repo_name
- repo_path = clone_repo(options.gitwash_url, options.gitwash_branch)
- try:
- copy_replace((('PROJECTNAME', project_name),
- ('REPONAME', options.repo_name),
- ('MAIN_GH_USER', options.main_gh_user)),
- repo_path,
- out_path,
- cp_globs=(pjoin('gitwash', '*'),),
- rep_globs=('*.rst',),
- renames=(('\.rst$', options.source_suffix),))
- make_link_targets(project_name,
- options.main_gh_user,
- options.repo_name,
- pjoin(out_path, 'gitwash', 'known_projects.inc'),
- pjoin(out_path, 'gitwash', 'this_project.inc'),
- options.project_url,
- options.project_ml_url)
- finally:
- shutil.rmtree(repo_path)
-
-
-if __name__ == '__main__':
- main()
diff --git a/docs/iris/src/developers_guide/gitwash_get.sh b/docs/iris/src/developers_guide/gitwash_get.sh
deleted file mode 100755
index c61b406603..0000000000
--- a/docs/iris/src/developers_guide/gitwash_get.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/usr/bin/env sh
-
-
-# Get the latest gitwash_dumper.py from GitHub.
-echo "Downloading latest gitwash_dumper.py from GitHub ..."
-echo
-curl -O https://raw.github.com/matthew-brett/gitwash/master/gitwash_dumper.py
diff --git a/docs/iris/src/developers_guide/graphics_tests.rst b/docs/iris/src/developers_guide/graphics_tests.rst
deleted file mode 100644
index 684ccfa4ab..0000000000
--- a/docs/iris/src/developers_guide/graphics_tests.rst
+++ /dev/null
@@ -1,117 +0,0 @@
-.. _developer_graphics_tests:
-
-Graphics tests
-**************
-
-The only practical way of testing plotting functionality is to check actual
-output plots.
-For this, a basic 'graphics test' assertion operation is provided in the method
-:meth:`iris.tests.IrisTest.check_graphic` : This tests plotted output for a
-match against a stored reference.
-A "graphics test" is any test which employs this.
-
-At present (Iris version 1.10), such tests include the testing for modules
-`iris.tests.test_plot` and `iris.tests.test_quickplot`, and also some other
-'legacy' style tests (as described in :ref:`developer_tests`).
-It is conceivable that new 'graphics tests' of this sort can still be added.
-However, as graphics tests are inherently "integration" style rather than true
-unit tests, results can differ with the installed versions of dependent
-libraries (see below), so this is not recommended except where no alternative
-is practical.
-
-Testing actual plot results introduces some significant difficulties :
- * Graphics tests are inherently 'integration' style tests, so results will
- often vary with the versions of key dependencies, i.e. the exact versions of
- third-party modules which are installed : Obviously, results will depend on
- the matplotlib version, but they can also depend on numpy and other
- installed packages.
- * Although it seems possible in principle to accommodate 'small' result changes
- by distinguishing plots which are 'nearly the same' from those which are
- 'significantly different', in practice no *automatic* scheme for this can be
- perfect : That is, any calculated tolerance in output matching will allow
- some changes which a human would judge as a significant error.
- * Storing a variety of alternative 'acceptable' results as reference images
- can easily lead to uncontrolled increases in the size of the repository,
- given multiple independent sources of variation.
-
-
-Graphics Testing Strategy
-=========================
-
-Prior to Iris 1.10, all graphics tests compared against a stored reference
-image with a small tolerance on pixel values.
-
-From Iris v1.11 onward, we want to support testing Iris against multiple
-versions of matplotlib (and some other dependencies).
-To make this manageable, we have now rewritten "check_graphic" to allow
-multiple alternative 'correct' results without including many more images in
-the Iris repository.
-This consists of :
-
- * using a perceptual 'image hash' of the outputs (see
- https://github.com/JohannesBuchner/imagehash) as the basis for checking
- test results.
- * storing the hashes of 'known accepted results' for each test in a
- database in the repo (which is actually stored in
- ``lib/iris/tests/results/imagerepo.json``).
- * storing associated reference images for each hash value in a separate public
- repository, currently in https://github.com/SciTools/test-images-scitools ,
- allowing human-eye judgement of 'valid equivalent' results.
- * a new version of the 'iris/tests/idiff.py' assists in comparing proposed
- new 'correct' result images with the existing accepted ones.
-
-BRIEF...
-There should be sufficient work-flow detail here to allow an iris developer to:
-
- * understand the new check graphic test process
- * understand the steps to take and tools to use to add a new graphic test
- * understand the steps to take and tools to use to diagnose and fix a graphics test failure
-
-
-Basic workflow
-==============
-
-If you notice that a graphics test in the Iris testing suite has failed
-following changes in Iris or any of its dependencies, this is the process
-you now need to follow:
-
-#. Create a directory in iris/lib/iris/tests called 'result_image_comparison'.
-#. From your Iris root directory, run the tests by using the command:
- ``python setup.py test``.
-#. Navigate to iris/lib/iris/tests and run the command: ``python idiff.py``.
- This will open a window for you to visually inspect the changes to the
- graphic and then either accept or reject the new result.
-#. Upon acceptance of a change or a new image, a copy of the output PNG file
- is added to the reference image repository in
- https://github.com/SciTools/test-images-scitools. The file is named
-   according to the image hash value, as ``<hash>.png``.
-#. The hash value of the new result is added into the relevant set of 'valid
- result hashes' in the image result database file,
- ``tests/results/imagerepo.json``.
-#. The tests must now be re-run, and the 'new' result should be accepted.
-   Occasionally there are several graphics checks in a single test; if one
-   fails, the remaining checks in that test are not run. In that case you may
-   well encounter further graphics test failures in your next runs, and you
-   must repeat the process until all the graphics tests pass.
-#. To add your changes to Iris, you need to make two pull requests. The first
- should be made to the test-images-scitools repository, and this should
- contain all the newly-generated png files copied into the folder named
- 'image_files'.
-#. The second pull request should be created in the Iris repository, and should
- only include the change to the image results database
- (``tests/results/imagerepo.json``) :
- This pull request must contain a reference to the matching one in
- test-images-scitools.
-
-Note: the Iris pull request will not pass in Travis until the
-test-images-scitools pull request has been merged : This is because an Iris
-test checks that a reference image (URI) exists for every target in the image
-results database.
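If you want to see what is currently recorded for a given test, the results
database is plain JSON keyed by each test's unique id. A quick way to peek at
it (the test id below is a hypothetical example) might be:

.. code-block:: python

    import json

    # Print whatever 'known accepted results' are recorded for one test.
    # The test id used as the key here is a hypothetical example.
    with open('lib/iris/tests/results/imagerepo.json') as fh:
        image_repo = json.load(fh)

    print(image_repo.get('iris.tests.test_plot.TestContourf.test_simple'))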
-
-
-Fixing a failing graphics test
-==============================
-
-
-Adding a new graphics test
-==========================
diff --git a/docs/iris/src/developers_guide/index.rst b/docs/iris/src/developers_guide/index.rst
deleted file mode 100644
index a1ecd0756f..0000000000
--- a/docs/iris/src/developers_guide/index.rst
+++ /dev/null
@@ -1,40 +0,0 @@
-..
- ##########################################################################
- (C) British Crown Copyright 2010 - 2012, Met Office
-
- This file is part of Iris.
-
- Iris is free software: you can redistribute it and/or modify it under
- the terms of the GNU Lesser General Public License as published by the
- Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- Iris is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public License
-  along with Iris. If not, see <http://www.gnu.org/licenses/>.
-
- ##########################################################################
-
-
-.. _userguide-index:
-
-.. This is the source doc for the user guide
-
-#####################
- Iris developer guide
-#####################
-
-
-.. toctree::
- :maxdepth: 3
-
- documenting/index.rst
- gitwash/index.rst
- pulls.rst
- tests.rst
- deprecations.rst
- release.rst
diff --git a/docs/iris/src/developers_guide/pulls.rst b/docs/iris/src/developers_guide/pulls.rst
deleted file mode 100644
index 6546a15642..0000000000
--- a/docs/iris/src/developers_guide/pulls.rst
+++ /dev/null
@@ -1,116 +0,0 @@
-.. _pr_check:
-
-Pull Request Check List
-***********************
-
-A pull request to a SciTools project master should be ready to merge into the
-master branch.
-
-All pull request will be reviewed by a core developer who will manage the
-process of merging. It is the responsibility of a developer submitting a
-pull request to do their best to deliver a pull request which meets the
-requirements of the project it is submitted to.
-
-The check list summarises criteria which will be checked before a pull request
-is merged. Before submitting a pull request please consider this list.
-
-
-The Iris Check List
-====================
-
-* Have you provided a helpful description of the Pull Request?
-  i.e. what has changed and why. This should include:
-
- * the aim of the change ; the problem addressed ; a link to the issue.
- * how the change has been delivered.
- * a "What's New" entry, submitted as a new file added in the pull request.
- See `Contributing a "What's New" entry`_.
-
-* Do all the tests pass locally?
-
- * The Iris tests may be run with ``python setup.py test`` which has a command
- line utility included.
-
-* Have new tests been provided for all additional functionality?
-
-* Do all modified and new sourcefiles pass PEP8?
-
- * PEP8_ is the Python source code style guide.
- * There is a python module for checking pep8 compliance: python-pep8_
- * a standard Iris test checks that all sourcefiles meet PEP8 compliance
- (see "iris.tests.test_coding_standards.TestCodeFormat").
-
-* Do all modified and new sourcefiles have a correct, up-to-date copyright
- header?
-
- * a standard Iris test checks that all sourcefiles include a copyright
- message, including the correct year of the latest change
- (see "iris.tests.test_coding_standards.TestLicenseHeaders").
-
-* Has the documentation been updated to explain all new or changed features?
-
- * refer to the developer guide on docstrings_
-
-* Have code examples been provided inside docstrings, where relevant?
-
- * these are strongly recommended as concrete (working) examples always
- considerably enhance the documentation.
-
-  * live test code can be included in docstrings (a brief sketch is given after this check list).
-
- * See for example :data:`iris.cube.Cube.data`
- * Details at http://www.sphinx-doc.org/en/stable/ext/doctest.html
-
- * The documentation tests may be run with ``make doctest``, from within the
- ``./docs/iris`` subdirectory.
-
-* Have you provided a 'whats new' contribution?
-
- * this should be done for all changes that affect API or behaviour.
- See :ref:`whats_new_contributions`
-
-* Does the documentation build without errors?
-
- * The documentation is built using ``make html`` in ``./docs/iris``.
-
-* Do the documentation and code-example tests pass?
-
- * Run with ``make doctest`` and ``make extest``, from within the subdirectory
- ``./docs/iris``.
- * note that code examples must *not* raise deprecations. This is now checked
- and will result in an error.
- When an existing code example encounters a deprecation, it must be fixed.
-
-* Has the travis file been updated to reflect any dependency updates?
-
- * ``./.travis.yml`` is used to manage the continuous integration testing.
- * the files ``./conda-requirements.yml`` and
- ``./minimal-conda-requirements.yml`` are used to define the software
- environments used, using the conda_ package manager.
-
-* Have you provided updates to supporting projects for test or example data?
-
- * the following separate repos are used to manage larger files used by tests
- and code examples :
-
- * iris-test-data_ is a github project containing all the data to support the
- tests.
- * iris-sample-data_ is a github project containing all the data to support
- the gallery and examples.
- * test-images-scitools_ is a github project containing reference plot images
- to support iris graphics tests : see :ref:`developer_graphics_tests`.
-
- * If new files are required by tests or code examples, they must be added to
- the appropriate supporting project via a suitable pull-request.
- This new 'supporting pull request' should be referenced in the main Iris
- pull request, and must be accepted and merged before the Iris one can be.
-
-
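To illustrate the docstring point in the list above, a 'live' code example is
simply a doctest embedded in a docstring and exercised by ``make doctest``.
The function below is invented purely for the sketch:

.. code-block:: python

    def cell_count(cube):
        """
        Return the total number of data cells in a cube.

        For example:

            >>> import numpy as np
            >>> import iris.cube
            >>> cube = iris.cube.Cube(np.zeros((3, 4)))
            >>> cell_count(cube)
            12

        """
        return cube.data.size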
-.. _PEP8: http://www.python.org/dev/peps/pep-0008/
-.. _python-pep8: https://pypi.python.org/pypi/pep8
-.. _conda: http://conda.readthedocs.io/en/latest/
-.. _iris-test-data: https://github.com/SciTools/iris-test-data
-.. _iris-sample-data: https://github.com/SciTools/iris-sample-data
-.. _test-images-scitools: https://github.com/SciTools/test-images-scitools
-.. _docstrings: http://scitools.org.uk/iris/docs/latest/developers_guide/documenting/docstrings.html
-.. _Contributing a "What's New" entry: http://scitools.org.uk/iris/docs/latest/developers_guide/documenting/whats_new_contributions.html
diff --git a/docs/iris/src/developers_guide/release.rst b/docs/iris/src/developers_guide/release.rst
deleted file mode 100644
index 437478a6a0..0000000000
--- a/docs/iris/src/developers_guide/release.rst
+++ /dev/null
@@ -1,75 +0,0 @@
-.. _iris_development_releases:
-
-Releases
-********
-
-A release of Iris is a tag on the SciTools/Iris Github repository.
-
-Release Branch
-==============
-
-Once the features intended for the release are on master, a release branch should be created, in the SciTools/Iris repository. This will have the name:
-
- :literal:`{major release number}.{minor release number}.x`
-
-for example:
-
- :literal:`v1.9.x`
-
-This branch shall be used to finalise the release details in preparation for the release candidate.
-
-Release Candidate
-=================
-
-Prior to a release, a release candidate tag may be created, marked as a pre-release in github, with a tag ending with :literal:`rc` followed by a number, e.g.:
-
- :literal:`v1.9.0rc1`
-
-If created, the pre-release shall be available for at least one week prior to the release being cut. User groups should be notified of the existence of the pre-release and encouraged to test the functionality.
-
-A pre-release is expected for a minor release, but not normally provided for a point release.
-
-If new features are required for a release after a release candidate has been cut, a new pre-release shall be issued first.
-
-Documentation
-=============
-
-The documentation should include all of the what's new snippets, which must be compiled into a what's new. This content should be reviewed and adapted as required and the snippets removed from the branch to produce a coherent what's new page.
-
-Upon release, the documentation shall be added to the SciTools scitools.org.uk github project's gh-pages branch as the latest documentation.
-
-Testing the Conda Recipe
-========================
-
-Before a release is cut, the SciTools conda-recipes-scitools recipe for Iris shall be tested to build the release branch of Iris; this test recipe shall not be merged onto conda-recipes-scitools.
-
-The Release
-===========
-
-The final steps are to change the version string in the source of :literal:`lib/iris/__init__.py` and include the release date in the relevant what's new page within the documentation.
-
-Once all checks are complete, the release is cut by the creation of a new tag in the SciTools Iris repository.
-
-Conda Recipe
-============
-
-Once a release is cut, the SciTools conda-recipes-scitools recipe for Iris shall be updated to build the latest release of Iris and push this artefact to anaconda.org. The build and push is all automated as part of the merge process.
-
-Merge Back
-==========
-
-After the release is cut, the changes shall be merged back onto the scitools master.
-
-To achieve this, first cut a local branch from the release branch, :literal:`{release}.x`. Next add a commit changing the release string to match the release string on scitools/master.
-This branch can now be proposed as a pull request to master. This work flow ensures that the commit identifiers are consistent between the :literal:`.x` branch and :literal:`master`.
-
-Point Releases
-==============
-
-Bug fixes may be implemented and targeted at the :literal:`.x` branch. These should lead to a new point release, another tag.
-For example, a fix for a problem with 1.9.0 will be merged into 1.9.x, and then released by tagging 1.9.1.
-
-New features shall not be included in a point release, these are for bug fixes.
-
-A point release does not require a release candidate, but the rest of the release process is to be followed, including the merge back of changes into :literal:`master`.
-
diff --git a/docs/iris/src/developers_guide/tests.rst b/docs/iris/src/developers_guide/tests.rst
deleted file mode 100644
index 929073b569..0000000000
--- a/docs/iris/src/developers_guide/tests.rst
+++ /dev/null
@@ -1,154 +0,0 @@
-.. _developer_tests:
-
-Testing
-*******
-
-The Iris tests may be run with ``python setup.py test`` which has a
-command line utility included.
-
-There are three categories of tests within Iris:
- - Unit tests
- - Integration tests
- - Legacy tests
-
-Ideally, all code changes should be accompanied by one or more unit
-tests, and by zero or more integration tests. And where possible, new
-tests should not be added to the legacy tests.
-
-But if in any doubt about what tests to add or how to write them please
-feel free to submit a pull-request in any state and ask for assistance.
-
-
-Unit tests
-==========
-
-Code changes should be accompanied by enough unit tests to give a
-high degree of confidence that the change works as expected. In
-addition, the unit tests can help describe the intent behind a change.
-
-The docstring for each test module must state the unit under test.
-For example:
-
- :literal:`"""Unit tests for the \`iris.experimental.raster.export_geotiff\` function."""`
-
-All unit tests must be placed and named according to the following
-structure:
-
-Classes
--------
-When testing a class all the tests must reside in the module:
-
-    :literal:`lib/iris/tests/unit/<fully/qualified/module>/test_<ClassName>.py`
-
-Within this test module each tested method must have one or more
-corresponding test classes:
-- Either: `Test_name_of_public_method`
-- Or: `Test_name_of_public_method__aspect_of_method`
-
-And within those test classes, the test methods must be named according
-to the aspect of the tested method which they address.
-
-**Examples**:
-
-All unit tests for :py:class:`iris.cube.Cube` must reside in:
-
- :literal:`lib/iris/tests/unit/cube/test_Cube.py`
-
-Within that file the tests might look something like:
-
-.. code-block:: python
-
- # Tests for the Cube.xml() method.
- class Test_xml(tests.IrisTest):
- def test_some_general_stuff(self):
- ...
-
- # Tests for the Cube.xml() method, focussing on the behaviour of
- # the checksums.
- class Test_xml__checksum(tests.IrisTest):
- def test_checksum_ignores_masked_values(self):
- ...
-
- # Tests for the Cube.add_dim_coord() method.
- class Test_add_dim_coord(tests.IrisTest):
- def test_normal_usage(self):
- ...
-
- def test_coord_already_present(self):
- ...
-
-
-Functions
----------
-When testing a function all the tests must reside in the module:
-
-    :literal:`lib/iris/tests/unit/<fully/qualified/module>/test_<function_name>.py`
-
-Within this test module there must be one or more test classes:
-- Either: `Test`
-- Or: `TestAspectOfFunction`
-
-And within those test classes, the test methods must be named according
-to the aspect of the tested function which they address.
-
-**Examples**:
-
-All unit tests for :py:func:`iris.experimental.raster.export_geotiff`
-must reside in:
-
- :literal:`lib/iris/tests/unit/experimental/raster/test_export_geotiff.py`
-
-Within that file the tests might look something like:
-
-.. code-block:: python
-
- # Tests focussing on the handling of different data types.
- class TestDtypeAndValues(tests.IrisTest):
- def test_int16(self):
- ...
-
- def test_int16_big_endian(self):
- ...
-
- # Tests focussing on the handling of different projections.
- class TestProjection(tests.IrisTest):
- def test_no_ellipsoid(self):
- ...
-
-
-Integration tests
-=================
-
-Some code changes may require tests which exercise several units in
-order to demonstrate an important consequence of their interaction which
-may not be apparent when considering the units in isolation.
-
-These tests must be placed in the `lib/iris/tests/integration` folder.
-Unlike unit tests, there is no fixed naming scheme for integration
-tests. But folders and files must be created as required to help
-developers locate relevant tests. It is recommended they are named
-according to the capabilities under test, e.g.
-`metadata/test_pp_preservation.py`, and not named according to the
-module(s) under test.
-
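By way of illustration only (the file name, test data path and assertion below
are hypothetical), an integration test reads much like a unit test but is named
for the capability it exercises:

.. code-block:: python

    # lib/iris/tests/integration/metadata/test_pp_preservation.py
    # Hypothetical sketch: exercises loading, saving and PP metadata
    # translation together, rather than any single unit.
    import os
    import tempfile

    import iris
    import iris.tests as tests


    class TestSTASHPreservation(tests.IrisTest):
        def test_save_load_roundtrip(self):
            cube = iris.load_cube(
                tests.get_data_path(('PP', 'simple_pp', 'global.pp')))
            fd, temp_path = tempfile.mkstemp(suffix='.pp')
            os.close(fd)
            try:
                iris.save(cube, temp_path)
                result = iris.load_cube(temp_path)
            finally:
                os.remove(temp_path)
            self.assertEqual(cube.attributes['STASH'],
                             result.attributes['STASH'])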
-
-Graphics tests
-=================
-Certain Iris tests are based on checking plotted images.
-This the only way of testing the modules :mod:`iris.plot` and
-:mod:`iris.quickplot`, but is also used for some other legacy and integration-
-style testcases.
-
-Prior to Iris version 1.10, a single reference image for each testcase was
-stored in the main Iris repository, and a 'tolerant' comparison was performed
-against this.
-
-From version 1.11 onwards, graphics testcase outputs are compared against
-possibly *multiple* known-good images, of which only the signature is stored.
-This uses a sophisticated perceptual "image hashing" scheme (see
-https://github.com/JohannesBuchner/imagehash).
-Only imagehash signatures are stored in the Iris repo itself, thus freeing up
-valuable space. Meanwhile, the actual reference *images* -- which are required
-for human-eyes evaluation of proposed new "good results" -- are all stored
-elsewhere in a separate public repository.
-See :ref:`developer_graphics_tests`.
diff --git a/docs/iris/src/installing.rst b/docs/iris/src/installing.rst
deleted file mode 100644
index 6b6999ab82..0000000000
--- a/docs/iris/src/installing.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-.. _installing_iris:
-
-****************
-Installing Iris
-****************
-
-.. include:: ../../../INSTALL
diff --git a/docs/iris/src/sphinxext/auto_label_figures.py b/docs/iris/src/sphinxext/auto_label_figures.py
deleted file mode 100644
index 00f3aa96dc..0000000000
--- a/docs/iris/src/sphinxext/auto_label_figures.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# (C) British Crown Copyright 2014 - 2015, Met Office
-#
-# This file is part of Iris.
-#
-# Iris is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Lesser General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Iris is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-
-import os
-from docutils import nodes
-
-
-def auto_label_figures(app, doctree):
- """
- Add a label on every figure.
- """
-
- for fig in doctree.traverse(condition=nodes.figure):
- for img in fig.traverse(condition=nodes.image):
- fname, ext = os.path.splitext(img['uri'])
- if ext == '.png':
- fname = os.path.basename(fname).replace('_', '-')
- fig['ids'].append(fname)
-
-
-def setup(app):
- app.connect('doctree-read', auto_label_figures)
diff --git a/docs/iris/src/sphinxext/custom_class_autodoc.py b/docs/iris/src/sphinxext/custom_class_autodoc.py
deleted file mode 100644
index 25c095cb84..0000000000
--- a/docs/iris/src/sphinxext/custom_class_autodoc.py
+++ /dev/null
@@ -1,97 +0,0 @@
-# (C) British Crown Copyright 2010 - 2019, Met Office
-#
-# This file is part of Iris.
-#
-# Iris is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Lesser General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Iris is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-
-from sphinx.ext import autodoc
-from sphinx.ext.autodoc import *
-from sphinx.util import force_decode
-from sphinx.util.docstrings import prepare_docstring
-
-import inspect
-
-
-class ClassWithConstructorDocumenter(autodoc.ClassDocumenter):
- priority = 1000000
-
- def get_object_members(self, want_all):
- return autodoc.ClassDocumenter.get_object_members(self, want_all)
-
- @staticmethod
- def can_document_member(member, mname, isattr, self):
- return autodoc.ClassDocumenter.can_document_member(member, mname,
- isattr, self)
-
- def get_doc(self, encoding=None):
- content = self.env.config.autoclass_content
-
- docstrings = []
- docstring = self.get_attr(self.object, '__doc__', None)
- if docstring:
- docstrings.append(docstring)
-
- # for classes, what the "docstring" is can be controlled via a
- # config value; the default is only the class docstring
- if content in ('both', 'init'):
- constructor = self.get_constructor()
- if constructor:
- initdocstring = self.get_attr(constructor, '__doc__', None)
- else:
- initdocstring = None
- if initdocstring:
- if content == 'init':
- docstrings = [initdocstring]
- else:
- docstrings.append(initdocstring)
-
- return [prepare_docstring(force_decode(docstring, encoding))
- for docstring in docstrings]
-
- def get_constructor(self):
- # for classes, the relevant signature is the __init__ method's
- initmeth = self.get_attr(self.object, '__new__', None)
-
- if initmeth is None or initmeth is object.__new__ or not \
- (inspect.ismethod(initmeth) or inspect.isfunction(initmeth)):
- initmeth = None
-
- if initmeth is None:
- initmeth = self.get_attr(self.object, '__init__', None)
-
- if initmeth is None or initmeth is object.__init__ or \
- initmeth is object.__new__ or not \
- (inspect.ismethod(initmeth) or inspect.isfunction(initmeth)):
- initmeth = None
-
- return initmeth
-
- def format_args(self):
- initmeth = self.get_constructor()
- try:
- argspec = inspect.getargspec(initmeth)
- except TypeError:
- # still not possible: happens e.g. for old-style classes
- # with __init__ in C
- return None
- if argspec[0] and argspec[0][0] in ('cls', 'self'):
- del argspec[0][0]
- return inspect.formatargspec(*argspec)
-
-
-def setup(app):
- app.add_autodocumenter(ClassWithConstructorDocumenter)
diff --git a/docs/iris/src/sphinxext/custom_data_autodoc.py b/docs/iris/src/sphinxext/custom_data_autodoc.py
deleted file mode 100644
index 29a4fda94c..0000000000
--- a/docs/iris/src/sphinxext/custom_data_autodoc.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# (C) British Crown Copyright 2010 - 2015, Met Office
-#
-# This file is part of Iris.
-#
-# Iris is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Lesser General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Iris is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-
-from sphinx.ext.autodoc import DataDocumenter, ModuleLevelDocumenter
-try:
- # Use 'object_description' in place of the former 'safe_repr' function.
- from sphinx.util.inspect import object_description as safe_repr
-except ImportError:
- # 'safe_repr' is the old usage, for Sphinx<1.3.
- from sphinx.util.inspect import safe_repr
-
-from iris.analysis import Aggregator
-
-
-class IrisDataDocumenter(DataDocumenter):
- priority = 100
-
- def add_directive_header(self, sig):
- ModuleLevelDocumenter.add_directive_header(self, sig)
- if not self.options.annotation:
- try:
- objrepr = safe_repr(self.object)
- except ValueError:
- pass
- else:
- self.add_line(u' :annotation:', '')
- elif self.options.annotation is object():
- pass
- else:
- self.add_line(
- u' :annotation: {}'.format(self.options.annotation),
- '')
-
-
-def handler(app, what, name, obj, options, signature, return_annotation):
- if what == 'data':
- if isinstance(obj, object) and issubclass(obj.__class__, Aggregator):
- signature = '()'
- return_annotation = '{} instance.'.format(obj.__class__.__name__)
- return signature, return_annotation
-
-
-def setup(app):
- app.add_autodocumenter(IrisDataDocumenter)
- app.connect('autodoc-process-signature', handler)
diff --git a/docs/iris/src/sphinxext/gen_example_directory.py b/docs/iris/src/sphinxext/gen_example_directory.py
deleted file mode 100644
index 60863010c5..0000000000
--- a/docs/iris/src/sphinxext/gen_example_directory.py
+++ /dev/null
@@ -1,182 +0,0 @@
-# (C) British Crown Copyright 2010 - 2015, Met Office
-#
-# This file is part of Iris.
-#
-# Iris is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Lesser General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Iris is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-
-
-'''
-Generate the rst files for the examples
-'''
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-
-import os
-import re
-import shutil
-import sys
-
-
-def out_of_date(original, derived):
- '''
- Returns True if derivative is out-of-date wrt original,
- both of which are full file paths.
-
- TODO: this check isn't adequate in some cases, e.g., if we discover
- a bug when building the examples, the original and derived will be
- unchanged but we still want to force a rebuild.
- '''
- return (not os.path.exists(derived) or
- os.stat(derived).st_mtime < os.stat(original).st_mtime)
-
-
-docstring_regex = re.compile(r'[\'\"]{3}(.*?)[\'\"]{3}', re.DOTALL)
-
-
-noplot_regex = re.compile(r'#\s*-\*-\s*noplot\s*-\*-')
-
-
-def generate_example_rst(app):
- # Example code can be found at the same level as the documentation
- # src folder.
- rootdir = os.path.join(os.path.dirname(app.builder.srcdir), 'example_code')
-
- # Examples are built as a subfolder of the src folder.
- exampledir = os.path.join(app.builder.srcdir, 'examples')
-
- if not os.path.exists(exampledir):
- os.makedirs(exampledir)
-
- datad = {}
- for root, subFolders, files in os.walk(rootdir):
- for fname in files:
- if (fname.startswith('.') or fname.startswith('#') or
- fname.startswith('_') or fname.find('.svn') >= 0 or
- not fname.endswith('.py')):
- continue
-
- fullpath = os.path.join(root, fname)
- with open(fullpath) as fh:
- contents = fh.read()
- # indent
- relpath = os.path.split(root)[-1]
- datad.setdefault(relpath, []).append((fullpath, fname, contents))
-
- subdirs = sorted(datad.keys())
-
- index = []
- index.append('''\
-Iris examples
-=============
-
-.. toctree::
- :maxdepth: 2
-
-''')
-
- for subdir in subdirs:
- rstdir = os.path.join(exampledir, subdir)
- if not os.path.exists(rstdir):
- os.makedirs(rstdir)
-
- outputdir = os.path.join(app.builder.outdir, 'examples')
- if not os.path.exists(outputdir):
- os.makedirs(outputdir)
-
- outputdir = os.path.join(outputdir, subdir)
- if not os.path.exists(outputdir):
- os.makedirs(outputdir)
-
- index.append(' {}/index.rst\n'.format(subdir))
- subdir_root_path = os.path.join(rootdir, subdir)
- subdirIndex = []
-
- # Use the __init__.py file's docstring for the subdir example page (if
- # __init__ exists).
- if os.path.exists(os.path.join(subdir_root_path, '__init__.py')):
- import imp
- mod = imp.load_source(
- subdir,
- os.path.join(subdir_root_path, '__init__.py'))
- subdirIndex.append(mod.__doc__)
- else:
- line = 'Examples in {}\n'.format(subdir)
- subdirIndex.extend([line, '=' * len(line)])
-
- # Append the code to produce the toctree.
- subdirIndex.append('''
-.. toctree::
- :maxdepth: 1
-
-''')
-
- sys.stdout.write(subdir + ', ')
- sys.stdout.flush()
-
- data = sorted(datad[subdir])
-
- for fullpath, fname, contents in data:
- basename, ext = os.path.splitext(fname)
- outputfile = os.path.join(outputdir, fname)
-
- rstfile = '{}.rst'.format(basename)
- outrstfile = os.path.join(rstdir, rstfile)
-
- subdirIndex.append(' {}\n'.format(rstfile))
-
- if not out_of_date(fullpath, outrstfile):
- continue
-
- out = []
- out.append('.. _{}-{}:\n\n'.format(subdir, basename))
-
- # Copy the example code to be in the src examples directory. This
- # means we can define a simple relative path in the plot directive,
- # which can also copy the file into the resulting build directory.
- shutil.copy(fullpath, rstdir)
-
- docstring_results = docstring_regex.search(contents)
- if docstring_results is not None:
- out.append(docstring_results.group(1))
- else:
- title = '{} example code: {}'.format(subdir, fname)
- out.append(title + '\n')
- out.append('=' * len(title) + '\n\n')
-
- if not noplot_regex.search(contents):
- rel_example = os.path.relpath(outputfile, app.builder.outdir)
- out.append('\n\n.. plot:: {}\n'.format(rel_example))
- out.append(' :include-source:\n\n')
- else:
- out.append('[`source code <{}>`_]\n\n'.format(fname))
- out.append('.. literalinclude:: {}\n\n'.format(fname))
- # Write the .py file contents (we didn't need to do this for
- # plots as the plot directive does this for us.)
- with open(outputfile, 'w') as fhstatic:
- fhstatic.write(contents)
-
- with open(outrstfile, 'w') as fh:
- fh.writelines(out)
-
- subdirIndexFile = os.path.join(rstdir, 'index.rst')
- with open(subdirIndexFile, 'w') as fhsubdirIndex:
- fhsubdirIndex.writelines(subdirIndex)
-
- with open(os.path.join(exampledir, 'index.rst'), 'w') as fhindex:
- fhindex.writelines(index)
-
-
-def setup(app):
- app.connect('builder-inited', generate_example_rst)
diff --git a/docs/iris/src/sphinxext/gen_gallery.py b/docs/iris/src/sphinxext/gen_gallery.py
deleted file mode 100644
index 1dabf919f7..0000000000
--- a/docs/iris/src/sphinxext/gen_gallery.py
+++ /dev/null
@@ -1,204 +0,0 @@
-#
-# (C) Copyright 2012 MATPLOTLIB (vn 1.2.0)
-#
-
-'''
-Generate a thumbnail gallery of examples.
-'''
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-
-import os
-import glob
-import re
-import warnings
-
-import matplotlib.image as image
-from sphinx.util import status_iterator
-
-from sphinx.util import status_iterator
-
-template = '''\
-{{% extends "layout.html" %}}
-{{% set title = "Thumbnail gallery" %}}
-
-
-{{% block body %}}
-
-
-Click on any image to see full size image and source code
-')
-
- random_image_content_template = '''
-// This file was automatically generated by gen_gallery.py & should not be
-// modified directly.
-
-images = new Array();
-
-{}
-
-'''
-
- random_image_template = "['{thumbfile}', '{full_image}', '{link}'];"
- random_image_join = 'images[{}] = {}'
-
- dirs = ('General', 'Meteorology', 'Oceanography')
-
- for subdir in dirs:
- rows.append(header_template.format(subdir, subdir, subdir))
- toc_rows.append(toc_template.format(subdir, subdir))
-
- origdir = os.path.join(os.path.dirname(outdir), rootdir, subdir)
- if not os.path.exists(origdir):
- origdir = os.path.join(os.path.dirname(outdir), 'plot_directive',
- rootdir, subdir)
- thumbdir = os.path.join(outdir, rootdir, subdir, 'thumbnails')
- if not os.path.exists(thumbdir):
- os.makedirs(thumbdir)
-
- data = []
-
- for filename in sorted(glob.glob(os.path.join(origdir, '*.png'))):
- if filename.endswith('hires.png'):
- continue
-
- path, filename = os.path.split(filename)
- basename, ext = os.path.splitext(filename)
- if basename in skips:
- continue
-
- # Create thumbnails based on images in tmpdir, and place them
- # within the build tree.
- orig_path = str(os.path.join(origdir, filename))
- thumb_path = str(os.path.join(thumbdir, filename))
- if out_of_date(orig_path, thumb_path) or True:
- thumbnails[orig_path] = thumb_path
-
- m = multiimage.match(basename)
- if m is not None:
- basename = m.group(1)
-
- data.append((subdir, basename,
- os.path.join(rootdir, subdir, 'thumbnails',
- filename)))
-
- for (subdir, basename, thumbfile) in data:
- if thumbfile is not None:
- anchor = os.path.basename(thumbfile)
- anchor = os.path.splitext(anchor)[0].replace('_', '-')
- link = 'examples/{}/{}.html#{}'.format(
- subdir,
- basename,
- anchor)
- rows.append(link_template.format(
- href=link,
- thumb_file=thumbfile,
- alternative_text=basename))
- random_image.append(random_image_template.format(
- link=link,
- thumbfile=thumbfile,
- basename=basename,
- full_image='_images/' + os.path.basename(thumbfile)))
-
- if len(data) == 0:
- warnings.warn('No thumbnails were found in {}'.format(subdir))
-
-        # Close out the <div> opened up at the top of this loop.
-        rows.append('</div>')
-
- # Generate JS list of images for front page.
- random_image_content = '\n'.join([random_image_join.format(i, line)
- for i, line in enumerate(random_image)])
- random_image_content = random_image_content_template.format(
- random_image_content)
- random_image_script_path = os.path.join(app.builder.srcdir,
- '_static',
- 'random_image.js')
- with open(random_image_script_path, 'w') as fh:
- fh.write(random_image_content)
-
- content = template.format('\n'.join(toc_rows),
- '\n'.join(rows))
-
- # Only write out the file if the contents have actually changed.
- # Otherwise, this triggers a full rebuild of the docs.
-
- gallery_path = os.path.join(app.builder.srcdir,
- '_templates',
- 'gallery.html')
- if os.path.exists(gallery_path):
- with open(gallery_path, 'r') as fh:
- regenerate = fh.read() != content
- else:
- regenerate = True
- if regenerate:
- with open(gallery_path, 'w') as fh:
- fh.write(content)
-
- for key in status_iterator(thumbnails, 'generating thumbnails... ',
- length=len(thumbnails)):
- image.thumbnail(key, thumbnails[key], 0.3)
-
-
-def setup(app):
- app.connect('env-updated', gen_gallery)
diff --git a/docs/iris/src/sphinxext/generate_package_rst.py b/docs/iris/src/sphinxext/generate_package_rst.py
deleted file mode 100644
index f67efc7ebc..0000000000
--- a/docs/iris/src/sphinxext/generate_package_rst.py
+++ /dev/null
@@ -1,315 +0,0 @@
-# (C) British Crown Copyright 2010 - 2015, Met Office
-#
-# This file is part of Iris.
-#
-# Iris is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Lesser General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Iris is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-
-import os
-import sys
-import re
-import inspect
-
-
-document_dict = {
- # Use autoclass for classes.
- 'class': '''
-{object_docstring}
-
-..
-
- .. autoclass:: {object_name}
- :members:
- :undoc-members:
- :inherited-members:
-
-''',
- 'function': '''
-.. autofunction:: {object_name}
-
-''',
- # For everything else, let automodule do some magic...
- None: '''
-
-.. autodata:: {object_name}
-
-'''}
-
-
-horizontal_sep = '''
-.. raw:: html
-
-
-
-
-'''
-
-
-def lookup_object_type(obj):
- if inspect.isclass(obj):
- return 'class'
- elif inspect.isfunction(obj):
- return 'function'
- else:
- return None
-
-
-def auto_doc_module(file_path, import_name, root_package,
- package_toc=None, title=None):
- doc = r'''.. _{import_name}:
-
-{title_underline}
-{title}
-{title_underline}
-
-{sidebar}
-
-.. currentmodule:: {root_package}
-
-.. automodule:: {import_name}
-
-In this module:
-
-{module_elements}
-
-
-'''
- if package_toc:
- sidebar = '''
-.. sidebar:: Modules in this package
-
-{package_toc_tree}
-
- '''.format(package_toc_tree=package_toc)
- else:
- sidebar = ''
-
- try:
- mod = __import__(import_name)
- except ImportError as e:
- message = r'''.. error::
-
- This module could not be imported. Some dependencies are missing::
-
- ''' + str(e)
- return doc.format(title=title or import_name,
- title_underline='=' * len(title or import_name),
- import_name=import_name, root_package=root_package,
- sidebar=sidebar, module_elements=message)
-
- mod = sys.modules[import_name]
- elems = dir(mod)
-
- if '__all__' in elems:
- document_these = [(attr_name, getattr(mod, attr_name))
- for attr_name in mod.__all__]
- else:
- document_these = [(attr_name, getattr(mod, attr_name))
- for attr_name in elems
- if (not attr_name.startswith('_') and
- not inspect.ismodule(getattr(mod, attr_name)))]
-
- def is_from_this_module(arg):
- name = arg[0]
- obj = arg[1]
- return (hasattr(obj, '__module__') and
- obj.__module__ == mod.__name__)
-
- sort_order = {'class': 2, 'function': 1}
-
- # Sort them according to sort_order dict.
- def sort_key(arg):
- name = arg[0]
- obj = arg[1]
- return sort_order.get(lookup_object_type(obj), 0)
-
- document_these = filter(is_from_this_module, document_these)
- document_these = sorted(document_these, key=sort_key)
-
- lines = []
- for element, obj in document_these:
- object_name = import_name + '.' + element
- obj_content = document_dict[lookup_object_type(obj)].format(
- object_name=object_name,
- object_name_header_line='+' * len(object_name),
- object_docstring=inspect.getdoc(obj))
- lines.append(obj_content)
-
- lines = horizontal_sep.join(lines)
-
- module_elements = '\n'.join(' * :py:obj:`{}`'.format(element)
- for element, obj in document_these)
-
- lines = doc + lines
- return lines.format(title=title or import_name,
- title_underline='=' * len(title or import_name),
- import_name=import_name, root_package=root_package,
- sidebar=sidebar, module_elements=module_elements)
-
-
-def auto_doc_package(file_path, import_name, root_package, sub_packages):
- max_depth = 1 if import_name == 'iris' else 2
- package_toc = '\n '.join(sub_packages)
- package_toc = '''
- .. toctree::
- :maxdepth: {:d}
- :titlesonly:
-
- {}
-
-
-'''.format(max_depth, package_toc)
-
- if '.' in import_name:
- title = None
- else:
- title = import_name.capitalize() + ' reference documentation'
-
- return auto_doc_module(file_path, import_name, root_package,
- package_toc=package_toc, title=title)
-
-
-def auto_package_build(app):
- root_package = app.config.autopackage_name
- if root_package is None:
- raise ValueError('set the autopackage_name variable in the '
- 'conf.py file')
-
- if not isinstance(root_package, list):
- raise ValueError('autopackage was expecting a list of packages to '
- 'document e.g. ["itertools"]')
-
- for package in root_package:
- do_package(package)
-
-
-def do_package(package_name):
- out_dir = package_name + os.path.sep
-
- # Import the root package. If this fails then an import error will be
- # raised.
- module = __import__(package_name)
- root_package = package_name
- rootdir = os.path.dirname(module.__file__)
-
- package_folder = []
- module_folders = {}
- for root, subFolders, files in os.walk(rootdir):
- for fname in files:
- name, ext = os.path.splitext(fname)
-
- # Skip some non-relevant files.
- if (fname.startswith('.') or fname.startswith('#') or
- re.search('^_[^_]', fname) or fname.find('.svn') >= 0 or
- not (ext in ['.py', '.so'])):
- continue
-
- # Handle new shared library naming conventions
- if ext == '.so':
- name = name.split('.', 1)[0]
-
- rel_path = root_package + \
- os.path.join(root, fname).split(rootdir)[-1]
- mod_folder = root_package + \
- os.path.join(root).split(rootdir)[-1].replace('/', '.')
-
- # Only add this package to folder list if it contains an __init__
- # script.
- if name == '__init__':
- package_folder.append([mod_folder, rel_path])
- else:
- import_name = mod_folder + '.' + name
- mf_list = module_folders.setdefault(mod_folder, [])
- mf_list.append((import_name, rel_path))
- if not os.path.exists(out_dir):
- os.makedirs(out_dir)
-
- for package, package_path in package_folder:
- if '._' in package or 'test' in package:
- continue
-
- paths = []
- for spackage, spackage_path in package_folder:
-        # Ignore this package, packages that are not children of this
- # one, test packages, private packages, and packages that are
- # subpackages of subpackages (they'll be part of the subpackage).
- if spackage == package:
- continue
- if not spackage.startswith(package):
- continue
- if spackage.count('.') > package.count('.') + 1:
- continue
- if 'test' in spackage:
- continue
-
- split_path = spackage.rsplit('.', 2)[-2:]
- if any(part[0] == '_' for part in split_path):
- continue
-
- paths.append(os.path.join(*split_path) + '.rst')
-
- paths.extend(os.path.join(os.path.basename(os.path.dirname(path)),
- os.path.basename(path).split('.', 1)[0])
- for imp_name, path in module_folders.get(package, []))
-
- paths.sort()
- doc = auto_doc_package(package_path, package, root_package, paths)
-
- package_dir = out_dir + package.replace('.', os.path.sep)
- if not os.path.exists(package_dir):
- os.makedirs(out_dir + package.replace('.', os.path.sep))
-
- out_path = package_dir + '.rst'
- if not os.path.exists(out_path):
- print('Creating non-existent document {} ...'.format(out_path))
- with open(out_path, 'w') as fh:
- fh.write(doc)
- else:
- with open(out_path, 'r') as fh:
- existing_content = ''.join(fh.readlines())
- if doc != existing_content:
- print('Creating out of date document {} ...'.format(
- out_path))
- with open(out_path, 'w') as fh:
- fh.write(doc)
-
- for import_name, module_path in module_folders.get(package, []):
- doc = auto_doc_module(module_path, import_name, root_package)
- out_path = out_dir + import_name.replace('.', os.path.sep) + '.rst'
- if not os.path.exists(out_path):
- print('Creating non-existent document {} ...'.format(
- out_path))
- with open(out_path, 'w') as fh:
- fh.write(doc)
- else:
- with open(out_path, 'r') as fh:
- existing_content = ''.join(fh.readlines())
- if doc != existing_content:
- print('Creating out of date document {} ...'.format(
- out_path))
- with open(out_path, 'w') as fh:
- fh.write(doc)
-
-
-def setup(app):
- app.connect('builder-inited', auto_package_build)
- app.add_config_value('autopackage_name', None, 'env')
diff --git a/docs/iris/src/userguide/citation.rst b/docs/iris/src/userguide/citation.rst
deleted file mode 100644
index 01b655574e..0000000000
--- a/docs/iris/src/userguide/citation.rst
+++ /dev/null
@@ -1,55 +0,0 @@
-.. _Citing_Iris:
-
-===========
-Citing Iris
-===========
-
-If Iris played an important part in your research then please add us to your reference list by using one of the recommendations below.
-
-************
-BibTeX entry
-************
-
-For example::
-
- @manual{Iris,
- author = {{Met Office}},
- title = {Iris: A Python library for analysing and visualising meteorological and oceanographic data sets},
- edition = {v1.2},
- year = {2010 - 2013},
- address = {Exeter, Devon },
- url = {http://scitools.org.uk/}
- }
-
-
-*******************
-Downloaded Software
-*******************
-
-Suggested format::
-
- ProductName. Version. ReleaseDate. Publisher. Location. DOIorURL. DownloadDate.
-
-For example::
-
- Iris. v1.2. 28-Feb-2013. Met Office. UK. https://github.com/SciTools/iris/archive/v1.2.0.tar.gz 01-03-2013
-
-
-********************
-Checked out Software
-********************
-
-Suggested format::
-
- ProductName. Publisher. URL. CheckoutDate. RepositorySpecificCheckoutInformation.
-
-For example::
-
- Iris. Met Office. git@github.com:SciTools/iris.git 06-03-2013
-
-.. _How to cite and describe software: http://software.ac.uk/so-exactly-what-software-did-you-use
-
-
-Reference: [Jackson]_.
-
-.. [Jackson] Jackson, M. 2012. `How to cite and describe software`_. Accessed 06-03-2013.
diff --git a/docs/iris/src/userguide/code_maintenance.rst b/docs/iris/src/userguide/code_maintenance.rst
deleted file mode 100644
index 00ba30506c..0000000000
--- a/docs/iris/src/userguide/code_maintenance.rst
+++ /dev/null
@@ -1,60 +0,0 @@
-Code Maintenance
-================
-
-From a user point of view "code maintenance" means ensuring that your existing
-working code stays working, in the face of changes to Iris.
-
-
-Stability and Change
----------------------
-
-In practice, as Iris develops, most users will want to periodically upgrade
-their installed version to access new features or at least bugfixes.
-
-This is obvious if you are still developing other code that uses Iris, or using
-code from other sources.
-However, even if you have only legacy code that remains untouched, some code
-maintenance effort is probably still necessary :
-
- * On the one hand, *in principle*, working code will go on working, as long
- as you don't change anything else.
-
- * However, such "version stasis" can easily become a growing burden, if you
- are simply waiting until an update becomes unavoidable : Often, that will
- eventually occur when you need to update some other software component,
- for some completely unconnected reason.
-
-
-Principles of Change Management
--------------------------------
-
-When you upgrade software to a new version, you often find that you need to
-rewrite your legacy code, simply to keep it working.
-
-In Iris, however, we aim to reduce code maintenance problems to an absolute
-minimum by following defined change management rules.
-These ensure that, *within a major release number* :
-
- * you can be confident that your code will still work with subsequent minor
- releases
-
- * you will be aware of future incompatibility problems in advance
-
- * you can defer making code compatibility changes for some time, until it
- suits you
-
-The above applies to minor version upgrades : e.g. code that works with version
-"1.4.2" should still work with a subsequent minor release such as "1.5.0" or
-"1.7.2".
-
-A *major* release however, e.g. "v2.0.0" or "v3.0.0", can include more
-significant changes, including so-called "breaking" changes: This means that
-existing code may need to be modified to make it work with the new version.
-
-Since breaking changes can only occur at major releases, these are the *only*
-times we can alter or remove existing behaviours (even deprecated
-ones). This is what a major release is for : it enables the removal and
-replacement of old features.
-
-Of course, even at a major release, we do still aim to keep breaking changes to
-a minimum.
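One practical way to get that advance warning, offered here as a sketch rather
than an official recommendation, is to occasionally run your own code with all
warnings promoted to errors, so that deprecated Iris behaviour fails loudly
before a major release removes it:

.. code-block:: python

    import warnings

    import iris

    # Promote every warning (including deprecation warnings) to an error
    # while exercising legacy code, so deprecated usage shows up now.
    warnings.simplefilter('error')

    cube = iris.load_cube(iris.sample_data_path('air_temp.pp'))
    print(cube.summary(shorten=True))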
diff --git a/docs/iris/src/userguide/concat.png b/docs/iris/src/userguide/concat.png
deleted file mode 100644
index eb3d84046e..0000000000
Binary files a/docs/iris/src/userguide/concat.png and /dev/null differ
diff --git a/docs/iris/src/userguide/concat.svg b/docs/iris/src/userguide/concat.svg
deleted file mode 100644
index 0234b37bfa..0000000000
--- a/docs/iris/src/userguide/concat.svg
+++ /dev/null
@@ -1,782 +0,0 @@
-
-
-
-
diff --git a/docs/iris/src/userguide/cube_diagram.dia b/docs/iris/src/userguide/cube_diagram.dia
deleted file mode 100644
index 8edc611782..0000000000
Binary files a/docs/iris/src/userguide/cube_diagram.dia and /dev/null differ
diff --git a/docs/iris/src/userguide/cube_diagram.png b/docs/iris/src/userguide/cube_diagram.png
deleted file mode 100644
index 80f5328c3b..0000000000
Binary files a/docs/iris/src/userguide/cube_diagram.png and /dev/null differ
diff --git a/docs/iris/src/userguide/cube_maths.rst b/docs/iris/src/userguide/cube_maths.rst
deleted file mode 100644
index 6c6f846bc3..0000000000
--- a/docs/iris/src/userguide/cube_maths.rst
+++ /dev/null
@@ -1,217 +0,0 @@
-======================
-Basic cube mathematics
-======================
-
-
-The section :doc:`navigating_a_cube` highlighted that
-every cube has a data attribute;
-this attribute can then be manipulated directly::
-
- cube.data -= 273.15
-
-The problem with manipulating the data directly is that other metadata may
-become inconsistent; in this case the units of the cube are no longer what was
-intended. This example could be rectified by changing the units attribute::
-
- cube.units = 'celsius'
-
-.. note::
-
- :meth:`iris.cube.Cube.convert_units` can be used to automatically convert a
- cube's data and update its units attribute.
- So, the two steps above can be achieved by::
-
- cube.convert_units('celsius')
-
-In order to reduce the amount of metadata which becomes inconsistent,
-fundamental arithmetic operations such as addition, subtraction, division
-and multiplication can be applied directly to any cube.
-
-Calculating the difference between two cubes
---------------------------------------------
-
-Let's load some air temperature which runs from 1860 to 2100::
-
- filename = iris.sample_data_path('E1_north_america.nc')
- air_temp = iris.load_cube(filename, 'air_temperature')
-
-We can now get the first and last time slices using indexing
-(see :ref:`subsetting_a_cube` for a reminder)::
-
- t_first = air_temp[0, :, :]
- t_last = air_temp[-1, :, :]
-
-.. testsetup::
-
- filename = iris.sample_data_path('E1_north_america.nc')
- air_temp = iris.load_cube(filename, 'air_temperature')
- t_first = air_temp[0, :, :]
- t_last = air_temp[-1, :, :]
-
-And finally we can subtract the two.
-The result is a cube of the same size as the original two time slices,
-but with the data representing their difference:
-
- >>> print(t_last - t_first)
- unknown / (K) (latitude: 37; longitude: 49)
- Dimension coordinates:
- latitude x -
- longitude - x
- Scalar coordinates:
- forecast_reference_time: 1859-09-01 06:00:00
- height: 1.5 m
-
-
-.. note::
-
- Notice that the coordinates "time" and "forecast_period" have been removed
- from the resultant cube;
- this is because these coordinates differed between the two input cubes.
-
-
-.. _cube-maths_anomaly:
-
-Calculating a cube anomaly
---------------------------
-
-In section :doc:`cube_statistics` we discussed how the dimensionality of a cube
-can be reduced using the :meth:`Cube.collapsed <iris.cube.Cube.collapsed>` method
-to calculate a statistic over a dimension.
-
-Let's use that method to calculate a mean of our air temperature time-series,
-which we'll then use to calculate a time mean anomaly and highlight the powerful
-benefits of cube broadcasting.
-
-First, let's remind ourselves of the shape of our air temperature time-series
-cube::
-
- >>> print(air_temp.summary(True))
- air_temperature / (K) (time: 240; latitude: 37; longitude: 49)
-
-Now, we'll calculate the time-series mean using the
-:meth:`Cube.collapsed <iris.cube.Cube.collapsed>` method::
-
- >>> air_temp_mean = air_temp.collapsed('time', iris.analysis.MEAN)
- >>> print(air_temp_mean.summary(True))
- air_temperature / (K) (latitude: 37; longitude: 49)
-
-As expected the *time* dimension has been collapsed, reducing the
-dimensionality of the resultant *air_temp_mean* cube. This time-series mean can
-now be used to calculate the time mean anomaly against the original
-time-series::
-
- >>> anomaly = air_temp - air_temp_mean
- >>> print(anomaly.summary(True))
- unknown / (K) (time: 240; latitude: 37; longitude: 49)
-
-Notice that the calculation of the *anomaly* involves subtracting a
-*2d* cube from a *3d* cube to yield a *3d* result. This is only possible
-because cube broadcasting is performed during cube arithmetic operations.
-
-Cube broadcasting follows similar broadcasting rules as
-`NumPy `_, but
-the additional richness of Iris coordinate meta-data provides an enhanced
-capability beyond the basic broadcasting behaviour of NumPy.
-
-As the coordinate meta-data of a cube uniquely describes each dimension, it is
-possible to leverage this knowledge to identify the similar dimensions involved
-in a cube arithmetic operation. This essentially means that we are no longer
-restricted to performing arithmetic on cubes with identical shapes.
-
-This extended broadcasting behaviour is highlighted in the following
-examples. The first of these shows that it is possible to involve the
-transpose of the air temperature time-series in an arithmetic operation with
-itself.
-
-Let's first create the transpose of the air temperature time-series::
-
- >>> air_temp_T = air_temp.copy()
- >>> air_temp_T.transpose()
- >>> print(air_temp_T.summary(True))
- air_temperature / (K) (longitude: 49; latitude: 37; time: 240)
-
-Now add the transpose to the original time-series::
-
- >>> result = air_temp + air_temp_T
- >>> print(result.summary(True))
- unknown / (K) (time: 240; latitude: 37; longitude: 49)
-
-Notice that the *result* is the same dimensionality and shape as *air_temp*.
-Let's check that the arithmetic operation has calculated a result that
-we would intuitively expect::
-
- >>> result == 2 * air_temp
- True
-
-Let's extend this example slightly, by taking a slice from the middle
-*latitude* dimension of the transpose cube::
-
- >>> air_temp_T_slice = air_temp_T[:, 0, :]
- >>> print(air_temp_T_slice.summary(True))
- air_temperature / (K) (longitude: 49; time: 240)
-
-Compared to our original time-series, the *air_temp_T_slice* cube has one
-less dimension *and* its shape is different. However, this doesn't prevent
-us from performing cube arithmetic with it, thanks to the extended cube
-broadcasting behaviour::
-
- >>> result = air_temp - air_temp_T_slice
- >>> print(result.summary(True))
- unknown / (K) (time: 240; latitude: 37; longitude: 49)
-
-Combining multiple phenomena to form a new one
-----------------------------------------------
-
-Combining cubes of potential-temperature and pressure we can calculate
-the associated temperature using the equation:
-
-.. math::
-
- T = \theta (\frac{p}{p_0}) ^ {(287.05 / 1005)}
-
-Where :math:`p` is pressure, :math:`\theta` is potential temperature,
-:math:`p_0` is the potential temperature reference pressure
-and :math:`T` is temperature.
-
-First, let's load pressure and potential temperature cubes::
-
- filename = iris.sample_data_path('colpex.pp')
- phenomenon_names = ['air_potential_temperature', 'air_pressure']
- pot_temperature, pressure = iris.load_cubes(filename, phenomenon_names)
-
-In order to calculate :math:`\frac{p}{p_0}` we can define a coordinate which
-represents the standard reference pressure of 1000 hPa::
-
- import iris.coords
- p0 = iris.coords.AuxCoord(1000.0,
- long_name='reference_pressure',
- units='hPa')
-
-We must ensure that the units of ``pressure`` and ``p0`` are the same,
-so convert the newly created coordinate using
-the :meth:`iris.coords.Coord.convert_units` method::
-
- p0.convert_units(pressure.units)
-
-Now we can combine all of this information to calculate the air temperature
-using the equation above::
-
- temperature = pot_temperature * ( (pressure / p0) ** (287.05 / 1005) )
-
-Finally, the cube we have created needs to be given a suitable name::
-
- temperature.rename('air_temperature')
-
-The result could now be plotted using the guidance provided in the
-:doc:`plotting_a_cube` section.
-
-.. htmlonly::
-
- A very similar example to this can be found in
- :doc:`/examples/Meteorology/deriving_phenomena`.
-
-.. latexonly::
-
- A very similar example to this can be found in the examples section,
- with the title "Deriving Exner Pressure and Air Temperature".
-
diff --git a/docs/iris/src/userguide/cube_statistics.rst b/docs/iris/src/userguide/cube_statistics.rst
deleted file mode 100644
index 3ca7d9a2e0..0000000000
--- a/docs/iris/src/userguide/cube_statistics.rst
+++ /dev/null
@@ -1,334 +0,0 @@
-.. _cube-statistics:
-
-===============
-Cube statistics
-===============
-
-.. _cube-statistics-collapsing:
-
-Collapsing entire data dimensions
----------------------------------
-
-.. testsetup::
-
- import iris
- filename = iris.sample_data_path('uk_hires.pp')
- cube = iris.load_cube(filename, 'air_potential_temperature')
-
- import iris.analysis.cartography
- cube.coord('grid_latitude').guess_bounds()
- cube.coord('grid_longitude').guess_bounds()
- grid_areas = iris.analysis.cartography.area_weights(cube)
-
-
-In the :doc:`subsetting_a_cube` section we saw how to extract a subset of a
-cube in order to reduce either its dimensionality or its resolution.
-Instead of simply extracting a sub-region of the data,
-we can produce statistical functions of the data values
-across a particular dimension,
-such as a 'mean over time' or 'minimum over latitude'.
-
-.. _cube-statistics_forecast_printout:
-
-For instance, suppose we have a cube:
-
- >>> import iris
- >>> filename = iris.sample_data_path('uk_hires.pp')
- >>> cube = iris.load_cube(filename, 'air_potential_temperature')
- >>> print(cube)
- air_potential_temperature / (K) (time: 3; model_level_number: 7; grid_latitude: 204; grid_longitude: 187)
- Dimension coordinates:
- time x - - -
- model_level_number - x - -
- grid_latitude - - x -
- grid_longitude - - - x
- Auxiliary coordinates:
- forecast_period x - - -
- level_height - x - -
- sigma - x - -
- surface_altitude - - x x
- Derived coordinates:
- altitude - x x x
- Scalar coordinates:
- forecast_reference_time: 2009-11-19 04:00:00
- Attributes:
- STASH: m01s00i004
- source: Data from Met Office Unified Model
- um_version: 7.3
-
-
-In this case we have a 4 dimensional cube;
-to collapse the vertical (z) dimension down to a single value by taking the mean,
-we can pass the coordinate name and the aggregation definition to the
-:meth:`Cube.collapsed() <iris.cube.Cube.collapsed>` method:
-
- >>> import iris.analysis
- >>> vertical_mean = cube.collapsed('model_level_number', iris.analysis.MEAN)
- >>> print(vertical_mean)
- air_potential_temperature / (K) (time: 3; grid_latitude: 204; grid_longitude: 187)
- Dimension coordinates:
- time x - -
- grid_latitude - x -
- grid_longitude - - x
- Auxiliary coordinates:
- forecast_period x - -
- surface_altitude - x x
- Derived coordinates:
- altitude - x x
- Scalar coordinates:
- forecast_reference_time: 2009-11-19 04:00:00
- level_height: 696.6666 m, bound=(0.0, 1393.3333) m
- model_level_number: 10, bound=(1, 19)
- sigma: 0.92292976, bound=(0.8458596, 1.0)
- Attributes:
- STASH: m01s00i004
- source: Data from Met Office Unified Model
- um_version: 7.3
- Cell methods:
- mean: model_level_number
-
-
-Similarly, other analysis operators such as ``MAX``, ``MIN`` and ``STD_DEV``
-can be used instead of ``MEAN``; see :mod:`iris.analysis` for a full list
-of currently supported operators.
-
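-As a quick sketch, collapsing the same dimension with a different aggregator
-simply means swapping the second argument::
-
-    vertical_max = cube.collapsed('model_level_number', iris.analysis.MAX)
-    vertical_std_dev = cube.collapsed('model_level_number', iris.analysis.STD_DEV)
-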
-For an example of using this functionality, the
-:ref:`Hovmoller diagram ` example found
-in the gallery takes a zonal mean of an ``XYT`` cube by using the
-``collapsed`` method with ``latitude`` and ``iris.analysis.MEAN`` as arguments.
-
-.. _cube-statistics-collapsing-average:
-
-Area averaging
-^^^^^^^^^^^^^^
-
-Some operators support additional keywords being passed to the ``cube.collapsed`` method.
-For example, :func:`iris.analysis.MEAN` supports
-a ``weights`` keyword which can be combined with
-:func:`iris.analysis.cartography.area_weights` to calculate an area average.
-
-Let's use the same data as was loaded in the previous example.
-Since ``grid_latitude`` and ``grid_longitude`` were both point coordinates
-we must guess bound positions for them
-in order to calculate the area of the grid boxes::
-
- import iris.analysis.cartography
- cube.coord('grid_latitude').guess_bounds()
- cube.coord('grid_longitude').guess_bounds()
- grid_areas = iris.analysis.cartography.area_weights(cube)
-
-These areas can now be passed to the ``collapsed`` method as weights:
-
-.. doctest::
-
- >>> new_cube = cube.collapsed(['grid_longitude', 'grid_latitude'], iris.analysis.MEAN, weights=grid_areas)
- >>> print(new_cube)
- air_potential_temperature / (K) (time: 3; model_level_number: 7)
- Dimension coordinates:
- time x -
- model_level_number - x
- Auxiliary coordinates:
- forecast_period x -
- level_height - x
- sigma - x
- Derived coordinates:
- altitude - x
- Scalar coordinates:
- forecast_reference_time: 2009-11-19 04:00:00
- grid_latitude: 1.5145501 degrees, bound=(0.14430022, 2.8848) degrees
- grid_longitude: 358.74948 degrees, bound=(357.494, 360.00497) degrees
- surface_altitude: 399.625 m, bound=(-14.0, 813.25) m
- Attributes:
- STASH: m01s00i004
- source: Data from Met Office Unified Model
- um_version: 7.3
- Cell methods:
- mean: grid_longitude, grid_latitude
-
-Several examples of area averaging exist in the gallery which may be of interest,
-including an example on taking a :ref:`global area-weighted mean
-`.
-
-.. _cube-statistics-aggregated-by:
-
-Partially reducing data dimensions
-----------------------------------
-
-Instead of completely collapsing a dimension, other methods can be applied
-to reduce or filter the number of data points of a particular dimension.
-
-
-Aggregation of grouped data
-^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-The :meth:`Cube.aggregated_by <iris.cube.Cube.aggregated_by>` operation
-combines data for all points with the same value of a given coordinate.
-To do this, you need a coordinate whose points take on only a limited set
-of different values -- the *number* of these then determines the size of the
-reduced dimension.
-The :mod:`iris.coord_categorisation` module can be used to make such
-'categorical' coordinates out of ordinary ones: the most common use is
-to aggregate data over regular *time intervals*,
-such as by calendar month or day of the week.
-
-For example, let's create two new coordinates on the cube
-to represent the climatological seasons and the season year respectively::
-
- import iris
- import iris.coord_categorisation
-
- filename = iris.sample_data_path('ostia_monthly.nc')
- cube = iris.load_cube(filename, 'surface_temperature')
-
- iris.coord_categorisation.add_season(cube, 'time', name='clim_season')
- iris.coord_categorisation.add_season_year(cube, 'time', name='season_year')
-
-
-
-.. note::
-
- The 'season year' is not the same as year number, because (e.g.) the months
- Dec11, Jan12 + Feb12 all belong to 'DJF-12'.
- See :meth:`iris.coord_categorisation.add_season_year`.
-
-
-.. testsetup:: aggregation
-
- import datetime
- import iris
-
- filename = iris.sample_data_path('ostia_monthly.nc')
- cube = iris.load_cube(filename, 'surface_temperature')
-
- import iris.coord_categorisation
- iris.coord_categorisation.add_season(cube, 'time', name='clim_season')
- iris.coord_categorisation.add_season_year(cube, 'time', name='season_year')
-
- annual_seasonal_mean = cube.aggregated_by(
- ['clim_season', 'season_year'],
- iris.analysis.MEAN)
-
-
-Printing this cube now shows that two extra coordinates exist on the cube:
-
-.. doctest:: aggregation
-
- >>> print(cube)
- surface_temperature / (K) (time: 54; latitude: 18; longitude: 432)
- Dimension coordinates:
- time x - -
- latitude - x -
- longitude - - x
- Auxiliary coordinates:
- clim_season x - -
- forecast_reference_time x - -
- season_year x - -
- Scalar coordinates:
- forecast_period: 0 hours
- Attributes:
- Conventions: CF-1.5
- STASH: m01s00i024
- Cell methods:
- mean: month, year
-
-
-These two coordinates can now be used to aggregate by season and climate-year:
-
-.. doctest:: aggregation
-
- >>> annual_seasonal_mean = cube.aggregated_by(
- ... ['clim_season', 'season_year'],
- ... iris.analysis.MEAN)
-    >>> print(repr(annual_seasonal_mean))
-    <iris 'Cube' of surface_temperature / (K) (time: 19; latitude: 18; longitude: 432)>
-
-
-The primary change in the cube is that the cube's data has been
-reduced in the 'time' dimension by aggregation (taking means, in this case).
-This has collected together all datapoints with the same values of season and
-season-year.
-The results are now indexed by the 19 different possible values of season and
-season-year in a new, reduced 'time' dimension.
-
-We can see this by printing the first 10 values of season+year
-from the original cube: These points are individual months,
-so adjacent ones are often in the same season:
-
-.. doctest:: aggregation
- :options: +NORMALIZE_WHITESPACE
-
- >>> for season, year in zip(cube.coord('clim_season')[:10].points,
- ... cube.coord('season_year')[:10].points):
- ... print(season + ' ' + str(year))
- mam 2006
- mam 2006
- jja 2006
- jja 2006
- jja 2006
- son 2006
- son 2006
- son 2006
- djf 2007
- djf 2007
-
-Compare this with the first 10 values of the new cube's coordinates:
-All the points now have distinct season+year values:
-
-.. doctest:: aggregation
- :options: +NORMALIZE_WHITESPACE
-
- >>> for season, year in zip(
- ... annual_seasonal_mean.coord('clim_season')[:10].points,
- ... annual_seasonal_mean.coord('season_year')[:10].points):
- ... print(season + ' ' + str(year))
- mam 2006
- jja 2006
- son 2006
- djf 2007
- mam 2007
- jja 2007
- son 2007
- djf 2008
- mam 2008
- jja 2008
-
-Because the original data started in April 2006 we have some incomplete seasons
-(e.g. there were only two months' worth of data for 'mam-2006').
-We can fix this by removing all of the resultant 'times' which
-do not cover a three month period (note: judged here as > 3*28 days):
-
-.. doctest:: aggregation
-
- >>> tdelta_3mth = datetime.timedelta(hours=3*28*24.0)
- >>> spans_three_months = lambda t: (t.bound[1] - t.bound[0]) > tdelta_3mth
- >>> three_months_bound = iris.Constraint(time=spans_three_months)
- >>> full_season_means = annual_seasonal_mean.extract(three_months_bound)
-    >>> full_season_means
-    <iris 'Cube' of surface_temperature / (K) (time: 17; latitude: 18; longitude: 432)>
-
-
-The final result now represents the seasonal mean temperature for 17 seasons
-from jja-2006 to jja-2010:
-
-.. doctest:: aggregation
- :options: +NORMALIZE_WHITESPACE
-
- >>> for season, year in zip(full_season_means.coord('clim_season').points,
- ... full_season_means.coord('season_year').points):
- ... print(season + ' ' + str(year))
- jja 2006
- son 2006
- djf 2007
- mam 2007
- jja 2007
- son 2007
- djf 2008
- mam 2008
- jja 2008
- son 2008
- djf 2009
- mam 2009
- jja 2009
- son 2009
- djf 2010
- mam 2010
- jja 2010
-
diff --git a/docs/iris/src/userguide/end_of_userguide.rst b/docs/iris/src/userguide/end_of_userguide.rst
deleted file mode 100644
index c8f951a634..0000000000
--- a/docs/iris/src/userguide/end_of_userguide.rst
+++ /dev/null
@@ -1,15 +0,0 @@
-End of the user guide
-=====================
-
-If this was your first time reading the user guide, we hope you found it enjoyable and informative.
-It is advised that you now go back to the :doc:`start ` and try experimenting with your own data.
-
-
-
-Iris gallery
-------------
-It can be very daunting to start coding a project from an empty file; that is why you will find many in-depth
-examples in the Iris gallery which can be used as a goal-driven reference for producing your own visualisations.
-
-If you produce a visualisation which you think would add value to the gallery, please get in touch with us and
-we will consider including it as an example for all to benefit from.
diff --git a/docs/iris/src/userguide/index.rst b/docs/iris/src/userguide/index.rst
deleted file mode 100644
index 8c0b24bec3..0000000000
--- a/docs/iris/src/userguide/index.rst
+++ /dev/null
@@ -1,41 +0,0 @@
-.. _user_guide_index:
-
-===============
-Iris user guide
-===============
-
-How to use the user guide
----------------------------
-If you are reading this user guide for the first time it is strongly recommended that you read the user guide
-fully before experimenting with your own data files.
-
-
-Much of the content has supplementary links to the reference documentation; you will not need to follow these
-links in order to understand the guide but they may serve as a useful reference for future exploration.
-
-.. htmlonly::
-
- Since later pages depend on earlier ones, try reading this user guide sequentially using the ``next`` and ``previous`` links.
-
-
-User guide table of contents
--------------------------------
-
-.. toctree::
- :maxdepth: 2
- :numbered:
-
- iris_cubes.rst
- loading_iris_cubes.rst
- saving_iris_cubes.rst
- navigating_a_cube.rst
- subsetting_a_cube.rst
- real_and_lazy_data.rst
- plotting_a_cube.rst
- interpolation_and_regridding.rst
- merge_and_concat.rst
- cube_statistics.rst
- cube_maths.rst
- citation.rst
- code_maintenance.rst
- end_of_userguide.rst
diff --git a/docs/iris/src/userguide/interpolation_and_regridding.rst b/docs/iris/src/userguide/interpolation_and_regridding.rst
deleted file mode 100644
index 565f9b61eb..0000000000
--- a/docs/iris/src/userguide/interpolation_and_regridding.rst
+++ /dev/null
@@ -1,412 +0,0 @@
-.. _interpolation_and_regridding:
-
-
-.. testsetup:: *
-
- import numpy as np
- import iris
- import warnings
- warnings.simplefilter('ignore')
-
-=================================
-Cube interpolation and regridding
-=================================
-
-Iris provides powerful cube-aware interpolation and regridding functionality,
-exposed through Iris cube methods. This functionality is provided by building
-upon existing interpolation schemes implemented by SciPy.
-
-In Iris we refer to the available types of interpolation and regridding as
-`schemes`. The following are the interpolation schemes that are currently
-available in Iris:
-
- * linear interpolation (:class:`iris.analysis.Linear`), and
- * nearest-neighbour interpolation (:class:`iris.analysis.Nearest`).
-
-The following are the regridding schemes that are currently available in Iris:
-
- * linear regridding (:class:`iris.analysis.Linear`),
- * nearest-neighbour regridding (:class:`iris.analysis.Nearest`), and
- * area-weighted regridding (:class:`iris.analysis.AreaWeighted`, first-order conservative).
-
-
-.. _interpolation:
-
-Interpolation
--------------
-
-Interpolating a cube is achieved with the :meth:`~iris.cube.Cube.interpolate`
-method. This method expects two arguments:
-
-    #. the sample points to interpolate, and
-    #. the interpolation scheme to use.
-
-The result is a new cube, interpolated at the sample points.
-
-Sample points must be defined as an iterable of ``(coord, value(s))`` pairs.
-The `coord` argument can be either a coordinate name or coordinate instance.
-The specified coordinate must exist on the cube being interpolated! For example:
-
- * coordinate names and scalar sample points: ``[('latitude', 51.48), ('longitude', 0)]``,
- * a coordinate instance and a scalar sample point: ``[(cube.coord('latitude'), 51.48)]``, and
- * a coordinate name and a NumPy array of sample points: ``[('longitude', np.linspace(-11, 2, 14))]``
-
-are all examples of valid sample points.
-
-The values for coordinates that correspond to date/times can be supplied as
-datetime.datetime or cftime.datetime instances,
-e.g. ``[('time', datetime.datetime(2009, 11, 19, 10, 30))]``.
-
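-For example, a brief sketch of interpolating at a single date/time (assuming
-``cube`` is a cube with a ``time`` dimension coordinate)::
-
-    import datetime
-    sample_points = [('time', datetime.datetime(2009, 11, 19, 10, 30))]
-    result = cube.interpolate(sample_points, iris.analysis.Linear())
-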
-Let's take the air temperature cube we've seen previously:
-
- >>> air_temp = iris.load_cube(iris.sample_data_path('air_temp.pp'))
- >>> print(air_temp)
- air_temperature / (K) (latitude: 73; longitude: 96)
- Dimension coordinates:
- latitude x -
- longitude - x
- Scalar coordinates:
- forecast_period: 6477 hours, bound=(-28083.0, 6477.0) hours
- forecast_reference_time: 1998-03-01 03:00:00
- pressure: 1000.0 hPa
- time: 1998-12-01 00:00:00, bound=(1994-12-01 00:00:00, 1998-12-01 00:00:00)
- Attributes:
- STASH: m01s16i203
- source: Data from Met Office Unified Model
- Cell methods:
- mean within years: time
- mean over years: time
-
-We can interpolate specific values from the coordinates of the cube:
-
- >>> sample_points = [('latitude', 51.48), ('longitude', 0)]
- >>> print(air_temp.interpolate(sample_points, iris.analysis.Linear()))
- air_temperature / (K) (scalar cube)
- Scalar coordinates:
- forecast_period: 6477 hours, bound=(-28083.0, 6477.0) hours
- forecast_reference_time: 1998-03-01 03:00:00
- latitude: 51.48 degrees
- longitude: 0 degrees
- pressure: 1000.0 hPa
- time: 1998-12-01 00:00:00, bound=(1994-12-01 00:00:00, 1998-12-01 00:00:00)
- Attributes:
- STASH: m01s16i203
- source: Data from Met Office Unified Model
- Cell methods:
- mean within years: time
- mean over years: time
-
-As we can see, the resulting cube is scalar and has longitude and latitude coordinates with
-the values defined in our sample points.
-
-It isn't necessary to specify sample points for every dimension, only those that you
-wish to interpolate over:
-
- >>> result = air_temp.interpolate([('longitude', 0)], iris.analysis.Linear())
- >>> print('Original: ' + air_temp.summary(shorten=True))
- Original: air_temperature / (K) (latitude: 73; longitude: 96)
- >>> print('Interpolated: ' + result.summary(shorten=True))
- Interpolated: air_temperature / (K) (latitude: 73)
-
-The sample points for a coordinate can be an array of values. When multiple coordinates are
-provided with arrays instead of scalar sample points, the coordinates on the resulting cube
-will be orthogonal:
-
- >>> sample_points = [('longitude', np.linspace(-11, 2, 14)),
- ... ('latitude', np.linspace(48, 60, 13))]
- >>> result = air_temp.interpolate(sample_points, iris.analysis.Linear())
- >>> print(result.summary(shorten=True))
- air_temperature / (K) (latitude: 13; longitude: 14)
-
-
-Interpolating non-horizontal coordinates
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Interpolation in Iris is not limited to horizontal-spatial coordinates - any
-coordinate satisfying the prerequisites of the chosen scheme may be interpolated
-over.
-
-For instance, the :class:`iris.analysis.Linear` scheme requires 1D numeric,
-monotonic, coordinates. Supposing we have a single column cube such as
-the one defined below:
-
- >>> cube = iris.load_cube(iris.sample_data_path('hybrid_height.nc'), 'air_potential_temperature')
- >>> column = cube[:, 0, 0]
- >>> print(column.summary(shorten=True))
- air_potential_temperature / (K) (model_level_number: 15)
-
-This cube has a "hybrid-height" vertical coordinate system, meaning that the vertical
-coordinate is unevenly spaced in altitude:
-
- >>> print(column.coord('altitude').points)
- [ 418.69836 434.5705 456.7928 485.3665 520.2933 561.5752
- 609.2145 663.2141 723.57697 790.30664 863.4072 942.8823
- 1028.737 1120.9764 1219.6051 ]
-
-We could regularise the vertical coordinate by defining 10 equally spaced altitude
-sample points between 400 and 1250 and interpolating our vertical coordinate onto
-these sample points:
-
- >>> sample_points = [('altitude', np.linspace(400, 1250, 10))]
- >>> new_column = column.interpolate(sample_points, iris.analysis.Linear())
- >>> print(new_column.summary(shorten=True))
- air_potential_temperature / (K) (model_level_number: 10)
-
-Let's look at the original data, the interpolation line and
-the new data in a plot. This will help us to see what is going on:
-
-.. plot:: userguide/regridding_plots/interpolate_column.py
-
-The red diamonds on the extremes of the altitude values show that we have
-extrapolated data beyond the range of the original data. In some cases this is
-desirable but in other cases it is not. For example, this column defines
-a surface altitude value of 414m, so extrapolating an "air potential temperature"
-at 400m makes little physical sense in this case.
-
-We can control the extrapolation mode when defining the interpolation scheme.
-Controlling the extrapolation mode allows us to avoid situations like the above where
-extrapolating values makes little physical sense.
-
-The extrapolation mode is controlled by the ``extrapolation_mode`` keyword.
-For the available interpolation schemes available in Iris, the ``extrapolation_mode``
-keyword must be one of:
-
- * ``extrapolate`` -- the extrapolation points will be calculated by extending the gradient of the closest two points,
- * ``error`` -- a ValueError exception will be raised, notifying an attempt to extrapolate,
-    * ``nan`` -- the extrapolation points will be set to NaN,
- * ``mask`` -- the extrapolation points will always be masked, even if the source data is not a MaskedArray, or
- * ``nanmask`` -- if the source data is a MaskedArray the extrapolation points will be masked. Otherwise they will be set to NaN.
-
-Using an extrapolation mode is achieved by constructing an interpolation scheme
-with the extrapolation mode keyword set as required. The constructed scheme
-is then passed to the :meth:`~iris.cube.Cube.interpolate` method.
-For example, to mask values that lie beyond the range of the original data:
-
- >>> scheme = iris.analysis.Linear(extrapolation_mode='mask')
- >>> new_column = column.interpolate(sample_points, scheme)
- >>> print(new_column.coord('altitude').points)
- [-- 494.44451904296875 588.888916015625 683.333251953125 777.77783203125
- 872.2222290039062 966.666748046875 1061.111083984375 1155.555419921875 --]
-
-
-.. _caching_an_interpolator:
-
-Caching an interpolator
-^^^^^^^^^^^^^^^^^^^^^^^
-
-If you need to interpolate a cube on multiple sets of sample points you can
-'cache' an interpolator to be used for each of these interpolations. This can
-shorten the execution time of your code as the most computationally
-intensive part of an interpolation is setting up the interpolator.
-
-To cache an interpolator you must set up an interpolation scheme and call the
-scheme's interpolator method. The interpolator method takes as arguments:
-
-    #. a cube to be interpolated, and
-    #. an iterable of coordinate names or coordinate instances that are to be interpolated over.
-
-For example:
-
- >>> air_temp = iris.load_cube(iris.sample_data_path('air_temp.pp'))
- >>> interpolator = iris.analysis.Nearest().interpolator(air_temp, ['latitude', 'longitude'])
-
-When this cached interpolator is called you must pass it an iterable of sample points
-that have the same form as the iterable of coordinates passed to the constructor.
-So, to use the cached interpolator defined above:
-
- >>> latitudes = np.linspace(48, 60, 13)
- >>> longitudes = np.linspace(-11, 2, 14)
- >>> for lat, lon in zip(latitudes, longitudes):
- ... result = interpolator([lat, lon])
-
-In each case ``result`` will be a cube interpolated from the ``air_temp`` cube we
-passed to the interpolator.
-
-Note that you must specify the required extrapolation mode when setting up the cached interpolator.
-For example::
-
- >>> interpolator = iris.analysis.Nearest(extrapolation_mode='nan').interpolator(cube, coords)
-
-
-.. _regridding:
-
-Regridding
-----------
-
-Regridding is conceptually a very similar process to interpolation in Iris.
-The primary difference is that interpolation is based on sample points, while
-regridding is based on the **horizontal** grid of *another cube*.
-
-Regridding a cube is achieved with the :meth:`cube.regrid() <iris.cube.Cube.regrid>` method.
-This method expects two arguments:
-
- #. *another cube* that defines the target grid onto which the cube should be regridded, and
- #. the regridding scheme to use.
-
-.. note::
-
- Regridding is a common operation needed to allow comparisons of data on different grids.
- The powerful mapping functionality provided by cartopy, however, means that regridding
- is often not necessary if performed just for visualisation purposes.
-
-Let's load two cubes that have different grids and coordinate systems:
-
- >>> global_air_temp = iris.load_cube(iris.sample_data_path('air_temp.pp'))
- >>> rotated_psl = iris.load_cube(iris.sample_data_path('rotated_pole.nc'))
-
-We can visually confirm that they are on different grids by plotting the two cubes:
-
-.. plot:: userguide/regridding_plots/regridding_plot.py
-
-Let's regrid the ``global_air_temp`` cube onto a rotated pole grid
-using a linear regridding scheme. To achieve this we pass the ``rotated_psl``
-cube to the regridder to supply the target grid to regrid the ``global_air_temp``
-cube onto:
-
- >>> rotated_air_temp = global_air_temp.regrid(rotated_psl, iris.analysis.Linear())
-
-.. plot:: userguide/regridding_plots/regridded_to_rotated.py
-
-We could regrid the pressure values onto the global grid, but this will involve
-some form of extrapolation. As with interpolation, we can control the extrapolation
-mode when defining the regridding scheme.
-
-For the available regridding schemes in Iris, the ``extrapolation_mode`` keyword
-must be one of:
-
- * ``extrapolate`` --
-
- * for :class:`~iris.analysis.Linear` the extrapolation points will be calculated by extending the gradient of the closest two points.
- * for :class:`~iris.analysis.Nearest` the extrapolation points will take their value from the nearest source point.
-
-    * ``nan`` -- the extrapolation points will be set to NaN.
- * ``error`` -- a ValueError exception will be raised, notifying an attempt to extrapolate.
- * ``mask`` -- the extrapolation points will always be masked, even if the source data is not a MaskedArray.
- * ``nanmask`` -- if the source data is a MaskedArray the extrapolation points will be masked. Otherwise they will be set to NaN.
-
-The ``rotated_psl`` cube is defined on a limited area rotated pole grid. If we regridded
-the ``rotated_psl`` cube onto the global grid as defined by the ``global_air_temp`` cube,
-any linearly extrapolated values would quickly become dominant and highly inaccurate.
-We can control this behaviour by defining the ``extrapolation_mode`` in the constructor
-of the regridding scheme to mask values that lie outside of the domain of the rotated
-pole grid:
-
- >>> scheme = iris.analysis.Linear(extrapolation_mode='mask')
- >>> global_psl = rotated_psl.regrid(global_air_temp, scheme)
-
-.. plot:: userguide/regridding_plots/regridded_to_global.py
-
-Notice that although we can still see the approximate shape of the rotated pole grid, the
-cells have now become rectangular in a plate carrée (equirectangular) projection.
-The spatial grid of the resulting cube is really global, with a large proportion of the
-data being masked.
-
-Area-weighted regridding
-^^^^^^^^^^^^^^^^^^^^^^^^
-
-A point-based regridding scheme (such as
-:class:`iris.analysis.Linear` or :class:`iris.analysis.Nearest`) is often not
-appropriate when quantities need to be conserved during regridding. The
-:class:`iris.analysis.AreaWeighted` scheme is less general than
-:class:`~iris.analysis.Linear` or :class:`~iris.analysis.Nearest`, but is a
-conservative regridding scheme, meaning that the area-weighted total is
-approximately preserved across grids.
-
-With the :class:`~iris.analysis.AreaWeighted` regridding scheme, each target grid-box's
-data is computed as a weighted mean of all grid-boxes from the source grid. The weighting
-for any given target grid-box is the area of the intersection with each of the
-source grid-boxes. This scheme performs well when regridding from a high
-resolution source grid to a lower resolution target grid, since all source data
-points will be accounted for in the target grid.
-
-Let's demonstrate this with the global air temperature cube we saw previously,
-along with a limited area cube containing total concentration of volcanic ash:
-
- >>> global_air_temp = iris.load_cube(iris.sample_data_path('air_temp.pp'))
- >>> print(global_air_temp.summary(shorten=True))
- air_temperature / (K) (latitude: 73; longitude: 96)
- >>>
- >>> regional_ash = iris.load_cube(iris.sample_data_path('NAME_output.txt'))
- >>> regional_ash = regional_ash.collapsed('flight_level', iris.analysis.SUM)
- >>> print(regional_ash.summary(shorten=True))
- VOLCANIC_ASH_AIR_CONCENTRATION / (g/m3) (latitude: 214; longitude: 584)
-
-One of the key limitations of the :class:`~iris.analysis.AreaWeighted`
-regridding scheme is that the two input grids must be defined in the same
-coordinate system as each other. Both input grids must also contain monotonic,
-bounded, 1D spatial coordinates.
-
-.. note::
-
- The :class:`~iris.analysis.AreaWeighted` regridding scheme requires spatial
- areas, therefore the longitude and latitude coordinates must be bounded.
- If the longitude and latitude bounds are not defined in the cube we can
- guess the bounds based on the coordinates' point values:
-
- >>> global_air_temp.coord('longitude').guess_bounds()
- >>> global_air_temp.coord('latitude').guess_bounds()
-
-Using NumPy's masked array module we can mask any data that falls below a meaningful
-concentration:
-
- >>> regional_ash.data = np.ma.masked_less(regional_ash.data, 5e-6)
-
-Finally, we can regrid the data using the :class:`~iris.analysis.AreaWeighted`
-regridding scheme:
-
- >>> scheme = iris.analysis.AreaWeighted(mdtol=0.5)
- >>> global_ash = regional_ash.regrid(global_air_temp, scheme)
- >>> print(global_ash.summary(shorten=True))
- VOLCANIC_ASH_AIR_CONCENTRATION / (g/m3) (latitude: 73; longitude: 96)
-
-Note that the :class:`~iris.analysis.AreaWeighted` regridding scheme allows us
-to define a missing data tolerance (``mdtol``), which specifies the tolerated
-fraction of masked data in any given target grid-box. If the fraction of masked
-data within a target grid-box exceeds this value, the data in this target
-grid-box will be masked in the result.
-
-The fraction of masked data is calculated based on the area of masked source
-grid-boxes that overlaps with each target grid-box. Defining an ``mdtol`` in the
-:class:`~iris.analysis.AreaWeighted` regridding scheme allows fine control
-of masked data tolerance. It is worth remembering that defining an ``mdtol`` of
-anything other than 1 will prevent the scheme from being fully conservative, as
-some data will be disregarded if it lies close to masked data.
-
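-As a sketch of this trade-off, the two extremes of ``mdtol`` can be compared
-directly, reusing the cubes loaded above::
-
-    # mdtol=0: mask a target grid-box if *any* contributing source data
-    # is masked (discards the most data).
-    strict = regional_ash.regrid(global_air_temp,
-                                 iris.analysis.AreaWeighted(mdtol=0))
-
-    # mdtol=1: only mask a target grid-box if *all* contributing source
-    # data is masked (keeps the regrid fully conservative).
-    tolerant = regional_ash.regrid(global_air_temp,
-                                   iris.analysis.AreaWeighted(mdtol=1))
-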
-To visualise the above regrid, let's plot the original data alongside the results
-of regridding with 3 distinct ``mdtol`` values:
-
-.. plot:: userguide/regridding_plots/regridded_to_global_area_weighted.py
-
-
-.. _caching_a_regridder:
-
-Caching a regridder
-^^^^^^^^^^^^^^^^^^^
-
-If you need to regrid multiple cubes with a common source grid onto a common
-target grid you can 'cache' a regridder to be used for each of these regrids.
-This can shorten the execution time of your code as the most computationally
-intensive part of a regrid is setting up the regridder.
-
-To cache a regridder you must set up a regridder scheme and call the
-scheme's regridder method. The regridder method takes as arguments:
-
- #. a cube (that is to be regridded) defining the source grid, and
- #. a cube defining the target grid to regrid the source cube to.
-
-For example:
-
- >>> global_air_temp = iris.load_cube(iris.sample_data_path('air_temp.pp'))
- >>> rotated_psl = iris.load_cube(iris.sample_data_path('rotated_pole.nc'))
- >>> regridder = iris.analysis.Nearest().regridder(global_air_temp, rotated_psl)
-
-When this cached regridder is called you must pass it a cube on the same grid
-as the source grid cube (in this case ``global_air_temp``) that is to be
-regridded to the target grid. For example::
-
- >>> for cube in list_of_cubes_on_source_grid:
- ... result = regridder(cube)
-
-In each case ``result`` will be the input cube regridded to the grid defined by
-the target grid cube (in this case ``rotated_psl``) that we used to define the
-cached regridder.
diff --git a/docs/iris/src/userguide/iris_cubes.rst b/docs/iris/src/userguide/iris_cubes.rst
deleted file mode 100644
index dc423afba1..0000000000
--- a/docs/iris/src/userguide/iris_cubes.rst
+++ /dev/null
@@ -1,196 +0,0 @@
-.. _user_guide_introduction:
-
-===================
-Introduction
-===================
-
-.. _iris_data_structures:
-
-Iris data structures
---------------------
-The top level object in Iris is called a cube. A cube contains data and metadata about a phenomenon.
-
-In Iris, a cube is an interpretation of the *Climate and Forecast (CF) Metadata Conventions* whose purpose is to:
-
- *require conforming datasets to contain sufficient metadata that they are self-describing... including physical
- units if appropriate, and that each value can be located in space (relative to earth-based coordinates) and time.*
-
-Whilst the CF conventions are often mentioned alongside NetCDF, Iris implements several major format importers which can take
-files of specific formats and turn them into Iris cubes. Additionally, a framework is provided which allows users
-to extend Iris' import capability to cater for specialist or unimplemented formats.
-
-A single cube describes one and only one phenomenon; it always has a name, a unit and
-an n-dimensional data array to represent the cube's phenomenon. In order to locate the
-data spatially, temporally, or in any other higher-dimensional space, a collection of *coordinates*
-exists on the cube.
-
-
-Coordinates
-===========
-
-A coordinate is a container to store metadata about some dimension(s) of a cube's data array and therefore,
-by definition, its phenomenon.
-
- * Each coordinate has a name and a unit.
- * When a coordinate is added to a cube, the data dimensions that it represents are also provided.
- * The shape of a coordinate is always the same as the shape of the associated data dimension(s) on the cube.
- * A dimension not explicitly listed signifies that the coordinate is independent of that dimension.
- * Each dimension of a coordinate must be mapped to a data dimension. The only coordinates with no mapping are
- scalar coordinates.
-
- * Depending on the underlying data that the coordinate is representing, its values may be discrete points or be
- bounded to represent interval extents (e.g. temperature at *point x* **vs** rainfall accumulation *between 0000-1200 hours*).
-    * Coordinates have an attributes dictionary which can hold arbitrary extra metadata, excluding certain restricted CF names.
- * More complex coordinates may contain a coordinate system which is necessary to fully interpret the values
- contained within the coordinate.
-
-There are two classes of coordinates:
-
- **DimCoord**
-
- * Numeric
- * Monotonic
- * Representative of, at most, a single data dimension (1d)
-
- **AuxCoord**
-
- * May be of any type, including strings
- * May represent multiple data dimensions (n-dimensional)
-
-
-Cube
-====
-A cube consists of:
-
- * a standard name and/or a long name and an appropriate unit
-    * a data array whose values are representative of the phenomenon
- * a collection of coordinates and associated data dimensions on the cube's data array, which are split into two separate lists:
-
- * *dimension coordinates* - DimCoords which uniquely map to exactly one data dimension, ordered by dimension.
- * *auxiliary coordinates* - DimCoords or AuxCoords which map to as many data dimensions as the coordinate has dimensions.
-
- * an attributes dictionary which, other than some protected CF names, can hold arbitrary extra metadata.
- * a list of cell methods to represent operations which have already been applied to the data (e.g. "mean over time")
- * a list of coordinate "factories" used for deriving coordinates from the values of other coordinates in the cube
-
-
-Cubes in practice
------------------
-
-
-A simple cube example
-=====================
-
-Suppose we have some gridded data consisting of 24 air temperature readings (in kelvin), located at
-4 different longitudes, 2 different latitudes and 3 different heights. Our data array can be represented pictorially:
-
-.. image:: multi_array.png
-
-Where dimensions 0, 1, and 2 have lengths 3, 2 and 4 respectively.
-
-The Iris cube to represent this data would consist of:
-
- * a standard name of ``air_temperature`` and a unit of ``kelvin``
- * a data array of shape ``(3, 2, 4)``
- * a coordinate, mapping to dimension 0, consisting of:
-
- * a standard name of ``height`` and unit of ``meters``
- * an array of length 3 representing the 3 ``height`` points
-
- * a coordinate, mapping to dimension 1, consisting of:
-
- * a standard name of ``latitude`` and unit of ``degrees``
- * an array of length 2 representing the 2 latitude points
- * a coordinate system such that the ``latitude`` points could be fully located on the globe
-
- * a coordinate, mapping to dimension 2, consisting of:
-
- * a standard name of ``longitude`` and unit of ``degrees``
- * an array of length 4 representing the 4 longitude points
- * a coordinate system such that the ``longitude`` points could be fully located on the globe
-
-
-
-
-Pictorially the cube has taken on more information than a simple array:
-
-
-.. image:: multi_array_to_cube.png
-
-
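-In code, a cube along these lines could be constructed directly. The following
-is a sketch only; the coordinate and data values are invented purely for
-illustration::
-
-    import numpy as np
-    import iris
-    from iris.coords import DimCoord
-
-    # 24 made-up temperature readings arranged as (height, latitude, longitude).
-    data = np.arange(24, dtype=np.float64).reshape(3, 2, 4) + 273.15
-
-    height = DimCoord([100.0, 200.0, 300.0], standard_name='height', units='m')
-    latitude = DimCoord([-10.0, 10.0], standard_name='latitude', units='degrees')
-    longitude = DimCoord([0.0, 90.0, 180.0, 270.0], standard_name='longitude',
-                         units='degrees')
-
-    cube = iris.cube.Cube(data,
-                          standard_name='air_temperature',
-                          units='K',
-                          dim_coords_and_dims=[(height, 0),
-                                               (latitude, 1),
-                                               (longitude, 2)])
-    print(cube)
-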
-Additionally further information may be optionally attached to the cube.
-For example, it is possible to attach any of the following:
-
- * a coordinate, not mapping to any data dimensions, consisting of:
-
- * a standard name of ``time`` and unit of ``days since 2000-01-01 00:00``
- * a data array of length 1 representing the time that the data array is valid for
-
- * an auxiliary coordinate, mapping to dimensions 1 and 2, consisting of:
-
- * a long name of ``place name`` and no unit
- * a 2d string array of shape ``(2, 4)`` with the names of the 8 places that the lat/lons correspond to
-
- * an auxiliary coordinate "factory", which can derive its own mapping, consisting of:
-
- * a standard name of ``height`` and a unit of ``feet``
- * knowledge of how data values for this coordinate can be calculated given the ``height in meters`` coordinate
-
-    * a cell method of "mean" over "ensemble" to indicate that the data has been averaged over
-      a collection of "ensembles" (i.e. multiple model runs).
-
-
-Printing a cube
-===============
-
-Every Iris cube can be printed to screen as you will see later in the user guide. It is worth familiarising yourself with the
-output as this is the quickest way of inspecting the contents of a cube. Here is the result of printing a real life cube:
-
-.. _hybrid_cube_printout:
-
-.. testcode::
- :hide:
-
- import iris
- filename = iris.sample_data_path('uk_hires.pp')
- # NOTE: Every time the output of this cube changes, the full list of deductions below should be re-assessed.
- print(iris.load_cube(filename, 'air_potential_temperature'))
-
-.. testoutput::
-
- air_potential_temperature / (K) (time: 3; model_level_number: 7; grid_latitude: 204; grid_longitude: 187)
- Dimension coordinates:
- time x - - -
- model_level_number - x - -
- grid_latitude - - x -
- grid_longitude - - - x
- Auxiliary coordinates:
- forecast_period x - - -
- level_height - x - -
- sigma - x - -
- surface_altitude - - x x
- Derived coordinates:
- altitude - x x x
- Scalar coordinates:
- forecast_reference_time: 2009-11-19 04:00:00
- Attributes:
- STASH: m01s00i004
- source: Data from Met Office Unified Model
- um_version: 7.3
-
-
-Using this output we can deduce that:
-
- * The cube represents air potential temperature.
- * There are 4 data dimensions, and the data has a shape of ``(3, 7, 204, 187)``
- * The 4 data dimensions are mapped to the ``time``, ``model_level_number``,
- ``grid_latitude``, ``grid_longitude`` coordinates respectively
- * There are three 1d auxiliary coordinates and one 2d auxiliary (``surface_altitude``)
- * There is a single ``altitude`` derived coordinate, which spans 3 data dimensions
- * There are 7 distinct values in the "model_level_number" coordinate. Similar inferences can
- be made for the other dimension coordinates.
- * There are 7, not necessarily distinct, values in the ``level_height`` coordinate.
- * There is a single ``forecast_reference_time`` scalar coordinate representing the entire cube.
- * The cube has one further attribute relating to the phenomenon.
- In this case the originating file format, PP, encodes information in a STASH code which in some cases can
- be useful for identifying advanced experiment information relating to the phenomenon.
diff --git a/docs/iris/src/userguide/loading_iris_cubes.rst b/docs/iris/src/userguide/loading_iris_cubes.rst
deleted file mode 100644
index 2cb3b9b259..0000000000
--- a/docs/iris/src/userguide/loading_iris_cubes.rst
+++ /dev/null
@@ -1,465 +0,0 @@
-.. _loading_iris_cubes:
-
-===================
-Loading Iris cubes
-===================
-
-To load a single file into a **list** of Iris cubes
-the :py:func:`iris.load` function is used::
-
- import iris
- filename = '/path/to/file'
- cubes = iris.load(filename)
-
-Iris will attempt to return **as few cubes as possible**
-by collecting together multiple fields with a shared standard name
-into a single multidimensional cube.
-
-The :py:func:`iris.load` function automatically recognises the format
-of the given files and attempts to produce Iris Cubes from their contents.
-
-.. note::
-
- Currently there is support for CF NetCDF, GRIB 1 & 2, PP and FieldsFiles
- file formats with a framework for this to be extended to custom formats.
-
-
-In order to find out what has been loaded, the result can be printed:
-
- >>> import iris
- >>> filename = iris.sample_data_path('uk_hires.pp')
- >>> cubes = iris.load(filename)
- >>> print(cubes)
- 0: air_potential_temperature / (K) (time: 3; model_level_number: 7; grid_latitude: 204; grid_longitude: 187)
- 1: surface_altitude / (m) (grid_latitude: 204; grid_longitude: 187)
-
-
-This shows that there were 2 cubes as a result of loading the file, they were:
-``air_potential_temperature`` and ``surface_altitude``.
-
-The ``surface_altitude`` cube was 2 dimensional with:
-    * two dimensions of extents 204 and 187 respectively, represented by
-      the ``grid_latitude`` and ``grid_longitude`` coordinates.
-
-The ``air_potential_temperature`` cube was 4 dimensional with:
-    * the same length ``grid_latitude`` and ``grid_longitude`` dimensions as
-      ``surface_altitude``
-    * a ``time`` dimension of length 3
-    * a ``model_level_number`` dimension of length 7
-
-.. note::
-
- The result of :func:`iris.load` is **always** a
-    :class:`list of cubes <iris.cube.CubeList>`.
- Anything that can be done with a Python :class:`list` can be done
- with the resultant list of cubes. It is worth noting, however, that
- there is no inherent order to this
-    :class:`list of cubes <iris.cube.CubeList>`.
- Because of this, indexing may be inconsistent. A more consistent way to
- extract a cube is by using the :class:`iris.Constraint` class as
- described in :ref:`constrained-loading`.
-
-.. hint::
-
- Throughout this user guide you will see the function
- ``iris.sample_data_path`` being used to get the filename for the resources
- used in the examples. The result of this function is just a string.
-
-    Using this function allows us to provide examples which will work
-    across platforms and with data installed in different locations;
-    however, in practice you will want to use your own strings::
-
- filename = '/path/to/file'
- cubes = iris.load(filename)
-
-To get the air potential temperature cube from the list of cubes
-returned by :py:func:`iris.load` in the previous example,
-list indexing can be used:
-
- >>> import iris
- >>> filename = iris.sample_data_path('uk_hires.pp')
- >>> cubes = iris.load(filename)
- >>> # get the first cube (list indexing is 0 based)
- >>> air_potential_temperature = cubes[0]
- >>> print(air_potential_temperature)
- air_potential_temperature / (K) (time: 3; model_level_number: 7; grid_latitude: 204; grid_longitude: 187)
- Dimension coordinates:
- time x - - -
- model_level_number - x - -
- grid_latitude - - x -
- grid_longitude - - - x
- Auxiliary coordinates:
- forecast_period x - - -
- level_height - x - -
- sigma - x - -
- surface_altitude - - x x
- Derived coordinates:
- altitude - x x x
- Scalar coordinates:
- forecast_reference_time: 2009-11-19 04:00:00
- Attributes:
- STASH: m01s00i004
- source: Data from Met Office Unified Model
- um_version: 7.3
-
-Notice that the result of printing a **cube** is a little more verbose than
-it was when printing a **list of cubes**. In addition to the very short summary
-which is provided when printing a list of cubes, information is provided
-on the coordinates which constitute the cube in question.
-This was the output discussed at the end of the :doc:`iris_cubes` section.
-
-.. note::
-
- Dimensioned coordinates will have a dimension marker ``x`` in the
- appropriate column for each cube data dimension that they describe.
-
-
-Loading multiple files
------------------------
-
-To load more than one file into a list of cubes, a list of filenames can be
-provided to :py:func:`iris.load`::
-
- filenames = [iris.sample_data_path('uk_hires.pp'),
- iris.sample_data_path('air_temp.pp')]
- cubes = iris.load(filenames)
-
-
-It is also possible to load one or more files with wildcard substitution
-using the expansion rules defined by :py:mod:`fnmatch`.
-
-For example, to match **zero or more characters** in the filename,
-star wildcards can be used::
-
- filename = iris.sample_data_path('GloSea4', '*.pp')
- cubes = iris.load(filename)
-
-
-.. note::
-
- The cubes returned will not necessarily be in the same order as the
- order of the filenames.
-
-Lazy loading
-------------
-
-In fact, when Iris loads data from most file types, it normally only reads the
-essential descriptive information or metadata: the bulk of the actual data
-content will only be loaded later, as it is needed.
-This is referred to as 'lazy' data. It allows loading to be much quicker and to occupy less memory.
-
-For more on the benefits, handling and uses of lazy data, see :doc:`Real and Lazy Data <real_and_lazy_data>`.
-
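-You can check whether a cube's data is still lazy without forcing it to load;
-for example (a brief sketch)::
-
-    cube = iris.load_cube(iris.sample_data_path('air_temp.pp'))
-    print(cube.has_lazy_data())   # True: only the metadata has been read
-    data = cube.data              # accessing .data realises the array in memory
-    print(cube.has_lazy_data())   # False: the data is now loaded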
-
-.. _constrained-loading:
-
-Constrained loading
------------------------
-Given a large dataset, it is possible to restrict or constrain the load
-to match specific Iris cube metadata.
-Constrained loading provides the ability to generate a cube
-from a specific subset of data that is of particular interest.
-
-As we have seen, loading the following file creates several Cubes::
-
- filename = iris.sample_data_path('uk_hires.pp')
- cubes = iris.load(filename)
-
-Specifying a name as a constraint argument to :py:func:`iris.load` will mean
-only cubes with a matching :meth:`name `
-will be returned::
-
- filename = iris.sample_data_path('uk_hires.pp')
- cubes = iris.load(filename, 'specific_humidity')
-
-To constrain the load to multiple distinct constraints, a list of constraints
-can be provided. This is equivalent to running load once for each constraint
-but is likely to be more efficient::
-
- filename = iris.sample_data_path('uk_hires.pp')
- cubes = iris.load(filename, ['air_potential_temperature', 'specific_humidity'])
-
-The :class:`iris.Constraint` class can be used to restrict coordinate values
-on load. For example, to constrain the load to match
-a specific ``model_level_number``::
-
- filename = iris.sample_data_path('uk_hires.pp')
- level_10 = iris.Constraint(model_level_number=10)
- cubes = iris.load(filename, level_10)
-
-Constraints can be combined using ``&`` to represent a more restrictive
-constraint to ``load``::
-
- filename = iris.sample_data_path('uk_hires.pp')
- forecast_6 = iris.Constraint(forecast_period=6)
- level_10 = iris.Constraint(model_level_number=10)
- cubes = iris.load(filename, forecast_6 & level_10)
-
-As well as being able to combine constraints using ``&``,
-the :class:`iris.Constraint` class can accept multiple arguments,
-and a list of values can be given to constrain a coordinate to one of
-a collection of values::
-
- filename = iris.sample_data_path('uk_hires.pp')
- level_10_or_16_fp_6 = iris.Constraint(model_level_number=[10, 16], forecast_period=6)
- cubes = iris.load(filename, level_10_or_16_fp_6)
-
-A common requirement is to limit the value of a coordinate to a specific range;
-this can be achieved by passing the constraint a function::
-
- def bottom_16_levels(cell):
- # return True or False as to whether the cell in question should be kept
- return cell <= 16
-
- filename = iris.sample_data_path('uk_hires.pp')
- level_lt_16 = iris.Constraint(model_level_number=bottom_16_levels)
- cubes = iris.load(filename, level_lt_16)
-
-.. note::
-
- As with many of the examples later in this documentation, the
- simple function above can be conveniently written as a lambda function
- on a single line::
-
- bottom_16_levels = lambda cell: cell <= 16
-
-
-Note also the :ref:`warning on equality constraints with floating point coordinates `.
-
-
-Cube attributes can also be part of the constraint criteria. Supposing a
-cube attribute of ``STASH`` existed, as is the case when loading ``PP`` files,
-then specific STASH codes can be filtered::
-
- filename = iris.sample_data_path('uk_hires.pp')
- level_10_with_stash = iris.AttributeConstraint(STASH='m01s00i004') & iris.Constraint(model_level_number=10)
- cubes = iris.load(filename, level_10_with_stash)
-
-.. seealso::
-
- For advanced usage there are further examples in the
- :class:`iris.Constraint` reference documentation.
-
-
-Constraining a circular coordinate across its boundary
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Occasionally you may need to constrain your cube with a region that crosses the
-boundary of a circular coordinate (this is often the meridian or the dateline /
-antimeridian). An example use-case of this is to extract the entire Pacific Ocean
-from a cube whose longitudes are bounded by the dateline.
-
-This functionality cannot be provided reliably using constraints. Instead you should use the
-functionality provided by :meth:`cube.intersection <iris.cube.Cube.intersection>`
-to extract this region.
-
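-For instance, to extract a Pacific-spanning region from a global cube (a
-sketch; the longitude range here is purely illustrative)::
-
-    pacific = cube.intersection(longitude=(120, 290))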
-
-.. _using-time-constraints:
-
-Constraining on Time
-^^^^^^^^^^^^^^^^^^^^
-Iris follows NetCDF-CF rules in representing time coordinate values as purely
-numeric values, normalised by the calendar specified in the coordinate's
-units (e.g. "days since 1970-01-01").
-However, when constraining by time we usually want to test calendar-related
-aspects such as hours of the day or months of the year, so Iris
-provides special features to facilitate this:
-
-Firstly, when Iris evaluates Constraint expressions, it will convert time-coordinate
-values (points and bounds) from numbers into :class:`~datetime.datetime`-like objects
-for ease of calendar-based testing.
-
- >>> filename = iris.sample_data_path('uk_hires.pp')
- >>> cube_all = iris.load_cube(filename, 'air_potential_temperature')
- >>> print('All times :\n' + str(cube_all.coord('time')))
- All times :
- DimCoord([2009-11-19 10:00:00, 2009-11-19 11:00:00, 2009-11-19 12:00:00], standard_name='time', calendar='gregorian')
- >>> # Define a function which accepts a datetime as its argument (this is simplified in later examples).
- >>> hour_11 = iris.Constraint(time=lambda cell: cell.point.hour == 11)
- >>> cube_11 = cube_all.extract(hour_11)
- >>> print('Selected times :\n' + str(cube_11.coord('time')))
- Selected times :
- DimCoord([2009-11-19 11:00:00], standard_name='time', calendar='gregorian')
-
-Secondly, the :class:`iris.time` module provides flexible time comparison
-facilities. An :class:`iris.time.PartialDateTime` object can be compared to
-objects such as :class:`datetime.datetime` instances, and this comparison will
-then test only those 'aspects' which the PartialDateTime instance defines:
-
- >>> import datetime
- >>> from iris.time import PartialDateTime
- >>> dt = datetime.datetime(2011, 3, 7)
- >>> print(dt > PartialDateTime(year=2010, month=6))
- True
- >>> print(dt > PartialDateTime(month=6))
- False
- >>>
-
-These two facilities can be combined to provide straightforward calendar-based
-time selections when loading or extracting data.
-
-The previous constraint example can now be written as:
-
- >>> the_11th_hour = iris.Constraint(time=iris.time.PartialDateTime(hour=11))
- >>> print(iris.load_cube(
- ... iris.sample_data_path('uk_hires.pp'),
- ... 'air_potential_temperature' & the_11th_hour).coord('time'))
- DimCoord([2009-11-19 11:00:00], standard_name='time', calendar='gregorian')
-
-It is common that a cube will need to be constrained between two given dates.
-In the following example we construct a time sequence representing the first
-day of every week for many years:
-
-.. testsetup:: timeseries_range
-
- import datetime
- import numpy as np
- from iris.time import PartialDateTime
- long_ts = iris.cube.Cube(np.arange(150), long_name='data', units='1')
- _mondays = iris.coords.DimCoord(7 * np.arange(150), standard_name='time', units='days since 2007-04-09')
- long_ts.add_dim_coord(_mondays, 0)
-
-
-.. doctest:: timeseries_range
- :options: +NORMALIZE_WHITESPACE, +ELLIPSIS
-
- >>> print(long_ts.coord('time'))
- DimCoord([2007-04-09 00:00:00, 2007-04-16 00:00:00, 2007-04-23 00:00:00,
- ...
- 2010-02-01 00:00:00, 2010-02-08 00:00:00, 2010-02-15 00:00:00],
- standard_name='time', calendar='gregorian')
-
-Given two dates in datetime format, we can select all points between them.
-
-.. doctest:: timeseries_range
- :options: +NORMALIZE_WHITESPACE, +ELLIPSIS
-
- >>> d1 = datetime.datetime.strptime('20070715T0000Z', '%Y%m%dT%H%MZ')
- >>> d2 = datetime.datetime.strptime('20070825T0000Z', '%Y%m%dT%H%MZ')
- >>> st_swithuns_daterange_07 = iris.Constraint(
- ... time=lambda cell: d1 <= cell.point < d2)
- >>> within_st_swithuns_07 = long_ts.extract(st_swithuns_daterange_07)
- >>> print(within_st_swithuns_07.coord('time'))
- DimCoord([2007-07-16 00:00:00, 2007-07-23 00:00:00, 2007-07-30 00:00:00,
- 2007-08-06 00:00:00, 2007-08-13 00:00:00, 2007-08-20 00:00:00],
- standard_name='time', calendar='gregorian')
-
-Alternatively, we may rewrite this using :class:`iris.time.PartialDateTime`
-objects.
-
-.. doctest:: timeseries_range
- :options: +NORMALIZE_WHITESPACE, +ELLIPSIS
-
- >>> pdt1 = PartialDateTime(year=2007, month=7, day=15)
- >>> pdt2 = PartialDateTime(year=2007, month=8, day=25)
- >>> st_swithuns_daterange_07 = iris.Constraint(
- ... time=lambda cell: pdt1 <= cell.point < pdt2)
- >>> within_st_swithuns_07 = long_ts.extract(st_swithuns_daterange_07)
- >>> print(within_st_swithuns_07.coord('time'))
- DimCoord([2007-07-16 00:00:00, 2007-07-23 00:00:00, 2007-07-30 00:00:00,
- 2007-08-06 00:00:00, 2007-08-13 00:00:00, 2007-08-20 00:00:00],
- standard_name='time', calendar='gregorian')
-
-A more complex example might require selecting points over an annually repeating
-date range. We can select points within a certain part of the year, in this case
-between the 15th of July through to the 25th of August. By making use of
-PartialDateTime this becomes simple:
-
-.. doctest:: timeseries_range
-
- >>> st_swithuns_daterange = iris.Constraint(
- ... time=lambda cell: PartialDateTime(month=7, day=15) <= cell < PartialDateTime(month=8, day=25))
- >>> within_st_swithuns = long_ts.extract(st_swithuns_daterange)
- ...
- >>> print(within_st_swithuns.coord('time'))
- DimCoord([2007-07-16 00:00:00, 2007-07-23 00:00:00, 2007-07-30 00:00:00,
- 2007-08-06 00:00:00, 2007-08-13 00:00:00, 2007-08-20 00:00:00,
- 2008-07-21 00:00:00, 2008-07-28 00:00:00, 2008-08-04 00:00:00,
- 2008-08-11 00:00:00, 2008-08-18 00:00:00, 2009-07-20 00:00:00,
- 2009-07-27 00:00:00, 2009-08-03 00:00:00, 2009-08-10 00:00:00,
- 2009-08-17 00:00:00, 2009-08-24 00:00:00], standard_name='time', calendar='gregorian')
-
-Notice how the dates printed fall within the range specified in ``st_swithuns_daterange``
-and that they span multiple years.
-
-
-Strict loading
---------------
-
-The :py:func:`iris.load_cube` and :py:func:`iris.load_cubes` functions are
-similar to :py:func:`iris.load` except they can only return
-*one cube per constraint*.
-The :func:`iris.load_cube` function accepts a single constraint and
-returns a single cube. The :func:`iris.load_cubes` function accepts any
-number of constraints and returns a list of cubes (as an `iris.cube.CubeList`).
-Providing no constraints to :func:`iris.load_cube` or :func:`iris.load_cubes`
-is equivalent to requesting exactly one cube of any type.
-
-A single cube is loaded in the following example::
-
- >>> filename = iris.sample_data_path('air_temp.pp')
- >>> cube = iris.load_cube(filename)
- >>> print(cube)
- air_temperature / (K) (latitude: 73; longitude: 96)
- Dimension coordinates:
- latitude x -
- longitude - x
- ...
- Cell methods:
- mean: time
-
-However, when attempting to load data which would result in anything other than
-one cube, an exception is raised::
-
- >>> filename = iris.sample_data_path('uk_hires.pp')
- >>> cube = iris.load_cube(filename)
- Traceback (most recent call last):
- ...
- iris.exceptions.ConstraintMismatchError: Expected exactly one cube, found 2.
-
-.. note::
-
- All the load functions share many of the same features, hence
- multiple files could be loaded with wildcard filenames
- or by providing a list of filenames.
-
-The strict nature of :func:`iris.load_cube` and :func:`iris.load_cubes`
-means that, when combined with constrained loading, it is possible to
-ensure that precisely what was asked for on load is given
-- otherwise an exception is raised.
-This fact can be utilised to make code only run successfully if
-the data provided meets the expected criteria.
-
-For example, suppose that code needed ``air_potential_temperature``
-in order to run::
-
- import iris
- filename = iris.sample_data_path('uk_hires.pp')
- air_pot_temp = iris.load_cube(filename, 'air_potential_temperature')
- print(air_pot_temp)
-
-Should the file not produce exactly one cube with a standard name of
-'air_potential_temperature', an exception will be raised.
-
-Similarly, supposing a routine needed both 'surface_altitude' and
-'air_potential_temperature' to be able to run::
-
- import iris
- filename = iris.sample_data_path('uk_hires.pp')
- altitude_cube, pot_temp_cube = iris.load_cubes(filename, ['surface_altitude', 'air_potential_temperature'])
-
-The result of :func:`iris.load_cubes` in this case will be a list of 2 cubes
-ordered by the constraints provided. Multiple assignment has been used to put
-these two cubes into separate variables.
-
-.. note::
-
- In Python, lists of a pre-known length and order can be exploited
- using *multiple assignment*:
-
- >>> number_one, number_two = [1, 2]
- >>> print(number_one)
- 1
- >>> print(number_two)
- 2
-
diff --git a/docs/iris/src/userguide/merge.png b/docs/iris/src/userguide/merge.png
deleted file mode 100644
index cafaa370da..0000000000
Binary files a/docs/iris/src/userguide/merge.png and /dev/null differ
diff --git a/docs/iris/src/userguide/merge.svg b/docs/iris/src/userguide/merge.svg
deleted file mode 100644
index 9326bc332b..0000000000
--- a/docs/iris/src/userguide/merge.svg
+++ /dev/null
@@ -1,714 +0,0 @@
-
-
-
-
diff --git a/docs/iris/src/userguide/merge_and_concat.png b/docs/iris/src/userguide/merge_and_concat.png
deleted file mode 100644
index 48238287b4..0000000000
Binary files a/docs/iris/src/userguide/merge_and_concat.png and /dev/null differ
diff --git a/docs/iris/src/userguide/multi_array.png b/docs/iris/src/userguide/multi_array.png
deleted file mode 100644
index 54a2688d2a..0000000000
Binary files a/docs/iris/src/userguide/multi_array.png and /dev/null differ
diff --git a/docs/iris/src/userguide/multi_array.svg b/docs/iris/src/userguide/multi_array.svg
deleted file mode 100644
index d28f6d71d6..0000000000
--- a/docs/iris/src/userguide/multi_array.svg
+++ /dev/null
@@ -1,455 +0,0 @@
-
-
-
-
diff --git a/docs/iris/src/userguide/multi_array_to_cube.png b/docs/iris/src/userguide/multi_array_to_cube.png
deleted file mode 100644
index 1144ee6715..0000000000
Binary files a/docs/iris/src/userguide/multi_array_to_cube.png and /dev/null differ
diff --git a/docs/iris/src/userguide/multi_array_to_cube.svg b/docs/iris/src/userguide/multi_array_to_cube.svg
deleted file mode 100644
index a2fc2f5e26..0000000000
--- a/docs/iris/src/userguide/multi_array_to_cube.svg
+++ /dev/null
@@ -1,1378 +0,0 @@
-
-
-
-
diff --git a/docs/iris/src/userguide/plotting_examples/1d_quickplot_simple.py b/docs/iris/src/userguide/plotting_examples/1d_quickplot_simple.py
deleted file mode 100644
index 75462101a0..0000000000
--- a/docs/iris/src/userguide/plotting_examples/1d_quickplot_simple.py
+++ /dev/null
@@ -1,18 +0,0 @@
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-
-import matplotlib.pyplot as plt
-
-import iris
-import iris.quickplot as qplt
-
-
-fname = iris.sample_data_path('air_temp.pp')
-temperature = iris.load_cube(fname)
-
-# Take a 1d slice using array style indexing.
-temperature_1d = temperature[5, :]
-
-qplt.plot(temperature_1d)
-plt.show()
diff --git a/docs/iris/src/userguide/plotting_examples/1d_simple.py b/docs/iris/src/userguide/plotting_examples/1d_simple.py
deleted file mode 100644
index 8cb3f45643..0000000000
--- a/docs/iris/src/userguide/plotting_examples/1d_simple.py
+++ /dev/null
@@ -1,18 +0,0 @@
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-
-import matplotlib.pyplot as plt
-
-import iris
-import iris.plot as iplt
-
-
-fname = iris.sample_data_path('air_temp.pp')
-temperature = iris.load_cube(fname)
-
-# Take a 1d slice using array style indexing.
-temperature_1d = temperature[5, :]
-
-iplt.plot(temperature_1d)
-plt.show()
diff --git a/docs/iris/src/userguide/plotting_examples/1d_with_legend.py b/docs/iris/src/userguide/plotting_examples/1d_with_legend.py
deleted file mode 100644
index b0aee43c4a..0000000000
--- a/docs/iris/src/userguide/plotting_examples/1d_with_legend.py
+++ /dev/null
@@ -1,47 +0,0 @@
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-
-import matplotlib.pyplot as plt
-
-import iris
-import iris.plot as iplt
-
-
-fname = iris.sample_data_path('air_temp.pp')
-
-# Load exactly one cube from the given file
-temperature = iris.load_cube(fname)
-
-# We are only interested in a small number of longitudes (the 4 after and
-# including the 5th element), so index them out
-temperature = temperature[5:9, :]
-
-for cube in temperature.slices('longitude'):
-
- # Create a string label to identify this cube (i.e. latitude: value)
- cube_label = 'latitude: %s' % cube.coord('latitude').points[0]
-
- # Plot the cube, and associate it with a label
- iplt.plot(cube, label=cube_label)
-
-# Match the longitude range to global
-max_lon = temperature.coord('longitude').points.max()
-min_lon = temperature.coord('longitude').points.min()
-plt.xlim(min_lon, max_lon)
-
-# Add the legend with 2 columns
-plt.legend(ncol=2)
-
-# Put a grid on the plot
-plt.grid(True)
-
-# Provide some axis labels
-plt.ylabel('Temperature / kelvin')
-plt.xlabel('Longitude / degrees')
-
-# And a sensible title
-plt.suptitle('Air Temperature', fontsize=20, y=0.9)
-
-# Finally, show it.
-plt.show()
diff --git a/docs/iris/src/userguide/plotting_examples/brewer.py b/docs/iris/src/userguide/plotting_examples/brewer.py
deleted file mode 100644
index 2d61e276ab..0000000000
--- a/docs/iris/src/userguide/plotting_examples/brewer.py
+++ /dev/null
@@ -1,27 +0,0 @@
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-
-import matplotlib.pyplot as plt
-import numpy as np
-
-import iris.palette
-
-
-a = np.linspace(0, 1, 256).reshape(1, -1)
-a = np.vstack((a, a))
-
-maps = sorted(iris.palette.CMAP_BREWER)
-nmaps = len(maps)
-
-fig = plt.figure(figsize=(7, 10))
-fig.subplots_adjust(top=0.99, bottom=0.01, left=0.2, right=0.99)
-for i, m in enumerate(maps):
- ax = plt.subplot(nmaps, 1, i+1)
- plt.axis("off")
- plt.imshow(a, aspect='auto', cmap=plt.get_cmap(m), origin='lower')
- pos = list(ax.get_position().bounds)
- fig.text(pos[0] - 0.01, pos[1], m, fontsize=8,
- horizontalalignment='right')
-
-plt.show()
diff --git a/docs/iris/src/userguide/plotting_examples/cube_blockplot.py b/docs/iris/src/userguide/plotting_examples/cube_blockplot.py
deleted file mode 100644
index a6273a274c..0000000000
--- a/docs/iris/src/userguide/plotting_examples/cube_blockplot.py
+++ /dev/null
@@ -1,19 +0,0 @@
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-
-import matplotlib.pyplot as plt
-
-import iris
-import iris.quickplot as qplt
-
-
-# Load the data for a single value of model level number.
-fname = iris.sample_data_path('hybrid_height.nc')
-temperature_cube = iris.load_cube(
- fname, iris.Constraint(model_level_number=1))
-
-# Draw the block plot.
-qplt.pcolormesh(temperature_cube)
-
-plt.show()
diff --git a/docs/iris/src/userguide/plotting_examples/cube_brewer_cite_contourf.py b/docs/iris/src/userguide/plotting_examples/cube_brewer_cite_contourf.py
deleted file mode 100644
index 9ba68fc47a..0000000000
--- a/docs/iris/src/userguide/plotting_examples/cube_brewer_cite_contourf.py
+++ /dev/null
@@ -1,29 +0,0 @@
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-
-import matplotlib.pyplot as plt
-
-import iris
-import iris.quickplot as qplt
-import iris.plot as iplt
-
-
-fname = iris.sample_data_path('air_temp.pp')
-temperature_cube = iris.load_cube(fname)
-
-# Get the Purples "Brewer" palette.
-brewer_cmap = plt.get_cmap('brewer_Purples_09')
-
-# Draw the contours, with n-levels set for the map colours (9).
-# NOTE: needed as the map is non-interpolated, but matplotlib does not provide
-# any special behaviour for these.
-qplt.contourf(temperature_cube, brewer_cmap.N, cmap=brewer_cmap)
-
-# Add a citation to the plot.
-iplt.citation(iris.plot.BREWER_CITE)
-
-# Add coastlines to the map created by contourf.
-plt.gca().coastlines()
-
-plt.show()
diff --git a/docs/iris/src/userguide/plotting_examples/cube_brewer_contourf.py b/docs/iris/src/userguide/plotting_examples/cube_brewer_contourf.py
deleted file mode 100644
index aacf129c30..0000000000
--- a/docs/iris/src/userguide/plotting_examples/cube_brewer_contourf.py
+++ /dev/null
@@ -1,25 +0,0 @@
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-
-import matplotlib.cm as mpl_cm
-import matplotlib.pyplot as plt
-
-import iris
-import iris.quickplot as qplt
-
-fname = iris.sample_data_path('air_temp.pp')
-temperature_cube = iris.load_cube(fname)
-
-# Load a Cynthia Brewer palette.
-brewer_cmap = mpl_cm.get_cmap('brewer_OrRd_09')
-
-# Draw the contours, with n-levels set for the map colours (9).
-# NOTE: needed as the map is non-interpolated, but matplotlib does not provide
-# any special behaviour for these.
-qplt.contourf(temperature_cube, brewer_cmap.N, cmap=brewer_cmap)
-
-# Add coastlines to the map created by contourf.
-plt.gca().coastlines()
-
-plt.show()
diff --git a/docs/iris/src/userguide/plotting_examples/cube_contour.py b/docs/iris/src/userguide/plotting_examples/cube_contour.py
deleted file mode 100644
index 0ec2fa7be4..0000000000
--- a/docs/iris/src/userguide/plotting_examples/cube_contour.py
+++ /dev/null
@@ -1,23 +0,0 @@
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-
-import matplotlib.pyplot as plt
-
-import iris
-import iris.quickplot as qplt
-
-
-fname = iris.sample_data_path('air_temp.pp')
-temperature_cube = iris.load_cube(fname)
-
-# Add a contour, and put the result in a variable called contour.
-contour = qplt.contour(temperature_cube)
-
-# Add coastlines to the map created by contour.
-plt.gca().coastlines()
-
-# Add contour labels based on the contour we have just created.
-plt.clabel(contour, inline=False)
-
-plt.show()
diff --git a/docs/iris/src/userguide/plotting_examples/cube_contourf.py b/docs/iris/src/userguide/plotting_examples/cube_contourf.py
deleted file mode 100644
index 21ebb1a214..0000000000
--- a/docs/iris/src/userguide/plotting_examples/cube_contourf.py
+++ /dev/null
@@ -1,20 +0,0 @@
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-
-import matplotlib.pyplot as plt
-
-import iris
-import iris.quickplot as qplt
-
-
-fname = iris.sample_data_path('air_temp.pp')
-temperature_cube = iris.load_cube(fname)
-
-# Draw the contour with 25 levels.
-qplt.contourf(temperature_cube, 25)
-
-# Add coastlines to the map created by contourf.
-plt.gca().coastlines()
-
-plt.show()
diff --git a/docs/iris/src/userguide/real_and_lazy_data.rst b/docs/iris/src/userguide/real_and_lazy_data.rst
deleted file mode 100644
index 84a35efa64..0000000000
--- a/docs/iris/src/userguide/real_and_lazy_data.rst
+++ /dev/null
@@ -1,235 +0,0 @@
-
-.. _real_and_lazy_data:
-
-
-.. testsetup:: *
-
- import dask.array as da
- import iris
- import numpy as np
-
-
-==================
-Real and Lazy Data
-==================
-
-We have seen in the :doc:`iris_cubes` section of the user guide that
-Iris cubes contain data and metadata about a phenomenon. The data element of a cube
-is always an array, but the array may be either "real" or "lazy".
-
-In this section of the user guide we will look specifically at the concepts of
-real and lazy data as they apply to the cube and other data structures in Iris.
-
-
-What is real and lazy data?
----------------------------
-
-In Iris, we use the term **real data** to describe data arrays that are loaded
-into memory. Real data is typically provided as a
-`NumPy array `_,
-which has a shape and data type that are used to describe the array's data points.
-Each data point takes up a small amount of memory, which means large NumPy arrays can
-take up a large amount of memory.
-
-Conversely, we use the term **lazy data** to describe data that is not loaded into memory.
-(This is sometimes also referred to as **deferred data**.)
-In Iris, lazy data is provided as a
-`dask array `_.
-A dask array also has a shape and data type
-but the dask array's data points remain on disk and are only loaded into memory in
-small chunks when absolutely necessary. This has key performance benefits for
-handling large amounts of data, where both calculation time and storage
-requirements can be significantly reduced.
-
-In Iris, when actual data values are needed from a lazy data array, it is
-*'realised'* : this means that all the actual values are read in from the file,
-and a 'real'
-(i.e. `numpy `_)
-array replaces the lazy array within the Iris object.
-
-Following realisation, the Iris object just contains the actual ('real')
-data, so the time cost of reading all the data is not incurred again.
-From here on, access to the data is fast, but it now occupies its full memory space.
-
-In particular, any direct reference to a `cube.data` will realise the cube data
-content : any lazy content is lost as the data is read from file, and the cube
-content is replaced with a real array.
-This is also referred to simply as "touching" the data.
-
-See the section :ref:`when_real_data`
-for more examples of this.
-
-You can check whether a cube has real data or lazy data by using the method
-:meth:`~iris.cube.Cube.has_lazy_data`. For example::
-
- >>> cube = iris.load_cube(iris.sample_data_path('air_temp.pp'))
- >>> cube.has_lazy_data()
- True
- # Realise the lazy data.
- >>> cube.data
- >>> cube.has_lazy_data()
- False
-
-
-Benefits
---------
-
-The primary advantage of using lazy data is that it enables
-`out-of-core processing `_;
-that is, the loading and manipulating of datasets without loading the full data into memory.
-
-There are two key benefits from this :
-
-**Firstly**, the result of a calculation on a large dataset often occupies much
-less storage space than the source data -- for instance, a maximum data
-value calculated over a large number of datafiles.
-In these cases the result can be computed in sections, without ever requiring the
-entire source dataset to be loaded, thus drastically reducing memory footprint.
-This strategy of task division can also enable reduced execution time through the effective
-use of parallel processing capabilities.
-
-**Secondly**, it is often simply convenient to form a calculation on a large
-dataset, of which only a certain portion is required at any one time
--- for example, plotting individual timesteps from a large sequence.
-In such cases, a required portion can be extracted and realised without calculating the entire result.
-
-.. _when_real_data:
-
-When does my data become real?
-------------------------------
-
-Certain operations, such as cube indexing and statistics, can be
-performed in a lazy fashion, producing a 'lazy' result from a lazy input, so
-that no realisation immediately occurs.
-However other operations, such as plotting or printing data values, will always
-trigger the 'realisation' of data.
-
-When you load a dataset using Iris the data array will almost always initially be
-a lazy array. This section details some operations that will realise lazy data
-as well as some operations that will maintain lazy data. We use the term **realise**
-to mean converting lazy data into real data.
-
-Most operations on data arrays can be run equivalently on both real and lazy data.
-If the data array is real then the operation will be run on the data array
-immediately. The results of the operation will be available as soon as processing is completed.
-If the data array is lazy then the operation will be deferred and the data array will
-remain lazy until you request the result (such as when you read from ``cube.data``)::
-
- >>> cube = iris.load_cube(iris.sample_data_path('air_temp.pp'))
- >>> cube.has_lazy_data()
- True
- >>> cube += 5
- >>> cube.has_lazy_data()
- True
-
-The process by which the operation is deferred until the result is requested is
-referred to as **lazy evaluation**.
-
-Certain operations, including regridding and plotting, can only be run on real data.
-Calling such operations on lazy data will automatically realise your lazy data.
-
-You can also realise (and so load into memory) your cube's lazy data if you 'touch' the data.
-To 'touch' the data means directly accessing the data by calling ``cube.data``,
-as in the previous example.
-
-Core data
-^^^^^^^^^
-
-Cubes have the concept of "core data". This returns the cube's data in its
-current state:
-
- * If a cube has lazy data, calling the cube's :meth:`~iris.cube.Cube.core_data` method
- will return the cube's lazy dask array. Calling the cube's
- :meth:`~iris.cube.Cube.core_data` method **will never realise** the cube's data.
- * If a cube has real data, calling the cube's :meth:`~iris.cube.Cube.core_data` method
- will return the cube's real NumPy array.
-
-For example::
-
- >>> cube = iris.load_cube(iris.sample_data_path('air_temp.pp'))
- >>> cube.has_lazy_data()
- True
-
- >>> the_data = cube.core_data()
- >>> type(the_data)
- <class 'dask.array.core.Array'>
- >>> cube.has_lazy_data()
- True
-
- # Realise the lazy data.
- >>> cube.data
- >>> the_data = cube.core_data()
- >>> type(the_data)
- <class 'numpy.ndarray'>
- >>> cube.has_lazy_data()
- False
-
-
-Coordinates
------------
-
-In the same way that Iris cubes contain a data array, Iris coordinates contain a
-points array and an optional bounds array.
-Coordinate points and bounds arrays can also be real or lazy:
-
- * A :class:`~iris.coords.DimCoord` will only ever have **real** points and bounds
- arrays because of monotonicity checks that realise lazy arrays.
- * An :class:`~iris.coords.AuxCoord` can have **real or lazy** points and bounds.
- * An :class:`~iris.aux_factory.AuxCoordFactory` (or derived coordinate)
- can have **real or lazy** points and bounds. If all of the
- :class:`~iris.coords.AuxCoord` instances used to construct the derived coordinate
- have real points and bounds then the derived coordinate will have real points
- and bounds, otherwise the derived coordinate will have lazy points and bounds.
-
-Iris cubes and coordinates have very similar interfaces, and this similarity extends
-to accessing coordinates' lazy points and bounds:
-
-.. doctest::
-
- >>> cube = iris.load_cube(iris.sample_data_path('hybrid_height.nc'), 'air_potential_temperature')
-
- >>> dim_coord = cube.coord('model_level_number')
- >>> print(dim_coord.has_lazy_points())
- False
- >>> print(dim_coord.has_bounds())
- False
- >>> print(dim_coord.has_lazy_bounds())
- False
-
- >>> aux_coord = cube.coord('sigma')
- >>> print(aux_coord.has_lazy_points())
- True
- >>> print(aux_coord.has_bounds())
- True
- >>> print(aux_coord.has_lazy_bounds())
- True
-
- # Realise the lazy points. This will **not** realise the lazy bounds.
- >>> points = aux_coord.points
- >>> print(aux_coord.has_lazy_points())
- False
- >>> print(aux_coord.has_lazy_bounds())
- True
-
- >>> derived_coord = cube.coord('altitude')
- >>> print(derived_coord.has_lazy_points())
- True
- >>> print(derived_coord.has_bounds())
- True
- >>> print(derived_coord.has_lazy_bounds())
- True
-
-.. note::
- Printing a lazy :class:`~iris.coords.AuxCoord` will realise its points and bounds arrays!
-
-
-Dask processing options
------------------------
-
-Iris uses dask to provide lazy data arrays for both Iris cubes and coordinates,
-and for computing deferred operations on lazy arrays.
-
-Dask provides processing options to control how deferred operations on lazy arrays
-are computed. This is provided via the ``dask.set_options`` interface. See the
-`dask documentation `_
-for more information on setting dask processing options.
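To make the realisation behaviour described in the removed page concrete, here is a minimal sketch (assuming the Iris sample data is installed); the exact dask class name printed may vary between versions::

    import iris

    cube = iris.load_cube(iris.sample_data_path('air_temp.pp'))
    print(cube.has_lazy_data())        # True: the data is still a lazy dask array

    lazy_array = cube.core_data()      # access the dask array without realising it
    print(type(lazy_array))

    cube_plus_five = cube + 5          # arithmetic is deferred; the result stays lazy
    print(cube_plus_five.has_lazy_data())

    _ = cube.data                      # 'touching' the data realises it
    print(cube.has_lazy_data())        # False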
diff --git a/docs/iris/src/userguide/regridding_plots/interpolate_column.py b/docs/iris/src/userguide/regridding_plots/interpolate_column.py
deleted file mode 100644
index 201dcda4a1..0000000000
--- a/docs/iris/src/userguide/regridding_plots/interpolate_column.py
+++ /dev/null
@@ -1,63 +0,0 @@
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-
-import iris
-import iris.quickplot as qplt
-import iris.analysis
-import matplotlib.pyplot as plt
-import numpy as np
-
-
-fname = iris.sample_data_path('hybrid_height.nc')
-column = iris.load_cube(fname, 'air_potential_temperature')[:, 0, 0]
-
-alt_coord = column.coord('altitude')
-
-# Interpolate the "perfect" linear interpolation. Really this is just
-# a high number of interpolation points, in this case 1000 of them.
-altitude_points = [('altitude', np.linspace(400, 1250, 1000))]
-scheme = iris.analysis.Linear(extrapolation_mode='mask')
-linear_column = column.interpolate(altitude_points, scheme)
-
-# Now interpolate the data onto 10 evenly spaced altitude levels,
-# as we did in the example.
-altitude_points = [('altitude', np.linspace(400, 1250, 10))]
-scheme = iris.analysis.Linear()
-new_column = column.interpolate(altitude_points, scheme)
-
-plt.figure(figsize=(5, 4), dpi=100)
-
-# Plot the black markers for the original data.
-qplt.plot(column, column.coord('altitude'),
- marker='o', color='black', linestyle='', markersize=3,
- label='Original values', zorder=2)
-
-# Plot the gray line to display the linear interpolation.
-qplt.plot(linear_column, linear_column.coord('altitude'),
- color='gray',
- label='Linear interpolation', zorder=0)
-
-# Plot the red markers for the new data.
-qplt.plot(new_column, new_column.coord('altitude'),
- marker='D', color='red', linestyle='',
- label='Interpolated values', zorder=1)
-
-ax = plt.gca()
-# Space the plot such that the labels appear correctly.
-plt.subplots_adjust(left=0.17, bottom=0.14)
-
-# Limit the plot to a maximum of 5 ticks.
-ax.xaxis.get_major_locator().set_params(nbins=5)
-
-# Prevent matplotlib from using "offset" notation on the xaxis.
-ax.xaxis.get_major_formatter().set_useOffset(False)
-
-# Put some space between the line and the axes.
-ax.margins(0.05)
-
-# Place gridlines and a legend.
-ax.grid()
-plt.legend(loc='lower right')
-
-plt.show()
diff --git a/docs/iris/src/userguide/regridding_plots/regridded_to_global.py b/docs/iris/src/userguide/regridding_plots/regridded_to_global.py
deleted file mode 100644
index c392de7a52..0000000000
--- a/docs/iris/src/userguide/regridding_plots/regridded_to_global.py
+++ /dev/null
@@ -1,26 +0,0 @@
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-
-import iris
-import iris.analysis
-import iris.plot as iplt
-import matplotlib.pyplot as plt
-
-
-global_air_temp = iris.load_cube(iris.sample_data_path('air_temp.pp'))
-rotated_psl = iris.load_cube(iris.sample_data_path('rotated_pole.nc'))
-
-scheme = iris.analysis.Linear(extrapolation_mode='mask')
-global_psl = rotated_psl.regrid(global_air_temp, scheme)
-
-plt.figure(figsize=(4, 3))
-iplt.pcolormesh(global_psl)
-plt.title('Air pressure\n'
- 'on a global longitude latitude grid')
-ax = plt.gca()
-ax.coastlines()
-ax.gridlines()
-ax.set_extent([-90, 70, 10, 80])
-
-plt.show()
diff --git a/docs/iris/src/userguide/regridding_plots/regridded_to_global_area_weighted.py b/docs/iris/src/userguide/regridding_plots/regridded_to_global_area_weighted.py
deleted file mode 100644
index 067a4ee2d6..0000000000
--- a/docs/iris/src/userguide/regridding_plots/regridded_to_global_area_weighted.py
+++ /dev/null
@@ -1,51 +0,0 @@
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-
-import iris
-import iris.analysis
-import iris.plot as iplt
-import matplotlib.pyplot as plt
-import matplotlib.colors
-import numpy as np
-
-global_air_temp = iris.load_cube(iris.sample_data_path('air_temp.pp'))
-
-regional_ash = iris.load_cube(iris.sample_data_path('NAME_output.txt'))
-regional_ash = regional_ash.collapsed('flight_level', iris.analysis.SUM)
-
-# Mask values so low that they are anomalous.
-regional_ash.data = np.ma.masked_less(regional_ash.data, 5e-6)
-
-norm = matplotlib.colors.LogNorm(5e-6, 0.0175)
-
-global_air_temp.coord('longitude').guess_bounds()
-global_air_temp.coord('latitude').guess_bounds()
-
-fig = plt.figure(figsize=(8, 4.5))
-
-plt.subplot(2, 2, 1)
-iplt.pcolormesh(regional_ash, norm=norm)
-plt.title('Volcanic ash total\nconcentration not regridded',
- size='medium')
-
-for subplot_num, mdtol in zip([2, 3, 4], [0, 0.5, 1]):
- plt.subplot(2, 2, subplot_num)
- scheme = iris.analysis.AreaWeighted(mdtol=mdtol)
- global_ash = regional_ash.regrid(global_air_temp, scheme)
- iplt.pcolormesh(global_ash, norm=norm)
- plt.title('Volcanic ash total concentration\n'
- 'regridded with AreaWeighted(mdtol={})'.format(mdtol),
- size='medium')
-
-plt.subplots_adjust(hspace=0, wspace=0.05,
- left=0.001, right=0.999, bottom=0, top=0.955)
-
-# Iterate over each of the figure's axes, adding coastlines, gridlines
-# and setting the extent.
-for ax in fig.axes:
- ax.coastlines('50m')
- ax.gridlines()
- ax.set_extent([-80, 40, 31, 75])
-
-plt.show()
diff --git a/docs/iris/src/userguide/regridding_plots/regridded_to_rotated.py b/docs/iris/src/userguide/regridding_plots/regridded_to_rotated.py
deleted file mode 100644
index f2c2b271df..0000000000
--- a/docs/iris/src/userguide/regridding_plots/regridded_to_rotated.py
+++ /dev/null
@@ -1,25 +0,0 @@
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-
-import iris
-import iris.analysis
-import iris.plot as iplt
-import matplotlib.pyplot as plt
-
-global_air_temp = iris.load_cube(iris.sample_data_path('air_temp.pp'))
-rotated_psl = iris.load_cube(iris.sample_data_path('rotated_pole.nc'))
-
-rotated_air_temp = global_air_temp.regrid(rotated_psl, iris.analysis.Linear())
-
-
-plt.figure(figsize=(4, 3))
-
-iplt.pcolormesh(rotated_air_temp, norm=plt.Normalize(260, 300))
-plt.title('Air temperature\n'
- 'on a limited area rotated pole grid')
-ax = plt.gca()
-ax.coastlines(resolution='50m')
-ax.gridlines()
-
-plt.show()
diff --git a/docs/iris/src/userguide/regridding_plots/regridding_plot.py b/docs/iris/src/userguide/regridding_plots/regridding_plot.py
deleted file mode 100644
index 2419d47727..0000000000
--- a/docs/iris/src/userguide/regridding_plots/regridding_plot.py
+++ /dev/null
@@ -1,33 +0,0 @@
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-
-import iris
-import iris.plot as iplt
-import matplotlib.pyplot as plt
-
-# Load the data.
-global_air_temp = iris.load_cube(iris.sample_data_path('air_temp.pp'))
-rotated_psl = iris.load_cube(iris.sample_data_path('rotated_pole.nc'))
-
-plt.figure(figsize=(9, 3.5))
-
-plt.subplot(1, 2, 1)
-iplt.pcolormesh(global_air_temp, norm=plt.Normalize(260, 300))
-plt.title('Air temperature\n'
- 'on a global longitude latitude grid')
-ax = plt.gca()
-ax.coastlines()
-ax.gridlines()
-
-plt.subplot(1, 2, 2)
-iplt.pcolormesh(rotated_psl)
-plt.title('Air pressure\n'
- 'on a limited area rotated pole grid')
-ax = plt.gca()
-ax.coastlines(resolution='50m')
-ax.gridlines()
-
-plt.tight_layout()
-
-plt.show()
diff --git a/docs/iris/src/userguide/subsetting_a_cube.rst b/docs/iris/src/userguide/subsetting_a_cube.rst
deleted file mode 100644
index b61f16a043..0000000000
--- a/docs/iris/src/userguide/subsetting_a_cube.rst
+++ /dev/null
@@ -1,210 +0,0 @@
-.. _subsetting_a_cube:
-
-=================
-Subsetting a Cube
-=================
-
-The :doc:`loading_iris_cubes` section of the user guide showed how to load data into multidimensional Iris cubes.
-However it is often necessary to reduce the dimensionality of a cube down to something more appropriate and/or manageable.
-
-Iris provides several ways of reducing both the amount of data and/or the number of dimensions in your cube depending on the circumstance.
-In all cases **the subset of a valid cube is itself a valid cube**.
-
-
-Cube extraction
-^^^^^^^^^^^^^^^^
-A subset of a cube can be "extracted" from a multi-dimensional cube in order to reduce its dimensionality:
-
- >>> import iris
- >>> filename = iris.sample_data_path('space_weather.nc')
- >>> cube = iris.load_cube(filename, 'electron density')
- >>> equator_slice = cube.extract(iris.Constraint(grid_latitude=0))
- >>> print(equator_slice)
- electron density / (1E11 e/m^3) (height: 29; grid_longitude: 31)
- Dimension coordinates:
- height x -
- grid_longitude - x
- Auxiliary coordinates:
- latitude - x
- longitude - x
- Scalar coordinates:
- grid_latitude: 0.0 degrees
- Attributes:
- Conventions: CF-1.5
-
-
-In this example we start with a 3 dimensional cube, with dimensions of ``height``, ``grid_latitude`` and ``grid_longitude``,
-and extract every point where the latitude is 0, resulting in a 2d cube with axes of ``height`` and ``grid_longitude``.
-
-
-.. _floating-point-warning:
-.. warning::
-
- Caution is required when using equality constraints with floating point coordinates such as ``grid_latitude``.
- Printing the points of a coordinate does not necessarily show the full precision of the underlying number and it
- is very easy to return no matches to a constraint when one was expected.
- This can be avoided by using a function as the argument to the constraint::
-
- def near_zero(cell):
- """Returns true if the cell is between -0.1 and 0.1."""
- return -0.1 < cell < 0.1
-
- equator_constraint = iris.Constraint(grid_latitude=near_zero)
-
- Often you will see this construct in shorthand using a lambda function definition::
-
- equator_constraint = iris.Constraint(grid_latitude=lambda cell: -0.1 < cell < 0.1)
-
-
-The extract method could be applied again to the *equator_slice* cube to get a further subset.
-
-For example to get a ``height`` of 9000 metres at the equator the following line extends the previous example::
-
- equator_height_9km_slice = equator_slice.extract(iris.Constraint(height=9000))
- print(equator_height_9km_slice)
-
-The two steps required to get ``height`` of 9000 m at the equator can be simplified into a single constraint::
-
- equator_height_9km_slice = cube.extract(iris.Constraint(grid_latitude=0, height=9000))
- print(equator_height_9km_slice)
-
-As we saw in :doc:`loading_iris_cubes` the result of :func:`iris.load` is a :class:`CubeList `.
-The ``extract`` method also exists on a :class:`CubeList ` and behaves in exactly the
-same way as loading with constraints:
-
- >>> import iris
- >>> air_temp_and_fp_6 = iris.Constraint('air_potential_temperature', forecast_period=6)
- >>> level_10 = iris.Constraint(model_level_number=10)
- >>> filename = iris.sample_data_path('uk_hires.pp')
- >>> cubes = iris.load(filename).extract(air_temp_and_fp_6 & level_10)
- >>> print(cubes)
- 0: air_potential_temperature / (K) (grid_latitude: 204; grid_longitude: 187)
- >>> print(cubes[0])
- air_potential_temperature / (K) (grid_latitude: 204; grid_longitude: 187)
- Dimension coordinates:
- grid_latitude x -
- grid_longitude - x
- Auxiliary coordinates:
- surface_altitude x x
- Derived coordinates:
- altitude x x
- Scalar coordinates:
- forecast_period: 6.0 hours
- forecast_reference_time: 2009-11-19 04:00:00
- level_height: 395.0 m, bound=(360.0, 433.3332) m
- model_level_number: 10
- sigma: 0.9549927, bound=(0.9589389, 0.95068014)
- time: 2009-11-19 10:00:00
- Attributes:
- STASH: m01s00i004
- source: Data from Met Office Unified Model
- um_version: 7.3
-
-
-Cube iteration
-^^^^^^^^^^^^^^^
-A useful way of dealing with a Cube in its **entirety** is by iterating over its layers or slices.
-For example, to deal with a 3 dimensional cube (z,y,x) you could iterate over all 2 dimensional slices in y and x
-which make up the full 3d cube::
-
- import iris
- filename = iris.sample_data_path('hybrid_height.nc')
- cube = iris.load_cube(filename)
- print(cube)
- for yx_slice in cube.slices(['grid_latitude', 'grid_longitude']):
- print(repr(yx_slice))
-
-As the original cube had the shape (15, 100, 100) there were 15 latitude longitude slices and hence the
-line ``print(repr(yx_slice))`` was run 15 times.
-
-.. note::
-
- The order of latitude and longitude in the list is important; had they been swapped the resultant cube slices
- would have been transposed.
-
- For further information see :py:meth:`Cube.slices `.
-
-
-This method can handle n-dimensional slices by providing more or fewer coordinate names in the list to **slices**::
-
- import iris
- filename = iris.sample_data_path('hybrid_height.nc')
- cube = iris.load_cube(filename)
- print(cube)
- for i, x_slice in enumerate(cube.slices(['grid_longitude'])):
- print(i, repr(x_slice))
-
-The Python function :py:func:`enumerate` is used in this example to provide an incrementing variable **i** which is
-printed with the summary of each cube slice. Note that there were 1500 1d longitude cubes as a result of
-slicing the 3 dimensional cube (15, 100, 100) by longitude (i starts at 0 and 1500 = 15 * 100).
-
-.. hint::
- It is often useful to get a single 2d slice from a multidimensional cube in order to develop a 2d plot function, for example.
- This can be achieved by using the ``next()`` function on the result of
- slices::
-
- first_slice = next(cube.slices(['grid_latitude', 'grid_longitude']))
-
- Once your code can handle a 2d slice, it is then an easy step to loop over **all** 2d slices within the bigger
- cube using the slices method.
-
-
-Cube indexing
-^^^^^^^^^^^^^
-In the same way that you would expect a numeric multidimensional array to be **indexed** to take a subset of your
-original array, you can **index** a Cube for the same purpose.
-
-
-Here are some examples of array indexing in :py:mod:`numpy`::
-
- import numpy as np
- # create an array of 12 consecutive integers starting from 0
- a = np.arange(12)
- print(a)
-
- print(a[0]) # first element of the array
-
- print(a[-1]) # last element of the array
-
- print(a[0:4]) # first four elements of the array (the same as a[:4])
-
- print(a[-4:]) # last four elements of the array
-
- print(a[::-1]) # gives all of the array, but backwards
-
- # Make a 2d array by reshaping a
- b = a.reshape(3, 4)
- print(b)
-
- print(b[0, 0]) # first element of the first and second dimensions
-
- print(b[0]) # first element of the first dimension (+ every other dimension)
-
- # get the second element of the first dimension and all of the second dimension
- # in reverse, by steps of two.
- print(b[1, ::-2])
-
-
-Similarly, Iris cubes have indexing capability::
-
- import iris
- filename = iris.sample_data_path('hybrid_height.nc')
- cube = iris.load_cube(filename)
-
- print(cube)
-
- # get the first element of the first dimension (+ every other dimension)
- print(cube[0])
-
- # get the last element of the first dimension (+ every other dimension)
- print(cube[-1])
-
- # get the first 4 elements of the first dimension (+ every other dimension)
- print(cube[0:4])
-
- # Get the first element of the first and third dimension (+ every other dimension)
- print(cube[0, :, 0])
-
- # Get the second element of the first dimension and all of the second dimension
- # in reverse, by steps of two.
- print(cube[1, ::-2])
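Putting the three approaches from the removed page together, a short sketch (assuming the Iris sample data is installed) might look like this; the coordinate and constraint names are taken from the ``hybrid_height.nc`` sample file used throughout the page::

    import iris

    filename = iris.sample_data_path('hybrid_height.nc')
    cube = iris.load_cube(filename, 'air_potential_temperature')

    # Constraint extraction: keep a single model level.
    level_1 = cube.extract(iris.Constraint(model_level_number=1))

    # Indexing: the first element of the leading dimension.
    first = cube[0]

    # Iteration: grab just the first 2d latitude/longitude slice.
    first_slice = next(cube.slices(['grid_latitude', 'grid_longitude']))

    print(level_1.summary(shorten=True))
    print(first.summary(shorten=True))
    print(first_slice.summary(shorten=True))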
diff --git a/docs/iris/src/whatsnew/1.10.rst b/docs/iris/src/whatsnew/1.10.rst
deleted file mode 100644
index 26f21c0252..0000000000
--- a/docs/iris/src/whatsnew/1.10.rst
+++ /dev/null
@@ -1,176 +0,0 @@
-What's New in Iris 1.10
-***********************
-
-:Release: 1.10
-:Date: 5th September 2016
-
-This document explains the new/changed features of Iris in version 1.10
-(:doc:`View all changes `.)
-
-Iris 1.10 Features
-==================
-.. _iris_grib_added:
-
-* Support has now been added for the
- `iris_grib `_ package, which
- provides GRIB format support in an optional package, separate from Iris.
-
- * If ``iris_grib`` is available, it will always be used in place of the older
- iris module :mod:`iris.fileformats.grib`.
-
- * The capabilities of ``iris_grib`` are essentially the same as the existing
- :mod:`iris.fileformats.grib` when used with ``iris.FUTURE.strict_grib_load=True``,
- with only small detail differences.
-
- * The old :mod:`iris.fileformats.grib` module is now deprecated and may shortly be
- removed.
-
- * If you are already using the recommended :data:`iris.FUTURE` setting
- ``iris.FUTURE.strict_grib_load=True`` this should not cause problems, as
- the new package is all-but identical.
-
- * However, the option ``iris.FUTURE.strict_grib_load`` is itself now
- deprecated, so you should remove code that sets it.
-
- * If, however, your code is still using the older "non-strict" grib
- loading, then you may need to make code changes.
-
- * In particular, the ``field`` object passed to load callbacks is
- different.
- See :class:`iris.fileformats.grib.message.GribMessage` (the
- ``iris_grib.message.GribMessage`` class is the same as this, for now).
-
- * Please exercise your code with the new iris_grib module, and let us know of
- any problems you uncover, such as files that will no longer load with the
- new implementation.
-
-* :meth:`iris.experimental.regrid.PointInCell.regridder` now works across coordinate systems, including non latlon systems. Additionally, the requirement that the source data X and Y coordinates be 2D has been removed. NB: some aspects of this change are backwards incompatible.
-* Plotting non-Gregorian calendars is now supported. This adds `nc_time_axis `_ as a dependency.
-* Promoting a scalar coordinate to a dimension coordinate with :func:`iris.util.new_axis` no longer loads deferred data.
-* The parsing functionality for Cell Methods from netCDF files is available as part of the :mod:`iris.fileformats.netcdf` module as :func:`iris.fileformats.netcdf.parse_cell_methods`.
-* Support for the NameIII Version 2 file format has been added.
-* Loading netcdf data in Mercator and Stereographic projections now accepts optional extra projection parameter attributes (``false_easting``, ``false_northing`` and ``scale_factor_at_projection_origin``), if they match the default values.
-
- * NetCDF files which define a Mercator projection where the ``false_easting``, ``false_northing`` and ``scale_factor_at_projection_origin`` match the defaults will have the projection loaded correctly. Otherwise, a warning will be issued for each parameter that does not match the default and the projection will not be loaded.
- * NetCDF files which define a Stereographic projection where the ``scale_factor_at_projection_origin`` is equal to 1.0 will have the projection loaded correctly. Otherwise, a warning will be issued and the projection will not be loaded.
-
-* The :mod:`iris.plot` routines :func:`~iris.plot.contour`, :func:`~iris.plot.contourf`, :func:`~iris.plot.outline`, :func:`~iris.plot.pcolor`, :func:`~iris.plot.pcolormesh` and :func:`~iris.plot.points` now support plotting cubes with anonymous dimensions by specifying the *numeric index* of the anonymous dimension within the ``coords`` keyword argument.
-
- Note that the axis of the anonymous dimension will be plotted in index space.
-
-* NetCDF loading and saving now supports Cubes that use the LambertConformal coordinate system.
-* The experimental structured Fieldsfile loader :func:`~iris.experimental.fieldsfile.load` has been extended to also load structured PP files.
-
- Structured loading is a streamlined operation, offering the benefit of a significantly faster loading alternative to the more generic :func:`iris.load` mechanism.
-
- Note that structured loading is not an optimised wholesale replacement of :func:`iris.load`. Structured loading is restricted to input containing contiguously ordered fields for each phenomenon that repeat regularly over the same vertical levels and times. For further details, see :func:`~iris.experimental.fieldsfile.load`
-
-* :mod:`iris.experimental.regrid_conservative` is now compatible with ESMPy v7.
-* Saving zonal (i.e. longitudinal) means to PP files now sets the '64s' bit in LBPROC.
-* Loading of 'little-endian' PP files is now supported.
-* All appropriate :mod:`iris.plot` functions now handle an ``axes`` keyword, allowing use of the object oriented matplotlib interface rather than pyplot.
-* The ability to pass file format object lists into the rules based load pipeline, as used for GRIB, Fields Files and PP has been added. The :func:`iris.fileformats.pp.load_pairs_from_fields` and :func:`iris.fileformats.grib.load_pairs_from_fields` are provided to produce cubes from such lists. These lists may have been filtered or altered using the appropriate :mod:`iris.fileformats` modules.
-* Cubes can now have an 'hour' coordinate added with :meth:`iris.coord_categorisation.add_hour`.
-* Time coordinates from PP fields with an lbcode of the form 3xx23 are now correctly encoded with a 360-day calendar.
-* The loading from and saving to netCDF of CF cell_measure variables is supported, along with their representation within a Cube as :attr:`~iris.cube.Cube.cell_measures`.
-* Cubes with anonymous dimensions can now be concatenated. This can only occur along a dimension that is not anonymous.
-* NetCDF saving of ``valid_range``, ``valid_min`` and ``valid_max`` cube attributes is now allowed.
-
-Bugs Fixed
-==========
-* Altered Cell Methods to display a coordinate's standard_name rather than its var_name where appropriate, to avoid human confusion.
-* Saving multiple cubes with netCDF4 protected attributes should now work as expected.
-* Concatenating cubes with singleton dimensions (dimensions of size one) now works properly.
-* Fixed the ``grid_mapping_name`` and ``secant_latitudes`` handling for the LambertConformal coordinate system.
-* Fixed bug in :func:`iris.analysis.cartography.project` where the output projection coordinates didn't have units.
-* Attempting to use :meth:`iris.sample_data_path` to access a file that isn't actually Iris sample data now raises a more descriptive error. A note about the appropriate use of `sample_data_path` has also been added to the documentation.
-* Fixed a bug where regridding or interpolation with the :class:`~iris.analysis.Nearest` scheme returned floating-point results even when the source data was integer typed. It now always returns the same type as the source data.
-* Fixed a bug where regridding circular data would ignore any source masking. This affected any regridding using the :class:`~iris.analysis.Linear` and :class:`~iris.analysis.Nearest` schemes, and also :func:`iris.analysis.interpolate.linear`.
-* The ``coord_name`` parameter to :func:`~iris.fileformats.rules.scalar_cell_method` is now checked correctly.
-* LBPROC is set correctly when a cube containing the minimum of a variable is saved to a PP file. The IA component of LBTIM is set correctly when saving maximum or minimum values.
-* The performance of :meth:`iris.cube.Cube.extract` when a list of values is given to an instance of :class:`iris.Constraint` has been improved considerably.
-* Fixed a bug with :meth:`iris.cube.Cube.data` where an :class:`numpy.ndarray` was not being returned for scalar cubes with lazy data.
-* When saving in netcdf format, the units of 'latitude' and 'longitude' coordinates specified in 'degrees' are saved as 'degrees_north' and 'degrees_east' respectively, as defined in the CF conventions for netCDF files: sections 4.1 and 4.2.
-* Fixed a bug with a class of pp files with lbyr == 0, where the date would cause errors when converting to a datetime object (e.g. when printing a cube).
-
- When processing a pp field with lbtim = 2x, lbyr == lbyrd == 0 and lbmon == lbmond, 'month' and 'month_number' coordinates are created instead of 'time'.
-
-* Fixed a bug in :meth:`~iris.analysis.calculus.curl` where the sign of the r-component for spherical coordinates was opposite to what was expected.
-* A bug that prevented cube printing in some cases has been fixed.
-* Fixed a bug where a deepcopy of a :class:`~iris.coords.DimCoord` would have writable ``points`` and ``bounds`` arrays. These arrays can now no longer be modified in-place.
-* Concatenation no longer occurs when the auxiliary coordinates of the cubes do not match. This check is not applied to AuxCoords that span the dimension the concatenation is occurring along. This behaviour can be switched off by setting the ``check_aux_coords`` kwarg in :meth:`iris.cube.CubeList.concatenate` to False.
-* Fixed a bug in :meth:`iris.cube.Cube.subset` where an exception would be thrown while trying to subset over a non-dimensional scalar coordinate.
-
-Incompatible Changes
-====================
-* The source and target for :meth:`iris.experimental.regrid.PointInCell.regridder` must now have defined coordinate systems (i.e. not ``None``). Additionally, the source data X and Y coordinates must have the same cube dimensions.
-
-Deprecations
-============
-* Deprecated the :class:`iris.Future` option
- ``iris.FUTURE.strict_grib_load``.
- This only affected the module :mod:`iris.fileformats.grib`, which is itself
- now deprecated.
- Please see :ref:`iris_grib package `, above.
-* Deprecated the module :mod:`iris.fileformats.grib`. The new package
- `iris_grib `_ replaces this
- functionality, which will shortly be removed.
- Please see :ref:`iris_grib package `, above.
-* The use of :data:`iris.config.SAMPLE_DATA_DIR` has been deprecated and replaced by the now importable `iris_sample_data `_ package.
-
-* Deprecated the module :mod:`iris.analysis.interpolate`.
- This contains the following public items, all of which are now deprecated and
- will be removed in a future release:
-
- * :func:`~iris.analysis.interpolate.linear`
- * :func:`~iris.analysis.interpolate.regrid`
- * :func:`~iris.analysis.interpolate.regrid_to_max_resolution`
- * :func:`~iris.analysis.interpolate.nearest_neighbour_indices`
- * :func:`~iris.analysis.interpolate.nearest_neighbour_data_value`
- * :func:`~iris.analysis.interpolate.extract_nearest_neighbour`
- * class :class:`~iris.analysis.interpolate.Linear1dExtrapolator`.
-
- Please use the replacement facilities individually noted in the module
- documentation for :mod:`iris.analysis.interpolate`
-* The method :meth:`iris.cube.Cube.regridded` has been deprecated.
- Please use :meth:`iris.cube.Cube.regrid` instead (see
- :meth:`~iris.cube.Cube.regridded` for details).
-* Deprecated :data:`iris.fileformats.grib.hindcast_workaround` and :class:`iris.fileformats.grib.GribWrapper`. The class :class:`iris.fileformats.grib.message.GribMessage` provides alternative means of working with GRIB message instances.
-* Deprecated the module :mod:`iris.fileformats.ff`. Please use the replacement
- facilities in module :mod:`iris.fileformats.um` :
-
- * :func:`iris.fileformats.um.um_to_pp` replaces :class:`iris.fileformats.ff.FF2PP`.
- * :func:`iris.fileformats.um.load_cubes` replaces :func:`iris.fileformats.ff.load_cubes`.
- * :func:`iris.fileformats.um.load_cubes_32bit_ieee` replaces :func:`iris.fileformats.ff.load_cubes_32bit_ieee`.
-
- All other public components are generally deprecated and will be removed in a future release.
-
-* The :func:`iris.fileformats.pp.as_pairs` and :func:`iris.fileformats.grib.as_pairs` are deprecated. These are replaced with :func:`iris.fileformats.pp.save_pairs_from_cube` and :func:`iris.fileformats.grib.save_pairs_from_cube`.
-* ``iris.fileformats.pp_packing`` has been deprecated. Please install the separate `mo_pack `_ package instead. This provides the same functionality.
-* Deprecated logging functions (currently used only for rules logging):
- :data:`iris.config.iris.config.RULE_LOG_DIR`,
- :data:`iris.config.iris.config.RULE_LOG_IGNORE` and
- :data:`iris.fileformats.rules.log`.
-
-* Deprecated all the remaining text rules mechanisms:
- :class:`iris.fileformats.rules.DebugString`,
- :class:`iris.fileformats.rules.CMAttribute`,
- :class:`iris.fileformats.rules.CMCustomAttribute`,
- :class:`iris.fileformats.rules.CoordAndDims`,
- :class:`iris.fileformats.rules.Rule`,
- :class:`iris.fileformats.rules.FunctionRule`,
- :class:`iris.fileformats.rules.ProcedureRule`,
- :class:`iris.fileformats.rules.RulesContainer` and
- :func:`iris.fileformats.rules.calculate_forecast_period`.
-
-* Deprecated the custom pp save rules mechanism implemented by the functions :func:`iris.fileformats.pp.add_save_rules` and :func:`iris.fileformats.pp.reset_save_rules`. The functions :func:`iris.fileformats.pp.as_fields`, :func:`iris.fileformats.pp.as_pairs` and :func:`iris.fileformats.pp.save_fields` provide alternative means of achieving the same ends.
-
-Documentation Changes
-=====================
-* It is now clear that repeated values will form a group under :meth:`iris.cube.Cube.aggregated_by` even if they aren't consecutive. Hence, the documentation for :mod:`iris.cube` has been changed to reflect this.
-* The documentation for :meth:`iris.analysis.calculus.curl` has been updated for clarity.
-* False claims about :meth:`iris.fileformats.pp.save`, :meth:`iris.fileformats.pp.as_pairs`, and :meth:`iris.fileformats.pp.as_fields` being able to take instances of :class:`iris.cube.CubeList` as inputs have been removed.
-* A :doc:`new code example <../examples/Meteorology/wind_speed>`, demonstrating the use of a quiver plot to display wind speeds over Lake Victoria, has been added.
-* The docstring for :data:`iris.analysis.SUM` has been updated to explicitly state that weights passed to it aren't normalised internally.
-* A note regarding the impossibility of partially collapsing multi-dimensional coordinates has been added to the user guide.
-
diff --git a/docs/iris/src/whatsnew/1.11.rst b/docs/iris/src/whatsnew/1.11.rst
deleted file mode 100644
index eb93ec2f8c..0000000000
--- a/docs/iris/src/whatsnew/1.11.rst
+++ /dev/null
@@ -1,31 +0,0 @@
-What's New in Iris 1.11
-***********************
-
-:Release: 1.11
-:Date: 2016-11-28
-
-This document explains the new/changed features of Iris in version 1.11
-(:doc:`View all changes `.)
-
-Iris 1.11 Features
-==================
-* If available, display the ``STASH`` code instead of ``unknown / (unknown)`` when printing cubes
- with no ``standard_name`` and no ``units``.
-* Support for saving to netCDF with data packing has been added.
-* The coordinate system :class:`iris.coord_systems.LambertAzimuthalEqualArea` has been added with NetCDF saving support.
-
-Bugs Fixed
-==========
-* Fixed a floating point tolerance bug in :func:`iris.experimental.regrid.regrid_area_weighted_rectilinear_src_and_grid`
- for wrapped longitudes.
-* Allow :func:`iris.util.new_axis` to promote the nominated scalar coordinate of a cube
- with a scalar masked constant data payload.
-* Fixed a bug where :func:`iris.util._is_circular` would erroneously return false
- when coordinate values are decreasing.
-* When saving to NetCDF, the existing behaviour of writing string attributes as ASCII has been
- maintained across known versions of netCDF4-python.
-
-Documentation Changes
-=====================
-* Fuller doc-string detail added to :func:`iris.analysis.cartography.unrotate_pole` and
- :func:`iris.analysis.cartography.rotate_pole`.
diff --git a/docs/iris/src/whatsnew/1.13.rst b/docs/iris/src/whatsnew/1.13.rst
deleted file mode 100644
index 532c160f13..0000000000
--- a/docs/iris/src/whatsnew/1.13.rst
+++ /dev/null
@@ -1,37 +0,0 @@
-What's New in Iris 1.13
-***********************
-
-:Release: 1.13
-:Date: 2017-05-17
-
-
-This document explains the new/changed features of Iris in version 1.13
-(:doc:`View all changes `.)
-
-Iris 1.13 Features
-==================
-
-* Allow the reading of NAME trajectories stored by time instead of by particle number.
-* An experimental link to python-stratify via :mod:`iris.experimental.stratify`.
-* Data arrays may be shared between cubes, and subsets of cubes, by using the :meth:`iris.cube.share_data` flag.
-
-
-Bug Fixes
-=========
-
-* The bounds are now set correctly on the longitude coordinate if a zonal mean diagnostic has been loaded from a PP file as per the CF Standard.
-* NetCDF loading will now determine whether there is a string-valued scalar label, i.e. a character variable that only has one dimension (the length of the string), and interpret this correctly.
-* A line plot of geographic coordinates (e.g. drawing a trajectory) wraps around the edge of the map cleanly, rather than plotting a segment straight across the map.
-* When saving to PP, lazy data is preserved when generating PP fields from cubes so that a list of cubes can be saved to PP without excessive memory requirements.
-* An error is now correctly raised if a user tries to perform an arithmetic operation on two cubes with mismatching coordinates. Previously these cases were caught by the add and subtract operators, and now it is also caught by the multiply and divide operators.
-* Limited area Rotated Pole datasets where the data range is ``0 <= lambda < 360``, for example as produced in New Zealand, are plotted over a sensible map extent by default.
-* Removed the potential for a RuntimeWarning: overflow encountered in ``int_scalars`` which was missed during collapsed calculations. This could trip up unwary users of limited data types, such as int32 for very large numbers (e.g. seconds since 1970).
-* The CF conventions state that certain ``formula_terms`` terms may be omitted and assumed to be zero (http://cfconventions.org/cf-conventions/v1.6.0/cf-conventions.html#dimensionless-v-coord) so Iris now allows factories to be constructed with missing terms.
-* In the User Guide's contour plot example, clabel inline is set to be False so that it renders correctly, avoiding spurious horizontal lines across plots, although this does make labels a little harder to see.
-* The computation of area weights has been changed to a more numerically stable form. The previous form converted latitude to colatitude and used difference of cosines in the cell area computation. This formulation uses latitude and difference of sines. The conversion from latitude to colatitude at lower precision causes errors when computing the cell areas.
-
-Testing
-=======
-
-* Iris has adopted conda-forge to provide environments for continuous integration testing.
-
diff --git a/docs/iris/src/whatsnew/1.4.rst b/docs/iris/src/whatsnew/1.4.rst
deleted file mode 100644
index 053a6e1096..0000000000
--- a/docs/iris/src/whatsnew/1.4.rst
+++ /dev/null
@@ -1,205 +0,0 @@
-What's new in Iris 1.4
-**********************
-
-:Release: 1.4.0
-:Date: 14 June 2013
-
-This document explains the new/changed features of Iris in version 1.4.
-(:doc:`View all changes `.)
-
-Iris 1.4 features
-=================
-
-A summary of the main features added with version 1.4:
-
-* Multiple cubes can now be exported to a NetCDF file.
-* Correct nearest-neighbour calculation with circular coords.
-* :ref:`Experimental regridding enhancements`.
-* :ref:`Iris-Pandas interoperability`.
-* NIMROD level type 12 (levels below ground) can now be loaded.
-* :ref:`Load cubes from the internet via OPeNDAP`.
-* :ref:`GeoTiff export (experimental)`.
-* :ref:`Cube merge update`.
-* :ref:`Unambiguous season year naming`.
-* NIMROD files with multiple fields and period of interest can now be loaded.
-* Missing values are now handled when loading GRIB messages.
-* PP export rule to calculate forecast period.
-* :func:`~iris.cube.Cube.aggregated_by` now maintains array masking.
-* IEEE 32bit fieldsfiles can now be loaded.
-* NetCDF transverse mercator and climatology data can now be loaded.
-* Polar stereographic GRIB data can now be loaded.
-* :ref:`Cubes with no vertical coord can now be exported to GRIB`.
-* :ref:`Simplified resource configuration`.
-* :ref:`Extended GRIB parameter translation`.
-* Added an optimisation for single-valued coordinate constraints.
-* :ref:`One dimensional linear interpolation fix`.
-* :ref:`Fix for iris.analysis.calculus.differentiate`.
-* Fixed pickling of cubes with 2D aux coords from NetCDF.
-* Fixed bug which ignored the "coords" keyword for certain plots.
-* Use the latest release of Cartopy, v0.8.0.
-
-
-Incompatible changes
---------------------
-* As part of simplifying the mechanism for accessing test data,
- :func:`iris.io.select_data_path`, :data:`iris.config.DATA_REPOSITORY`,
- :data:`iris.config.MASTER_DATA_REPOSITORY` and
- :data:`iris.config.RESOURCE_DIR` have been removed.
-
-Deprecations
-------------
-* The *add_custom_season_** functions from :mod:`~iris.coord_categorisation` have been deprecated in favour of adding their functionality to the *add_season_** functions
-
-
-.. _OPeNDAP: http://www.opendap.org/about
-
-
-.. _exp-regrid:
-
-Experimental regridding enhancements
-====================================
-
-Bilinear, area-weighted and area-conservative regridding functions are now available in
-:mod:`iris.experimental`. These functions support masked data and handle
-derived coordinates such as hybrid height. The final API is still in development.
-In the meantime:
-
-Bilinear rectilinear regridding
--------------------------------
-:func:`~iris.experimental.regrid.regrid_bilinear_rectilinear_src_and_grid`
-can be used to regrid a cube onto a horizontal grid defined in a different coordinate system.
-The data values are calculated using bilinear interpolation.
-
-For example::
-
- from iris.experimental.regrid import regrid_bilinear_rectilinear_src_and_grid
- regridded_cube = regrid_bilinear_rectilinear_src_and_grid(source_cube, target_grid_cube)
-
-Area-weighted regridding
-------------------------
-:func:`~iris.experimental.regrid.regrid_area_weighted_rectilinear_src_and_grid` can be used to regrid a cube
-such that the data values of the resulting cube are calculated using the
-area-weighted mean.
-
-For example::
-
- from iris.experimental.regrid import regrid_area_weighted_rectilinear_src_and_grid as regrid_area_weighted
- regridded_cube = regrid_area_weighted(source_cube, target_grid_cube)
-
-Area-conservative regridding
-----------------------------
-:func:`~iris.experimental.regrid_conservative.regrid_conservative_via_esmpy`
-can be used for area-conservative regridding between geographical coordinate systems.
-This uses the ESMF library functions, via the ESMPy interface.
-
-For example::
-
- from iris.experimental.regrid_conservative import regrid_conservative_via_esmpy
- regridded_cube = regrid_conservative_via_esmpy(source_cube, target_grid_cube)
-
-
-.. _iris-pandas:
-
-Iris-Pandas interoperability
-============================
-Conversion to and from Pandas Series_ and DataFrames_ is now available.
-See :mod:`iris.pandas` for more details.
-
-.. _Series: http://pandas.pydata.org/pandas-docs/stable/api.html#series
-.. _DataFrames: http://pandas.pydata.org/pandas-docs/stable/api.html#dataframe
-
-
-.. _load-opendap:
-
-Load cubes from the internet via OPeNDAP
-========================================
-Cubes can now be loaded directly from the internet, via OPeNDAP_.
-
-For example::
-
- cubes = iris.load("http://geoport.whoi.edu/thredds/dodsC/bathy/gom15")
-
-
-.. _geotiff_export:
-
-GeoTiff export
-==============
-With this experimental feature, two dimensional cubes can now be exported to GeoTiff files.
-
-For example::
-
- from iris.experimental.raster import export_geotiff
- export_geotiff(cube, filename)
-
-.. note::
-
- This is a raw data export only and does not save Iris plots.
-
-
-.. _cube-merge-update:
-
-Cube merge update
-=================
-Cube merging now favours numerical coordinates over string coordinates
-to describe a dimension, and :class:`~iris.coords.DimCoord` over
-:class:`~iris.coords.AuxCoord`. These modifications prevent the error:
-*"No functional relationship between separable and inseparable candidate dimensions"*.
-
-
-.. _season-year-name:
-
-Unambiguous season year naming
-==============================
-The default names of categorisation coordinates are now less ambiguous.
-For example, :func:`~iris.coord_categorisation.add_month_number` and
-:func:`~iris.coord_categorisation.add_month_fullname` now create
-"month_number" and "month_fullname" coordinates.
-
-
-.. _grib-novert:
-
-Cubes with no vertical coord can now be exported to GRIB
-========================================================
-Iris can now export cubes with no vertical coord to GRIB.
-The solution is still under discussion: See https://github.com/SciTools/iris/issues/519.
-
-
-.. _simple_cfg:
-
-Simplified resource configuration
-=================================
-A new configuration variable called :data:`iris.config.TEST_DATA_DIR`
-has been added, replacing the previous combination of
-:data:`iris.config.MASTER_DATA_REPOSITORY` and
-:data:`iris.config.DATA_REPOSITORY`. This constant should be the path
-to a directory containing the test data required by the unit tests. It can
-be set by adding a ``test_data_dir`` entry to the ``Resources`` section of
-``site.cfg``. See :mod:`iris.config` for more details.
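-
-A minimal sketch of how this is typically used (the directory path shown is,
-of course, only an example)::
-
-    # site.cfg
-    #   [Resources]
-    #   test_data_dir = /path/to/iris-test-data
-
-    import iris.config
-    print(iris.config.TEST_DATA_DIR)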
-
-
-.. _grib_params:
-
-Extended GRIB parameter translation
-===================================
- - More GRIB2 params are recognised on input.
- - Now translates some codes on GRIB2 output.
- - Some GRIB2 params may load with a different standard_name.
-
-
-
-.. _one-d-linear:
-
-One dimensional linear interpolation fix
-========================================
-:func:`~iris.analysis.interpolate.linear` can now extrapolate from a single point
-assuming a gradient of zero. This prevents an issue when loading cross sections
-with a hybrid height coordinate, on a staggered grid and only a single orography field.
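-
-A hedged sketch of the affected call, using the 1.4-era interpolation API
-(:mod:`iris.analysis.interpolate` was removed in later Iris versions; the
-cube below is invented for illustration)::
-
-    import numpy as np
-    from iris.analysis.interpolate import linear
-    from iris.coords import DimCoord
-    from iris.cube import Cube
-
-    # A cube with only a single point on its vertical coordinate.
-    cube = Cube(np.array([5.0]), long_name='dummy', units='1')
-    cube.add_dim_coord(DimCoord([100.0], long_name='height', units='m'), 0)
-
-    # Extrapolation from the single point now assumes a zero gradient,
-    # so both requested points take the single source value.
-    result = linear(cube, [('height', [50.0, 150.0])])
-    print(result.data)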
-
-
-.. _calc-diff-fix:
-
-Fix for iris.analysis.calculus.differentiate
-=============================================
-A bug in :func:`~iris.analysis.calculus.differentiate` that had the potential to cause
-the loss of coordinate metadata when calculating the curl or the derivative of a cube has been fixed.
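-
-A small, illustrative sketch of the function involved (the cube is
-constructed in-line for demonstration only)::
-
-    import numpy as np
-    from iris.analysis.calculus import differentiate
-    from iris.coords import DimCoord
-    from iris.cube import Cube
-
-    cube = Cube(np.arange(5.0) ** 2, long_name='f', units='1')
-    cube.add_dim_coord(DimCoord(np.arange(5.0), long_name='x', units='m'), 0)
-
-    # The derivative cube should now retain the expected coordinate metadata.
-    d_f_d_x = differentiate(cube, 'x')
-    print(d_f_d_x.summary(shorten=True))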
-
diff --git a/docs/iris/src/whatsnew/1.7.rst b/docs/iris/src/whatsnew/1.7.rst
deleted file mode 100644
index 2f3a52fbb9..0000000000
--- a/docs/iris/src/whatsnew/1.7.rst
+++ /dev/null
@@ -1,248 +0,0 @@
-What's new in Iris 1.7
-**********************
-
-This document explains the new/changed features of Iris in version 1.7.
-(:doc:`View all changes `.)
-
-:Release: 1.7.4
-:Date: 15th April 2015
-
-Iris 1.7 features
-=================
-
-.. _showcase:
-
-.. admonition:: Showcase: Iris is making use of Biggus
-
- Iris is now making extensive use of `Biggus `_
- for virtual arrays and lazy array evaluation. In practice this means that analyses
- of cubes with data bigger than the available system memory are now possible.
-
- Other than the improved functionality the changes are mostly
- transparent; for example, before the introduction of biggus, MemoryErrors
- were likely for very large datasets::
-
-        >>> result = extremely_large_cube.collapsed('time', iris.analysis.MEAN)
- MemoryError
-
- Now, for supported operations, the evaluation is lazy (i.e. it doesn't take
- place until the actual data is subsequently requested) and can handle data
- larger than available system memory::
-
-        >>> result = extremely_large_cube.collapsed('time', iris.analysis.MEAN)
-        >>> print(type(result))
-        <class 'iris.cube.Cube'>
-
- Memory is still a limiting factor if ever the data is desired as a NumPy array
- (e.g. via :data:`cube.data `), but additional methods have
- been added to the Cube to support querying and subsequently accessing the "lazy"
- data form (see :meth:`~iris.cube.Cube.has_lazy_data` and
- :meth:`~iris.cube.Cube.lazy_data`).
-
-.. admonition:: Showcase: New interpolation and regridding API
-
- New interpolation and regridding interfaces have been added which simplify and
- extend the existing functionality.
-
- The interfaces are exposed on the cube in the form of the
- :meth:`~iris.cube.Cube.interpolate` and :meth:`~iris.cube.Cube.regrid` methods.
- Conceptually the signatures of the methods are::
-
- interpolated_cube = cube.interpolate(interpolation_points, interpolation_scheme)
-
- and::
-
- regridded_cube = cube.regrid(target_grid_cube, regridding_scheme)
-
- Whilst not all schemes have been migrated to the new interface,
- :class:`iris.analysis.Linear` defines both linear interpolation and regridding,
- and :class:`iris.analysis.AreaWeighted` defines an area weighted regridding
- scheme.
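-
-As a concrete, hedged illustration of the new interface (the cube and sample
-points are invented for demonstration)::
-
-    import numpy as np
-    import iris.analysis
-    from iris.coords import DimCoord
-    from iris.cube import Cube
-
-    cube = Cube(np.arange(4.0), long_name='temperature', units='K')
-    cube.add_dim_coord(
-        DimCoord([0.0, 10.0, 20.0, 30.0],
-                 standard_name='latitude', units='degrees'), 0)
-
-    # Linear interpolation at new latitude sample points.
-    interpolated = cube.interpolate([('latitude', [5.0, 15.0])],
-                                    iris.analysis.Linear())
-    print(interpolated.data)  # expected: [0.5, 1.5]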
-
-.. admonition:: Showcase: Merge and concatenate reporting
-
- Merge reporting is designed as an aid to the merge processes. Should merging
- a :class:`~iris.cube.CubeList` fail, merge reporting means that a descriptive
- error will be raised that details the differences between the cubes in the
- :class:`~iris.cube.CubeList` that prevented the merge from being successful.
-
- A new :class:`~iris.cube.CubeList` method, called
- :meth:`~iris.cube.CubeList.merge_cube`, has been introduced. Calling it on a
- :class:`~iris.cube.CubeList` will result in a single merged
- :class:`~iris.cube.Cube` being returned or an error message being raised
- that describes why the merge process failed.
-
- The following example demonstrates the error message that describes a merge
- failure caused by cubes having differing attributes::
-
- >>> cube_list = iris.cube.CubeList((c1, c2))
- >>> cube_list.merge_cube()
- Traceback (most recent call last):
- ...
- raise iris.exceptions.MergeError(msgs)
- iris.exceptions.MergeError: failed to merge into a single cube.
- cube.attributes keys differ: 'foo'
-
- The naming of this new method mirrors that of Iris load functions, where
- one would always expect a :class:`~iris.cube.CubeList` from :func:`iris.load`
- and a :class:`~iris.cube.Cube` from :func:`iris.load_cube`.
-
- Concatenate reporting is the equivalent process for concatenating a
- :class:`~iris.cube.CubeList`. It is accessed through the method
- :meth:`~iris.cube.CubeList.concatenate_cube`, which will return a single
- concatenated cube or produce an error message that describes why the
- concatenate process failed.
-
-.. admonition:: Showcase: Cube broadcasting
-
- When performing cube arithmetic, cubes now follow similar broadcasting rules
- as NumPy arrays.
-
- However, the additional richness of Iris coordinate meta-data provides an
- enhanced capability beyond the basic broadcasting behaviour of NumPy.
-
- This means that when performing cube arithmetic, the dimensionality and shape of
- cubes no longer need to match. For example, if the dimensionality of a cube is
- reduced by collapsing, then the result can be used to subtract from the original
- cube to calculate an anomaly::
-
- >>> time_mean = original_cube.collapsed('time', iris.analysis.MEAN)
- >>> mean_anomaly = original_cube - time_mean
-
- Given both broadcasting **and** coordinate meta-data, Iris can now perform
- arithmetic with cubes that have similar but not identical shape::
-
- >>> similar_cube = original_cube.copy()
- >>> similar_cube.transpose()
- >>> zero_cube = original_cube - similar_cube
-
-* Merge reporting that raises a descriptive error if the merge process fails.
-* Linear interpolation and regridding now make use of SciPy's RegularGridInterpolator
- for much faster linear interpolation.
-* NAME file loading now handles the "no time averaging" column and translates
- height/altitude above ground/sea-level columns into appropriate coordinate metadata.
-* The NetCDF saver has been extended to allow saving of cubes with hybrid pressure
- auxiliary factories.
-* PP/FF loading supports LBLEV of 9999.
-* Extended GRIB1 loading to support data on hybrid pressure levels.
-* :func:`iris.coord_categorisation.add_day_of_year` can be used to add categorised
- day of year coordinates based on time coordinates with non-Gregorian calendars.
-* Support for loading data on reduced grids from GRIB files in raw form without
- automatically interpolating to a regular grid.
-* The coordinate systems :class:`iris.coord_systems.Orthographic` and
- :class:`iris.coord_systems.VerticalPerspective` (for imagery from geostationary
- satellites) have been added.
-* Extended NetCDF loading to support the "ocean sigma over z" auxiliary coordinate
- factory.
-* Support added for loading CF-NetCDF data with bounds arrays that are missing a
- vertex dimension.
-* :meth:`iris.cube.Cube.rolling_window` can now be used with string-based
- :class:`iris.coords.AuxCoord` instances.
-* Loading of PP and FF files has been optimised through deferring creation of
- PPField attributes.
-* Automatic association of a coordinate's CF formula terms variable with the
- data variable associated with that coordinate.
-* PP loading translates cross-section height into a dimensional auxiliary coordinate.
-* String auxiliary coordinates can now be plotted with the Iris plotting wrappers.
-* :func:`iris.analysis.geometry.geometry_area_weights` now allows for the calculation of
- normalized cell weights.
-* Many new translations between the CF spec and STASH codes or GRIB2 parameter codes.
-* PP save rules add the data's UM Version to the attributes of the saved file
- when appropriate.
-* NetCDF reference surface variable promotion available through the
- :class:`iris.FUTURE` mechanism.
-* A speed improvement in calculation of :func:`iris.analysis.geometry.geometry_area_weights`.
-* The mdtol keyword was added to area-weighted regridding to allow control of the
-  tolerance for missing data. For a further description of this concept, see
-  :class:`iris.analysis.AreaWeighted` and the sketch after this list.
-* Handling for patching of the CF conventions global attribute via a defined
- cf_patch_conventions function.
-* Deferred GRIB data loading has been introduced for reduced memory consumption when
- loading GRIB files.
-* Concatenate reporting that raises a descriptive error if the concatenation
- process fails.
-* A speed improvement when loading PP or FF data and constraining on STASH code.
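-
-As a hedged sketch of the new mdtol keyword mentioned above (the source and
-target cubes are assumed to exist and are not constructed here)::
-
-    import iris.analysis
-
-    # Mask a target cell whenever more than half of its contributing source
-    # area is itself masked.
-    scheme = iris.analysis.AreaWeighted(mdtol=0.5)
-    # regridded = source_cube.regrid(target_grid_cube, scheme)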
-
-Bugs fixed
-==========
-* Data containing more than one reference cube for constructing hybrid height
- coordinates can now be loaded.
-* Removed cause of increased margin of error when interpolating.
-* Changed floating-point precision used when wrapping points for interpolation.
-* Mappables that can be used to generate colorbars are now returned by Iris
- plotting wrappers.
-* NetCDF load ignores over-specified formula terms on bounded dimensionless vertical
- coordinates.
-* Auxiliary coordinate factory loading now correctly interprets formula term
-  variables for "atmosphere hybrid sigma pressure" coordinate data.
-* Corrected comparison of NumPy NaN values in cube merge process.
-* Fixes for :meth:`iris.cube.Cube.intersection` to correct calculating the intersection
- of a cube with split bounds, handling of circular coordinates, handling of
-  monotonically descending bounded coordinates and for finding a wrapped two-point
- result and longitude tolerances.
-* A bug affecting :meth:`iris.cube.Cube.extract` and :meth:`iris.cube.CubeList.extract`
- that led to unexpected behaviour when operating on scalar cubes has been fixed.
-* Aggregate_by may now be passed single-value coordinates.
-* Making a copy of a :class:`iris.coords.DimCoord` no longer results in the writeable
- flag on the copied points and bounds arrays being set to True.
-* Can now save to PP a cube that has vertical levels but no orography.
-* Fix a bug causing surface altitude and surface pressure fields to not appear
- in cubes loaded with a STASH constraint.
-* Fixed support for :class:`iris.fileformats.pp.STASH` objects in STASH constraints.
-* A fix to avoid a problem where cube attribute names clash with NetCDF reserved attribute names.
-* A fix to allow :meth:`iris.cube.CubeList.concatenate` to deal with descending coordinate order.
-* Add missing NetCDF attribute `varname` when constructing a new :class:`iris.coords.AuxCoord`.
-* The datatype of time arrays converted with :func:`iris.util.unify_time_units` is now preserved.
-
-Bugs fixed in v1.7.3
-^^^^^^^^^^^^^^^^^^^^
-* Scalar dimension coordinates can now be concatenated with :meth:`iris.cube.CubeList.concatenate`.
-* Arbitrary names can no longer be set for elements of a :class:`iris.fileformats.pp.SplittableInt`.
-* Cubes that contain a pseudo-level coordinate can now be saved to PP.
-* Fixed a bug in the FieldsFile loader that prevented it always loading all available fields.
-
-Bugs fixed in v1.7.4
-^^^^^^^^^^^^^^^^^^^^
-* :meth:`Coord.guess_bounds` can now deal with circular coordinates.
-* :meth:`Coord.nearest_neighbour_index` can now work with descending bounds.
-* Passing `weights` to :meth:`Cube.rolling_window` no longer prevents other
- keyword arguments from being passed to the aggregator.
-* Several minor fixes to allow use of Iris on Windows.
-* Made use of the new standard_parallels keyword in Cartopy's LambertConformal
- projection (Cartopy v0.12). Older versions of Iris will not be able to
- create LambertConformal coordinate systems with Cartopy >= 0.12.
-
-Incompatible changes
-====================
-* Saving a cube with a STASH attribute to NetCDF now produces a variable
- with an attribute of "um_stash_source" rather than "ukmo__um_stash_source".
-* Cubes saved to NetCDF with a coordinate system referencing a spherical ellipsoid
- now result in the grid mapping variable containing only the "earth_radius" attribute,
- rather than the "semi_major_axis" and "semi_minor_axis".
-* Collapsing a cube over all of its dimensions now results in a scalar cube rather
- than a 1d cube.
-
-Deprecations
-============
-* :func:`iris.util.ensure_array` has been deprecated.
-* Deprecated the :func:`iris.fileformats.pp.reset_load_rules` and
- :func:`iris.fileformats.grib.reset_load_rules` functions.
-* Matplotlib is no longer a core Iris dependency.
-
-Documentation Changes
-=====================
-* New sections on :ref:`cube broadcasting ` and
- :doc:`regridding and interpolation `
- have been added to the :doc:`user guide `.
-* An example demonstrating custom log-scale colouring has been added.
- See :ref:`General-anomaly_log_colouring`.
-* An example demonstrating the creation of a custom
- :class:`iris.analysis.Aggregator` has been added.
- See :ref:`General-custom_aggregation`.
-* An example of reprojecting data from 2D auxiliary spatial coordinates
- (such as that from the ORCA grid) has been added. See :ref:`General-orca_projection`.
-* A clarification of the behaviour of :func:`iris.analysis.calculus.differentiate`.
-* A new :doc:`"whitepapers" ` section has been added to the documentation along
- with the addition of a paper providing an :doc:`overview of the load process for UM-like
- fileformats (e.g. PP and Fieldsfile) `.
-
diff --git a/docs/iris/src/whatsnew/1.8.rst b/docs/iris/src/whatsnew/1.8.rst
deleted file mode 100644
index c763411ed8..0000000000
--- a/docs/iris/src/whatsnew/1.8.rst
+++ /dev/null
@@ -1,181 +0,0 @@
-What's new in Iris 1.8
-**********************
-
-:Release: 1.8.1
-:Date: 3rd June 2015
-
-This document explains the new/changed features of Iris in version 1.8.
-(:doc:`View all changes `.)
-
-Iris 1.8 features
-=================
-
-.. _showcase:
-
-.. admonition:: Showcase: Rotate winds
-
- Iris can now rotate and unrotate wind vector data by transforming the wind
- vector data to another coordinate system.
-
- For example::
-
- >>> from iris.analysis.cartography import rotate_winds
- >>> u_cube = iris.load_cube('my_rotated_u_wind_cube.pp')
- >>> v_cube = iris.load_cube('my_rotated_v_wind_cube.pp')
- >>> target_cs = iris.coord_systems.GeogCS(6371229.0)
- >>> u_prime, v_prime = rotate_winds(u_cube, v_cube, target_cs)
-
-.. admonition:: Showcase: Nearest-neighbour scheme
-
- A nearest-neighbour scheme for interpolation and regridding has been added
- to Iris. This joins the existing :class:`~iris.analysis.Linear` and
- :class:`~iris.analysis.AreaWeighted` interpolation and regridding schemes.
-
- For example::
-
- >>> result = cube.interpolate(sample_points, iris.analysis.Nearest())
- >>> regridded_cube = cube.regrid(target_grid, iris.analysis.Nearest())
-
-.. admonition:: Showcase: Slices over a coordinate
-
- You can slice over one or more dimensions of a cube using :meth:`iris.cube.Cube.slices_over`.
- This provides similar functionality to :meth:`~iris.cube.Cube.slices` but with
- almost the opposite outcome.
-
- Using :meth:`~iris.cube.Cube.slices` to slice a cube on a selected dimension returns
- all possible slices of the cube with the selected dimension retaining its dimensionality.
- Using :meth:`~iris.cube.Cube.slices_over` to slice a cube on a selected
- dimension returns all possible slices of the cube over the selected dimension.
-
- To demonstrate this::
-
- >>> cube = iris.load(iris.sample_data_path('colpex.pp'))[0]
- >>> print(cube.summary(shorten=True))
- air_potential_temperature / (K) (time: 6; model_level_number: 10; grid_latitude: 83; grid_longitude: 83)
- >>> my_slice = next(cube.slices('time'))
- >>> my_slice_over = next(cube.slices_over('time'))
- >>> print(my_slice.summary(shorten=True))
- air_potential_temperature / (K) (time: 6)
- >>> print(my_slice_over.summary(shorten=True))
- air_potential_temperature / (K) (model_level_number: 10; grid_latitude: 83; grid_longitude: 83)
-
-
-* :func:`iris.cube.CubeList.concatenate` now works with `biggus `_ arrays and so
- now supports concatenation of cubes with deferred data.
-* Improvements to NetCDF saving through using biggus:
-
- * A cube's lazy data payload will still be lazy after saving; the data will not
- be loaded into memory by the save operation.
- * Cubes with data payloads larger than system memory can now be saved to NetCDF
- through biggus streaming the data to disk.
-
-* :func:`iris.util.demote_dim_coord_to_aux_coord` and :func:`iris.util.promote_aux_coord_to_dim_coord`
- allow a coordinate to be easily demoted or promoted within a cube.
-* :func:`iris.util.squeeze` removes all length 1 dimensions from a cube, and demotes
- any associated squeeze dimension :class:`~iris.coords.DimCoord` to be a scalar coordinate.
-* :meth:`iris.cube.Cube.slices_over`, which returns an iterator of all sub-cubes along a given
- coordinate or dimension index.
-* :meth:`iris.cube.Cube.interpolate` now accepts datetime.datetime and
- netcdftime.datetime instances for date or time coordinates.
-* Many new and updated translations between CF spec and STASH codes or GRIB2 parameter
- codes.
-* PP/FF loader creates a height coordinate at 1.5m or 10m for certain relevant stash codes.
-* Lazy aggregator support for the :class:`standard deviation `
- aggregator has been added.
-* A speed improvement in calculation of :func:`iris.analysis.cartography.area_weights`.
-* Experimental support for unstructured grids has been added with :func:`iris.experimental.ugrid`.
- This has been implemented using `UGRID `_.
-* :meth:`iris.cube.CubeList.extract_overlapping` supports extraction of cubes over
- regions where common coordinates overlap, over multiple coordinates.
-* Warnings raised due to invalid units in loaded data have been suppressed.
-* Experimental low-level read and write access for FieldsFile variants is now supported
- via :class:`iris.experimental.um.FieldsFileVariant`.
-* PP loader will return cubes for all fields prior to a field with a problematic
- header before raising an exception.
-* NetCDF loader skips invalid global attributes, raising a warning rather than raising an
- exception.
-* A warning is now raised rather than an exception when construction of an
-  :class:`~iris.aux_factory.AuxCoordFactory` fails.
-* Supported :class:`aux coordinate factories `
- have been extended to include:
-
- * ``ocean sigma coordinate``,
- * ``ocean s coordinate``,
- * ``ocean s coordinate, generic form 1``, and
- * ``ocean s coordinate, generic form 2``.
-
-* :meth:`iris.cube.Cube.intersection` now supports taking a points-only intersection.
- Any bounds on intersected coordinates are ignored but retained.
-* The FF loader's known handled grids now includes ``Grid 21``.
-* A :class:`nearest neighbour ` scheme is now provided for
- :meth:`iris.cube.Cube.interpolate` and :meth:`iris.cube.Cube.regrid`.
-* :func:`iris.analysis.cartography.rotate_winds` supports transformation of wind vectors
- to a different coordinate system.
-* NumPy universal functions can now be applied to cubes using
-  :func:`iris.analysis.maths.apply_ufunc` (see the sketch after this list).
-* Generic functions can be applied to :class:`~iris.cube.Cube` instances using
- :class:`iris.analysis.maths.IFunc`.
-* The :class:`iris.analysis.Linear` scheme now supports regridding as well as interpolation.
- This enables :meth:`iris.cube.Cube.regrid` to perform bilinear regridding, which now
- replaces the experimental routine "iris.experimental.regrid.regrid_bilinear_rectilinear_src_and_grid".
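-
-A brief, illustrative sketch of applying a NumPy ufunc to a cube (cube and
-coordinate names are invented for demonstration)::
-
-    import numpy as np
-    import iris.analysis.maths
-    from iris.coords import DimCoord
-    from iris.cube import Cube
-
-    cube = Cube(np.array([1.0, 4.0, 9.0]), long_name='f', units='1')
-    cube.add_dim_coord(DimCoord(np.arange(3.0), long_name='x'), 0)
-
-    # Apply a NumPy universal function to the cube's data, returning a cube.
-    result = iris.analysis.maths.apply_ufunc(np.sqrt, cube)
-    print(result.data)  # expected: [1. 2. 3.]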
-
-Bugs fixed
-==========
-
-1.8.0
-------
-* Fix in netCDF loader to correctly determine whether the longitude coordinate
- (including scalar coordinates) is circular.
-* :meth:`iris.cube.Cube.intersection` now supports bounds that extend slightly beyond 360
- degrees.
-* Lateral Boundary Condition (LBC) type FieldFiles are now handled correctly by the FF loader.
-* Making a copy of a scalar cube with no data now correctly copies the data array.
-* Height coordinates in NAME trajectory output files have been changed to match other
- NAME output file formats.
-* Fixed datatype when loading an ``integer_constants`` array from a FieldsFile.
-* FF/PP loader adds appropriate cell methods for ``lbtim.ib = 3`` intervals.
-* An exception is raised if the units of the latitude and longitude coordinates
- of the cube passed into :func:`iris.analysis.cartography.area_weights` are not
- convertible to radians.
-* GRIB1 loader now creates a time coordinate for a time range indicator of 2.
-* NetCDF loader now loads units that are empty strings as dimensionless.
-
-1.8.1
-------
-* The PP loader now carefully handles floating point errors in date time conversions to hours.
-* The handling of fill values for lazy data loaded from NetCDF files is altered, such that the
- _FillValue set in the file is preserved through lazy operations.
-* The risk that cube intersections could return incorrect results due to floating point
- tolerances is reduced.
-* The new GRIB2 loading code is altered to enable the loading of various data representation
- templates; the data value unpacking is handled by the GRIB API.
-* Saving cube collections to NetCDF, where multiple similar aux-factories exist within the cubes,
- is now carefully handled such that extra file variables are created where required in some cases.
-
-1.8.2
------
-* A fix to prevent the error: *AttributeError: 'module' object has no attribute 'date2num'*.
- This was caused by the function :func:`netcdftime.date2num` being removed from the netCDF4
- package in recent versions.
-
-Deprecations
-============
-* The original GRIB loader has been deprecated and replaced with a new
- template-based GRIB loader.
-* Deprecated default NetCDF save behaviour of assigning the outermost
- dimension to be unlimited. Switch to the new behaviour with no auto
- assignment by setting :data:`iris.FUTURE.netcdf_no_unlimited` to True.
-* The former experimental method
- "iris.experimental.regrid.regrid_bilinear_rectilinear_src_and_grid" has been removed, as
- :class:`iris.analysis.Linear` now includes this functionality.
-
-Documentation Changes
-=====================
-* A chapter on :doc:`merge and concatenate ` has been
- added to the :doc:`user guide `.
-* A section on installing Iris using `conda `_ has been
- added to the :doc:`install guide `.
-* Updates to the chapter on
- :doc:`regridding and interpolation `
- have been added to the :doc:`user guide `.
-
diff --git a/docs/iris/src/whatsnew/1.9.rst b/docs/iris/src/whatsnew/1.9.rst
deleted file mode 100644
index 7a4848b434..0000000000
--- a/docs/iris/src/whatsnew/1.9.rst
+++ /dev/null
@@ -1,129 +0,0 @@
-What's New in Iris 1.9
-**********************
-
-:Release: 1.9.2
-:Date: 28th January 2016
-
-This document explains the new/changed features of Iris in version 1.9
-(:doc:`View all changes `.)
-
-Iris 1.9 Features
-=================
-* Support for running on Python 3.4 has been added to the whole code base. Some features which
- depend on external libraries will not be available until they also support Python 3, namely:
-
- * gribapi does not yet provide a Python 3 interface
-
-* Added the UM pseudo level type to the information made available in the STASH_TRANS table in :mod:`iris.fileformats.um._ff_cross_references`
-* When reading "cell_methods" attributes from NetCDF files, allow optional whitespace before the colon.
- This is not strictly in the CF spec, but is a common occurrence.
-* Basic cube arithmetic (plus, minus, times, divide) now supports lazy evaluation.
-* :meth:`iris.analysis.cartography.rotate_winds` can now operate much faster on multi-layer (i.e. > 2-dimensional) cubes,
- as it calculates rotation coefficients only once and reuses them for additional layers.
-
-* Linear regridding of a multi-layer (i.e. > 2-dimensional) cube is now much faster,
- as it calculates transform coefficients just once and reuses them for additional layers.
-* Ensemble statistics can now be saved to GRIB2, using Product Definition Template 4.11.
-
-* Loading of NetCDF data with ocean vertical coordinates now returns a 'depth' in addition to an 'eta' cube.
- This operates on specific defined dimensionless coordinates : see CF spec version 1.6, Appendix D.
-
-* :func:`iris.analysis.stats.pearsonr` updates:
-
- * Cubes can now be different shapes, provided one is broadcastable to the
- other.
- * Accepts weights keyword for weighted correlations.
- * Accepts mdtol keyword for missing data tolerance level.
- * Accepts common_mask keyword for restricting calculation to unmasked pairs of
- cells.
-
-* Added a new point-in-cell regridding scheme, :class:`iris.experimental.regrid.PointInCell`.
-* Added :meth:`iris.analysis.WPERCENTILE` - a new weighted aggregator for calculating
- percentiles.
-* Added cell-method translations for LBPROC=64 and 192 in UM files, encoding 'zonal mean' and 'zonal+time mean'.
-
-* Support for loading GRIB2 messages defined on a Lambert conformal grid has been added to
- the GRIB2 loader.
-* Data on potential-temperature (theta) levels can now be saved to GRIB2, with a fixed surface type of 107.
-* Added several new helper functions for file-save customisation,
- (see also : :doc:`Saving Iris Cubes `):
-
- * :meth:`iris.fileformats.grib.as_pairs`
- * :meth:`iris.fileformats.grib.as_messages`
- * :meth:`iris.fileformats.grib.save_messages`
- * :meth:`iris.fileformats.pp.as_pairs`
- * :meth:`iris.fileformats.pp.as_fields`
- * :meth:`iris.fileformats.pp.save_fields`
-* Loading data from GRIB2 now supports most of the currently defined 'data representation templates' :
- code numbers 0, 1, 2, 3, 4, 40, 41, 50, 51 and 61.
-* When a Fieldsfile is opened for update as a :class:`iris.experimental.um.FieldsFileVariant`,
- unmodified packed data in the file can now be retained in the original form.
- Previously it could only be stored in an unpacked form.
-* When reading and writing NetCDF data, the CF 'flag' attributes,
- "flag_masks", "flag_meanings" and "flag_values" are now preserved through Iris load and save.
-* `mo_pack `_ was added as an optional dependency.
- It is used to encode and decode data in WGDOS packed form.
-* The :meth:`iris.experimental.um.Field.get_data` method can now be used to read Fieldsfile data
- after the original :class:`iris.experimental.um.FieldsFileVariant` has been closed.
-
-Bugs Fixed
-==========
-* Fixed a bug in :meth:`iris.unit.Unit.convert`
- (and the equivalent in `cf_units `_)
- so that it now converts data to the native endianness, without which udunits could not read it correctly.
-* Fixed a bug with loading WGDOS packed data in :mod:`iris.experimental.um`,
- which could occasionally crash, with some data.
-* Ignore non-numeric suffixes in the NumPy version string, which would otherwise crash some regridding routines.
-* Fixed a bug in :mod:`iris.fileformats.um_cf_map` where the standard name
-  for the stash code m01s12i187 was incorrectly set, such that it was inconsistent
-  with the stated unit of measure, 'm s-1'. A different name, a long_name
-  of 'change_over_time_in_upward_air_velocity_due_to_advection' with
-  units of 'm s-1', is now used instead.
-* Fixed a bug in :meth:`iris.cube.Cube.intersection`.
- When edge points were at (base + period), intersection would unnecessarily wrap the data.
-* Fixed a bug in :mod:`iris.fileformats.pp`.
- A previous release removed the ability to pass a partial constraint on STASH attribute.
-* :meth:`iris.plot.default_projection_extent` now correctly raises an exception if a cube has X bounds but no Y bounds, or vice versa.
-  Previously it never failed in this case, as the test was wrong.
-* When loading NetCDF data, a "units" attribute containing unicode characters is now transformed by backslash-replacement.
- Previously this caused a crash. Note: unicode units are *not supported in the CF conventions*.
-* When saving to NetCDF, factory-derived auxiliary coordinates are now correctly saved with different names when they are not identical.
- Previously, such coordinates could be saved with the same name, leading to errors.
-* Fixed a bug in :meth:`iris.experimental.um.FieldsFileVariant.close`,
- which now correctly allocates extra blocks for larger lookups when saving.
- Previously, when larger files open for update were closed, they could be written out with data overlapping the lookup table.
-* Fixed a bug in :class:`iris.aux_factory.OceanSigmaZFactory`
- which sometimes caused crashes when fetching the points of an "ocean sigma z" coordinate.
-
-Version 1.9.1
--------------
-* Fixed a unicode bug preventing standard names from being built cleanly when installing in Python3
-
-Version 1.9.2
--------------
-* New warning regarding data loss if writing to an open file which is also open to read, with lazy data.
-* Removal of a warning about data payload loading from concatenate.
-* Updates to concatenate documentation.
-* Fixed a bug with a name change in the netcdf4-python package.
-* Fixed a bug building the documentation examples.
-* Fixed a bug avoiding sorting classes directly when :meth:`iris.cube.Cube.coord_system` is used in Python3.
-* Fixed a bug regarding unsuccessful dot import.
-
-Incompatible Changes
-====================
-* GRIB message/file reading and writing may not be available for Python 3 due to GRIB API limitations.
-
-Deprecations
-============
-* Deprecated :mod:`iris.unit`, with unit functionality provided by `cf_units `_ instead.
-* When loading from NetCDF, a deprecation warning is emitted if there is vertical coordinate information
- that *would* produce extra result cubes if :data:`iris.FUTURE.netcdf_promote` were set,
- but it is *not* set.
-* Deprecated :class:`iris.aux_factory.LazyArray`
-
-Documentation Changes
-=====================
-* A chapter on :doc:`saving iris cubes ` has been
- added to the :doc:`user guide `.
-* Added script and documentation for building a what's new page from developer-submitted contributions.
- See :doc:`Contributing a "What's New" entry `.
diff --git a/docs/iris/src/whatsnew/2.3.rst b/docs/iris/src/whatsnew/2.3.rst
deleted file mode 100644
index c5a6060146..0000000000
--- a/docs/iris/src/whatsnew/2.3.rst
+++ /dev/null
@@ -1,215 +0,0 @@
-What's New in Iris 2.3.0
-************************
-
-:Release: 2.3.0
-:Date: 2019-10-04
-
-This document explains the new/changed features of Iris in version 2.3.0
-(:doc:`View all changes `.)
-
-Iris 2.3.0 Features
-===================
-.. _showcase:
-
-.. admonition:: Increased Support for CF 1.7
-
- We have introduced several changes that contribute to Iris's support for
- the CF Conventions, including some CF 1.7 additions. We are now able to
- support:
-
- * :ref:`Climatological Coordinates`
- * :ref:`Standard name modifiers`
- * :ref:`Geostationary projection`
-
- You can read more about each of these below.
-
- Additionally, the conventions attribute, added by Iris when saving to
- NetCDF, has been updated to "CF-1.7", accordingly.
-
-.. _climatological:
-.. admonition:: Climatological Coordinate Support
-
- Iris can now load, store and save `NetCDF climatological coordinates
- `_. Any cube time
- coordinate can be marked as a climatological time axis using the boolean
- property: ``climatological``. The climatological bounds are stored in the
- coordinate's ``bounds`` property.
-
- When an Iris climatological coordinate is saved in NetCDF, the NetCDF
- coordinate variable will be given a 'climatology' attribute, and the
- contents of the
- coordinate's ``bounds`` property are written to a NetCDF boundary variable
- called '_bounds'. These are in place of a standard
- 'bounds' attribute and accompanying boundary variable. See below
- for an `example adapted from CF conventions `_:
-
- .. code-block:: none
-
- dimensions:
- time=4;
- bnds=2;
- variables:
- float temperature(time,lat,lon);
- temperature:long_name="surface air temperature";
- temperature:cell_methods="time: minimum within years time: mean over years";
- temperature:units="K";
- double time(time);
- time:climatology="time_climatology";
- time:units="days since 1960-1-1";
- double time_climatology(time,bnds);
- data: // time coordinates translated to date/time format
- time="1960-4-16", "1960-7-16", "1960-10-16", "1961-1-16" ;
- time_climatology="1960-3-1", "1990-6-1",
- "1960-6-1", "1990-9-1",
- "1960-9-1", "1990-12-1",
- "1960-12-1", "1991-3-1" ;
-
- If a climatological time axis is detected when loading NetCDF -
- indicated by the format described above - the ``climatological`` property
- of the Iris coordinate will be set to ``True``.
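-
-A minimal, hedged sketch of marking a time coordinate as climatological (the
-values, names and output filename below are purely illustrative)::
-
-    import numpy as np
-    import iris
-    from iris.coords import DimCoord
-    from iris.cube import Cube
-
-    # A time coordinate whose bounds span the years being averaged over.
-    time = DimCoord([45.0], bounds=[[0.0, 10957.0]],
-                    standard_name='time',
-                    units='days since 1960-01-01')
-    time.climatological = True
-
-    cube = Cube(np.zeros(1), long_name='surface air temperature', units='K')
-    cube.add_dim_coord(time, 0)
-    print(cube.coord('time').climatological)  # True
-
-    # On saving, the coordinate is written with a 'climatology' attribute.
-    iris.save(cube, 'climatology.nc')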
-
-.. admonition:: New Chunking Strategy
-
- Iris now makes better choices of Dask chunk sizes when loading from NetCDF
- files: If a file variable has small, specified chunks, Iris will now choose
- Dask chunks which are a multiple of these up to a default target size.
-
- This is particularly relevant to files with an unlimited dimension, which
- previously could produce a large number of small chunks. This had an adverse
- effect on performance.
-
- In addition, Iris now takes its default chunksize from the default configured
- in Dask itself, i.e. ``dask.config.get('array.chunk-size')``.
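-
-A small configuration sketch (shown only to illustrate where the default
-comes from; the chosen size is arbitrary)::
-
-    import dask.config
-
-    # Iris now takes its default target chunk size from Dask itself:
-    print(dask.config.get('array.chunk-size'))   # e.g. '128MiB'
-
-    # Adjusting the Dask default therefore also adjusts the target size
-    # Iris aims for when choosing NetCDF loading chunks.
-    dask.config.set({'array.chunk-size': '256MiB'})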
-
-.. admonition:: Lazy Statistics
-
- Several statistical operations can now be done lazily, taking advantage of the
- performance improvements offered by Dask:
-
- * :meth:`~iris.cube.Cube.aggregated_by`
- * :class:`~iris.analysis.RMS` (more detail below)
- * :class:`~iris.analysis.MEAN`
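-
-A hedged sketch of a lazy collapse (the cube is built from a Dask array here
-purely to demonstrate that the result stays lazy)::
-
-    import dask.array as da
-    import numpy as np
-    import iris.analysis
-    from iris.coords import DimCoord
-    from iris.cube import Cube
-
-    cube = Cube(da.zeros((10, 20), chunks=(5, 20)), long_name='t', units='K')
-    cube.add_dim_coord(DimCoord(np.arange(10.0), long_name='y'), 0)
-    cube.add_dim_coord(DimCoord(np.arange(20.0), long_name='x'), 1)
-
-    mean = cube.collapsed('x', iris.analysis.MEAN)
-    print(mean.has_lazy_data())  # True - the statistic has not been realised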
-
-----
-
-.. _geostationary:
-.. _standard_name:
-.. _conventions_1.7:
-
-* Cube data equality testing (and hence cube equality) now uses a more relaxed
-  tolerance: this means that some cubes may now test 'equal' that previously
-  did not.
-  Previously, Iris compared cube data arrays using:
-  ``abs(a - b) < 1.e-8``
-
-  We now apply the default operation of :func:`numpy.allclose` instead,
-  which is equivalent to:
-  ``abs(a - b) <= (1.e-8 + 1.e-5 * abs(b))``
-
-* Added support to render HTML for :class:`~iris.cube.CubeList` in Jupyter
- Notebooks and JupyterLab.
-* Loading CellMeasures with integer values is now supported.
-* New coordinate system: :class:`iris.coord_systems.Geostationary`,
- including load and save support, based on the `CF Geostationary projection
- definition `_.
-* :class:`iris.coord_systems.VerticalPerspective` can now be saved to and
- loaded from NetCDF files.
-* :class:`iris.experimental.regrid.PointInCell` moved to
- :class:`iris.analysis.PointInCell` to make this regridding scheme public
-* Iris now supports standard name modifiers. See `Appendix C, Standard Name Modifiers `_ for more information.
-* :meth:`iris.cube.Cube.remove_cell_measure` now also allows removal of a cell
- measure by its name (previously only accepted a CellMeasure object).
-* The :data:`iris.analysis.RMS` aggregator now supports a lazy calculation.
- However, the "weights" keyword is not currently supported by this, so a
- *weighted* calculation will still return a realised result, *and* force
- realisation of the original cube data.
-* Iris now supports NetCDF Climate and Forecast (CF) Metadata Conventions 1.7 (see `CF 1.7 Conventions Document `_ for more information)
-
-
-Iris 2.3.0 Dependency Updates
-=============================
-* Iris now supports Proj4 up to version 5, but not yet 6 or beyond, pending
- `fixes to some cartopy tests `_.
-* Iris now requires Dask >= 1.2 to allow for improved coordinate equality
- checks.
-
-
-Bugs Fixed
-==========
-* Cube equality of boolean data is now handled correctly.
-* Fixed a bug where cell measures were incorrect after a cube
- :meth:`~iris.cube.Cube.transpose` operation. Previously, this resulted in
- cell-measures that were no longer correctly mapped to the cube dimensions.
-* The :class:`~iris.coords.AuxCoord` disregarded masked points and bounds, as did the :class:`~iris.coords.DimCoord`.
-  The fix permits an :class:`~iris.coords.AuxCoord` to contain masked points/bounds, and a TypeError exception is now
-  raised when attempting to create or set the points/bounds of a
-  :class:`~iris.coords.DimCoord` from arrays containing missing points.
-* :class:`iris.coord_systems.VerticalPerspective` coordinate system now uses
- the `CF Vertical perspective definition `_; had been
- erroneously using Geostationary.
-* :class:`~iris.coords.CellMethod` will now only use valid `NetCDF name tokens `_ to reference the coordinates involved in the statistical operation.
-* The following var_name properties will now only allow valid `NetCDF name
- tokens
- `_ to
- reference the said NetCDF variable name. Note that names with a leading
- underscore are not permitted.
- - :attr:`iris.aux_factory.AuxCoordFactory.var_name`
- - :attr:`iris.coords.CellMeasure.var_name`
- - :attr:`iris.coords.Coord.var_name`
- - :attr:`iris.coords.AuxCoord.var_name`
- - :attr:`iris.cube.Cube.var_name`
-* Rendering a cube in Jupyter will no longer crash for a cube with
- attributes containing ``\n``.
-* NetCDF variables which reference themselves in their ``cell_measures``
- attribute can now be read.
-* :func:`~iris.plot.quiver` now handles circular coordinates.
-* The names of cubes loaded from abf/abl files have been corrected.
-* Fixed a bug in UM file loading, where any landsea-mask-compressed fields
- (i.e. with LBPACK=x2x) would cause an error later, when realising the data.
-* :meth:`iris.cube.Cube.collapsed` now handles partial collapsing of
- multidimensional coordinates that have bounds.
-* Fixed a bug in the :data:`~iris.analysis.PROPORTION` aggregator, where cube
- data in the form of a masked array with ``array.mask=False`` would cause an
- error, but possibly only later when the values are actually realised.
- ( Note: since netCDF4 version 1.4.0, this is now a common form for data
- loaded from netCDF files ).
-* Fixed a bug where plotting a cube with a
- :class:`iris.coord_systems.LambertConformal` coordinate system would result
- in an error. This would happen if the coordinate system was defined with one
- standard parallel, rather than two.
- In these cases, a call to
- :meth:`~iris.coord_systems.LambertConformal.as_cartopy_crs` would fail.
-* :meth:`iris.cube.Cube.aggregated_by` now gives correct values in points and
- bounds when handling multidimensional coordinates.
-* Fixed a bug in the :meth:`iris.cube.Cube.collapsed` operation, which caused
- the unexpected realization of any attached auxiliary coordinates that were
- *bounded*. It now correctly produces a lazy result and does not realise
- the original attached AuxCoords.
-
-
-Documentation Changes
-=====================
-* Added a gallery example showing `how to concatenate NEMO ocean model data
- <../examples/Oceanography/load_nemo.html>`_.
-* Added an example in the
- `Loading Iris Cubes: Constraining on Time <../userguide/loading_iris_cubes
- .html#constraining-on-time>`_
- Userguide section, demonstrating how to load data within a specified date
- range.
-* Added notes to the :func:`iris.load` documentation, and the userguide
- `Loading Iris Cubes <../userguide/loading_iris_cubes.html>`_
- chapter, emphasizing that the *order* of the cubes returned by an iris load
- operation is effectively random and unstable, and should not be relied on.
-* Fixed references in the documentation of
- :func:`iris.util.find_discontiguities` to a nonexistent
- "mask_discontiguities" routine : these now refer to
- :func:`~iris.util.mask_cube`.
-
diff --git a/docs/iris/src/whatsnew/aggregate_directory.py b/docs/iris/src/whatsnew/aggregate_directory.py
deleted file mode 100644
index fca098f4d4..0000000000
--- a/docs/iris/src/whatsnew/aggregate_directory.py
+++ /dev/null
@@ -1,322 +0,0 @@
-# (C) British Crown Copyright 2015 - 2019, Met Office
-#
-# This file is part of Iris.
-#
-# Iris is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Lesser General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Iris is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-"""
-Build a release file from files in a contributions directory.
-
-Looks for directories "<...whatsnew>/contributions_<xx.xx>".
-Takes specified "xx.xx" as version, or latest found (alphabetic).
-Writes a file "<...whatsnew>/<xx.xx>.rst".
-
-Valid contributions filenames are of the form:
-    <category>_<date>_summary.txt
-Where "summary" can be any valid chars, and
-    <category> is one of :
-    "newfeature" "bugfix" "incompatiblechange" "deprecate" "docchange", and
-    <date> is in the style "2001-Jan-23".
-
-"""
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-import datetime
-from glob import glob
-import os
-import re
-import argparse
-import warnings
-from operator import itemgetter
-from distutils import version
-
-# Regular expressions: CONTRIBUTION_REGEX matches the filenames of
-# contribution snippets. It is split into three sections separated by _
-# 0. String for the category. 1. ISO8601 date. 2. String for the feature name.
-# RELEASE_REGEX matches the directory names, returning the release.
-CONTRIBUTION_REGEX_STRING = r'(?P<category>.*)'
-CONTRIBUTION_REGEX_STRING += r'_(?P<isodate>\d{4}-\w{3}-\d{2})'
-CONTRIBUTION_REGEX_STRING += r'_(?P<summary>.*)\.txt$'
-CONTRIBUTION_REGEX = re.compile(CONTRIBUTION_REGEX_STRING)
-RELEASEDIR_PREFIX = r'contributions_'
-_RELEASEDIR_REGEX_STRING = RELEASEDIR_PREFIX + r'(?P<release>.*)$'
-RELEASE_REGEX = re.compile(_RELEASEDIR_REGEX_STRING)
-SOFTWARE_NAME = 'Iris'
-EXTENSION = '.rst'
-VALID_CATEGORIES = [
- {'Prefix': 'newfeature', 'Title': 'Features'},
- {'Prefix': 'bugfix', 'Title': 'Bugs Fixed'},
- {'Prefix': 'incompatiblechange', 'Title': 'Incompatible Changes'},
- {'Prefix': 'deprecate', 'Title': 'Deprecations'},
- {'Prefix': 'docchange', 'Title': 'Documentation Changes'}
-]
-VALID_CATEGORY_PREFIXES = [cat['Prefix'] for cat in VALID_CATEGORIES]
-
-
-def _self_root_directory():
- return os.path.abspath(os.path.dirname(__file__))
-
-
-def _decode_contribution_filename(file_name):
- file_name_elements = CONTRIBUTION_REGEX.match(file_name)
- category = file_name_elements.group('category')
- if category not in VALID_CATEGORY_PREFIXES:
- # This is an error
- raise ValueError('Unknown category in contribution filename.')
- isodate = file_name_elements.group('isodate')
- date_of_item = datetime.datetime.strptime(isodate, '%Y-%b-%d').date()
- return category, isodate, date_of_item
-
-
-def is_release_directory(directory_name, release):
- '''Returns True if a given directory name matches the requested release.'''
- result = False
- directory_elements = RELEASE_REGEX.match(directory_name)
- try:
- release_string = directory_elements.group('release')
- directory_release = version.StrictVersion(release_string)
- except (AttributeError, ValueError):
- pass
- else:
- if directory_release == release:
- result = True
- return result
-
-
-def is_compiled_release(root_directory, release):
- '''Returns True if the requested release.rst file exists.'''
- result = False
- compiled_filename = '{!s}{}'.format(release, EXTENSION)
- compiled_filepath = os.path.join(root_directory, compiled_filename)
- if os.path.exists(compiled_filepath) and os.path.isfile(compiled_filepath):
- result = True
- return result
-
-
-def get_latest_release(root_directory=None):
- """
- Implement default=latest release identification.
-
- Returns a valid release code.
-
- """
- if root_directory is None:
- root_directory = _self_root_directory()
- directory_contents = os.listdir(root_directory)
- # Default release to latest visible dir.
- possible_release_dirs = [releasedir_name
- for releasedir_name in directory_contents
- if RELEASE_REGEX.match(releasedir_name)]
- if len(possible_release_dirs) == 0:
- dirspec = os.path.join(root_directory, RELEASEDIR_PREFIX + '*')
- msg = 'No valid release directories found, i.e. {!r}.'
- raise ValueError(msg.format(dirspec))
- release_dirname = sorted(possible_release_dirs)[-1]
- release = RELEASE_REGEX.match(release_dirname).group('release')
- return release
-
-
-def find_release_directory(root_directory, release=None,
- fail_on_existing=True):
- '''
- Returns the matching contribution directory or raises an exception.
-
- Defaults to latest-found release (from release directory names).
- Optionally, fail if the matching release file already exists.
- *Always* fail if no release directory exists.
-
- '''
- if release is None:
- # Default to latest release.
- release = get_latest_release(root_directory)
-
- if fail_on_existing:
- compiled_release = is_compiled_release(root_directory, release)
- if compiled_release:
- msg = ('Specified release {!r} is already compiled : '
- '{!r} already exists.')
- compiled_filename = str(release) + EXTENSION
- raise ValueError(msg.format(release, compiled_filename))
-
- directory_contents = os.listdir(root_directory)
- result = None
- for inode in directory_contents:
- node_path = os.path.join(root_directory, inode)
- if os.path.isdir(node_path):
- release_directory = is_release_directory(inode, release)
- if release_directory:
- result = os.path.join(root_directory, inode)
- break
- if not result:
- msg = 'Contribution folder for release {!s} does not exist : no {!r}.'
- release_dirname = RELEASEDIR_PREFIX + str(release) + '/'
- release_dirpath = os.path.join(root_directory, release_dirname)
- raise ValueError(msg.format(release, release_dirpath))
- return result
-
-
-def generate_header(release, unreleased=False):
- '''Return a list of text lines that make up a header for the document.'''
- if unreleased:
- isodatestamp = ''
- else:
- isodatestamp = datetime.date.today().strftime('%Y-%m-%d')
- header_text = []
- title_template = 'What\'s New in {} {!s}\n'
- title_line = title_template.format(SOFTWARE_NAME, release)
- title_underline = ('*' * (len(title_line) - 1)) + '\n'
- header_text.append(title_line)
- header_text.append(title_underline)
- header_text.append('\n')
- header_text.append(':Release: {!s}\n'.format(release))
- header_text.append(':Date: {}\n'.format(isodatestamp))
- header_text.append('\n')
- description_template = 'This document explains the new/changed features '\
- 'of {} in version {!s}\n'
- header_text.append(description_template.format(SOFTWARE_NAME, release))
- header_text.append('(:doc:`View all changes `.)')
- header_text.append('\n')
- return header_text
-
-
-def read_directory(directory_path):
- '''Parse the items in a specified directory and return their metadata.'''
- directory_contents = os.listdir(directory_path)
- compilable_files_unsorted = []
- misnamed_files = []
- for file_name in directory_contents:
- try:
- category, isodate, date_of_item = \
- _decode_contribution_filename(file_name)
- except (AttributeError, ValueError):
- misnamed_files.append(file_name)
- continue
- compilable_files_unsorted.append({'Category': category,
- 'Date': date_of_item,
- 'FileName': file_name})
- compilable_files = sorted(compilable_files_unsorted,
- key=itemgetter('Date'),
- reverse=True)
- if misnamed_files:
- msg = 'Found contribution file(s) with unexpected names :'
- for filename in misnamed_files:
- full_path = os.path.join(directory_path, filename)
- msg += '\n {}'.format(full_path)
- warnings.warn(msg, UserWarning)
-
- return compilable_files
-
-
-def compile_directory(directory, release, unreleased=False):
- '''Read in source files in date order and compile the text into a list.'''
- if unreleased:
- release = ''
- source_text = read_directory(directory)
- compiled_text = []
- header_text = generate_header(release, unreleased)
- compiled_text.extend(header_text)
- for count, category in enumerate(VALID_CATEGORIES):
- category_text = []
- subtitle_line = ''
- if count == 0:
- subtitle_line += '{} {!s} '.format(SOFTWARE_NAME, release)
- subtitle_line += category['Title'] + '\n'
- subtitle_underline = ('=' * (len(subtitle_line) - 1)) + '\n'
- category_text.append('\n')
- category_text.append(subtitle_line)
- category_text.append(subtitle_underline)
- category_items = [item for item in source_text
- if item['Category'] == category['Prefix']]
- if not category_items:
- continue
- for file_description in category_items:
- entry_path = os.path.join(directory, file_description['FileName'])
- with open(entry_path, 'r') as content_object:
- text = content_object.readlines()
- if not text[-1].endswith('\n'):
- text[-1] += '\n'
- category_text.extend(text)
- category_text.append('\n----\n\n')
- compiled_text.extend(category_text)
- return compiled_text
-
-
-def check_all_contributions_valid(release=None, quiet=False, unreleased=False):
-    """Scan the contributions directory for badly-named files."""
- root_directory = _self_root_directory()
- # Check there are *some* contributions directory(s), else silently pass.
- contribs_spec = os.path.join(root_directory, RELEASEDIR_PREFIX + '*')
- if len(glob(contribs_spec)) > 0:
- # There are some contributions directories: check latest / specified.
- if release is None:
- release = get_latest_release()
- if not quiet:
- msg = 'Checking whatsnew contributions for release "{!s}".'
- print(msg.format(release))
- release_directory = find_release_directory(root_directory, release,
- fail_on_existing=False)
- # Run the directory scan, but convert any warning into an error.
- with warnings.catch_warnings():
- warnings.simplefilter('error')
- compile_directory(release_directory, release, unreleased)
- if not quiet:
- print('done.')
-
-
-def run_compilation(release=None, quiet=False, unreleased=False):
- '''Write a draft release.rst file given a specified uncompiled release.'''
- if release is None:
- # This must exist !
- release = get_latest_release()
- if not quiet:
- msg = 'Building release document for release "{!s}".'
- print(msg.format(release))
- root_directory = _self_root_directory()
- release_directory = find_release_directory(root_directory, release)
- compiled_text = compile_directory(release_directory, release, unreleased)
- if unreleased:
- compiled_filename = 'latest' + EXTENSION
- else:
- compiled_filename = str(release) + EXTENSION
- compiled_filepath = os.path.join(root_directory, compiled_filename)
- with open(compiled_filepath, 'w') as output_object:
- for string_line in compiled_text:
- output_object.write(string_line)
- if not quiet:
- print('done.')
-
-
-if __name__ == '__main__':
- PARSER = argparse.ArgumentParser()
- PARSER.add_argument("release", help="Release number to be compiled",
- nargs='?', type=version.StrictVersion)
- PARSER.add_argument(
- '-c', '--checkonly', action='store_true',
- help="Check contribution file names, do not build.")
- PARSER.add_argument(
- '-u', '--unreleased', action='store_true',
- help=("Label the release version as '', "
- "and its date as ''."))
- PARSER.add_argument(
- '-q', '--quiet', action='store_true',
- help="Do not print progress messages.")
- ARGUMENTS = PARSER.parse_args()
- release = ARGUMENTS.release
- unreleased = ARGUMENTS.unreleased
- quiet = ARGUMENTS.quiet
- if ARGUMENTS.checkonly:
- check_all_contributions_valid(release, quiet=quiet,
- unreleased=unreleased)
- else:
- run_compilation(release, quiet=quiet, unreleased=unreleased)
diff --git a/docs/iris/src/whatsnew/contributions_3.0.0/deprecate_2019-Oct-14_remove_deprecated_future_flags.txt b/docs/iris/src/whatsnew/contributions_3.0.0/deprecate_2019-Oct-14_remove_deprecated_future_flags.txt
deleted file mode 100644
index 5bd2903e9b..0000000000
--- a/docs/iris/src/whatsnew/contributions_3.0.0/deprecate_2019-Oct-14_remove_deprecated_future_flags.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-* The deprecated :class:`iris.Future` flags `cell_date_time_objects`,
- `netcfd_promote`, `netcdf_no_unlimited` and `clip_latitudes` have
- been removed.
\ No newline at end of file
diff --git a/docs/iris/src/whatsnew/index.rst b/docs/iris/src/whatsnew/index.rst
deleted file mode 100644
index 179216ccb5..0000000000
--- a/docs/iris/src/whatsnew/index.rst
+++ /dev/null
@@ -1,31 +0,0 @@
-.. _iris_whatsnew:
-
-What's new in Iris
-******************
-
-These "What's new" pages describe the important changes between major
-Iris versions.
-
-.. toctree::
- :maxdepth: 2
-
- latest.rst
- 3.0.rst
- 2.3.rst
- 2.2.rst
- 2.1.rst
- 2.0.rst
- 1.13.rst
- 1.12.rst
- 1.11.rst
- 1.10.rst
- 1.9.rst
- 1.8.rst
- 1.7.rst
- 1.6.rst
- 1.5.rst
- 1.4.rst
- 1.3.rst
- 1.2.rst
- 1.1.rst
- 1.0.rst
diff --git a/docs/iris/src/whitepapers/change_management.rst b/docs/iris/src/whitepapers/change_management.rst
deleted file mode 100644
index b279c91b96..0000000000
--- a/docs/iris/src/whitepapers/change_management.rst
+++ /dev/null
@@ -1,451 +0,0 @@
-.. _change_management:
-
-Change Management in Iris from the User's perspective
-*****************************************************
-
-As Iris changes, user code will need revising from time to time to keep it
-working, or to maintain best practice. At the very least, you are advised to
-review existing code to ensure it functions correctly with new releases.
-
-Here, we define ways to make this as easy as possible.
-
-.. include:: ../userguide/change_management_goals.txt
-
-
-Key principles you can rely on
-==============================
-
-Iris code editions are published as defined version releases, with a given
-major and minor version number in the version name, "major.minor.xxx",
-as explained in the :ref:`releases section ` below.
-
- * Code that currently works should **still work**, and have the same
- results and effects, in any subsequent sub-release with the same major
- release number.
-
- * The only time we will make changes that can break existing code is at
- a **major release**.
-
- * At a major release, code that works **and emits no deprecation warnings**
- in the latest previous (minor) release should still work, and have the
- same results and effects.
-
-
-**What can possibly go wrong ?**
-
-If your code produces :ref:`deprecation warnings <iris_deprecations>`, then it
-*could* behave differently, or fail, at the next major release.
-
-
-
-User Actions : How you should respond to changes and releases
-=============================================================
-
-Checklist :
-
-* when a new **testing or candidate version** is announced
- if convenient, test your working legacy code against it and report any problems.
-
-* when a new **minor version is released**
-
-  * review the 'What's New' documentation to see if it introduces any
- deprecations that may affect you.
- * run your working legacy code and check for any deprecation warnings,
- indicating that modifications may be necessary at some point
- * when convenient :
-
- * review existing code for use of deprecated features
- * rewrite code to replace deprecated features
-
-* when a new major version is **announced**
- ensure your code runs, without producing deprecation warnings, in the
- previous minor release
-
-* when a new major version is **released**
- check for new deprecation warnings, as for a minor release
-
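-For example, a minimal sketch (standard library only) of one way to surface
-deprecation warnings while exercising legacy code::
-
-    import warnings
-
-    import iris
-
-    # Promote DeprecationWarning (and its subclasses) to errors while running
-    # a representative piece of legacy code, so nothing slips through unnoticed.
-    with warnings.catch_warnings():
-        warnings.simplefilter("error", DeprecationWarning)
-        cubes = iris.load("legacy_input.pp")  # hypothetical input file
-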
-
-Details
-=======
-
-The Iris change process aims to minimise the negative effects of change, by
-providing :
-
- * defined processes for release and change management
- * release versioning
- * backwards code compatibility through minor version releases
- * a way to ensure compatibility with a new major version release
- * deprecation notices and warnings to highlight all impending changes
-
-Our practices are intended to be compatible with the principles defined in the
-`SemVer project <https://semver.org/>`_ .
-
-Key concepts covered here:
- * :ref:`Release versions <iris_change_releases>`
- * :ref:`Backwards compatibility <iris_backward_compatibility>`
- * :ref:`Deprecation <iris_deprecations>`
-
-
-.. _iris_backward_compatibility:
-
-Backwards compatibility
------------------------
-
-"Backwards-compatible" changes are those that leave any existing valid API
-usages unchanged (see :ref:`terminology <iris_api>` below).
-Minor releases may only include backwards-compatible changes.
-
-The following are examples of backward-compatible changes :
-
- * changes to documentation
- * adding to a module : new submodules, functions, classes or properties
- * adding to a class : new methods or properties
- * adding to a function or method : new **optional** arguments or keywords
-
-The following are examples of **non-** backward-compatible changes :
-
- * removing (which includes *renaming*) any public module or submodule
- * removing any public component : a module, class, method, function or
- data object property of a public API component
- * removing any property of a public object
- * removing an argument or keyword from a method or function
- * adding a required argument to a method or function
- * removing a keyword (even one that has no effect)
- * changing the effect of *any* particular combination of arguments and/or
- keywords
-
-Note that it is also possible to modify the behaviour of an existing usage by
-making it depend on a newly-defined external control variable. This is
-effectively a change to the 'default behaviour' of a specific usage. Although
-this seems similar to adding a keyword, the cases where the new behaviour
-operates and where it does not are not distinguishable by a different code
-usage, which makes this somewhat dangerous. We do use this type of change,
-but any behaviour 'mode' controls of this sort are usually added as part of the
-:class:`iris.Future` definition.
-See :ref:`Usage of iris.FUTURE <iris_future_usage>`, below.
-
-
-.. _iris_api:
-
-Terminology : API, features, usages and behaviours
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-The API is the components of the iris module and its submodules which are
-"public" : In Python, by convention, this normally means everything whose name
-does not have a single leading underscore "_".
-This applies to all public modules and their properties : submodules, contained
-public classes, data and properties, functions and methods.
-An exception is when a module or class defines an '__all__' property : In
-that case, the public aspects are just the ones listed there.
-
-Note: these are standard Python conventions, not specific to Iris.
-See: `PEP8 <https://www.python.org/dev/peps/pep-0008/>`_.
-
-The Iris project considers all public API features as "supported", which means
-that we will not change or remove them without deprecation, and will undertake
-to fix any bugs discovered.
-We do however make an important exception for the content of the 'experimental'
-module : APIs in :mod:`iris.experimental` module are published for initial
-evaluation and feedback, and can be revised or removed without warning at a
-subsequent release.
-
-A "feature" of the API includes public objects as above, but may also be used
-more loosely to indicate a class or mode of behaviour, for example when a
-keyword has a specific value, like "interpolate(mode='linear')".
-
-A "usage" is any code referring to public API elements, for example :
-
- * `print(iris.thing)`
- * `iris.submodule.call(arg1)`
- * `iris.module.call(arg1, arg2, *more_args)`
- * `iris.module.call(arg1, arg2, control=3)`
- * `x = iris.module.class(arg, key=4)`
-
-A "behaviour" is whatever Iris does when you invoke a particular API usage,
-encompassing both returned values and any side effects such as code state
-changes or data written to files.
-
-As the above code examples are all public feature usages, they should
-therefore continue to work, with the same behaviours, at least until the next
-**major** version release.
-
-.. Note::
- What is the "same" behaviour, for backwards-compatibility purposes ?
-
- Unfortunately, the guarantee to preserve "what Iris does" within a major
- version cannot ensure *totally* identical and repeatable behaviour for any
- possible usage, because this can also depend on the exact installed
- versions of all dependencies (i.e. the other Python modules and system
- libraries that Iris uses).
-
- See :ref:`iris_dependency_versions`.
-
- Minor-release code changes are backwards-compatible, meaning that they must
- result in "the same" actions from Iris.
- Ultimately, however, this is only a statement of intent, as we need freedom
- to modify code with "no change" effects, yet *any* change to Iris code or
- dependencies could alter total behaviour in some respects :
- For instance, it could take more or less time or memory, produce results in
- a different sequence, or perform a calculation slightly differently (with
- possible small differences in floating point results).
-
-    As behaviour can only be tested in specific ways on a specific
-    installation, any given user installation may experience changes in
-    behaviour, though hopefully always slight, with a minor release change.
- This applies to changes in the Iris minor release version, or a different
- version of some dependency; or other changes to the operating system
- environment.
-
-
-.. _iris_change_releases:
-
-Releases and Versions
----------------------
-
-
-Iris releases have a unique identifying version string, in the form
-"<major>.<minor>.<micro>", available to code as
-:data:`iris.__version__` .
-
-This contains major and minor release numbers. The numbering and meaning of
-these are defined following the `SemVer project <https://semver.org/>`_.
-
-The essential aspects of the "<major>.<minor>.<micro>" arrangement
-are :
-
-  * "<major>", "<minor>" and "<micro>" are all integers, thus version
- 2.12 is later than 2.2 (i.e. it is "two point twelve", not "two point one
- two").
-
-  * "<major>.<minor>" denotes the software release version.
-
-  * A non-zero "<micro>" denotes a bugfix version, thus a release "X.Y.0" may
- be followed by "X.Y.1", "X.Y.2" etc, which *only* differ by containing
-    bugfixes. Any bugfix release supersedes its predecessors, and does not
- change any (valid) APIs or behaviour : hence, it is always advised to
- replace a given version with its latest bugfix successor, and there
- should be no reason not to.
-
-  * "<label>" is blank for formal releases. It is used to indicate
-    provisional software for testing : The version string in a development
- code branch is always labelled "-DEV", and release candidates for testing
- during the release process are labelled "-rc1", "-rc2" etc.
- For development code, the version number is that of the *next* release,
- which this code version is progressing towards, e.g. "1.2-DEV" for all
- code branches since the 1.1 release and intended for release in "1.2".
-
-.. note::
-   Our use of "-<label>" is typical, but does not follow strict SemVer
- principles.
-
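-For example, version strings compare numerically, component by component (a
-minimal sketch using the third-party ``packaging`` library, not an Iris API)::
-
-    from packaging.version import Version
-
-    # "two point twelve" is later than "two point two" ...
-    assert Version("2.12") > Version("2.2")
-    # ... and a bugfix release is later than its parent release.
-    assert Version("2.2.1") > Version("2.2")
-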
-The code for a specific release is identified by a git tag which is the version
-string : see
-:ref:`Developer's Guide section on releases `.
-
-
-Major and Minor Releases
-^^^^^^^^^^^^^^^^^^^^^^^^
-
-The term "release" refers both to a specific state of the Iris code, which we
-have assigned a given version string, *and* the act of defining it
-(i.e. we "release a release").
-
-According to `SemVer `_ principles, changes that alter the
-behaviour of existing code can only be made at a **major** release, i.e. when
-"X.0" is released following the last previous "(X-1).Y.Z".
-
-*Minor* releases, by contrast, consist of bugfixes, new features, and
-deprecations : Any valid existing code should be unaffected by these, so it
-will still run with the same results.
-
-At a major release, only **deprecated** behaviours and APIs can be changed or
-removed.
-
-
-.. _iris_deprecations:
-
-Deprecations
-------------
-
-A deprecation is issued when we decide that an *existing* feature needs to be
-removed or modified : We add notices to the documentation, and issue a runtime
-"Deprecation Warning" whenever the feature is used.
-
-For a wider perspective, see : ``_.
-For the developer view of this, see
-:ref:`Developer's Guide section on deprecations `.
-
-Deprecation manages incompatible changes in a strictly controlled way.
-This allows APIs to evolve to the most effective form, even when that means
-that existing code could behave differently or fail : This is important
-because the freedom to remove features helps prevent the API becoming
-progressively cluttered, and modifying existing behaviours allows us to use
-the most natural code syntax for the most used features.
-
-We can only remove features or change behaviours at a major release. Thus, we
-first deprecate the feature in a minor release, to provide adequate warning
-that existing code may need to be modified.
-
-When we make a release that introduces a deprecation :
-
- * a deprecation notice appears in the
-    :ref:`What's New section <iris_whatsnew>`
- * deprecation notes are included in all relevant parts of the :ref:`reference
- documentation `
- * a runtime warning is produced when the old feature is used or triggered
-
-In most cases, we also provide detailed advice in the documentation and/or
-warning messages on how to replace existing usage with a 'new' way of doing
-things.
-In all cases, we must provide a transitional period where both old and new
-features are available :
-
- * the 'old' style works as it did before
- * any usage of the 'old' features will emit a
- :class:`warnings.WarningMessage` message, noting that the feature is
- deprecated and what to use instead
- * the 'new' style can be adopted as soon as convenient
-
-This is to warn users :
-
- * not to use the deprecated features in any new code, *and*
- * eventually to rewrite old code to use the newer or better alternatives
-
-
-Deprecated features support through the Release cycle
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-The whole point of a deprecation is that the feature continues to work, but
-with a warning, for some time before an unavoidable change occurs.
-When a version that introduces a deprecation is released, the effects are as
-follows:
-
- * code that may be affected by the proposed change will result in
- deprecation warnings
- * code that currently works will, however, continue to work unchanged, at
- least until the next major release
- * you can avoid all deprecation warnings by suitable changes to your code
- * code which uses no deprecated features, and thus produces no deprecation
- warnings, will continue to work unchanged even at a **major** release
- * code that generates deprecation warnings may cease to work at the next
- **major** release.
-
-
-.. _iris_future_usage:
-
-Future options, `iris.FUTURE`
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-A special approach is needed where the replacement behaviour is not controlled
-by a distinct API usage.
-
-When we extend an API, or add a new feature, we usually add a new method or
-keyword. In those cases, code using the new feature is clearly distinct from
-any previous (valid) usage, so this is relatively simple to manage.
-However, sometimes we really need to change the *way* an API works, without
-modifying or extending (i.e. complicating) the existing user interface.
-In that case, existing user code might sometimes have *different* behaviour
-with the new release, which we obviously need to avoid.
-
-**For example :**
-
- We might decide there is a more useful way of loading cubes from files of a
- particular input data format.
-
- * the user code usage is simply by calls to "iris.load"
- * the change is not a bugfix, as the old way isn't actually "wrong"
- * we don't want to add an extra keyword into all the relevant calls
- * we don't see a longterm future for the existing behaviour : we
- expect everyone to adopt the new interpretation, eventually
-
-For changes of this sort, the release will define a new boolean property of the
-:data:`iris.FUTURE` object, as a control to select between the 'old' and 'new'
-behaviours, with values False='old' and True='new'.
-See :data:`iris.Future` for examples.
-
-In these cases, as any "deprecated usage" is not clearly distinguishable in the
-form of the user code, it is **especially** important to take note of any
-deprecation messages appearing when legacy code runs.
-
-
-**Sequence of changes to `iris.FUTURE`**
-
-To allow user code to avoid any unexpected behavioural changes, the
-:data:`iris.Future` controls follow a special management cycle, as follows
-(see also the relevant :ref:`Developer Guide section `):
-
-At (minor) release "<major>.<minor>":
- * Changes to API:
- * the new behaviour is made available, alongside the old one
-
-     * a new future option `iris.FUTURE.<option_name>` is provided to switch
- between them.
-
-     * the new option defaults to `iris.FUTURE.<option_name>=False`, meaning
- the 'old' behaviour is the default.
-
- * when any relevant API call is made that invokes the old behaviour, a
- deprecation warning is emitted.
-
- * User actions:
-
- * If your code encounters the new deprecation warning, you should try
- enabling the new control option, and make any necessary rewrites to
- make it work. This will stop the deprecation warning appearing.
-
- * If you encounter problems making your code work with the new
- behaviour, and don't have time to fix them, you should make your
- code explicitly turn *off* the option for now, i.e. ::
-        `iris.FUTURE.<option_name> = False`.
- This locks you into the old behaviour, but your code will continue
- to work, even beyond the next major release when the default
- behaviour will change (see on).
-
-At (major) release "<major+1>.0":
- * Changes to API:
-     * the control default is changed to `iris.FUTURE.<option_name>=True`
-
- * the control property is *itself* deprecated, so that assigning to it
- now results in a deprecation warning.
-
- * when any affected API call is made, a deprecation warning is (still)
- emitted, if the old behaviour is in force. The "old" behaviour is,
- however, still available and functional.
-
- * User actions:
-
- * If your code is already using the "new" behaviour, it will now work
- without needing to set the Future option. *You should remove* the
- code which enables the option, as this will now emit a deprecation
- message. In the *next* major release, this would cause an error.
-
- * If your code is explicitly turning the option off, it will continue
- to work in the same way at this point, but obviously time is
-      running out.
-
- * If your code is still using the old behaviour and *not* setting the
- control option at all, its behaviour might now have changed
- unexpectedly and you should review this.
-
-At (major) release "<major+2>.0":
- * Changes to API:
- * the control property is removed
- * the "old" behaviour is removed
-
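-For illustration, adopting a new behaviour during the transition period might
-look like this (a minimal sketch; ``example_future_flag`` is a hypothetical
-option name, not a real :data:`iris.FUTURE` property)::
-
-    import iris
-
-    # Opt in to the 'new' behaviour globally ...
-    iris.FUTURE.example_future_flag = True
-
-    # ... or only within a limited scope, using the context manager.
-    with iris.FUTURE.context(example_future_flag=True):
-        cubes = iris.load("some_input.nc")  # hypothetical input file
-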
-
-
-.. _iris_dependency_versions:
-
-Versions of Installed Dependencies
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-The dependencies of Iris (required and optional) are defined in
-:ref:`installing_iris`.
-
-Iris does not specify exact required versions for its dependencies, but it may
-specify a minimum acceptable version number. Iris is normally expected to be
-compatible with *any* version up to the latest current release.
-
-When a new release of a dependency is found to cause problems, Iris may define
-the supported version more precisely, but this would be a temporary fix which
-should be removed in a later release.
diff --git a/docs/iris/src/whitepapers/index.rst b/docs/iris/src/whitepapers/index.rst
deleted file mode 100644
index dd0876d257..0000000000
--- a/docs/iris/src/whitepapers/index.rst
+++ /dev/null
@@ -1,12 +0,0 @@
-.. _whitepapers_index:
-
-============================
-Iris technical 'Whitepapers'
-============================
-Extra information on specific technical issues.
-
-.. toctree::
- :maxdepth: 1
-
- um_files_loading.rst
- missing_data_handling.rst
diff --git a/docs/iris/src/IEP/IEP001.adoc b/docs/src/IEP/IEP001.adoc
similarity index 82%
rename from docs/iris/src/IEP/IEP001.adoc
rename to docs/src/IEP/IEP001.adoc
index d38b2e8478..2daef2363a 100644
--- a/docs/iris/src/IEP/IEP001.adoc
+++ b/docs/src/IEP/IEP001.adoc
@@ -119,7 +119,7 @@ cube.sel(height=1.5)
The semantics of position-based slices will continue to match that of normal Python slices. The start position is included, the end position is excluded.
-Value-based slices will be stricly inclusive, with both the start and end values included. This behaviour differs from normal Python slices but is in common with pandas.
+Value-based slices will be strictly inclusive, with both the start and end values included. This behaviour differs from normal Python slices but is in common with pandas.
Just as for normal Python slices, we do not need to provide the ability to control the include/exclude behaviour for slicing.
@@ -162,12 +162,12 @@ There is a risk that this topic could bog down when dealing with non-standard ca
* Boolean array indexing
* Lambdas?
* What to do about constrained loading?
-* Relationship to http://scitools.org.uk/iris/docs/v1.9.2/iris/iris/cube.html#iris.cube.Cube.intersection[iris.cube.Cube.intersection]?
+* Relationship to https://scitools.org.uk/iris/docs/v1.9.2/iris/iris/cube.html#iris.cube.Cube.intersection[iris.cube.Cube.intersection]?
* Relationship to interpolation (especially nearest-neighbour)?
** e.g. What to do about values that don't exist?
*** pandas throws a KeyError
-*** xarray supports (several) nearest-neighbour schemes via http://xarray.pydata.org/en/stable/indexing.html#nearest-neighbor-lookups[`data.sel()`]
-*** Apparently http://holoviews.org/[holoviews] does nearest-neighbour interpolation.
+*** xarray supports (several) nearest-neighbour schemes via https://xarray.pydata.org/en/stable/indexing.html#nearest-neighbor-lookups[`data.sel()`]
+*** Apparently https://holoviews.org/[holoviews] does nearest-neighbour interpolation.
* multi-dimensional coordinate => unroll?
* var_name only selection? `cube.vloc(t0=12)`
* Orthogonal only? Or also independent? `cube.loc_points(lon=[1, 1, 5], lat=[31, 33, 32])`
@@ -185,9 +185,9 @@ cube.interpolate(
## References
. Iris
- * http://scitools.org.uk/iris/docs/v1.9.2/iris/iris.html#iris.Constraint[iris.Constraint]
- * http://scitools.org.uk/iris/docs/v1.9.2/userguide/subsetting_a_cube.html[Subsetting a cube]
-. http://pandas.pydata.org/pandas-docs/stable/indexing.html[pandas indexing]
-. http://xarray.pydata.org/en/stable/indexing.html[xarray indexing]
-. http://legacy.python.org/dev/peps/pep-0472/[PEP 472 - Support for indexing with keyword arguments]
-. http://nbviewer.jupyter.org/gist/rsignell-usgs/13d7ce9d95fddb4983d4cbf98be6c71d[Time slicing NetCDF or OPeNDAP datasets] - Rich Signell's xarray/iris comparison focussing on time handling and performance
+ * https://scitools.org.uk/iris/docs/v1.9.2/iris/iris.html#iris.Constraint[iris.Constraint]
+ * https://scitools.org.uk/iris/docs/v1.9.2/userguide/subsetting_a_cube.html[Subsetting a cube]
+. https://pandas.pydata.org/pandas-docs/stable/indexing.html[pandas indexing]
+. https://xarray.pydata.org/en/stable/indexing.html[xarray indexing]
+. https://legacy.python.org/dev/peps/pep-0472/[PEP 472 - Support for indexing with keyword arguments]
+. https://nbviewer.jupyter.org/gist/rsignell-usgs/13d7ce9d95fddb4983d4cbf98be6c71d[Time slicing NetCDF or OPeNDAP datasets] - Rich Signell's xarray/iris comparison focussing on time handling and performance
diff --git a/docs/src/Makefile b/docs/src/Makefile
new file mode 100644
index 0000000000..8d652878f6
--- /dev/null
+++ b/docs/src/Makefile
@@ -0,0 +1,75 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS =
+SPHINXBUILD = sphinx-build
+PAPER =
+BUILDDIR = _build
+SRCDIR = .
+
+# See https://www.sphinx-doc.org/en/master/man/sphinx-build.html?highlight=--keep-going#cmdoption-sphinx-build-W
+WARNING_TO_ERROR = -W --keep-going
+
+# Internal variables.
+PAPEROPT_a4 = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help clean html html-noplot dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest show
+
+help:
+	@echo "Please use \`make <target>' where <target> is one of"
+ @echo " help to view this help"
+ @echo " html to make standalone HTML files"
+ @echo " html-noplot to make standalone HTML files, skip gallery"
+ @echo " html-noapi to make standalone HTML files, skip the API"
+ @echo " html-quick to make standalone HTML files, skip the gallery and API"
+ @echo " clean to remove all built files"
+ @echo " doctest to run all doctests embedded in the documentation (if enabled)"
+ @echo " linkcheck to check all external links for integrity"
+ @echo " show to open the built documentation in the default browser"
+ @echo " livehtml to auto build the docs when any file changes are detected."
+ @echo " You need to install sphinx-autobuild first:"
+ @echo " conda install -c conda-forge sphinx-autobuild"
+
+html:
+ $(SPHINXBUILD) $(WARNING_TO_ERROR) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/html"
+
+html-noplot:
+ $(SPHINXBUILD) -D plot_gallery=0 -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+ @echo
+ @echo "Build finished. The HTML (no gallery docs) pages are in $(BUILDDIR)/html"
+
+html-noapi:
+ export SKIP_API=1; $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+ @echo
+ @echo "Build finished. The HTML (no api docs) pages are in $(BUILDDIR)/html"
+
+html-quick:
+ export SKIP_API=1; $(SPHINXBUILD) -D plot_gallery=0 -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+ @echo
+ @echo "Build finished. The HTML (no gallery or api docs) pages are in $(BUILDDIR)/html"
+
+clean:
+ -rm -rf $(BUILDDIR)
+ -rm -rf $(SRCDIR)/generated
+
+doctest:
+ $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+ @echo "Testing of doctests in the sources finished, look at the "
+ @echo "results in $(BUILDDIR)/doctest/output.txt."
+
+linkcheck:
+ $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+ @echo
+ @echo "Link check complete; look for any errors in the above output "
+ @echo "or in $(BUILDDIR)/linkcheck/output.txt."
+
+show:
+ @python -c "import webbrowser; webbrowser.open_new_tab('file://$(shell pwd)/$(BUILDDIR)/html/index.html')"
+
+livehtml:
+	sphinx-autobuild "$(SRCDIR)" "$(BUILDDIR)/html" $(SPHINXOPTS) --ignore generated $(O)
\ No newline at end of file
diff --git a/docs/src/_static/README.md b/docs/src/_static/README.md
new file mode 100644
index 0000000000..b9f2877a30
--- /dev/null
+++ b/docs/src/_static/README.md
@@ -0,0 +1,31 @@
+# Iris logos
+
+![Iris logo](iris-logo-title.svg)
+
+Code for generating the logos is at:
+[SciTools/marketing/iris/logo/generate_logo.py](https://github.com/SciTools/marketing/blob/master/iris/logo/generate_logo.py)
+
+See the docstring of the `generate_logo()` function for more information.
+
+## Why a scripted logo?
+
+SVG logos are ideal for source-controlled projects:
+
+* Low file size, with infinitely scaling quality
+* Universally recognised vector format, editable by many software packages
+* XML-style content = human-readable diff when changes are made
+
+But Iris' logo is difficult to reproduce/edit using an SVG editor alone:
+
+* Includes correctly projected, low resolution coastlines
+* Needs precise alignment of the 'visual centre' of the iris with the centres
+ of the Earth and the image
+
+An SVG image is simply an XML file, so it can easily be assembled automatically
+with a script, which can also be engineered to address the above problems.
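+
+As a toy sketch of the idea only (this is *not* the real `generate_logo.py`):
+
+```python
+# Assemble a trivial SVG "logo" by writing the XML directly from Python.
+size = 100  # px; positioning can be computed precisely rather than eyeballed
+svg = (
+    f'<svg xmlns="http://www.w3.org/2000/svg" width="{size}" height="{size}">'
+    f'<circle cx="{size / 2}" cy="{size / 2}" r="{size / 3}" fill="#155087"/>'
+    "</svg>"
+)
+with open("toy_logo.svg", "w") as f:
+    f.write(svg)
+```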
+
+Further advantages of using a script:
+
+* Parameterised text, making it easy to standardise the logo across all Iris
+ packages
+* Can generate an animated GIF/SVG of a rotating Earth
diff --git a/docs/src/_static/icon_api.svg b/docs/src/_static/icon_api.svg
new file mode 100644
index 0000000000..bf2f8d67bb
--- /dev/null
+++ b/docs/src/_static/icon_api.svg
@@ -0,0 +1,155 @@
+<!-- SVG markup omitted -->
\ No newline at end of file
diff --git a/docs/src/_static/icon_development.svg b/docs/src/_static/icon_development.svg
new file mode 100644
index 0000000000..dbc342688c
--- /dev/null
+++ b/docs/src/_static/icon_development.svg
@@ -0,0 +1,63 @@
+<!-- SVG markup omitted -->
diff --git a/docs/src/_static/icon_instructions.svg b/docs/src/_static/icon_instructions.svg
new file mode 100644
index 0000000000..62b3fc3620
--- /dev/null
+++ b/docs/src/_static/icon_instructions.svg
@@ -0,0 +1,162 @@
+<!-- SVG markup omitted -->
\ No newline at end of file
diff --git a/docs/src/_static/icon_new_product.svg b/docs/src/_static/icon_new_product.svg
new file mode 100644
index 0000000000..f222e1e066
--- /dev/null
+++ b/docs/src/_static/icon_new_product.svg
@@ -0,0 +1,182 @@
+<!-- SVG markup omitted -->
diff --git a/docs/src/_static/icon_shuttle.svg b/docs/src/_static/icon_shuttle.svg
new file mode 100644
index 0000000000..46ba64d2e0
--- /dev/null
+++ b/docs/src/_static/icon_shuttle.svg
@@ -0,0 +1,71 @@
+<!-- SVG markup omitted -->
diff --git a/docs/src/_static/icon_support.png b/docs/src/_static/icon_support.png
new file mode 100644
index 0000000000..567cdb1b2f
Binary files /dev/null and b/docs/src/_static/icon_support.png differ
diff --git a/docs/src/_static/icon_thumb.png b/docs/src/_static/icon_thumb.png
new file mode 100644
index 0000000000..6a14875e22
Binary files /dev/null and b/docs/src/_static/icon_thumb.png differ
diff --git a/docs/src/_static/iris-logo-title-dark.svg b/docs/src/_static/iris-logo-title-dark.svg
new file mode 100644
index 0000000000..b7358aafec
--- /dev/null
+++ b/docs/src/_static/iris-logo-title-dark.svg
@@ -0,0 +1,107 @@
+<!-- SVG markup omitted -->
\ No newline at end of file
diff --git a/docs/src/_static/iris-logo-title.svg b/docs/src/_static/iris-logo-title.svg
new file mode 100644
index 0000000000..98dd1a73d5
--- /dev/null
+++ b/docs/src/_static/iris-logo-title.svg
@@ -0,0 +1,107 @@
+<!-- SVG markup omitted -->
+<!-- desc: Banner logo for the SciTools Iris project - https://github.com/SciTools/iris/ -->
+<!-- text: Iris -->
\ No newline at end of file
diff --git a/docs/src/_static/iris-logo.svg b/docs/src/_static/iris-logo.svg
new file mode 100644
index 0000000000..fe49411b45
--- /dev/null
+++ b/docs/src/_static/iris-logo.svg
@@ -0,0 +1,104 @@
+<!-- SVG markup omitted -->
+<!-- desc: Logo for the SciTools Iris project - https://github.com/SciTools/iris/ -->
\ No newline at end of file
diff --git a/docs/src/_static/theme_override.css b/docs/src/_static/theme_override.css
new file mode 100644
index 0000000000..355119f8a5
--- /dev/null
+++ b/docs/src/_static/theme_override.css
@@ -0,0 +1,28 @@
+/* import the standard theme css */
+@import url("styles/theme.css");
+
+/* now we can add custom css.... */
+
+/* Used for very strong warning */
+#slim-red-box-banner {
+ background: #ff0000;
+ box-sizing: border-box;
+ color: #ffffff;
+ font-weight: normal;
+ padding: 0.5em;
+}
+
+#slim-red-box-banner a {
+ color: #ffffff;
+ font-weight: normal;
+ text-decoration: underline;
+}
+
+/* bullet point list with green ticks */
+ul.squarelist {
+ /* https://developer.mozilla.org/en-US/docs/Web/CSS/list-style-type */
+ list-style-type: "\2705";
+ margin-left: 0;
+ text-indent: 1em;
+ padding-left: 5em;
+}
diff --git a/docs/src/_templates/custom_footer.html b/docs/src/_templates/custom_footer.html
new file mode 100644
index 0000000000..f81fcc583e
--- /dev/null
+++ b/docs/src/_templates/custom_footer.html
@@ -0,0 +1 @@
+Built using Python {{ python_version }}.
diff --git a/docs/src/_templates/custom_sidebar_logo_version.html b/docs/src/_templates/custom_sidebar_logo_version.html
new file mode 100644
index 0000000000..c9d9ac6e2e
--- /dev/null
+++ b/docs/src/_templates/custom_sidebar_logo_version.html
@@ -0,0 +1,26 @@
+{% if on_rtd %}
+ {% if rtd_version == 'latest' %}
+
+
+
+ {% elif rtd_version == 'stable' %}
+
+
+
+ {% elif rtd_version_type == 'tag' %}
+ {# Covers builds for specific tags, including RC's. #}
+
+
+
+ {% else %}
+ {# Anything else build by RTD will be the HEAD of an activated branch #}
+
+
+
+ {% endif %}
+{%- else %}
+ {# not on rtd #}
+
+
+
+{%- endif %}
diff --git a/docs/src/_templates/imagehash.html b/docs/src/_templates/imagehash.html
new file mode 100644
index 0000000000..8b0dac0cce
--- /dev/null
+++ b/docs/src/_templates/imagehash.html
@@ -0,0 +1,15 @@
+{% extends "!layout.html" %}
+
+{% block body %}
+
+
+Test: {{ test }}
+
+
+{% for hash, file in hashfiles %}
+
+
+{{hash}}
+
+
+{% endfor %}
+
+{% endblock %}
\ No newline at end of file
diff --git a/docs/src/common_links.inc b/docs/src/common_links.inc
new file mode 100644
index 0000000000..a49a98bfa6
--- /dev/null
+++ b/docs/src/common_links.inc
@@ -0,0 +1,84 @@
+.. comment
+ Common resources in alphabetical order:
+
+.. _black: https://black.readthedocs.io/en/stable/
+.. _cartopy: https://github.com/SciTools/cartopy
+.. _flake8: https://flake8.pycqa.org/en/stable/
+.. _.flake8.yml: https://github.com/SciTools/iris/blob/main/.flake8
+.. _cirrus-ci: https://cirrus-ci.com/github/SciTools/iris
+.. _codespell: https://github.com/codespell-project/codespell
+.. _conda: https://docs.conda.io/en/latest/
+.. _contributor: https://github.com/SciTools/iris/graphs/contributors
+.. _core developers: https://github.com/orgs/SciTools/teams/iris-devs/members
+.. _generating ssh keys for GitHub: https://docs.github.com/en/github/authenticating-to-github/adding-a-new-ssh-key-to-your-github-account
+.. _GitHub Actions: https://docs.github.com/en/actions
+.. _GitHub Help Documentation: https://docs.github.com/en/github
+.. _GitHub Discussions: https://github.com/SciTools/iris/discussions
+.. _Iris: https://github.com/SciTools/iris
+.. _Iris GitHub: https://github.com/SciTools/iris
+.. _Iris GitHub Actions: https://github.com/SciTools/iris/actions
+.. _iris-sample-data: https://github.com/SciTools/iris-sample-data
+.. _iris-test-data: https://github.com/SciTools/iris-test-data
+.. _isort: https://pycqa.github.io/isort/
+.. _issue: https://github.com/SciTools/iris/issues
+.. _issues: https://github.com/SciTools/iris/issues
+.. _legacy documentation: https://github.com/SciTools/scitools.org.uk/tree/master/iris/docs/archive
+.. _matplotlib: https://matplotlib.org/stable/
+.. _napolean: https://sphinxcontrib-napoleon.readthedocs.io/en/latest/sphinxcontrib.napoleon.html
+.. _nox: https://nox.thea.codes/en/stable/
+.. _New Issue: https://github.com/scitools/iris/issues/new/choose
+.. _pre-commit: https://pre-commit.com/
+.. _pull request: https://github.com/SciTools/iris/pulls
+.. _pull requests: https://github.com/SciTools/iris/pulls
+.. _Read the Docs: https://scitools-iris.readthedocs.io/en/latest/
+.. _readthedocs.yml: https://github.com/SciTools/iris/blob/main/requirements/readthedocs.yml
+.. _ruff: https://github.com/astral-sh/ruff
+.. _SciTools: https://github.com/SciTools
+.. _scitools-iris: https://pypi.org/project/scitools-iris/
+.. _Shapely: https://shapely.readthedocs.io/en/stable/index.html
+.. _sphinx: https://www.sphinx-doc.org/en/master/
+.. _sphinx-apidoc: https://github.com/sphinx-contrib/apidoc
+.. _test-iris-imagehash: https://github.com/SciTools/test-iris-imagehash
+.. _using git: https://docs.github.com/en/github/using-git
+.. _requirements: https://github.com/SciTools/iris/tree/main/requirements
+.. _CF-UGRID: https://ugrid-conventions.github.io/ugrid-conventions/
+.. _issues on GitHub: https://github.com/SciTools/iris/issues?q=is%3Aopen+is%3Aissue+sort%3Areactions-%2B1-desc
+.. _python-stratify: https://github.com/SciTools/python-stratify
+.. _iris-esmf-regrid: https://github.com/SciTools-incubator/iris-esmf-regrid
+.. _netCDF4: https://github.com/Unidata/netcdf4-python
+.. _SciTools Contributor's License Agreement (CLA): https://cla-assistant.io/SciTools/
+
+
+.. comment
+ Core developers and prolific contributors (@github names) in alphabetical order:
+
+.. _@abooton: https://github.com/abooton
+.. _@alastair-gemmell: https://github.com/alastair-gemmell
+.. _@ajdawson: https://github.com/ajdawson
+.. _@bjlittle: https://github.com/bjlittle
+.. _@bouweandela: https://github.com/bouweandela
+.. _@bsherratt: https://github.com/bsherratt
+.. _@corinnebosley: https://github.com/corinnebosley
+.. _@cpelley: https://github.com/cpelley
+.. _@djkirkham: https://github.com/djkirkham
+.. _@DPeterK: https://github.com/DPeterK
+.. _@ESadek-MO: https://github.com/ESadek-MO
+.. _@esc24: https://github.com/esc24
+.. _@HGWright: https://github.com/HGWright
+.. _@jamesp: https://github.com/jamesp
+.. _@jonseddon: https://github.com/jonseddon
+.. _@jvegasbsc: https://github.com/jvegasbsc
+.. _@lbdreyer: https://github.com/lbdreyer
+.. _@marqh: https://github.com/marqh
+.. _@pelson: https://github.com/pelson
+.. _@pp-mo: https://github.com/pp-mo
+.. _@QuLogic: https://github.com/QuLogic
+.. _@rcomer: https://github.com/rcomer
+.. _@rhattersley: https://github.com/rhattersley
+.. _@schlunma: https://github.com/schlunma
+.. _@stephenworsley: https://github.com/stephenworsley
+.. _@tkknight: https://github.com/tkknight
+.. _@trexfeathers: https://github.com/trexfeathers
+.. _@ukmo-ccbunney: https://github.com/ukmo-ccbunney
+.. _@wjbenfold: https://github.com/wjbenfold
+.. _@zklaus: https://github.com/zklaus
diff --git a/docs/src/community/index.rst b/docs/src/community/index.rst
new file mode 100644
index 0000000000..ee227513b3
--- /dev/null
+++ b/docs/src/community/index.rst
@@ -0,0 +1,60 @@
+.. include:: ../common_links.inc
+
+.. todo:
+ consider scientific-python.org
+ consider scientific-python.org/specs/
+
+Iris in the Community
+=====================
+
+Iris aims to be a valuable member of the open source scientific Python
+community.
+
+We listen out for developments in our dependencies and neighbouring projects,
+and we reach out to them when we can solve problems together; please feel free
+to reach out to us!
+
+We are aware of our place in the user's wider 'toolbox' - offering unique
+functionality and interoperating smoothly with other packages.
+
+We welcome contributions from all; whether that's an opinion, a 1-line
+clarification, or a whole new feature 🙂
+
+Quick Links
+-----------
+
+* `GitHub Discussions`_
+* :ref:`Getting involved`
+* `Twitter `_
+
+Interoperability
+----------------
+
+There's a big choice of Python tools out there! Each one has strengths and
+weaknesses in different areas, so we don't want to force a single choice for your
+whole workflow - we'd much rather make it easy for you to choose the right tool
+for the moment, switching whenever you need. Below are our ongoing efforts at
+smoother interoperability:
+
+.. not using toctree due to combination of child pages and cross-references.
+
+* The :mod:`iris.pandas` module
+* :doc:`iris_xarray`
+* :doc:`phrasebook`
+
+.. toctree::
+ :maxdepth: 1
+ :hidden:
+
+ iris_xarray
+ phrasebook
+
+Plugins
+-------
+
+Iris can be extended with **plugins**! See below for further information:
+
+.. toctree::
+ :maxdepth: 2
+
+ plugins
diff --git a/docs/src/community/iris_xarray.rst b/docs/src/community/iris_xarray.rst
new file mode 100644
index 0000000000..71585d8b9f
--- /dev/null
+++ b/docs/src/community/iris_xarray.rst
@@ -0,0 +1,200 @@
+.. include:: ../common_links.inc
+
+======================
+Iris ❤️ :term:`Xarray`
+======================
+
+There is a lot of overlap between Iris and :term:`Xarray`, but some important
+differences too. Below is a summary of the most important differences, so that
+you can be prepared, and to help you choose the best package for your use case.
+See :doc:`phrasebook` for a broad comparison of terminology.
+
+Overall Experience
+------------------
+
+Iris is the more specialised package, focused on making it as easy
+as possible to work with meteorological and climatological data. Iris
+is built to natively handle many key concepts, such as the CF conventions,
+coordinate systems and bounded coordinates. Iris offers a smaller toolkit of
+operations compared to Xarray, particularly around API for sophisticated
+computation such as array manipulation and multi-processing.
+
+Xarray's more generic data model and community-driven development give it a
+richer range of operations and broader possible uses. Using Xarray
+specifically for meteorology/climatology may require deeper knowledge
+compared to using Iris, and you may prefer to add Xarray plugins
+such as :ref:`cfxarray` to get the best experience. Advanced users can likely
+achieve better performance with Xarray than with Iris.
+
+Conversion
+----------
+There are multiple ways to convert between Iris and Xarray objects.
+
+* Xarray includes the :meth:`~xarray.DataArray.to_iris` and
+ :meth:`~xarray.DataArray.from_iris` methods - detailed in the
+ `Xarray IO notes on Iris`_. Since Iris evolves independently of Xarray, be
+ vigilant for concepts that may be lost during the conversion.
+* Because both packages are closely linked to the :term:`NetCDF Format`, it is
+ feasible to save a NetCDF file using one package then load that file using
+ the other package. This will be lossy in places, as both Iris and Xarray
+ are opinionated on how certain NetCDF concepts relate to their data models.
+* `ncdata `_ is a package developed by the Iris team for managing NetCDF
+  data, which can act as an improved 'bridge' between Iris and Xarray:
+
+Ncdata can convert Iris cubes to an Xarray dataset, or vice versa, with minimal
+overhead and as losslessly as possible.
+
+For example :
+
+.. code-block:: python
+
+ from ncdata.iris_xarray import cubes_from_xarray, cubes_to_xarray
+ cubes = cubes_from_xarray(dataset)
+ xrds = cubes_to_xarray(cubes)
+
+Ncdata avoids the feature limitations previously mentioned regarding Xarray's
+:meth:`~xarray.DataArray.to_iris` and :meth:`~xarray.DataArray.from_iris`,
+because it doesn't replicate any logic of either Xarray or Iris.
+Instead, it uses the netcdf file interfaces of both to exchange data
+"as if" via a netcdf file. So, these conversions *behave* just like exchanging data
+via a file, but are far more efficient because they can transfer data without copying
+arrays or fetching lazy data.
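+
+For comparison, the built-in Xarray converters mentioned above are used like
+this (a minimal sketch, assuming ``dataarray`` is an existing
+:class:`xarray.DataArray`; remember that some concepts may be lost in
+translation):
+
+.. code-block:: python
+
+    import xarray as xr
+
+    cube = dataarray.to_iris()                 # Xarray -> Iris
+    dataarray2 = xr.DataArray.from_iris(cube)  # Iris -> Xarray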
+
+Regridding
+----------
+Iris and Xarray offer a range of regridding methods - both natively and via
+additional packages such as `iris-esmf-regrid`_ and `xESMF`_ - which overlap
+in places
+but tend to cover a different set of use cases (e.g. Iris handles unstructured
+meshes but offers access to fewer ESMF methods). The behaviour of these
+regridders also differs slightly (even between different regridders attached to
+the same package) so the appropriate package to use depends highly on the
+particulars of the use case.
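+
+As a minimal Iris-side sketch (assuming ``cube`` and ``target_cube`` are
+existing :class:`~iris.cube.Cube`\ s on different horizontal grids):
+
+.. code-block:: python
+
+    import iris.analysis
+
+    # Bilinear regridding onto the horizontal grid of another cube.
+    regridded = cube.regrid(target_cube, iris.analysis.Linear())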
+
+Plotting
+--------
+Xarray and Iris have a large overlap of functionality when creating
+:term:`Matplotlib` plots and both support the plotting of multidimensional
+coordinates. This means the experience is largely similar using either package.
+
+Xarray supports further plotting backends through external packages (e.g. Bokeh through `hvPlot`_)
+and, if a user is already familiar with `pandas`_, the interface should be
+familiar. It also supports some different plot types to Iris, and therefore can
+be used for a wider variety of plots. It also has benefits regarding "out of
+the box", quick customisations to plots. However, if further customisation is
+required, knowledge of matplotlib is still required.
+
+In both cases, :term:`Cartopy` is/can be used. Iris does more work
+automatically for the user here, creating Cartopy
+:class:`~cartopy.mpl.geoaxes.GeoAxes` for latitude and longitude coordinates,
+whereas the user has to do this manually in Xarray.
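+
+A minimal sketch of equivalent quick plots (assuming ``cube`` and ``dataarray``
+hold the same two-dimensional latitude/longitude field):
+
+.. code-block:: python
+
+    import iris.quickplot as qplt
+    import matplotlib.pyplot as plt
+
+    qplt.pcolormesh(cube)  # Iris: GeoAxes are set up automatically
+    dataarray.plot()       # Xarray: plain Matplotlib axes by default
+    plt.show()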
+
+Statistics
+----------
+Both libraries are quite comparable with generally similar capabilities,
+performance and laziness. Iris offers more specificity in some cases, such as
+some more specific unique functions and masked tolerance in most statistics.
+Xarray seems more approachable however, with some less unique but more
+convenient solutions (these tend to be wrappers to :term:`Dask` functions).
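+
+For example, a time mean in each package (a minimal sketch, assuming ``cube``
+and ``dataarray`` each have a ``time`` dimension):
+
+.. code-block:: python
+
+    import iris.analysis
+
+    time_mean_iris = cube.collapsed("time", iris.analysis.MEAN)
+    time_mean_xarray = dataarray.mean(dim="time")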
+
+Laziness and Multi-Processing with :term:`Dask`
+-----------------------------------------------
+Iris and Xarray both support lazy data and out-of-core processing through
+utilisation of Dask.
+
+While both Iris and Xarray expose :term:`NumPy` conveniences at the API level
+(e.g. the ``ndim`` property), only Xarray exposes Dask conveniences. For example
+:attr:`xarray.DataArray.chunks`, which gives the user direct control
+over the underlying Dask array chunks. The Iris API instead takes control of
+such concepts and user control is only possible by manipulating the underlying
+Dask array directly (accessed via :meth:`iris.cube.Cube.core_data`).
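+
+A minimal sketch of the difference (``dataarray`` and ``cube`` are assumed to
+contain lazy data):
+
+.. code-block:: python
+
+    # Xarray exposes the Dask chunks directly on the object.
+    print(dataarray.chunks)
+
+    # Iris requires going via the underlying Dask array.
+    lazy_data = cube.core_data()
+    print(lazy_data.chunks)
+    cube = cube.copy(data=lazy_data.rechunk({0: 10}))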
+
+:class:`xarray.DataArray`\ s comply with `NEP-18`_, allowing NumPy arrays to be
+based on them, and they also include the necessary extra members for Dask
+arrays to be based on them too. Neither of these is currently possible with
+Iris :class:`~iris.cube.Cube`\ s, although an ambition for the future.
+
+NetCDF File Control
+-------------------
+(More info: :ref:`netcdf_io`)
+
+Unlike Iris, Xarray generally provides full control of major file structures,
+i.e. dimensions + variables, including their order in the file. It mostly
+respects these in a file input, and can reproduce them on output.
+However, attribute handling is not so complete: like Iris, it interprets and
+modifies some recognised aspects, and can add some extra attributes not in the
+input.
+
+Whereas Iris is primarily designed to handle netCDF data encoded according to
+`CF Conventions <https://cfconventions.org/>`_ , this is not so important to Xarray,
+which can therefore make it harder to correctly manage this type of data.
+While Xarray CF support is not complete, it may improve, and obviously
+:ref:`cfxarray` may be relevant here.
+There is also relevant documentation
+`at this page `_.
+
+In some particular aspects, CF data is not loaded well (or at all), and in many cases
+output is not fully CF compliant (as-per `the cf checker `_).
+
+* xarray has its own interpretation of coordinates, which is different from the CF-based
+ approach in Iris, and means that the use of the "coordinates" attribute in output is
+ often not CF compliant.
+* dates are converted to datetime-like objects internally. There are special features
+ providing `support for non-standard calendars `_,
+ however date units may not always be saved correctly.
+* CF-style coordinate bounds variables are not fully understood. The CF approach
+ where bounds variables do not usually define their units or standard_names can cause
+ problems. Certain files containing bounds variables with more than 2 bounds (e.g.
+ unstructured data) may not load at all.
+* missing points are always represented as NaNs, as-per Pandas usage.
+ (See :ref:`xarray_missing_data` ).
+ This means that fill values are not preserved, and that masked integer data is
+ converted to floats.
+  The netCDF default fill-values are not supported, so variables with no
+  "_FillValue" attribute will have their (default fill-value) missing points
+  appear as ordinary data values rather than as NaNs. By default, output
+  variables generally have ``_FillValue = NaN``.
+
+Ultimately, however, nearly everything wanted in a particular desired result file
+**can** be achieved in Xarray, via provided override mechanisms (`loading keywords`_
+and the '`encoding`_' dictionaries).
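+
+A minimal sketch of those override mechanisms (the file and variable names are
+hypothetical):
+
+.. code-block:: python
+
+    import numpy as np
+    import xarray as xr
+
+    # Keep raw values: disable automatic fill-value/scale-factor handling.
+    ds = xr.open_dataset("input.nc", mask_and_scale=False)
+
+    # Control the encoding written for a particular variable on save.
+    ds.to_netcdf(
+        "output.nc",
+        encoding={"air_temperature": {"dtype": np.int16, "_FillValue": -999}},
+    )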
+
+.. _xarray_missing_data:
+
+Missing Data
+------------
+Xarray uses :data:`numpy.nan` to represent missing values and this will support
+many simple use cases assuming the data are floats. Iris enables more
+sophisticated missing data handling by representing missing values as masks
+(:class:`numpy.ma.MaskedArray` for real data and :class:`dask.array.Array`
+for lazy data) which allows data to be any data type and to include either/both
+a mask and :data:`~numpy.nan`\ s.
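+
+For illustration, a minimal sketch of the two representations, using a small
+integer array with one missing point:
+
+.. code-block:: python
+
+    import numpy as np
+    import xarray as xr
+    from iris.cube import Cube
+
+    # Iris: the mask preserves the integer dtype.
+    masked = np.ma.masked_array([1, 2, 3], mask=[False, True, False])
+    cube = Cube(masked)
+
+    # Xarray: NaN forces a float dtype.
+    dataarray = xr.DataArray([1.0, np.nan, 3.0])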
+
+.. _cfxarray:
+
+`cf-xarray`_
+-------------
+Iris has a data model entirely based on :term:`CF Conventions`. Xarray has a
+data model based on :term:`NetCDF Format` with cf-xarray acting as translation
+into CF. Xarray/cf-xarray methods can be called and data accessed with CF-like
+arguments (e.g. axis, standard name) and there are some CF-specific utilities
+(similar to Iris utilities). Iris tends to cover more of CF, and to be
+stricter about it.
+
+
+.. seealso::
+
+ * `Xarray IO notes on Iris`_
+ * `Xarray notes on other NetCDF libraries`_
+
+.. _Xarray IO notes on Iris: https://docs.xarray.dev/en/stable/user-guide/io.html#iris
+.. _Xarray notes on other NetCDF libraries: https://docs.xarray.dev/en/stable/getting-started-guide/faq.html#what-other-netcdf-related-python-libraries-should-i-know-about
+.. _loading keywords: https://docs.xarray.dev/en/stable/generated/xarray.open_dataset.html#xarray.open_dataset
+.. _encoding: https://docs.xarray.dev/en/stable/user-guide/io.html#writing-encoded-data
+.. _xESMF: https://github.com/pangeo-data/xESMF/
+.. _seaborn: https://seaborn.pydata.org/
+.. _hvPlot: https://hvplot.holoviz.org/
+.. _pandas: https://pandas.pydata.org/
+.. _NEP-18: https://numpy.org/neps/nep-0018-array-function-protocol.html
+.. _cf-xarray: https://github.com/xarray-contrib/cf-xarray
+.. _iris#4994: https://github.com/SciTools/iris/issues/4994
diff --git a/docs/src/community/phrasebook.rst b/docs/src/community/phrasebook.rst
new file mode 100644
index 0000000000..bcd91cca83
--- /dev/null
+++ b/docs/src/community/phrasebook.rst
@@ -0,0 +1,66 @@
+.. include:: ../common_links.inc
+
+.. _phrasebook:
+
+Package Phrasebook
+===================
+
+There are a number of similar packages to Iris, and a lot of these have their own
+terminology for similar things. Whether you're coming or going, we hope this might
+be a helpful guide to these differences!
+Definitions for each can be found in :ref:`glossary`. See also
+`Xarray terminology `_.
+
+.. list-table:: Phrasebook
+ :widths: 25 25 25 50
+ :header-rows: 1
+
+ * - Iris
+ - Xarray
+ - Example
+ - Notes
+ * - Non-Lazy
+ - Eager
+ -
+ - Used to relate to functions, rather than the data.
+ * - Cube
+ - DataArray
+ -
+ -
+ * - CubeList
+ - Dataset
+ -
+ - Though similar, a CubeList is a simpler object, and is
+ not a perfect comparison to a Dataset
+ * - Merge/ Concatenate
+ - Concatenate
+ - `Xarray concatenate `_
+ - Xarray's concatenate has the capability to largely do what both
+ Iris merge and Iris concatenate do. However, this is not a perfect comparison,
+ please see the link for more information.
+ * -
+ - Merge
+ - `Xarray merge `_
+     - Xarray's Merge function doesn't map neatly to any Iris feature.
+ Please see the link for more information.
+ * - Scalar Coordinate
+ -
+ -
+ - Iris makes a distinction between scalar coordinates and non-scalar coordinates,
+ whereas xarray documentation makes a distinction between scalar and non-scalar *data*.
+ It is possible to make coordinates with scalar data in both Iris and xarray
+ but only Iris will label such coordinates.
+ * - AuxCoord
+ - Non-Dimensional Coordinate
+ -
+ - Coordinates in Iris and xarray are categorised using different rules,
+ and so are not a one-to-one match.
+ * - DimCoord
+ - Dimension Coordinate
+ -
+ - Coordinates in Iris and xarray are categorised using different rules,
+ and so are not a one-to-one match.
+
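+
+For instance, combining per-timestep fields into one object, per the Merge /
+Concatenate rows above (a minimal sketch; ``cube_t0``, ``cube_t1``, ``da_t0``
+and ``da_t1`` are assumed single-timestep objects):
+
+.. code-block:: python
+
+    import xarray as xr
+    from iris.cube import CubeList
+
+    combined_cube = CubeList([cube_t0, cube_t1]).merge_cube()
+    combined_da = xr.concat([da_t0, da_t1], dim="time")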
+----
+
+`To top `_
\ No newline at end of file
diff --git a/docs/src/community/plugins.rst b/docs/src/community/plugins.rst
new file mode 100644
index 0000000000..0d79d64623
--- /dev/null
+++ b/docs/src/community/plugins.rst
@@ -0,0 +1,68 @@
+.. _namespace package: https://packaging.python.org/en/latest/guides/packaging-namespace-packages/
+
+.. _community_plugins:
+
+Plugins
+=======
+
+Iris supports **plugins** under the ``iris.plugins`` `namespace package`_.
+This allows packages that extend Iris' functionality to be developed and
+maintained independently, while still being installed into ``iris.plugins``
+instead of a separate package. For example, a plugin may provide loaders or
+savers for additional file formats, or alternative visualisation methods.
+
+
+Using plugins
+-------------
+
+Once a plugin is installed, it can be used either via the
+:func:`iris.use_plugin` function, or by importing it directly:
+
+.. code-block:: python
+
+ import iris
+
+ iris.use_plugin("my_plugin")
+ # OR
+ import iris.plugins.my_plugin
+
+
+Creating plugins
+----------------
+
+The choice of a `namespace package`_ makes writing a plugin relatively
+straightforward: it simply needs to appear as a folder within ``iris/plugins``,
+and can then be distributed in the same way as any other package. An example
+repository layout:
+
+.. code-block:: text
+
+ + lib
+ + iris
+ + plugins
+ + my_plugin
+ - __init__.py
+ - (more code...)
+ - README.md
+ - pyproject.toml
+ - setup.cfg
+ - (other project files...)
+
+In particular, note that there must **not** be any ``__init__.py`` files at
+higher levels than the plugin itself.
+
+The package name - how it is referred to by PyPI/conda, specified by
+``metadata.name`` in ``setup.cfg`` - is recommended to include both "iris" and
+the plugin name. Continuing this example, its ``setup.cfg`` should include, at
+minimum:
+
+.. code-block:: ini
+
+ [metadata]
+ name = iris-my-plugin
+
+ [options]
+ packages = find_namespace:
+
+ [options.packages.find]
+ where = lib
diff --git a/docs/src/conf.py b/docs/src/conf.py
new file mode 100644
index 0000000000..70b1063585
--- /dev/null
+++ b/docs/src/conf.py
@@ -0,0 +1,462 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the BSD license.
+# See LICENSE in the root of the repository for full licensing details.
+
+# -*- coding: utf-8 -*-
+#
+# Iris documentation build configuration file, created by
+# sphinx-quickstart on Tue May 25 13:26:23 2010.
+#
+# This file is execfile()d with the current directory set to its containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+# ----------------------------------------------------------------------------
+
+"""Config for sphinx."""
+
+import datetime
+from importlib.metadata import version as get_version
+from inspect import getsource
+import ntpath
+import os
+from pathlib import Path
+import re
+from subprocess import run
+import sys
+from tempfile import gettempdir
+from urllib.parse import quote
+import warnings
+
+
+# function to write useful output to stdout, prefixing the source.
+def autolog(message):
+ print("[{}] {}".format(ntpath.basename(__file__), message))
+
+
+# -- Check for dev make options to build quicker
+skip_api = os.environ.get("SKIP_API")
+
+# -- Are we running on the readthedocs server, if so do some setup -----------
+on_rtd = os.environ.get("READTHEDOCS") == "True"
+
+# This is the rtd reference to the version, such as: latest, stable, v3.0.1 etc
+rtd_version = os.environ.get("READTHEDOCS_VERSION")
+if rtd_version is not None:
+ # Make rtd_version safe for use in shields.io badges.
+ rtd_version = rtd_version.replace("_", "__")
+ rtd_version = rtd_version.replace("-", "--")
+ rtd_version = quote(rtd_version)
+
+# branch, tag, external (for pull request builds), or unknown.
+rtd_version_type = os.environ.get("READTHEDOCS_VERSION_TYPE")
+
+# For local testing purposes we can force being on RTD and the version
+# on_rtd = True # useful for testing
+# rtd_version = "latest" # useful for testing
+# rtd_version = "stable" # useful for testing
+# rtd_version_type = "tag" # useful for testing
+# rtd_version = "my_branch" # useful for testing
+
+if on_rtd:
+ autolog("Build running on READTHEDOCS server")
+
+ # list all the READTHEDOCS environment variables that may be of use
+ autolog("Listing all environment variables on the READTHEDOCS server...")
+
+ for item, value in os.environ.items():
+ autolog("[READTHEDOCS] {} = {}".format(item, value))
+
+# -- Path setup --------------------------------------------------------------
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+
+# custom sphinx extensions
+sys.path.append(os.path.abspath("sphinxext"))
+
+# add some sample files from the developers guide..
+sys.path.append(os.path.abspath(os.path.join("developers_guide")))
+
+# Why isn't the iris path added so it is discoverable too?  We don't need to:
+# the sphinxext that generates the api rst knows where the source is. If it
+# is added then the travis build will likely fail.
+
+# -- Project information -----------------------------------------------------
+
+project = "Iris"
+
+# define the copyright information for latex builds. Note, for html builds,
+# the copyright exists directly inside "_templates/layout.html"
+copyright_years = f"2010 - {datetime.datetime.now().year}"
+copyright = f"{copyright_years}, Iris Contributors"
+author = "Iris Developers"
+
+# The version info for the project you're documenting, acts as replacement for
+# |version|, also used in various other places throughout the built documents.
+version = get_version("scitools-iris")
+release = version
+autolog(f"Iris Version = {version}")
+autolog(f"Iris Release = {release}")
+
+# -- General configuration ---------------------------------------------------
+
+# Create a variable that can be inserted in the rst "|copyright_years|".
+# You can add more variables here if needed.
+
+build_python_version = ".".join([str(i) for i in sys.version_info[:3]])
+
+
+def _dotv(version):
+ result = version
+ match = re.match(r"^py(\d+)$", version)
+ if match:
+ digits = match.group(1)
+ if len(digits) > 1:
+ result = f"{digits[0]}.{digits[1:]}"
+ return result
+
+
+# Automate the discovery of the python versions tested with CI.
+python_support = sorted(
+ [fname.stem for fname in Path(".").glob("../../requirements/py*.yml")]
+)
+
+if not python_support:
+ python_support = "unknown Python versions"
+elif len(python_support) == 1:
+ python_support = f"Python {_dotv(python_support[0])}"
+else:
+ rest = ", ".join([_dotv(v) for v in python_support[:-1]])
+ last = _dotv(python_support[-1])
+ python_support = f"Python {rest} and {last}"
+
+rst_epilog = f"""
+.. |copyright_years| replace:: {copyright_years}
+.. |python_version| replace:: {build_python_version}
+.. |python_support| replace:: {python_support}
+.. |iris_version| replace:: v{version}
+.. |build_date| replace:: ({datetime.datetime.now().strftime('%d %b %Y')})
+"""
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named "sphinx.ext.*") or your custom
+# ones.
+extensions = [
+ "sphinx.ext.todo",
+ "sphinx.ext.duration",
+ "sphinx.ext.coverage",
+ "sphinx.ext.viewcode",
+ "sphinx.ext.autosummary",
+ "sphinx.ext.doctest",
+ "sphinx.ext.extlinks",
+ "sphinx.ext.autodoc",
+ "sphinx.ext.intersphinx",
+ "sphinx_copybutton",
+ "sphinx.ext.napoleon",
+ "sphinx_design",
+ "sphinx_gallery.gen_gallery",
+ "matplotlib.sphinxext.mathmpl",
+ "matplotlib.sphinxext.plot_directive",
+]
+
+if skip_api == "1":
+ autolog("Skipping the API docs generation (SKIP_API=1)")
+else:
+ extensions.extend(["sphinxcontrib.apidoc"])
+ extensions.extend(["api_rst_formatting"])
+
+# -- Napoleon extension -------------------------------------------------------
+# See https://sphinxcontrib-napoleon.readthedocs.io/en/latest/sphinxcontrib.napoleon.html
+napoleon_google_docstring = True
+napoleon_numpy_docstring = True
+napoleon_include_init_with_doc = False
+napoleon_include_private_with_doc = False
+napoleon_include_special_with_doc = True # includes dunders in api doc
+napoleon_use_admonition_for_examples = False
+napoleon_use_admonition_for_notes = False
+napoleon_use_admonition_for_references = False
+napoleon_use_ivar = False
+napoleon_use_param = True
+napoleon_use_rtype = True
+napoleon_use_keyword = True
+napoleon_custom_sections = None
+
+# -- copybutton extension -----------------------------------------------------
+# See https://sphinx-copybutton.readthedocs.io/en/latest/
+copybutton_prompt_text = r">>> |\.\.\. "
+copybutton_prompt_is_regexp = True
+copybutton_line_continuation_character = "\\"
+
+# sphinx.ext.todo configuration -----------------------------------------------
+# See https://www.sphinx-doc.org/en/master/usage/extensions/todo.html
+todo_include_todos = False
+todo_emit_warnings = False
+
+# sphinx.ext.autodoc configuration --------------------------------------------
+# https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html#confval-autodoc_default_options
+autodoc_default_options = {
+ "members": True,
+ "member-order": "alphabetical",
+ "undoc-members": True,
+ "private-members": False,
+ "special-members": False,
+ "inherited-members": True,
+ "show-inheritance": True,
+}
+
+# https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html#confval-autodoc_typehints
+autodoc_typehints = "description"
+autosummary_generate = True
+autosummary_imported_members = True
+autopackage_name = ["iris"]
+autoclass_content = "both"
+modindex_common_prefix = ["iris"]
+
+# -- apidoc extension ---------------------------------------------------------
+# See https://github.com/sphinx-contrib/apidoc
+source_code_root = (Path(__file__).parents[2]).absolute()
+module_dir = source_code_root / "lib"
+apidoc_module_dir = str(module_dir)
+apidoc_output_dir = str(Path(__file__).parent / "generated/api")
+apidoc_toc_file = False
+
+apidoc_excluded_paths = [
+ str(module_dir / "iris/tests"),
+ str(module_dir / "iris/experimental/raster.*"), # gdal conflicts
+]
+
+apidoc_module_first = True
+apidoc_separate_modules = True
+apidoc_extra_args = []
+
+autolog(f"[sphinx-apidoc] source_code_root = {source_code_root}")
+autolog(f"[sphinx-apidoc] apidoc_excluded_paths = {apidoc_excluded_paths}")
+autolog(f"[sphinx-apidoc] apidoc_output_dir = {apidoc_output_dir}")
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ["_templates"]
+
+# -- intersphinx extension ----------------------------------------------------
+# See https://www.sphinx-doc.org/en/master/usage/extensions/intersphinx.html
+intersphinx_mapping = {
+ "cartopy": ("https://scitools.org.uk/cartopy/docs/latest/", None),
+ "cf_units": ("https://cf-units.readthedocs.io/en/stable/", None),
+ "cftime": ("https://unidata.github.io/cftime/", None),
+ "dask": ("https://docs.dask.org/en/stable/", None),
+ "geovista": ("https://geovista.readthedocs.io/en/latest/", None),
+ "iris-esmf-regrid": ("https://iris-esmf-regrid.readthedocs.io/en/stable/", None),
+ "matplotlib": ("https://matplotlib.org/stable/", None),
+ "numpy": ("https://numpy.org/doc/stable/", None),
+ "pandas": ("https://pandas.pydata.org/docs/", None),
+ "python": ("https://docs.python.org/3/", None),
+ "pyvista": ("https://docs.pyvista.org/", None),
+ "scipy": ("https://docs.scipy.org/doc/scipy/", None),
+}
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = "sphinx"
+
+# -- plot_directive extension -------------------------------------------------
+# See https://matplotlib.org/stable/api/sphinxext_plot_directive_api.html#options
+plot_formats = [
+ ("png", 100),
+]
+
+# -- Extlinks extension -------------------------------------------------------
+# See https://www.sphinx-doc.org/en/master/usage/extensions/extlinks.html
+
+extlinks = {
+ "issue": ("https://github.com/SciTools/iris/issues/%s", "Issue #%s"),
+ "pull": ("https://github.com/SciTools/iris/pull/%s", "PR #%s"),
+ "discussion": (
+ "https://github.com/SciTools/iris/discussions/%s",
+ "Discussion #%s",
+ ),
+}
+
+# -- Doctest ("make doctest")--------------------------------------------------
+
+doctest_global_setup = "import iris"
+
+# -- Options for HTML output --------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+#
+html_favicon = "_static/iris-logo.svg"
+html_theme = "pydata_sphinx_theme"
+
+# See https://pydata-sphinx-theme.readthedocs.io/en/latest/user_guide/configuring.html#configure-the-search-bar-position
+html_sidebars = {
+ "**": [
+ "custom_sidebar_logo_version",
+ "search-field",
+ "sidebar-nav-bs",
+ "sidebar-ethical-ads",
+ ]
+}
+
+# See https://pydata-sphinx-theme.readthedocs.io/en/latest/user_guide/configuring.html
+html_theme_options = {
+ "footer_start": ["copyright", "sphinx-version"],
+ "footer_end": ["custom_footer"],
+ "navigation_depth": 3,
+ "navigation_with_keys": False,
+ "show_toc_level": 2,
+ "show_prev_next": True,
+ "navbar_align": "content",
+ # removes the search box from the top bar
+ "navbar_persistent": [],
+ # TODO: review if 6 links is too crowded.
+ "header_links_before_dropdown": 6,
+ "github_url": "https://github.com/SciTools/iris",
+ "twitter_url": "https://twitter.com/scitools_iris",
+ # icons available: https://fontawesome.com/v5.15/icons?d=gallery&m=free
+ "icon_links": [
+ {
+ "name": "GitHub Discussions",
+ "url": "https://github.com/SciTools/iris/discussions",
+ "icon": "far fa-comments",
+ },
+ {
+ "name": "PyPI",
+ "url": "https://pypi.org/project/scitools-iris/",
+ "icon": "fas fa-box",
+ },
+ {
+ "name": "Conda",
+ "url": "https://anaconda.org/conda-forge/iris",
+ "icon": "fas fa-boxes",
+ },
+ ],
+ "use_edit_page_button": True,
+ # Omit `theme-switcher` from navbar_end below to disable it
+ # Info: https://pydata-sphinx-theme.readthedocs.io/en/stable/user_guide/light-dark.html#configure-default-theme-mode
+ # "navbar_end": ["navbar-icon-links"],
+ # https://pydata-sphinx-theme.readthedocs.io/en/v0.11.0/user_guide/branding.html#different-logos-for-light-and-dark-mode
+ "logo": {
+ "image_light": "_static/iris-logo-title.svg",
+ "image_dark": "_static/iris-logo-title-dark.svg",
+ },
+}
+
+# if we are building via Read The Docs and it is the latest (not stable)
+if on_rtd and rtd_version == "latest":
+ html_theme_options["announcement"] = f"""
+ You are viewing the latest unreleased documentation
+ {version}. You can switch to a
+ stable
+ version."""
+
+rev_parse = run(["git", "rev-parse", "--short", "HEAD"], capture_output=True)
+commit_sha = rev_parse.stdout.decode().strip()
+
+html_context = {
+ # pydata_theme
+ "github_repo": "iris",
+ "github_user": "scitools",
+ "github_version": "main",
+ "doc_path": "docs/src",
+ # default theme. Also disabled the button in the html_theme_options.
+ # Info: https://pydata-sphinx-theme.readthedocs.io/en/stable/user_guide/light-dark.html#configure-default-theme-mode
+ "default_mode": "auto",
+ # custom
+ "on_rtd": on_rtd,
+ "rtd_version": rtd_version,
+ "rtd_version_type": rtd_version_type,
+ "version": version,
+ "copyright_years": copyright_years,
+ "python_version": build_python_version,
+ "commit_sha": commit_sha,
+}
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ["_static"]
+html_style = "theme_override.css"
+
+# URL link checker. Some links work but report as broken; let's ignore them.
+# See https://www.sphinx-doc.org/en/1.2/config.html#options-for-the-linkcheck-builder
+linkcheck_ignore = [
+ "https://catalogue.ceda.ac.uk/uuid/82adec1f896af6169112d09cc1174499",
+ "https://cfconventions.org",
+ "https://code.google.com/p/msysgit/downloads/list",
+ "https://effbot.org",
+ "https://help.github.com",
+ "https://docs.github.com",
+ "https://github.com",
+ "https://www.personal.psu.edu/cab38/ColorBrewer/ColorBrewer_updates.html",
+ "https://scitools.github.com/cartopy",
+ "https://www.wmo.int/pages/prog/www/DPFS/documents/485_Vol_I_en_colour.pdf",
+ "https://software.ac.uk/how-cite-software",
+ "https://www.esrl.noaa.gov/psd/data/gridded/conventions/cdc_netcdf_standard.shtml",
+ "https://www.nationalarchives.gov.uk/doc/open-government-licence",
+ "https://www.metoffice.gov.uk/",
+ "https://biggus.readthedocs.io/",
+ "https://stickler-ci.com/",
+ "https://twitter.com/scitools_iris",
+ "https://stackoverflow.com/questions/tagged/python-iris",
+ "https://www.flaticon.com/",
+]
+
+# list of sources to exclude from the build.
+exclude_patterns = []
+
+# -- sphinx-gallery config ----------------------------------------------------
+# See https://sphinx-gallery.github.io/stable/configuration.html
+
+
+def reset_modules(gallery_conf, fname):
+ """Force re-registering of nc-time-axis with matplotlib for each example.
+
+ Required for sphinx-gallery>=0.11.0.
+ """
+ from sys import modules
+
+ _ = modules.pop("nc_time_axis", None)
+
+
+# https://sphinx-gallery.github.io/dev/configuration.html#importing-callables
+reset_modules_dir = Path(gettempdir()) / reset_modules.__name__
+reset_modules_dir.mkdir(exist_ok=True)
+(reset_modules_dir / f"{reset_modules.__name__}.py").write_text(
+ getsource(reset_modules)
+)
+sys.path.insert(0, str(reset_modules_dir))
+
+
+sphinx_gallery_conf = {
+ # path to your example scripts
+ "examples_dirs": ["../gallery_code"],
+ # path to where to save gallery generated output
+ "gallery_dirs": ["generated/gallery"],
+ # filename pattern for the files in the gallery
+ "filename_pattern": "/plot_",
+ # filename pattern to ignore in the gallery
+ "ignore_pattern": r"__init__\.py",
+ # force gallery building, unless overridden (see src/Makefile)
+ "plot_gallery": "'True'",
+ "reset_modules": f"{reset_modules.__name__}.{reset_modules.__name__}",
+}
+
+# -----------------------------------------------------------------------------
+# Remove warnings
+warnings.filterwarnings("ignore")
+
+# -- numfig options (built-in) ------------------------------------------------
+# Enable numfig.
+numfig = True
+
+numfig_format = {
+ "code-block": "Example %s",
+ "figure": "Figure %s",
+ "section": "Section %s",
+ "table": "Table %s",
+}
diff --git a/docs/src/copyright.rst b/docs/src/copyright.rst
new file mode 100644
index 0000000000..b0d68cfe8c
--- /dev/null
+++ b/docs/src/copyright.rst
@@ -0,0 +1,36 @@
+
+Iris Copyright, Licensing and Contributors
+==========================================
+
+Iris Code
+---------
+
+All Iris source code, unless explicitly stated, is ``Copyright Iris
+contributors`` and is licensed under the **BSD-3 License**.
+You should find all source files with the following header:
+
+.. admonition:: Code License
+
+ Copyright Iris contributors
+
+ This file is part of Iris and is released under the BSD license.
+ See LICENSE in the root of the repository for full licensing details.
+
+
+Iris Documentation and Examples
+-------------------------------
+
+All documentation, examples and sample data found on this website and in the source repository
+are licensed under the UK's Open Government Licence:
+
+.. admonition:: Documentation, example and data license
+
+ (C) British Crown Copyright |copyright_years|
+
+ You may use and reuse the information featured on this website (not including logos) free of
+ charge in any format or medium, under the terms of the
+ `Open Government Licence <https://www.nationalarchives.gov.uk/doc/open-government-licence>`_.
+ We encourage users to establish hypertext links to this website.
+
+ Any email enquiries regarding the use and reuse of this information resource should be
+ sent to: psi@nationalarchives.gsi.gov.uk.
diff --git a/docs/src/developers_guide/assets/developer-settings-github-apps.png b/docs/src/developers_guide/assets/developer-settings-github-apps.png
new file mode 100644
index 0000000000..a63994d087
Binary files /dev/null and b/docs/src/developers_guide/assets/developer-settings-github-apps.png differ
diff --git a/docs/src/developers_guide/assets/download-pem.png b/docs/src/developers_guide/assets/download-pem.png
new file mode 100644
index 0000000000..cbceb1304d
Binary files /dev/null and b/docs/src/developers_guide/assets/download-pem.png differ
diff --git a/docs/src/developers_guide/assets/generate-key.png b/docs/src/developers_guide/assets/generate-key.png
new file mode 100644
index 0000000000..ac894dc71b
Binary files /dev/null and b/docs/src/developers_guide/assets/generate-key.png differ
diff --git a/docs/src/developers_guide/assets/gha-token-example.png b/docs/src/developers_guide/assets/gha-token-example.png
new file mode 100644
index 0000000000..cba1cf6935
Binary files /dev/null and b/docs/src/developers_guide/assets/gha-token-example.png differ
diff --git a/docs/src/developers_guide/assets/install-app.png b/docs/src/developers_guide/assets/install-app.png
new file mode 100644
index 0000000000..31259de588
Binary files /dev/null and b/docs/src/developers_guide/assets/install-app.png differ
diff --git a/docs/src/developers_guide/assets/install-iris-actions.png b/docs/src/developers_guide/assets/install-iris-actions.png
new file mode 100644
index 0000000000..db16dee55b
Binary files /dev/null and b/docs/src/developers_guide/assets/install-iris-actions.png differ
diff --git a/docs/src/developers_guide/assets/installed-app.png b/docs/src/developers_guide/assets/installed-app.png
new file mode 100644
index 0000000000..ab87032393
Binary files /dev/null and b/docs/src/developers_guide/assets/installed-app.png differ
diff --git a/docs/src/developers_guide/assets/iris-actions-secret.png b/docs/src/developers_guide/assets/iris-actions-secret.png
new file mode 100644
index 0000000000..f32456d0f2
Binary files /dev/null and b/docs/src/developers_guide/assets/iris-actions-secret.png differ
diff --git a/docs/src/developers_guide/assets/iris-github-apps.png b/docs/src/developers_guide/assets/iris-github-apps.png
new file mode 100644
index 0000000000..50753532b7
Binary files /dev/null and b/docs/src/developers_guide/assets/iris-github-apps.png differ
diff --git a/docs/src/developers_guide/assets/iris-secrets-created.png b/docs/src/developers_guide/assets/iris-secrets-created.png
new file mode 100644
index 0000000000..19b0ba11dc
Binary files /dev/null and b/docs/src/developers_guide/assets/iris-secrets-created.png differ
diff --git a/docs/src/developers_guide/assets/iris-security-actions.png b/docs/src/developers_guide/assets/iris-security-actions.png
new file mode 100644
index 0000000000..7cbe3a7dc2
Binary files /dev/null and b/docs/src/developers_guide/assets/iris-security-actions.png differ
diff --git a/docs/src/developers_guide/assets/iris-settings.png b/docs/src/developers_guide/assets/iris-settings.png
new file mode 100644
index 0000000000..70714235c2
Binary files /dev/null and b/docs/src/developers_guide/assets/iris-settings.png differ
diff --git a/docs/src/developers_guide/assets/org-perms-members.png b/docs/src/developers_guide/assets/org-perms-members.png
new file mode 100644
index 0000000000..99fd8985e2
Binary files /dev/null and b/docs/src/developers_guide/assets/org-perms-members.png differ
diff --git a/docs/src/developers_guide/assets/repo-perms-contents.png b/docs/src/developers_guide/assets/repo-perms-contents.png
new file mode 100644
index 0000000000..4c325c334d
Binary files /dev/null and b/docs/src/developers_guide/assets/repo-perms-contents.png differ
diff --git a/docs/src/developers_guide/assets/repo-perms-pull-requests.png b/docs/src/developers_guide/assets/repo-perms-pull-requests.png
new file mode 100644
index 0000000000..812f5ef951
Binary files /dev/null and b/docs/src/developers_guide/assets/repo-perms-pull-requests.png differ
diff --git a/docs/src/developers_guide/assets/scitools-settings.png b/docs/src/developers_guide/assets/scitools-settings.png
new file mode 100644
index 0000000000..8d7e728ab5
Binary files /dev/null and b/docs/src/developers_guide/assets/scitools-settings.png differ
diff --git a/docs/src/developers_guide/assets/user-perms.png b/docs/src/developers_guide/assets/user-perms.png
new file mode 100644
index 0000000000..607c7dcdb6
Binary files /dev/null and b/docs/src/developers_guide/assets/user-perms.png differ
diff --git a/docs/src/developers_guide/assets/webhook-active.png b/docs/src/developers_guide/assets/webhook-active.png
new file mode 100644
index 0000000000..538362f335
Binary files /dev/null and b/docs/src/developers_guide/assets/webhook-active.png differ
diff --git a/docs/src/developers_guide/asv_example_images/commits.png b/docs/src/developers_guide/asv_example_images/commits.png
new file mode 100644
index 0000000000..4e0d695322
Binary files /dev/null and b/docs/src/developers_guide/asv_example_images/commits.png differ
diff --git a/docs/src/developers_guide/asv_example_images/comparison.png b/docs/src/developers_guide/asv_example_images/comparison.png
new file mode 100644
index 0000000000..e146d30696
Binary files /dev/null and b/docs/src/developers_guide/asv_example_images/comparison.png differ
diff --git a/docs/src/developers_guide/asv_example_images/scalability.png b/docs/src/developers_guide/asv_example_images/scalability.png
new file mode 100644
index 0000000000..260c3ef536
Binary files /dev/null and b/docs/src/developers_guide/asv_example_images/scalability.png differ
diff --git a/docs/src/developers_guide/ci_checks.png b/docs/src/developers_guide/ci_checks.png
new file mode 100644
index 0000000000..54ab672b3c
Binary files /dev/null and b/docs/src/developers_guide/ci_checks.png differ
diff --git a/docs/src/developers_guide/contributing_benchmarks.rst b/docs/src/developers_guide/contributing_benchmarks.rst
new file mode 100644
index 0000000000..ccb9a50e39
--- /dev/null
+++ b/docs/src/developers_guide/contributing_benchmarks.rst
@@ -0,0 +1,64 @@
+.. include:: ../common_links.inc
+
+.. _contributing.benchmarks:
+
+Benchmarking
+============
+Iris includes architecture for benchmarking performance and other metrics of
+interest. This is done using the `Airspeed Velocity`_ (ASV) package.
+
+
+.. note:: Full detail on the setup and how to run or write benchmarks is in
+ `benchmarks/README.md`_ in the Iris repository.
+
+Continuous Integration
+----------------------
+The primary purpose of `Airspeed Velocity`_, and Iris' specific benchmarking
+setup, is to monitor for performance changes using statistical comparison
+between commits, and this forms part of Iris' continuous integration.
+
+Accurately assessing performance takes longer than functionality pass/fail
+tests, so the benchmark suite is not automatically run against open pull
+requests. Instead it is **run overnight against each of the commits of the
+previous day** to check if any commit has introduced performance shifts.
+Detected shifts are reported in a new Iris GitHub issue.
+
+.. _on_demand_pr_benchmark:
+
+If a pull request author/reviewer suspects their changes may cause performance
+shifts, they can manually order their pull request to be benchmarked by adding
+the ``benchmark_this`` label to the PR. Read more in `benchmarks/README.md`_.
+
+Other Uses
+----------
+Even when not statistically comparing commits, ASV's accurate execution time
+results - recorded using a sophisticated system of repeats - have other
+applications.
+
+* Absolute numbers can be interpreted providing they are recorded on a
+ dedicated resource.
+* Results for a series of commits can be visualised for an intuitive
+ understanding of when and why changes occurred.
+
+ .. image:: asv_example_images/commits.png
+ :width: 300
+
+* Parameterised benchmarks make it easy to visualise:
+
+ * Comparisons
+
+ .. image:: asv_example_images/comparison.png
+ :width: 300
+
+ * Scalability
+
+ .. image:: asv_example_images/scalability.png
+ :width: 300
+
+This isn't limited to execution times: ASV can also measure memory demand,
+and even arbitrary numbers (e.g. file size, regridding accuracy), although
+without the repetition logic that execution timing has.
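+
+For orientation only, a minimal sketch of the shape of an ASV benchmark is
+shown below; the class, method and parameter names are invented for
+illustration, and `benchmarks/README.md`_ describes the conventions actually
+used in Iris::
+
+    import numpy as np
+
+
+    class ExampleSuite:
+        """ASV collects classes of benchmarks and times their ``time_*`` methods."""
+
+        # Parameterised benchmarks: each method is run once per parameter value.
+        params = [1_000, 1_000_000]
+        param_names = ["size"]
+
+        def setup(self, size):
+            # ``setup`` runs before each measurement and is excluded from timing.
+            self.data = np.zeros(size)
+
+        def time_sum(self, size):
+            # Only the body of a ``time_*`` method is timed.
+            self.data.sum()
+
+        def track_nbytes(self, size):
+            # ``track_*`` methods record arbitrary numbers rather than timings.
+            return self.data.nbytes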
+
+
+.. _Airspeed Velocity: https://github.com/airspeed-velocity/asv
+.. _benchmarks/README.md: https://github.com/SciTools/iris/blob/main/benchmarks/README.md
diff --git a/docs/src/developers_guide/contributing_changes.rst b/docs/src/developers_guide/contributing_changes.rst
new file mode 100644
index 0000000000..48357874a7
--- /dev/null
+++ b/docs/src/developers_guide/contributing_changes.rst
@@ -0,0 +1,11 @@
+
+.. _contributing.changes:
+
+Contributing Your Changes
+=========================
+
+.. toctree::
+ :maxdepth: 3
+
+ documenting/whats_new_contributions
+ contributing_pull_request_checklist
diff --git a/docs/src/developers_guide/contributing_ci_tests.rst b/docs/src/developers_guide/contributing_ci_tests.rst
new file mode 100644
index 0000000000..542178c2ff
--- /dev/null
+++ b/docs/src/developers_guide/contributing_ci_tests.rst
@@ -0,0 +1,151 @@
+.. include:: ../common_links.inc
+
+.. _developer_testing_ci:
+
+Continuous Integration (CI) Testing
+===================================
+
+.. note:: Iris is currently supported and tested against |python_support|
+ running on Linux. We do not currently actively test on other
+ platforms such as Windows or macOS.
+
+The `Iris`_ GitHub repository is configured to run checks against all its
+branches automatically whenever a pull-request is created, updated or merged.
+The checks performed are:
+
+* :ref:`testing_gha`
+* :ref:`testing_cla`
+* :ref:`pre_commit_ci`
+
+
+.. _testing_gha:
+
+GitHub Actions
+**************
+
+Iris unit and integration tests are an essential mechanism to ensure
+that the Iris code base is working as expected. :ref:`developer_running_tests`
+may be performed manually by a developer locally. However, Iris is configured to
+use `GitHub Actions`_ (GHA) for automated Continuous Integration (CI) testing.
+
+The Iris GHA YAML configuration files in the ``.github/workflows`` directory
+define the CI tasks to be performed. For further details
+refer to the `GitHub Actions`_ documentation. The tasks performed during CI include:
+
+* running the system, integration and unit tests for Iris
+* ensuring the documentation gallery builds successfully
+* performing all doc-tests within the code base
+* checking all URL references within the code base and documentation are valid
+
+The above GHA tasks are run automatically against all `Iris`_ branches
+on GitHub whenever a pull-request is submitted, updated or merged. See the
+`Iris GitHub Actions`_ dashboard for details of recent past and active CI jobs.
+
+
+.. _gha_test_env:
+
+GitHub Actions Test Environment
+-------------------------------
+
+The CI test environments for our GHA are determined from the requirement files
+in ``requirements/pyXX.yml``. These are conda environment files that list the top-level
+package dependencies for running and testing Iris.
+
+For reproducible test results, these environments are resolved for all their dependencies
+and stored as conda lock files in the ``requirements/locks`` directory. The test environments
+will not resolve the dependencies each time; instead they will use the lock files to reproduce the
+exact same environment each time.
+
+**If you have updated the requirement YAML files with new dependencies, you will need to
+generate new lock files.** To do this, run the command::
+
+ python tools/update_lockfiles.py -o requirements/locks requirements/py*.yml
+
+or simply::
+
+ make lockfiles
+
+and add the changed lockfiles to your pull request.
+
+New lockfiles are generated automatically each week to ensure that Iris continues to be
+tested against the latest available version of its dependencies.
+Each week the yaml files in ``requirements`` are resolved by a GitHub Action.
+If the resolved environment has changed, a pull request is created with the new lock files.
+The CI test suite will run on this pull request. If the tests fail, a developer
+will need to create a new branch based off the ``auto-update-lockfiles`` branch
+and add the required fixes to this new branch. If the fixes are made to the
+``auto-update-lockfiles`` branch, these will be overwritten the next time the
+GitHub Action is run.
+
+
+GitHub Checklist
+----------------
+
+An example snapshot from a successful GitHub pull-request shows all tests
+passing:
+
+.. image:: ci_checks.png
+
+If any CI tasks fail, then the pull-request is unlikely to be merged to the
+Iris target branch by a core developer.
+
+
+.. _testing_cla:
+
+`CLA Assistant`_
+****************
+
+A bot which checks that the GitHub authors of the pull-request have signed the
+|SciTools Contributor's License Agreement (CLA)|_.
+
+
+.. _pre_commit_ci:
+
+pre-commit CI
+*************
+
+A CI service for the `pre-commit`_ framework that checks and auto-fixes all
+pull-requests, based on the `.pre-commit-config.yaml`_ in the `Iris`_ GitHub repository.
+
+See the `pre-commit.ci dashboard`_ for details of recent past and active Iris jobs.
+
+.. note::
+
+ The `codespell`_ ``pre-commit`` hook checks the spelling of the whole codebase
+ and documentation. This hook is configured in the ``[tool.codespell]`` section
+ of the ``pyproject.toml`` file.
+
+ Append to the ``ignore-words-list`` option any **valid words** that are
+ considered **not** a typo and should **not** be corrected by `codespell`_.
+
+ruff
+----
+As of **Iris 3.8** `ruff`_ has been adopted to ensure our codebase is using best
+practice. `ruff`_ is configured in the `Iris`_ GitHub repository using
+`.pre-commit-config.yaml`_.
+
+You can install and run `ruff`_ in your development **iris-dev** conda environment
+via::
+
+ conda activate iris-dev
+ pip install ruff
+ cd iris
+ ruff .
+
+.. note::
+
+ The `ruff`_ ``pre-commit`` hook checks for compliance of the whole codebase.
+ This hook is configured in the ``[tool.ruff]`` section
+ of the ``pyproject.toml`` file.
+
+ Edit the ``.ruff.toml`` file to include any *temporary* rules to be ignored.
+ Edit the ``pyproject.toml`` to include any *permanent* rules to be ignored.
+ We aim to be as `ruff`_ compliant as possible.
+
+For more information on how to use `ruff`_ please see the `ruff documentation`_.
+
+
+.. _.pre-commit-config.yaml: https://github.com/SciTools/iris/blob/main/.pre-commit-config.yaml
+.. _pre-commit.ci dashboard: https://results.pre-commit.ci/repo/github/5312648
+.. _CLA Assistant: https://github.com/cla-assistant/cla-assistant
+.. |SciTools Contributor's License Agreement (CLA)| replace:: **SciTools Contributor's License Agreement (CLA)**
+.. _ruff documentation: https://docs.astral.sh/ruff/tutorial/
diff --git a/docs/src/developers_guide/contributing_code_formatting.rst b/docs/src/developers_guide/contributing_code_formatting.rst
new file mode 100644
index 0000000000..bb3140e4f9
--- /dev/null
+++ b/docs/src/developers_guide/contributing_code_formatting.rst
@@ -0,0 +1,69 @@
+.. include:: ../common_links.inc
+
+.. _code_formatting:
+
+Code Formatting
+===============
+
+To ensure a consistent code format throughout Iris, we recommend using
+tools to check the source directly.
+
+* `black`_ for an opinionated coding auto-formatter
+* `flake8`_ linting checks
+
+The preferred way to run these tools automatically is to set up and configure
+`pre-commit`_.
+
+You can install ``pre-commit`` in your development environment using ``pip``::
+
+ $ pip install pre-commit
+
+or alternatively using ``conda``::
+
+ $ conda install -c conda-forge pre-commit
+
+.. note:: If you have set up your Python environment using the guide
+ :ref:`installing_from_source` then ``pre-commit`` should already
+ be present.
+
+In order to install the ``pre-commit`` git hooks defined in our
+``.pre-commit-config.yaml`` file, you must now run the following command from
+the root directory of Iris::
+
+ $ pre-commit install
+
+Upon performing a ``git commit``, your code will now be automatically formatted
+to the ``black`` configuration defined in our ``pyproject.toml`` file, and
+linted according to our ``.flake8`` configuration file. Note that
+``pre-commit`` will automatically download and install the necessary packages
+for each ``.pre-commit-config.yaml`` git hook.
+
+Additionally, you may wish to enable ``black`` for your preferred
+`editor/IDE `_.
+
+With ``pre-commit`` configured, the output of performing a ``git commit``
+will look similar to::
+
+ Check for added large files..............................................Passed
+ Check for merge conflicts................................................Passed
+ Debug Statements (Python)............................(no files to check)Skipped
+ Don't commit to branch...................................................Passed
+ black................................................(no files to check)Skipped
+ flake8...............................................(no files to check)Skipped
+ [contribution_overhaul c8513187] this is my commit message
+ 2 files changed, 10 insertions(+), 9 deletions(-)
+
+
+.. note:: You can also run `black`_ and `flake8`_ manually. Please see
+ their official documentation for more information.
+
+Type Hinting
+------------
+Iris is gradually adding
+`type hints `_ into the
+codebase. The reviewer will look for type hints in a pull request; if you're
+not confident with these, feel free to work together with the reviewer to
+add/improve them.
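+
+For illustration only (the function below is made up and is not part of Iris),
+annotations typically look something like::
+
+    from typing import Optional
+
+
+    def scale(values: list[float], factor: float = 1.0,
+              label: Optional[str] = None) -> list[float]:
+        """Return ``values`` multiplied by ``factor``, optionally printing ``label``."""
+        if label is not None:
+            print(f"scaling: {label}")
+        return [value * factor for value in values]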
+
+
+.. _pre-commit: https://pre-commit.com/
diff --git a/docs/src/developers_guide/contributing_codebase_index.rst b/docs/src/developers_guide/contributing_codebase_index.rst
new file mode 100644
index 0000000000..b59a196ff0
--- /dev/null
+++ b/docs/src/developers_guide/contributing_codebase_index.rst
@@ -0,0 +1,13 @@
+.. _contributing.documentation.codebase:
+
+Working with the Code Base
+==========================
+
+.. toctree::
+ :maxdepth: 3
+
+ contributing_code_formatting
+ documenting/docstrings
+ documenting/rest_guide
+ contributing_deprecations
+ contributing_testing_index
diff --git a/docs/src/developers_guide/contributing_deprecations.rst b/docs/src/developers_guide/contributing_deprecations.rst
new file mode 100644
index 0000000000..8c5cb21feb
--- /dev/null
+++ b/docs/src/developers_guide/contributing_deprecations.rst
@@ -0,0 +1,120 @@
+.. _iris_development_deprecations:
+
+Deprecations
+************
+
+If you need to make a backwards-incompatible change to a public API
+[#public-api]_ that has been included in a release (e.g. deleting a
+method), then you must first deprecate the old behaviour in at least
+one release, before removing/updating it in the next
+`major release `_.
+
+
+Adding a Deprecation
+====================
+
+.. _removing-a-public-api:
+
+Removing a Public API
+---------------------
+
+The simplest form of deprecation occurs when you need to remove a public
+API. The public API in question is deprecated for a period before it is
+removed to allow time for user code to be updated. Sometimes the
+deprecation is accompanied by the introduction of a new public API.
+
+Under these circumstances the following points apply:
+
+- Using the deprecated API must result in a concise deprecation warning which
+ is an instance of :class:`iris.IrisDeprecation`.
+ It is easiest to call
+ :func:`iris._deprecation.warn_deprecated`, which is a
+ simple wrapper to :func:`warnings.warn` with the signature
+ `warn_deprecated(message, **kwargs)` (see the sketch after this list).
+- Where possible, your deprecation warning should include advice on
+ how to avoid using the deprecated API. For example, you might
+ reference a preferred API, or more detailed documentation elsewhere.
+- You must update the docstring for the deprecated API to include a
+ Sphinx deprecation directive:
+
+ :literal:`.. deprecated:: <VERSION>`
+
+ where you should replace ``<VERSION>`` with the major and minor version
+ of Iris in which this API is first deprecated. For example: `1.8`.
+
+ As with the deprecation warning, you should include advice on how to
+ avoid using the deprecated API within the content of this directive.
+ Feel free to include more detail in the updated docstring than in the
+ deprecation warning.
+- You should check the documentation for references to the deprecated
+ API and update them as appropriate.
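+
+The following sketch illustrates this pattern; the class, method names and
+version number are invented for illustration and are not real Iris API::
+
+    from iris._deprecation import warn_deprecated
+
+
+    class Example:
+        def old_method(self):
+            """Do the old thing.
+
+            .. deprecated:: 3.4
+                Use :meth:`Example.new_method` instead.
+
+            """
+            warn_deprecated(
+                "Example.old_method() is deprecated and will be removed in a "
+                "future release. Please use Example.new_method() instead."
+            )
+            return self.new_method()
+
+        def new_method(self):
+            """Do the new thing."""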
+
+Changing a Default
+------------------
+
+When you need to change the default behaviour of a public API the
+situation is slightly more complex. The recommended solution is to use
+the :data:`iris.FUTURE` object. The :data:`iris.FUTURE` object provides
+boolean attributes that allow user code to control at run-time the
+default behaviour of corresponding public APIs. When a boolean attribute
+is set to `False` it causes the corresponding public API to use its
+deprecated default behaviour. When a boolean attribute is set to `True`
+it causes the corresponding public API to use its new default behaviour.
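+
+For example (``example_future_flag`` is an invented name used purely for
+illustration, not a real :data:`iris.FUTURE` attribute)::
+
+    import iris
+
+    # Opt in to the new behaviour before it becomes the default.
+    iris.FUTURE.example_future_flag = True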
+
+The following points apply in addition to those for removing a public
+API:
+
+- You should add a new boolean attribute to :data:`iris.FUTURE` (by
+ modifying :class:`iris.Future`) that controls the default behaviour
+ of the public API that needs updating. The initial state of the new
+ boolean attribute should be `False`. You should name the new boolean
+ attribute to indicate that setting it to `True` will select the new
+ default behaviour.
+- You should include a reference to this :data:`iris.FUTURE` flag in your
+ deprecation warning and corresponding Sphinx deprecation directive.
+
+
+Removing a Deprecation
+======================
+
+When the time comes to make a new major release you should locate any
+deprecated APIs within the code that satisfy the one release
+minimum period described previously. Locating deprecated APIs can easily
+be done by searching for the Sphinx deprecation directives and/or
+deprecation warnings.
+
+Removing a Public API
+---------------------
+
+The deprecated API should be removed and any corresponding documentation
+and/or example code should be removed/updated as appropriate.
+
+.. _iris_developer_future:
+
+Changing a Default
+------------------
+
+- You should update the initial state of the relevant boolean attribute
+ of :data:`iris.FUTURE` to `True`.
+- You should deprecate setting the relevant boolean attribute of
+ :class:`iris.Future` in the same way as described in
+ :ref:`removing-a-public-api`.
+
+
+.. rubric:: Footnotes
+
+.. [#public-api] A name without a leading underscore in any of its
+ components, with the exception of the :mod:`iris.experimental` and
+ :mod:`iris.tests` packages.
+
+ Example public names are:
+ - `iris.this.`
+ - `iris.this.that`
+
+ Example private names are:
+ - `iris._this`
+ - `iris.this._that`
+ - `iris._this.that`
+ - `iris._this._that`
+ - `iris.experimental.something`
+ - `iris.tests.get_data_path`
diff --git a/docs/src/developers_guide/contributing_documentation.rst b/docs/src/developers_guide/contributing_documentation.rst
new file mode 100644
index 0000000000..e289b1548d
--- /dev/null
+++ b/docs/src/developers_guide/contributing_documentation.rst
@@ -0,0 +1,21 @@
+
+How to Contribute to the Documentation
+--------------------------------------
+
+Documentation is important and we encourage any improvements that can be made.
+If you believe the documentation is not clear please contribute a change to
+improve the documentation for all users.
+
+If you're confident diving right in, please head for
+:ref:`contributing.documentation_full`.
+
+If you're not, then we've got a step-by-step guide to walk you through it:
+:ref:`contributing.documentation_easy`.
+
+.. toctree::
+ :maxdepth: 1
+ :hidden:
+
+ contributing_documentation_easy
+ contributing_documentation_full
+
\ No newline at end of file
diff --git a/docs/src/developers_guide/contributing_documentation_easy.rst b/docs/src/developers_guide/contributing_documentation_easy.rst
new file mode 100755
index 0000000000..a0513fe560
--- /dev/null
+++ b/docs/src/developers_guide/contributing_documentation_easy.rst
@@ -0,0 +1,103 @@
+
+.. include:: ../common_links.inc
+
+.. _contributing.documentation_easy:
+
+Contributing to the Documentation (the easy way)
+------------------------------------------------
+
+Documentation is important and we encourage any improvements that can be made.
+If you believe the documentation is not clear please contribute a change to
+improve the documentation for all users.
+
+The guide below is designed to be accessible to those with little-to-no
+knowledge of programming and GitHub. If you find that something doesn't work as
+described or could use more explanation then please let us know (or contribute
+the improvement yourself)!
+
+First Time Only Steps
+^^^^^^^^^^^^^^^^^^^^^
+
+1. Create a `GitHub <https://github.com>`_ account.
+
+2. Complete the `SciTools Contributor's License Agreement (CLA)`_.
+ This is a one-off requirement for anyone who wishes to contribute to a
+ SciTools repository - including the documentation.
+
+Steps to Complete Each Time You Propose Changes
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+1. Navigate to the documentation page that you want to edit (on this site).
+
+2. Click the ``Edit on GitHub`` button at the **top right** of the page.
+
+.. image:: edit_on_github.png
+
+3. In the resulting GitHub page select **main** from the ``Switch
+ branches/tags`` drop-down menu near the **top left** of the page (to the left
+ of the ``iris / docs / src / ...`` links) if it isn't already. This changes
+ the branch to **main**.
+
+.. image:: find_main.png
+
+4. Click the pencil symbol near the **top right** (to the right of the ``Raw``
+ and ``Blame`` buttons).
+
+.. image:: edit_button.png
+
+5. Make your edits! Try to strike a balance between informing the audience
+ enough that they understand and overwhelming them with information.
+
+.. note::
+
+ You may see the following message at the top of the edit page, informing you
+ that GitHub has created a ``fork`` (or copy) of the project for you, as a
+ precursor to allowing you to edit the page. Your changes will be merged into
+ the main version of the documentation later.
+
+ .. image:: fork_banner.png
+
+6. Scroll to the bottom of the edit page and enter some appropriate information
+ in the two boxes under ``Propose changes``. You can just keep the default text
+ if you like or enter something more specific - a short sentence explaining
+ what's changed is fine. Then click the ``Propose changes`` button.
+
+.. image:: propose_changes.png
+
+7. In the resulting page titled ``Pull Request``, write a brief description of
+ what you've changed underneath the following three lines:
+
+.. code::
+
+ ### Description
+
+
+
+Describing what you've changed and why will help the person who reviews your changes.
+
+.. image:: pull_request.png
+
+8. Click the ``Create pull request`` button.
+
+.. tip::
+
+ If you're not sure that you're making your pull request right, or have a
+ question, then make it anyway! You can then comment on it to ask your
+ question, and someone from the dev team will be happy to help you out (you
+ can then edit your pull request if you need to).
+
+What Happens Next?
+^^^^^^^^^^^^^^^^^^
+
+Another Iris contributor will review your changes (this happens for everyone who
+makes changes to Iris or its documentation). The reviewer might make comments or
+ask questions (don't worry about missing these, GitHub will email you to let you
+know). You can respond to these comments underneath where they appear in GitHub.
+
+Once you've worked everything out together, the reviewer will merge your changes
+into the main version of the documentation so that they're accessible for
+everyone to benefit from.
+
+**You've now contributed to the Iris documentation!** If you've caught the bug
+and want to get more involved (or you're just interested in what that would mean)
+then chat to the person reviewing your code or another Iris contributor.
\ No newline at end of file
diff --git a/docs/src/developers_guide/contributing_documentation_full.rst b/docs/src/developers_guide/contributing_documentation_full.rst
new file mode 100755
index 0000000000..5cb5269fa1
--- /dev/null
+++ b/docs/src/developers_guide/contributing_documentation_full.rst
@@ -0,0 +1,171 @@
+.. include:: ../common_links.inc
+
+.. _contributing.documentation_full:
+
+Contributing to the Documentation
+---------------------------------
+
+This guide is for those comfortable with the development process, looking for
+the specifics of how to apply that knowledge to Iris. You may instead find it
+easier to use the :ref:`contributing.documentation_easy`.
+
+Any change to the Iris project whether it is a bugfix, new feature or
+documentation update must use the :ref:`development-workflow`.
+
+
+Requirements
+~~~~~~~~~~~~
+
+The documentation uses specific packages that need to be present. Please see
+:ref:`installing_iris` for instructions.
+
+
+.. _contributing.documentation.building:
+
+Building
+~~~~~~~~
+
+This documentation was built using the latest Python version that Iris
+supports. For more information see :ref:`installing_iris`.
+
+The build can be run from the documentation directory ``docs/src``.
+
+The build output for the html is found in the ``_build/html`` sub directory.
+When updating the documentation ensure the html build has *no errors* or
+*warnings* otherwise it may fail the automated `Iris GitHub Actions`_ build.
+
+Once the build is complete, if it is rerun it will only rebuild the impacted
+build artefacts so should take less time.
+
+There is an option to perform a build but skip the
+:ref:`contributing.documentation.gallery` creation completely. This can be
+achieved via::
+
+ make html-noplot
+
+Another option is to skip the :doc:`../generated/api/iris` documentation creation. This can be
+useful as it reduces the time to build the documentation; however, you may have
+some build warnings as there may be references to the API documentation.
+This can be achieved via::
+
+ make html-noapi
+
+You can combine both the above and skip the
+:ref:`contributing.documentation.gallery` and :doc:`../generated/api/iris`
+documentation completely. This can be achieved via::
+
+ make html-quick
+
+If you wish to run a full clean build you can run::
+
+ make clean
+ make html
+
+This is useful for a final test before committing your changes. Having built
+the documentation, you can view it in your default browser via::
+
+ make show
+
+.. note:: In order to preserve a clean build for the html, all **warnings**
+ have been promoted to be **errors** to ensure they are addressed.
+ This **only** applies when ``make html`` is run.
+
+.. _contributing.documentation.testing:
+
+Testing
+~~~~~~~
+
+There are various ways to test aspects of the documentation.
+
+Each :ref:`contributing.documentation.gallery` entry has a corresponding test.
+To run all the gallery tests::
+
+ pytest -v docs/gallery_tests/test_gallery_examples.py
+
+To run a test for a single gallery example, use the ``pytest -k`` option for
+pattern matching, e.g.::
+
+ pytest -v -k plot_coriolis docs/gallery_tests/test_gallery_examples.py
+
+If a gallery test fails, follow the instructions in :ref:`testing.graphics`.
+
+The ``make`` commands shown below can be run in the ``docs`` or ``docs/src``
+directory.
+
+Many documentation pages include Python code that can be run to ensure it is
+still valid or to demonstrate examples. To ensure these tests pass, run::
+
+ make doctest
+
+See :data:`iris.cube.Cube.data` for an example of using the `doctest`_
+approach.
+
+.. _doctest: https://www.sphinx-doc.org/en/stable/ext/doctest.html
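+
+For orientation, a doctest is simply an interactive session embedded in the
+documentation; ``make doctest`` runs the statements and checks that the output
+matches. The snippet below is illustrative only and is not taken from the
+Iris documentation::
+
+    >>> numbers = [1, 2, 3]
+    >>> sum(numbers)
+    6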
+
+The hyperlinks in the documentation can be checked automatically.
+If a link is known to work but is reported as broken, it can be excluded from
+the checks by adding it to the ``linkcheck_ignore`` array that is defined in the
+`conf.py`_. The hyperlink check can be run via::
+
+ make linkcheck
+
+If this fails, check the output for the text **broken** and then correct
+or ignore the URL.
+
+.. note:: In addition to the automated `Iris GitHub Actions`_ build of all the
+ documentation build options above, the
+ https://readthedocs.org/ service is also used. The configuration
+ of this is held in a file in the root of the
+ `Iris GitHub project <https://github.com/SciTools/iris>`_ named
+ ``.readthedocs.yml``.
+
+
+.. _conf.py: https://github.com/SciTools/iris/blob/main/docs/src/conf.py
+
+
+.. _contributing.documentation.api:
+
+Generating API Documentation
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In order to auto-generate the API documentation based upon the docstrings, a
+custom set of Python scripts is used; these are located in the directory
+``docs/src/sphinxext``. Once the ``make html`` command has been run,
+the output of these scripts can be found in
+``docs/src/generated/api``.
+
+If there is a particularly troublesome module that breaks ``make html``, you
+can exclude the module from the API documentation by adding an entry to the
+``exclude_modules`` tuple in the
+``docs/src/sphinxext/generate_package_rst.py`` file.
+
+
+.. _contributing.documentation.gallery:
+
+Gallery
+~~~~~~~
+
+The Iris :ref:`gallery_index` uses a sphinx extension named
+`sphinx-gallery `_
+that auto-generates reStructuredText (rst) files based upon a gallery source
+directory that abides by a directory and filename convention.
+
+The code for the gallery entries is in ``docs/gallery_code``.
+Each sub directory in this directory is a sub section of the gallery. The
+respective ``README.rst`` in each folder is included in the gallery output.
+
+To add an entry to the gallery, simply place your Python code into the
+appropriate sub directory and name it with a prefix of ``plot_``. If your
+gallery entry does not fit into any existing sub directories then create a new
+directory and place it in there. A test for the gallery entry will be
+automatically generated (see Testing_ for how to run it). To add a new
+reference image for this test, follow the instructions in
+:ref:`testing.graphics`.
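+
+A minimal sketch of the shape of a gallery entry, saved under an appropriate
+sub directory with a ``plot_`` prefix (the title and plot here are invented
+for illustration)::
+
+    """
+    My Example Plot
+    ===============
+
+    A short description that sphinx-gallery renders above the code.
+
+    """
+
+    import matplotlib.pyplot as plt
+
+    plt.plot([0, 1, 2], [0, 1, 4])
+    plt.show()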
+
+The reStructuredText (rst) output of the gallery is located in
+``docs/src/generated/gallery``.
+
+For more information on the directory structure and options please see the
+`sphinx-gallery getting started
+`_ documentation.
diff --git a/docs/src/developers_guide/contributing_getting_involved.rst b/docs/src/developers_guide/contributing_getting_involved.rst
new file mode 100644
index 0000000000..9da6cd13eb
--- /dev/null
+++ b/docs/src/developers_guide/contributing_getting_involved.rst
@@ -0,0 +1,65 @@
+.. include:: ../common_links.inc
+
+.. _development_where_to_start:
+.. _developers_guide:
+
+Developers Guide
+----------------
+
+Iris_ is an Open Source project hosted on GitHub, and as such anyone with a
+GitHub account may create an `issue`_ on our `Iris GitHub`_ project page for
+raising:
+
+* bug reports
+* feature requests
+* documentation improvements
+
+The `Iris GitHub`_ project has been configured to use templates for each of
+the above issue types when creating a `new issue`_ to ensure the appropriate
+information is provided.
+
+Alternatively, **join the conversation** in Iris `GitHub Discussions`_, when
+you would like the opinions of the Iris community.
+
+A `pull request`_ may also be created by anyone who has become a
+|contributor|_ to Iris_. Permissions to merge pull requests to the
+``main`` branch are only given to |core developers|_ of Iris_; this is
+to ensure a measure of control. All authors on a pull request will
+automatically be asked to sign the
+`SciTools Contributor's License Agreement (CLA)`_, if they have not already
+done so.
+
+To get started we suggest reading recent `issues`_, `GitHub Discussions`_ and
+`pull requests`_ for Iris.
+
+If you are new to using GitHub, we recommend reading the
+`GitHub getting started`_ documentation.
+
+.. _GitHub getting started: https://docs.github.com/en/github/getting-started-with-github
+.. |contributor| replace:: **contributor**
+.. |core developers| replace:: **core developers**
+
+
+.. toctree::
+ :maxdepth: 1
+ :caption: Developers Guide
+ :name: development_index
+ :hidden:
+
+ gitwash/index
+ contributing_documentation
+ contributing_codebase_index
+ contributing_changes
+ github_app
+ release
+
+
+.. toctree::
+ :maxdepth: 1
+ :caption: Reference
+ :hidden:
+
+ ../generated/api/iris
+ ../whatsnew/index
+ ../copyright
+ ../voted_issues
diff --git a/docs/src/developers_guide/contributing_graphics_tests.rst b/docs/src/developers_guide/contributing_graphics_tests.rst
new file mode 100644
index 0000000000..1e42c35ae6
--- /dev/null
+++ b/docs/src/developers_guide/contributing_graphics_tests.rst
@@ -0,0 +1,142 @@
+.. include:: ../common_links.inc
+
+.. _testing.graphics:
+
+Adding or Updating Graphics Tests
+=================================
+
+.. note::
+
+ If a large number of image tests are failing due to an update to the
+ libraries used for image hashing, follow the instructions on
+ :ref:`refresh-imagerepo`.
+
+Generating New Results
+----------------------
+
+When you find that a graphics test in the Iris testing suite has failed,
+following changes in Iris or the run dependencies, this is the process
+you should follow:
+
+#. Create a new, empty directory to store temporary image results, at the path
+ ``lib/iris/tests/result_image_comparison`` in your Iris repository checkout.
+
+#. Run the relevant (failing) tests directly as python scripts, or using
+ ``pytest``.
+
+The results of the failing image tests will now be available in
+``lib/iris/tests/result_image_comparison``.
+
+.. note::
+
+ The ``result_image_comparison`` folder is covered by a project
+ ``.gitignore`` setting, so those files *will not show up* in a
+ ``git status`` check.
+
+Reviewing Failing Tests
+-----------------------
+
+#. Run ``iris/lib/iris/tests/graphics/idiff.py`` with python, e.g.::
+
+ python idiff.py
+
+ This will open a window for you to visually inspect
+ side-by-side **old**, **new** and **difference** images for each failed
+ graphics test. Hit a button to either :guilabel:`accept`,
+ :guilabel:`reject` or :guilabel:`skip` each new result.
+
+ If the change is **accepted**:
+
+ * the imagehash value of the new result image is added into the relevant
+ set of 'valid result hashes' in the image result database file,
+ ``tests/results/imagerepo.json``
+
+ * the relevant output file in ``tests/result_image_comparison`` is renamed
+ according to the test name. A copy of this new PNG file must then be added
+ into the ``iris-test-data`` repository, at
+ https://github.com/SciTools/iris-test-data (See below).
+
+ If a change is **skipped**:
+
+ * no further changes are made in the repo.
+
+ * when you run ``iris/tests/idiff.py`` again, the skipped choice will be
+ presented again.
+
+ If a change is **rejected**:
+
+ * the output image is deleted from ``result_image_comparison``.
+
+ * when you run ``iris/tests/idiff.py`` again, the rejected result will not
+ appear, unless the relevant failing test is re-run.
+
+#. **Now re-run the tests**. The **new** result should now be recognised and the
+ relevant test should pass. However, some tests can perform *multiple*
+ graphics checks within a single test case function. In those cases, any
+ failing check will prevent the following ones from being run, so a test
+ re-run may encounter further (new) graphical test failures. If that
+ happens, simply repeat the check-and-accept process until all tests pass.
+
+#. You're now ready to :ref:`add-graphics-test-changes`
+
+
+Adding a New Image Test
+-----------------------
+
+If you attempt to run ``idiff.py`` when there are new graphical tests for which
+no baseline yet exists, you will get a warning that ``idiff.py`` is ``Ignoring
+unregistered test result...``. In this case,
+
+#. rename the relevant images from ``iris/tests/result_image_comparison`` by
+
+ * removing the ``result-`` prefix
+
+ * fully qualifying the test name if it isn't already (i.e. it should start
+ ``iris.tests...`` or ``gallery_tests...``)
+
+#. run the tests in the mode that lets them create missing data (see
+ :ref:`create-missing`). This will update ``imagerepo.json`` with the new
+ test name and image hash.
+
+#. and then add them to the Iris test data as covered in
+ :ref:`add-graphics-test-changes`.
+
+
+.. _refresh-imagerepo:
+
+Refreshing the Stored Hashes
+----------------------------
+
+From time to time, a new version of the image hashing library will cause all
+image hashes to change. The image hashes stored in
+``tests/results/imagerepo.json`` can be refreshed using the baseline images
+stored in the ``iris-test-data`` repository (at
+https://github.com/SciTools/iris-test-data) using the script
+``tests/graphics/recreate_imagerepo.py``. Use the ``--help`` argument for the
+command line arguments.
+
+
+.. _add-graphics-test-changes:
+
+Add Your Changes to Iris
+------------------------
+
+To add your changes to Iris, you need to make two pull requests (PR).
+
+#. The first PR is made in the ``iris-test-data`` repository, at
+ https://github.com/SciTools/iris-test-data.
+
+ * Add all the newly-generated referenced PNG files into the
+ ``test_data/images`` directory. In your Iris repo, these files are to be found
+ in the temporary results folder ``iris/tests/result_image_comparison``.
+
+ * Create a PR proposing these changes, in the usual way.
+
+#. The second PR is the one that makes the changes you intend to the Iris_ repository.
+ The description box of this pull request should contain a reference to
+ the matching one in ``iris-test-data``.
+
+ * This PR should include updating the version of the test data in
+ ``.github/workflows/ci-tests.yml`` and
+ ``.github/workflows/ci-docs-tests.yml`` to the new version created by the
+ merging of your ``iris-test-data`` PR.
diff --git a/docs/src/developers_guide/contributing_pull_request_checklist.rst b/docs/src/developers_guide/contributing_pull_request_checklist.rst
new file mode 100644
index 0000000000..11d68ace46
--- /dev/null
+++ b/docs/src/developers_guide/contributing_pull_request_checklist.rst
@@ -0,0 +1,61 @@
+.. include:: ../common_links.inc
+
+.. _pr_check:
+
+Pull Request Checklist
+======================
+
+All pull requests will be reviewed by a core developer who will manage the
+process of merging. It is the responsibility of the contributor submitting a
+pull request to do their best to deliver a pull request which meets the
+requirements of the project it is submitted to.
+
+This checklist summarises the criteria which will be checked before a pull request
+is merged. Before submitting a pull request, please consider the following:
+
+
+#. **Provide a helpful description** of the Pull Request. This should include:
+
+ * The aim of the change / the problem addressed / a link to the issue.
+ * How the change has been delivered.
+
+#. **Include a "What's New" entry**, if appropriate.
+ See :ref:`whats_new_contributions`.
+
+#. **Check all tests pass**. This includes existing tests and any new tests
+ added for any new functionality. For more information see
+ :ref:`developer_running_tests`.
+
+#. **Check all modified and new source files conform to the required**
+ :ref:`code_formatting`.
+
+#. **Check all new dependencies added to the** `requirements`_ **yaml
+ files.** If dependencies have been added, then new nox testing lockfiles
+ should be generated too; see :ref:`gha_test_env`.
+
+#. **Check the source documentation has been updated to explain all new or changed
+ features**. Note, we now use numpydoc strings. Any touched code should
+ be updated to use the docstrings formatting. See :ref:`docstrings`.
+
+#. **Include code examples inside the docstrings where appropriate**. See
+ :ref:`contributing.documentation.testing`.
+
+#. **Check the documentation builds without warnings or errors**. See
+ :ref:`contributing.documentation.building`
+
+#. **Check for any new dependencies in the** `readthedocs.yml`_ **file**. This
+ file is used to build the documentation that is served from
+ https://scitools-iris.readthedocs.io/en/latest/
+
+#. **Check for updates needed for supporting projects for test or example
+ data**. For example:
+
+ * `iris-test-data`_ is a github project containing all the data to support
+ the tests.
+ * `iris-sample-data`_ is a github project containing all the data to support
+ the gallery and examples.
+
+ If new files are required by tests or code examples, they must be added to
+ the appropriate supporting project via a suitable pull-request. This pull
+ request should be referenced in the main Iris pull request and must be
+ accepted and merged before the Iris one can be.
diff --git a/docs/src/developers_guide/contributing_running_tests.rst b/docs/src/developers_guide/contributing_running_tests.rst
new file mode 100644
index 0000000000..f60cedba05
--- /dev/null
+++ b/docs/src/developers_guide/contributing_running_tests.rst
@@ -0,0 +1,197 @@
+.. include:: ../common_links.inc
+
+.. _developer_running_tests:
+
+Running the Tests
+*****************
+
+There are two options for running the tests:
+
+* Use an environment you created yourself. This requires more manual steps to
+ set up, but gives you more flexibility. For example, you can run a subset of
+ the tests or use ``python`` interactively to investigate any issues. See
+ :ref:`test manual env`.
+
+* Use ``nox``. This will automatically generate an environment and run test
+ sessions consistent with our GitHub continuous integration. See :ref:`using nox`.
+
+.. _test manual env:
+
+Testing Iris in a Manually Created Environment
+==============================================
+
+To create a suitable environment for running the tests, see :ref:`installing_from_source`.
+
+Many Iris tests use data defined within the test itself, but some rely on
+external example files. Due to the size of some of these files, they are not
+kept in the Iris repository. Instead, a separate repository under the
+`SciTools`_ organisation is used, see
+https://github.com/SciTools/iris-test-data.
+
+In order to run the tests with **all** the test data you must clone the
+``iris-test-data`` repository and then ensure the Iris tests can access
+``iris-test-data/test_data``, using one of two methods:
+
+* Store the path in a shell environment variable named **OVERRIDE_TEST_DATA_REPOSITORY**.
+* Store the path in ``lib/iris/etc/site.cfg`` (see :mod:`iris.config` for more).
+
+The example command below uses ``~/projects`` as the parent directory::
+
+ cd ~/projects
+ git clone git@github.com:SciTools/iris-test-data.git
+ export OVERRIDE_TEST_DATA_REPOSITORY=~/projects/iris-test-data/test_data
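+
+Once the test data location is configured, a quick way to sanity-check it from
+Python is to inspect :mod:`iris.config` (this sketch assumes its
+``TEST_DATA_DIR`` attribute; the path shown in the comment is just the example
+above):
+
+.. code-block:: python
+
+    import iris.config
+
+    # Should print the directory configured above, e.g.
+    # ~/projects/iris-test-data/test_data
+    print(iris.config.TEST_DATA_DIR)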
+
+All the Iris tests may be run from the root ``iris`` project directory using
+``pytest``. For example::
+
+ pytest -n 2
+
+will run the tests across two processes. For more options, use the command
+``pytest -h``. Below is a trimmed example of the output::
+
+ ============================= test session starts ==============================
+ platform linux -- Python 3.10.5, pytest-7.1.2, pluggy-1.0.0
+ rootdir: /path/to/git/clone/iris, configfile: pyproject.toml, testpaths: lib/iris
+ plugins: xdist-2.5.0, forked-1.4.0
+ gw0 I / gw1 I
+ gw0 [6361] / gw1 [6361]
+
+ ........................................................................ [ 1%]
+ ........................................................................ [ 2%]
+ ........................................................................ [ 3%]
+ ...
+ .......................ssssssssssssssssss............................... [ 99%]
+ ........................ [100%]
+ =============================== warnings summary ===============================
+ ...
+ -- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html
+ =========================== short test summary info ============================
+ SKIPPED [1] lib/iris/tests/experimental/test_raster.py:152: Test requires 'gdal'.
+ SKIPPED [1] lib/iris/tests/experimental/test_raster.py:155: Test requires 'gdal'.
+ ...
+ ========= 6340 passed, 21 skipped, 1659 warnings in 193.57s (0:03:13) ==========
+
+There may be some tests that have been **skipped**. This is due to a Python
+decorator being present in the test script that will intentionally skip a test
+if a certain condition is not met. In the example output above there are
+**21** skipped tests. When this example was run, these were due to an
+experimental dependency not being present.
+
+.. tip::
+
+   The most common reason for tests to be skipped is that the directory for
+   ``iris-test-data`` has not been set, which shows output such as::
+
+ SKIPPED [1] lib/iris/tests/unit/fileformats/test_rules.py:157: Test(s) require external data.
+ SKIPPED [1] lib/iris/tests/unit/fileformats/pp/test__interpret_field.py:97: Test(s) require external data.
+ SKIPPED [1] lib/iris/tests/unit/util/test_demote_dim_coord_to_aux_coord.py:29: Test(s) require external data.
+
+ All Python decorators that skip tests will be defined in
+ ``lib/iris/tests/__init__.py`` with a function name with a prefix of
+ ``skip_``.
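+
+   As an illustrative sketch only (the class and test names below are
+   invented), such a decorator is typically applied like this, so that the
+   test is skipped automatically whenever the external data is unavailable:
+
+   .. code-block:: python
+
+       import iris.tests as tests
+
+
+       @tests.skip_data
+       class TestLoadingExternalData(tests.IrisTest):
+           def test_load(self):
+               ...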
+
+You can also run a specific test module. The example below runs the tests for
+mapping::
+
+ cd lib/iris/tests
+ python test_mapping.py
+
+When running the test directly as above you can view the command line options
+using the commands ``python test_mapping.py -h`` or
+``python test_mapping.py --help``.
+
+.. tip:: A useful command line option to use is ``-d``. This will display
+ matplotlib_ figures as the tests are run. For example::
+
+ python test_mapping.py -d
+
+.. _using nox:
+
+Using Nox for Testing Iris
+==========================
+
+The `nox`_ tool has been adopted for automated testing on `Iris GitHub Actions`_
+and is also used locally on the command-line by developers.
+
+`nox`_ is similar to `tox`_, but instead leverages the expressiveness and power of a Python
+configuration file rather than an ``.ini`` style file. As with `tox`_, `nox`_ can use `virtualenv`_
+to create isolated Python environments, but in addition also supports `conda`_ as a testing
+environment backend.
+
+
+Where is Nox Used?
+------------------
+
+Iris uses `nox`_ to fully automate the process of executing the Iris tests, and also
+to automate the process of:
+
+* building the documentation and executing the doc-tests
+* building the documentation gallery
+* running the documentation URL link check
+
+You can perform all of these tasks manually yourself; however, the onus is on you to first ensure
+that all of the required package dependencies are installed and available in the testing environment.
+
+`Nox`_ has been configured to automatically do this for you, and provides a means to easily replicate
+the remote testing behaviour of `Iris GitHub Actions`_ locally for the developer.
+
+
+Installing Nox
+--------------
+
+We recommend installing `nox`_ using `conda`_. To install `nox`_ in a separate `conda`_ environment::
+
+ conda create -n nox -c conda-forge nox
+ conda activate nox
+
+To install `nox`_ in an existing active `conda`_ environment::
+
+ conda install -c conda-forge nox
+
+The `nox`_ package is also available on PyPI; however, `nox`_ has been configured to use the `conda`_
+backend for Iris, so an installation of `conda`_ must always be available.
+
+
+Testing with Nox
+----------------
+
+The `nox`_ configuration file ``noxfile.py`` is available in the root ``iris`` project directory, and
+defines all the `nox`_ sessions (i.e., tasks) that may be performed. `nox`_ must always be executed
+from the ``iris`` root directory.
+
+To list the configured `nox`_ sessions for Iris::
+
+ nox --list
+
+To run the Iris tests for all configured versions of Python::
+
+ nox --session tests
+
+To run the Iris documentation doc-tests specifically for Python 3.7::
+
+ nox --session doctest-3.7
+
+To run all the Iris `nox`_ sessions::
+
+ nox
+
+For further `nox`_ command-line options::
+
+ nox --help
+
+.. tip::
+ For `nox`_ sessions that use the `conda`_ backend, you can use the ``-v`` or ``--verbose``
+ flag to display the `nox`_ `conda`_ environment package details and environment info.
+ For example::
+
+ nox --session tests -- --verbose
+
+
+.. note:: `nox`_ will cache its testing environments in the ``.nox`` directory within the root ``iris`` project directory.
+
+
+.. _setuptools: https://setuptools.readthedocs.io/en/latest/
+.. _tox: https://tox.readthedocs.io/en/latest/
+.. _virtualenv: https://virtualenv.pypa.io/en/latest/
+.. _PyPI: https://pypi.org/project/nox/
+.. _v41.5.0: https://setuptools.readthedocs.io/en/latest/history.html#v41-5-0
diff --git a/docs/src/developers_guide/contributing_testing.rst b/docs/src/developers_guide/contributing_testing.rst
new file mode 100644
index 0000000000..a65bcebd55
--- /dev/null
+++ b/docs/src/developers_guide/contributing_testing.rst
@@ -0,0 +1,147 @@
+.. include:: ../common_links.inc
+
+.. _developer_test_categories:
+
+
+Test Categories
+***************
+
+There are two main categories of tests within Iris:
+
+- :ref:`testing.unit_test`
+- :ref:`testing.integration`
+
+Ideally, all code changes should be accompanied by one or more unit
+tests, and by zero or more integration tests.
+
+If you are in any doubt about what tests to add or how to write them, please
+feel free to submit a pull-request in any state and ask for assistance.
+
+
+.. _testing.unit_test:
+
+Unit Tests
+==========
+
+Code changes should be accompanied by enough unit tests to give a
+high degree of confidence that the change works as expected. In
+addition, the unit tests can help describe the intent behind a change.
+
+The docstring for each test module must state the unit under test.
+For example:
+
+ :literal:`"""Unit tests for the \`iris.experimental.raster.export_geotiff\` function."""`
+
+All unit tests must be placed and named according to the following
+structure:
+
+
+.. _testing.classes:
+
+Classes
+-------
+
+When testing a class all the tests must reside in the module:
+
+    :literal:`lib/iris/tests/unit/<fully qualified module>/test_<ClassName>.py`
+
+Within this test module each tested method must have one or more
+corresponding test classes, for example:
+
+* ``Test_<name of public method>``
+* ``Test_<name of public method>__<aspect of method>``
+
+And within those test classes, the test methods must be named according
+to the aspect of the tested method which they address.
+
+**Examples**:
+
+All unit tests for :py:class:`iris.cube.Cube` must reside in:
+
+ :literal:`lib/iris/tests/unit/cube/test_Cube.py`
+
+Within that file the tests might look something like:
+
+.. code-block:: python
+
+ # Tests for the Cube.xml() method.
+ class Test_xml(tests.IrisTest):
+ def test_some_general_stuff(self):
+ ...
+
+
+ # Tests for the Cube.xml() method, focussing on the behaviour of
+ # the checksums.
+ class Test_xml__checksum(tests.IrisTest):
+ def test_checksum_ignores_masked_values(self):
+ ...
+
+
+ # Tests for the Cube.add_dim_coord() method.
+ class Test_add_dim_coord(tests.IrisTest):
+ def test_normal_usage(self):
+ ...
+
+ def test_coord_already_present(self):
+ ...
+
+
+.. _testing.functions:
+
+Functions
+---------
+
+When testing a function all the tests must reside in the module:
+
+    :literal:`lib/iris/tests/unit/<fully qualified module>/test_<function name>.py`
+
+Within this test module there must be one or more test classes, for example:
+
+* ``Test``
+* ``TestAspectOfFunction``
+
+And within those test classes, the test methods must be named according
+to the aspect of the tested function which they address.
+
+**Examples**:
+
+All unit tests for :py:func:`iris.experimental.raster.export_geotiff`
+must reside in:
+
+ :literal:`lib/iris/tests/unit/experimental/raster/test_export_geotiff.py`
+
+Within that file the tests might look something like:
+
+.. code-block:: python
+
+ # Tests focussing on the handling of different data types.
+ class TestDtypeAndValues(tests.IrisTest):
+ def test_int16(self):
+ ...
+
+ def test_int16_big_endian(self):
+ ...
+
+
+ # Tests focussing on the handling of different projections.
+ class TestProjection(tests.IrisTest):
+ def test_no_ellipsoid(self):
+ ...
+
+
+.. _testing.integration:
+
+Integration Tests
+=================
+
+Some code changes may require tests which exercise several units in
+order to demonstrate an important consequence of their interaction which
+may not be apparent when considering the units in isolation.
+
+These tests must be placed in the ``lib/iris/tests/integration`` folder.
+Unlike unit tests, there is no fixed naming scheme for integration
+tests. But folders and files must be created as required to help
+developers locate relevant tests. It is recommended they are named
+according to the capabilities under test, e.g.
+``metadata/test_pp_preservation.py``, and not named according to the
+module(s) under test.
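+
+For illustration only (the module path below is the hypothetical example named
+above, and the class and test names are invented), an integration test might
+look like:
+
+.. code-block:: python
+
+    # lib/iris/tests/integration/metadata/test_pp_preservation.py
+    """Integration tests for metadata preservation through PP save/load."""
+
+    import iris.tests as tests
+
+
+    class TestRoundTrip(tests.IrisTest):
+        def test_standard_name_preserved(self):
+            # Exercise several units together: load a cube, save it to PP,
+            # re-load it, then check the metadata survives the round trip.
+            ...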
diff --git a/docs/src/developers_guide/contributing_testing_index.rst b/docs/src/developers_guide/contributing_testing_index.rst
new file mode 100644
index 0000000000..2f5ae411e8
--- /dev/null
+++ b/docs/src/developers_guide/contributing_testing_index.rst
@@ -0,0 +1,14 @@
+.. _testing:
+
+Testing
+=======
+
+.. toctree::
+ :maxdepth: 3
+
+ contributing_testing
+ testing_tools
+ contributing_graphics_tests
+ contributing_running_tests
+ contributing_ci_tests
+ contributing_benchmarks
diff --git a/docs/src/developers_guide/documenting/__init__.py b/docs/src/developers_guide/documenting/__init__.py
new file mode 100644
index 0000000000..6e031999e7
--- /dev/null
+++ b/docs/src/developers_guide/documenting/__init__.py
@@ -0,0 +1 @@
+# noqa: D104
diff --git a/docs/src/developers_guide/documenting/docstrings.rst b/docs/src/developers_guide/documenting/docstrings.rst
new file mode 100644
index 0000000000..86f2c839c1
--- /dev/null
+++ b/docs/src/developers_guide/documenting/docstrings.rst
@@ -0,0 +1,34 @@
+.. _docstrings:
+
+==========
+Docstrings
+==========
+
+Every public object in the Iris package should have an appropriate docstring.
+This is important as the docstrings are used by developers to understand
+the code and may be read directly in the source or via the
+:doc:`../../generated/api/iris`.
+
+.. note::
+   As of April 2022 we are looking to adopt `numpydoc`_ strings as standard.
+   We aim to complete the adoption over time as we make changes to the codebase.
+   For examples of use see `numpydoc`_ and `sphinxcontrib-napoleon`_.
+
+For consistency always use:
+
+* ``"""triple double quotes"""`` around docstrings.
+* ``r"""raw triple double quotes"""`` if you use any backslashes in your
+ docstrings.
+* ``u"""Unicode triple-quoted string"""`` for Unicode docstrings
+
+All docstrings can use reST (reStructuredText) markup to augment the
+rendered formatting. See the :ref:`reST_quick_start` for more detail.
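+
+As a minimal sketch (not taken from the Iris codebase) combining the points
+above, a raw docstring is used here because the docstring contains a
+backslash:
+
+.. code-block:: python
+
+    def to_windows_path(path):
+        r"""Convert a POSIX-style path to a Windows-style path.
+
+        Replaces each ``/`` in *path* with ``\``.
+        """
+        return path.replace("/", "\\")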
+
+For more information including examples please see:
+
+* `numpydoc`_
+* `sphinxcontrib-napoleon`_
+
+
+.. _numpydoc: https://numpydoc.readthedocs.io/en/latest/format.html#style-guide
+.. _sphinxcontrib-napoleon: https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_numpy.html
\ No newline at end of file
diff --git a/docs/src/developers_guide/documenting/docstrings_attribute.py b/docs/src/developers_guide/documenting/docstrings_attribute.py
new file mode 100644
index 0000000000..1714373a62
--- /dev/null
+++ b/docs/src/developers_guide/documenting/docstrings_attribute.py
@@ -0,0 +1,38 @@
+"""Docstring attribute example."""
+
+
+class ExampleClass:
+ """Class Summary."""
+
+ def __init__(self, arg1, arg2):
+ """Purpose section description.
+
+ Description section text.
+
+ Parameters
+ ----------
+ arg1 : int
+ First argument description.
+ arg2 : float
+ Second argument description.
+
+ Returns
+ -------
+ bool
+
+ """
+ self.a = arg1
+ "Attribute arg1 docstring."
+ self.b = arg2
+ "Attribute arg2 docstring."
+
+ @property
+ def square(self):
+ """*(read-only)* Purpose section description.
+
+ Returns
+ -------
+ int
+
+ """
+ return self.a * self.a
diff --git a/docs/src/developers_guide/documenting/docstrings_sample_routine.py b/docs/src/developers_guide/documenting/docstrings_sample_routine.py
new file mode 100644
index 0000000000..7feec6dbd0
--- /dev/null
+++ b/docs/src/developers_guide/documenting/docstrings_sample_routine.py
@@ -0,0 +1,27 @@
+"""Docstring routine example."""
+
+
+def sample_routine(arg1, arg2, kwarg1="foo", kwarg2=None):
+ """Purpose section text goes here.
+
+ Description section longer text goes here.
+
+ Parameters
+ ----------
+ arg1 : numpy.ndarray
+ First argument description.
+ arg2 : numpy.ndarray
+ Second argument description.
+ kwarg1 : str, optional
+ The first keyword argument. This argument description
+ can be multi-lined.
+ **kwarg2 : bool, optional
+ The second keyword argument.
+
+ Returns
+ -------
+ numpy.ndarray
+ A numpy.ndarray of arg1 * arg2.
+
+ """
+ pass
diff --git a/docs/src/developers_guide/documenting/rest_guide.rst b/docs/src/developers_guide/documenting/rest_guide.rst
new file mode 100644
index 0000000000..9e8c1107b0
--- /dev/null
+++ b/docs/src/developers_guide/documenting/rest_guide.rst
@@ -0,0 +1,44 @@
+.. include:: ../../common_links.inc
+
+.. _reST_quick_start:
+
+================
+reST Quick Start
+================
+
+`reST`_ is used to create the documentation for Iris_. It is used to author
+all of the documentation content, including its use in docstrings where appropriate.
+For more information see :ref:`docstrings`.
+
+reST is a lightweight markup language intended to be highly readable in
+source format. This guide will cover some of the more frequently used advanced
+reST markup syntaxes; for the basics of reST the following links may be useful:
+
+* https://www.sphinx-doc.org/en/master/usage/restructuredtext/
+* https://packages.python.org/an_example_pypi_project/sphinx.html
+
+Reference documentation for reST can be found at https://docutils.sourceforge.net/rst.html.
+
+Creating Links
+--------------
+Basic links can be created with ```Text of the link <https://example.com>`_``
+which will look like `Text of the link <https://example.com>`_
+
+
+Documents in the same project can be cross referenced with the syntax
+``:doc:`document_name```. For example, to reference the "docstrings" page,
+``:doc:`docstrings``` creates the following link: :doc:`docstrings`
+
+
+References can be created between sections by first making a "label" where
+you would like the link to point to, e.g. ``.. _name_of_reference:``. The
+appropriate link can then be created with ``:ref:`name_of_reference```
+(note the leading underscore on the label).
+
+
+Cross referencing other reference documentation can be achieved with the
+syntax ``:py:class:`zipfile.ZipFile``` which will result in links such as
+:py:class:`zipfile.ZipFile` and :py:class:`numpy.ndarray`.
+
+
+.. _reST: https://en.wikipedia.org/wiki/ReStructuredText
diff --git a/docs/src/developers_guide/documenting/whats_new_contributions.rst b/docs/src/developers_guide/documenting/whats_new_contributions.rst
new file mode 100644
index 0000000000..82569e57a0
--- /dev/null
+++ b/docs/src/developers_guide/documenting/whats_new_contributions.rst
@@ -0,0 +1,145 @@
+.. include:: ../../common_links.inc
+
+.. _whats_new_contributions:
+
+=================================
+Contributing a "What's New" Entry
+=================================
+
+Iris uses a file named ``latest.rst`` to keep a draft of upcoming development
+changes that will form the next stable release. Contributions to the
+:ref:`iris_whatsnew` document are written by the developer most familiar
+with the change made. The contribution should be included as part of
+the Iris Pull Request that introduces the change.
+
+The ``latest.rst`` and the past release notes are kept in the
+``docs/src/whatsnew/`` directory. If you are writing the first contribution after
+an Iris release: **create the new** ``latest.rst`` by copying the content from
+``latest.rst.template`` in the same directory.
+
+Since the `Contribution categories`_ include Internal changes, **all** Iris
+Pull Requests should be accompanied by a "What's New" contribution.
+
+
+Git Conflicts
+=============
+
+If changes to ``latest.rst`` are being suggested in several simultaneous
+Iris Pull Requests, Git will likely encounter merge conflicts. If this
+situation is thought likely (large PR, high repo activity etc.):
+
+* PR author: Do not include a "What's New" entry. Mention in the PR text that a
+ "What's New" entry is pending
+
+* PR reviewer: Review the PR as normal. Once the PR is acceptable, ask that
+ a **new pull request** be created specifically for the "What's New" entry,
+  which references the main pull request and is titled (e.g. for PR#9999):
+
+ What's New for #9999
+
+* PR author: create the "What's New" pull request
+
+* PR reviewer: once the "What's New" PR is created, **merge the main PR**.
+ (this will fix any `Iris GitHub Actions`_ linkcheck errors where the links in the
+ "What's New" PR reference new features introduced in the main PR)
+
+* PR reviewer: review the "What's New" PR, merge once acceptable
+
+These measures should mean the suggested ``latest.rst`` changes are outstanding
+for the minimum time, minimising conflicts and minimising the need to rebase or
+merge from trunk.
+
+
+Writing a Contribution
+======================
+
+A contribution is the short description of a change introduced to Iris
+which improved it in some way. As such, a single Iris Pull Request may
+contain multiple changes that are worth highlighting as contributions to the
+what's new document.
+
+The appropriate contribution for a pull request might in fact be an addition or
+change to an existing "What's New" entry.
+
+Each contribution will ideally be written as a single concise entry using the
+reStructuredText auto-enumerated list syntax ``#.``. Where possible do not
+exceed **column 80** and ensure that any subsequent lines of the same entry are
+aligned with the first. The content should target an Iris user as the audience.
+The required content, in order, is as follows:
+
+* Use your discretion to decide on the names of all those that you want to
+ acknowledge as part of your contribution. Also consider the efforts of the
+ reviewer. Please use GitHub user names that link to their GitHub profile
+ e.g.,
+
+ ```@tkknight`_ Lorem ipsum dolor sit amet ...``
+
+ Also add a full reference in the following section at the end of the ``latest.rst``::
+
+ .. comment
+ Whatsnew author names (@github name) in alphabetical order. Note that,
+ core dev names are automatically included by the common_links.inc:
+
+ .. _@tkknight: https://github.com/tkknight
+
+* A succinct summary of the new/changed behaviour.
+
+* Context to the change. Possible examples include: what this fixes, why
+ something was added, issue references (e.g. ``:issue:`9999```), more specific
+ detail on the change itself.
+
+* Pull request references, bracketed, following the final period e.g.,
+ ``(:pull:`1111`, :pull:`9999`)``
+
+* A trailing blank line (standard reStructuredText list format).
+
+For example::
+
+    #. `@tkknight <https://github.com/tkknight>`_ and
+       `@trexfeathers <https://github.com/trexfeathers>`_ (reviewer) changed
+ argument ``x`` to be optional in :class:`~iris.module.class` and
+ :meth:`iris.module.method`. This allows greater flexibility as requested in
+ :issue:`9999`. (:pull:`1111`, :pull:`9999`)
+
+
+The above example also demonstrates some of the possible syntax for including
+links to code. For more inspiration on possible content and references, please
+examine past :ref:`iris_whatsnew` entries.
+
+.. note:: The reStructuredText syntax will be checked as part of building
+ the documentation. Any warnings should be corrected. The
+ `Iris GitHub Actions`_ will automatically build the documentation when
+ creating a pull request, however you can also manually
+   :ref:`build <contributing.documentation.building>` the documentation.
+
+
+Contribution Categories
+=======================
+
+The structure of the what's new release note should be easy for users to
+read. To achieve this, several categories may be used.
+
+**📢 Announcements**
+ General news and announcements to the Iris community.
+
+**✨ Features**
+ Features that are new or changed to add functionality.
+
+**🐛 Bug Fixes**
+ A bug fix.
+
+**💣 Incompatible Changes**
+ A change that causes an incompatibility with prior versions of Iris.
+
+**🔥 Deprecations**
+ Deprecations of functionality.
+
+**🔗 Dependencies**
+ Additions, removals and version changes in Iris' package dependencies.
+
+**📚 Documentation**
+ Changes to documentation.
+
+**💼 Internal**
+ Changes to any internal or development related topics, such as testing,
+ environment dependencies etc.
diff --git a/docs/src/developers_guide/edit_button.png b/docs/src/developers_guide/edit_button.png
new file mode 100755
index 0000000000..ee2e7858f0
Binary files /dev/null and b/docs/src/developers_guide/edit_button.png differ
diff --git a/docs/src/developers_guide/edit_on_github.png b/docs/src/developers_guide/edit_on_github.png
new file mode 100755
index 0000000000..f802ebd9d6
Binary files /dev/null and b/docs/src/developers_guide/edit_on_github.png differ
diff --git a/docs/src/developers_guide/find_main.png b/docs/src/developers_guide/find_main.png
new file mode 100755
index 0000000000..8a7af306cd
Binary files /dev/null and b/docs/src/developers_guide/find_main.png differ
diff --git a/docs/src/developers_guide/fork_banner.png b/docs/src/developers_guide/fork_banner.png
new file mode 100755
index 0000000000..0d140c9fc6
Binary files /dev/null and b/docs/src/developers_guide/fork_banner.png differ
diff --git a/docs/src/developers_guide/github_app.rst b/docs/src/developers_guide/github_app.rst
new file mode 100644
index 0000000000..402cfe0c75
--- /dev/null
+++ b/docs/src/developers_guide/github_app.rst
@@ -0,0 +1,281 @@
+.. include:: ../common_links.inc
+
+Token GitHub App
+----------------
+
+.. note::
+
+ This section of the documentation is applicable only to GitHub `SciTools`_
+ Organisation **owners** and **administrators**.
+
+.. note::
+
+ The ``iris-actions`` GitHub App has been rebranded with the more generic
+ name ``scitools-ci``, as the app can be used for any `SciTools`_ repository,
+ not just ``iris`` specifically.
+
+ All of the following instructions are still applicable.
+
+
+This section describes how to create, configure, install and use our `SciTools`_
+GitHub App for generating tokens for use with *GitHub Actions* (GHA).
+
+
+Background
+^^^^^^^^^^
+
+Our GitHub *Continuous Integration* (CI) workflows require fully reproducible
+`conda`_ environments to test ``iris`` and build our documentation.
+
+The ``iris`` `refresh-lockfiles`_ GHA workflow uses the `conda-lock`_ package to routinely
+generate a platform specific ``lockfile`` containing all the package dependencies
+required by ``iris`` for a specific version of ``python``.
+
+The environment lockfiles created by the `refresh-lockfiles`_ GHA are contributed
+back to ``iris`` through a pull-request that is automatically generated using the
+third-party `create-pull-request`_ GHA. By default, pull-requests created by such an
+action using the standard ``GITHUB_TOKEN`` **cannot** trigger other workflows, such
+as our CI.
+
+As a result, we use a dedicated authentication **GitHub App** to securely generate tokens
+for the `create-pull-request`_ GHA, which then permits our full suite of CI testing workflows
+to be triggered against the lockfiles pull-request. Ensuring that the CI is triggered gives us
+confidence that the proposed new lockfiles have not introduced a package level incompatibility
+or issue within ``iris``. See :ref:`use gha`.
+
+
+Create GitHub App
+^^^^^^^^^^^^^^^^^
+
+The **GitHub App** is created for the sole purpose of generating tokens for use with actions,
+and **must** be owned by the `SciTools`_ organisation.
+
+To create a minimal `GitHub App`_ for this purpose, perform the following steps:
+
+1. Click the `SciTools`_ organisation ``⚙️ Settings`` option.
+
+.. figure:: assets/scitools-settings.png
+ :alt: SciTools organisation Settings option
+ :align: center
+ :width: 75%
+
+2. Click the ``GitHub Apps`` option from the ``<> Developer settings``
+ section in the left hand sidebar.
+
+.. figure:: assets/developer-settings-github-apps.png
+ :alt: Developer settings, GitHub Apps option
+ :align: center
+ :width: 25%
+
+3. Now click the ``New GitHub App`` button to display the ``Register new GitHub App``
+ form.
+
+Within the ``Register new GitHub App`` form, complete the following fields:
+
+4. Set the **mandatory** ``GitHub App name`` field to be ``iris-actions``.
+5. Set the **mandatory** ``Homepage URL`` field to be ``https://github.com/SciTools/iris``
+6. Under the ``Webhook`` section, **uncheck** the ``Active`` checkbox.
+   Note that **no** ``Webhook URL`` is required.
+
+.. figure:: assets/webhook-active.png
+ :alt: Webhook active checkbox
+ :align: center
+ :width: 75%
+
+7. Under the ``Repository permissions`` section, set the ``Contents`` field to
+ be ``Access: Read and write``.
+
+.. figure:: assets/repo-perms-contents.png
+ :alt: Repository permissions Contents option
+ :align: center
+ :width: 75%
+
+8. Under the ``Repository permissions`` section, set the ``Pull requests`` field
+ to be ``Access: Read and write``.
+
+.. figure:: assets/repo-perms-pull-requests.png
+ :alt: Repository permissions Pull requests option
+ :align: center
+ :width: 75%
+
+9. Under the ``Organization permissions`` section, set the ``Members`` field to
+ be ``Access: Read-only``.
+
+.. figure:: assets/org-perms-members.png
+ :alt: Organization permissions Members
+ :align: center
+ :width: 75%
+
+10. Under the ``User permissions`` section, for the ``Where can this GitHub App be installed?``
+ field, **check** the ``Only on this account`` radio-button i.e., only allow
+ this GitHub App to be installed on the **SciTools** account.
+
+.. figure:: assets/user-perms.png
+ :alt: User permissions
+ :align: center
+ :width: 75%
+
+11. Finally, click the ``Create GitHub App`` button.
+
+
+Configure GitHub App
+^^^^^^^^^^^^^^^^^^^^
+
+Creating the GitHub App will automatically redirect you to the ``SciTools settings / iris-actions``
+form for the newly created app.
+
+Perform the following GitHub App configuration steps:
+
+.. _app id:
+
+1. Under the ``About`` section, make a note of the GitHub ``App ID`` as this value is
+ required later. See :ref:`gha secrets`.
+2. Under the ``Display information`` section, optionally upload the ``iris`` logo
+ as a ``png`` image.
+3. Under the ``Private keys`` section, click the ``Generate a private key`` button.
+
+.. figure:: assets/generate-key.png
+ :alt: Private keys Generate a private key
+ :align: center
+ :width: 75%
+
+.. _private key:
+
+GitHub will automatically generate a private key to sign access token requests
+for the app. Also a separate browser pop-up window will appear with the GitHub
+App private key in ``OpenSSL PEM`` format.
+
+.. figure:: assets/download-pem.png
+ :alt: Download OpenSSL PEM file
+ :align: center
+ :width: 50%
+
+.. important::
+
+ Please ensure that you save the ``OpenSSL PEM`` file and **securely** archive
+ its contents. The private key within this file is required later.
+ See :ref:`gha secrets`.
+
+
+Install GitHub App
+^^^^^^^^^^^^^^^^^^
+
+To install the GitHub App:
+
+1. Select the ``Install App`` option from the top left menu of the
+ ``Scitools settings / iris-actions`` form, then click the ``Install`` button.
+
+.. figure:: assets/install-app.png
+   :alt: Install App option
+ :align: center
+ :width: 75%
+
+2. Select the ``Only select repositories`` radio-button from the ``Install iris-actions``
+ form, and choose the ``SciTools/iris`` repository.
+
+.. figure:: assets/install-iris-actions.png
+ :alt: Install iris-actions GitHub App
+ :align: center
+ :width: 75%
+
+3. Click the ``Install`` button.
+
+ The successfully installed ``iris-actions`` GitHub App is now available under
+ the ``GitHub Apps`` option in the ``Integrations`` section of the `SciTools`_
+   organisation ``Settings``. Note that to reconfigure the installed app, click
+ the ``⚙️ App settings`` option.
+
+.. figure:: assets/installed-app.png
+ :alt: Installed GitHub App
+ :align: center
+ :width: 80%
+
+4. Finally, confirm that the ``iris-actions`` GitHub App is now available within
+ the `SciTools/iris`_ repository by clicking the ``GitHub apps`` option in the
+ ``⚙️ Settings`` section.
+
+.. figure:: assets/iris-github-apps.png
+ :alt: Iris installed GitHub App
+ :align: center
+ :width: 80%
+
+
+.. _gha secrets:
+
+Create Repository Secrets
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The GitHub Action that requests an access token from the ``iris-actions``
+GitHub App must be configured with the following information:
+
+* the ``App ID``, and
+* the ``OpenSSL PEM`` private key
+
+associated with the ``iris-actions`` GitHub App. This **sensitive** information is
+made **securely** available by creating `SciTools/iris`_ repository secrets:
+
+1. Click the `SciTools/iris`_ repository ``⚙️ Settings`` option.
+
+.. figure:: assets/iris-settings.png
+ :alt: Iris Settings
+ :align: center
+ :width: 75%
+
+2. Click the ``Actions`` option from the ``Security`` section in the left hand
+ sidebar.
+
+.. figure:: assets/iris-security-actions.png
+ :alt: Iris Settings Security Actions
+ :align: center
+ :width: 25%
+
+3. Click the ``New repository secret`` button.
+
+.. figure:: assets/iris-actions-secret.png
+ :alt: Iris Actions Secret
+ :align: center
+ :width: 75%
+
+4. Complete the ``Actions secrets / New secret`` form for the ``App ID``:
+
+ * Set the ``Name`` field to be ``AUTH_APP_ID``.
+ * Set the ``Value`` field to be the numerical ``iris-actions`` GitHub ``App ID``.
+ See :ref:`here `.
+ * Click the ``Add secret`` button.
+
+5. Click the ``New repository secret`` button again, and complete the form
+ for the ``OpenSSL PEM``:
+
+ * Set the ``Name`` field to be ``AUTH_APP_PRIVATE_KEY``.
+ * Set the ``Value`` field to be the entire contents of the ``OpenSSL PEM`` file.
+ See :ref:`here `.
+ * Click the ``Add secret`` button.
+
+A summary of the newly created `SciTools/iris`_ repository secrets is now available:
+
+.. figure:: assets/iris-secrets-created.png
+ :alt: Iris Secrets created
+ :align: center
+ :width: 75%
+
+
+.. _use gha:
+
+Use GitHub App
+^^^^^^^^^^^^^^
+
+The following example workflow shows how to use the `github-app-token`_ GHA
+to generate a token for use with the `create-pull-request`_ GHA:
+
+.. figure:: assets/gha-token-example.png
+ :alt: GitHub Action token example
+ :align: center
+ :width: 50%
+
+
+.. _GitHub App: https://docs.github.com/en/developers/apps/building-github-apps/creating-a-github-app
+.. _SciTools/iris: https://github.com/SciTools/iris
+.. _conda-lock: https://github.com/conda-incubator/conda-lock
+.. _create-pull-request: https://github.com/peter-evans/create-pull-request
+.. _github-app-token: https://github.com/tibdex/github-app-token
+.. _refresh-lockfiles: https://github.com/SciTools/iris/blob/main/.github/workflows/refresh-lockfiles.yml
diff --git a/docs/src/developers_guide/gitwash/LICENSE b/docs/src/developers_guide/gitwash/LICENSE
new file mode 100644
index 0000000000..cd8441c161
--- /dev/null
+++ b/docs/src/developers_guide/gitwash/LICENSE
@@ -0,0 +1,34 @@
+=========
+ LICENSE
+=========
+
+We release the documents under the Creative Commons attribution license:
+https://creativecommons.org/licenses/by/3.0/
+
+We release the code under the simplified BSD license:
+
+Copyright (c) 2010, Matthew Brett
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+* Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/docs/src/developers_guide/gitwash/branch_dropdown.png b/docs/src/developers_guide/gitwash/branch_dropdown.png
new file mode 100644
index 0000000000..6d74f3d643
Binary files /dev/null and b/docs/src/developers_guide/gitwash/branch_dropdown.png differ
diff --git a/docs/iris/src/developers_guide/gitwash/configure_git.rst b/docs/src/developers_guide/gitwash/configure_git.rst
similarity index 88%
rename from docs/iris/src/developers_guide/gitwash/configure_git.rst
rename to docs/src/developers_guide/gitwash/configure_git.rst
index 0e18b666d0..564ae51820 100644
--- a/docs/iris/src/developers_guide/gitwash/configure_git.rst
+++ b/docs/src/developers_guide/gitwash/configure_git.rst
@@ -1,8 +1,10 @@
+.. include:: links.inc
+
.. _configure-git:
-===============
- Configure git
-===============
+=============
+Configure Git
+=============
.. _git-config-basic:
@@ -49,13 +51,13 @@ command::
To set up on another computer, you can copy your ``~/.gitconfig`` file,
or run the commands above.
-In detail
+In Detail
=========
user.name and user.email
------------------------
-It is good practice to tell git_ who you are, for labeling any changes
+It is good practice to tell git_ who you are, for labelling any changes
you make to the code. The simplest way to do this is from the command
line::
@@ -122,7 +124,7 @@ Or from the command line::
.. _fancy-log:
-Fancy log output
+Fancy Log Output
----------------
This is a very nice alias to get a fancy log output; it should go in the
@@ -137,22 +139,18 @@ You use the alias with::
and it gives graph / text output something like this (but with color!)::
* 6d8e1ee - (HEAD, origin/my-fancy-feature, my-fancy-feature) NF - a fancy file (45 minutes ago) [Matthew Brett]
- * d304a73 - (origin/placeholder, placeholder) Merge pull request #48 from hhuuggoo/master (2 weeks ago) [Jonathan Terhorst]
- |\
+ * d304a73 - (origin/placeholder, placeholder) Merge pull request #48 from hhuuggoo/main (2 weeks ago) [Jonathan Terhorst]
+ |\
| * 4aff2a8 - fixed bug 35, and added a test in test_bugfixes (2 weeks ago) [Hugo]
- |/
+ |/
* a7ff2e5 - Added notes on discussion/proposal made during Data Array Summit. (2 weeks ago) [Corran Webster]
- * 68f6752 - Initial implimentation of AxisIndexer - uses 'index_by' which needs to be changed to a call on an Axes object - this is all very sketchy right now. (2 weeks ago) [Corr
- * 376adbd - Merge pull request #46 from terhorst/master (2 weeks ago) [Jonathan Terhorst]
- |\
+ * 68f6752 - Initial implementation of AxisIndexer - uses 'index_by' which needs to be changed to a call on an Axes object - this is all very sketchy right now. (2 weeks ago) [Corr
+ * 376adbd - Merge pull request #46 from terhorst/main (2 weeks ago) [Jonathan Terhorst]
+ |\
| * b605216 - updated joshu example to current api (3 weeks ago) [Jonathan Terhorst]
| * 2e991e8 - add testing for outer ufunc (3 weeks ago) [Jonathan Terhorst]
| * 7beda5a - prevent axis from throwing an exception if testing equality with non-axis object (3 weeks ago) [Jonathan Terhorst]
| * 65af65e - convert unit testing code to assertions (3 weeks ago) [Jonathan Terhorst]
- | * 956fbab - Merge remote-tracking branch 'upstream/master' (3 weeks ago) [Jonathan Terhorst]
- | |\
+ | * 956fbab - Merge remote-tracking branch 'upstream/main' (3 weeks ago) [Jonathan Terhorst]
+ | |\
| |/
-
-Thanks to Yury V. Zaytsev for posting it.
-
-.. include:: links.inc
diff --git a/docs/src/developers_guide/gitwash/development_workflow.rst b/docs/src/developers_guide/gitwash/development_workflow.rst
new file mode 100644
index 0000000000..8545a04308
--- /dev/null
+++ b/docs/src/developers_guide/gitwash/development_workflow.rst
@@ -0,0 +1,245 @@
+.. _development-workflow:
+
+####################
+Development Workflow
+####################
+
+You already have your own forked copy of the `iris`_ repository, by
+following :ref:`forking`. You have :ref:`set-up-fork`. You have configured
+git by following :ref:`configure-git`. Now you are ready for some real work.
+
+Workflow Summary
+================
+
+In what follows we'll refer to the upstream iris ``main`` branch as
+"trunk".
+
+* Don't use the ``main`` branch on your fork for development.
+* When you are starting a new set of changes, fetch any changes from trunk,
+ and start a new *feature branch* from that.
+* Make a new branch for each separable set of changes |emdash| "one task, one
+ branch".
+* Name your branch for the purpose of the changes - e.g.
+ ``bugfix-for-issue-14`` or ``refactor-database-code``.
+* If you can possibly avoid it, avoid merging trunk or any other branches into
+ your feature branch while you are working.
+* If you do find yourself merging from trunk, consider :ref:`rebase-on-trunk`
+* Ask on the Iris `GitHub Discussions`_ if you get stuck.
+* Ask for code review!
+
+This way of working helps to keep work well organized, with readable history.
+This in turn makes it easier for project maintainers (that might be you) to see
+what you've done, and why you did it.
+
+See `linux git workflow`_ for some explanation.
+
+.. _update-mirror-trunk:
+
+Update the Mirror of Trunk
+==========================
+
+First make sure you have done :ref:`linking-to-upstream`.
+
+From time to time you should fetch the upstream (trunk) changes from github::
+
+ git fetch upstream
+
+This will pull down any commits you don't have, and set the remote branches to
+point to the right commit. For example, 'trunk' is the branch referred to by
+(remote/branchname) ``upstream/main`` - and if there have been commits since
+you last checked, ``upstream/main`` will change after you do the fetch.
+
+.. _make-feature-branch:
+
+Make a New Feature Branch
+=========================
+
+When you are ready to make some changes to the code, you should start a new
+branch. Branches that are for a collection of related edits are often called
+'feature branches'.
+
+Making a new branch for each set of related changes will make it easier for
+someone reviewing your branch to see what you are doing.
+
+Choose an informative name for the branch to remind yourself and the rest of us
+what the changes in the branch are for. For example ``add-ability-to-fly`` or
+``bugfix-for-issue-42``.
+
+::
+
+ # Update the mirror of trunk
+ git fetch upstream
+ # Make new feature branch starting at current trunk
+ git branch my-new-feature upstream/main
+ git checkout my-new-feature
+
+Generally, you will want to keep your feature branches on your public github_
+fork of `iris`_. To do this, you ``git push`` this new branch up to your
+github repo. Generally (if you followed the instructions in these pages, and by
+default), git will have a link to your github repo, called ``origin``. You push
+up to your own repo on github with::
+
+ git push origin my-new-feature
+
+In git >= 1.7 you can ensure that the link is correctly set by using the
+``--set-upstream`` option::
+
+ git push --set-upstream origin my-new-feature
+
+From now on git will know that ``my-new-feature`` is related to the
+``my-new-feature`` branch in the github repo.
+
+.. _edit-flow:
+
+The Editing Workflow
+====================
+
+Overview
+--------
+
+::
+
+ # hack hack
+ git add my_new_file
+ git commit -am 'NF - some message'
+ git push
+
+In More Detail
+--------------
+
+#. Make some changes
+#. See which files have changed with ``git status``.
+ You'll see a listing like this one::
+
+      # On branch my-new-feature
+ # Changed but not updated:
+ # (use "git add ..." to update what will be committed)
+ # (use "git checkout -- ..." to discard changes in working directory)
+ #
+ # modified: README
+ #
+ # Untracked files:
+ # (use "git add ..." to include in what will be committed)
+ #
+ # INSTALL
+ no changes added to commit (use "git add" and/or "git commit -a")
+
+#. Check what the actual changes are with ``git diff``.
+#. Add any new files to version control ``git add new_file_name``.
+#. To commit all modified files into the local copy of your repo, do
+ ``git commit -am 'A commit message'``. Note the ``-am`` options to
+ ``commit``. The ``m`` flag just signals that you're going to type a
+ message on the command line. The ``a`` flag will automatically stage
+ all files that have been modified and deleted.
+#. To push the changes up to your forked repo on github, do a ``git
+ push``.
+
+
+Testing Your Changes
+====================
+
+Once you are happy with your changes, work through the :ref:`pr_check` and
+make sure your branch passes all the relevant tests.
+
+
+Ask for Your Changes to be Reviewed or Merged
+=============================================
+
+When you are ready to ask for someone to review your code and consider a merge:
+
+#. Go to the URL of your forked repo, say
+ ``https://github.com/your-user-name/iris``.
+#. Use the 'Switch Branches' dropdown menu near the top left of the page to
+ select the branch with your changes:
+
+ .. image:: branch_dropdown.png
+
+#. Click on the 'Pull request' button:
+
+ .. image:: pull_button.png
+
+ Enter a title for the set of changes, and some explanation of what you've
+ done. Say if there is anything you'd like particular attention for - like a
+ complicated change or some code you are not happy with.
+
+ If you don't think your request is ready to be merged, just say so in your
+ pull request message. This is still a good way of getting some preliminary
+ code review.
+
+Some Other Things You Might Want to Do
+======================================
+
+Delete a Branch on Github
+-------------------------
+
+::
+
+ git checkout main
+ # delete branch locally
+ git branch -D my-unwanted-branch
+ # delete branch on github
+ git push origin :my-unwanted-branch
+
+Note the colon ``:`` before ``my-unwanted-branch``. See also:
+https://github.com/guides/remove-a-remote-branch
+
+
+Several People Sharing a Single Repository
+------------------------------------------
+
+If you want to work on some stuff with other people, where you are all
+committing into the same repository, or even the same branch, then just
+share it via github.
+
+First fork iris into your account, as from :ref:`forking`.
+
+Then, go to your forked repository github page, say
+``https://github.com/your-user-name/iris``, select :guilabel:`Settings`,
+:guilabel:`Manage Access` and then :guilabel:`Invite collaborator`.
+
+.. note:: For more information on sharing your repository see the
+ GitHub documentation on `Inviting collaborators`_.
+
+
+.. _Inviting collaborators: https://docs.github.com/en/github/setting-up-and-managing-your-github-user-account/inviting-collaborators-to-a-personal-repository
+
+Now all those people can do::
+
+    git clone git@github.com:your-user-name/iris.git
+
+Remember that links starting with ``git@`` use the ssh protocol and are
+read-write; links starting with ``git://`` are read-only.
+
+Your collaborators can then commit directly into that repo with the
+usual::
+
+ git commit -am 'ENH - much better code'
+ git push origin main # pushes directly into your repo
+
+Explore Your Repository
+-----------------------
+
+To see a graphical representation of the repository branches and
+commits::
+
+ gitk --all
+
+To see a linear list of commits for this branch::
+
+ git log
+
+Finally the :ref:`fancy-log` ``lg`` alias will give you a reasonable text-based
+graph of the repository.
+
+
+.. _rebase-on-trunk:
+
+Rebasing on Trunk
+-----------------
+
+For more information please see the
+`official github documentation on git rebase`_.
+
+.. _official github documentation on git rebase: https://docs.github.com/en/github/using-git/about-git-rebase
+
+.. include:: links.inc
diff --git a/docs/src/developers_guide/gitwash/forking.rst b/docs/src/developers_guide/gitwash/forking.rst
new file mode 100644
index 0000000000..baeb243c86
--- /dev/null
+++ b/docs/src/developers_guide/gitwash/forking.rst
@@ -0,0 +1,36 @@
+.. include:: links.inc
+
+.. _forking:
+
+===================================
+Making Your own Copy (fork) of Iris
+===================================
+
+You need to do this only once. The instructions here are very similar
+to the instructions at https://help.github.com/forking/; please see
+that page for more detail. We're repeating some of it here just to give the
+specifics for the `Iris`_ project, and to suggest some default names.
+
+
+Set up and Configure a Github Account
+=====================================
+
+If you don't have a github account, go to the github page, and make one.
+
+You then need to configure your account to allow write access, see
+the `generating ssh keys for GitHub`_ help on `github help`_.
+
+
+Create Your own Forked Copy of Iris
+===================================
+
+#. Log into your github account.
+#. Go to the `Iris`_ github home at `Iris github`_.
+#. Click on the *fork* button:
+
+ .. image:: forking_button.png
+
+ Now, after a short pause, you should find yourself at the home page for
+ your own forked copy of `Iris`_.
+
+
diff --git a/docs/src/developers_guide/gitwash/forking_button.png b/docs/src/developers_guide/gitwash/forking_button.png
new file mode 100644
index 0000000000..1eb21051d1
Binary files /dev/null and b/docs/src/developers_guide/gitwash/forking_button.png differ
diff --git a/docs/src/developers_guide/gitwash/git_intro.rst b/docs/src/developers_guide/gitwash/git_intro.rst
new file mode 100644
index 0000000000..dfb64da872
--- /dev/null
+++ b/docs/src/developers_guide/gitwash/git_intro.rst
@@ -0,0 +1,15 @@
+.. include:: links.inc
+
+Introduction
+============
+
+These pages describe a git_ and github_ workflow for the `Iris`_
+project.
+
+This is not a comprehensive git reference, it's just a workflow for our
+own project. It's tailored to the github hosting service. You may well
+find better or quicker ways of getting stuff done with git, but these
+should get you started.
+
+.. tip:: Please see the official `git documentation`_ for a complete list of
+ git **commands** and **cheat sheets**.
\ No newline at end of file
diff --git a/docs/src/developers_guide/gitwash/git_links.inc b/docs/src/developers_guide/gitwash/git_links.inc
new file mode 100644
index 0000000000..3ced13703f
--- /dev/null
+++ b/docs/src/developers_guide/gitwash/git_links.inc
@@ -0,0 +1,18 @@
+.. This (-*- rst -*-) format file contains commonly used link targets
+ and name substitutions. It may be included in many files,
+ therefore it should only contain link targets and name
+ substitutions. Try grepping for "^\.\. _" to find plausible
+ candidates for this list.
+
+.. NOTE: reST targets are
+ __not_case_sensitive__, so only one target definition is needed for
+ nipy, NIPY, Nipy, etc...
+
+.. _git: https://git-scm.com/
+.. _github: https://github.com
+.. _github help: https://help.github.com
+.. _git documentation: https://git-scm.com/docs
+
+.. _linux git workflow: https://www.mail-archive.com/dri-devel@lists.sourceforge.net/msg39091.html
+
+.. |emdash| unicode:: U+02014
diff --git a/docs/src/developers_guide/gitwash/index.rst b/docs/src/developers_guide/gitwash/index.rst
new file mode 100644
index 0000000000..3cde622583
--- /dev/null
+++ b/docs/src/developers_guide/gitwash/index.rst
@@ -0,0 +1,14 @@
+.. _using-git:
+
+Working With Iris Source Code
+=============================
+
+.. toctree::
+ :maxdepth: 2
+
+ git_intro
+ forking
+ set_up_fork
+ configure_git
+ development_workflow
+
diff --git a/docs/src/developers_guide/gitwash/links.inc b/docs/src/developers_guide/gitwash/links.inc
new file mode 100644
index 0000000000..1d67f20d3a
--- /dev/null
+++ b/docs/src/developers_guide/gitwash/links.inc
@@ -0,0 +1,3 @@
+.. include:: ../../common_links.inc
+
+.. include:: git_links.inc
diff --git a/docs/src/developers_guide/gitwash/pull_button.png b/docs/src/developers_guide/gitwash/pull_button.png
new file mode 100644
index 0000000000..a088e19e95
Binary files /dev/null and b/docs/src/developers_guide/gitwash/pull_button.png differ
diff --git a/docs/src/developers_guide/gitwash/set_up_fork.rst b/docs/src/developers_guide/gitwash/set_up_fork.rst
new file mode 100644
index 0000000000..5318825488
--- /dev/null
+++ b/docs/src/developers_guide/gitwash/set_up_fork.rst
@@ -0,0 +1,70 @@
+.. include:: links.inc
+
+.. _set-up-fork:
+
+================
+Set up Your Fork
+================
+
+First you follow the instructions for :ref:`forking`.
+
+Overview
+========
+
+::
+
+ git clone git@github.com:your-user-name/iris.git
+ cd iris
+    git remote add upstream git://github.com/SciTools/iris.git
+
+In Detail
+=========
+
+Clone Your Fork
+---------------
+
+#. Clone your fork to the local computer with ``git clone
+ git@github.com:your-user-name/iris.git``
+#. Change directory to your new repo: ``cd iris``. Then
+ ``git branch -a`` to show you all branches. You'll get something
+ like::
+
+ * main
+ remotes/origin/main
+
+ This tells you that you are currently on the ``main`` branch, and
+ that you also have a ``remote`` connection to ``origin/main``.
+ What remote repository is ``remote/origin``? Try ``git remote -v`` to
+ see the URLs for the remote. They will point to your github fork.
+
+ Now you want to connect to the upstream `Iris github`_ repository, so
+ you can merge in changes from trunk.
+
+.. _linking-to-upstream:
+
+Linking Your Repository to the Upstream Repo
+--------------------------------------------
+
+::
+
+ cd iris
+ git remote add upstream git://github.com/SciTools/iris.git
+
+``upstream`` here is just the arbitrary name we're using to refer to the
+main `Iris`_ repository at `Iris github`_.
+
+Note that we've used ``git://`` for the URL rather than ``git@``. The
+``git://`` URL is read-only. This means that we can't accidentally
+(or deliberately) write to the upstream repo, and we are only going to
+use it to merge into our own code.
+
+Just for your own satisfaction, show yourself that you now have a new
+'remote', with ``git remote -v``, giving you something like::
+
+ upstream git://github.com/SciTools/iris.git (fetch)
+ upstream git://github.com/SciTools/iris.git (push)
+ origin git@github.com:your-user-name/iris.git (fetch)
+ origin git@github.com:your-user-name/iris.git (push)
+
+
+
diff --git a/docs/src/developers_guide/propose_changes.png b/docs/src/developers_guide/propose_changes.png
new file mode 100755
index 0000000000..d4e367ce1d
Binary files /dev/null and b/docs/src/developers_guide/propose_changes.png differ
diff --git a/docs/src/developers_guide/pull_request.png b/docs/src/developers_guide/pull_request.png
new file mode 100755
index 0000000000..bdc0698f54
Binary files /dev/null and b/docs/src/developers_guide/pull_request.png differ
diff --git a/docs/src/developers_guide/release.rst b/docs/src/developers_guide/release.rst
new file mode 100644
index 0000000000..c7ce230204
--- /dev/null
+++ b/docs/src/developers_guide/release.rst
@@ -0,0 +1,289 @@
+.. include:: ../common_links.inc
+
+.. _iris_development_releases:
+
+Releases
+========
+
+A release of Iris is a `tag on the SciTools/Iris`_ Github repository.
+
+Below is :ref:`iris_development_releases_steps`, followed by some prose on the
+main areas that constitute the release.
+
+
+.. _iris_development_releases_steps:
+
+How to Create an Iris Release
+-----------------------------
+
+The step-by-step process is walked through by a script at
+``/tools/release_do_nothing.py``, which is also available here:
+:doc:`release_do_nothing`.
+
+
+.. _release_manager:
+
+Release Manager
+---------------
+
+A Release Manager will be nominated for each release of Iris. This role involves:
+
+* deciding which features and bug fixes should be included in the release
+* managing the `GitHub Projects`_ board for the release
+* using :discussion:`GitHub Discussion releases category `
+ for documenting intent and capturing any
+ discussion about the release
+* holding a developer retrospective post release, to look for potential
+ future improvements
+
+The Release Manager will make the release, ensuring that all the steps outlined
+on this page are completed.
+
+
+Versioning
+----------
+
+Iris' version numbers conform to `Semantic Versioning`_ (``MAJOR.MINOR.PATCH``)
+and `PEP 440`_.
+
+Iris uses `setuptools-scm`_ to automatically manage versioning based on Git
+tags. No manual versioning work is required within the files themselves.
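+
+As a quick illustration (a sketch, not part of the release procedure), the
+version string derived from the Git tags can be inspected at runtime:
+
+.. code-block:: python
+
+    import iris
+
+    # For a tagged release this is simply the tag, e.g. "3.5.0"; for an
+    # untagged commit setuptools-scm derives a development version such as
+    # "3.5.0.dev123+gabcdef0".
+    print(iris.__version__)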
+
+
+Release Candidate
+-----------------
+
+Prior to a release, a release candidate tag may be created, marked as a
+pre-release in GitHub, with a tag ending with :literal:`rc` followed by a
+number (0-based), e.g.,:
+
+ :literal:`v1.9.0rc0`
+
+If created, the pre-release shall be available for a minimum of 2 weeks
+prior to the release being cut. However a 4 week period should be the goal
+to allow user groups to be notified of the existence of the pre-release and
+encouraged to test the functionality.
+
+A pre-release is expected for a major or minor release, but not for a
+patch release.
+
+If new features are required for a release after a release candidate has been
+cut, a new pre-release shall be issued first.
+
+Release candidates are made available as a conda package on the
+`conda-forge Anaconda channel`_ using the `rc_iris`_ label. This is achieved via
+the `conda-forge iris-feedstock`_ following `CFEP-05`_. For further information
+see the `conda-forge User Documentation`_.
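+
+For example, a user wishing to test a release candidate would typically
+install it from that label. A sketch (the environment name is illustrative)::
+
+    > conda create -n iris-rc-test -c conda-forge/label/rc_iris -c conda-forge iris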
+
+
+Patch Releases
+--------------
+
+Patch releases may be implemented to fix problems with previous major or minor
+releases. E.g. ``v1.9.1`` to fix a problem in ``v1.9.0``, both being part of
+the ``v1.9`` series.
+
+New features shall not be included in a patch release; patch releases are for bug fixes only.
+
+A patch release does not require a release candidate, but the rest of the
+release process is to be followed.
+
+
+Before Release
+--------------
+
+Deprecations
+~~~~~~~~~~~~
+
+Any behaviour which has been deprecated for the correct number of
+previous releases is now finally changed. More detail, including the correct
+number of releases, is in :ref:`iris_development_deprecations`.
+
+Standard Names
+~~~~~~~~~~~~~~
+
+The file ``etc/cf-standard-name-table.xml`` is updated to the
+`latest CF standard names`_.
+(This file is used during the build to automatically generate the source file
+``lib/iris/std_names.py``.)
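+
+A minimal sketch of refreshing the table, using the `latest CF standard names`_
+URL and assuming ``curl`` is available::
+
+    > curl -o etc/cf-standard-name-table.xml https://cfconventions.org/Data/cf-standard-names/current/src/cf-standard-name-table.xml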
+
+
+The Release
+-----------
+
+Release Branch
+~~~~~~~~~~~~~~
+
+Once the features intended for the release are on ``main``, a release branch
+should be created, in the ``SciTools/iris`` repository. This will have the name:
+
+ :literal:`v{major release number}.{minor release number}.x`
+
+for example:
+
+ :literal:`v1.9.x`
+
+This branch shall be used to finalise the release details in preparation for
+the release candidate.
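+
+A sketch of creating and publishing such a branch, assuming a remote named
+``upstream`` that points at ``SciTools/iris`` and sufficient push rights::
+
+    > git fetch upstream
+    > git checkout -b v1.9.x upstream/main
+    > git push upstream v1.9.x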
+
+Changes for a **patch release** should target the same release branch as the
+rest of the series. For example, a fix
+for a problem with the ``v1.9.0`` release will be merged into ``v1.9.x`` release
+branch, and then released with the tag ``v1.9.1``.
+
+Documentation
+~~~~~~~~~~~~~
+
+The documentation should include a dedicated What's New file for this release
+series (e.g. ``v1.9.rst``), incorporating all of the What's New entries for the release.
+This content should be reviewed and adapted as required, including highlights
+at the top of the What's New document.
+
+What's New entries for **patch releases** should be added to the existing file
+for that release series (e.g. ``v1.9.1`` section in the ``v1.9.rst`` file).
+
+A template for What's New formatting can be found in the
+``docs/src/whatsnew/latest.rst.template`` file.
+
+
+Tagging
+~~~~~~~
+
+Once all checks are complete, the release is published from the release
+branch - via the GitHub release functionality in the ``SciTools/iris``
+repository - which simultaneously creates a Git tag for the release.
+
+
+Post Release
+------------
+
+PyPI
+~~~~
+Iris is available on PyPI as ``scitools-iris``.
+
+Iris' Continuous-Integration (CI) includes the automatic building and publishing of
+PyPI artifacts in a dedicated GitHub Action.
+
+Legacy manual instructions are appended to this page for reference purposes
+(:ref:`update_pypi`).
+
+conda-forge
+~~~~~~~~~~~
+
+Iris is available on conda-forge as ``iris``.
+
+This is managed via the Iris conda recipe on the
+`conda-forge iris-feedstock`_, which is updated after the release is cut on
+GitHub, followed by automatic build and publish of the
+conda package on the `conda-forge Anaconda channel`_.
+
+Announcement
+~~~~~~~~~~~~
+
+Iris uses Twitter (`@scitools_iris`_) to announce new releases, as well as any
+internal message boards that are accessible (e.g. at the UK Met Office).
+Announcements usually include a highlighted feature to hook readers' attention.
+
+Citation
+~~~~~~~~
+
+``docs/src/userguide/citation.rst`` is updated to include
+the latest [non-release-candidate] version, date and `Zenodo DOI`_
+of the new release. Ideally this would be updated before the release, but
+the DOI for the new version is only available once the release has been
+created in GitHub.
+
+Merge Back
+~~~~~~~~~~
+
+After any release is published, **including patch releases**, the changes from the
+release branch should be merged back onto the ``SciTools/iris`` ``main`` branch.
+
+
+Appendices
+----------
+
+.. _update_pypi:
+
+Updating PyPI Manually
+~~~~~~~~~~~~~~~~~~~~~~
+
+.. note::
+
+    As part of our Continuous-Integration (CI), the building and publishing of
+    PyPI artifacts is now automated by a dedicated GitHub Action.
+
+    The following instructions **no longer** need to be performed manually,
+    but remain part of the documentation for reference purposes only.
+
+Update the `scitools-iris`_ project on PyPI with the latest Iris release.
+
+To do this perform the following steps.
+
+Create a conda environment with the appropriate conda packages to build the
+source distribution (``sdist``) and pure Python wheel (``bdist_wheel``)::
+
+ > conda create -n iris-pypi -c conda-forge --yes build twine
+ > . activate iris-pypi
+
+Check out the appropriate Iris release tag from the appropriate remote.
+For example, to check out tag ``v1.0`` from ``upstream``::
+
+ > git fetch upstream --tags
+ > git checkout v1.0
+
+Build the source distribution and wheel from the Iris root directory::
+
+ > python -m build
+
+The ``./dist`` directory should now be populated with the source archive
+``.tar.gz`` file, and built distribution ``.whl`` file.
+
+Check that the package description will render properly on PyPI for each
+of the built artifacts::
+
+ > python -m twine check dist/*
+
+To list and check the contents of the binary wheel::
+
+ > python -m zipfile --list dist/*.whl
+
+If all seems well, sufficient maintainer privileges will be required to
+upload these artifacts to `scitools-iris`_ on PyPI::
+
+ > python -m twine upload --repository-url https://upload.pypi.org/legacy/ dist/*
+
+Ensure that the artifacts are successfully uploaded and available on
+`scitools-iris`_ before creating a conda test environment to install Iris
+from PyPI::
+
+ > conda deactivate
+ > conda env create --file ./requirements/iris.yml
+ > . activate iris-dev
+ > python -m pip install --no-deps scitools-iris
+
+For further details on how to test Iris, see :ref:`developer_running_tests`.
+
+.. seealso::
+
+ For further information on packaging and uploading a project to PyPI, please
+ refer to `Generating Distribution Archives`_ and `Packaging Your Project`_.
+
+.. _SciTools/iris: https://github.com/SciTools/iris
+.. _tag on the SciTools/Iris: https://github.com/SciTools/iris/releases
+.. _conda-forge Anaconda channel: https://anaconda.org/conda-forge/iris
+.. _conda-forge iris-feedstock: https://github.com/conda-forge/iris-feedstock
+.. _CFEP-05: https://github.com/conda-forge/cfep/blob/master/cfep-05.md
+.. _conda-forge User Documentation: https://conda-forge.org/docs/user/00_intro.html
+.. _Active Versions: https://readthedocs.org/projects/scitools-iris/versions/
+.. _Editing v3.0.0rc0: https://readthedocs.org/dashboard/scitools-iris/version/v3.0.0rc0/edit
+.. _rc_iris: https://anaconda.org/conda-forge/iris/labels
+.. _Generating Distribution Archives: https://packaging.python.org/tutorials/packaging-projects/#generating-distribution-archives
+.. _Packaging Your Project: https://packaging.python.org/guides/distributing-packages-using-setuptools/#packaging-your-project
+.. _latest CF standard names: https://cfconventions.org/Data/cf-standard-names/current/src/cf-standard-name-table.xml
+.. _setuptools-scm: https://github.com/pypa/setuptools_scm
+.. _Semantic Versioning: https://semver.org/
+.. _PEP 440: https://peps.python.org/pep-0440/
+.. _@scitools_iris: https://twitter.com/scitools_iris
+.. _GitHub Projects: https://github.com/SciTools/iris/projects
+.. _Zenodo DOI: https://doi.org/10.5281/zenodo.595182
diff --git a/docs/src/developers_guide/release_do_nothing.rst b/docs/src/developers_guide/release_do_nothing.rst
new file mode 100644
index 0000000000..1f72827184
--- /dev/null
+++ b/docs/src/developers_guide/release_do_nothing.rst
@@ -0,0 +1,12 @@
+:orphan:
+
+Release Do-Nothing Script
+-------------------------
+
+Rendered from the original ``/tools/release_do_nothing.py``.
+
+`Read more about do-nothing scripts
+`_
+
+.. literalinclude:: ../../../tools/release_do_nothing.py
+ :language: python
diff --git a/docs/src/developers_guide/testing_tools.rst b/docs/src/developers_guide/testing_tools.rst
new file mode 100755
index 0000000000..dd628d37fc
--- /dev/null
+++ b/docs/src/developers_guide/testing_tools.rst
@@ -0,0 +1,80 @@
+.. include:: ../common_links.inc
+
+.. _testing_tools:
+
+Testing tools
+*************
+
+Iris has various internal convenience functions and utilities available to
+support writing tests. Using these makes tests quicker and easier to write, and
+also consistent with the rest of Iris (which makes it easier to work with the
+code). Most of these conveniences are accessed through the
+:class:`iris.tests.IrisTest` class, from
+which Iris' test classes then inherit.
+
+.. tip::
+
+ All functions listed on this page are defined within
+ :mod:`iris.tests.__init__.py` as methods of
+ :class:`iris.tests.IrisTest_nometa` (which :class:`iris.tests.IrisTest`
+ inherits from). They can be accessed within a test using
+ ``self.exampleFunction``.
+
+Custom assertions
+=================
+
+:class:`iris.tests.IrisTest` supports a variety of custom unittest-style
+assertions, such as :meth:`~iris.tests.IrisTest_nometa.assertArrayEqual` and
+:meth:`~iris.tests.IrisTest_nometa.assertArrayAlmostEqual`.
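+
+For example, a test comparing arrays might use them as follows (a minimal
+sketch; the class name and data are purely illustrative)::
+
+    import numpy as np
+
+    import iris.tests as tests
+
+
+    class TestExample(tests.IrisTest):
+        def test_arrays(self):
+            result = np.array([1.0, 2.0, 3.0])
+            expected = np.array([1.0, 2.0, 3.0000001])
+            # Fails on any difference at all.
+            self.assertArrayEqual(result, result)
+            # Tolerates small floating point differences.
+            self.assertArrayAlmostEqual(result, expected)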
+
+.. _create-missing:
+
+Saving results
+--------------
+
+Some tests compare the generated output to the expected result contained in a
+file. Custom assertions for this include
+:meth:`~iris.tests.IrisTest_nometa.assertCMLApproxData`,
+:meth:`~iris.tests.IrisTest_nometa.assertCDL`,
+:meth:`~iris.tests.IrisTest_nometa.assertCML` and
+:meth:`~iris.tests.IrisTest_nometa.assertTextFile`. See the docstrings for more
+information.
+
+.. note::
+
+ Sometimes code changes alter the results expected from a test containing the
+ above methods. These can be updated by removing the existing result files
+ and then running the file containing the test with a ``--create-missing``
+ command line argument, or setting the ``IRIS_TEST_CREATE_MISSING``
+ environment variable to anything non-zero. This will create the files rather
+ than erroring, allowing you to commit the updated results.
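+
+For example, to regenerate the expected results for a single test module (the
+module path here is illustrative)::
+
+    > export IRIS_TEST_CREATE_MISSING=1
+    > python lib/iris/tests/test_example.py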
+
+Context managers
+================
+
+Capturing exceptions and logging
+--------------------------------
+
+:class:`iris.tests.IrisTest` includes several context managers that can be used
+to make test code tidier and easier to read. These include
+:meth:`~iris.tests.IrisTest_nometa.assertWarnsRegexp` and
+:meth:`~iris.tests.IrisTest_nometa.assertLogs`.
+
+Temporary files
+---------------
+
+It's also possible to generate temporary files in a concise fashion with
+:meth:`~iris.tests.IrisTest_nometa.temp_filename`.
+
+Patching
+========
+
+:meth:`~iris.tests.IrisTest_nometa.patch` is a wrapper around ``unittest.mock.patch``
+that will be automatically cleaned up at the end of the test.
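+
+A short sketch of typical usage (the patched target and file name are
+illustrative)::
+
+    import iris
+    import iris.tests as tests
+
+
+    class TestWithPatch(tests.IrisTest):
+        def test_load_is_mocked(self):
+            # The patch is removed automatically when this test finishes.
+            mocked_load = self.patch("iris.load")
+            iris.load("hypothetical.nc")
+            mocked_load.assert_called_once_with("hypothetical.nc")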
+
+Graphic tests
+=============
+
+As a package capable of generating graphical outputs, Iris has utilities for
+creating and updating graphical tests - see :ref:`testing.graphics` for more
+information.
\ No newline at end of file
diff --git a/docs/src/further_topics/dask_best_practices/dask_bags_and_greed.rst b/docs/src/further_topics/dask_best_practices/dask_bags_and_greed.rst
new file mode 100644
index 0000000000..272ea6fc08
--- /dev/null
+++ b/docs/src/further_topics/dask_best_practices/dask_bags_and_greed.rst
@@ -0,0 +1,235 @@
+.. _examples_bags_greed:
+
+3. Dask Bags and Greedy Parallelism
+-----------------------------------
+
+Here is a journey that demonstrates:
+
+* How to apply dask.bags to an existing script
+* The equal importance of optimisation of non-parallel parts of a script
+* Protection against multiple software trying to manage parallelism
+ simultaneously
+
+
+3.1 The Problem - Slow Loading
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+We have ~7000 GRIB files spread between 256 dated directories::
+
+ .
+ |-- 20180401
+ | |-- gfs.t00z.icing.0p25.grb2f006
+ | |-- gfs.t00z.icing.0p25.grb2f006.1
+ | |-- gfs.t00z.icing.0p25.grb2f012
+ | |-- gfs.t00z.icing.0p25.grb2f018
+ | |-- gfs.t00z.icing.0p25.grb2f024
+ | |-- gfs.t00z.icing.0p25.grb2f030
+ | `-- gfs.t00z.icing.0p25.grb2f036
+ |-- 20180402
+ | `-- gfs.t00z.icing.0p25.grb2f006
+ |-- 20180403
+ | |-- gfs.t12z.icing.0p25.grb2f006
+ | |-- gfs.t12z.icing.0p25.grb2f012
+
+With this script, a sample of 11 GRIB files takes ~600 seconds to load::
+
+    import glob
+
+    import iris
+
+    def callback(cube, field, fname):
+        if field.sections[5]['bitsPerValue'] == 0:
+            raise iris.exceptions.IgnoreCubeException
+        if field.sections[4]['parameterNumber'] == 20:
+            raise iris.exceptions.IgnoreCubeException
+        elif field.sections[4]['parameterNumber'] == 234:
+            cube.long_name = 'Icing Severity'
+
+    fpaths = glob.glob('20190416/*t18z*f???')
+    cubes = iris.load(fpaths, callback=callback)
+
+3.2 Parallelisation
+^^^^^^^^^^^^^^^^^^^
+We'll try using `dask.bag `_ to
+parallelise the function calls. It's important that Dask is given the freedom
+to break the task down in an efficient manner - the function that is mapped
+across the bag should only load a single file, and the bag itself can
+iterate through the list of files. Here's the restructured script::
+
+ import glob
+ import multiprocessing
+ import os
+
+ import dask
+ import dask.bag as db
+ import iris
+
+ def callback(cube, field, fname):
+ if field.sections[5]['bitsPerValue'] == 0:
+ raise iris.exceptions.IgnoreCubeException
+ if field.sections[4]['parameterNumber'] == 20:
+ raise iris.exceptions.IgnoreCubeException
+ elif field.sections[4]['parameterNumber'] == 234:
+ cube.long_name = 'Icing Severity'
+
+ def func(fname):
+ return iris.load_cube(fname, callback=callback)
+
+ fpaths = list(glob.glob('20190416/*t18z*f???'))
+
+ # Determine the number of processors visible ..
+ cpu_count = multiprocessing.cpu_count()
+
+ # .. or as given by slurm allocation.
+ # Only relevant when using Slurm for job scheduling
+    if 'SLURM_NTASKS' in os.environ:
+        cpu_count = int(os.environ['SLURM_NTASKS'])
+
+ # Do not exceed the number of CPUs available, leaving 1 for the system.
+ num_workers = cpu_count - 1
+ print('Using {} workers from {} CPUs...'.format(num_workers, cpu_count))
+
+ # Now do the parallel load.
+ with dask.config.set(num_workers=num_workers):
+ bag = db.from_sequence(fpaths).map(func)
+ cubes = iris.cube.CubeList(bag.compute()).merge()
+
+This achieves approximately a 10-fold improvement if enough CPUs are
+available to have one per file. See this benchmarking:
+
++---------------+-----------------------+---------------+---------------+
+| Machine | CPUs Available | CPUs Used | Time Taken |
++===============+=======================+===============+===============+
+| A | 4 | 3 | 4m 05s |
+| | +---------------+---------------+
+| | | 4 | 3m 22s |
++---------------+-----------------------+---------------+---------------+
+| B | 8 | 1 | 9m 10s |
+| | +---------------+---------------+
+| | | 7 | 2m 35s |
+| | +---------------+---------------+
+| | | 8 | 2m 20s |
++---------------+-----------------------+---------------+---------------+
+
+
+.. _examples_bags_greed_profile:
+
+3.3 Profiling
+^^^^^^^^^^^^^
+1m 10s is still a surprisingly long time. When faced with a mystery like
+this it is helpful to profile the script to see if there are any steps that
+are taking more time than we would expect. For this we use a tool called
+`kapture `_ to produce a
+flame chart visualising the time spent performing each call:
+
+.. image:: images/grib-bottleneck.png
+ :width: 1000
+ :align: center
+
+From this we see that 96% of the runtime is taken by this call::
+
+ res = gribapi.grib_get_array(self._message_id, key)
+
+This call is made by the ``callback`` function, which inspects the
+GRIB messages to filter out cubes with certain unwanted properties.
+
+3.4 Improving GRIB Key Handling
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Even with parallelisation, we are still limited by the time it takes to run
+a single instance of a function. This is going to become much more important
+when running 7000 files instead of 11, since there will be nowhere near
+enough CPUs even on a large multi-processing system, meaning each CPU will be running many instances
+of the function. **Parallelisation can only go so far to solving speed issues** --
+it's effectively the 'brute force' method.
+
+:ref:`examples_bags_greed_profile` showed us where the major bottleneck is. To improve efficiency
+we can re-write the script to filter on GRIB messages *before* converting
+the GRIB file to a cube::
+
+ import dask
+ import dask.bag as db
+ import glob
+ import iris
+ import multiprocessing
+ import os
+
+    def func(fname):
+        import iris
+        from iris_grib import load_pairs_from_fields
+        from iris_grib.message import GribMessage
+
+        # Perform GRIB message level filtering...
+        filtered_messages = []
+        for message in GribMessage.messages_from_filename(fname):
+            if (message.sections[5]['bitsPerValue'] != 0 and
+                    message.sections[4]['parameterNumber'] == 234):
+                filtered_messages.append(message)
+        # ...now convert the messages to cubes.
+        cubes = [cube for cube, message in load_pairs_from_fields(filtered_messages)]
+        return iris.cube.CubeList(cubes).merge_cube()
+
+ fpaths = list(glob.glob('/scratch/frcz/ICING/GFS_DATA/20190416/*t18z*f???'))
+ cpu_count = multiprocessing.cpu_count()
+
+ # Only relevant when using Slurm for job scheduling
+ if 'SLURM_NTASKS' in os.environ:
+ cpu_count = os.environ['SLURM_NTASKS']
+
+ num_workers = cpu_count - 1
+
+ print('Using {} workers from {} CPUs...'.format(num_workers, cpu_count))
+ with dask.config.set(num_workers=num_workers):
+ bag = db.from_sequence(fpaths).map(func)
+ cubes = iris.cube.CubeList(bag.compute())
+
+This achieves a significant performance improvement - more than twice as
+fast as the previous benchmarks:
+
++---------------+-----------------------+---------------+---------------+-----------+
+| Machine | CPUs Available | CPUs Used | Previous Time | New Time |
++===============+=======================+===============+===============+===========+
+| Example | 8 | 7 | 2m 35s | 1m 05s |
+| | +---------------+---------------+-----------+
+| | | 8 | 2m 20s | 1m 03s |
++---------------+-----------------------+---------------+---------------+-----------+
+
+3.5 Managing External Factors
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+The speed will still need to be further improved before we can process 7000
+files. The main gains we can achieve are by making sure it is **only Dask**
+that manages multi-processing - if multi-processing is coming from more
+than one place there are predictable clashes.
+
+First, NumPy must be prevented from performing its own multi-processing by
+adding the following **before** ``numpy`` is first imported. You can read more
+about this in :ref:`numpy_threads`.
+
+::
+
+ import os
+
+ os.environ["OMP_NUM_THREADS"] = "1"
+ os.environ["OPENBLAS_NUM_THREADS"] = "1"
+ os.environ["MKL_NUM_THREADS"] = "1"
+ os.environ["VECLIB_MAXIMUM_THREADS"] = "1"
+ os.environ["NUMEXPR_NUM_THREADS"] = "1"
+
+Lastly, if you are using SLURM on the computing cluster, then SLURM must be configured to prevent it
+from optimising the number of cores necessary for the job. See the SLURM commands
+below, to be added before running the Python script. It is important that
+``ntasks`` matches the number of CPUs specified in the Python script. You
+can read more about these points in :ref:`multi-pro_slurm`.
+
+::
+
+ #SBATCH --ntasks=12
+ #SBATCH --ntasks-per-core=1
+
+This has all been based on a real example. Once all the above had been set
+up correctly, the completion time had dropped from an estimated **55 days**
+to **less than 1 day**.
+
+3.6 Lessons
+^^^^^^^^^^^
+* Dask isn't a magic switch - it's important to write your script so that
+ there is a way to create small sub-tasks. In this case by providing
+ dask.bag with the file list and the function separated
+* Parallelism is not the only performance improvement to try - the script
+ will still be slow if the individual function is slow
+* All multi-processing needs to be managed by Dask. Several other factors
+  may introduce multi-processing and these need to be configured not to do so.
diff --git a/docs/src/further_topics/dask_best_practices/dask_parallel_loop.rst b/docs/src/further_topics/dask_best_practices/dask_parallel_loop.rst
new file mode 100644
index 0000000000..836503314c
--- /dev/null
+++ b/docs/src/further_topics/dask_best_practices/dask_parallel_loop.rst
@@ -0,0 +1,169 @@
+.. _examples_parallel_loop:
+
+2. Parallelising a Loop of Multiple Calls to a Third Party Library
+------------------------------------------------------------------
+
+Whilst Iris does provide extensive functionality for performing statistical and
+mathematical operations on your data, it is sometimes necessary to use a third
+party library.
+
+The following example describes a real world use case of how to parallelise
+multiple calls to a third party library using dask bags.
+
+2.1 The Problem - Parallelising a Loop
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+In this particular example, the user is calculating a sounding parcel for each
+column in their dataset. The cubes that are used are of shape::
+
+ (model_level_number: 20; grid_latitude: 1536; grid_longitude: 1536)
+
+As a sounding is calculated for each column, this means there are 1536x1536
+individual calculations.
+
+In Python, it is common practice to vectorise calculations rather than use for loops.
+Vectorising is done by using NumPy to operate on the whole array at once rather
+than a single element at a time. Unfortunately, not all operations are
+vectorisable, including the calculation in this example, and so we look to
+other methods to improve the performance.
+
+2.2 Original Code with Loop
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+We start out by loading cubes of pressure, temperature, dewpoint temperature and height::
+
+ import iris
+ import numpy as np
+ from skewt import SkewT as sk
+
+ pressure = iris.load_cube('a.press.19981109.pp')
+ temperature = iris.load_cube('a.temp.19981109.pp')
+ dewpoint = iris.load_cube('a.dewp.19981109.pp')
+ height = iris.load_cube('a.height.19981109.pp')
+
+We set up the NumPy arrays we will be filling with the output data::
+
+    output_arrays = [np.zeros(pressure.shape[1:]) for _ in range(6)]
+    cape, cin, lcl, lfc, el, tpw = output_arrays
+
+Now we loop over the columns in the data to calculate the soundings::
+
+ for y in range(nlim):
+ for x in range(nlim):
+ mydata = {'pres': pressure[:, y, x],
+ 'temp': temperature[:, y, x],
+ 'dwpt': dewpoint[:, y, x],
+ 'hght': height[:, y, x]}
+
+ # Calculate the sounding with the selected column of data.
+ S = sk.Sounding(soundingdata=mydata)
+ try:
+ startp, startt, startdp, type_ = S.get_parcel(parcel_def)
+ P_lcl, P_lfc, P_el, CAPE, CIN = S.get_cape(
+ startp, startt, startdp, totalcape='tot')
+ TPW = S.precipitable_water()
+ except:
+ P_lcl, P_lfc, P_el, CAPE, CIN, TPW = [
+ np.ma.masked for _ in range(6)]
+
+ # Fill the output arrays with the results
+ cape[y,x] = CAPE
+ cin[y,x] = CIN
+ lcl[y,x] = P_lcl
+ lfc[y,x] = P_lfc
+ el[y,x] = P_el
+ tpw[y,x] = TPW
+
+2.3 Profiling the Code with Kapture
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Kapture is a useful statistical profiler. For more information see `the
+Kapture repo `_.
+
+Results below:
+
+.. image:: images/loop_third_party_kapture_results.png
+ :width: 1000
+ :align: center
+
+As we can see above (looking at the highlighted section of the red bar), most of the time is spent in the call to::
+
+ S.get_parcel(parcel_def)
+
+As there are over two million columns in the data, we would greatly benefit
+from parallelising this work.
+
+2.4 Parallelising with Dask Bags
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Dask bags are collections of Python objects that you can map a computation over
+in a parallel manner.
+
+For more information about dask bags, see the `Dask Bag Documentation
+`_.
+
+Dask bags work best with lightweight objects, so we will create a collection of
+indices into our data arrays.
+
+First, we put the loop into a function that takes a slice object to index the
+appropriate section of the array::
+
+    def calculate_sounding(y_slice):
+        for y in range(y_slice.stop - y_slice.start):
+            for x in range(nlim):
+                mydata = {'pres': pressure[:, y_slice][:, y, x],
+                          'temp': temperature[:, y_slice][:, y, x],
+                          'dwpt': dewpoint[:, y_slice][:, y, x],
+                          'hght': height[:, y_slice][:, y, x]}
+
+                # Calculate the sounding with the selected column of data.
+                S = sk.Sounding(soundingdata=mydata)
+                try:
+                    startp, startt, startdp, type_ = S.get_parcel(parcel_def)
+                    P_lcl, P_lfc, P_el, CAPE, CIN = S.get_cape(
+                        startp, startt, startdp, totalcape=total_cape)
+                    TPW = S.precipitable_water()
+                except:
+                    P_lcl, P_lfc, P_el, CAPE, CIN, TPW = [
+                        np.ma.masked for _ in range(6)]
+
+                # Fill the output arrays with the results
+                cape[y_slice][y, x] = CAPE
+                cin[y_slice][y, x] = CIN
+                lcl[y_slice][y, x] = P_lcl
+                lfc[y_slice][y, x] = P_lfc
+                el[y_slice][y, x] = P_el
+                tpw[y_slice][y, x] = TPW
+
+Then we create a dask bag of slice objects that will create multiple partitions
+along the y axis::
+
+    import dask
+    import dask.bag as db
+
+    num_of_workers = 4
+    len_of_y_axis = pressure.shape[1]
+
+    part_loc = [int(loc) for loc in np.floor(np.linspace(0, len_of_y_axis,
+                                                         num_of_workers + 1))]
+
+    dask_bag = db.from_sequence(
+        [slice(part_loc[i], part_loc[i + 1]) for i in range(num_of_workers)])
+
+    with dask.config.set(scheduler='processes'):
+        dask_bag.map(calculate_sounding).compute()
+
+When this was run on a machine with 4 workers, a speedup of ~4x was achieved,
+as expected.
+
+Note that if using the processes scheduler, there is some extra time spent
+serialising the data to pass it between workers. For more information on the
+different schedulers available in Dask, see `Dask Scheduler Overview
+`_.
+
+For a greater speedup, it is possible to run the same code on a multi-processing
+system where you will have access to more CPUs.
+
+In this particular example, we are handling multiple numpy arrays and so we use
+dask bags. If working with a single numpy array, it may be more appropriate to
+use Dask Arrays (see `Dask Arrays
+`_ for more information).
+
+
+2.5 Lessons
+^^^^^^^^^^^
+* If possible, dask bags should contain lightweight objects
+* Minimise the number of tasks that are created
diff --git a/docs/src/further_topics/dask_best_practices/dask_pp_to_netcdf.rst b/docs/src/further_topics/dask_best_practices/dask_pp_to_netcdf.rst
new file mode 100644
index 0000000000..28784154b4
--- /dev/null
+++ b/docs/src/further_topics/dask_best_practices/dask_pp_to_netcdf.rst
@@ -0,0 +1,92 @@
+.. _examples_pp_to_ff:
+
+1. Speed up Converting PP Files to NetCDF
+-----------------------------------------
+
+Here is an example of how dask objects can be tuned for better performance.
+
+1.1 The Problem - Slow Saving
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+We have ~300 PP files which we load as follows:
+
+.. code-block:: python
+
+ import iris
+ import glob
+
+ files = glob.glob("pp_files/*.pp")
+ cube = iris.load_cube(files, "mass_fraction_of_ozone_in_air")
+
+Note that loading here may also be parallelised in a similar manner as
+described in :ref:`examples_bags_greed`. Either way, the resulting cube looks
+as follows:
+
+.. code-block:: text
+
+ mass_fraction_of_ozone_in_air / (kg kg-1) (time: 276; model_level_number: 85; latitude: 144; longitude: 192)
+ Dimension coordinates:
+ time x - - -
+ model_level_number - x - -
+ latitude - - x -
+ longitude - - - x
+ Auxiliary coordinates:
+ forecast_period x - - -
+ level_height - x - -
+ sigma - x - -
+ Scalar coordinates:
+ forecast_reference_time: 1850-01-01 00:00:00
+ Attributes:
+ STASH: m01s34i001
+ source: Data from Met Office Unified Model
+ um_version: 10.9
+ Cell methods:
+ mean: time (1 hour)
+
+The cube is then immediately saved as a netCDF file.
+
+.. code-block:: python
+
+    nc_chunks = [chunk[0] for chunk in cube.lazy_data().chunks]
+    iris.save(cube, "outfile.nc", chunksizes=nc_chunks)
+
+This operation was taking longer than expected and we would like to improve
+performance. Note that when this cube is being saved, the data is still lazy:
+the data is both read and written at the saving step, and this is done in chunks.
+The way this data is divided into chunks can affect performance. By tweaking
+the way these chunks are structured, it may be possible to improve performance
+when saving.
+
+
+.. _dask_rechunking:
+
+1.2 Rechunking
+^^^^^^^^^^^^^^
+We may inspect the cube's lazy data before saving:
+
+.. code-block:: python
+
+    # We can access the cube's Dask array
+ lazy_data = cube.lazy_data()
+ # We can find the shape of the chunks
+ # Note that the chunksize of a Dask array is the shape of the chunk
+ # as a tuple.
+ print(lazy_data.chunksize)
+
+Doing so, we find that the chunks currently have the shape::
+
+    (1, 1, 144, 192)
+
+This is significantly smaller than the `size which Dask recommends
+`_. Bear in mind that the
+ideal chunk size depends on the platform you are running on (for this example,
+the code is being run on a desktop with 8 CPUs). In this case, we have 23460
+small chunks. We can reduce the number of chunks by rechunking before saving:
+
+.. code-block:: python
+
+ lazy_data = cube.lazy_data()
+    lazy_data = lazy_data.rechunk((1, 85, 144, 192))
+ cube.data = lazy_data
+
+We now have 276 moderately sized chunks. When we try saving again, we find
+that it is approximately 4 times faster, saving in 2m13s rather than 10m33s.
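+
+As a quick check before saving, we can confirm the effect of the rechunk (a
+small sketch; the numbers correspond to the example above):
+
+.. code-block:: python
+
+    # Expect a chunk shape of (1, 85, 144, 192) and 276 chunks in total.
+    print(lazy_data.chunksize)
+    print(lazy_data.npartitions)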
diff --git a/docs/src/further_topics/dask_best_practices/images/grib-bottleneck.png b/docs/src/further_topics/dask_best_practices/images/grib-bottleneck.png
new file mode 100644
index 0000000000..c029d57e5e
Binary files /dev/null and b/docs/src/further_topics/dask_best_practices/images/grib-bottleneck.png differ
diff --git a/docs/src/further_topics/dask_best_practices/images/loop_third_party_kapture_results.png b/docs/src/further_topics/dask_best_practices/images/loop_third_party_kapture_results.png
new file mode 100644
index 0000000000..8f388bb89c
Binary files /dev/null and b/docs/src/further_topics/dask_best_practices/images/loop_third_party_kapture_results.png differ
diff --git a/docs/src/further_topics/dask_best_practices/index.rst b/docs/src/further_topics/dask_best_practices/index.rst
new file mode 100644
index 0000000000..f126427d3f
--- /dev/null
+++ b/docs/src/further_topics/dask_best_practices/index.rst
@@ -0,0 +1,221 @@
+.. _dask best practices:
+
+Dask Best Practices
+*******************
+
+This section outlines some of the best practices when using Dask with Iris. These
+practices involve improving performance through rechunking, making the best use of
+computing clusters and avoiding parallelisation conflicts between Dask and NumPy.
+
+
+.. note::
+
+ Here, we have collated advice and a handful of examples, from the topics most
+ relevant when using Dask with Iris, that we hope will assist users to make
+ the best start when using Dask. It is *not* a fully comprehensive guide
+ encompassing all best practices. You can find more general dask information in the
+ `official Dask Documentation `_.
+
+
+Introduction
+============
+
+`Dask `_ is a powerful tool for speeding up data handling
+via lazy loading and parallel processing. To get the full benefit of using
+Dask, it is important to configure it correctly and supply it with
+appropriately structured data. For example, we may need to "chunk" data arrays
+into smaller pieces to process, read and write it; getting the "chunking" right
+can make a significant difference to performance!
+
+
+.. _numpy_threads:
+
+NumPy Threads
+=============
+
+In certain scenarios NumPy will attempt to perform threading using an
+external library - typically OMP, MKL or openBLAS - making use of **every**
+CPU available. This interacts badly with Dask:
+
+* Dask may create multiple instances of NumPy, each generating enough
+ threads to use **all** the available CPUs. The resulting sharing of CPUs
+ between threads greatly reduces performance. The more cores there are, the
+ more pronounced this problem is.
+* NumPy will generate enough threads to use all available CPUs even
+ if Dask is deliberately configured to only use a subset of CPUs. The
+ resulting sharing of CPUs between threads greatly reduces performance.
+* `Dask is already designed to parallelise with NumPy arrays `_, so adding NumPy's 'competing' layer of
+ parallelisation could cause unpredictable performance.
+
+Therefore it is best to prevent NumPy performing its own parallelisation, `a
+suggestion made in Dask's own documentation `_.
+The following commands will ensure this in all scenarios:
+
+in Python...
+
+::
+
+ # Must be run before importing NumPy.
+ import os
+ os.environ["OMP_NUM_THREADS"] = "1"
+ os.environ["OPENBLAS_NUM_THREADS"] = "1"
+ os.environ["MKL_NUM_THREADS"] = "1"
+ os.environ["VECLIB_MAXIMUM_THREADS"] = "1"
+ os.environ["NUMEXPR_NUM_THREADS"] = "1"
+
+or in Linux command line...
+
+::
+
+ export OMP_NUM_THREADS=1
+ export OPENBLAS_NUM_THREADS=1
+ export MKL_NUM_THREADS=1
+ export VECLIB_MAXIMUM_THREADS=1
+ export NUMEXPR_NUM_THREADS=1
+
+
+.. _multi-pro_systems:
+
+Dask on Computing Clusters
+==========================
+
+Dask is well suited for use on computing clusters, but there are some important factors you must be
+aware of. In particular, you will always need to explicitly control parallel
+operation, both in Dask and likewise in NumPy.
+
+
+.. _multi-pro_slurm:
+
+CPU Allocation
+--------------
+
+When running on a computing cluster, unless configured otherwise, Dask will attempt to create
+one parallel 'worker' task for each CPU. However, when using a job scheduler such as Slurm, only *some* of
+these CPUs are actually accessible -- often, and by default, only one. This leads to a serious
+over-commitment unless it is controlled.
+
+So, **whenever Iris is used on a computing cluster, you must always control the number
+of dask workers to a sensible value**, matching the slurm allocation. You do
+this with::
+
+ dask.config.set(num_workers=N)
+
+For an example, see :doc:`dask_bags_and_greed`.
+
+Alternatively, when there is only one CPU allocated, it may actually be more
+efficient to use a "synchronous" scheduler instead, with::
+
+ dask.config.set(scheduler='synchronous')
+
+See the Dask documentation on `Single thread synchronous scheduler
+`_.
+
+
+.. _multi-pro_numpy:
+
+NumPy Threading
+---------------
+
+NumPy also interrogates the visible number of CPUs to multi-thread its operations.
+The large number of CPUs available in a computing cluster will thus cause confusion if NumPy
+attempts its own parallelisation, so this must be prevented. Refer back to
+:ref:`numpy_threads` for more detail.
+
+
+Distributed
+-----------
+
+Even though allocations on a computing cluster are generally restricted to a single node, there
+are still good reasons for using 'dask.distributed' in many cases. See `Single Machine: dask.distributed
+`_ in the Dask documentation.
+
+
+Chunking
+========
+
+Dask breaks down large data arrays into chunks, allowing efficient
+parallelisation by processing several smaller chunks simultaneously. For more
+information, see the documentation on
+`Dask Array `_.
+
+Iris provides a basic chunking shape to Dask, attempting to set the shape for
+best performance. The chunking that is used can depend on the file format that
+is being loaded. See below for how chunking is performed for:
+
+* :ref:`chunking_netcdf`
+* :ref:`chunking_pp_ff`
+
+It can in some cases be beneficial to re-chunk the arrays in Iris cubes.
+For information on how to do this, see :ref:`dask_rechunking`.
+
+
+.. _chunking_netcdf:
+
+NetCDF Files
+------------
+
+NetCDF files can include their own chunking specification. This is either
+specified when creating the file, or is automatically assigned if one or
+more of the dimensions is `unlimited `_.
+Importantly, netCDF chunk shapes are **not optimised for Dask
+performance**.
+
+Chunking can be set independently for any variable in a netCDF file.
+When a netCDF variable uses an unlimited dimension, it is automatically
+chunked: the chunking is the shape of the whole variable, but with '1' instead
+of the length in any unlimited dimensions.
+
+When chunking is specified for netCDF data, Iris will set the dask chunking
+to an integer multiple or fraction of that shape, such that the data size is
+near to but not exceeding the dask array chunk size.
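+
+You can inspect the chunking that Iris has chosen for a loaded cube via its
+lazy data (a sketch; the filename is illustrative)::
+
+    import iris
+
+    cube = iris.load_cube("my_data.nc")
+    # The dask chunk shape chosen for this variable.
+    print(cube.lazy_data().chunksize)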
+
+
+.. _chunking_pp_ff:
+
+PP and Fieldsfiles
+------------------
+
+PP and Fieldsfiles contain multiple 2D fields of data. When loading PP or
+Fieldsfiles into Iris cubes, the chunking will automatically be set to a chunk
+per field.
+
+For example, if a PP file contains 2D lat-lon fields for each of the
+85 model level numbers, it will load in a cube that looks as follows::
+
+ (model_level_number: 85; latitude: 144; longitude: 192)
+
+The data in this cube will be partitioned with chunks of shape
+:code:`(1, 144, 192)`.
+
+If the file(s) being loaded contain multiple fields, this can lead to an
+excessive amount of chunks which will result in poor performance.
+
+When the default chunking is not appropriate, it is possible to rechunk.
+:doc:`dask_pp_to_netcdf` provides a detailed demonstration of how Dask can optimise
+that process.
+
+
+Examples
+========
+
+We have written some example Dask use cases, each of which comes with advice and
+explanations of why and how the tasks are performed the way they are.
+
+If you have an example of a Dask best practice that you think may be helpful to others,
+please share it with us by raising a new `discussion on the Iris repository `_.
+
+* :doc:`dask_pp_to_netcdf`
+* :doc:`dask_parallel_loop`
+* :doc:`dask_bags_and_greed`
+
+.. toctree::
+ :hidden:
+ :maxdepth: 1
+
+ dask_pp_to_netcdf
+ dask_parallel_loop
+ dask_bags_and_greed
diff --git a/docs/src/further_topics/filtering_warnings.rst b/docs/src/further_topics/filtering_warnings.rst
new file mode 100644
index 0000000000..ef8701f951
--- /dev/null
+++ b/docs/src/further_topics/filtering_warnings.rst
@@ -0,0 +1,271 @@
+.. _filtering-warnings:
+
+==================
+Filtering Warnings
+==================
+
+Since Iris cannot predict your specific needs, it by default raises Warnings
+for anything that might be a problem for **any** user, and is designed to work with
+you to ``ignore`` Warnings which you do not find helpful.
+
+.. testsetup:: filtering_warnings
+
+ from pathlib import Path
+ import sys
+ import warnings
+
+ import iris
+ import iris.coord_systems
+ import iris.warnings
+
+ # Hack to ensure doctests actually see Warnings that are raised, and that
+ # they have a relative path (so a test pass is not machine-dependent).
+ warnings.filterwarnings("default")
+ IRIS_FILE = Path(iris.__file__)
+ def custom_warn(message, category, filename, lineno, file=None, line=None):
+ filepath = Path(filename)
+ filename = str(filepath.relative_to(IRIS_FILE.parents[1]))
+ sys.stdout.write(warnings.formatwarning(message, category, filename, lineno))
+ warnings.showwarning = custom_warn
+
+ geog_cs_globe = iris.coord_systems.GeogCS(6400000)
+ orthographic_coord_system = iris.coord_systems.Orthographic(
+ longitude_of_projection_origin=0,
+ latitude_of_projection_origin=0,
+ ellipsoid=geog_cs_globe,
+ )
+
+
+ def my_operation():
+ geog_cs_globe.inverse_flattening = 0.1
+ _ = orthographic_coord_system.as_cartopy_crs()
+
+Here is a hypothetical operation - ``my_operation()`` - which raises two
+Warnings:
+
+.. doctest:: filtering_warnings
+
+ >>> my_operation()
+ ...
+ iris/coord_systems.py:445: IrisUserWarning: Setting inverse_flattening does not affect other properties of the GeogCS object. To change other properties set them explicitly or create a new GeogCS instance.
+ warnings.warn(wmsg, category=iris.warnings.IrisUserWarning)
+ iris/coord_systems.py:771: IrisDefaultingWarning: Discarding false_easting and false_northing that are not used by Cartopy.
+ warnings.warn(
+
+Warnings can be suppressed using the Python warnings filter with the ``ignore``
+action. Detailed information is available in the Python documentation:
+:external+python:mod:`warnings`.
+
+The key points are:
+
+- :ref:`When`: a warnings filter can be applied
+ either from the command line or from within Python.
+- :ref:`What`: a warnings filter accepts
+ various arguments to specify which Warnings are being filtered. Both broad
+ and narrow filters are possible.
+
+.. _warning-filter-application:
+
+**When** a Warnings Filter can be Applied
+-----------------------------------------
+
+- **Command line:** setting the :external+python:envvar:`PYTHONWARNINGS`
+ environment variable.
+- **Command line:** the `python -W `_
+ command line argument.
+- **Within Python:** use :func:`warnings.filterwarnings` .
+
+The :ref:`warning-filter-specificity` section demonstrates using
+:func:`warnings.filterwarnings`, and shows the equivalent **command line**
+approaches.
+
+
+.. _warning-filter-specificity:
+
+**What** Warnings will be Filtered
+----------------------------------
+
+.. note::
+
+ For all of these examples we are using the
+ :class:`~warnings.catch_warnings` context manager to ensure any changes to
+ settings are temporary.
+
+ This should always work fine for the ``ignore``
+ warning filter action, but note that some of the other actions
+ may not behave correctly with all Iris operations, as
+ :class:`~warnings.catch_warnings` is not thread-safe (e.g. using the
+ ``once`` action may cause 1 warning per chunk of lazy data).
+
+Specific Warnings
+~~~~~~~~~~~~~~~~~
+
+**When you do not want a specific warning, but still want all others.**
+
+You can target specific Warning messages, e.g.
+
+.. doctest:: filtering_warnings
+
+ >>> with warnings.catch_warnings():
+ ... warnings.filterwarnings("ignore", message="Discarding false_easting")
+ ... my_operation()
+ ...
+ iris/coord_systems.py:445: IrisUserWarning: Setting inverse_flattening does not affect other properties of the GeogCS object. To change other properties set them explicitly or create a new GeogCS instance.
+ warnings.warn(wmsg, category=iris.warnings.IrisUserWarning)
+
+::
+
+ python -W ignore:"Discarding false_easting"
+ export PYTHONWARNINGS=ignore:"Discarding false_easting"
+
+----
+
+Or you can target Warnings raised by specific lines of specific modules, e.g.
+
+.. doctest:: filtering_warnings
+
+ >>> with warnings.catch_warnings():
+ ... warnings.filterwarnings("ignore", module="iris.coord_systems", lineno=445)
+ ... my_operation()
+ ...
+ iris/coord_systems.py:771: IrisDefaultingWarning: Discarding false_easting and false_northing that are not used by Cartopy.
+ warnings.warn(
+
+::
+
+ python -W ignore:::iris.coord_systems:445
+ export PYTHONWARNINGS=ignore:::iris.coord_systems:445
+
+Warnings from a Common Source
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+**When you do not want ANY warnings raised by a module, or collection of
+modules.**
+
+E.g. filtering the ``coord_systems`` module:
+
+.. doctest:: filtering_warnings
+
+ >>> with warnings.catch_warnings():
+ ... warnings.filterwarnings("ignore", module="iris.coord_systems")
+ ... my_operation()
+
+::
+
+ python -W ignore:::iris.coord_systems
+ export PYTHONWARNINGS=ignore:::iris.coord_systems
+
+----
+
+If using :func:`warnings.filterwarnings` , you can also use partial
+definitions. The below example will ``ignore`` all Warnings from ``iris`` as a
+whole.
+
+.. doctest:: filtering_warnings
+
+ >>> with warnings.catch_warnings():
+ ... warnings.filterwarnings("ignore", module="iris")
+ ... my_operation()
+
+The above 'partial' filter is not available with the command line approaches.
+
+Warnings of a Common Type
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+**When you do not want any Warnings of the same nature, from anywhere in the
+code you are calling.**
+
+The below example will ``ignore`` any
+:class:`~iris.warnings.IrisDefaultingWarning` that gets raised by *any*
+module during execution:
+
+.. doctest:: filtering_warnings
+
+ >>> with warnings.catch_warnings():
+ ... warnings.filterwarnings(
+ ... "ignore",
+ ... category=iris.warnings.IrisDefaultingWarning
+ ... )
+ ... my_operation()
+ ...
+ iris/coord_systems.py:445: IrisUserWarning: Setting inverse_flattening does not affect other properties of the GeogCS object. To change other properties set them explicitly or create a new GeogCS instance.
+ warnings.warn(wmsg, category=iris.warnings.IrisUserWarning)
+
+----
+
+Using :class:`~iris.warnings.IrisUserWarning` in the filter will ``ignore``
+both Warnings, since :class:`~iris.warnings.IrisDefaultingWarning` subclasses
+:class:`~iris.warnings.IrisUserWarning` :
+
+.. doctest:: filtering_warnings
+
+ >>> with warnings.catch_warnings():
+ ... warnings.filterwarnings(
+ ... "ignore",
+ ... category=iris.warnings.IrisUserWarning
+ ... )
+ ... my_operation()
+
+----
+
+The command line approaches can only handle the built-in Warning
+categories (`cpython#66733`_)::
+
+ python -W ignore::UserWarning
+ export PYTHONWARNINGS=ignore::UserWarning
+
+----
+
+There are several built-in Python warning categories that can be used here
+(:class:`DeprecationWarning` being a popular example, see
+:external+python:mod:`warnings` for more). Since Iris has
+so many different warnings that might be raised, Iris subclasses
+:class:`UserWarning` to :class:`~iris.warnings.IrisUserWarning`, which itself
+has **many** specialised subclasses. These subclasses exist to give you more
+granularity in your warning filtering; you can see the full list by
+viewing the :mod:`iris.warnings` module.
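+
+One way to see the available categories interactively is to inspect the module
+(a small sketch)::
+
+    import iris.warnings
+
+    # List all the Iris-specific warning classes.
+    print([name for name in dir(iris.warnings) if name.endswith("Warning")])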
+
+.. attention::
+
+ If you have ideas for adding/altering Iris' warning categories, please
+ :ref:`get in touch`! The categories exist to
+ make your life easier, and it is simple to make modifications.
+
+
+More Detail
+-----------
+
+Different people use Iris for very different purposes, from quick file
+visualisation to extract-transform-load to statistical analysis. These
+contrasting priorities mean disagreement on which Iris problems can be ignored
+and which are critically important.
+
+For problems that prevent Iris functioning: **Concrete Exceptions** are raised, which
+stop code from running any further - no debate here. For less catastrophic
+problems: **Warnings** are raised,
+which notify you (in ``stderr``) but allow code to continue running. The Warnings are
+there because Iris may **OR may not** function in the way you expect,
+depending on what you need - e.g. a problem might prevent data being saved to
+NetCDF, but statistical analysis will still work fine.
+
+Examples of Iris Warnings
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- If you attempt to plot un-bounded point data as a ``pcolormesh``: Iris will
+ guess appropriate bounds around each point so that quadrilaterals can be
+  plotted. This permanently modifies the relevant coordinates, so you are
+ warned in case downstream operations assume un-bounded coordinates.
+- If you load a NetCDF file where a CF variable references another variable -
+ e.g. ``my_var:coordinates = "depth_var" ;`` - but the referenced variable
+ (``depth_var``) is not in the file: Iris will still construct
+ its data model, but without this reference relationship. You are warned since
+ the file includes an error and the loaded result might therefore not be as
+ expected.
+
+
+.. testcleanup:: filtering_warnings
+
+ warnings.filterwarnings("ignore")
+
+
+.. _cpython#66733: https://github.com/python/cpython/issues/66733
diff --git a/docs/src/further_topics/index.rst b/docs/src/further_topics/index.rst
new file mode 100644
index 0000000000..73ce3d55e7
--- /dev/null
+++ b/docs/src/further_topics/index.rst
@@ -0,0 +1,21 @@
+.. _further_topics_index:
+
+
+Further Topics
+===============
+
+Extra information on specific technical issues.
+
+.. toctree::
+ :maxdepth: 1
+
+ filtering_warnings
+ metadata
+ lenient_metadata
+ lenient_maths
+ um_files_loading
+ missing_data_handling
+ netcdf_io
+ dask_best_practices/index
+ ugrid/index
+ which_regridder_to_use
\ No newline at end of file
diff --git a/docs/src/further_topics/lenient_maths.rst b/docs/src/further_topics/lenient_maths.rst
new file mode 100644
index 0000000000..51f77fb956
--- /dev/null
+++ b/docs/src/further_topics/lenient_maths.rst
@@ -0,0 +1,281 @@
+.. _lenient maths:
+
+Lenient Cube Maths
+******************
+
+This section provides an overview of lenient cube maths. In particular, it explains
+what lenient maths involves, clarifies how it differs from normal or strict cube
+maths, and demonstrates how you can exercise fine control over whether your cube
+maths operations are lenient or strict.
+
+Note that lenient cube maths is the default behaviour of Iris from version
+``3.0.0``.
+
+
+Introduction
+============
+
+Lenient maths stands somewhat on the shoulders of giants. If you've not already
+done so, you may want to recap the material discussed in the following sections,
+
+- :ref:`cube maths`,
+- :ref:`metadata`,
+- :ref:`lenient metadata`
+
+In addition to this, cube maths leans heavily on the :mod:`~iris.common.resolve`
+module, which provides the necessary infrastructure required by Iris to analyse
+and combine each :class:`~iris.cube.Cube` operand involved in a maths operation
+into the resultant :class:`~iris.cube.Cube`. It may be worthwhile investing
+some time to understand how the :class:`~iris.common.resolve.Resolve` class
+underpins cube maths, and consider how it may be used in general to combine
+or resolve cubes together.
+
+Given these prerequisites, recall that :ref:`lenient behaviour `
+introduced and discussed the concept of lenient metadata; a more pragmatic and
+forgiving approach to :ref:`comparing `,
+:ref:`combining ` and understanding the
+:ref:`differences ` between your metadata
+(:ref:`metadata members table`). The lenient metadata philosophy introduced
+there is extended to cube maths, with the view to also preserving as much common
+coordinate (:ref:`metadata classes table`) information, as well as common
+metadata, between the participating :class:`~iris.cube.Cube` operands as possible.
+
+Let's consolidate our understanding of lenient and strict cube maths through
+a practical worked example, which we'll explore together next.
+
+
+.. _lenient example:
+
+Lenient Example
+===============
+
+.. testsetup:: lenient-example
+
+ import iris
+ from iris.common import LENIENT
+ experiment = iris.load_cube(iris.sample_data_path("hybrid_height.nc"), "air_potential_temperature")
+ control = experiment[0]
+ control.remove_aux_factory(control.aux_factory())
+ for coord in ["sigma", "forecast_reference_time", "forecast_period", "atmosphere_hybrid_height_coordinate", "surface_altitude"]:
+ control.remove_coord(coord)
+ control.attributes["Conventions"] = "CF-1.7"
+ experiment.attributes["experiment-id"] = "RT3 50"
+
+Consider the following :class:`~iris.cube.Cube` of ``air_potential_temperature``,
+which has an `atmosphere hybrid height parametric vertical coordinate`_, and
+represents the output of a low-resolution global atmospheric ``experiment``,
+
+.. doctest:: lenient-example
+
+ >>> print(experiment)
+ air_potential_temperature / (K) (model_level_number: 15; grid_latitude: 100; grid_longitude: 100)
+ Dimension coordinates:
+ model_level_number x - -
+ grid_latitude - x -
+ grid_longitude - - x
+ Auxiliary coordinates:
+ atmosphere_hybrid_height_coordinate x - -
+ sigma x - -
+ surface_altitude - x x
+ Derived coordinates:
+ altitude x x x
+ Scalar coordinates:
+ forecast_period 0.0 hours
+ forecast_reference_time 2009-09-09 17:10:00
+ time 2009-09-09 17:10:00
+ Attributes:
+ Conventions 'CF-1.5'
+ STASH m01s00i004
+ experiment-id 'RT3 50'
+ source 'Data from Met Office Unified Model 7.04'
+
+Consider also the following :class:`~iris.cube.Cube`, which has the same global
+spatial extent, and acts as a ``control``,
+
+.. doctest:: lenient-example
+
+ >>> print(control)
+ air_potential_temperature / (K) (grid_latitude: 100; grid_longitude: 100)
+ Dimension coordinates:
+ grid_latitude x -
+ grid_longitude - x
+ Scalar coordinates:
+ model_level_number 1
+ time 2009-09-09 17:10:00
+ Attributes:
+ Conventions 'CF-1.7'
+ STASH m01s00i004
+ source 'Data from Met Office Unified Model 7.04'
+
+Now let's subtract these cubes in order to calculate a simple ``difference``,
+
+.. doctest:: lenient-example
+
+ >>> difference = experiment - control
+ >>> print(difference)
+ unknown / (K) (model_level_number: 15; grid_latitude: 100; grid_longitude: 100)
+ Dimension coordinates:
+ model_level_number x - -
+ grid_latitude - x -
+ grid_longitude - - x
+ Auxiliary coordinates:
+ atmosphere_hybrid_height_coordinate x - -
+ sigma x - -
+ surface_altitude - x x
+ Derived coordinates:
+ altitude x x x
+ Scalar coordinates:
+ forecast_period 0.0 hours
+ forecast_reference_time 2009-09-09 17:10:00
+ time 2009-09-09 17:10:00
+ Attributes:
+ experiment-id 'RT3 50'
+ source 'Data from Met Office Unified Model 7.04'
+
+Note that cube maths automatically takes care of broadcasting the
+dimensionality of the ``control`` up to that of the ``experiment``, in order to
+calculate the ``difference``. This is performed only after ensuring that both
+the **dimension coordinates** ``grid_latitude`` and ``grid_longitude`` are first
+:ref:`leniently equivalent `.
+
+As expected, the resultant ``difference`` contains the
+:class:`~iris.aux_factory.HybridHeightFactory` and all its associated **auxiliary
+coordinates**. However, the **scalar coordinates** have been leniently combined to
+preserve as much coordinate information as possible, and the ``attributes``
+dictionaries have also been leniently combined. In addition, see what further
+:ref:`rationalisation ` is always performed by cube maths on
+the resultant metadata and coordinates.
+
+Also, note that the ``model_level_number`` **scalar coordinate** from the
+``control`` has been superseded by the similarly named **dimension coordinate**
+from the ``experiment`` in the resultant ``difference``.
+
+Now let's compare and contrast this lenient result with the strict alternative.
+But before we do so, let's first clarify how to control the behaviour of cube maths.
+
+
+Control the Behaviour
+=====================
+
+As stated earlier, lenient cube maths is the default behaviour from Iris ``3.0.0``.
+However, this behaviour may be controlled via the thread-safe ``LENIENT["maths"]``
+runtime option,
+
+.. doctest:: lenient-example
+
+ >>> from iris.common import LENIENT
+ >>> print(LENIENT)
+ Lenient(maths=True)
+
+Which may be set and applied globally thereafter for Iris within the current
+thread of execution,
+
+.. doctest:: lenient-example
+
+ >>> LENIENT["maths"] = False # doctest: +SKIP
+ >>> print(LENIENT) # doctest: +SKIP
+ Lenient(maths=False)
+
+Or alternatively, temporarily alter the behaviour of cube maths only within the
+scope of the ``LENIENT`` `context manager`_,
+
+.. doctest:: lenient-example
+
+ >>> print(LENIENT)
+ Lenient(maths=True)
+ >>> with LENIENT.context(maths=False):
+ ... print(LENIENT)
+ ...
+ Lenient(maths=False)
+ >>> print(LENIENT)
+ Lenient(maths=True)
+
+
+Strict Example
+==============
+
+Now that we know how to control the underlying behaviour of cube maths,
+let's return to our :ref:`lenient example `, but this
+time perform **strict** cube maths instead,
+
+.. doctest:: lenient-example
+
+ >>> with LENIENT.context(maths=False):
+ ... difference = experiment - control
+ ...
+ >>> print(difference)
+ unknown / (K) (model_level_number: 15; grid_latitude: 100; grid_longitude: 100)
+ Dimension coordinates:
+ model_level_number x - -
+ grid_latitude - x -
+ grid_longitude - - x
+ Auxiliary coordinates:
+ atmosphere_hybrid_height_coordinate x - -
+ sigma x - -
+ surface_altitude - x x
+ Derived coordinates:
+ altitude x x x
+ Scalar coordinates:
+ time 2009-09-09 17:10:00
+ Attributes:
+ source 'Data from Met Office Unified Model 7.04'
+
+Although the numerical result of this strict cube maths operation is identical,
+it is not as rich in metadata as the :ref:`lenient alternative `.
+In particular, it does not contain the ``forecast_period`` and ``forecast_reference_time``
+**scalar coordinates**, or the ``experiment-id`` in the ``attributes`` dictionary.
+
+This is because strict cube maths, in general, will only return common metadata
+and common coordinates that are :ref:`strictly equivalent `.
+
+
+Finer Detail
+============
+
+In general, if you want to preserve as much metadata and coordinate information as
+possible during cube maths, then opt to use the default lenient behaviour. Otherwise,
+favour the strict alternative if you need to enforce precise metadata and
+coordinate commonality.
+
+The following information may also help you decide whether lenient cube maths best
+suits your use case,
+
+- lenient behaviour uses :ref:`lenient equality ` to match the
+ metadata of coordinates, which is more tolerant to certain metadata differences,
+- lenient behaviour uses :ref:`lenient combination