From 8557ede4993e73d5bf84b3c05cc7ac6f656bea2f Mon Sep 17 00:00:00 2001 From: "Zhian N. Kamvar" Date: Fri, 21 Apr 2023 10:09:32 -0700 Subject: [PATCH] [automation] transform lesson to sandpaper --- .editorconfig | 26 ++ .github/workflows/README.md | 198 ++++++++++++++++ .github/workflows/pr-close-signal.yaml | 23 ++ .github/workflows/pr-comment.yaml | 185 +++++++++++++++ .github/workflows/pr-post-remove-branch.yaml | 32 +++ .github/workflows/pr-preflight.yaml | 39 +++ .github/workflows/pr-receive.yaml | 131 ++++++++++ .github/workflows/sandpaper-main.yaml | 61 +++++ .github/workflows/sandpaper-version.txt | 1 + .github/workflows/update-cache.yaml | 125 ++++++++++ .github/workflows/update-workflows.yaml | 66 ++++++ .github/workflows/workbench-beta-phase.yml | 60 +++++ .gitignore | 55 +++++ CODE_OF_CONDUCT.md | 13 + CONTRIBUTING.md | 121 ++++++++++ LICENSE.md | 79 ++++++ README.md | 17 +- _extras/guide.md | 119 ---------- config.yaml | 88 +++++++ episodes/00-intro.md | 134 ++++++----- episodes/01-format-data.md | 157 ++++++------ episodes/02-common-mistakes.md | 164 ++++++------- episodes/03-dates-as-data.md | 224 ++++++++++-------- episodes/04-quality-control.md | 195 ++++++++------- episodes/05-exporting-data.md | 113 +++++---- episodes/06-data-formats-caveats.md | 40 +++- .../data}/training_attendance.xlsx | Bin {fig => episodes/fig}/1_helpful_clippy.jpg | Bin {fig => episodes/fig}/2_Multiple_Tables.png | Bin {fig => episodes/fig}/2_datasheet_example.jpg | Bin {fig => episodes/fig}/3_Dates_as_Columns.png | Bin {fig => episodes/fig}/3_white_table_1.jpg | Bin {fig => episodes/fig}/4-sort-len_hours.png | Bin {fig => episodes/fig}/4-sorted-len_hours.png | Bin .../fig}/4_conditional-formatting.png | Bin .../fig}/4_data-validation-alert.png | Bin .../fig}/4_data-validation-auto-complete.png | Bin .../fig}/4_data-validation-error-msg.png | Bin .../fig}/4_data-validation-input-message.png | Bin .../fig}/4_data-validation-whole-num.png | Bin {fig => episodes/fig}/4_merged_cells.jpg | Bin {fig => episodes/fig}/5_excel_dates_1.jpg | Bin {fig => episodes/fig}/6_excel_dates_2.jpg | Bin {fig => episodes/fig}/7_excel_dates_3.jpg | Bin {fig => episodes/fig}/NewLine_example.png | Bin {fig => episodes/fig}/NewLine_example2.png | Bin {fig => episodes/fig}/csv-mistake.png | Bin {fig => episodes/fig}/data_validation.png | Bin .../fig}/data_validation_window.png | Bin {fig => episodes/fig}/drop_down_list.png | Bin {fig => episodes/fig}/error_alert.png | Bin {fig => episodes/fig}/excel-to-csv.png | Bin .../fig}/excel_tables_example.png | Bin .../fig}/excel_tables_example1.png | Bin {fig => episodes/fig}/formatting.png | Bin {fig => episodes/fig}/good_formatting.png | Bin {fig => episodes/fig}/input_message.png | Bin {fig => episodes/fig}/invalid_value.png | Bin {fig => episodes/fig}/multiple-info.png | Bin {fig => episodes/fig}/plot_validation.png | Bin {fig => episodes/fig}/single-info.png | Bin .../fig}/solution_exercise_1_dates.png | Bin {fig => episodes/fig}/sort-date.png | Bin {fig => episodes/fig}/sorting.png | Bin {fig => episodes/fig}/spreadsheet-setup.png | Bin index.md | 49 ++-- instructors/instructor-notes.md | 122 ++++++++++ {_extras => learners}/discuss.md | 3 +- learners/reference.md | 9 + learners/setup.md | 81 +++++++ profiles/learner-profiles.md | 5 + reference.md | 4 - setup.md | 77 ------ site/README.md | 2 + 74 files changed, 2151 insertions(+), 667 deletions(-) create mode 100644 .editorconfig create mode 100755 .github/workflows/README.md create mode 100755 
.github/workflows/pr-close-signal.yaml create mode 100755 .github/workflows/pr-comment.yaml create mode 100755 .github/workflows/pr-post-remove-branch.yaml create mode 100755 .github/workflows/pr-preflight.yaml create mode 100755 .github/workflows/pr-receive.yaml create mode 100755 .github/workflows/sandpaper-main.yaml create mode 100644 .github/workflows/sandpaper-version.txt create mode 100755 .github/workflows/update-cache.yaml create mode 100755 .github/workflows/update-workflows.yaml create mode 100644 .github/workflows/workbench-beta-phase.yml create mode 100644 .gitignore create mode 100644 CODE_OF_CONDUCT.md create mode 100644 CONTRIBUTING.md create mode 100644 LICENSE.md delete mode 100644 _extras/guide.md create mode 100644 config.yaml rename {data => episodes/data}/training_attendance.xlsx (100%) rename {fig => episodes/fig}/1_helpful_clippy.jpg (100%) rename {fig => episodes/fig}/2_Multiple_Tables.png (100%) rename {fig => episodes/fig}/2_datasheet_example.jpg (100%) rename {fig => episodes/fig}/3_Dates_as_Columns.png (100%) rename {fig => episodes/fig}/3_white_table_1.jpg (100%) rename {fig => episodes/fig}/4-sort-len_hours.png (100%) rename {fig => episodes/fig}/4-sorted-len_hours.png (100%) rename {fig => episodes/fig}/4_conditional-formatting.png (100%) rename {fig => episodes/fig}/4_data-validation-alert.png (100%) rename {fig => episodes/fig}/4_data-validation-auto-complete.png (100%) rename {fig => episodes/fig}/4_data-validation-error-msg.png (100%) rename {fig => episodes/fig}/4_data-validation-input-message.png (100%) rename {fig => episodes/fig}/4_data-validation-whole-num.png (100%) rename {fig => episodes/fig}/4_merged_cells.jpg (100%) rename {fig => episodes/fig}/5_excel_dates_1.jpg (100%) rename {fig => episodes/fig}/6_excel_dates_2.jpg (100%) rename {fig => episodes/fig}/7_excel_dates_3.jpg (100%) rename {fig => episodes/fig}/NewLine_example.png (100%) rename {fig => episodes/fig}/NewLine_example2.png (100%) rename {fig => episodes/fig}/csv-mistake.png (100%) rename {fig => episodes/fig}/data_validation.png (100%) rename {fig => episodes/fig}/data_validation_window.png (100%) rename {fig => episodes/fig}/drop_down_list.png (100%) rename {fig => episodes/fig}/error_alert.png (100%) rename {fig => episodes/fig}/excel-to-csv.png (100%) rename {fig => episodes/fig}/excel_tables_example.png (100%) rename {fig => episodes/fig}/excel_tables_example1.png (100%) rename {fig => episodes/fig}/formatting.png (100%) rename {fig => episodes/fig}/good_formatting.png (100%) rename {fig => episodes/fig}/input_message.png (100%) rename {fig => episodes/fig}/invalid_value.png (100%) rename {fig => episodes/fig}/multiple-info.png (100%) rename {fig => episodes/fig}/plot_validation.png (100%) rename {fig => episodes/fig}/single-info.png (100%) rename {fig => episodes/fig}/solution_exercise_1_dates.png (100%) rename {fig => episodes/fig}/sort-date.png (100%) rename {fig => episodes/fig}/sorting.png (100%) rename {fig => episodes/fig}/spreadsheet-setup.png (100%) create mode 100644 instructors/instructor-notes.md rename {_extras => learners}/discuss.md (97%) create mode 100644 learners/reference.md create mode 100644 learners/setup.md create mode 100644 profiles/learner-profiles.md delete mode 100644 reference.md delete mode 100644 setup.md create mode 100644 site/README.md diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 0000000..5bf4860 --- /dev/null +++ b/.editorconfig @@ -0,0 +1,26 @@ +root = true + +[*] +charset = utf-8 +insert_final_newline = true 
+trim_trailing_whitespace = true + +[*.md] +indent_size = 2 +indent_style = space +max_line_length = 100 # Please keep this in sync with bin/lesson_check.py! +trim_trailing_whitespace = false # keep trailing spaces in markdown - 2+ spaces are translated to a hard break (
<br/>) + +[*.r] +max_line_length = 80 + +[*.py] +indent_size = 4 +indent_style = space +max_line_length = 79 + +[*.sh] +end_of_line = lf + +[Makefile] +indent_style = tab diff --git a/.github/workflows/README.md b/.github/workflows/README.md new file mode 100755 index 0000000..101967e --- /dev/null +++ b/.github/workflows/README.md @@ -0,0 +1,198 @@ +# Carpentries Workflows +
+This directory contains workflows to be used for lessons using the {sandpaper} +lesson infrastructure. Two of these workflows require R (`sandpaper-main.yaml` +and `pr-receive.yaml`) and the rest are bots to handle pull request management. +
+These workflows will likely change as {sandpaper} evolves, so it is important to +keep them up-to-date. To do this in your lesson, you can run the following in your +R console: +
+```r +# Install/Update sandpaper +options(repos = c(carpentries = "https://carpentries.r-universe.dev/", + CRAN = "https://cloud.r-project.org")) +install.packages("sandpaper") + +# update the workflows in your lesson +library("sandpaper") +update_github_workflows() +``` +
+Inside this folder, you will find a file called `sandpaper-version.txt`, which +will contain a version number for sandpaper. This will be used in the future to +alert you if a workflow update is needed. +
+What follows are the descriptions of the workflow files: +
+## Deployment +
+### 01 Build and Deploy (sandpaper-main.yaml) +
+This is the main driver that will only act on the main branch of the repository. +This workflow does the following: +
+ 1. checks out the lesson + 2. provisions the following resources + - R + - pandoc + - lesson infrastructure (stored in a cache) + - lesson dependencies if needed (stored in a cache) + 3. builds the lesson via `sandpaper:::ci_deploy()` +
+#### Caching +
+This workflow has two caches; one cache is for the lesson infrastructure and +the other is for the lesson dependencies if the lesson contains rendered +content. These caches are invalidated by new versions of the infrastructure and +the `renv.lock` file, respectively. If there is a problem with the cache, +manual invalidation is necessary. You will need maintainer access to the repository, +and you can either go to the Actions tab and [click on the caches button to find +and invalidate the failing cache](https://github.blog/changelog/2022-10-20-manage-caches-in-your-actions-workflows-from-web-interface/) +or set the `CACHE_VERSION` secret to the current date (which will +invalidate all of the caches). +
+## Updates +
+### Setup Information +
+These workflows run on a schedule and at the maintainer's request. Because they +create pull requests that update workflows and require the downstream actions to run, +they need a special repository/organization secret token called +`SANDPAPER_WORKFLOW`, and it must have the `public_repo` and `workflow` scopes. +
+This can be an individual user token, OR it can be a trusted bot account. If you +have a repository in one of the official Carpentries accounts, then you do not +need to worry about this token being present because the Carpentries Core Team +will take care of supplying this token. +
+If you want to use your personal account, you can create a new personal access token +from your GitHub developer settings. Once you have created your token, you should copy it to your +clipboard and then go to your repository's settings > secrets > actions and +create or edit the `SANDPAPER_WORKFLOW` secret, pasting in the generated token.
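
As an alternative to the web interface, the same secrets can be managed from the command line. The sketch below is not part of the original workflow documentation: it assumes the GitHub CLI (`gh`) is installed and authenticated, and `OWNER/REPO` and `$YOUR_TOKEN` are placeholders for your lesson repository and the token you generated.

```bash
# Hypothetical helper commands using the GitHub CLI (assumed installed and
# authenticated); OWNER/REPO and $YOUR_TOKEN are placeholders.

# Store the personal access token as the SANDPAPER_WORKFLOW repository secret.
gh secret set SANDPAPER_WORKFLOW --repo OWNER/REPO --body "$YOUR_TOKEN"

# Invalidate all workflow caches by setting CACHE_VERSION to today's date.
gh secret set CACHE_VERSION --repo OWNER/REPO --body "$(date +%F)"
```

Either route has the same effect as editing the secrets through the repository settings page.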
+ +If you do not specify your token correctly, the runs will not fail; instead, they will +give you instructions to provide the token for your repository. +
+### 02 Maintain: Update Workflow Files (update-workflows.yaml) +
+The {sandpaper} repository was designed to do as much as possible to separate +the tools from the content. For local builds, this is absolutely true, but +there is a minor issue when it comes to workflow files: they must live inside +the repository. +
+This workflow ensures that the workflow files are up-to-date. The way it works is +to download the update-workflows.sh script from GitHub and run it. The script +will do the following: +
+1. check the recorded version of sandpaper against the current version on GitHub +2. update the files if there is a difference in versions +
+After the files are updated, if there are any changes, they are pushed to a +branch called `update/workflows` and a pull request is created. Maintainers are +encouraged to review the changes and accept the pull request if the outputs +are okay. +
+This update is run ~~weekly or~~ on demand. +
+### 03 Maintain: Update Package Cache (update-cache.yaml) +
+For lessons that have generated content, we use {renv} to ensure that the output +is stable. This is controlled by a single lockfile which documents the packages +needed for the lesson and the version numbers. This workflow is skipped in +lessons that do not have generated content. +
+Because the lessons need to remain current with the package ecosystem, it's a +good idea to make sure these packages can be updated periodically. The +update cache workflow will do this by checking for updates, applying them in a +branch called `update/packages` and creating a pull request with _only the +lockfile changed_. +
+From here, the markdown documents will be rebuilt and you can inspect what has +changed based on how the packages have updated. +
+## Pull Request and Review Management +
+Because our lessons execute code, pull requests are a security risk for any +lesson and thus have security measures associated with them. **Do not merge any +pull requests that do not pass checks or that do not have bot comments on them.** +
+These workflows all go together and are described in the following +diagram and the sections below: +
+![Graph representation of a pull request](https://carpentries.github.io/sandpaper/articles/img/pr-flow.dot.svg) +
+### Pre Flight Pull Request Validation (pr-preflight.yaml) +
+This workflow runs every time a pull request is created, and its purpose is to +validate that the pull request is okay to run. This means checking the following: +
+1. The pull request does not contain modified workflow files +2. If the pull request contains modified workflow files, it does not contain + modified content files (such as a situation where @carpentries-bot will + make an automated pull request) +3. The pull request does not contain an invalid commit hash (e.g. from a fork + that was made before a lesson was transitioned from styles to use the + workbench). +
+Once the checks are finished, a comment is issued to the pull request, which +will allow maintainers to determine if it is safe to run the +"Receive Pull Request" workflow from new contributors. +
+### Receive Pull Request (pr-receive.yaml) +
+**Note of caution:** This workflow runs arbitrary code submitted by anyone who creates a +pull request. GitHub has safeguarded the token used in this workflow to have no +privileges in the repository, but we have taken precautions to protect against +spoofing.
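
Because of this, maintainers may want to spot-check for themselves what a pull request touches before approving workflow runs. The sketch below is a manual aid, not part of the automated workflow: it assumes `origin` points at the lesson repository, `main` is its default branch, and `123` is a placeholder pull request number.

```bash
# Manual spot-check (not part of the automated workflow): list any workflow
# files a pull request modifies. "origin" is assumed to be the lesson
# repository, "main" its default branch, and 123 a placeholder PR number.
git fetch origin main pull/123/head:pr-123
git diff --name-only origin/main...pr-123 -- .github/workflows/
```

Empty output means the pull request does not touch the workflow files; any listed file warrants a closer look before approving the run.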
+ +This workflow is triggered with every push to a pull request. If this workflow +is already running and a new push is sent to the pull request, the workflow +running from the previous push will be cancelled and a new workflow run will be +started. +
+The first step of this workflow is to check if it is valid (e.g. that no +workflow files have been modified). If there are workflow files that have been +modified, a comment is made indicating that the workflow will not run. If +both a workflow file and lesson content are modified, an error will occur. +
+The second step (if valid) is to build the generated content from the pull +request. This builds the content and uploads three artifacts: +
+1. The pull request number (pr) +2. A summary of changes after the rendering process (diff) +3. The rendered files (built) +
+Because this workflow builds generated content, it follows the same general +process as the `sandpaper-main` workflow with the same caching mechanisms. +
+The artifacts produced are used by the next workflow. +
+### Comment on Pull Request (pr-comment.yaml) +
+This workflow is triggered if the `pr-receive.yaml` workflow is successful. +The steps in this workflow are: +
+1. Test if the workflow is valid and comment on the pull request with the result + of the validation. +2. If it is valid: create an orphan branch with two commits: the current state + of the repository and the proposed changes. +3. If it is valid: update the pull request comment with the summary of changes. +
+Importantly: if the pull request is invalid, the branch is not created so any +malicious code is not published. +
+From here, the maintainer can request changes from the author and eventually +either merge or reject the PR. When this happens, if the PR was valid, the +preview branch needs to be deleted. +
+### Send Close PR Signal (pr-close-signal.yaml) +
+Triggered any time a pull request is closed. This emits an artifact containing the +pull request number for the next action. +
+### Remove Pull Request Branch (pr-post-remove-branch.yaml) +
+Triggered by `pr-close-signal.yaml`. This removes the temporary branch associated with
diff --git a/.github/workflows/pr-close-signal.yaml b/.github/workflows/pr-close-signal.yaml new file mode 100755 index 0000000..9b129d5 --- /dev/null +++ b/.github/workflows/pr-close-signal.yaml @@ -0,0 +1,23 @@ +name: "Bot: Send Close Pull Request Signal" + +on: + pull_request: + types: + [closed] + +jobs: + send-close-signal: + name: "Send closing signal" + runs-on: ubuntu-latest + if: ${{ github.event.action == 'closed' }} + steps: + - name: "Create PRtifact" + run: | + mkdir -p ./pr + printf ${{ github.event.number }} > ./pr/NUM + - name: Upload Diff + uses: actions/upload-artifact@v3 + with: + name: pr + path: ./pr + diff --git a/.github/workflows/pr-comment.yaml b/.github/workflows/pr-comment.yaml new file mode 100755 index 0000000..bb2eb03 --- /dev/null +++ b/.github/workflows/pr-comment.yaml @@ -0,0 +1,185 @@ +name: "Bot: Comment on the Pull Request" + +# read-write repo token +# access to secrets +on: + workflow_run: + workflows: ["Receive Pull Request"] + types: + - completed + +concurrency: + group: pr-${{ github.event.workflow_run.pull_requests[0].number }} + cancel-in-progress: true + + +jobs: + # Pull requests are valid if: + # - they match the sha of the workflow run head commit + # - they are open + # - no .github files were committed + test-pr: + name: "Test if pull request is valid" + runs-on: ubuntu-latest + if: > + github.event.workflow_run.event == 'pull_request' && + github.event.workflow_run.conclusion == 'success' + outputs: + is_valid: ${{ steps.check-pr.outputs.VALID }} + payload: ${{ steps.check-pr.outputs.payload }} + number: ${{ steps.get-pr.outputs.NUM }} + msg: ${{ steps.check-pr.outputs.MSG }} + steps: + - name: 'Download PR artifact' + id: dl + uses: carpentries/actions/download-workflow-artifact@main + with: + run: ${{ github.event.workflow_run.id }} + name: 'pr' + + - name: "Get PR Number" + if: ${{ steps.dl.outputs.success == 'true' }} + id: get-pr + run: | + unzip pr.zip + echo "NUM=$(<./NR)" >> $GITHUB_OUTPUT + + - name: "Fail if PR number was not present" + id: bad-pr + if: ${{ steps.dl.outputs.success != 'true' }} + run: | + echo '::error::A pull request number was not recorded. The pull request that triggered this workflow is likely malicious.' 
+ exit 1 + - name: "Get Invalid Hashes File" + id: hash + run: | + echo "json<> $GITHUB_OUTPUT + - name: "Check PR" + id: check-pr + if: ${{ steps.dl.outputs.success == 'true' }} + uses: carpentries/actions/check-valid-pr@main + with: + pr: ${{ steps.get-pr.outputs.NUM }} + sha: ${{ github.event.workflow_run.head_sha }} + headroom: 3 # if it's within the last three commits, we can keep going, because it's likely rapid-fire + invalid: ${{ fromJSON(steps.hash.outputs.json)[github.repository] }} + fail_on_error: true + + # Create an orphan branch on this repository with two commits + # - the current HEAD of the md-outputs branch + # - the output from running the current HEAD of the pull request through + # the md generator + create-branch: + name: "Create Git Branch" + needs: test-pr + runs-on: ubuntu-latest + if: ${{ needs.test-pr.outputs.is_valid == 'true' }} + env: + NR: ${{ needs.test-pr.outputs.number }} + permissions: + contents: write + steps: + - name: 'Checkout md outputs' + uses: actions/checkout@v3 + with: + ref: md-outputs + path: built + fetch-depth: 1 + + - name: 'Download built markdown' + id: dl + uses: carpentries/actions/download-workflow-artifact@main + with: + run: ${{ github.event.workflow_run.id }} + name: 'built' + + - if: ${{ steps.dl.outputs.success == 'true' }} + run: unzip built.zip + + - name: "Create orphan and push" + if: ${{ steps.dl.outputs.success == 'true' }} + run: | + cd built/ + git config --local user.email "actions@github.com" + git config --local user.name "GitHub Actions" + CURR_HEAD=$(git rev-parse HEAD) + git checkout --orphan md-outputs-PR-${NR} + git add -A + git commit -m "source commit: ${CURR_HEAD}" + ls -A | grep -v '^.git$' | xargs -I _ rm -r '_' + cd .. + unzip -o -d built built.zip + cd built + git add -A + git commit --allow-empty -m "differences for PR #${NR}" + git push -u --force --set-upstream origin md-outputs-PR-${NR} + + # Comment on the Pull Request with a link to the branch and the diff + comment-pr: + name: "Comment on Pull Request" + needs: [test-pr, create-branch] + runs-on: ubuntu-latest + if: ${{ needs.test-pr.outputs.is_valid == 'true' }} + env: + NR: ${{ needs.test-pr.outputs.number }} + permissions: + pull-requests: write + steps: + - name: 'Download comment artifact' + id: dl + uses: carpentries/actions/download-workflow-artifact@main + with: + run: ${{ github.event.workflow_run.id }} + name: 'diff' + + - if: ${{ steps.dl.outputs.success == 'true' }} + run: unzip ${{ github.workspace }}/diff.zip + + - name: "Comment on PR" + id: comment-diff + if: ${{ steps.dl.outputs.success == 'true' }} + uses: carpentries/actions/comment-diff@main + with: + pr: ${{ env.NR }} + path: ${{ github.workspace }}/diff.md + + # Comment if the PR is open and matches the SHA, but the workflow files have + # changed + comment-changed-workflow: + name: "Comment if workflow files have changed" + needs: test-pr + runs-on: ubuntu-latest + if: ${{ always() && needs.test-pr.outputs.is_valid == 'false' }} + env: + NR: ${{ github.event.workflow_run.pull_requests[0].number }} + body: ${{ needs.test-pr.outputs.msg }} + permissions: + pull-requests: write + steps: + - name: 'Check for spoofing' + id: dl + uses: carpentries/actions/download-workflow-artifact@main + with: + run: ${{ github.event.workflow_run.id }} + name: 'built' + + - name: 'Alert if spoofed' + id: spoof + if: ${{ steps.dl.outputs.success == 'true' }} + run: | + echo 'body<> $GITHUB_ENV + echo '' >> $GITHUB_ENV + echo '## :x: DANGER :x:' >> $GITHUB_ENV + echo 'This pull request has modified 
workflows that created output. Close this now.' >> $GITHUB_ENV + echo '' >> $GITHUB_ENV + echo 'EOF' >> $GITHUB_ENV + + - name: "Comment on PR" + id: comment-diff + uses: carpentries/actions/comment-diff@main + with: + pr: ${{ env.NR }} + body: ${{ env.body }} + diff --git a/.github/workflows/pr-post-remove-branch.yaml b/.github/workflows/pr-post-remove-branch.yaml new file mode 100755 index 0000000..62c2e98 --- /dev/null +++ b/.github/workflows/pr-post-remove-branch.yaml @@ -0,0 +1,32 @@ +name: "Bot: Remove Temporary PR Branch" + +on: + workflow_run: + workflows: ["Bot: Send Close Pull Request Signal"] + types: + - completed + +jobs: + delete: + name: "Delete branch from Pull Request" + runs-on: ubuntu-latest + if: > + github.event.workflow_run.event == 'pull_request' && + github.event.workflow_run.conclusion == 'success' + permissions: + contents: write + steps: + - name: 'Download artifact' + uses: carpentries/actions/download-workflow-artifact@main + with: + run: ${{ github.event.workflow_run.id }} + name: pr + - name: "Get PR Number" + id: get-pr + run: | + unzip pr.zip + echo "NUM=$(<./NUM)" >> $GITHUB_OUTPUT + - name: 'Remove branch' + uses: carpentries/actions/remove-branch@main + with: + pr: ${{ steps.get-pr.outputs.NUM }} diff --git a/.github/workflows/pr-preflight.yaml b/.github/workflows/pr-preflight.yaml new file mode 100755 index 0000000..d0d7420 --- /dev/null +++ b/.github/workflows/pr-preflight.yaml @@ -0,0 +1,39 @@ +name: "Pull Request Preflight Check" + +on: + pull_request_target: + branches: + ["main"] + types: + ["opened", "synchronize", "reopened"] + +jobs: + test-pr: + name: "Test if pull request is valid" + if: ${{ github.event.action != 'closed' }} + runs-on: ubuntu-latest + outputs: + is_valid: ${{ steps.check-pr.outputs.VALID }} + permissions: + pull-requests: write + steps: + - name: "Get Invalid Hashes File" + id: hash + run: | + echo "json<> $GITHUB_OUTPUT + - name: "Check PR" + id: check-pr + uses: carpentries/actions/check-valid-pr@main + with: + pr: ${{ github.event.number }} + invalid: ${{ fromJSON(steps.hash.outputs.json)[github.repository] }} + fail_on_error: true + - name: "Comment result of validation" + id: comment-diff + if: ${{ always() }} + uses: carpentries/actions/comment-diff@main + with: + pr: ${{ github.event.number }} + body: ${{ steps.check-pr.outputs.MSG }} diff --git a/.github/workflows/pr-receive.yaml b/.github/workflows/pr-receive.yaml new file mode 100755 index 0000000..371ef54 --- /dev/null +++ b/.github/workflows/pr-receive.yaml @@ -0,0 +1,131 @@ +name: "Receive Pull Request" + +on: + pull_request: + types: + [opened, synchronize, reopened] + +concurrency: + group: ${{ github.ref }} + cancel-in-progress: true + +jobs: + test-pr: + name: "Record PR number" + if: ${{ github.event.action != 'closed' }} + runs-on: ubuntu-latest + outputs: + is_valid: ${{ steps.check-pr.outputs.VALID }} + steps: + - name: "Record PR number" + id: record + if: ${{ always() }} + run: | + echo ${{ github.event.number }} > ${{ github.workspace }}/NR # 2022-03-02: artifact name fixed to be NR + - name: "Upload PR number" + id: upload + if: ${{ always() }} + uses: actions/upload-artifact@v3 + with: + name: pr + path: ${{ github.workspace }}/NR + - name: "Get Invalid Hashes File" + id: hash + run: | + echo "json<> $GITHUB_OUTPUT + - name: "echo output" + run: | + echo "${{ steps.hash.outputs.json }}" + - name: "Check PR" + id: check-pr + uses: carpentries/actions/check-valid-pr@main + with: + pr: ${{ github.event.number }} + invalid: ${{ 
fromJSON(steps.hash.outputs.json)[github.repository] }} + + build-md-source: + name: "Build markdown source files if valid" + needs: test-pr + runs-on: ubuntu-latest + if: ${{ needs.test-pr.outputs.is_valid == 'true' }} + env: + GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} + RENV_PATHS_ROOT: ~/.local/share/renv/ + CHIVE: ${{ github.workspace }}/site/chive + PR: ${{ github.workspace }}/site/pr + MD: ${{ github.workspace }}/site/built + steps: + - name: "Check Out Main Branch" + uses: actions/checkout@v3 + + - name: "Check Out Staging Branch" + uses: actions/checkout@v3 + with: + ref: md-outputs + path: ${{ env.MD }} + + - name: "Set up R" + uses: r-lib/actions/setup-r@v2 + with: + use-public-rspm: true + install-r: false + + - name: "Set up Pandoc" + uses: r-lib/actions/setup-pandoc@v2 + + - name: "Setup Lesson Engine" + uses: carpentries/actions/setup-sandpaper@main + with: + cache-version: ${{ secrets.CACHE_VERSION }} + + - name: "Setup Package Cache" + uses: carpentries/actions/setup-lesson-deps@main + with: + cache-version: ${{ secrets.CACHE_VERSION }} + + - name: "Validate and Build Markdown" + id: build-site + run: | + sandpaper::package_cache_trigger(TRUE) + sandpaper::validate_lesson(path = '${{ github.workspace }}') + sandpaper:::build_markdown(path = '${{ github.workspace }}', quiet = FALSE) + shell: Rscript {0} + + - name: "Generate Artifacts" + id: generate-artifacts + run: | + sandpaper:::ci_bundle_pr_artifacts( + repo = '${{ github.repository }}', + pr_number = '${{ github.event.number }}', + path_md = '${{ env.MD }}', + path_pr = '${{ env.PR }}', + path_archive = '${{ env.CHIVE }}', + branch = 'md-outputs' + ) + shell: Rscript {0} + + - name: "Upload PR" + uses: actions/upload-artifact@v3 + with: + name: pr + path: ${{ env.PR }} + + - name: "Upload Diff" + uses: actions/upload-artifact@v3 + with: + name: diff + path: ${{ env.CHIVE }} + retention-days: 1 + + - name: "Upload Build" + uses: actions/upload-artifact@v3 + with: + name: built + path: ${{ env.MD }} + retention-days: 1 + + - name: "Teardown" + run: sandpaper::reset_site() + shell: Rscript {0} diff --git a/.github/workflows/sandpaper-main.yaml b/.github/workflows/sandpaper-main.yaml new file mode 100755 index 0000000..e17707a --- /dev/null +++ b/.github/workflows/sandpaper-main.yaml @@ -0,0 +1,61 @@ +name: "01 Build and Deploy Site" + +on: + push: + branches: + - main + - master + schedule: + - cron: '0 0 * * 2' + workflow_dispatch: + inputs: + name: + description: 'Who triggered this build?' 
+ required: true + default: 'Maintainer (via GitHub)' + reset: + description: 'Reset cached markdown files' + required: false + default: false + type: boolean +jobs: + full-build: + name: "Build Full Site" + runs-on: ubuntu-latest + permissions: + checks: write + contents: write + pages: write + env: + GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} + RENV_PATHS_ROOT: ~/.local/share/renv/ + steps: + + - name: "Checkout Lesson" + uses: actions/checkout@v3 + + - name: "Set up R" + uses: r-lib/actions/setup-r@v2 + with: + use-public-rspm: true + install-r: false + + - name: "Set up Pandoc" + uses: r-lib/actions/setup-pandoc@v2 + + - name: "Setup Lesson Engine" + uses: carpentries/actions/setup-sandpaper@main + with: + cache-version: ${{ secrets.CACHE_VERSION }} + + - name: "Setup Package Cache" + uses: carpentries/actions/setup-lesson-deps@main + with: + cache-version: ${{ secrets.CACHE_VERSION }} + + - name: "Deploy Site" + run: | + reset <- "${{ github.event.inputs.reset }}" == "true" + sandpaper::package_cache_trigger(TRUE) + sandpaper:::ci_deploy(reset = reset) + shell: Rscript {0} diff --git a/.github/workflows/sandpaper-version.txt b/.github/workflows/sandpaper-version.txt new file mode 100644 index 0000000..4aa0906 --- /dev/null +++ b/.github/workflows/sandpaper-version.txt @@ -0,0 +1 @@ +0.11.15 diff --git a/.github/workflows/update-cache.yaml b/.github/workflows/update-cache.yaml new file mode 100755 index 0000000..676d742 --- /dev/null +++ b/.github/workflows/update-cache.yaml @@ -0,0 +1,125 @@ +name: "03 Maintain: Update Package Cache" + +on: + workflow_dispatch: + inputs: + name: + description: 'Who triggered this build (enter github username to tag yourself)?' + required: true + default: 'monthly run' + schedule: + # Run every tuesday + - cron: '0 0 * * 2' + +jobs: + preflight: + name: "Preflight Check" + runs-on: ubuntu-latest + outputs: + ok: ${{ steps.check.outputs.ok }} + steps: + - id: check + run: | + if [[ ${{ github.event_name }} == 'workflow_dispatch' ]]; then + echo "ok=true" >> $GITHUB_OUTPUT + echo "Running on request" + # using single brackets here to avoid 08 being interpreted as octal + # https://github.com/carpentries/sandpaper/issues/250 + elif [ `date +%d` -le 7 ]; then + # If the Tuesday lands in the first week of the month, run it + echo "ok=true" >> $GITHUB_OUTPUT + echo "Running on schedule" + else + echo "ok=false" >> $GITHUB_OUTPUT + echo "Not Running Today" + fi + + check_renv: + name: "Check if We Need {renv}" + runs-on: ubuntu-latest + needs: preflight + if: ${{ needs.preflight.outputs.ok == 'true'}} + outputs: + needed: ${{ steps.renv.outputs.exists }} + steps: + - name: "Checkout Lesson" + uses: actions/checkout@v3 + - id: renv + run: | + if [[ -d renv ]]; then + echo "exists=true" >> $GITHUB_OUTPUT + fi + + check_token: + name: "Check SANDPAPER_WORKFLOW token" + runs-on: ubuntu-latest + needs: check_renv + if: ${{ needs.check_renv.outputs.needed == 'true' }} + outputs: + workflow: ${{ steps.validate.outputs.wf }} + repo: ${{ steps.validate.outputs.repo }} + steps: + - name: "validate token" + id: validate + uses: carpentries/actions/check-valid-credentials@main + with: + token: ${{ secrets.SANDPAPER_WORKFLOW }} + + update_cache: + name: "Update Package Cache" + needs: check_token + if: ${{ needs.check_token.outputs.repo== 'true' }} + runs-on: ubuntu-latest + env: + GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} + RENV_PATHS_ROOT: ~/.local/share/renv/ + steps: + + - name: "Checkout Lesson" + uses: actions/checkout@v3 + + - name: "Set up R" + uses: 
r-lib/actions/setup-r@v2 + with: + use-public-rspm: true + install-r: false + + - name: "Update {renv} deps and determine if a PR is needed" + id: update + uses: carpentries/actions/update-lockfile@main + with: + cache-version: ${{ secrets.CACHE_VERSION }} + + - name: Create Pull Request + id: cpr + if: ${{ steps.update.outputs.n > 0 }} + uses: carpentries/create-pull-request@main + with: + token: ${{ secrets.SANDPAPER_WORKFLOW }} + delete-branch: true + branch: "update/packages" + commit-message: "[actions] update ${{ steps.update.outputs.n }} packages" + title: "Update ${{ steps.update.outputs.n }} packages" + body: | + :robot: This is an automated build + + This will update ${{ steps.update.outputs.n }} packages in your lesson with the following versions: + + ``` + ${{ steps.update.outputs.report }} + ``` + + :stopwatch: In a few minutes, a comment will appear that will show you how the output has changed based on these updates. + + If you want to inspect these changes locally, you can use the following code to check out a new branch: + + ```bash + git fetch origin update/packages + git checkout update/packages + ``` + + - Auto-generated by [create-pull-request][1] on ${{ steps.update.outputs.date }} + + [1]: https://github.com/carpentries/create-pull-request/tree/main + labels: "type: package cache" + draft: false diff --git a/.github/workflows/update-workflows.yaml b/.github/workflows/update-workflows.yaml new file mode 100755 index 0000000..288bcd1 --- /dev/null +++ b/.github/workflows/update-workflows.yaml @@ -0,0 +1,66 @@ +name: "02 Maintain: Update Workflow Files" + +on: + workflow_dispatch: + inputs: + name: + description: 'Who triggered this build (enter github username to tag yourself)?' + required: true + default: 'weekly run' + clean: + description: 'Workflow files/file extensions to clean (no wildcards, enter "" for none)' + required: false + default: '.yaml' + schedule: + # Run every Tuesday + - cron: '0 0 * * 2' + +jobs: + check_token: + name: "Check SANDPAPER_WORKFLOW token" + runs-on: ubuntu-latest + outputs: + workflow: ${{ steps.validate.outputs.wf }} + repo: ${{ steps.validate.outputs.repo }} + steps: + - name: "validate token" + id: validate + uses: carpentries/actions/check-valid-credentials@main + with: + token: ${{ secrets.SANDPAPER_WORKFLOW }} + + update_workflow: + name: "Update Workflow" + runs-on: ubuntu-latest + needs: check_token + if: ${{ needs.check_token.outputs.workflow == 'true' }} + steps: + - name: "Checkout Repository" + uses: actions/checkout@v3 + + - name: Update Workflows + id: update + uses: carpentries/actions/update-workflows@main + with: + clean: ${{ github.event.inputs.clean }} + + - name: Create Pull Request + id: cpr + if: "${{ steps.update.outputs.new }}" + uses: carpentries/create-pull-request@main + with: + token: ${{ secrets.SANDPAPER_WORKFLOW }} + delete-branch: true + branch: "update/workflows" + commit-message: "[actions] update sandpaper workflow to version ${{ steps.update.outputs.new }}" + title: "Update Workflows to Version ${{ steps.update.outputs.new }}" + body: | + :robot: This is an automated build + + Update Workflows from sandpaper version ${{ steps.update.outputs.old }} -> ${{ steps.update.outputs.new }} + + - Auto-generated by [create-pull-request][1] on ${{ steps.update.outputs.date }} + + [1]: https://github.com/carpentries/create-pull-request/tree/main + labels: "type: template and tools" + draft: false diff --git a/.github/workflows/workbench-beta-phase.yml b/.github/workflows/workbench-beta-phase.yml new file mode 
100644 index 0000000..2faa25d --- /dev/null +++ b/.github/workflows/workbench-beta-phase.yml @@ -0,0 +1,60 @@ +name: "Deploy to AWS" + +on: + workflow_run: + workflows: ["01 Build and Deploy Site"] + types: + - completed + workflow_dispatch: + +jobs: + preflight: + name: "Preflight Check" + runs-on: ubuntu-latest + outputs: + ok: ${{ steps.check.outputs.ok }} + folder: ${{ steps.check.outputs.folder }} + steps: + - id: check + run: | + if [[ -z "${{ secrets.DISTRIBUTION }}" || -z "${{ secrets.AWS_ACCESS_KEY_ID }}" || -z "${{ secrets.AWS_SECRET_ACCESS_KEY }}" ]]; then + echo ":information_source: No site configured" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo 'To deploy the preview on AWS, you need the `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY` and `DISTRIBUTION` secrets set up' >> $GITHUB_STEP_SUMMARY + else + echo "::set-output name=folder::"$(sed -E 's^.+/(.+)^\1^' <<< ${{ github.repository }}) + echo "::set-output name=ok::true" + fi + + full-build: + name: "Deploy to AWS" + needs: [preflight] + if: ${{ needs.preflight.outputs.ok }} + runs-on: ubuntu-latest + steps: + + - name: "Checkout site folder" + uses: actions/checkout@v3 + with: + ref: 'gh-pages' + path: 'source' + + - name: "Deploy to Bucket" + uses: jakejarvis/s3-sync-action@v0.5.1 + with: + args: --acl public-read --follow-symlinks --delete --exclude '.git/*' + env: + AWS_S3_BUCKET: preview.carpentries.org + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + SOURCE_DIR: 'source' + DEST_DIR: ${{ needs.preflight.outputs.folder }} + + - name: "Invalidate CloudFront" + uses: chetan/invalidate-cloudfront-action@master + env: + PATHS: /* + AWS_REGION: 'us-east-1' + DISTRIBUTION: ${{ secrets.DISTRIBUTION }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..b8ab706 --- /dev/null +++ b/.gitignore @@ -0,0 +1,55 @@ +# sandpaper files +episodes/*html +site/* +!site/README.md + +# History files +.Rhistory +.Rapp.history +# Session Data files +.RData +# User-specific files +.Ruserdata +# Example code in package build process +*-Ex.R +# Output files from R CMD build +/*.tar.gz +# Output files from R CMD check +/*.Rcheck/ +# RStudio files +.Rproj.user/ +# produced vignettes +vignettes/*.html +vignettes/*.pdf +# OAuth2 token, see https://github.com/hadley/httr/releases/tag/v0.3 +.httr-oauth +# knitr and R markdown default cache directories +*_cache/ +/cache/ +# Temporary files created by R markdown +*.utf8.md +*.knit.md +# R Environment Variables +.Renviron +# pkgdown site +docs/ +# translation temp files +po/*~ +# renv detritus +renv/sandbox/ +*.pyc +*~ +.DS_Store +.ipynb_checkpoints +.sass-cache +.jekyll-cache/ +.jekyll-metadata +__pycache__ +_site +.Rproj.user +.bundle/ +.vendor/ +vendor/ +.docker-vendor/ +Gemfile.lock +.*history diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..f19b804 --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,13 @@ +--- +title: "Contributor Code of Conduct" +--- + +As contributors and maintainers of this project, +we pledge to follow the [The Carpentries Code of Conduct][coc]. + +Instances of abusive, harassing, or otherwise unacceptable behavior +may be reported by following our [reporting guidelines][coc-reporting]. 
+ + +[coc-reporting]: https://docs.carpentries.org/topic_folders/policies/incident-reporting.html +[coc]: https://docs.carpentries.org/topic_folders/policies/code-of-conduct.html diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..ec44704 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,121 @@ +## Contributing + +[The Carpentries][cp-site] ([Software Carpentry][swc-site], [Data +Carpentry][dc-site], and [Library Carpentry][lc-site]) are open source +projects, and we welcome contributions of all kinds: new lessons, fixes to +existing material, bug reports, and reviews of proposed changes are all +welcome. + +### Contributor Agreement + +By contributing, you agree that we may redistribute your work under [our +license](LICENSE.md). In exchange, we will address your issues and/or assess +your change proposal as promptly as we can, and help you become a member of our +community. Everyone involved in [The Carpentries][cp-site] agrees to abide by +our [code of conduct](CODE_OF_CONDUCT.md). + +### How to Contribute + +The easiest way to get started is to file an issue to tell us about a spelling +mistake, some awkward wording, or a factual error. This is a good way to +introduce yourself and to meet some of our community members. + +1. If you do not have a [GitHub][github] account, you can [send us comments by + email][contact]. However, we will be able to respond more quickly if you use + one of the other methods described below. + +2. If you have a [GitHub][github] account, or are willing to [create + one][github-join], but do not know how to use Git, you can report problems + or suggest improvements by [creating an issue][issues]. This allows us to + assign the item to someone and to respond to it in a threaded discussion. + +3. If you are comfortable with Git, and would like to add or change material, + you can submit a pull request (PR). Instructions for doing this are + [included below](#using-github). + +Note: if you want to build the website locally, please refer to [The Workbench +documentation][template-doc]. + +### Where to Contribute + +1. If you wish to change this lesson, add issues and pull requests here. +2. If you wish to change the template used for workshop websites, please refer + to [The Workbench documentation][template-doc]. + + +### What to Contribute + +There are many ways to contribute, from writing new exercises and improving +existing ones to updating or filling in the documentation and submitting [bug +reports][issues] about things that do not work, are not clear, or are missing. +If you are looking for ideas, please see [the list of issues for this +repository][repo], or the issues for [Data Carpentry][dc-issues], [Library +Carpentry][lc-issues], and [Software Carpentry][swc-issues] projects. + +Comments on issues and reviews of pull requests are just as welcome: we are +smarter together than we are on our own. **Reviews from novices and newcomers +are particularly valuable**: it's easy for people who have been using these +lessons for a while to forget how impenetrable some of this material can be, so +fresh eyes are always welcome. + +### What *Not* to Contribute + +Our lessons already contain more material than we can cover in a typical +workshop, so we are usually *not* looking for more concepts or tools to add to +them. As a rule, if you want to introduce a new idea, you must (a) estimate how +long it will take to teach and (b) explain what you would take out to make room +for it. 
The first encourages contributors to be honest about requirements; the +second, to think hard about priorities. + +We are also not looking for exercises or other material that only run on one +platform. Our workshops typically contain a mixture of Windows, macOS, and +Linux users; in order to be usable, our lessons must run equally well on all +three. + +### Using GitHub + +If you choose to contribute via GitHub, you may want to look at [How to +Contribute to an Open Source Project on GitHub][how-contribute]. In brief, we +use [GitHub flow][github-flow] to manage changes: + +1. Create a new branch in your desktop copy of this repository for each + significant change. +2. Commit the change in that branch. +3. Push that branch to your fork of this repository on GitHub. +4. Submit a pull request from that branch to the [upstream repository][repo]. +5. If you receive feedback, make changes on your desktop and push to your + branch on GitHub: the pull request will update automatically. + +NB: The published copy of the lesson is usually in the `main` branch. + +Each lesson has a team of maintainers who review issues and pull requests or +encourage others to do so. The maintainers are community volunteers, and have +final say over what gets merged into the lesson. + +### Other Resources + +The Carpentries is a global organisation with volunteers and learners all over +the world. We share values of inclusivity and a passion for sharing knowledge, +teaching and learning. There are several ways to connect with The Carpentries +community listed at including via social +media, slack, newsletters, and email lists. You can also [reach us by +email][contact]. + +[repo]: https://example.com/FIXME +[contact]: mailto:team@carpentries.org +[cp-site]: https://carpentries.org/ +[dc-issues]: https://github.com/issues?q=user%3Adatacarpentry +[dc-lessons]: https://datacarpentry.org/lessons/ +[dc-site]: https://datacarpentry.org/ +[discuss-list]: https://lists.software-carpentry.org/listinfo/discuss +[github]: https://github.com +[github-flow]: https://guides.github.com/introduction/flow/ +[github-join]: https://github.com/join +[how-contribute]: https://egghead.io/series/how-to-contribute-to-an-open-source-project-on-github +[issues]: https://carpentries.org/help-wanted-issues/ +[lc-issues]: https://github.com/issues?q=user%3ALibraryCarpentry +[swc-issues]: https://github.com/issues?q=user%3Aswcarpentry +[swc-lessons]: https://software-carpentry.org/lessons/ +[swc-site]: https://software-carpentry.org/ +[lc-site]: https://librarycarpentry.org/ +[template-doc]: https://carpentries.github.io/workbench/ diff --git a/LICENSE.md b/LICENSE.md new file mode 100644 index 0000000..7632871 --- /dev/null +++ b/LICENSE.md @@ -0,0 +1,79 @@ +--- +title: "Licenses" +--- + +## Instructional Material + +All Carpentries (Software Carpentry, Data Carpentry, and Library Carpentry) +instructional material is made available under the [Creative Commons +Attribution license][cc-by-human]. The following is a human-readable summary of +(and not a substitute for) the [full legal text of the CC BY 4.0 +license][cc-by-legal]. + +You are free: + +- to **Share**---copy and redistribute the material in any medium or format +- to **Adapt**---remix, transform, and build upon the material + +for any purpose, even commercially. + +The licensor cannot revoke these freedoms as long as you follow the license +terms. 
+ +Under the following terms: + +- **Attribution**---You must give appropriate credit (mentioning that your work + is derived from work that is Copyright (c) The Carpentries and, where + practical, linking to ), provide a [link to the + license][cc-by-human], and indicate if changes were made. You may do so in + any reasonable manner, but not in any way that suggests the licensor endorses + you or your use. + +- **No additional restrictions**---You may not apply legal terms or + technological measures that legally restrict others from doing anything the + license permits. With the understanding that: + +Notices: + +* You do not have to comply with the license for elements of the material in + the public domain or where your use is permitted by an applicable exception + or limitation. +* No warranties are given. The license may not give you all of the permissions + necessary for your intended use. For example, other rights such as publicity, + privacy, or moral rights may limit how you use the material. + +## Software + +Except where otherwise noted, the example programs and other software provided +by The Carpentries are made available under the [OSI][osi]-approved [MIT +license][mit-license]. + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +## Trademark + +"The Carpentries", "Software Carpentry", "Data Carpentry", and "Library +Carpentry" and their respective logos are registered trademarks of [Community +Initiatives][ci]. + +[cc-by-human]: https://creativecommons.org/licenses/by/4.0/ +[cc-by-legal]: https://creativecommons.org/licenses/by/4.0/legalcode +[mit-license]: https://opensource.org/licenses/mit-license.html +[ci]: https://communityin.org/ +[osi]: https://opensource.org diff --git a/README.md b/README.md index 9a01c1b..1971fc6 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,8 @@ +> **ATTENTION** This is an experimental test of [The Carpentries Workbench](https://carpentries.github.io/workbench) lesson infrastructure. +> It was automatically converted from the source lesson via [the lesson transition script](https://github.com/carpentries/lesson-transition/). 
+> +> If anything seems off, please contact Zhian Kamvar [zkamvar@carpentries.org](mailto:zkamvar@carpentries.org) + [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.3954067.svg)](https://doi.org/10.5281/zenodo.3954067) # Library Carpentry @@ -6,9 +11,9 @@ The Library Carpentry module '[Tidy data for librarians](https://librarycarpentr ## Background -Library Carpentry is a software skills training programme aimed at library and information professions. It builds on the work of [Software Carpentry](http://software-carpentry.org/) and [Data Carpentry](http://www.datacarpentry.org/). +Library Carpentry is a software skills training programme aimed at library and information professions. It builds on the work of [Software Carpentry](https://software-carpentry.org/) and [Data Carpentry](https://www.datacarpentry.org/). -Library Carpentry is in the commons and for the commons. It is not tied to any institution of person. For more information on Library Carpentry, see our website [librarycarpentry.github.io](http://librarycarpentry.github.io/). +Library Carpentry is in the commons and for the commons. It is not tied to any institution of person. For more information on Library Carpentry, see our website [librarycarpentry.github.io](https://librarycarpentry.github.io/). ## Contribution @@ -18,16 +23,18 @@ Library Carpentry is in the commons and for the commons. It is not tied to any i ## Code of Conduct -All participants should agree to abide by the [Software Carpentry Code of Conduct](http://software-carpentry.org/conduct/). +All participants should agree to abide by the [Software Carpentry Code of Conduct](https://software-carpentry.org/conduct/). ## Authors Library Carpentry is authored and maintained by the [community](https://github.com/LibraryCarpentry/lc-spreadsheets/network/members). -This module in particular is heavily based on the [Data Carpentry Spreadsheets for Ecology](http://www.datacarpentry.org/spreadsheet-ecology-lesson/) lesson maintained by Aleksandra Pawlik and Tracy Teal, with contributions from Christie Bahlai, Aleksandra Pawlik, Jennifer Bryan, Alexander Duryee, Jeffrey Hollister, Daisie Huang, Owen Jones, Ben Marwick, and Tracy Teal. +This module in particular is heavily based on the [Data Carpentry Spreadsheets for Ecology](https://www.datacarpentry.org/spreadsheet-ecology-lesson/) lesson maintained by Aleksandra Pawlik and Tracy Teal, with contributions from Christie Bahlai, Aleksandra Pawlik, Jennifer Bryan, Alexander Duryee, Jeffrey Hollister, Daisie Huang, Owen Jones, Ben Marwick, and Tracy Teal. ## Citation Please cite as: ->Sherry Lake, Tim Dennis, Jez Cope, Francois Michonneau, Christopher Erdmann, erikamias, … yvonnemery. (2020, July). LibraryCarpentry/lc-spreadsheets: LibraryCarpentry/lc-spreadsheets: Library Carpentry: Tidy data for Librarians, July 2020 (Version v2020.07.1). Zenodo. http://doi.org/10.5281/zenodo.3954067 +> Sherry Lake, Tim Dennis, Jez Cope, Francois Michonneau, Christopher Erdmann, erikamias, … yvonnemery. (2020, July). LibraryCarpentry/lc-spreadsheets: LibraryCarpentry/lc-spreadsheets: Library Carpentry: Tidy data for Librarians, July 2020 (Version v2020.07.1). Zenodo. 
[http://doi.org/10.5281/zenodo.3954067](https://doi.org/10.5281/zenodo.3954067) + + diff --git a/_extras/guide.md b/_extras/guide.md deleted file mode 100644 index 1e963fe..0000000 --- a/_extras/guide.md +++ /dev/null @@ -1,119 +0,0 @@ ---- -layout: page -title: "Instructor Notes" ---- - -## Instructor notes - -### Lesson purpose - -The purpose of this lesson is not to teach how to do data analysis in spreadsheets, -but to teach good data organization and how to do some data cleaning and -quality control in a spreadsheet program. - -### Narrative - -#### [Introduction](../00-intro) - -* Introduce that we're teaching data organization, and that we're using -spreadsheets, because most people do data entry in spreadsheets or -have data in spreadsheets. -* Emphasize that we are teaching good practice in data organization and that -this is the foundation of their research practice. Without organized and clean -data, it will be difficult for them to apply the things we're teaching in the -rest of the workshop to their data. -* Much of their lives as a researcher will be spent on this 'data wrangling' stage, but -some of it can be prevented with good strategies for data collection up front. -* Tell that we're not teaching data analysis or plotting in spreadsheets, because it's -very manual and also not reproducible. That's why we're teaching SQL, R, Python! -* Now let's talk about spreadsheets, and when we say spreadsheets, we mean any program that -does spreadsheets like Excel, LibreOffice, OpenOffice. Most learners are probably using Excel. -* Ask the audience any things they've accidentally done in spreadsheets. Talk about an example of your own, like that you accidentally sorted only a single column and not the rest -of the data in the spreadsheet. What are the pain points!? -* As people answer highlight some of these issues with spreadsheets - -#### [Formatting data](../01-format-data) - -* Go through the point about keeping track of your steps and keeping raw data raw -* Go through the cardinal rule of spreadsheets about columns, rows and cells -* Hand them a messy data file and have them pair up and work together to clean up the data. -*Give them 15 minutes to do this.* -* Ask for what people did to clean the data. As they bring up different points you can -refer to them in the 02-common-mistakes.md file, or expand a bit on the point they brought up. -If you are just teaching the lesson, it would be good to familiarize yourself with -the set of mistakes in 02-common-mistakes. All these mistakes are present in the messy -dataset. -* If you get a response where they've fixed the date, you can pause and go to the -03-dates-as-data.md lesson. Or you can say you'll come back to dates at the end. -There's an exercise in that file about how to change the -date into three columns using Excel's built in MONTH, DAY, YEAR functions. Have them -run through that exercise. - -#### [Common formatting problems](../02-common-mistakes) - -* **Don't go through this chapter** except to refer to as responses to the exercise in -the previous chapter. - -#### [Dates as data](../03-dates-as-data) - -* Do the exercise and make the point about dates either in response to a learner bringing -up date as an issue during the responses, or at the end of the response time. - -#### [Quality control](../04-quality-control) -*This lesson is optional* - -The challenge with this lesson is that the instructor's version of the spreadsheet software is going to look different than about half the room's. 
It makes -it challenging to show where you can find menu options and navigate through. - -Instead discuss the concepts of quality control, and how things like sorting can help you find outliers in your data. - -#### [Exporting data](../05-exporting-data) - -* Have the students export their cleaned data as `.csv`. Reiterate again the need for -data in this format for the other tools we'll be using. - -#### [Data Format Caveats](../06-data-formats-caveats) -*This lesson is for reference* - -* This is mainly here as a reference if people have questions about different file formats. -You don't need to go through this. - -#### Concluding points - -* Now your data is organized so that a computer can read and understand it. This -let's you use the full power of the computer for your analyses as we'll see in the -rest of the workshop. -* While your data is now neatly organized, it still might have errors or missing data -or other problems. It's like you put all your data in the right drawers, but the -drawers might still be messy. The next lesson is going to teach you OpenRefine which -is great for data cleaning and for some of the quality control that we touched on -in this lesson. It also has the advantage that it automatically keeps track of the -steps you take. - -## Technical tips and tricks - -Provide information on setting up your environment for learners to view your -live coding (increasing text size, changing text color, etc), as well as -general recommendations for working with coding tools to best suit the -learning environment. - -### Potential issues - -#### Excel looks and acts different on different operating systems - -The main challenge with this lesson is that Excel looks very different and how you -do things is even different between Mac and PC, and between different versions of -Excel. So, the presenter's environment will only be the same as some of the learners. - -We need better notes and screenshots of how things work on both Mac and PC. But we -likely won't be able to cover all the different versions of Excel. - -If you have a helper who has experience with the other OS than you, it would be good -to prep them to help with this lesson and tell how people to do things in the other OS. - -#### People are not interactive or responsive on the Exercise - -This lesson depends on people working on the exercise and responding with things -that are fixed. If your audience is reluctant to participate, start out with -some things on your own, or ask a helper for their answers. This generally gets -even a reluctant audience started. diff --git a/config.yaml b/config.yaml new file mode 100644 index 0000000..efc2f0b --- /dev/null +++ b/config.yaml @@ -0,0 +1,88 @@ +#------------------------------------------------------------ +# Values for this lesson. +#------------------------------------------------------------ + +# Which carpentry is this (swc, dc, lc, or cp)? +# swc: Software Carpentry +# dc: Data Carpentry +# lc: Library Carpentry +# cp: Carpentries (to use for instructor training for instance) +# incubator: The Carpentries Incubator +carpentry: 'lc' + +# Overall title for pages. 
+title: 'Tidy data for librarians' + +# Date the lesson was created (YYYY-MM-DD, this is empty by default) +created: + +# Comma-separated list of keywords for the lesson +keywords: 'software, data, lesson, The Carpentries' + +# Life cycle stage of the lesson +# possible values: pre-alpha, alpha, beta, stable +life_cycle: 'stable' + +# License of the lesson materials (recommended CC-BY 4.0) +license: 'CC-BY 4.0' + +# Link to the source repository for this lesson +source: 'https://github.com/fishtree-attempt/lc-spreadsheets/' + +# Default branch of your lesson +branch: 'main' + +# Who to contact if there are any issues +contact: 'team@carpentries.org' + +# Navigation ------------------------------------------------ +# +# Use the following menu items to specify the order of +# individual pages in each dropdown section. Leave blank to +# include all pages in the folder. +# +# Example ------------- +# +# episodes: +# - introduction.md +# - first-steps.md +# +# learners: +# - setup.md +# +# instructors: +# - instructor-notes.md +# +# profiles: +# - one-learner.md +# - another-learner.md + +# Order of episodes in your lesson +episodes: +- 00-intro.md +- 01-format-data.md +- 02-common-mistakes.md +- 03-dates-as-data.md +- 04-quality-control.md +- 05-exporting-data.md +- 06-data-formats-caveats.md + +# Information for Learners +learners: + +# Information for Instructors +instructors: + +# Learner Profiles +profiles: + +# Customisation --------------------------------------------- +# +# This space below is where custom yaml items (e.g. pinning +# sandpaper and varnish versions) should live + + +url: https://preview.carpentries.org/lc-spreadsheets +analytics: carpentries +lang: en +workbench-beta: 'true' diff --git a/episodes/00-intro.md b/episodes/00-intro.md index 6b1d38c..7e69ff1 100644 --- a/episodes/00-intro.md +++ b/episodes/00-intro.md @@ -2,31 +2,35 @@ title: Using spreadsheet programs for data organization teaching: 10 exercises: 5 -questions: - - What are good data practices for using spreadsheets for organizing data? -objectives: - - Understanding some drawbacks and advantages of using spreadsheet programs - - Distinguish machine readable tidy data from data that is easy to read for humans -keypoints: - - We will discuss good practices for data entry and formatting - - We will not discuss analysis or visualisation - authors: - - Jez Cope - - Christie Bahlai - - Aleksandra Pawlik +- Jez Cope +- Christie Bahlai +- Aleksandra Pawlik contributors: - - Jennifer Bryan - - Alexander Duryee - - Jeffrey Hollister - - Daisie Huang - - Owen Jones - - Clare Sloggett - - Harriet Dashnow - - Ben Marwick - - Sherry Lake +- Jennifer Bryan +- Alexander Duryee +- Jeffrey Hollister +- Daisie Huang +- Owen Jones +- Clare Sloggett +- Harriet Dashnow +- Ben Marwick +- Sherry Lake --- +::::::::::::::::::::::::::::::::::::::: objectives + +- Understanding some drawbacks and advantages of using spreadsheet programs +- Distinguish machine readable tidy data from data that is easy to read for humans + +:::::::::::::::::::::::::::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::: questions + +- What are good data practices for using spreadsheets for organizing data? + +:::::::::::::::::::::::::::::::::::::::::::::::::: + Good **data organization** is the foundation of much of our day-to-day work in libraries. Most **librarians** have data or do data entry in spreadsheets. Spreadsheet programs are very **useful graphical @@ -42,7 +46,7 @@ to be able to do as librarians. 
We can use them for: - Statistics - Plotting ---- +*** ### Spreadsheet outline @@ -54,14 +58,12 @@ In this lesson, we will look at: - Basic quality control and data manipulation in spreadsheets - Exporting data from spreadsheets - **Much of your time when you're producing a report will be spent in this 'data wrangling' stage.** It's not the most fun, but it's necessary. We'll teach you how to think about data organization and some practices for more effective data wrangling. - ---- +*** ### What this lesson will not teach you @@ -72,7 +74,7 @@ some practices for more effective data wrangling. If you're looking to do this, a good reference is [Head First Excel by O'Reilly Media](https://www.amazon.com/Head-First-Excel-learners-spreadsheets/dp/0596807694). ---- +*** ### Why aren't we teaching data analysis in spreadsheets @@ -85,7 +87,6 @@ If you're looking to do this, a good reference is analyses** done in spreadsheet programs when you want to go back to your work or someone asks for details of your analysis. - ### Spreadsheet programs There are a number of spreadsheet programs available for use on a desktop or web browser: @@ -101,24 +102,31 @@ Commands may differ a bit between programs, but the general idea is the same. In this lesson, we will assume that you are most likely using Excel as your primary spreadsheet program. There are others with similar functionality, including Gnumeric, OpenOffice Calc, and Google Sheets, but Excel is the package you're most likely to have available on your work computer. ---- +*** -> ## Questions: -> -> - How many people have used spreadsheets in their work? -> - What kind of operations do you do in spreadsheets? -> - Which ones do you think spreadsheets are good for? -{: .challenge} +::::::::::::::::::::::::::::::::::::::: challenge +## Questions: ---- +- How many people have used spreadsheets in their work? +- What kind of operations do you do in spreadsheets? +- Which ones do you think spreadsheets are good for? + -> ## Question -> -> - Spreadsheets can be very useful, but they can also be frustrating and even sometimes give us incorrect results. What are some things that you've accidentally done in a spreadsheet, or have been frustrated that you can't do easily? -{: .challenge} +:::::::::::::::::::::::::::::::::::::::::::::::::: ---- +*** + +::::::::::::::::::::::::::::::::::::::: challenge + +## Question + +- Spreadsheets can be very useful, but they can also be frustrating and even sometimes give us incorrect results. What are some things that you've accidentally done in a spreadsheet, or have been frustrated that you can't do easily? + + +:::::::::::::::::::::::::::::::::::::::::::::::::: + +*** ## Problems with Spreadsheets @@ -128,38 +136,46 @@ to create data tables for publications, to generate summary statistics, and make figures. Generating **tables for reports** in a spreadsheet is not optimal - -often, when formatting a data table for publication, we’re reporting +often, when formatting a data table for publication, we're reporting key summary statistics in a way that is **not really meant to be read as data**, and often involves **special formatting** (merging cells, creating borders, making it pretty). We advise you to do this sort of operation within your document editing software. 
-The latter two applications, **generating statistics and figures**, should -be used with caution: because of the graphical, drag and drop nature of -spreadsheet programs, it can be very difficult, if not impossible, to -replicate your steps (much less retrace anyone else's), particularly if your -stats or figures require you to do more complex calculations. Furthermore, -in doing calculations in a spreadsheet, it’s easy to accidentally apply a -slightly different formula to multiple adjacent cells. When using a -command-line based statistics program like R or SAS, it’s practically -impossible to accidentally apply a calculation to one observation in your -dataset but not another unless you’re doing it on purpose. +The latter two applications, **generating statistics and figures**, should +be used with caution: because of the graphical, drag and drop nature of +spreadsheet programs, it can be very difficult, if not impossible, to +replicate your steps (much less retrace anyone else's), particularly if your +stats or figures require you to do more complex calculations. Furthermore, +in doing calculations in a spreadsheet, it's easy to accidentally apply a +slightly different formula to multiple adjacent cells. When using a +command-line based statistics program like R or SAS, it's practically +impossible to accidentally apply a calculation to one observation in your +dataset but not another unless you're doing it on purpose. ### Using Spreadsheets for Data Entry and Cleaning **HOWEVER**, there are circumstances where you might want to use a -spreadsheet program to produce “quick and dirty” calculations or +spreadsheet program to produce "quick and dirty" calculations or figures, and some of these features can be used in **data cleaning**, prior to importation into a statistical analysis program. We will show you how to use some features of spreadsheet programs to check your data quality along the way and produce preliminary summary statistics. - In this lesson, we're going to talk about: -1. [Formatting data tables in spreadsheets](../01-format-data) -2. [Formatting problems](../02-common-mistakes) -3. [Dates as data](../03-dates-as-data) -4. [Basic quality control and data manipulation in spreadsheets](../04-quality-control) -5. [Exporting data from spreadsheets](../05-exporting-data) -6. [Data export formats caveats](../06-data-formats-caveats) +1. [Formatting data tables in spreadsheets](01-format-data.md) +2. [Formatting problems](02-common-mistakes.md) +3. [Dates as data](03-dates-as-data.md) +4. [Basic quality control and data manipulation in spreadsheets](04-quality-control.md) +5. [Exporting data from spreadsheets](05-exporting-data.md) +6. [Data export formats caveats](06-data-formats-caveats.md) + +:::::::::::::::::::::::::::::::::::::::: keypoints + +- We will discuss good practices for data entry and formatting +- We will not discuss analysis or visualisation + +:::::::::::::::::::::::::::::::::::::::::::::::::: + + diff --git a/episodes/01-format-data.md b/episodes/01-format-data.md index fa495d2..404bd0e 100644 --- a/episodes/01-format-data.md +++ b/episodes/01-format-data.md @@ -2,34 +2,33 @@ title: Formatting data tables in Spreadsheets teaching: 10 exercises: 20 -questions: - - "How should data be formatted in spreadsheets?" -objectives: - - "Describe best practices for data entry and formatting in spreadsheets." - - "Apply best practices to arrange variables and observations in a spreadsheet." 
- -keypoints: - - Use one column for one variable - - Use one row for one observation - - Use one cell for one value - - Never modify your raw data. Always make a copy before making any changes. - - Keep all of the steps you take to clean your data in a plain text file. - authors: - - Jez Cope - - Christie Bahlai - - Aleksandra Pawlik - - Sherry Lake +- Jez Cope +- Christie Bahlai +- Aleksandra Pawlik +- Sherry Lake contributors: - - Jennifer Bryan - - Alexander Duryee - - Jeffrey Hollister - - Daisie Huang - - Owen Jones - - Ben Marwick - - Sebastian Kupny +- Jennifer Bryan +- Alexander Duryee +- Jeffrey Hollister +- Daisie Huang +- Owen Jones +- Ben Marwick +- Sebastian Kupny --- +::::::::::::::::::::::::::::::::::::::: objectives + +- Describe best practices for data entry and formatting in spreadsheets. +- Apply best practices to arrange variables and observations in a spreadsheet. + +:::::::::::::::::::::::::::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::: questions + +- How should data be formatted in spreadsheets? + +:::::::::::::::::::::::::::::::::::::::::::::::::: The most common mistake made is treating the program like it is a notebook by relying on context, notes in the margin, spatial layout of data and fields to convey information. As humans, @@ -45,19 +44,23 @@ but to use that power, we have to set up our data for the computer to be able to understand it (and computers are very literal). -This is why it’s extremely important to set up well-formatted tables from the outset **before** you even start collecting data to analyse. +This is why it's extremely important to set up well-formatted tables from the outset **before** you even start collecting data to analyse. **Data organization is the foundation of your data-related work.** Unorganized data can make it harder to work with your data, so you should be mindful of your data organization when doing your data entry. You'll want to organize your data in a way that allows other programs and people to easily understand and use the data. -> ## Callout -> -> **Note:** the best layouts/formats (as well as software and -> interfaces) for **data entry** and **data analysis** might be -> different. It is important to take this into account, and ideally -> automate the conversion from one to another. -{: .callout} +::::::::::::::::::::::::::::::::::::::::: callout + +## Callout + +**Note:** the best layouts/formats (as well as software and +interfaces) for **data entry** and **data analysis** might be +different. It is important to take this into account, and ideally +automate the conversion from one to another. + + +:::::::::::::::::::::::::::::::::::::::::::::::::: ### Keeping track of your analyses @@ -74,40 +77,38 @@ you **must:** You should track these steps as a scientist would each step in an experiment. You can do this in another text file, or a good option is to create a new tab in your spreadsheet with your notes. This way - the notes and data stay together. Be sure you're saving your spreadsheet + the notes and data stay together. Be sure you're saving your spreadsheet with a file format compatible with multiple tabs, if you do this! This might be an example of a spreadsheet setup: -![spreadsheet setup](../fig/spreadsheet-setup.png) +![](fig/spreadsheet-setup.png){alt='spreadsheet setup'} We will put these principles into practice today during your exercises. - ### Structuring data in spreadsheets - The cardinal rules of using spreadsheet programs for data: 1. 
Put all your **variables in columns** - the thing you're measuring, - like 'length' or 'attendance'. + like 'length' or 'attendance'. 2. Put each **observation in its own row**. 3. **Don't combine multiple pieces of information in one - cell**. Sometimes it just seems like one thing, but think if that's - the only way you'll want to be able to use or sort that data. + cell**. Sometimes it just seems like one thing, but think if that's + the only way you'll want to be able to use or sort that data. 4. **Leave the raw data raw** - don't mess with it! 5. Export the cleaned data to a **text based format** like CSV. This - ensures that anyone can use the data, and is the format required by - most data repositories. + ensures that anyone can use the data, and is the format required by + most data repositories. For instance, we have data from attendance and instruction for previous -research data management workshops. Different people have entered data into a -single spreadsheet. They keep track of things like date, number of attendees, and +research data management workshops. Different people have entered data into a +single spreadsheet. They keep track of things like date, number of attendees, and who delivered the workshop. If they were to keep track of the data like this: -![multiple-info example](../fig/multiple-info.png) +![](fig/multiple-info.png){alt='multiple-info example'} the problem is that the number of attendees of different types (post-graduate researcher (PGR), post-doctoral research associate (PDRA), and other) are in @@ -123,41 +124,63 @@ variables**, **rows = observations**, **cells = data** (values). So, instead we should have: -![single-info example](../fig/single-info.png) +![](fig/single-info.png){alt='single-info example'} + +::::::::::::::::::::::::::::::::::::::: challenge + +## Exercise -> ## Exercise -> -> We're going to take a messy version of some library training data and clean it up. -> -> 1. First [download the data](../data/training_attendance.xlsx) -> 2. Open up the data in a spreadsheet program. -> 1. You can see that there are three tabs. Various people have recorded +We're going to take a messy version of some library training data and clean it up. + +1. First [download the data](data/training_attendance.xlsx) +2. Open up the data in a spreadsheet program. +3. You can see that there are three tabs. Various people have recorded training attendance statistics over 2016 and 2017, and they have kept track of the data in their own way. Now you're being asked to evaluate the training programme and you want to be able to start doing statistics with the data. -> 1. With the person next to you, work on the messy data so that a +4. With the person next to you, work on the messy data so that a computer will be able to understand it. Clean up the 2016 and 2017 tabs, and put them all together in one spreadsheet. -> 1. After you go through this exercise, we'll discuss as a group what you think was wrong -with this data and how you fixed it. -{: .challenge} +5. After you go through this exercise, we'll discuss as a group what you think was wrong + with this data and how you fixed it. + + +:::::::::::::::::::::::::::::::::::::::::::::::::: + +::::::::::::::::::::::::::::::::::::::::: callout + +## Important +Do not forget of our first piece of advice: +**create a new file** for the cleaned data, and **never +modify the original (raw) data**. 
-> ## Important -> -> Do not forget of our first piece of advice: -> **create a new file** for the cleaned data, and **never -> modify the original (raw) data**. -{: .callout} +:::::::::::::::::::::::::::::::::::::::::::::::::: An excellent reference, in particular with regard to R scripting is -> ## Resource -> -> Hadley Wickham, *Tidy Data*, Vol. 59, Issue 10, Sep 2014, Journal of -> Statistical Software. [http://www.jstatsoft.org/v59/i10](http://www.jstatsoft.org/v59/i10). -{: .callout} +::::::::::::::::::::::::::::::::::::::::: callout + +## Resource + +Hadley Wickham, *Tidy Data*, Vol. 59, Issue 10, Sep 2014, Journal of +Statistical Software. [http://www.jstatsoft.org/v59/i10](https://www.jstatsoft.org/v59/i10). + + +:::::::::::::::::::::::::::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::: keypoints + +- Use one column for one variable +- Use one row for one observation +- Use one cell for one value +- Never modify your raw data. Always make a copy before making any changes. +- Keep all of the steps you take to clean your data in a plain text file. + +:::::::::::::::::::::::::::::::::::::::::::::::::: + + diff --git a/episodes/02-common-mistakes.md b/episodes/02-common-mistakes.md index d2fa509..3acb2a5 100644 --- a/episodes/02-common-mistakes.md +++ b/episodes/02-common-mistakes.md @@ -2,36 +2,31 @@ title: Formatting problems teaching: 20 exercises: 0 -questions: - - What common mistakes are made when formatting spreadsheets? -objectives: - - Recognize and resolve common spreadsheet formatting problems. -keypoints: - - "Don't use multiple tables in one sheet" - - "Don't use multiple tabs in a file" - - Fill in zero when you mean zero - - Use an appropriate null value to record missing data - - "Don't use formatting to convey information or make the spreadsheet look pretty" - - "Don't put units or comments in cells" - - "Don't combine several values in one cell" - - "Take care over column names" - - "Avoid including special characters in your data file" - - "Put metadata (units, legends etc.) in a separate file" - authors: - - Jez Cope - - Christie Bahlai - - Aleksandra Pawlik - - Sherry Lake +- Jez Cope +- Christie Bahlai +- Aleksandra Pawlik +- Sherry Lake contributors: - - Jennifer Bryan - - Alexander Duryee - - Jeffrey Hollister - - Daisie Huang - - Owen Jones - - Ben Marwick +- Jennifer Bryan +- Alexander Duryee +- Jeffrey Hollister +- Daisie Huang +- Owen Jones +- Ben Marwick --- +::::::::::::::::::::::::::::::::::::::: objectives + +- Recognize and resolve common spreadsheet formatting problems. + +:::::::::::::::::::::::::::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::: questions + +- What common mistakes are made when formatting spreadsheets? + +:::::::::::::::::::::::::::::::::::::::::::::::::: ## Common Spreadsheet Errors @@ -46,34 +41,33 @@ contributors: - [Field name problems](#field_name) - [Special characters in data](#special) - [Inclusion of metadata in data table](#metadata) -- [Date formatting]({{ site.baseurl }}{% link _episodes/03-dates-as-data.md %}) +- [Date formatting](03-dates-as-data.md) ---- +*** ## Multiple tables {#tables} A common strategy is creating multiple data tables within one spreadsheet. **This confuses the computer, so don't do this!** When -you create multiple tables within one spreadsheet, you’re drawing +you create multiple tables within one spreadsheet, you're drawing false associations between things for the computer, which sees each -row as an observation. 
You’re also potentially using the same field +row as an observation. You're also potentially using the same field name in multiple places, which will make it harder to clean your data up into a usable form. The example below depicts the problem: -![Screengrab of spreadsheet showing formatting errors - multiple tables in one sheet](../fig/2_Multiple_Tables.png) - +![](fig/2_Multiple_Tables.png){alt='Screengrab of spreadsheet showing formatting errors - multiple tables in one sheet'} ## Multiple tabs {#tabs} But what about worksheet tabs? That seems like an easy way to organize data, right? Well, yes and no. When you create extra tabs, you fail to allow the computer to see connections in the data that are there (you have to introduce spreadsheet application-specific functions or scripting to ensure this connection). Say, for instance, you make a separate tab for each year. This is bad practice for two reasons: -**1)** you are more likely to accidentally add inconsistencies to your data if each time you take a measurement, you start recording data in a new tab, and -**2)** even if you manage to prevent all inconsistencies from creeping in, you will add an extra step for yourself before you analyze the data because you will have to combine these data into a single datatable. You will have to explicitly tell the computer how to combine tabs - and if the tabs are inconsistently formatted, you might even have to do it by hand! +**1\)** you are more likely to accidentally add inconsistencies to your data if each time you take a measurement, you start recording data in a new tab, and +**2\)** even if you manage to prevent all inconsistencies from creeping in, you will add an extra step for yourself before you analyze the data because you will have to combine these data into a single datatable. You will have to explicitly tell the computer how to combine tabs - and if the tabs are inconsistently formatted, you might even have to do it by hand! -The next time you’re entering data, and you go to create another tab or table, I want you to ask yourself “Self, could I avoid adding this tab by adding another column to my original spreadsheet?” +The next time you're entering data, and you go to create another tab or table, I want you to ask yourself "Self, could I avoid adding this tab by adding another column to my original spreadsheet?" -Your data sheet might get very long over the course of recording data. This makes it harder to enter data if you can’t see your headers at the top of the spreadsheet. But do NOT repeat headers. These can easily get mixed into the data, leading to problems down the road. +Your data sheet might get very long over the course of recording data. This makes it harder to enter data if you can't see your headers at the top of the spreadsheet. But do NOT repeat headers. These can easily get mixed into the data, leading to problems down the road. Instead you can Freeze the column headers. @@ -83,94 +77,85 @@ Instead you can Freeze the column headers. [Documentation on how to freeze column headers in Google Sheets](https://support.google.com/docs/answer/9060449?co=GENIE.Platform%3DDesktop&hl=en) - - ## Not filling in zeroes {#zeros} It might be that when you're measuring something, it's usually a zero, say the number of participants at a training event. Why bother writing in the number zero in that column, when it's mostly zeros? - - However, there's a difference between a zero and a blank cell in a spreadsheet. To the computer, a zero is actually data. You measured or counted it. 
A blank cell means that it wasn't measured and the computer will interpret it as a null value. The spreadsheets or statistical programs will likely mis-interpret blank cells that are meant to be zero. This is equivalent to leaving out data. Zero observations are real data! Leaving zero data blank is not good in a written format, but NEVER okay when you move your data into a digital format. - ## Using bad null values {#null} **Example**: using -999, other numerical values, zero, or text to represent missing values. -Whatever the reason, it’s a problem if unknown or missing data is recorded as -999, 999, or 0. -Many statistical programs will not recognize that these are intended to represent missing (null) values. -How these values are interpreted will depend on the software you use to analyze your data. +Whatever the reason, it's a problem if unknown or missing data is recorded as -999, 999, or 0. +Many statistical programs will not recognize that these are intended to represent missing (null) values. +How these values are interpreted will depend on the software you use to analyze your data. -**Solution**: A solution will depend on the final application of your data and how you intend to analyse it, -but it is essential to use a clearly defined and CONSISTENT null indicator. Blank cells are the best choices for most applications; +**Solution**: A solution will depend on the final application of your data and how you intend to analyse it, +but it is essential to use a clearly defined and CONSISTENT null indicator. Blank cells are the best choices for most applications; when working in R, `NA` may be an acceptable null value choice. -There are many reasons that null values may be represented differently within a dataset. Sometimes confusing null values are automatically recorded from the measuring device. -In that case, there’s not much you can do, but such inconsistencies can often be addressed in data cleaning -with a tool like [OpenRefine](https://librarycarpentry.org/lc-open-refine/) before using or sharing the data. -In other cases, null values may convey different reasons why the data is missing. -It may be useful to capture these reasons, but if you use the same column it is effectively storing two pieces of information in one column. +There are many reasons that null values may be represented differently within a dataset. Sometimes confusing null values are automatically recorded from the measuring device. +In that case, there's not much you can do, but such inconsistencies can often be addressed in data cleaning +with a tool like [OpenRefine](https://librarycarpentry.org/lc-open-refine/) before using or sharing the data. +In other cases, null values may convey different reasons why the data is missing. +It may be useful to capture these reasons, but if you use the same column it is effectively storing two pieces of information in one column. In such a case, it would be good to create a new column like `data_missing`, then to use that column to capture the reasons for missing data. -| Null Values | Problems | Compatibility | Recommendation | -|-------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------|----------------| -| 0 | Indistinguishable from a true zero | | NEVER use | -| Blank | Hard to distinguish values that are missing from those overlooked on entry. Hard to distinguish blanks from spaces, which behave differently. 
| R, Python, SQL, Excel | Best option | -| -999, 999 | Not recognized as null by many programs without user input. Can be inadvertently entered into calculations. | | Avoid | -| NA, na | Can also be an abbreviation (e.g., North America), can cause problems with data type (turn a numerical column into a text column). NA is more commonly recognized than na. | R | Good option | -| N/A | An alternate form of NA, but often not compatible with software. | | Avoid | -| NULL | Can cause problems with data type. | SQL | Good option | -| None | Uncommon. Can cause problems with data type. | Python | Avoid | -| No data | Uncommon. Can cause problems with data type, contains a space. | | Avoid | -| Missing | Uncommon. Can cause problems with data type. | | Avoid | -| -, +, . | Uncommon. Can cause problems with data type. | | Avoid | - -Choices for representing null values, as illustrated in the table above, are proposed and explained further by White and others in their article: [Nine simple ways to make it easier to (re)use your data.](https://ojs.library.queensu.ca/index.php/IEE/article/view/4608) Ideas in Ecology and Evolution 6 (2013): 1-10. DOI: [10.4033/iee.2013.6b.6.f](http://10.4033/iee.2013.6b.6.f) +| Null Values | Problems | Compatibility | Recommendation | +| ----------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------- | -------------- | +| 0 | Indistinguishable from a true zero | | NEVER use | +| Blank | Hard to distinguish values that are missing from those overlooked on entry. Hard to distinguish blanks from spaces, which behave differently. | R, Python, SQL, Excel | Best option | +| \-999, 999 | Not recognized as null by many programs without user input. Can be inadvertently entered into calculations. | | Avoid | +| NA, na | Can also be an abbreviation (e.g., North America), can cause problems with data type (turn a numerical column into a text column). NA is more commonly recognized than na. | R | Good option | +| N/A | An alternate form of NA, but often not compatible with software. | | Avoid | +| NULL | Can cause problems with data type. | SQL | Good option | +| None | Uncommon. Can cause problems with data type. | Python | Avoid | +| No data | Uncommon. Can cause problems with data type, contains a space. | | Avoid | +| Missing | Uncommon. Can cause problems with data type. | | Avoid | +| \-, +, . | Uncommon. Can cause problems with data type. | | Avoid | + +Choices for representing null values, as illustrated in the table above, are proposed and explained further by White and others in their article: [Nine simple ways to make it easier to (re)use your data.](https://ojs.library.queensu.ca/index.php/IEE/article/view/4608) Ideas in Ecology and Evolution 6 (2013): 1-10. DOI: [10\.4033/iee.2013.6b.6.f](https://10.4033/iee.2013.6b.6.f) ## Using formatting to convey information {#formatting} **Example**: highlighting cells, rows or columns that should be excluded from an analysis, leaving blank rows to indicate separations in data. -![formatting](../fig/formatting.png) +![](fig/formatting.png){alt='formatting'} **Solution**: create a new field to encode which data should be excluded. 
-![good formatting](../fig/good_formatting.png) - +![](fig/good_formatting.png){alt='good formatting'} -## Using formatting to make the data sheet look pretty {#formatting_pretty} +## Using formatting to make the data sheet look pretty {#formatting\_pretty} **Example**: merging cells. -**Solution**: If you’re not careful, formatting a worksheet to be more aesthetically pleasing can compromise your computer’s ability to see associations in the data. Merged cells are an absolute formatting NO-NO if you want to make your data readable by statistics software. Consider restructuring your data in such a way that you will not need to merge cells to organize your data. - +**Solution**: If you're not careful, formatting a worksheet to be more aesthetically pleasing can compromise your computer's ability to see associations in the data. Merged cells are an absolute formatting NO-NO if you want to make your data readable by statistics software. Consider restructuring your data in such a way that you will not need to merge cells to organize your data. ## Placing comments or units in cells {#units} **Example**: Your data was collected, in part, by a summer student who you later found out was mis-recording the duration of training sessions, some of the time. You want a way to note these data are suspect. -**Solution**: Most statistical programs can’t see Excel’s comments, and would be confused by comments placed within your data cells. As described above for formatting, create another field if you need to add notes to cells. Similarly, don’t include units in cells (such as "hours","min"): ideally, all the units or measurements you place in one column should be of the same standard, but if for some reason they aren’t, insert another column and specify the units. - +**Solution**: Most statistical programs can't see Excel's comments, and would be confused by comments placed within your data cells. As described above for formatting, create another field if you need to add notes to cells. Similarly, don't include units in cells (such as "hours","min"): ideally, all the units or measurements you place in one column should be of the same standard, but if for some reason they aren't, insert another column and specify the units. ## More than one piece of information in a cell {#info} -**Example**: -One table recorded attendance by the different types of attendees. This table recorded number of attendees of different types: post-graduate researcher (PGR), post-doctoral research associate (PDRA), and other. +**Example**: +One table recorded attendance by the different types of attendees. This table recorded number of attendees of different types: post-graduate researcher (PGR), post-doctoral research associate (PDRA), and other. -**Solution**: +**Solution**: Never include more than one piece of information in a cell. Design your data sheet to include a column for each type of attendee, if this information is important to collect, rather than just a total number. +## Field name problems {#field\_name} - -## Field name problems {#field_name} -Choose descriptive field names, but be careful not to include: spaces, numbers, or special characters of any kind. Spaces can be misinterpreted by parsers that use whitespace as delimiters and some programs don’t like field names that are text strings that start with numbers. +Choose descriptive field names, but be careful not to include: spaces, numbers, or special characters of any kind. 
Spaces can be misinterpreted by parsers that use whitespace as delimiters and some programs don't like field names that are text strings that start with numbers. Underscores (`_`) are a good alternative to spaces and consider writing names in camel-case to improve readability. Remember that abbreviations that make sense at the moment may not be so obvious in 6 months but don't overdo it with names that are excessively long. Including the units in the field names avoids confusion and enables others to readily interpret your fields. -**Examples** +**Examples** | Good Name | Good Alternative | Avoid | |--------------------+--------------------+----------------------| @@ -182,7 +167,6 @@ Underscores (`_`) are a good alternative to spaces and consider writing names in | cell\_type | CellType | Cell Type | | Observation\_01 | first\_observation | 1st Obs | - ## Special characters in data {#special} **Example**: You treat Excel as a word processor when writing notes, even copying data directly from Word or other applications. @@ -191,9 +175,25 @@ Underscores (`_`) are a good alternative to spaces and consider writing names in General best practice is to avoid adding characters such as newlines, tabs, and vertical tabs. In other words, treat a text cell as if it were a simple web form that can only contain text and spaces. - ## Inclusion of metadata in data table {#metadata} **Example**: You add a legend at the top or bottom of your data table explaining column meaning, units, exceptions, etc. **Solution**: While recording data about your data ("metadata") is essential, this information should not be contained in the data file itself. Unlike a table in a paper or a supplemental file, metadata (in the form of legends) should not be included in a data file since this information is not data, and including it can disrupt how computer programs interpret your data file. Rather, metadata should be stored as a separate file in the same directory as your data file, preferably in plain text format with a name that clearly associates it with your data file. Because metadata files are free text format, they also allow you to encode comments, units, information about how null values are encoded, etc. that are important to document but can disrupt the formatting of your data file. + +:::::::::::::::::::::::::::::::::::::::: keypoints + +- Don't use multiple tables in one sheet +- Don't use multiple tabs in a file +- Fill in zero when you mean zero +- Use an appropriate null value to record missing data +- Don't use formatting to convey information or make the spreadsheet look pretty +- Don't put units or comments in cells +- Don't combine several values in one cell +- Take care over column names +- Avoid including special characters in your data file +- Put metadata (units, legends etc.) in a separate file + +:::::::::::::::::::::::::::::::::::::::::::::::::: + + diff --git a/episodes/03-dates-as-data.md b/episodes/03-dates-as-data.md index 2796b1e..524b827 100644 --- a/episodes/03-dates-as-data.md +++ b/episodes/03-dates-as-data.md @@ -2,31 +2,34 @@ title: Dates as data teaching: 20 exercises: 5 -questions: - - 'How are dates handled by computers?' -objectives: - - Describe how dates are stored and formatted in spreadsheets. - - Describe the advantages of alternative date formatting in spreadsheets. - - Demonstrate best practices for entering dates in spreadsheets. -keypoints: - - Excel is notoriously bad at handling dates. 
- - Treating dates as multiple pieces of data rather than one makes them easier to handle and exchange between programs. - authors: - - Jez Cope - - Christie Bahlai - - Aleksandra Pawlik +- Jez Cope +- Christie Bahlai +- Aleksandra Pawlik contributors: - - Jennifer Bryan - - Angel Corpuz - - Alexander Duryee - - Jeffrey Hollister - - Daisie Huang - - Owen Jones - - Ben Marwick - - Sherry Lake +- Jennifer Bryan +- Angel Corpuz +- Alexander Duryee +- Jeffrey Hollister +- Daisie Huang +- Owen Jones +- Ben Marwick +- Sherry Lake --- +::::::::::::::::::::::::::::::::::::::: objectives + +- Describe how dates are stored and formatted in spreadsheets. +- Describe the advantages of alternative date formatting in spreadsheets. +- Demonstrate best practices for entering dates in spreadsheets. + +:::::::::::::::::::::::::::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::: questions + +- How are dates handled by computers? + +:::::::::::::::::::::::::::::::::::::::::::::::::: Dates in spreadsheets are often stored in one column. Whilst this seems the most natural way to enter dates, it actually is not a good @@ -39,20 +42,24 @@ This can cause problems if the date displayed does not fully represent the infor Spreadsheet applications employ numerous features that facilitate the processing and display of date information. While these features often make date information more easily readable, the underlying data handling techniques can create data ambiguity in a variety of ways. The figure below illustrates some of the ways that the display of information representing the same date can vary. Column A is the information as entered by a user, and the following columns show different ways that the information may be displayed. -![Many formats, many ambiguities](../fig/5_excel_dates_1.jpg) +![](fig/5_excel_dates_1.jpg){alt="Many formats, many ambiguities"} -> ## How can these features create data ambiguity? -> -> Ideally, data should be as unambiguous as possible. -> -> * What do you notice about the display of the date information above? What information changes between the columns? -> * What aspects of the display lack specificity and may introduce ambiguity? -{: .discussion} +:::::::::::::::::::::::::::::::::::::: discussion + +## How can these features create data ambiguity? + +Ideally, data should be as unambiguous as possible. + +- What do you notice about the display of the date information above? What information changes between the columns? +- What aspects of the display lack specificity and may introduce ambiguity? + + +:::::::::::::::::::::::::::::::::::::::::::::::::: ### Displaying dates The figure above shows that ambiguity may creep into your data in numerous ways depending on the format you chose when you entered your data. -If you’re not fully aware of these ambiguities, you may find that Excel will interpret your data in unexpected ways later. +If you're not fully aware of these ambiguities, you may find that Excel will interpret your data in unexpected ways later. The display format of each cell can be modified. To change the display in Excel, navigate to the Format menu and choose "Cells...". In the "Format Cells" dialog box, you can select a Date format and choose various display outputs (some are shown in the above figure). In the dialog box, you can also choose to format the cell as a number or text. It may be useful to format the cell as one of these other datatypes, since as we will discuss next, the spreadsheet program understands the date information as a number. 
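A quick way to convince yourself of this is to ask for the date through an explicit format string instead of relying on the cell's display format. The following is a minimal sketch rather than part of the original lesson material: it assumes you are working in Excel, that the date July 2, 2014 has been typed into cell `A1` (the cell reference is only an example), and it uses the `TEXT()` function that appears in the function table later in this episode.

```source
=TEXT(A1, "yyyy-mm-dd")
```

```output
2014-07-02
```

The displayed text no longer depends on how the original cell is formatted. Reformatting `A1` itself as a plain Number should instead show the underlying serial value that the next section explains.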
@@ -66,17 +73,15 @@ the above functions, you can easily add days, months or years to a given date. Say you had a sampling plan where you needed to sample every thirty seven days. In another cell, you could input the following: -~~~ +```source =B2+37 -~~~ -{: .source} +``` This would display: -~~~ +```output 8-Aug -~~~ -{: .output} +``` This happens because Excel processes the date July 2, 2014 as the number `41822`. Adding `41822 + 37` results in `41859` @@ -85,16 +90,20 @@ part) of the cell that is being operated upon (unless you did some sort of formatting to the cell before, and then all bets are off). Month and year rollovers are internally tracked and applied. -> ## Working with historical dates (before 1900) -> -> Excel is unable to parse dates from before 1899-12-31, and will thus leave these untouched. If you’re mixing historic data -> from before and after this date, Excel will translate only the post-1900 dates into its internal format, thus resulting in mixed data. -> If you’re working with historic data, be extremely careful with your dates! -> -> Excel also entertains a second date system, the 1904 date system, as the default in Excel for Macintosh. This system will assign a -> different serial number than the [1900 date system](https://support.microsoft.com/en-us/help/214330/differences-between-the-1900-and-the-1904-date-system-in-excel). Because of this, -> [dates must be checked for accuracy when exporting data from Excel](http://uc3.cdlib.org/2014/04/09/abandon-all-hope-ye-who-enter-dates-in-excel/) (look for dates that are about 4 years off). -{: .callout} +::::::::::::::::::::::::::::::::::::::::: callout + +## Working with historical dates (before 1900) + +Excel is unable to parse dates from before 1899-12-31, and will thus leave these untouched. If you're mixing historic data +from before and after this date, Excel will translate only the post-1900 dates into its internal format, thus resulting in mixed data. +If you're working with historic data, be extremely careful with your dates! + +Excel also entertains a second date system, the 1904 date system, as the default in Excel for Macintosh. This system will assign a +different serial number than the [1900 date system](https://support.microsoft.com/en-us/help/214330/differences-between-the-1900-and-the-1904-date-system-in-excel). Because of this, +[dates must be checked for accuracy when exporting data from Excel](https://uc3.cdlib.org/2014/04/09/abandon-all-hope-ye-who-enter-dates-in-excel/) (look for dates that are about 4 years off). + + +:::::::::::::::::::::::::::::::::::::::::::::::::: ## Useful spreadsheet functions for working with date information @@ -105,35 +114,43 @@ Gnumeric, etc.) are usually guaranteed to be compatible only within the same family of products.** So, if you will later need to export the data and need to conserve the timestamps you should consider recording date information using one of the solutions discussed below. -If a date is entered in one column, we can use functions to extract information from that column into other columns. For example, it can be useful to display the specific information about the year, month, and day. Conversely, these functions can convert supplied numerical values from numbers into dates. 
Date-related functions allow us to convert date values from the stored numerical value to a readable display value, make calculations between date values, and also to extract the date values so that they do not change as data is transformed or exchanged between new users and systems. +If a date is entered in one column, we can use functions to extract information from that column into other columns. For example, it can be useful to display the specific information about the year, month, and day. Conversely, these functions can convert supplied numerical values from numbers into dates. Date-related functions allow us to convert date values from the stored numerical value to a readable display value, make calculations between date values, and also to extract the date values so that they do not change as data is transformed or exchanged between new users and systems. The table below outlines a few useful date-related functions and how they differ between some of the widely used spreadsheet applications. -Action of function | Excel | LibreOffice | OpenOffice ---- | --- | --- | --- | -Return the year number represented in the referenced cell value | ```YEAR()``` | ```YEAR()``` | | -Return the month number represented in the referenced date serial number | ```MONTH()``` | ```MONTH()``` | | -Return the day of the month represented in the referenced date serial number | ```DAY()``` | ```DAY()``` | | -Calculate and display a date based on supplied year, month, and day values | ```DATE(Year, Month, Day)``` | ```DATE(Year; Month; Day)``` | | -Return the serial number for date information supplied as a string | ```DATEVALUE()``` | ```DATEVALUE("Text")``` | | -Change display of a number by applying specified formatting | ```TEXT(Value, "Formatting code to apply")``` | ```TEXT(Value; "Formatting to apply")``` | | -Return the current system date | ```NOW()``` | ```NOW()``` | | - -> ## Using Date-Related Functions (Excel) -> -> Pulling month, day, and year out of dates: -> -> - In the `Dates` tab of your Excel file we summarized training data from 2015. There's a `date` column. -> - Extract month, day and year from the date to three new columns. -> -> Tip: Make sure the new column is formatted as a number and not as a date. Change the function to correspond to each row: i.e., =MONTH(A3), =DAY(A3), =YEAR(A3) for the next row. -> -> > ## Solution -> > -> > You can see that even though you wanted the year to be 2015 for all entries, your spreadsheet program interpreted two entries as 2017, the year the data was entered, not the year of the workshop. 
-> > ![dates, exersize 1](../fig/3_Dates_as_Columns.png) -> {: .solution} -{: .challenge} +| Action of function | Excel | LibreOffice | OpenOffice | +| ---------------------------------------------------------------------------- | ----- | ----------- | ---------- | +| Return the year number represented in the referenced cell value | `YEAR()` | `YEAR()` | | +| Return the month number represented in the referenced date serial number | `MONTH()` | `MONTH()` | | +| Return the day of the month represented in the referenced date serial number | `DAY()` | `DAY()` | | +| Calculate and display a date based on supplied year, month, and day values | `DATE(Year, Month, Day)` | `DATE(Year; Month; Day)` | | +| Return the serial number for date information supplied as a string | `DATEVALUE()` | `DATEVALUE("Text")` | | +| Change display of a number by applying specified formatting | `TEXT(Value, "Formatting code to apply")` | `TEXT(Value; "Formatting to apply")` | | +| Return the current system date | `NOW()` | `NOW()` | | + +::::::::::::::::::::::::::::::::::::::: challenge + +## Using Date-Related Functions (Excel) + +Pulling month, day, and year out of dates: + +- In the `Dates` tab of your Excel file we summarized training data from 2015. There's a `date` column. +- Extract month, day and year from the date to three new columns. + +Tip: Make sure the new column is formatted as a number and not as a date. Change the function to correspond to each row: i.e., =MONTH(A3), =DAY(A3), =YEAR(A3) for the next row. + +::::::::::::::: solution + +## Solution + +You can see that even though you wanted the year to be 2015 for all entries, your spreadsheet program interpreted two entries as 2017, the year the data was entered, not the year of the workshop. +![](fig/3_Dates_as_Columns.png){alt="dates, exersize 1"} + + + +::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::::::::::::: ### Adding dates @@ -150,22 +167,30 @@ that quantities are added to the correct entities. ## Advantages of Alternative Date Formatting -The display ambiguities discussed above can lead to unintended changes or unknown errors in your data. Exchanging data between applications or converting data into different formats can also create unexpected changes. Similar issues also cause challenges for data interoperability, sharing and reuse, and long-term preservation. Alternative date formats can help to address these issues, and we will learn about three below. First, however, let's explore the challenges further by seeing what happens to date information when converted between different formats. - -> ## Exchanging Date Information between Formats -> -> What happens to the dates in the `dates` tab of our workbook if we save this sheet in Excel (in `csv` format) and then open the file in a plain text editor (like TextEdit or Notepad)? What happens to the dates if we then open the `csv` file in Excel? -> -> > ## Solution -> > -> > - Click to the `dates` tab of the workbook and double-click on any of the values in the `Date collected` column. Notice that most of the dates display with the year 2015 and two are 2017. -> > - Select `File -> Save As` in Excel and in the drop down menu for file format select `CSV UTF-8 (Comma delimited) (.csv)`. Click `Save`. -> > - You will see a pop-up that says 'This workbook cannot be saved in the selected file format because it contains multiple sheets.' Choose `Save Active Sheet`. -> > - Navigate to the file in Finder (Mac) or Explorer (Windows). Right click and select `Open With`. 
Choose a plain text editor application and view the file. Notice that the dates display as month/day without any year information. -> > - Now right click on the file again and open with Excel. Notice that the dates display with the current year, not 2015. -> > As you can see, exporting data from Excel and then importing it back into Excel fundamentally changed the data once again! -> {: .solution} -{: .challenge} +The display ambiguities discussed above can lead to unintended changes or unknown errors in your data. Exchanging data between applications or converting data into different formats can also create unexpected changes. Similar issues also cause challenges for data interoperability, sharing and reuse, and long-term preservation. Alternative date formats can help to address these issues, and we will learn about three below. First, however, let's explore the challenges further by seeing what happens to date information when converted between different formats. + +::::::::::::::::::::::::::::::::::::::: challenge + +## Exchanging Date Information between Formats + +What happens to the dates in the `dates` tab of our workbook if we save this sheet in Excel (in `csv` format) and then open the file in a plain text editor (like TextEdit or Notepad)? What happens to the dates if we then open the `csv` file in Excel? + +::::::::::::::: solution + +## Solution + +- Click to the `dates` tab of the workbook and double-click on any of the values in the `Date collected` column. Notice that most of the dates display with the year 2015 and two are 2017. +- Select `File -> Save As` in Excel and in the drop down menu for file format select `CSV UTF-8 (Comma delimited) (.csv)`. Click `Save`. +- You will see a pop-up that says 'This workbook cannot be saved in the selected file format because it contains multiple sheets.' Choose `Save Active Sheet`. +- Navigate to the file in Finder (Mac) or Explorer (Windows). Right click and select `Open With`. Choose a plain text editor application and view the file. Notice that the dates display as month/day without any year information. +- Now right click on the file again and open with Excel. Notice that the dates display with the current year, not 2015. + As you can see, exporting data from Excel and then importing it back into Excel fundamentally changed the data once again! + + + +::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::::::::::::: ### Storing dates as YEAR, MONTH, DAY {#day} @@ -173,7 +198,7 @@ Storing dates in YEAR, MONTH, DAY format helps remove this ambiguity. Let's look For instance, in a spreadsheet recording insect counts every few days in July 2001, the data displayed as shown below in Column A. Note that the data was recorded in only one cell in each row, and the data only included reference to the month and day (`-`). -![So, so ambiguous, it's even confusing Excel](../fig/6_excel_dates_2.jpg) +![](fig/6_excel_dates_2.jpg){alt="So, so ambiguous, it's even confusing Excel"} When interpreted in Excel, it appears that the observations had been recorded in 2010, 2014, 2015 and 2017 even though our records state that the data was gathered in 2001. @@ -189,9 +214,9 @@ question, this might be what's useful to you, and there is practically no possib Statistical models often incorporate year as a factor, to account for year-to-year variation, and DOY can be used to measure the passage of time within a year. -So, can you convert all your dates into DOY format? 
Well, in Excel, here’s a handy dandy guide: +So, can you convert all your dates into DOY format? Well, in Excel, here's a handy dandy guide: -![Kill that ambiguity before it bites you!](../fig/7_excel_dates_3.jpg) +![](fig/7_excel_dates_3.jpg){alt="Kill that ambiguity before it bites you!"} ### Storing dates and times as a single string {#str} @@ -224,10 +249,19 @@ and generally make your data table more accessible and interoperable. Working with dates and day of year information can be unfamiliar since it does not match how we typically see dates on a calendar by the day of a month. These resources provide useful information for calculating the day of the year: -* The Earth Systems Research Lab provides this calendar that displays day of year information for any year you select: [https://www.esrl.noaa.gov/gmd/grad/neubrew/Calendar.jsp](https://www.esrl.noaa.gov/gmd/grad/neubrew/Calendar.jsp) -* The U.S. National Snow and Ice Data Center provides a useful chart to calculate the day of year: [https://nsidc.org/support/faq/day-year-doy-calendar](https://nsidc.org/support/faq/day-year-doy-calendar) +- The Earth Systems Research Lab provides this calendar that displays day of year information for any year you select: [https://www.esrl.noaa.gov/gmd/grad/neubrew/Calendar.jsp](https://www.esrl.noaa.gov/gmd/grad/neubrew/Calendar.jsp) +- The U.S. National Snow and Ice Data Center provides a useful chart to calculate the day of year: [https://nsidc.org/support/faq/day-year-doy-calendar](https://nsidc.org/support/faq/day-year-doy-calendar) Most spreadsheet applications offer more detailed information about working with date and time information. Some of these are referenced below: -* Microsoft Excel [date and time functions reference](https://support.microsoft.com/en-us/office/date-and-time-functions-reference-fd1b5961-c1ae-4677-be58-074152f97b81) -* LibreOffice Date & Time Functions Reference [v. 6.2](https://help.libreoffice.org/6.2/en-US/text/scalc/01/04060102.html) \ No newline at end of file +- Microsoft Excel [date and time functions reference](https://support.microsoft.com/en-us/office/date-and-time-functions-reference-fd1b5961-c1ae-4677-be58-074152f97b81) +- LibreOffice Date \& Time Functions Reference [v. 6.2](https://help.libreoffice.org/6.2/en-US/text/scalc/01/04060102.html) + +:::::::::::::::::::::::::::::::::::::::: keypoints + +- Excel is notoriously bad at handling dates. +- Treating dates as multiple pieces of data rather than one makes them easier to handle and exchange between programs. + +:::::::::::::::::::::::::::::::::::::::::::::::::: + + diff --git a/episodes/04-quality-control.md b/episodes/04-quality-control.md index dc5f74b..8a1e71b 100644 --- a/episodes/04-quality-control.md +++ b/episodes/04-quality-control.md @@ -2,30 +2,33 @@ title: Basic quality assurance and control teaching: 20 exercises: 10 -questions: - - "How can you keep data entry clean?" -objectives: - - "Apply quality assurance techniques to limit incorrect data entry." - - "Apply quality control techniques to identify errors in spreadsheets." -keypoints: - - "Use data validation tools to minimise the possibility of input errors." - - "Use sorting and conditional formatting to identify possible errors." 
- authors: - - Jez Cope - - Christie Bahlai - - Aleksandra Pawlik +- Jez Cope +- Christie Bahlai +- Aleksandra Pawlik contributors: - - Jennifer Bryan - - Alexander Duryee - - Jeffrey Hollister - - Daisie Huang - - Owen Jones - - Ben Marwick - - Ethan White - - Sherry Lake +- Jennifer Bryan +- Alexander Duryee +- Jeffrey Hollister +- Daisie Huang +- Owen Jones +- Ben Marwick +- Ethan White +- Sherry Lake --- +::::::::::::::::::::::::::::::::::::::: objectives + +- Apply quality assurance techniques to limit incorrect data entry. +- Apply quality control techniques to identify errors in spreadsheets. + +:::::::::::::::::::::::::::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::: questions + +- How can you keep data entry clean? + +:::::::::::::::::::::::::::::::::::::::::::::::::: When you have a well-structured data table, you can use several simple techniques within your spreadsheet to ensure the data you enter is @@ -34,7 +37,7 @@ implemented **prior to entering data** (**quality assurance**) and techniques that are used **after entering data to check for errors** (**quality control**). -# Quality Assurance +## Quality Assurance Quality assurance stops bad data from ever being entered by checking to see if values are valid during data entry. For example, if research is being conducted @@ -50,18 +53,18 @@ in each data column. 1. Select the cells or column you want to validate 2. On the `Data` tab select `Data Validation` - - ![Image of Data Validation button on Data tab](../fig/data_validation.png) + + ![](fig/data_validation.png){alt='Image of Data Validation button on Data tab'} 3. In the `Allow` box select the kind of data that should be in the - column. Options include whole numbers, decimals, lists of items, dates, and - other values. - - ![Image of Data Validation window](../fig/data_validation_window.png) + column. Options include whole numbers, decimals, lists of items, dates, and + other values. + + ![](fig/data_validation_window.png){alt='Image of Data Validation window'} 4. After selecting an item enter any additional details. For example if you've - chosen a list of values then enter a comma-delimited list of allowable - values in the `Source` box. + chosen a list of values then enter a comma-delimited list of allowable + values in the `Source` box. We can't have half a person attending a workshop, so let's try this out by setting the `num_registered` column in our spreadsheet to only @@ -72,22 +75,21 @@ allow whole numbers between 1 and 100. 3. In the `Allow` box select `Whole number` 4. Set the minimum and maximum values to 1 and 100. -![Image of Data Validation window for validating plot values](../fig/4_data-validation-whole-num.png) +![](fig/4_data-validation-whole-num.png){alt='Image of Data Validation window for validating plot values'} Now let's try entering a new value in the `num_registered` column that isn't a valid class size. The spreadsheet stops us from entering the wrong value and asks us if we would like to try again. -![Image of error when trying to enter invalid data](../fig/4_data-validation-alert.png) +![](fig/4_data-validation-alert.png){alt='Image of error when trying to enter invalid data'} You can customize the resulting message to be more informative by entering your own message in the `Error Alert` tab, and you can edit the `Style` -for when a non-valid value is entered, by not allowing other values or just give a warning about non valid entries. 
- -![Image of Error Alert tab](../fig/4_data-validation-error-msg.png) +for when an invalid value is entered, either disallowing other values entirely or just giving a warning about invalid entries. +![](fig/4_data-validation-error-msg.png){alt='Image of Error Alert tab'} To display a pop-up message about the correct values for a column with Data Validation set, use the `Input Message` tab. -![Image of Input Message tab](../fig/4_data-validation-input-message.png) +![](fig/4_data-validation-input-message.png){alt='Image of Input Message tab'} Quality assurance can make data entry easier as well as more robust. For example, if you use a list of options to restrict data entry, the spreadsheet @@ -95,48 +97,59 @@ will provide you with a drop-down list of the available items. So, instead of trying to remember the workshop title abbreviation, you can just select the right option from the list. -![Image of drop-down menu](../fig/4_data-validation-auto-complete.png) +![](fig/4_data-validation-auto-complete.png){alt='Image of drop-down menu'} + +## Quality Control + +::::::::::::::::::::::::::::::::::::::::: callout -# Quality Control -> ## Tip! -> -> Before doing any quality control operations, save your original file with the formulas and a name indicating it is the original data. Create a **separate file** with appropriate naming and versioning, and ensure your data is stored as **values** and not as **formulas**. Because formulas refer to other cells, and you may be moving cells around, you may compromise the integrity of your data if you do not take this step! -{: .callout} +### Tip! + +Before doing any quality control operations, save your original file with the formulas and a name indicating it is the original data. Create a **separate file** with appropriate naming and versioning, and ensure your data is stored as **values** and not as **formulas**. Because formulas refer to other cells, and you may be moving cells around, you may compromise the integrity of your data if you do not take this step! -**readme (README) files:** As you start manipulating your data files, create a readme document / text file to keep track of your files and document your manipulations so that they may be easily understood and replicated, either by your future self or by an independent researcher. Your readme file should document all of the files in your data set (including documentation), describe their content and format, and lay out the organizing principles of folders and subfolders. For each of the separate files listed, it is a good idea to document the manipulations or analyses that were carried out on those data. [Cornell University's Research Data Management Service Group](https://data.research.cornell.edu/content/readme) provides detailed guidelines for how to write a good readMe file, along with an adaptable template. -## Sorting +:::::::::::::::::::::::::::::::::::::::::::::::::: + +**readme (README) files:** As you start manipulating your data files, create a readme document / text file to keep track of your files and document your manipulations so that they may be easily understood and replicated, either by your future self or by an independent researcher. Your readme file should document all of the files in your data set (including documentation), describe their content and format, and lay out the organizing principles of folders and subfolders. For each of the separate files listed, it is a good idea to document the manipulations or analyses that were carried out on those data. 
[Cornell University's Research Data Management Service Group](https://data.research.cornell.edu/content/readme) provides detailed guidelines for how to write a good readMe file, along with an adaptable template. + +### Sorting **Bad values often sort to bottom or top of the column**. For example, if your data should be numeric, then alphabetical and null data will group at the ends of the sorted data. Sort your data by each field, one at a time. Scan through each column, but pay the most attention to the top and the bottom of a column. If your dataset is well-structured and does not contain formulas, sorting should never affect the integrity of your dataset. -> ## Exercise -> -> Let's try this with the *Dates* tab in our messy spreadsheet. Go to that tab. Select -> **Data** then select **Sort** -> -> Sort by `len_hours` in the order *Largest to Smallest* -> -> - When you do this sort, do you notice anything strange? -> -> - Try sorting by other columns. Anything strange there? -> ->> ## Solution ->> ->> Click the Sort button on the Data tab in Excel. A pop-up will appear. ->> ->> The following window will display, choose the column you want to sort as well as the sort order. ->> ->> ![Figure of Sorting menu](../fig/4-sort-len_hours.png) ->> ->> Note how the odd values sort to the top. The cells containing “min” or "hour" are found towards the top. Larger values like 90, 60 and 15 also are sorted so you can evaluate them. This is a powerful way to check your data for outliers and odd values. ->> ![Sorted data](../fig/4-sorted-len_hours.png) -> {: .solution} -{: .challenge} - - -## Conditional formatting ## +::::::::::::::::::::::::::::::::::::::: challenge + +### Exercise + +Let's try this with the *Dates* tab in our messy spreadsheet. Go to that tab. Select +**Data** then select **Sort** + +Sort by `len_hours` in the order *Largest to Smallest* + +- When you do this sort, do you notice anything strange? + +- Try sorting by other columns. Anything strange there? + +::::::::::::::: solution + +### Solution + +Click the Sort button on the Data tab in Excel. A pop-up will appear. + +The following window will display, choose the column you want to sort as well as the sort order. + +![](fig/4-sort-len_hours.png){alt='Figure of Sorting menu'} + +Note how the odd values sort to the top. The cells containing "min" or "hour" are found towards the top. Larger values like 90, 60 and 15 also are sorted so you can evaluate them. This is a powerful way to check your data for outliers and odd values. +![](fig/4-sorted-len_hours.png){alt='Sorted data'} + + + +::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::::::::::::: + +### Conditional formatting Use with caution! But a great way to flag inconsistent values when entering data. @@ -144,16 +157,34 @@ Conditional formatting basically can do something like color code your values by criteria or from lowest to highest. This makes it easy to scan your data for outliers. It is nice to be able to do these scans in spreadsheets, but we also can do these checks in a programming language like Python or R, or in OpenRefine or SQL. -> ## Exercise -> -> 1. Make sure the `num_attended` column is highlighted. -> 1. Go to **Format** then **Conditional Formatting**. -> 1. Apply any 2-Color Scale formatting rule. -> 1. Now we can scan through and different colors will stand out. Do you notice any strange values? -> ->> ## Solution ->> We can see two outlier cells of 0 and can see these two classes were canceled. 
->>![Conditional Formatting](../fig/4_conditional-formatting.png) ->>{: .output} -> {: .solution} -{: .challenge} +::::::::::::::::::::::::::::::::::::::: challenge + +### Exercise + +1. Make sure the `num_attended` column is highlighted. +2. Go to **Format** then **Conditional Formatting**. +3. Apply any 2-Color Scale formatting rule. +4. Now we can scan through and different colors will stand out. Do you notice any strange values? + +::::::::::::::: solution + +### Solution + +We can see two outlier cells of 0 and can see these two classes were canceled. +![](fig/4_conditional-formatting.png) +{alt='Conditional Formatting' .output} + + + +::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::: keypoints + +- Use data validation tools to minimise the possibility of input errors. +- Use sorting and conditional formatting to identify possible errors. + +:::::::::::::::::::::::::::::::::::::::::::::::::: + + diff --git a/episodes/05-exporting-data.md b/episodes/05-exporting-data.md index 476373f..d237049 100644 --- a/episodes/05-exporting-data.md +++ b/episodes/05-exporting-data.md @@ -2,40 +2,43 @@ title: Exporting data from spreadsheets teaching: 10 exercises: 0 -questions: - - What problems are there with Excel files? - - How can we share data from spreadsheets that is useful for a variety of applications? -objectives: - - Store spreadsheet data in universal file formats. - - Export data from a spreadsheet to a .csv file. -keypoints: - - Use .csv file format for data storage and processing - authors: - # - Jez Cope - - Christie Bahlai - - Aleksandra Pawlik +- Christie Bahlai +- Aleksandra Pawlik contributors: - - Jennifer Bryan - - Alexander Duryee - - Jeffrey Hollister - - Daisie Huang - - Owen Jones - - Ben Marwick - - Sherry Lake +- Jennifer Bryan +- Alexander Duryee +- Jeffrey Hollister +- Daisie Huang +- Owen Jones +- Ben Marwick +- Sherry Lake --- +::::::::::::::::::::::::::::::::::::::: objectives + +- Store spreadsheet data in universal file formats. +- Export data from a spreadsheet to a .csv file. + +:::::::::::::::::::::::::::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::: questions + +- What problems are there with Excel files? +- How can we share data from spreadsheets that is useful for a variety of applications? + +:::::::::::::::::::::::::::::::::::::::::::::::::: Storing the data you're going to work with for your analyses in Excel default file format (`*.xls` or `*.xlsx` - depending on the Excel version) is a **bad idea**. Why? - Because it is a **proprietary format**, and it is possible that in - the future, technology won’t exist (or will become sufficiently + the future, technology won't exist (or will become sufficiently rare) to make it inconvenient, if not impossible, to open the file. - - Think about zipdisks. How many old theses in your lab are “backed - up” and stored on zipdisks? Ever wanted to pull out the raw data + + Think about zipdisks. How many old theses in your lab are "backed + up" and stored on zipdisks? Ever wanted to pull out the raw data from one of those? *Exactly.* - **Other spreadsheet software** may not be able to open files @@ -45,13 +48,16 @@ version) is a **bad idea**. Why? differently, leading to inconsistencies. + + + -As an example, do you remember how we talked about how Excel stores **dates** earlier? Turns out there are **multiple defaults for different versions of the software**. And you can switch between them all willy-nilly. 
So, say you’re compiling Excel-stored data from multiple sources. There’s dates in each file- Excel interprets them as their own internally consistent serial numbers. When you combine the data, Excel will take the serial number from the place you’re importing it from, and interpret it using the rule set for the version of Excel you’re using. Essentially, you could be adding a huge error to your data, and it wouldn’t necessarily be flagged by any data cleaning methods if your ranges overlap. +As an example, do you remember how we talked about how Excel stores **dates** earlier? Turns out there are **multiple defaults for different versions of the software**. And you can switch between them all willy-nilly. So, say you're compiling Excel-stored data from multiple sources. There's dates in each file- Excel interprets them as their own internally consistent serial numbers. When you combine the data, Excel will take the serial number from the place you're importing it from, and interpret it using the rule set for the version of Excel you're using. Essentially, you could be adding a huge error to your data, and it wouldn't necessarily be flagged by any data cleaning methods if your ranges overlap. -Storing data in a **universal**, **open**, **static format** will help deal with this problem. Try **tab-delimited** or **CSV** (more common). CSV files are plain text files where the columns are separated by commas, hence 'comma separated variables' or CSV. The advantage of a CSV over an Excel/SPSS/etc. file is that we can open and read a CSV file using just about any software, including a simple **text editor**. Data in a CSV can also be **easily imported** into other formats and environments, such as SQLite and R. We're not tied to a certain version of a certain expensive program when we work with CSV, so it's a good format to work with for maximum portability and endurance. Most spreadsheet programs can save to delimited text formats like CSV easily, although they complain and make you feel like you’re doing something wrong along the way. +Storing data in a **universal**, **open**, **static format** will help deal with this problem. Try **tab-delimited** or **CSV** (more common). CSV files are plain text files where the columns are separated by commas, hence 'comma separated variables' or CSV. The advantage of a CSV over an Excel/SPSS/etc. file is that we can open and read a CSV file using just about any software, including a simple **text editor**. Data in a CSV can also be **easily imported** into other formats and environments, such as SQLite and R. We're not tied to a certain version of a certain expensive program when we work with CSV, so it's a good format to work with for maximum portability and endurance. Most spreadsheet programs can save to delimited text formats like CSV easily, although they complain and make you feel like you're doing something wrong along the way. To save a file you have opened in Excel in `*.csv` format: @@ -59,7 +65,7 @@ To save a file you have opened in Excel in `*.csv` format: 2. In the 'Format' field, from the list, select 'Comma Separated Values' (`*.csv`). 3. Double check the file name and the location where you want to save it and hit 'Save'. -![Saving an Excel file to CSV](../fig/excel-to-csv.png) +![](fig/excel-to-csv.png){alt='Saving an Excel file to CSV'} An important note for backwards compatibility: you can open CSVs in Excel! 
@@ -69,49 +75,50 @@ By default, most coding and statistical environments expect UNIX-style line endi As such, when exporting to CSV using Excel, your data in text format will look like this: -~~~ +``` data1,data21,24,5 -~~~ +``` When opening your CSV file in Excel again, it will parse it as follows: -![CR LF](../fig/NewLine_example.png) +![](fig/NewLine_example.png){alt='CR LF'} However, if you open your CSV file on a different system that does not parse the `CR` character it will interpret your CSV file differently: Your data in text format then look like this: -~~~ +``` data1,data2 1,2 … -~~~ +``` You will then see a weird character or possibly the string `CR` or `\r`: -![no CR LF](../fig/NewLine_example2.png) +![](fig/NewLine_example2.png){alt='no CR LF'} -thus causing terrible things to happen to your data. For example, `2\r` is not a valid integer, and thus will throw an error (if you’re lucky) when you attempt to operate on it in R or Python. Note that this happens on Excel for macOS as well as Windows, due to legacy Windows compatibility. +thus causing terrible things to happen to your data. For example, `2\r` is not a valid integer, and thus will throw an error (if you're lucky) when you attempt to operate on it in R or Python. Note that this happens on Excel for macOS as well as Windows, due to legacy Windows compatibility. There are a handful of solutions for enforcing uniform UNIX-style line endings on your exported CSVs: -1. When exporting from Excel, save as a “Windows comma separated (.csv)” file -2. If you store your data file under version control (which you should be doing!) using Git, edit the `.git/config` file in your repository to automatically translate `\r\n` line endings into `\n`. - Add the following to the file ([see the detailed tutorial](http://nicercode.github.io/blog/2013-04-30-excel-and-line-endings)): - - ``` - [filter "cr"] - clean = LC_CTYPE=C awk '{printf(\"%s\\n\", $0)}' | LC_CTYPE=C tr '\\r' '\\n' - smudge = tr '\\n' '\\r'` - ``` - - and then create a file `.gitattributes` that contains the line: +1. When exporting from Excel, save as a "Windows comma separated (.csv)" file - ``` - *.csv filter=cr - ``` - -3. Use [dos2unix](http://dos2unix.sourceforge.net/) (available on OSX, *nix, and Cygwin) on local files to standardize line endings. +2. If you store your data file under version control (which you should be doing!) using Git, edit the `.git/config` file in your repository to automatically translate `\r\n` line endings into `\n`. + Add the following to the file ([see the detailed tutorial](https://nicercode.github.io/blog/2013-04-30-excel-and-line-endings)): + + ``` + [filter "cr"] + clean = LC_CTYPE=C awk '{printf(\"%s\\n\", $0)}' | LC_CTYPE=C tr '\\r' '\\n' + smudge = tr '\\n' '\\r'` + ``` + + and then create a file `.gitattributes` that contains the line: + + ``` + *.csv filter=cr + ``` + +3. Use [dos2unix](https://dos2unix.sourceforge.net/) (available on OSX, \*nix, and Cygwin) on local files to standardize line endings. #### A note on Python and `xls` @@ -125,3 +132,11 @@ worksheets in the `xls` documents. additional complexity/dependencies in the data analysis Python code - **data formatting best practice STILL apply** - Is there really a good reason why `csv` (or similar) is not adequate? 
+ +:::::::::::::::::::::::::::::::::::::::: keypoints + +- Use .csv file format for data storage and processing + +:::::::::::::::::::::::::::::::::::::::::::::::::: + + diff --git a/episodes/06-data-formats-caveats.md b/episodes/06-data-formats-caveats.md index eb10146..aea1dc7 100644 --- a/episodes/06-data-formats-caveats.md +++ b/episodes/06-data-formats-caveats.md @@ -2,54 +2,68 @@ title: Caveats of popular data and file formats teaching: 5 exercises: 0 -questions: -- What do you need to be aware of when exporting data? -objectives: +--- + +::::::::::::::::::::::::::::::::::::::: objectives + - Identify problems with using the .csv file format. - Apply best practices for data cleaning to avoid problems with the .csv file format. -keypoints: -- Be careful when using commas in values ---- + +:::::::::::::::::::::::::::::::::::::::::::::::::: + +:::::::::::::::::::::::::::::::::::::::: questions + +- What do you need to be aware of when exporting data? + +:::::::::::::::::::::::::::::::::::::::::::::::::: -## Dealing with commas as part of data values in `*.csv` files ## +## Dealing with commas as part of data values in `*.csv` files -In the [previous lesson](../05-exporting-data) we discussed how to export Excel file formats into `*.csv`. Whilst Comma Separated Value files are indeed very useful allowing for easily exchanging and sharing data. +In the [previous lesson](05-exporting-data.md) we discussed how to export Excel file formats into `*.csv`. Comma Separated Value files are indeed very useful, allowing data to be easily exchanged and shared. However, there are some significant problems with this particular format. Quite often the data values themselves may include commas (,). In that case, the software which you use (including Excel) will most likely display the data incorrectly across columns. This is because the commas that are part of the data values will be interpreted as delimiters. Data could look like this: -~~~ +``` date,type,len_hours,num_registered,num_attended,trainer,cancelled 29 Apr,OA,1.5,1.5,15,JM,N 3 Mar,OA,60,19,25,PG,N 3 Jul,OA,1,25,20,PG, JM ,N 4 Jan,OA,1,26,17,JM,N 29 Mar,RDM,1,27,24,JM,N -~~~ +``` In record `3 Jul,OA,1,25,20,PG, JM ,N` the value for *trainer* includes a comma for multiple trainers (`PG, JM`). If we try to read the above into Excel (or another spreadsheet programme), we will get something like this: -![Issue with importing csv format](../fig/csv-mistake.png) +![](fig/csv-mistake.png){alt='Issue with importing csv format'} The value for 'trainer' was split into two columns (instead of being put in one column `F`). This can propagate to a number of further errors. For example, the "extra" column will be interpreted as a column with many missing values (and without a proper header!). If you want to store your data in `csv` format and expect that your data values may contain commas, you can avoid the problem discussed above by putting the values to be included in the same column in quotes (""). Applying this rule, the data might look like this: -~~~ +``` date,type,len_hours,num_registered,num_attended,trainer,cancelled 29 Apr,OA,1.5,1.5,15,JM,N 3 Mar,OA,60,19,25,PG,N 3 Jul,OA,1,25,20,"PG, JM",N 4 Jan,OA,1,26,17,JM,N 29 Mar,RDM,1,27,24,JM,N -~~~ +``` Now opening this file as a `csv` in Excel will not lead to an extra column, because Excel will only use commas that fall outside of quotation marks as delimiting characters. 
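This quoting rule is exactly what CSV libraries apply for you. As a small illustrative sketch (the file name is invented; the row mirrors the example above), Python's built-in `csv` module adds the quotes on writing and removes them again on reading:

```python
# Illustrative sketch: Python's built-in csv module quotes a field that
# contains the delimiter when writing, and strips the quotes when reading.
# The file name is made up; the row mirrors the example above.
import csv

row = ["3 Jul", "OA", "1", "25", "20", "PG, JM", "N"]

with open("sessions.csv", "w", newline="", encoding="utf-8") as f:
    csv.writer(f).writerow(row)  # written as: 3 Jul,OA,1,25,20,"PG, JM",N

with open("sessions.csv", newline="", encoding="utf-8") as f:
    print(next(csv.reader(f)))   # ['3 Jul', 'OA', '1', '25', '20', 'PG, JM', 'N']
```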
However, if you are working with an already existing dataset in which the data values are not included in "" but which have commas as both delimiters and parts of data values, you are potentially facing a major problem with data cleaning. If the dataset you're dealing with contains hundreds or thousands of records, cleaning them up manually (by either removing commas from the data values or putting the values into quotes - "") is not only going to take hours and hours but may potentially end up with you accidentally introducing many errors. Cleaning up datasets is one of the major problems in many scientific disciplines. The approach almost always depends on the particular context. However, it is a good practice to clean the data in an automated fashion, for example by writing and running a script. The Python and R lessons will give you the basis for developing skills to build relevant scripts. + +:::::::::::::::::::::::::::::::::::::::: keypoints + +- Be careful when using commas in values + +:::::::::::::::::::::::::::::::::::::::::::::::::: + + diff --git a/data/training_attendance.xlsx b/episodes/data/training_attendance.xlsx similarity index 100% rename from data/training_attendance.xlsx rename to episodes/data/training_attendance.xlsx diff --git a/fig/1_helpful_clippy.jpg b/episodes/fig/1_helpful_clippy.jpg similarity index 100% rename from fig/1_helpful_clippy.jpg rename to episodes/fig/1_helpful_clippy.jpg diff --git a/fig/2_Multiple_Tables.png b/episodes/fig/2_Multiple_Tables.png similarity index 100% rename from fig/2_Multiple_Tables.png rename to episodes/fig/2_Multiple_Tables.png diff --git a/fig/2_datasheet_example.jpg b/episodes/fig/2_datasheet_example.jpg similarity index 100% rename from fig/2_datasheet_example.jpg rename to episodes/fig/2_datasheet_example.jpg diff --git a/fig/3_Dates_as_Columns.png b/episodes/fig/3_Dates_as_Columns.png similarity index 100% rename from fig/3_Dates_as_Columns.png rename to episodes/fig/3_Dates_as_Columns.png diff --git a/fig/3_white_table_1.jpg b/episodes/fig/3_white_table_1.jpg similarity index 100% rename from fig/3_white_table_1.jpg rename to episodes/fig/3_white_table_1.jpg diff --git a/fig/4-sort-len_hours.png b/episodes/fig/4-sort-len_hours.png similarity index 100% rename from fig/4-sort-len_hours.png rename to episodes/fig/4-sort-len_hours.png diff --git a/fig/4-sorted-len_hours.png b/episodes/fig/4-sorted-len_hours.png similarity index 100% rename from fig/4-sorted-len_hours.png rename to episodes/fig/4-sorted-len_hours.png diff --git a/fig/4_conditional-formatting.png b/episodes/fig/4_conditional-formatting.png similarity index 100% rename from fig/4_conditional-formatting.png rename to episodes/fig/4_conditional-formatting.png diff --git a/fig/4_data-validation-alert.png b/episodes/fig/4_data-validation-alert.png similarity index 100% rename from fig/4_data-validation-alert.png rename to episodes/fig/4_data-validation-alert.png diff --git a/fig/4_data-validation-auto-complete.png b/episodes/fig/4_data-validation-auto-complete.png similarity index 100% rename from fig/4_data-validation-auto-complete.png rename to episodes/fig/4_data-validation-auto-complete.png diff --git a/fig/4_data-validation-error-msg.png b/episodes/fig/4_data-validation-error-msg.png similarity index 100% rename from fig/4_data-validation-error-msg.png rename to episodes/fig/4_data-validation-error-msg.png diff --git a/fig/4_data-validation-input-message.png b/episodes/fig/4_data-validation-input-message.png similarity index 100% rename from 
fig/4_data-validation-input-message.png rename to episodes/fig/4_data-validation-input-message.png diff --git a/fig/4_data-validation-whole-num.png b/episodes/fig/4_data-validation-whole-num.png similarity index 100% rename from fig/4_data-validation-whole-num.png rename to episodes/fig/4_data-validation-whole-num.png diff --git a/fig/4_merged_cells.jpg b/episodes/fig/4_merged_cells.jpg similarity index 100% rename from fig/4_merged_cells.jpg rename to episodes/fig/4_merged_cells.jpg diff --git a/fig/5_excel_dates_1.jpg b/episodes/fig/5_excel_dates_1.jpg similarity index 100% rename from fig/5_excel_dates_1.jpg rename to episodes/fig/5_excel_dates_1.jpg diff --git a/fig/6_excel_dates_2.jpg b/episodes/fig/6_excel_dates_2.jpg similarity index 100% rename from fig/6_excel_dates_2.jpg rename to episodes/fig/6_excel_dates_2.jpg diff --git a/fig/7_excel_dates_3.jpg b/episodes/fig/7_excel_dates_3.jpg similarity index 100% rename from fig/7_excel_dates_3.jpg rename to episodes/fig/7_excel_dates_3.jpg diff --git a/fig/NewLine_example.png b/episodes/fig/NewLine_example.png similarity index 100% rename from fig/NewLine_example.png rename to episodes/fig/NewLine_example.png diff --git a/fig/NewLine_example2.png b/episodes/fig/NewLine_example2.png similarity index 100% rename from fig/NewLine_example2.png rename to episodes/fig/NewLine_example2.png diff --git a/fig/csv-mistake.png b/episodes/fig/csv-mistake.png similarity index 100% rename from fig/csv-mistake.png rename to episodes/fig/csv-mistake.png diff --git a/fig/data_validation.png b/episodes/fig/data_validation.png similarity index 100% rename from fig/data_validation.png rename to episodes/fig/data_validation.png diff --git a/fig/data_validation_window.png b/episodes/fig/data_validation_window.png similarity index 100% rename from fig/data_validation_window.png rename to episodes/fig/data_validation_window.png diff --git a/fig/drop_down_list.png b/episodes/fig/drop_down_list.png similarity index 100% rename from fig/drop_down_list.png rename to episodes/fig/drop_down_list.png diff --git a/fig/error_alert.png b/episodes/fig/error_alert.png similarity index 100% rename from fig/error_alert.png rename to episodes/fig/error_alert.png diff --git a/fig/excel-to-csv.png b/episodes/fig/excel-to-csv.png similarity index 100% rename from fig/excel-to-csv.png rename to episodes/fig/excel-to-csv.png diff --git a/fig/excel_tables_example.png b/episodes/fig/excel_tables_example.png similarity index 100% rename from fig/excel_tables_example.png rename to episodes/fig/excel_tables_example.png diff --git a/fig/excel_tables_example1.png b/episodes/fig/excel_tables_example1.png similarity index 100% rename from fig/excel_tables_example1.png rename to episodes/fig/excel_tables_example1.png diff --git a/fig/formatting.png b/episodes/fig/formatting.png similarity index 100% rename from fig/formatting.png rename to episodes/fig/formatting.png diff --git a/fig/good_formatting.png b/episodes/fig/good_formatting.png similarity index 100% rename from fig/good_formatting.png rename to episodes/fig/good_formatting.png diff --git a/fig/input_message.png b/episodes/fig/input_message.png similarity index 100% rename from fig/input_message.png rename to episodes/fig/input_message.png diff --git a/fig/invalid_value.png b/episodes/fig/invalid_value.png similarity index 100% rename from fig/invalid_value.png rename to episodes/fig/invalid_value.png diff --git a/fig/multiple-info.png b/episodes/fig/multiple-info.png similarity index 100% rename from fig/multiple-info.png rename 
to episodes/fig/multiple-info.png diff --git a/fig/plot_validation.png b/episodes/fig/plot_validation.png similarity index 100% rename from fig/plot_validation.png rename to episodes/fig/plot_validation.png diff --git a/fig/single-info.png b/episodes/fig/single-info.png similarity index 100% rename from fig/single-info.png rename to episodes/fig/single-info.png diff --git a/fig/solution_exercise_1_dates.png b/episodes/fig/solution_exercise_1_dates.png similarity index 100% rename from fig/solution_exercise_1_dates.png rename to episodes/fig/solution_exercise_1_dates.png diff --git a/fig/sort-date.png b/episodes/fig/sort-date.png similarity index 100% rename from fig/sort-date.png rename to episodes/fig/sort-date.png diff --git a/fig/sorting.png b/episodes/fig/sorting.png similarity index 100% rename from fig/sorting.png rename to episodes/fig/sorting.png diff --git a/fig/spreadsheet-setup.png b/episodes/fig/spreadsheet-setup.png similarity index 100% rename from fig/spreadsheet-setup.png rename to episodes/fig/spreadsheet-setup.png diff --git a/index.md b/index.md index 03757f0..9be736e 100644 --- a/index.md +++ b/index.md @@ -1,26 +1,47 @@ --- -layout: lesson -contributors: ["Jez Cope", "Christie Bahlai", "Aleksandra Pawlik", "Jennifer Bryan", "Alexander Duryee", "Jeffrey Hollister", "Daisie Huang", "Owen Jones", "Ben Marwick", "Tracy Teal"] -maintainers: - - Jez Cope +contributors: +- Jez Cope +- Christie Bahlai +- Aleksandra Pawlik +- Jennifer Bryan +- Alexander Duryee +- Jeffrey Hollister +- Daisie Huang +- Owen Jones +- Ben Marwick +- Tracy Teal +maintainers: Jez Cope domain: Library topic: Spreadsheets software: Spreadsheets dataurl: https://ndownloader.figshare.com/files/2252083 status: Teaching +site: sandpaper::sandpaper_site --- -[Library Carpentry]({{ site.lc_site }})'s aim is to teach researchers basic concepts, skills, and tools for working with data so that they can get more done in less time, and with less pain. The lessons below were designed for those interested in working with {{page.domain}} data in {{page.topic}}. +> **ATTENTION** This is an experimental test of [The Carpentries Workbench](https://carpentries.github.io/workbench) lesson infrastructure. +> It was automatically converted from the source lesson via [the lesson transition script](https://github.com/carpentries/lesson-transition/). +> +> If anything seems off, please contact Zhian Kamvar [zkamvar@carpentries.org](mailto:zkamvar@carpentries.org) -> ## Prerequisites -> -> Library Carpentry's teaching is hands-on, so participants are encouraged to use -> their own computers to insure the proper setup of tools for an efficient workflow. -> *These lessons assume no prior knowledge of the skills or tools*, but working -> through this lesson requires working copies of the software described below. -> To most effectively use these materials, please make sure to install everything -> *before* working through this lesson. -{: .prereq} +[Library Carpentry](https://librarycarpentry.org/)'s aim is to teach researchers basic concepts, skills, and tools for working with data so that they can get more done in less time, and with less pain. The lessons below were designed for those interested in working with {{page.domain}} data in {{page.topic}}. + +:::::::::::::::::::::::::::::::::::::::::: prereq + +## Prerequisites + +Library Carpentry's teaching is hands-on, so participants are encouraged to use +their own computers to insure the proper setup of tools for an efficient workflow. 
+*These lessons assume no prior knowledge of the skills or tools*, but working +through this lesson requires working copies of the software described below. +To most effectively use these materials, please make sure to install everything +*before* working through this lesson. + + +:::::::::::::::::::::::::::::::::::::::::::::::::: ### Getting Started + To get started, follow the directions in the "[Setup](https://librarycarpentry.org/lc-spreadsheets/setup.html)" tab to download data to your computer and follow any installation instructions. + + diff --git a/instructors/instructor-notes.md b/instructors/instructor-notes.md new file mode 100644 index 0000000..9a8d492 --- /dev/null +++ b/instructors/instructor-notes.md @@ -0,0 +1,122 @@ +--- +title: Instructor Notes +--- + +## Instructor notes + +### Lesson purpose + +The purpose of this lesson is not to teach how to do data analysis in spreadsheets, +but to teach good data organization and how to do some data cleaning and +quality control in a spreadsheet program. + +### Narrative + +#### [Introduction](../episodes/00-intro.md) + +- Introduce that we're teaching data organization, and that we're using + spreadsheets, because most people do data entry in spreadsheets or + have data in spreadsheets. +- Emphasize that we are teaching good practice in data organization and that + this is the foundation of their research practice. Without organized and clean + data, it will be difficult for them to apply the things we're teaching in the + rest of the workshop to their data. +- Much of their lives as a researcher will be spent on this 'data wrangling' stage, but + some of it can be prevented with good strategies for data collection up front. +- Tell that we're not teaching data analysis or plotting in spreadsheets, because it's + very manual and also not reproducible. That's why we're teaching SQL, R, Python! +- Now let's talk about spreadsheets, and when we say spreadsheets, we mean any program that + does spreadsheets like Excel, LibreOffice, OpenOffice. Most learners are probably using Excel. +- Ask the audience any things they've accidentally done in spreadsheets. Talk about an example of your own, like that you accidentally sorted only a single column and not the rest + of the data in the spreadsheet. What are the pain points!? +- As people answer highlight some of these issues with spreadsheets + +#### [Formatting data](../episodes/01-format-data.md) + +- Go through the point about keeping track of your steps and keeping raw data raw +- Go through the cardinal rule of spreadsheets about columns, rows and cells +- Hand them a messy data file and have them pair up and work together to clean up the data. + *Give them 15 minutes to do this.* +- Ask for what people did to clean the data. As they bring up different points you can + refer to them in the 02-common-mistakes.md file, or expand a bit on the point they brought up. + If you are just teaching the lesson, it would be good to familiarize yourself with + the set of mistakes in 02-common-mistakes. All these mistakes are present in the messy + dataset. +- If you get a response where they've fixed the date, you can pause and go to the + 03-dates-as-data.md lesson. Or you can say you'll come back to dates at the end. + There's an exercise in that file about how to change the + date into three columns using Excel's built in MONTH, DAY, YEAR functions. Have them + run through that exercise. 
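If a learner asks how the same split looks outside of a spreadsheet, one possible answer is a short pandas sketch (this is not part of the lesson material; the column name and values are invented for illustration):

```python
# Not part of the lesson material: a reference sketch for instructors who are
# asked how the MONTH/DAY/YEAR split looks outside of Excel. Column names and
# values are invented for illustration.
import pandas as pd

df = pd.DataFrame({"date_collected": ["2015-07-01", "2015-07-05", "2017-07-08"]})
dates = pd.to_datetime(df["date_collected"])

df["year"] = dates.dt.year
df["month"] = dates.dt.month
df["day"] = dates.dt.day
print(df)
```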
+ +#### [Common formatting problems](../episodes/02-common-mistakes.md) + +- **Don't go through this chapter** except to refer to as responses to the exercise in + the previous chapter. + +#### [Dates as data](../episodes/03-dates-as-data.md) + +- Do the exercise and make the point about dates either in response to a learner bringing + up date as an issue during the responses, or at the end of the response time. + +#### [Quality control](../episodes/04-quality-control.md) + +*This lesson is optional* + +The challenge with this lesson is that the instructor's version of the spreadsheet software is going to look different than about half the room's. It makes +it challenging to show where you can find menu options and navigate through. + +Instead discuss the concepts of quality control, and how things like sorting can help you find outliers in your data. + +#### [Exporting data](../episodes/05-exporting-data.md) + +- Have the students export their cleaned data as `.csv`. Reiterate again the need for + data in this format for the other tools we'll be using. + +#### [Data Format Caveats](../episodes/06-data-formats-caveats.md) + +*This lesson is for reference* + +- This is mainly here as a reference if people have questions about different file formats. + You don't need to go through this. + +#### Concluding points + +- Now your data is organized so that a computer can read and understand it. This + let's you use the full power of the computer for your analyses as we'll see in the + rest of the workshop. +- While your data is now neatly organized, it still might have errors or missing data + or other problems. It's like you put all your data in the right drawers, but the + drawers might still be messy. The next lesson is going to teach you OpenRefine which + is great for data cleaning and for some of the quality control that we touched on + in this lesson. It also has the advantage that it automatically keeps track of the + steps you take. + +## Technical tips and tricks + +Provide information on setting up your environment for learners to view your +live coding (increasing text size, changing text color, etc), as well as +general recommendations for working with coding tools to best suit the +learning environment. + +### Potential issues + +#### Excel looks and acts different on different operating systems + +The main challenge with this lesson is that Excel looks very different and how you +do things is even different between Mac and PC, and between different versions of +Excel. So, the presenter's environment will only be the same as some of the learners. + +We need better notes and screenshots of how things work on both Mac and PC. But we +likely won't be able to cover all the different versions of Excel. + +If you have a helper who has experience with the other OS than you, it would be good +to prep them to help with this lesson and tell how people to do things in the other OS. + +#### People are not interactive or responsive on the Exercise + +This lesson depends on people working on the exercise and responding with things +that are fixed. If your audience is reluctant to participate, start out with +some things on your own, or ask a helper for their answers. This generally gets +even a reluctant audience started. 
+ + diff --git a/_extras/discuss.md b/learners/discuss.md similarity index 97% rename from _extras/discuss.md rename to learners/discuss.md index 7ecc631..8001a12 100644 --- a/_extras/discuss.md +++ b/learners/discuss.md @@ -1,7 +1,7 @@ --- -layout: page title: Discussion --- + There are many ways to discuss Library Carpentry lessons: - Join our [Gitter discussion forum](https://gitter.im/LibraryCarpentry/). @@ -10,3 +10,4 @@ There are many ways to discuss Library Carpentry lessons: - Follow updates on [Twitter](https://twitter.com/LibCarpentry). - Make a suggestion or correct an error by [raising an Issue](https://github.com/LibraryCarpentry/lc-open-refine/issues). + diff --git a/learners/reference.md b/learners/reference.md new file mode 100644 index 0000000..e76fc85 --- /dev/null +++ b/learners/reference.md @@ -0,0 +1,9 @@ +--- +title: 'FIXME' +--- + +## Glossary + +FIXME This is a placeholder file. Please add content here. + + diff --git a/learners/setup.md b/learners/setup.md new file mode 100644 index 0000000..8aa16a5 --- /dev/null +++ b/learners/setup.md @@ -0,0 +1,81 @@ +--- +title: Setup +--- + +### Spreadsheets + +Spreadsheets are useful for data entry and data organization, and some +subsetting and sorting of the data as well as getting an overview of the +data. To interact with spreadsheets, we can use +[LibreOffice](https://www.libreoffice.org), [Microsoft +Excel](https://products.office.com/en-us/excel), +[Gnumeric](https://www.gnumeric.org), +[OpenOffice.org](https://www.openoffice.org), or other programs. +Commands may differ a bit between programs, but general ideas for +thinking about spreadsheets is the same. + +For this lesson, if you don't have a spreadsheet program already, you +can use [LibreOffice](https://www.libreoffice.org). It's a free, open +source spreadsheet program. + +
+ +#### Windows + +- **Download the Installer** + Install LibreOffice by going to the [installation + page](https://www.libreoffice.org/download/libreoffice-fresh/). The + version for Windows should automatically be selected. Click + **Download**. You will go to a page that asks about a + donation, but you don't need to make one. Your download should begin + automatically. +- **Install LibreOffice** + Once the installer is downloaded, double click on it and it should + install. + +
+ +
+ +#### macOS {#macosx} + +- **Download the Installer** + Install LibreOffice by going to the [installation + page](https://www.libreoffice.org/download/libreoffice-fresh/). The + version for macOS should automatically be selected. Click + **Download**. You will go to a page that asks about a + donation, but you don't need to make one. Your download should begin + automatically. +- **Install LibreOffice** + The file *LibreOffice\_X.X.X\_MacOS\_x86-64* (whichever version of LibreOffice you have selected) should have been + downloaded. Double click on this file, and LibreOffice will be + installed. + +
+ +
+ +#### Linux + +- **Download the Installer** + Install LibreOffice by going to the [installation + page](https://www.libreoffice.org/download/libreoffice-fresh/). The + version for Linux should automatically be selected. Click **Download**. You will go to a page that asks about a donation, + but you don't need to make one. Your download should begin + automatically. +- **Install LibreOffice** + Once the installer is downloaded, double click on it and it should + install. + +
+ +       +      +      +      + +### Download Data File + +Download [training\_attendance.xlsx](data/training_attendance.xlsx), which is a `xlsx` file that should automatically download. You may need to right click or control click in order to save the file (NOTE: In Safari, right click and select **download linked file**; in Chrome and Firefox, right click and select **save link as**). Make a note of the location (i.e the folder, your desktop) to which you save the file. + + diff --git a/profiles/learner-profiles.md b/profiles/learner-profiles.md new file mode 100644 index 0000000..434e335 --- /dev/null +++ b/profiles/learner-profiles.md @@ -0,0 +1,5 @@ +--- +title: FIXME +--- + +This is a placeholder file. Please add content here. diff --git a/reference.md b/reference.md deleted file mode 100644 index d32d8fb..0000000 --- a/reference.md +++ /dev/null @@ -1,4 +0,0 @@ ---- -layout: reference ---- - diff --git a/setup.md b/setup.md deleted file mode 100644 index 420920d..0000000 --- a/setup.md +++ /dev/null @@ -1,77 +0,0 @@ ---- -layout: page -title: Setup ---- -### Spreadsheets - -Spreadsheets are useful for data entry and data organization, and some -subsetting and sorting of the data as well as getting an overview of the -data. To interact with spreadsheets, we can use -[LibreOffice](https://www.libreoffice.org), [Microsoft -Excel](https://products.office.com/en-us/excel), -[Gnumeric](http://www.gnumeric.org), -[OpenOffice.org](https://www.openoffice.org), or other programs. -Commands may differ a bit between programs, but general ideas for -thinking about spreadsheets is the same. - -For this lesson, if you don't have a spreadsheet program already, you -can use [LibreOffice](https://www.libreoffice.org). It's a free, open -source spreadsheet program. - -
- -#### Windows - -- **Download the Installer** - Install LibreOffice by going to the [installation - page](https://www.libreoffice.org/download/libreoffice-fresh/). The - version for Windows should automatically be selected. Click - **Download**. You will go to a page that asks about a - donation, but you don't need to make one. Your download should begin - automatically. -- **Install LibreOffice** - Once the installer is downloaded, double click on it and it should - install. - -
- -
- -#### macOS {#macosx} - -- **Download the Installer** - Install LibreOffice by going to the [installation - page](https://www.libreoffice.org/download/libreoffice-fresh/). The - version for macOS should automatically be selected. Click - **Download**. You will go to a page that asks about a - donation, but you don't need to make one. Your download should begin - automatically. -- **Install LibreOffice** - The file *LibreOffice\_X.X.X\_MacOS\_x86-64* (whichever version of LibreOffice you have selected) should have been - downloaded. Double click on this file, and LibreOffice will be - installed. - -
- -
- -#### Linux - -- **Download the Installer** - Install LibreOffice by going to the [installation - page](https://www.libreoffice.org/download/libreoffice-fresh/). The - version for Linux should automatically be selected. Click **Download**. You will go to a page that asks about a donation, - but you don't need to make one. Your download should begin - automatically. -- **Install LibreOffice** - Once the installer is downloaded, double click on it and it should - install. - -
-       -      -      -      - -### Download Data File -Download [training_attendance.xlsx](data/training_attendance.xlsx), which is a `xlsx` file that should automatically download. You may need to right click or control click in order to save the file (NOTE: In Safari, right click and select **download linked file**; in Chrome and Firefox, right click and select **save link as**). Make a note of the location (i.e the folder, your desktop) to which you save the file. diff --git a/site/README.md b/site/README.md new file mode 100644 index 0000000..42997e3 --- /dev/null +++ b/site/README.md @@ -0,0 +1,2 @@ +This directory contains rendered lesson materials. Please do not edit files +here.