diff --git a/.quarto/idx/index.qmd.json b/.quarto/idx/index.qmd.json index a9f69a4..215fdbf 100644 --- a/.quarto/idx/index.qmd.json +++ b/.quarto/idx/index.qmd.json @@ -1 +1 @@ -{"title":"Course overview","markdown":{"yaml":{"title":"Course overview","number-sections":false},"headingText":"Core aims","containsRefs":false,"markdown":"\n\nWelcome to the wonderful world of generalised linear models!\n\nThese sessions are intended to enable you to construct and use generalised linear models confidently.\n\nAs with all of our statistics courses our focus is not on mathematical derivations, but on developing an intuitive understanding of the underlying statistical concepts.\n\nAt the same time this is also *not* a \"how to mindlessly use a stats program\" course! We hope that at the end of this course you feel like you have a better grasp on what it is we're trying to do, and gained sufficient confidence in your coding skills to implement these statistical concepts in your own research!\n\n\nTo introduce sufficient understanding and coding experience for analysing data with non-continuous response variables.\n\n::: callout-note\n## Course aims\n\nTo know what to do when presented with an arbitrary data set e.g.\n\n1. Construct\n a. a logistic model for binary response variables\n b. a logistic model for proportion response variables\n c. a Poisson model for count response variables\n d. ~~a Negative Binomial model for count response variables~~ (to be added later)\n2. Plot the data and the fitted curve in each case for both continuous and categorical predictors\n3. Assess the significance of fit\n4. Assess assumption of the model\n:::\n","srcMarkdownNoYaml":"\n\nWelcome to the wonderful world of generalised linear models!\n\nThese sessions are intended to enable you to construct and use generalised linear models confidently.\n\nAs with all of our statistics courses our focus is not on mathematical derivations, but on developing an intuitive understanding of the underlying statistical concepts.\n\nAt the same time this is also *not* a \"how to mindlessly use a stats program\" course! We hope that at the end of this course you feel like you have a better grasp on what it is we're trying to do, and gained sufficient confidence in your coding skills to implement these statistical concepts in your own research!\n\n## Core aims\n\nTo introduce sufficient understanding and coding experience for analysing data with non-continuous response variables.\n\n::: callout-note\n## Course aims\n\nTo know what to do when presented with an arbitrary data set e.g.\n\n1. Construct\n a. a logistic model for binary response variables\n b. a logistic model for proportion response variables\n c. a Poisson model for count response variables\n d. ~~a Negative Binomial model for count response variables~~ (to be added later)\n2. Plot the data and the fitted curve in each case for both continuous and categorical predictors\n3. Assess the significance of fit\n4. 
Assess assumption of the model\n:::\n"},"formats":{"courseformat-html":{"identifier":{"display-name":"HTML","target-format":"courseformat-html","base-format":"html","extension-name":"courseformat"},"execute":{"fig-width":7,"fig-height":5,"fig-format":"retina","fig-dpi":96,"df-print":"default","error":false,"eval":true,"cache":null,"freeze":"auto","echo":true,"output":true,"warning":true,"include":true,"keep-md":false,"keep-ipynb":false,"ipynb":null,"enabled":null,"daemon":null,"daemon-restart":false,"debug":false,"ipynb-filters":[],"ipynb-shell-interactivity":null,"plotly-connected":true,"engine":"markdown"},"render":{"keep-tex":false,"keep-typ":false,"keep-source":false,"keep-hidden":false,"prefer-html":false,"output-divs":true,"output-ext":"html","fig-align":"default","fig-pos":null,"fig-env":null,"code-fold":"none","code-overflow":"scroll","code-link":true,"code-line-numbers":false,"code-tools":false,"tbl-colwidths":"auto","merge-includes":true,"inline-includes":false,"preserve-yaml":false,"latex-auto-mk":true,"latex-auto-install":true,"latex-clean":true,"latex-min-runs":1,"latex-max-runs":10,"latex-makeindex":"makeindex","latex-makeindex-opts":[],"latex-tlmgr-opts":[],"latex-input-paths":[],"latex-output-dir":null,"link-external-icon":false,"link-external-newwindow":false,"self-contained-math":false,"format-resources":[],"notebook-links":true,"shortcodes":[]},"pandoc":{"standalone":true,"wrap":"none","default-image-extension":"png","to":"html","toc":true,"number-sections":false,"filters":[],"output-file":"index.html"},"language":{"toc-title-document":"Table of contents","toc-title-website":"On this page","related-formats-title":"Other Formats","related-notebooks-title":"Notebooks","source-notebooks-prefix":"Source","other-links-title":"Other Links","code-links-title":"Code Links","launch-dev-container-title":"Launch Dev Container","launch-binder-title":"Launch Binder","article-notebook-label":"Article Notebook","notebook-preview-download":"Download Notebook","notebook-preview-download-src":"Download Source","notebook-preview-back":"Back to Article","manuscript-meca-bundle":"MECA Bundle","section-title-abstract":"Abstract","section-title-appendices":"Appendices","section-title-footnotes":"Footnotes","section-title-references":"References","section-title-reuse":"Reuse","section-title-copyright":"Copyright","section-title-citation":"Citation","appendix-attribution-cite-as":"For attribution, please cite this work as:","appendix-attribution-bibtex":"BibTeX citation:","title-block-author-single":"Author","title-block-author-plural":"Authors","title-block-affiliation-single":"Affiliation","title-block-affiliation-plural":"Affiliations","title-block-published":"Published","title-block-modified":"Modified","title-block-keywords":"Keywords","callout-tip-title":"Tip","callout-note-title":"Note","callout-warning-title":"Warning","callout-important-title":"Important","callout-caution-title":"Caution","code-summary":"Code","code-tools-menu-caption":"Code","code-tools-show-all-code":"Show All Code","code-tools-hide-all-code":"Hide All Code","code-tools-view-source":"View Source","code-tools-source-code":"Source Code","code-line":"Line","code-lines":"Lines","copy-button-tooltip":"Copy to Clipboard","copy-button-tooltip-success":"Copied!","repo-action-links-edit":"Edit this page","repo-action-links-source":"View source","repo-action-links-issue":"Report an issue","back-to-top":"Back to top","search-no-results-text":"No results","search-matching-documents-text":"matching 
documents","search-copy-link-title":"Copy link to search","search-hide-matches-text":"Hide additional matches","search-more-match-text":"more match in this document","search-more-matches-text":"more matches in this document","search-clear-button-title":"Clear","search-text-placeholder":"","search-detached-cancel-button-title":"Cancel","search-submit-button-title":"Submit","search-label":"Search","toggle-section":"Toggle section","toggle-sidebar":"Toggle sidebar navigation","toggle-dark-mode":"Toggle dark mode","toggle-reader-mode":"Toggle reader mode","toggle-navigation":"Toggle navigation","crossref-fig-title":"Figure","crossref-tbl-title":"Table","crossref-lst-title":"Listing","crossref-thm-title":"Theorem","crossref-lem-title":"Lemma","crossref-cor-title":"Corollary","crossref-prp-title":"Proposition","crossref-cnj-title":"Conjecture","crossref-def-title":"Definition","crossref-exm-title":"Example","crossref-exr-title":"Exercise","crossref-ch-prefix":"Chapter","crossref-apx-prefix":"Appendix","crossref-sec-prefix":"Section","crossref-eq-prefix":"Equation","crossref-lof-title":"List of Figures","crossref-lot-title":"List of Tables","crossref-lol-title":"List of Listings","environment-proof-title":"Proof","environment-remark-title":"Remark","environment-solution-title":"Solution","listing-page-order-by":"Order By","listing-page-order-by-default":"Default","listing-page-order-by-date-asc":"Oldest","listing-page-order-by-date-desc":"Newest","listing-page-order-by-number-desc":"High to Low","listing-page-order-by-number-asc":"Low to High","listing-page-field-date":"Date","listing-page-field-title":"Title","listing-page-field-description":"Description","listing-page-field-author":"Author","listing-page-field-filename":"File Name","listing-page-field-filemodified":"Modified","listing-page-field-subtitle":"Subtitle","listing-page-field-readingtime":"Reading Time","listing-page-field-wordcount":"Word Count","listing-page-field-categories":"Categories","listing-page-minutes-compact":"{0} min","listing-page-category-all":"All","listing-page-no-matches":"No matching items","listing-page-words":"{0} words"},"metadata":{"lang":"en","fig-responsive":true,"quarto-version":"1.4.531","theme":["default","_extensions/cambiotraining/courseformat/theme.scss"],"number-depth":3,"code-copy":true,"revealjs-plugins":[],"bibliography":["references.bib"],"knitr":{"opts_knit":{"cache.path":".knitr_cache"}},"title":"Course overview"},"extensions":{"book":{"multiFile":true}}}},"projectFormats":["courseformat-html"]} \ No newline at end of file +{"title":"Course overview","markdown":{"yaml":{"title":"Course overview","number-sections":false},"headingText":"Core aims","containsRefs":false,"markdown":"\n\nWelcome to the wonderful world of generalised linear models!\n\nThese sessions are intended to enable you to construct and use generalised linear models confidently.\n\nAs with all of our statistics courses our focus is not on mathematical derivations, but on developing an intuitive understanding of the underlying statistical concepts.\n\nAt the same time this is also *not* a \"how to mindlessly use a stats program\" course! 
We hope that at the end of this course you feel like you have a better grasp on what it is we're trying to do, and gained sufficient confidence in your coding skills to implement these statistical concepts in your own research!\n\n\nTo introduce sufficient understanding and coding experience for analysing data with non-continuous response variables.\n\n::: callout-note\n## Course aims\n\nTo know what to do when presented with an arbitrary data set e.g.\n\n1. Construct\n a. a logistic model for binary response variables\n b. a logistic model for proportion response variables\n c. a Poisson model for count response variables\n d. ~~a Negative Binomial model for count response variables~~ (to be added later)\n2. Plot the data and the fitted curve in each case for both continuous and categorical predictors\n3. Assess the significance of fit\n4. Assess assumption of the model\n:::\n","srcMarkdownNoYaml":"\n\nWelcome to the wonderful world of generalised linear models!\n\nThese sessions are intended to enable you to construct and use generalised linear models confidently.\n\nAs with all of our statistics courses our focus is not on mathematical derivations, but on developing an intuitive understanding of the underlying statistical concepts.\n\nAt the same time this is also *not* a \"how to mindlessly use a stats program\" course! We hope that at the end of this course you feel like you have a better grasp on what it is we're trying to do, and gained sufficient confidence in your coding skills to implement these statistical concepts in your own research!\n\n## Core aims\n\nTo introduce sufficient understanding and coding experience for analysing data with non-continuous response variables.\n\n::: callout-note\n## Course aims\n\nTo know what to do when presented with an arbitrary data set e.g.\n\n1. Construct\n a. a logistic model for binary response variables\n b. a logistic model for proportion response variables\n c. a Poisson model for count response variables\n d. ~~a Negative Binomial model for count response variables~~ (to be added later)\n2. Plot the data and the fitted curve in each case for both continuous and categorical predictors\n3. Assess the significance of fit\n4. 
Assess assumption of the model\n:::\n"},"formats":{"courseformat-html":{"identifier":{"display-name":"HTML","target-format":"courseformat-html","base-format":"html","extension-name":"courseformat"},"execute":{"fig-width":7,"fig-height":5,"fig-format":"retina","fig-dpi":96,"df-print":"default","error":false,"eval":true,"cache":null,"freeze":"auto","echo":true,"output":true,"warning":true,"include":true,"keep-md":false,"keep-ipynb":false,"ipynb":null,"enabled":null,"daemon":null,"daemon-restart":false,"debug":false,"ipynb-filters":[],"ipynb-shell-interactivity":null,"plotly-connected":true,"engine":"markdown"},"render":{"keep-tex":false,"keep-typ":false,"keep-source":false,"keep-hidden":false,"prefer-html":false,"output-divs":true,"output-ext":"html","fig-align":"default","fig-pos":null,"fig-env":null,"code-fold":"none","code-overflow":"scroll","code-link":true,"code-line-numbers":false,"code-tools":false,"tbl-colwidths":"auto","merge-includes":true,"inline-includes":false,"preserve-yaml":false,"latex-auto-mk":true,"latex-auto-install":true,"latex-clean":true,"latex-min-runs":1,"latex-max-runs":10,"latex-makeindex":"makeindex","latex-makeindex-opts":[],"latex-tlmgr-opts":[],"latex-input-paths":[],"latex-output-dir":null,"link-external-icon":false,"link-external-newwindow":false,"self-contained-math":false,"format-resources":[],"notebook-links":true,"shortcodes":[]},"pandoc":{"standalone":true,"wrap":"none","default-image-extension":"png","to":"html","toc":true,"number-sections":false,"filters":["courseformat"],"output-file":"index.html"},"language":{"toc-title-document":"Table of contents","toc-title-website":"On this page","related-formats-title":"Other Formats","related-notebooks-title":"Notebooks","source-notebooks-prefix":"Source","other-links-title":"Other Links","code-links-title":"Code Links","launch-dev-container-title":"Launch Dev Container","launch-binder-title":"Launch Binder","article-notebook-label":"Article Notebook","notebook-preview-download":"Download Notebook","notebook-preview-download-src":"Download Source","notebook-preview-back":"Back to Article","manuscript-meca-bundle":"MECA Bundle","section-title-abstract":"Abstract","section-title-appendices":"Appendices","section-title-footnotes":"Footnotes","section-title-references":"References","section-title-reuse":"Reuse","section-title-copyright":"Copyright","section-title-citation":"Citation","appendix-attribution-cite-as":"For attribution, please cite this work as:","appendix-attribution-bibtex":"BibTeX citation:","title-block-author-single":"Author","title-block-author-plural":"Authors","title-block-affiliation-single":"Affiliation","title-block-affiliation-plural":"Affiliations","title-block-published":"Published","title-block-modified":"Modified","title-block-keywords":"Keywords","callout-tip-title":"Tip","callout-note-title":"Note","callout-warning-title":"Warning","callout-important-title":"Important","callout-caution-title":"Caution","code-summary":"Code","code-tools-menu-caption":"Code","code-tools-show-all-code":"Show All Code","code-tools-hide-all-code":"Hide All Code","code-tools-view-source":"View Source","code-tools-source-code":"Source Code","tools-share":"Share","tools-download":"Download","code-line":"Line","code-lines":"Lines","copy-button-tooltip":"Copy to Clipboard","copy-button-tooltip-success":"Copied!","repo-action-links-edit":"Edit this page","repo-action-links-source":"View source","repo-action-links-issue":"Report an issue","back-to-top":"Back to top","search-no-results-text":"No 
results","search-matching-documents-text":"matching documents","search-copy-link-title":"Copy link to search","search-hide-matches-text":"Hide additional matches","search-more-match-text":"more match in this document","search-more-matches-text":"more matches in this document","search-clear-button-title":"Clear","search-text-placeholder":"","search-detached-cancel-button-title":"Cancel","search-submit-button-title":"Submit","search-label":"Search","toggle-section":"Toggle section","toggle-sidebar":"Toggle sidebar navigation","toggle-dark-mode":"Toggle dark mode","toggle-reader-mode":"Toggle reader mode","toggle-navigation":"Toggle navigation","crossref-fig-title":"Figure","crossref-tbl-title":"Table","crossref-lst-title":"Listing","crossref-thm-title":"Theorem","crossref-lem-title":"Lemma","crossref-cor-title":"Corollary","crossref-prp-title":"Proposition","crossref-cnj-title":"Conjecture","crossref-def-title":"Definition","crossref-exm-title":"Example","crossref-exr-title":"Exercise","crossref-ch-prefix":"Chapter","crossref-apx-prefix":"Appendix","crossref-sec-prefix":"Section","crossref-eq-prefix":"Equation","crossref-lof-title":"List of Figures","crossref-lot-title":"List of Tables","crossref-lol-title":"List of Listings","environment-proof-title":"Proof","environment-remark-title":"Remark","environment-solution-title":"Solution","listing-page-order-by":"Order By","listing-page-order-by-default":"Default","listing-page-order-by-date-asc":"Oldest","listing-page-order-by-date-desc":"Newest","listing-page-order-by-number-desc":"High to Low","listing-page-order-by-number-asc":"Low to High","listing-page-field-date":"Date","listing-page-field-title":"Title","listing-page-field-description":"Description","listing-page-field-author":"Author","listing-page-field-filename":"File Name","listing-page-field-filemodified":"Modified","listing-page-field-subtitle":"Subtitle","listing-page-field-readingtime":"Reading Time","listing-page-field-wordcount":"Word Count","listing-page-field-categories":"Categories","listing-page-minutes-compact":"{0} min","listing-page-category-all":"All","listing-page-no-matches":"No matching items","listing-page-words":"{0} words"},"metadata":{"lang":"en","fig-responsive":true,"quarto-version":"1.4.546","theme":["default","_extensions/cambiotraining/courseformat/theme.scss"],"number-depth":3,"code-copy":true,"revealjs-plugins":[],"bibliography":["references.bib"],"knitr":{"opts_knit":{"cache.path":".knitr_cache"}},"title":"Course overview"},"extensions":{"book":{"multiFile":true}}}},"projectFormats":["courseformat-html"]} \ No newline at end of file diff --git a/_freeze/materials/glm-practical-logistic-binary/execute-results/html.json b/_freeze/materials/glm-practical-logistic-binary/execute-results/html.json index be4c132..8e9870a 100644 --- a/_freeze/materials/glm-practical-logistic-binary/execute-results/html.json +++ b/_freeze/materials/glm-practical-logistic-binary/execute-results/html.json @@ -1,8 +1,8 @@ { - "hash": "34c6f67a8ac54c2d9cb418c133d33a2b", + "hash": "28252b951f9aa43eac78479dce327bea", "result": { "engine": "knitr", - "markdown": "---\ntitle: \"Binary response\"\n---\n\n::: {.cell}\n\n:::\n\n::: {.cell}\n\n:::\n\n\n::: {.callout-tip}\n## Learning outcomes\n\n**Questions**\n\n- How do we analyse data with a binary outcome?\n- Can we test if our model is any good?\n- Be able to perform a logistic regression with a binary outcome\n- Predict outcomes of new data, based on a defined model\n\n**Objectives**\n\n- Be able to analyse binary outcome data\n- 
Understand different methods of testing model fit\n- Be able to make model predictions\n:::\n\n## Libraries and functions\n\n::: {.callout-note collapse=\"true\"}\n## Click to expand\n\n::: {.panel-tabset group=\"language\"}\n## R\n\n### Libraries\n### Functions\n\n## Python\n\n### Libraries\n\n\n::: {.cell}\n\n```{.python .cell-code}\n# A maths library\nimport math\n# A Python data analysis and manipulation tool\nimport pandas as pd\n\n# Python equivalent of `ggplot2`\nfrom plotnine import *\n\n# Statistical models, conducting tests and statistical data exploration\nimport statsmodels.api as sm\n\n# Convenience interface for specifying models using formula strings and DataFrames\nimport statsmodels.formula.api as smf\n```\n:::\n\n\n### Functions\n:::\n:::\n\nThe example in this section uses the following data set:\n\n`data/finches_early.csv`\n\nThese data come from an analysis of gene flow across two finch species [@lamichhaney2020]. They are slightly adapted here for illustrative purposes.\n\nThe data focus on two species, _Geospiza fortis_ and _G. scandens_. The original measurements are split by a uniquely timed event: a particularly strong El Niño event in 1983. This event changed the vegetation and food supply of the finches, allowing F1 hybrids of the two species to survive, whereas before 1983 they could not. The measurements are classed as `early` (pre-1983) and `late` (1983 onwards).\n\nHere we are looking only at the `early` data. We are specifically focussing on the beak shape classification, which we saw earlier in @fig-beak_shape_glm.\n\n## Load and visualise the data\n\nFirst we load the data, then we visualise it.\n\n::: {.panel-tabset group=\"language\"}\n## R\n\n\n::: {.cell}\n\n```{.r .cell-code}\nearly_finches <- read_csv(\"data/finches_early.csv\")\n```\n:::\n\n\n## Python\n\n\n::: {.cell}\n\n```{.python .cell-code}\nearly_finches_py = pd.read_csv(\"data/finches_early.csv\")\n```\n:::\n\n\n:::\n\nLooking at the data, we can see that the `pointed_beak` column contains zeros and ones. These are actually yes/no classification outcomes and not numeric representations.\n\nWe'll have to deal with this soon. For now, we can plot the data:\n\n::: {.panel-tabset group=\"language\"}\n## R\n\n\n::: {.cell}\n\n```{.r .cell-code}\nggplot(early_finches,\n aes(x = factor(pointed_beak),\n y = blength)) +\n geom_boxplot()\n```\n\n::: {.cell-output-display}\n![](glm-practical-logistic-binary_files/figure-html/unnamed-chunk-6-1.png){width=672}\n:::\n:::\n\n\n## Python\n\nWe could just give Python the `pointed_beak` data directly, but then it would view the values as numeric. Which doesn't really work, because we have two groups as such: those with a pointed beak (`1`), and those with a blunt one (`0`).\n\nWe can force Python to temporarily covert the data to a factor, by making the `pointed_beak` column an `object` type. 
We can do this directly inside the `ggplot()` function.\n\n\n::: {.cell}\n\n```{.python .cell-code}\n(ggplot(early_finches_py,\n aes(x = early_finches_py.pointed_beak.astype(object),\n y = \"blength\")) +\n geom_boxplot())\n```\n\n::: {.cell-output-display}\n![](glm-practical-logistic-binary_files/figure-html/unnamed-chunk-7-1.png){width=614}\n:::\n:::\n\n:::\n\nIt looks as though the finches with blunt beaks generally have shorter beak lengths.\n\nWe can visualise that differently by plotting all the data points as a classic binary response plot:\n\n::: {.panel-tabset group=\"language\"}\n## R\n\n\n::: {.cell}\n\n```{.r .cell-code}\nggplot(early_finches,\n aes(x = blength, y = pointed_beak)) +\n geom_point()\n```\n\n::: {.cell-output-display}\n![](glm-practical-logistic-binary_files/figure-html/unnamed-chunk-8-3.png){width=672}\n:::\n:::\n\n\n## Python\n\n\n::: {.cell}\n\n```{.python .cell-code}\n(ggplot(early_finches_py,\n aes(x = \"blength\",\n y = \"pointed_beak\")) +\n geom_point())\n```\n\n::: {.cell-output-display}\n![](glm-practical-logistic-binary_files/figure-html/unnamed-chunk-9-1.png){width=614}\n:::\n:::\n\n\n:::\n\nThis presents us with a bit of an issue. We could fit a linear regression model to these data, although we already know that this is a bad idea...\n\n::: {.panel-tabset group=\"language\"}\n## R\n\n\n::: {.cell}\n\n```{.r .cell-code}\nggplot(early_finches,\n aes(x = blength, y = pointed_beak)) +\n geom_point() +\n geom_smooth(method = \"lm\", se = FALSE)\n```\n\n::: {.cell-output-display}\n![](glm-practical-logistic-binary_files/figure-html/unnamed-chunk-10-3.png){width=672}\n:::\n:::\n\n\n## Python\n\n\n::: {.cell}\n\n```{.python .cell-code}\n(ggplot(early_finches_py,\n aes(x = \"blength\",\n y = \"pointed_beak\")) +\n geom_point() +\n geom_smooth(method = \"lm\",\n colour = \"blue\",\n se = False))\n```\n\n::: {.cell-output-display}\n![](glm-practical-logistic-binary_files/figure-html/unnamed-chunk-11-1.png){width=614}\n:::\n:::\n\n\n:::\n\nOf course this is rubbish - we can't have a beak classification outside the range of $[0, 1]$. 
It's either blunt (`0`) or pointed (`1`).\n\nBut for the sake of exploration, let's look at the assumptions:\n\n::: {.panel-tabset group=\"language\"}\n## R\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlm_bks <- lm(pointed_beak ~ blength,\n data = early_finches)\n\nresid_panel(lm_bks,\n plots = c(\"resid\", \"qq\", \"ls\", \"cookd\"),\n smoother = TRUE)\n```\n\n::: {.cell-output-display}\n![](glm-practical-logistic-binary_files/figure-html/unnamed-chunk-12-3.png){width=672}\n:::\n:::\n\n\n## Python\n\nFirst, we create a linear model:\n\n\n::: {.cell}\n\n```{.python .cell-code}\n# create a linear model\nmodel = smf.ols(formula = \"pointed_beak ~ blength\",\n data = early_finches_py)\n# and get the fitted parameters of the model\nlm_bks_py = model.fit()\n```\n:::\n\n\nNext, we can create the diagnostic plots:\n\n::: {.cell}\n\n```{.python .cell-code}\ndgplots(lm_bks_py)\n```\n:::\n\n::: {.cell}\n::: {.cell-output-display}\n![](glm-practical-logistic-binary_files/figure-html/unnamed-chunk-15-1.png){width=96}\n:::\n:::\n\n::: {.cell}\n::: {.cell-output-display}\n![](images/dgplots/2024_01_15-10-25-49_AM_dgplots.png){width=804}\n:::\n:::\n\n\n:::\n\nThey're ~~pretty~~ extremely bad.\n\n- The response is not linear (Residual Plot, binary response plot, common sense).\n- The residuals do not appear to be distributed normally (Q-Q Plot)\n- The variance is not homogeneous across the predicted values (Location-Scale Plot)\n- But - there is always a silver lining - we don't have influential data points.\n\n## Creating a suitable model\n\nSo far we've established that using a simple linear model to describe a potential relationship between beak length and the probability of having a pointed beak is not a good idea. So, what _can_ we do?\n\nOne of the ways we can deal with binary outcome data is by performing a logistic regression. Instead of fitting a straight line to our data, and performing a regression on that, we fit a line that has an S shape. This avoids the model making predictions outside the $[0, 1]$ range.\n\nWe described our standard linear relationship as follows:\n\n$Y = \\beta_0 + \\beta_1X$\n\nWe can now map this to our non-linear relationship via the **logistic link function**:\n\n$Y = \\frac{\\exp(\\beta_0 + \\beta_1X)}{1 + \\exp(\\beta_0 + \\beta_1X)}$\n\nNote that the $\\beta_0 + \\beta_1X$ part is identical to the formula of a straight line.\n\nThe rest of the function is what makes the straight line curve into its characteristic S shape. \n\n:::{.callout-note collapse=true}\n## Euler's number ($\\exp$): would you like to know more?\n\nIn mathematics, $\\rm e$ represents a constant of around 2.718. Another notation is $\\exp$, which is often used when notations become a bit cumbersome. Here, I exclusively use the $\\exp$ notation for consistency.\n:::\n\n::: {.callout-important}\n## The logistic function\n\nThe shape of the logistic function is hugely influenced by the different parameters, in particular $\\beta_1$. 
The plots below show different situations, where $\\beta_0 = 0$ in all cases, but $\\beta_1$ varies.\n\nThe first plot shows the logistic function in its simplest form, with the others showing the effect of varying $\\beta_1$.\n\n\n::: {.cell}\n::: {.cell-output-display}\n![](glm-practical-logistic-binary_files/figure-html/unnamed-chunk-17-1.png){width=672}\n:::\n:::\n\n\n* when $\\beta_1 = 1$, this gives the simplest logistic function\n* when $\\beta_1 = 0$ gives a horizontal line, with $Y = \\frac{\\exp(\\beta_0)}{1+\\exp(\\beta_0)}$\n* when $\\beta_1$ is negative flips the curve around, so it slopes down\n* when $\\beta_1$ is very large then the curve becomes extremely steep\n\n:::\n\nWe can fit such an S-shaped curve to our `early_finches` data set, by creating a generalised linear model.\n\n::: {.panel-tabset group=\"language\"}\n## R\n\nIn R we have a few options to do this, and by far the most familiar function would be `glm()`. Here we save the model in an object called `glm_bks`:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nglm_bks <- glm(pointed_beak ~ blength,\n family = binomial,\n data = early_finches)\n```\n:::\n\n\nThe format of this function is similar to that used by the `lm()` function for linear models. The important difference is that we must specify the _family_ of error distribution to use. For logistic regression we must set the family to **binomial**.\n\nIf you forget to set the `family` argument, then the `glm()` function will perform a standard linear model fit, identical to what the `lm()` function would do.\n\n## Python\n\nIn Python we have a few options to do this, and by far the most familiar function would be `glm()`. Here we save the model in an object called `glm_bks_py`:\n\n\n::: {.cell}\n\n```{.python .cell-code}\n# create a linear model\nmodel = smf.glm(formula = \"pointed_beak ~ blength\",\n family = sm.families.Binomial(),\n data = early_finches_py)\n# and get the fitted parameters of the model\nglm_bks_py = model.fit()\n```\n:::\n\n\nThe format of this function is similar to that used by the `ols()` function for linear models. The important difference is that we must specify the _family_ of error distribution to use. For logistic regression we must set the family to **binomial**. This is buried deep inside the `statsmodels` package and needs to be defined as `sm.families.Binomial()`.\n\n:::\n\n## Model output\n\nThat's the easy part done! The trickier part is interpreting the output. First of all, we'll get some summary information.\n\n::: {.panel-tabset group=\"language\"}\n## R\n\n\n::: {.cell}\n\n```{.r .cell-code}\nsummary(glm_bks)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\nCall:\nglm(formula = pointed_beak ~ blength, family = binomial, data = early_finches)\n\nCoefficients:\n Estimate Std. Error z value Pr(>|z|) \n(Intercept) -43.410 15.250 -2.847 0.00442 **\nblength 3.387 1.193 2.839 0.00452 **\n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\n(Dispersion parameter for binomial family taken to be 1)\n\n Null deviance: 84.5476 on 60 degrees of freedom\nResidual deviance: 9.1879 on 59 degrees of freedom\nAIC: 13.188\n\nNumber of Fisher Scoring iterations: 8\n```\n\n\n:::\n:::\n\n\n\n## Python\n\n\n::: {.cell}\n\n```{.python .cell-code}\nprint(glm_bks_py.summary())\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n Generalized Linear Model Regression Results \n==============================================================================\nDep. Variable: pointed_beak No. 
Observations: 61\nModel: GLM Df Residuals: 59\nModel Family: Binomial Df Model: 1\nLink Function: Logit Scale: 1.0000\nMethod: IRLS Log-Likelihood: -4.5939\nDate: Mon, 15 Jan 2024 Deviance: 9.1879\nTime: 10:25:50 Pearson chi2: 15.1\nNo. Iterations: 8 Pseudo R-squ. (CS): 0.7093\nCovariance Type: nonrobust \n==============================================================================\n coef std err z P>|z| [0.025 0.975]\n------------------------------------------------------------------------------\nIntercept -43.4096 15.250 -2.847 0.004 -73.298 -13.521\nblength 3.3866 1.193 2.839 0.005 1.049 5.724\n==============================================================================\n```\n\n\n:::\n:::\n\n\n:::\n\nThere’s a lot to unpack here, but let's start with what we're familiar with: coefficients!\n\n## Parameter interpretation\n\n::: {.panel-tabset group=\"language\"}\n## R\nThe coefficients or parameters can be found in the `Coefficients` block. The main numbers to extract from the output are the two numbers underneath `Estimate.Std`:\n\n```\nCoefficients:\n Estimate Std.\n(Intercept) -43.410\nblength 3.387 \n```\n\n## Python\n\nRight at the bottom is a table showing the model coefficients. The main numbers to extract from the output are the two numbers in the `coef` column:\n\n```\n======================\n coef\n----------------------\nIntercept -43.4096\nblength 3.3866\n======================\n```\n\n:::\n\nThese are the coefficients of the logistic model equation and need to be placed in the correct equation if we want to be able to calculate the probability of having a pointed beak for a given beak length.\n\nThe $p$ values at the end of each coefficient row merely show whether that particular coefficient is significantly different from zero. This is similar to the $p$ values obtained in the summary output of a linear model. As with continuous predictors in simple models, these $p$ values can be used to decide whether that predictor is important (so in this case beak length appears to be significant). However, these $p$ values aren’t great to work with when we have multiple predictor variables, or when we have categorical predictors with multiple levels (since the output will give us a $p$ value for each level rather than for the predictor as a whole).\n\nWe can use the coefficients to calculate the probability of having a pointed beak for a given beak length:\n\n$$ P(pointed \\ beak) = \\frac{\\exp(-43.41 + 3.39 \\times blength)}{1 + \\exp(-43.41 + 3.39 \\times blength)} $$\n\nHaving this formula means that we can calculate the probability of having a pointed beak for any beak length. How do we work this out in practice? 
\n\n::: {.panel-tabset group=\"language\"}\n## R\n\nWell, the probability of having a pointed beak if the beak length is large (for example 15 mm) can be calculated as follows:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nexp(-43.41 + 3.39 * 15) / (1 + exp(-43.41 + 3.39 * 15))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 0.9994131\n```\n\n\n:::\n:::\n\n\nIf the beak length is small (for example 10 mm), the probability of having a pointed beak is extremely low:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nexp(-43.41 + 3.39 * 10) / (1 + exp(-43.41 + 3.39 * 10))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 7.410155e-05\n```\n\n\n:::\n:::\n\n\n## Python\nWell, the probability of having a pointed beak if the beak length is large (for example 15 mm) can be calculated as follows:\n\n\n::: {.cell}\n\n```{.python .cell-code}\n# import the math library\nimport math\n```\n:::\n\n::: {.cell}\n\n```{.python .cell-code}\nmath.exp(-43.41 + 3.39 * 15) / (1 + math.exp(-43.41 + 3.39 * 15))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n0.9994130595039192\n```\n\n\n:::\n:::\n\n\nIf the beak length is small (for example 10 mm), the probability of having a pointed beak is extremely low:\n\n\n::: {.cell}\n\n```{.python .cell-code}\nmath.exp(-43.41 + 3.39 * 10) / (1 + math.exp(-43.41 + 3.39 * 10))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n7.410155028945912e-05\n```\n\n\n:::\n:::\n\n:::\n\nWe can calculate the the probabilities for all our observed values and if we do that then we can see that the larger the beak length is, the higher the probability that a beak shape would be pointed. I'm visualising this together with the logistic curve, where the blue points are the calculated probabilities:\n\n::: {.callout-note collapse=true}\n## Code available here\n\n::: {.panel-tabset group=\"language\"}\n## R\n\n\n::: {.cell}\n\n```{.r .cell-code}\nglm_bks %>% \n augment(type.predict = \"response\") %>% \n ggplot() +\n geom_point(aes(x = blength, y = pointed_beak)) +\n geom_line(aes(x = blength, y = .fitted),\n linetype = \"dashed\",\n colour = \"blue\") +\n geom_point(aes(x = blength, y = .fitted),\n colour = \"blue\", alpha = 0.5) +\n labs(x = \"beak length (mm)\",\n y = \"Probability\")\n```\n:::\n\n\n## Python\n\n\n::: {.cell}\n\n```{.python .cell-code}\n(ggplot(early_finches_py) +\n geom_point(aes(x = \"blength\", y = \"pointed_beak\")) +\n geom_line(aes(x = \"blength\", y = glm_bks_py.fittedvalues),\n linetype = \"dashed\",\n colour = \"blue\") +\n geom_point(aes(x = \"blength\", y = glm_bks_py.fittedvalues),\n colour = \"blue\", alpha = 0.5) +\n labs(x = \"beak length (mm)\",\n y = \"Probability\"))\n```\n:::\n\n:::\n:::\n\n\n::: {.cell}\n::: {.cell-output-display}\n![Predicted probabilities for beak classification](glm-practical-logistic-binary_files/figure-html/fig-beak_class_glm_probs-2.png){#fig-beak_class_glm_probs width=672}\n:::\n:::\n\n\nThe graph shows us that, based on the data that we have and the model we used to make predictions about our response variable, the probability of seeing a pointed beak increases with beak length.\n\nShort beaks are more closely associated with the bluntly shaped beaks, whereas long beaks are more closely associated with the pointed shape. It's also clear that there is a range of beak lengths (around 13 mm) where the probability of getting one shape or another is much more even.\n\n## Parameter estimation explained\n\nNow that we know how to interpret the coefficients or parameters, let's have a look at how they're actually determined. 
To understand this, we need to take a step back.\n\n* Sum of squares (OLS)\n* MLE\n* Deviance\n\n## Assumptions\n\n* GAMLSS\n\n## Exercises\n\n### Diabetes {#sec-exr_diabetes}\n\n:::{.callout-exercise}\n\n\n{{< level 2 >}}\n\n\n\nFor this exercise we'll be using the data from `data/diabetes.csv`.\n\nThis is a data set comprising 768 observations of three variables (one dependent and two predictor variables). This records the results of a diabetes test result as a binary variable (1 is a positive result, 0 is a negative result), along with the result of a glucose tolerance test and the diastolic blood pressure for each of 768 women. The variables are called `test_result`, `glucose` and `diastolic`.\n\nWe want to see if the `glucose` tolerance is a meaningful predictor for predictions on a diabetes test. To investigate this, do the following:\n\n1. Load and visualise the data\n2. Create a suitable model\n3. Determine if there are any statistically significant predictors\n4. Calculate the probability of a positive diabetes test result for a glucose tolerance test value of `glucose = 150`\n\n::: {.callout-answer collapse=\"true\"}\n## Answer\n\n#### Load and visualise the data\n\nFirst we load the data, then we visualise it.\n\n::: {.panel-tabset group=\"language\"}\n## R\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndiabetes <- read_csv(\"data/diabetes.csv\")\n```\n:::\n\n\n## Python\n\n\n::: {.cell}\n\n```{.python .cell-code}\ndiabetes_py = pd.read_csv(\"data/diabetes.csv\")\n```\n:::\n\n\n:::\n\nLooking at the data, we can see that the `test_result` column contains zeros and ones. These are yes/no test result outcomes and not actually numeric representations.\n\nWe'll have to deal with this soon. For now, we can plot the data, by outcome:\n\n::: {.panel-tabset group=\"language\"}\n## R\n\n\n::: {.cell}\n\n```{.r .cell-code}\nggplot(diabetes,\n aes(x = factor(test_result),\n y = glucose)) +\n geom_boxplot()\n```\n\n::: {.cell-output-display}\n![](glm-practical-logistic-binary_files/figure-html/unnamed-chunk-32-1.png){width=672}\n:::\n:::\n\n\n## Python\n\nWe could just give Python the `test_result` data directly, but then it would view the values as numeric. Which doesn't really work, because we have two groups as such: those with a negative diabetes test result, and those with a positive one.\n\nWe can force Python to temporarily covert the data to a factor, by making the `test_result` column an `object` type. 
We can do this directly inside the `ggplot()` function.\n\n\n::: {.cell}\n\n```{.python .cell-code}\n(ggplot(diabetes_py,\n aes(x = diabetes_py.test_result.astype(object),\n y = \"glucose\")) +\n geom_boxplot())\n```\n\n::: {.cell-output-display}\n![](glm-practical-logistic-binary_files/figure-html/unnamed-chunk-33-1.png){width=614}\n:::\n:::\n\n:::\n\nIt looks as though the patients with a positive diabetes test have slightly higher glucose levels than those with a negative diabetes test.\n\nWe can visualise that differently by plotting all the data points as a classic binary response plot:\n\n::: {.panel-tabset group=\"language\"}\n## R\n\n\n::: {.cell}\n\n```{.r .cell-code}\nggplot(diabetes,\n aes(x = glucose,\n y = test_result)) +\n geom_point()\n```\n\n::: {.cell-output-display}\n![](glm-practical-logistic-binary_files/figure-html/unnamed-chunk-34-3.png){width=672}\n:::\n:::\n\n\n## Python\n\n\n::: {.cell}\n\n```{.python .cell-code}\n(ggplot(diabetes_py,\n aes(x = \"glucose\",\n y = \"test_result\")) +\n geom_point())\n```\n\n::: {.cell-output-display}\n![](glm-practical-logistic-binary_files/figure-html/unnamed-chunk-35-1.png){width=614}\n:::\n:::\n\n\n:::\n\n#### Create a suitable model\n\n::: {.panel-tabset group=\"language\"}\n## R\n\nWe'll use the `glm()` function to create a generalised linear model. Here we save the model in an object called `glm_dia`:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nglm_dia <- glm(test_result ~ glucose,\n family = binomial,\n data = diabetes)\n```\n:::\n\n\nThe format of this function is similar to that used by the `lm()` function for linear models. The important difference is that we must specify the _family_ of error distribution to use. For logistic regression we must set the family to **binomial**.\n\n## Python\n\n\n::: {.cell}\n\n```{.python .cell-code}\n# create a linear model\nmodel = smf.glm(formula = \"test_result ~ glucose\",\n family = sm.families.Binomial(),\n data = diabetes_py)\n# and get the fitted parameters of the model\nglm_dia_py = model.fit()\n```\n:::\n\n\n:::\n\nLet's look at the model parameters:\n\n::: {.panel-tabset group=\"language\"}\n## R\n\n\n::: {.cell}\n\n```{.r .cell-code}\nsummary(glm_dia)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\nCall:\nglm(formula = test_result ~ glucose, family = binomial, data = diabetes)\n\nCoefficients:\n Estimate Std. Error z value Pr(>|z|) \n(Intercept) -5.611732 0.442289 -12.69 <2e-16 ***\nglucose 0.039510 0.003398 11.63 <2e-16 ***\n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\n(Dispersion parameter for binomial family taken to be 1)\n\n Null deviance: 936.6 on 727 degrees of freedom\nResidual deviance: 752.2 on 726 degrees of freedom\nAIC: 756.2\n\nNumber of Fisher Scoring iterations: 4\n```\n\n\n:::\n:::\n\n\n\n## Python\n\n\n::: {.cell}\n\n```{.python .cell-code}\nprint(glm_dia_py.summary())\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n Generalized Linear Model Regression Results \n==============================================================================\nDep. Variable: test_result No. Observations: 728\nModel: GLM Df Residuals: 726\nModel Family: Binomial Df Model: 1\nLink Function: Logit Scale: 1.0000\nMethod: IRLS Log-Likelihood: -376.10\nDate: Mon, 15 Jan 2024 Deviance: 752.20\nTime: 10:25:54 Pearson chi2: 713.\nNo. Iterations: 4 Pseudo R-squ. 
(CS): 0.2238\nCovariance Type: nonrobust \n==============================================================================\n coef std err z P>|z| [0.025 0.975]\n------------------------------------------------------------------------------\nIntercept -5.6117 0.442 -12.688 0.000 -6.479 -4.745\nglucose 0.0395 0.003 11.628 0.000 0.033 0.046\n==============================================================================\n```\n\n\n:::\n:::\n\n:::\n\nWe can see that `glucose` is a significant predictor for the `test_result` (the $p$ value is much smaller than 0.05).\n\nKnowing this, we're interested in the coefficients. We have an intercept of `-5.61` and `0.0395` for `glucose`. We can use these coefficients to write a formula that describes the probability of having a positive test result as a function of the glucose tolerance level:\n\n$$ P(positive \ test\ result) = \\frac{\\exp(-5.61 + 0.04 \\times glucose)}{1 + \\exp(-5.61 + 0.04 \\times glucose)} $$\n\n#### Calculating probabilities\n\nUsing the formula above, we can now calculate the probability of having a positive test result, for a given `glucose` value. If we do this for `glucose = 150`, we get the following:\n\n::: {.panel-tabset group=\"language\"}\n## R\n\n\n::: {.cell}\n\n```{.r .cell-code}\nexp(-5.61 + 0.04 * 150) / (1 + exp(-5.61 + 0.04 * 150))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 0.5962827\n```\n\n\n:::\n:::\n\n\n## Python\n\n\n::: {.cell}\n\n```{.python .cell-code}\nmath.exp(-5.61 + 0.04 * 150) / (1 + math.exp(-5.61 + 0.04 * 150))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n0.5962826992964\n```\n\n\n:::\n:::\n\n:::\n\nThis tells us that the probability of having a positive diabetes test result, given a glucose tolerance level of 150, is around 60%.\n\n:::\n:::\n\n## Summary\n\n::: {.callout-tip}\n#### Key points\n\n- We use a logistic regression to model a binary response\n- We can feed new observations into the model and get probabilities for the outcome\n:::\n", + "markdown": "---\ntitle: \"Binary response\"\n---\n\n::: {.cell}\n\n:::\n\n::: {.cell}\n\n:::\n\n\n::: {.callout-tip}\n## Learning outcomes\n\n**Questions**\n\n- How do we analyse data with a binary outcome?\n- Can we test if our model is any good?\n- Be able to perform a logistic regression with a binary outcome\n- Predict outcomes of new data, based on a defined model\n\n**Objectives**\n\n- Be able to analyse binary outcome data\n- Understand different methods of testing model fit\n- Be able to make model predictions\n:::\n\n## Libraries and functions\n\n::: {.callout-note collapse=\"true\"}\n## Click to expand\n\n::: {.panel-tabset group=\"language\"}\n## R\n\n### Libraries\n### Functions\n\n## Python\n\n### Libraries\n\n\n::: {.cell}\n\n```{.python .cell-code}\n# A maths library\nimport math\n# A Python data analysis and manipulation tool\nimport pandas as pd\n\n# Python equivalent of `ggplot2`\nfrom plotnine import *\n\n# Statistical models, conducting tests and statistical data exploration\nimport statsmodels.api as sm\n\n# Convenience interface for specifying models using formula strings and DataFrames\nimport statsmodels.formula.api as smf\n```\n:::\n\n\n### Functions\n:::\n:::\n\nThe example in this section uses the following data set:\n\n`data/finches_early.csv`\n\nThese data come from an analysis of gene flow across two finch species [@lamichhaney2020]. They are slightly adapted here for illustrative purposes.\n\nThe data focus on two species, _Geospiza fortis_ and _G. scandens_. 
The original measurements are split by a uniquely timed event: a particularly strong El Niño event in 1983. This event changed the vegetation and food supply of the finches, allowing F1 hybrids of the two species to survive, whereas before 1983 they could not. The measurements are classed as `early` (pre-1983) and `late` (1983 onwards).\n\nHere we are looking only at the `early` data. We are specifically focussing on the beak shape classification, which we saw earlier in @fig-beak_shape_glm.\n\n## Load and visualise the data\n\nFirst we load the data, then we visualise it.\n\n::: {.panel-tabset group=\"language\"}\n## R\n\n\n::: {.cell}\n\n```{.r .cell-code}\nearly_finches <- read_csv(\"data/finches_early.csv\")\n```\n:::\n\n\n## Python\n\n\n::: {.cell}\n\n```{.python .cell-code}\nearly_finches_py = pd.read_csv(\"data/finches_early.csv\")\n```\n:::\n\n\n:::\n\nLooking at the data, we can see that the `pointed_beak` column contains zeros and ones. These are actually yes/no classification outcomes and not numeric representations.\n\nWe'll have to deal with this soon. For now, we can plot the data:\n\n::: {.panel-tabset group=\"language\"}\n## R\n\n\n::: {.cell}\n\n```{.r .cell-code}\nggplot(early_finches,\n aes(x = factor(pointed_beak),\n y = blength)) +\n geom_boxplot()\n```\n\n::: {.cell-output-display}\n![](glm-practical-logistic-binary_files/figure-html/unnamed-chunk-6-1.png){width=672}\n:::\n:::\n\n\n## Python\n\nWe could just give Python the `pointed_beak` data directly, but then it would view the values as numeric, which doesn't really work because we have two groups: those with a pointed beak (`1`), and those with a blunt one (`0`).\n\nWe can force Python to temporarily convert the data to a factor, by making the `pointed_beak` column an `object` type. We can do this directly inside the `ggplot()` function.\n\n\n::: {.cell}\n\n```{.python .cell-code}\n(ggplot(early_finches_py,\n aes(x = early_finches_py.pointed_beak.astype(object),\n y = \"blength\")) +\n geom_boxplot())\n```\n\n::: {.cell-output-display}\n![](glm-practical-logistic-binary_files/figure-html/unnamed-chunk-7-1.png){width=614}\n:::\n:::\n\n:::\n\nIt looks as though the finches with blunt beaks generally have shorter beak lengths.\n\nWe can visualise that differently by plotting all the data points as a classic binary response plot:\n\n::: {.panel-tabset group=\"language\"}\n## R\n\n\n::: {.cell}\n\n```{.r .cell-code}\nggplot(early_finches,\n aes(x = blength, y = pointed_beak)) +\n geom_point()\n```\n\n::: {.cell-output-display}\n![](glm-practical-logistic-binary_files/figure-html/unnamed-chunk-8-3.png){width=672}\n:::\n:::\n\n\n## Python\n\n\n::: {.cell}\n\n```{.python .cell-code}\n(ggplot(early_finches_py,\n aes(x = \"blength\",\n y = \"pointed_beak\")) +\n geom_point())\n```\n\n::: {.cell-output-display}\n![](glm-practical-logistic-binary_files/figure-html/unnamed-chunk-9-1.png){width=614}\n:::\n:::\n\n\n:::\n\nThis presents us with a bit of an issue. 
We could fit a linear regression model to these data, although we already know that this is a bad idea...\n\n::: {.panel-tabset group=\"language\"}\n## R\n\n\n::: {.cell}\n\n```{.r .cell-code}\nggplot(early_finches,\n aes(x = blength, y = pointed_beak)) +\n geom_point() +\n geom_smooth(method = \"lm\", se = FALSE)\n```\n\n::: {.cell-output-display}\n![](glm-practical-logistic-binary_files/figure-html/unnamed-chunk-10-3.png){width=672}\n:::\n:::\n\n\n## Python\n\n\n::: {.cell}\n\n```{.python .cell-code}\n(ggplot(early_finches_py,\n aes(x = \"blength\",\n y = \"pointed_beak\")) +\n geom_point() +\n geom_smooth(method = \"lm\",\n colour = \"blue\",\n se = False))\n```\n\n::: {.cell-output-display}\n![](glm-practical-logistic-binary_files/figure-html/unnamed-chunk-11-1.png){width=614}\n:::\n:::\n\n\n:::\n\nOf course this is rubbish - we can't have a beak classification outside the range of $[0, 1]$. It's either blunt (`0`) or pointed (`1`).\n\nBut for the sake of exploration, let's look at the assumptions:\n\n::: {.panel-tabset group=\"language\"}\n## R\n\n\n::: {.cell}\n\n```{.r .cell-code}\nlm_bks <- lm(pointed_beak ~ blength,\n data = early_finches)\n\nresid_panel(lm_bks,\n plots = c(\"resid\", \"qq\", \"ls\", \"cookd\"),\n smoother = TRUE)\n```\n\n::: {.cell-output-display}\n![](glm-practical-logistic-binary_files/figure-html/unnamed-chunk-12-3.png){width=672}\n:::\n:::\n\n\n## Python\n\nFirst, we create a linear model:\n\n\n::: {.cell}\n\n```{.python .cell-code}\n# create a linear model\nmodel = smf.ols(formula = \"pointed_beak ~ blength\",\n data = early_finches_py)\n# and get the fitted parameters of the model\nlm_bks_py = model.fit()\n```\n:::\n\n\nNext, we can create the diagnostic plots:\n\n::: {.cell}\n\n```{.python .cell-code}\ndgplots(lm_bks_py)\n```\n:::\n\n::: {.cell}\n::: {.cell-output-display}\n![](glm-practical-logistic-binary_files/figure-html/unnamed-chunk-15-1.png){width=96}\n:::\n:::\n\n::: {.cell}\n::: {.cell-output-display}\n![](images/dgplots/2024_01_19-06-05-54_PM_dgplots.png){width=804}\n:::\n:::\n\n\n:::\n\nThey're ~~pretty~~ extremely bad.\n\n- The response is not linear (Residual Plot, binary response plot, common sense).\n- The residuals do not appear to be distributed normally (Q-Q Plot)\n- The variance is not homogeneous across the predicted values (Location-Scale Plot)\n- But - there is always a silver lining - we don't have influential data points.\n\n## Creating a suitable model\n\nSo far we've established that using a simple linear model to describe a potential relationship between beak length and the probability of having a pointed beak is not a good idea. So, what _can_ we do?\n\nOne of the ways we can deal with binary outcome data is by performing a logistic regression. Instead of fitting a straight line to our data, and performing a regression on that, we fit a line that has an S shape. This avoids the model making predictions outside the $[0, 1]$ range.\n\nWe described our standard linear relationship as follows:\n\n$Y = \\beta_0 + \\beta_1X$\n\nWe can now map this to our non-linear relationship via the **logistic link function**:\n\n$Y = \\frac{\\exp(\\beta_0 + \\beta_1X)}{1 + \\exp(\\beta_0 + \\beta_1X)}$\n\nNote that the $\\beta_0 + \\beta_1X$ part is identical to the formula of a straight line.\n\nThe rest of the function is what makes the straight line curve into its characteristic S shape. 
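\n\nTo make the link function a little more tangible, here is a minimal sketch of our own (the `logistic()` helper is a name we have made up for illustration; it is not defined in the course materials):\n\n::: {.cell}\n\n```{.r .cell-code}\n# write out the logistic link function by hand\nlogistic <- function(x, beta_0 = 0, beta_1 = 1) {\n  exp(beta_0 + beta_1 * x) / (1 + exp(beta_0 + beta_1 * x))\n}\n\n# the output always stays within [0, 1]\nlogistic(c(-5, 0, 5))\n```\n:::\n\nBase R also provides this curve directly as `plogis(beta_0 + beta_1 * x)`, which computes the same quantity.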
\n\n:::{.callout-note collapse=true}\n## Euler's number ($\\exp$): would you like to know more?\n\nIn mathematics, $\\rm e$ represents a constant of around 2.718. Another notation is $\\exp$, which is often used when notations become a bit cumbersome. Here, I exclusively use the $\\exp$ notation for consistency.\n:::\n\n::: {.callout-important}\n## The logistic function\n\nThe shape of the logistic function is hugely influenced by the different parameters, in particular $\\beta_1$. The plots below show different situations, where $\\beta_0 = 0$ in all cases, but $\\beta_1$ varies.\n\nThe first plot shows the logistic function in its simplest form, with the others showing the effect of varying $\\beta_1$.\n\n\n::: {.cell}\n::: {.cell-output-display}\n![](glm-practical-logistic-binary_files/figure-html/unnamed-chunk-17-1.png){width=672}\n:::\n:::\n\n\n* when $\\beta_1 = 1$, this gives the simplest logistic function\n* when $\\beta_1 = 0$, we get a horizontal line, with $Y = \\frac{\\exp(\\beta_0)}{1+\\exp(\\beta_0)}$\n* when $\\beta_1$ is negative, the curve flips around, so it slopes down\n* when $\\beta_1$ is very large, the curve becomes extremely steep\n\n:::\n\nWe can fit such an S-shaped curve to our `early_finches` data set, by creating a generalised linear model.\n\n::: {.panel-tabset group=\"language\"}\n## R\n\nIn R we have a few options to do this, and by far the most familiar function would be `glm()`. Here we save the model in an object called `glm_bks`:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nglm_bks <- glm(pointed_beak ~ blength,\n family = binomial,\n data = early_finches)\n```\n:::\n\n\nThe format of this function is similar to that used by the `lm()` function for linear models. The important difference is that we must specify the _family_ of error distribution to use. For logistic regression we must set the family to **binomial**.\n\nIf you forget to set the `family` argument, then the `glm()` function will perform a standard linear model fit, identical to what the `lm()` function would do.\n\n## Python\n\nIn Python we have a few options to do this, and by far the most familiar function would be `glm()`. Here we save the model in an object called `glm_bks_py`:\n\n\n::: {.cell}\n\n```{.python .cell-code}\n# create a generalised linear model\nmodel = smf.glm(formula = \"pointed_beak ~ blength\",\n family = sm.families.Binomial(),\n data = early_finches_py)\n# and get the fitted parameters of the model\nglm_bks_py = model.fit()\n```\n:::\n\n\nThe format of this function is similar to that used by the `ols()` function for linear models. The important difference is that we must specify the _family_ of error distribution to use. For logistic regression we must set the family to **binomial**. This is buried deep inside the `statsmodels` package and needs to be defined as `sm.families.Binomial()`.\n\n:::\n\n## Model output\n\nThat's the easy part done! The trickier part is interpreting the output. First of all, we'll get some summary information.\n\n::: {.panel-tabset group=\"language\"}\n## R\n\n\n::: {.cell}\n\n```{.r .cell-code}\nsummary(glm_bks)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\nCall:\nglm(formula = pointed_beak ~ blength, family = binomial, data = early_finches)\n\nCoefficients:\n Estimate Std. Error z value Pr(>|z|) \n(Intercept) -43.410 15.250 -2.847 0.00442 **\nblength 3.387 1.193 2.839 0.00452 **\n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1\n\n(Dispersion parameter for binomial family taken to be 1)\n\n Null deviance: 84.5476 on 60 degrees of freedom\nResidual deviance: 9.1879 on 59 degrees of freedom\nAIC: 13.188\n\nNumber of Fisher Scoring iterations: 8\n```\n\n\n:::\n:::\n\n\n\n## Python\n\n\n::: {.cell}\n\n```{.python .cell-code}\nprint(glm_bks_py.summary())\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n Generalized Linear Model Regression Results \n==============================================================================\nDep. Variable: pointed_beak No. Observations: 61\nModel: GLM Df Residuals: 59\nModel Family: Binomial Df Model: 1\nLink Function: Logit Scale: 1.0000\nMethod: IRLS Log-Likelihood: -4.5939\nDate: Fri, 19 Jan 2024 Deviance: 9.1879\nTime: 18:05:55 Pearson chi2: 15.1\nNo. Iterations: 8 Pseudo R-squ. (CS): 0.7093\nCovariance Type: nonrobust \n==============================================================================\n coef std err z P>|z| [0.025 0.975]\n------------------------------------------------------------------------------\nIntercept -43.4096 15.250 -2.847 0.004 -73.298 -13.521\nblength 3.3866 1.193 2.839 0.005 1.049 5.724\n==============================================================================\n```\n\n\n:::\n:::\n\n\n:::\n\nThere’s a lot to unpack here, but let's start with what we're familiar with: coefficients!\n\n## Parameter interpretation\n\n::: {.panel-tabset group=\"language\"}\n## R\nThe coefficients or parameters can be found in the `Coefficients` block. The main numbers to extract from the output are the two numbers in the `Estimate` column:\n\n```\nCoefficients:\n Estimate\n(Intercept) -43.410\nblength 3.387 \n```\n\n## Python\n\nRight at the bottom is a table showing the model coefficients. The main numbers to extract from the output are the two numbers in the `coef` column:\n\n```\n======================\n coef\n----------------------\nIntercept -43.4096\nblength 3.3866\n======================\n```\n\n:::\n\nThese are the coefficients of the logistic model equation and need to be placed in the correct equation if we want to be able to calculate the probability of having a pointed beak for a given beak length.\n\nThe $p$ values at the end of each coefficient row merely show whether that particular coefficient is significantly different from zero. This is similar to the $p$ values obtained in the summary output of a linear model. As with continuous predictors in simple models, these $p$ values can be used to decide whether that predictor is important (so in this case beak length appears to be significant). However, these $p$ values aren’t great to work with when we have multiple predictor variables, or when we have categorical predictors with multiple levels (since the output will give us a $p$ value for each level rather than for the predictor as a whole).\n\nWe can use the coefficients to calculate the probability of having a pointed beak for a given beak length:\n\n$$ P(pointed \ beak) = \\frac{\\exp(-43.41 + 3.39 \\times blength)}{1 + \\exp(-43.41 + 3.39 \\times blength)} $$\n\nHaving this formula means that we can calculate the probability of having a pointed beak for any beak length. How do we work this out in practice? 
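\n\nOne shortcut worth knowing about (this is standard R, shown here as a sketch; the worked examples below do the same sums by hand): we can ask R for the prediction directly from the fitted model object, using `predict()`:\n\n::: {.cell}\n\n```{.r .cell-code}\n# predicted probability of a pointed beak for a beak length of 15 mm\npredict(glm_bks, newdata = data.frame(blength = 15), type = \"response\")\n```\n:::\n\nSetting `type = \"response\"` returns the prediction on the probability scale rather than the log-odds scale.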
\n\n::: {.panel-tabset group=\"language\"}\n## R\n\nWell, the probability of having a pointed beak if the beak length is large (for example 15 mm) can be calculated as follows:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nexp(-43.41 + 3.39 * 15) / (1 + exp(-43.41 + 3.39 * 15))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 0.9994131\n```\n\n\n:::\n:::\n\n\nIf the beak length is small (for example 10 mm), the probability of having a pointed beak is extremely low:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nexp(-43.41 + 3.39 * 10) / (1 + exp(-43.41 + 3.39 * 10))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 7.410155e-05\n```\n\n\n:::\n:::\n\n\n## Python\n\nWell, the probability of having a pointed beak if the beak length is large (for example 15 mm) can be calculated as follows:\n\n\n::: {.cell}\n\n```{.python .cell-code}\n# import the math library\nimport math\n```\n:::\n\n::: {.cell}\n\n```{.python .cell-code}\nmath.exp(-43.41 + 3.39 * 15) / (1 + math.exp(-43.41 + 3.39 * 15))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n0.9994130595039192\n```\n\n\n:::\n:::\n\n\nIf the beak length is small (for example 10 mm), the probability of having a pointed beak is extremely low:\n\n\n::: {.cell}\n\n```{.python .cell-code}\nmath.exp(-43.41 + 3.39 * 10) / (1 + math.exp(-43.41 + 3.39 * 10))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n7.410155028945912e-05\n```\n\n\n:::\n:::\n\n:::\n\nWe can calculate the probabilities for all our observed values. If we do that, we can see that the larger the beak length, the higher the probability that the beak shape is pointed. I'm visualising this together with the logistic curve, where the blue points are the calculated probabilities:\n\n::: {.callout-note collapse=true}\n## Code available here\n\n::: {.panel-tabset group=\"language\"}\n## R\n\n\n::: {.cell}\n\n```{.r .cell-code}\nglm_bks %>% \n augment(type.predict = \"response\") %>% \n ggplot() +\n geom_point(aes(x = blength, y = pointed_beak)) +\n geom_line(aes(x = blength, y = .fitted),\n linetype = \"dashed\",\n colour = \"blue\") +\n geom_point(aes(x = blength, y = .fitted),\n colour = \"blue\", alpha = 0.5) +\n labs(x = \"beak length (mm)\",\n y = \"Probability\")\n```\n:::\n\n\n## Python\n\n\n::: {.cell}\n\n```{.python .cell-code}\n(ggplot(early_finches_py) +\n geom_point(aes(x = \"blength\", y = \"pointed_beak\")) +\n geom_line(aes(x = \"blength\", y = glm_bks_py.fittedvalues),\n linetype = \"dashed\",\n colour = \"blue\") +\n geom_point(aes(x = \"blength\", y = glm_bks_py.fittedvalues),\n colour = \"blue\", alpha = 0.5) +\n labs(x = \"beak length (mm)\",\n y = \"Probability\"))\n```\n:::\n\n:::\n:::\n\n\n::: {.cell}\n::: {.cell-output-display}\n![Predicted probabilities for beak classification](glm-practical-logistic-binary_files/figure-html/fig-beak_class_glm_probs-2.png){#fig-beak_class_glm_probs width=672}\n:::\n:::\n\n\nThe graph shows us that, based on our data and the model we fitted, the probability of seeing a pointed beak increases with beak length.\n\nShort beaks are more closely associated with the bluntly shaped beaks, whereas long beaks are more closely associated with the pointed shape.
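\n\nRather than copying the coefficients over by hand, we can also ask the fitted model for these probabilities directly. A quick sketch using `predict()` in R (assuming the tidyverse is loaded, so that `tibble()` is available):\n\n```r\n# predicted probability of a pointed beak at a few beak lengths\npredict(glm_bks, newdata = tibble(blength = c(10, 13, 15)), type = \"response\")\n```\n\n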
It's also clear that there is a range of beak lengths (around 13 mm) where the probability of getting one shape or another is much more even.\n\n## Exercises\n\n### Diabetes {#sec-exr_diabetes}\n\n:::{.callout-exercise}\n\n{{< level 2 >}}\n\nFor this exercise we'll be using the data from `data/diabetes.csv`.\n\nThis is a data set comprising 768 observations of three variables (one dependent and two predictor variables). It records the result of a diabetes test as a binary variable (1 is a positive result, 0 is a negative result), along with the result of a glucose tolerance test and the diastolic blood pressure for each of 768 women. The variables are called `test_result`, `glucose` and `diastolic`.\n\nWe want to see if the `glucose` tolerance test result is a meaningful predictor of the diabetes test result. To investigate this, do the following:\n\n1. Load and visualise the data\n2. Create a suitable model\n3. Determine if there are any statistically significant predictors\n4. Calculate the probability of a positive diabetes test result for a glucose tolerance test value of `glucose = 150`\n\n::: {.callout-answer collapse=\"true\"}\n\n#### Load and visualise the data\n\nFirst we load the data, then we visualise it.\n\n::: {.panel-tabset group=\"language\"}\n## R\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndiabetes <- read_csv(\"data/diabetes.csv\")\n```\n:::\n\n\n## Python\n\n\n::: {.cell}\n\n```{.python .cell-code}\ndiabetes_py = pd.read_csv(\"data/diabetes.csv\")\n```\n:::\n\n\n:::\n\nLooking at the data, we can see that the `test_result` column contains zeros and ones. These are yes/no test result outcomes and not actually numeric representations.\n\nWe'll have to deal with this soon. For now, we can plot the data by outcome:\n\n::: {.panel-tabset group=\"language\"}\n## R\n\n\n::: {.cell}\n\n```{.r .cell-code}\nggplot(diabetes,\n aes(x = factor(test_result),\n y = glucose)) +\n geom_boxplot()\n```\n\n::: {.cell-output-display}\n![](glm-practical-logistic-binary_files/figure-html/unnamed-chunk-32-1.png){width=672}\n:::\n:::\n\n\n## Python\n\nWe could just give Python the `test_result` data directly, but then it would view the values as numeric. That doesn't really work, because we have two distinct groups: those with a negative diabetes test result, and those with a positive one.\n\nWe can force Python to temporarily convert the data to a factor by making the `test_result` column an `object` type.
We can do this directly inside the `ggplot()` function.\n\n\n::: {.cell}\n\n```{.python .cell-code}\n(ggplot(diabetes_py,\n aes(x = diabetes_py.test_result.astype(object),\n y = \"glucose\")) +\n geom_boxplot())\n```\n\n::: {.cell-output-display}\n![](glm-practical-logistic-binary_files/figure-html/unnamed-chunk-33-1.png){width=614}\n:::\n:::\n\n:::\n\nIt looks as though the patients with a positive diabetes test have slightly higher glucose levels than those with a negative diabetes test.\n\nWe can visualise that differently by plotting all the data points as a classic binary response plot:\n\n::: {.panel-tabset group=\"language\"}\n## R\n\n\n::: {.cell}\n\n```{.r .cell-code}\nggplot(diabetes,\n aes(x = glucose,\n y = test_result)) +\n geom_point()\n```\n\n::: {.cell-output-display}\n![](glm-practical-logistic-binary_files/figure-html/unnamed-chunk-34-3.png){width=672}\n:::\n:::\n\n\n## Python\n\n\n::: {.cell}\n\n```{.python .cell-code}\n(ggplot(diabetes_py,\n aes(x = \"glucose\",\n y = \"test_result\")) +\n geom_point())\n```\n\n::: {.cell-output-display}\n![](glm-practical-logistic-binary_files/figure-html/unnamed-chunk-35-1.png){width=614}\n:::\n:::\n\n\n:::\n\n#### Create a suitable model\n\n::: {.panel-tabset group=\"language\"}\n## R\n\nWe'll use the `glm()` function to create a generalised linear model. Here we save the model in an object called `glm_dia`:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nglm_dia <- glm(test_result ~ glucose,\n family = binomial,\n data = diabetes)\n```\n:::\n\n\nThe format of this function is similar to that used by the `lm()` function for linear models. The important difference is that we must specify the _family_ of error distribution to use. For logistic regression we must set the family to **binomial**.\n\n## Python\n\n\n::: {.cell}\n\n```{.python .cell-code}\n# create a generalised linear model\nmodel = smf.glm(formula = \"test_result ~ glucose\",\n family = sm.families.Binomial(),\n data = diabetes_py)\n# and get the fitted parameters of the model\nglm_dia_py = model.fit()\n```\n:::\n\n\n:::\n\nLet's look at the model parameters:\n\n::: {.panel-tabset group=\"language\"}\n## R\n\n\n::: {.cell}\n\n```{.r .cell-code}\nsummary(glm_dia)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\nCall:\nglm(formula = test_result ~ glucose, family = binomial, data = diabetes)\n\nCoefficients:\n Estimate Std. Error z value Pr(>|z|) \n(Intercept) -5.611732 0.442289 -12.69 <2e-16 ***\nglucose 0.039510 0.003398 11.63 <2e-16 ***\n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\n(Dispersion parameter for binomial family taken to be 1)\n\n Null deviance: 936.6 on 727 degrees of freedom\nResidual deviance: 752.2 on 726 degrees of freedom\nAIC: 756.2\n\nNumber of Fisher Scoring iterations: 4\n```\n\n\n:::\n:::\n\n\n\n## Python\n\n\n::: {.cell}\n\n```{.python .cell-code}\nprint(glm_dia_py.summary())\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n Generalized Linear Model Regression Results \n==============================================================================\nDep. Variable: test_result No. Observations: 728\nModel: GLM Df Residuals: 726\nModel Family: Binomial Df Model: 1\nLink Function: Logit Scale: 1.0000\nMethod: IRLS Log-Likelihood: -376.10\nDate: Fri, 19 Jan 2024 Deviance: 752.20\nTime: 18:05:59 Pearson chi2: 713.\nNo. Iterations: 4 Pseudo R-squ. (CS): 0.2238\nCovariance Type: nonrobust \n==============================================================================\n coef std err z P>|z| [0.025 0.975]\n------------------------------------------------------------------------------\nIntercept -5.6117 0.442 -12.688 0.000 -6.479 -4.745\nglucose 0.0395 0.003 11.628 0.000 0.033 0.046\n==============================================================================\n```\n\n\n:::\n:::\n\n:::\n\nWe can see that `glucose` is a significant predictor for the `test_result` (the $p$ value is much smaller than 0.05).\n\nKnowing this, we're interested in the coefficients. We have an intercept of `-5.61` and `0.0395` for `glucose`. We can use these coefficients to write a formula that describes the potential relationship between the probability of having a positive test result and the glucose tolerance level:\n\n$$ P(positive \\ test\\ result) = \\frac{\\exp(-5.61 + 0.04 \\times glucose)}{1 + \\exp(-5.61 + 0.04 \\times glucose)} $$
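\n\nAs an aside: these coefficients are on the log-odds scale, so exponentiating them gives odds ratios, which some people find easier to interpret. A quick sketch in R (standard accessors, not part of the original answer):\n\n```r\n# odds ratios for the model coefficients\nexp(coef(glm_dia))\n```\n\nThe value for `glucose` tells us by how much the odds of a positive test result are multiplied for each unit increase in glucose.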
\n\n#### Calculating probabilities\n\nUsing the formula above, we can now calculate the probability of having a positive test result, for a given `glucose` value. If we do this for `glucose = 150`, we get the following:\n\n::: {.panel-tabset group=\"language\"}\n## R\n\n\n::: {.cell}\n\n```{.r .cell-code}\nexp(-5.61 + 0.04 * 150) / (1 + exp(-5.61 + 0.04 * 150))\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 0.5962827\n```\n\n\n:::\n:::\n\n\n## Python\n\n\n::: {.cell}\n\n```{.python .cell-code}\nround(math.exp(-5.61 + 0.04 * 150) / (1 + math.exp(-5.61 + 0.04 * 150)), 7)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n0.5962827\n```\n\n\n:::\n:::\n\n:::\n\nThis tells us that the probability of having a positive diabetes test result, given a glucose tolerance level of 150, is around 60%.\n\n:::\n:::\n\n## Summary\n\n::: {.callout-tip}\n#### Key points\n\n- We use a logistic regression to model a binary response\n- We can feed new observations into the model and get probabilities for the outcome\n:::\n", "supporting": [ "glm-practical-logistic-binary_files" ], diff --git a/_freeze/materials/glm-practical-logistic-proportion/execute-results/html.json b/_freeze/materials/glm-practical-logistic-proportion/execute-results/html.json new file mode 100644 index 0000000..afe5a1f --- /dev/null +++ b/_freeze/materials/glm-practical-logistic-proportion/execute-results/html.json @@ -0,0 +1,17 @@ +{ + "hash": "afed90465b1cade91182440ceb6c776d", + "result": { + "engine": "knitr", + "markdown": "---\ntitle: \"Proportional response\"\n---\n\n::: {.cell}\n\n:::\n\n\n::: {.callout-tip}\n## Learning outcomes\n\n- How do I analyse proportion responses?\n- Be able to create a logistic model to test proportion response variables\n- Be able to plot the data and fitted curve\n- Assess the significance of the fit\n:::\n\n## Libraries and functions\n\n::: {.callout-note collapse=\"true\"}\n## Click to expand\n\n::: {.panel-tabset group=\"language\"}\n## R\n\n### Libraries\n### Functions\n\n## Python\n\n### Libraries\n\n\n::: {.cell}\n\n```{.python .cell-code}\n# A maths library\nimport math\n# A Python data analysis and manipulation tool\nimport pandas as pd\n\n# Python equivalent of `ggplot2`\nfrom plotnine import *\n\n# Statistical models, conducting tests and statistical data exploration\nimport statsmodels.api as sm\n\n# Convenience interface for specifying models using formula strings and DataFrames\nimport statsmodels.formula.api as smf\n```\n:::\n\n\n### Functions\n:::\n:::\n\nThe example in this section uses the
following data set:\n\n`data/challenger.csv`\n\nThese data, obtained from the [faraway package](https://www.rdocumentation.org/packages/faraway/versions/1.0.7), contain information related to the explosion of the US Space Shuttle Challenger on 28 January 1986. An investigation after the disaster traced the cause back to certain joints on one of the two solid booster rockets, each containing o-rings that ensured no exhaust gases could escape from the booster.\n\nThe night before the launch was unusually cold, with temperatures below freezing. The final report suggested that the cold snap during the night made the o-rings stiff and unable to adjust to changes in pressure. As a result, exhaust gases leaked away from the solid booster rockets, causing one of them to break loose and rupture the main fuel tank, leading to the final explosion.\n\nThe question we're trying to answer in this session is: based on the data from the previous flights, would it have been possible to predict the failure of most o-rings on the Challenger flight?\n\n## Load and visualise the data\n\nFirst we load the data, then we visualise it.\n\n::: {.panel-tabset group=\"language\"}\n## R\n\n\n::: {.cell}\n\n```{.r .cell-code}\nchallenger <- read_csv(\"data/challenger.csv\")\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nRows: 23 Columns: 2\n── Column specification ────────────────────────────────────────────────────────\nDelimiter: \",\"\ndbl (2): temp, damage\n\nℹ Use `spec()` to retrieve the full column specification for this data.\nℹ Specify the column types or set `show_col_types = FALSE` to quiet this message.\n```\n\n\n:::\n:::\n\n:::\n\nThe data set contains two columns:\n\n1. `temp`, the launch temperature in degrees Fahrenheit\n2. `damage`, the number of o-rings that showed erosion\n\nBefore we have a further look at the data, let's calculate the proportion of damaged o-rings (`prop_damaged`) and the total number of o-rings (`total`) and update our data set.\n\n::: {.panel-tabset group=\"language\"}\n## R\n\n\n::: {.cell}\n\n```{.r .cell-code}\nchallenger <-\nchallenger %>%\n mutate(total = 6, # total number of o-rings\n intact = 6 - damage, # number of undamaged o-rings\n prop_damaged = damage / total) # proportion damaged o-rings\n\nchallenger\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 23 × 5\n temp damage total intact prop_damaged\n <dbl> <dbl> <dbl> <dbl> <dbl>\n 1 53 5 6 1 0.833\n 2 57 1 6 5 0.167\n 3 58 1 6 5 0.167\n 4 63 1 6 5 0.167\n 5 66 0 6 6 0 \n 6 67 0 6 6 0 \n 7 67 0 6 6 0 \n 8 67 0 6 6 0 \n 9 68 0 6 6 0 \n10 69 0 6 6 0 \n# ℹ 13 more rows\n```\n\n\n:::\n:::\n\n:::\n\nPlotting the proportion of damaged o-rings against the launch temperature shows the following picture:\n\n::: {.panel-tabset group=\"language\"}\n## R\n\n\n::: {.cell}\n\n```{.r .cell-code}\nggplot(challenger, aes(x = temp, y = prop_damaged)) +\n geom_point()\n```\n\n::: {.cell-output-display}\n![](glm-practical-logistic-proportion_files/figure-html/unnamed-chunk-5-1.png){width=672}\n:::\n:::\n\n:::\n\nThe point on the left is the data point corresponding to the coldest flight experienced before the disaster, where five damaged o-rings were found. Fortunately, this did not result in a disaster.\n\nHere we'll explore if we could have predicted the failure of most o-rings on the Challenger flight, where the launch temperature was 31 degrees Fahrenheit.\n\n## Creating a suitable model\n\nWe only have 23 data points in total.
So we're building a model on relatively little data, and we should keep this in mind when we draw our conclusions!\n\n::: {.panel-tabset group=\"language\"}\n## R\n\nWe are using a logistic regression for a proportion response in this case, since we're interested in the proportion of o-rings that are damaged.\n\nWe can define this as follows:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nglm_chl <- glm(cbind(damage, intact) ~ temp,\n family = binomial,\n data = challenger)\n```\n:::\n\n\nDefining the relationship for proportion responses is a bit more involved: you have to give the `glm()` model a two-column matrix to specify the response variable.\n\nHere, the first column corresponds to the number of damaged o-rings, whereas the second column refers to the number of intact o-rings. We use the `cbind()` function to bind these two together into a matrix.\n\n:::\n\n## Model output\n\nThat's the easy part done! The trickier part is interpreting the output. First of all, we'll get some summary information.\n\n::: {.panel-tabset group=\"language\"}\n## R\n\nNext, we can have a closer look at the results:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nsummary(glm_chl)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\nCall:\nglm(formula = cbind(damage, intact) ~ temp, family = binomial, \n data = challenger)\n\nCoefficients:\n Estimate Std. Error z value Pr(>|z|) \n(Intercept) 11.66299 3.29626 3.538 0.000403 ***\ntemp -0.21623 0.05318 -4.066 4.78e-05 ***\n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\n(Dispersion parameter for binomial family taken to be 1)\n\n Null deviance: 38.898 on 22 degrees of freedom\nResidual deviance: 16.912 on 21 degrees of freedom\nAIC: 33.675\n\nNumber of Fisher Scoring iterations: 6\n```\n\n\n:::\n:::\n\n\nWe can see that both the intercept and `temp` coefficients are significantly different from zero. We can use these coefficients to construct the logistic equation, which we can use to sketch the logistic curve.\n\n:::\n\n$$E(prop \\ failed\\ orings) = \\frac{\\exp{(11.66 - 0.22 \\times temp)}}{1 + \\exp{(11.66 - 0.22 \\times temp)}}$$\n\nLet's see how well our model would have performed if we had fed it the data from the ill-fated Challenger launch.\n\n::: {.panel-tabset group=\"language\"}\n## R\n\n\n::: {.cell}\n\n```{.r .cell-code}\nggplot(challenger, aes(temp, prop_damaged)) +\n geom_point() +\n geom_smooth(method = \"glm\", se = FALSE, fullrange = TRUE, \n method.args = list(family = binomial)) +\n xlim(25,85)\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nWarning in eval(family$initialize): non-integer #successes in a binomial glm!\n```\n\n\n:::\n\n::: {.cell-output-display}\n![](glm-practical-logistic-proportion_files/figure-html/unnamed-chunk-8-1.png){width=672}\n:::\n:::\n\n\n(The warning appears because `geom_smooth()` refits the model on the proportions themselves, rather than on the two-column counts we used above; for sketching the curve this is fine.)\n\n\n::: {.callout-note collapse=true}\n## Generating predicted values\n\n::: {.panel-tabset group=\"language\"}\n## R\n\nAnother way of doing this is to generate a table with data for a range of temperatures, from 25 to 85 degrees Fahrenheit, in steps of 1.
We can then use these data to generate the logistic curve, based on the fitted model.\n\n\n::: {.cell}\n\n```{.r .cell-code}\n# create a table with sequential numbers ranging from 25 to 85\nmodel <- tibble(temp = seq(25, 85, by = 1)) %>% \n # add a new column containing the predicted values\n mutate(.pred = predict(glm_chl, newdata = ., type = \"response\"))\n\nggplot(model, aes(temp, .pred)) +\n geom_line()\n```\n\n::: {.cell-output-display}\n![](glm-practical-logistic-proportion_files/figure-html/unnamed-chunk-9-1.png){width=672}\n:::\n:::\n\n::: {.cell}\n\n```{.r .cell-code}\n# plot the curve and the original data\nggplot(model, aes(temp, .pred)) +\n geom_line(colour = \"blue\") +\n geom_point(data = challenger, aes(temp, prop_damaged)) +\n # add a vertical line at the disaster launch temperature\n geom_vline(xintercept = 31, linetype = \"dashed\")\n```\n\n::: {.cell-output-display}\n![](glm-practical-logistic-proportion_files/figure-html/unnamed-chunk-10-1.png){width=672}\n:::\n:::\n\n\nIt seems that there was a high probability of most o-rings failing at that launch temperature (plugging 31 degrees Fahrenheit into the logistic equation gives a predicted proportion of damaged o-rings of around 0.99). One thing that the graph shows is that there is a lot of uncertainty involved in this model. We can tell because the fit of the line is very poor at the lower temperature range. There is just very little data to work on, with the data point at 53 F having a large influence on the fit.\n:::\n:::\n:::\n\n## Exercises\n\n### Predicting failure {#sec-exr_failure}\n\n:::{.callout-exercise}\n\n{{< level 2 >}}\n\nThe data point at 53 degrees Fahrenheit is quite influential for the analysis. Remove this data point and repeat the analysis. Is there still a predicted link between launch temperature and o-ring failure?\n\n::: {.callout-answer collapse=\"true\"}\n\n::: {.panel-tabset group=\"language\"}\n## R\n\nFirst, we need to remove the influential data point:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nchallenger_new <- challenger %>% filter(temp != 53)\n```\n:::\n\n\nWe can create a new generalised linear model based on these data:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nglm_chl_new <- glm(cbind(damage, intact) ~ temp,\n family = binomial,\n data = challenger_new)\n```\n:::\n\n\nWe can get the model parameters as follows:\n\n\n::: {.cell}\n\n```{.r .cell-code}\nsummary(glm_chl_new)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n\nCall:\nglm(formula = cbind(damage, intact) ~ temp, family = binomial, \n data = challenger_new)\n\nCoefficients:\n Estimate Std. Error z value Pr(>|z|) \n(Intercept) 5.68223 4.43138 1.282 0.1997 \ntemp -0.12817 0.06697 -1.914 0.0556 .\n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\n(Dispersion parameter for binomial family taken to be 1)\n\n Null deviance: 16.375 on 21 degrees of freedom\nResidual deviance: 12.633 on 20 degrees of freedom\nAIC: 27.572\n\nNumber of Fisher Scoring iterations: 5\n```\n\n\n:::\n:::\n\n::: {.cell}\n\n```{.r .cell-code}\nggplot(challenger_new, aes(temp, prop_damaged)) +\n geom_point() +\n geom_smooth(method = \"glm\", se = FALSE, fullrange = TRUE, \n method.args = list(family = binomial)) +\n xlim(25,85) +\n # add a vertical line at 53 F temperature\n geom_vline(xintercept = 53, linetype = \"dashed\")\n```\n\n::: {.cell-output .cell-output-stderr}\n\n```\nWarning in eval(family$initialize): non-integer #successes in a binomial glm!\n```\n\n\n:::\n\n::: {.cell-output-display}\n![](glm-practical-logistic-proportion_files/figure-html/unnamed-chunk-14-1.png){width=672}\n:::\n:::\n\n\nThe predicted proportion of damaged o-rings is markedly lower than what was observed.\n\nBefore we can make any firm conclusions, though, we need to check our model:\n\n\n::: {.cell}\n\n```{.r .cell-code}\n1 - pchisq(12.633, 20)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 0.8925695\n```\n\n\n:::\n:::\n\n\nWe get quite a high p-value (around 0.89) for this, which tells us that our goodness of fit is pretty good – our points are quite close to our curve, overall.
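\n\nRather than typing the deviance and degrees of freedom in by hand, you can also pull them straight from the model object. A sketch of the same check (standard R accessors, not part of the original answer):\n\n```r\n# goodness of fit: residual deviance against a chi-squared distribution\npchisq(deviance(glm_chl_new), df.residual(glm_chl_new), lower.tail = FALSE)\n```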
\n\nIs the model any better than the null though?\n\n\n::: {.cell}\n\n```{.r .cell-code}\n1 - pchisq(16.375 - 12.633, 1)\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n[1] 0.0530609\n```\n\n\n:::\n\n```{.r .cell-code}\nanova(glm_chl_new, test = 'Chisq')\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\nAnalysis of Deviance Table\n\nModel: binomial, link: logit\n\nResponse: cbind(damage, intact)\n\nTerms added sequentially (first to last)\n\n Df Deviance Resid. Df Resid. Dev Pr(>Chi) \nNULL 21 16.375 \ntemp 1 3.7421 20 12.633 0.05306 .\n---\nSignif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n```\n\n\n:::\n:::\n\n\nHowever, the model is not significantly better than the null in this case, with a p-value here of just over 0.05 for both of these tests (they give a similar result since, yet again, we have just the one predictor variable).\n:::\n\nSo, could NASA have predicted what happened? This model is not significantly different from the null, i.e., temperature is not a significant predictor. Note that it’s only marginally non-significant, and we do have a high goodness-of-fit value.\n\nIt is possible that, if more data points following a similar trend had been available, the story might have been different. Even if we did use our non-significant model to make a prediction, it doesn’t give us a value anywhere near 5 failures for a temperature of 53 degrees Fahrenheit.
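\n\nYou can check this by feeding 53 degrees Fahrenheit back into the refitted model. A sketch (assuming the tidyverse is loaded, so that `tibble()` is available):\n\n```r\n# predicted proportion of damaged o-rings at 53 F\npredict(glm_chl_new, newdata = tibble(temp = 53), type = \"response\")\n```\n\nThis works out at a predicted proportion of roughly 0.25, i.e. around 1.5 of the 6 o-rings, nowhere near the 5 failures that were observed.\n\n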
So overall, based on the model we’ve fitted with these data, there was no indication that a temperature just a few degrees cooler than previous missions could have been so disastrous for the Challenger.\n:::\n:::\n\n## Summary\n\n::: {.callout-tip}\n#### Key points\n\n- We can use a logistic model for proportion response variables\n\n:::\n", + "supporting": [ + "glm-practical-logistic-proportion_files" + ], + "filters": [ + "rmarkdown/pagebreak.lua" + ], + "includes": {}, + "engineDependencies": {}, + "preserve": {}, + "postProcess": true + } +} \ No newline at end of file diff --git a/_freeze/materials/glm-practical-logistic-proportion/figure-html/unnamed-chunk-10-1.png b/_freeze/materials/glm-practical-logistic-proportion/figure-html/unnamed-chunk-10-1.png new file mode 100644 index 0000000..9b3e566 Binary files /dev/null and b/_freeze/materials/glm-practical-logistic-proportion/figure-html/unnamed-chunk-10-1.png differ diff --git a/_freeze/materials/glm-practical-logistic-proportion/figure-html/unnamed-chunk-14-1.png b/_freeze/materials/glm-practical-logistic-proportion/figure-html/unnamed-chunk-14-1.png new file mode 100644 index 0000000..5ea53a3 Binary files /dev/null and b/_freeze/materials/glm-practical-logistic-proportion/figure-html/unnamed-chunk-14-1.png differ diff --git a/materials/glm-practical-logistic-proportion_files/figure-html/unnamed-chunk-4-1.png b/_freeze/materials/glm-practical-logistic-proportion/figure-html/unnamed-chunk-4-1.png similarity index 100% rename from materials/glm-practical-logistic-proportion_files/figure-html/unnamed-chunk-4-1.png rename to _freeze/materials/glm-practical-logistic-proportion/figure-html/unnamed-chunk-4-1.png diff --git a/_freeze/materials/glm-practical-logistic-proportion/figure-html/unnamed-chunk-5-1.png b/_freeze/materials/glm-practical-logistic-proportion/figure-html/unnamed-chunk-5-1.png new file mode 100644 index 0000000..cd17d72 Binary files /dev/null and b/_freeze/materials/glm-practical-logistic-proportion/figure-html/unnamed-chunk-5-1.png differ diff --git a/_freeze/materials/glm-practical-logistic-proportion/figure-html/unnamed-chunk-8-1.png b/_freeze/materials/glm-practical-logistic-proportion/figure-html/unnamed-chunk-8-1.png new file mode 100644 index 0000000..7c097f1 Binary files /dev/null and b/_freeze/materials/glm-practical-logistic-proportion/figure-html/unnamed-chunk-8-1.png differ diff --git a/_freeze/materials/glm-practical-logistic-proportion/figure-html/unnamed-chunk-9-1.png b/_freeze/materials/glm-practical-logistic-proportion/figure-html/unnamed-chunk-9-1.png new file mode 100644 index 0000000..101c3bb Binary files /dev/null and b/_freeze/materials/glm-practical-logistic-proportion/figure-html/unnamed-chunk-9-1.png differ diff --git a/_quarto.yml b/_quarto.yml index eb2f396..0c1ef0f 100644 --- a/_quarto.yml +++ b/_quarto.yml @@ -16,6 +16,9 @@ project: format: courseformat-html +filters: + - courseformat + bibliography: references.bib execute: