From 0ae212394735f46dfd773b35201536dd42e37743 Mon Sep 17 00:00:00 2001 From: Patrick Date: Sat, 8 May 2021 00:17:25 +0200 Subject: [PATCH] Bugfix release v1.0.1 Fixes #8 --- DESCRIPTION | 2 +- R/clean_data.R | 10 +++++----- README.md | 6 +++--- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/DESCRIPTION b/DESCRIPTION index 25fdf6e..ed70f77 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -1,7 +1,7 @@ Package: OpenPoseR Type: Package Title: Analyze Motion-Tracking Data Derived from Video Files Using OpenPose -Version: 1.0 +Version: 1.0.1 Authors@R: c( person("Patrick C.", "Trettenbrein", email = "trettenbrein@cbs.mpg.de", role = c("aut", "cre")), person("Emiliano", "Zaccarella", email = "zaccarella@cbs.mpg.de", role = "aut")) diff --git a/R/clean_data.R b/R/clean_data.R index c7275ca..b4d0a51 100644 --- a/R/clean_data.R +++ b/R/clean_data.R @@ -47,13 +47,13 @@ clean_data <- function(data, model, cutoff = .1) { for(c in 1:ncol(data_points)) { # We do not need to continue if nothing was tracked, i.e. all data is 0, # or if the first few values are 0 (i.e. point wasn't detected) - # If points were not detecetd initially the whole column will be set to 0 + # If points were not detected initially the whole column will be set to 0 if(!all(data_points[c]==0) && data_points[1,c]!=0 && data_points[2,c]!=0 && data_points[3,c]!=0) { for(r in 2:nrow(data_points)-1) { # Check what to do with 0 values - if(data_points[r,c]==0 && data_points[r-1,c]!=0) { - # If point wasn't dedected (position = 0), compute mean from +-1 frame + if(!is.na(data_points[r,c]) && !is.na(data_points[r-1,c]) && data_points[r,c]==0 && data_points[r-1,c]!=0) { + # If point wasn't detected (position = 0), compute mean from +-1 frame # If there is more than one consecutive non-zero points, use the next # tracked point instead and fill the gap with means. 
For the final # point (last row) in a data frame don't do this but use the value of @@ -61,7 +61,7 @@ clean_data <- function(data, model, cutoff = .1) { # imputing the missing data at the end of a data frame with the value # of the last detected point. - # Last non-zero value should alwys be row above (-1) + # Last non-zero value should always be row above (-1) last_non_zero <- -1 # Get next non-zero value @@ -84,7 +84,7 @@ clean_data <- function(data, model, cutoff = .1) { } } } else { - # Because the points were not detected intially, set them to 0 + # Because the points were not detected initially, set them to 0 data_points[c] <- 0 } } diff --git a/README.md b/README.md index 1cccd3f..cf82c33 100644 --- a/README.md +++ b/README.md @@ -1,13 +1,13 @@ # OpenPoseR An [R](https://www.r-project.org) package that provides functions for analyzing motion-tracking data derived from video files using [OpenPose](https://github.com/CMU-Perceptual-Computing-Lab/openpose). -The original motivation for creating this package was to control video stimuli in sign language and gesture reserach, but the provided functionality may also be useful for other purposes. +The original motivation for creating this package was to control video stimuli in sign language and gesture research, but the provided functionality may also be useful for other purposes. ## What is this? OpenPoseR can be used to analyze motion-tracking data derived from video files using [OpenPose](https://github.com/CMU-Perceptual-Computing-Lab/openpose). In other words, OpenPoseR does *not* provide any motion-tracking capabilities by itself. You will need to install and run OpenPose on your system first to perform the actual motion tracking analysis. Then, the OpenPoseR package provides a variety of R functions that can be used to analyse the output generated by OpenPose. -[OpenPose](https://github.com/CMU-Perceptual-Computing-Lab/openpose) is currently the most sophisticated means for tracking people in video clips. 
The results of motion-tracking of people in video clips with OpenPose can be used for further quantitiatve analysis that allows for quantification of movement paramters which are relevant to researchers working on sign language and gesture where bodily movements a person take on linguistic and/or discourse functions. +[OpenPose](https://github.com/CMU-Perceptual-Computing-Lab/openpose) is currently the most sophisticated means for tracking people in video clips. The results of motion-tracking of people in video clips with OpenPose can be used for further quantitative analysis that allows for quantification of movement parameters which are relevant to researchers working on sign language and gesture where bodily movements of a person take on linguistic and/or discourse functions. [Example video](demo/data/psychologie.mp4) | [Example video with fit body-pose model](doc/examples_body25/psychologie_body25.mp4) :-------------------------:|:-------------------------: @@ -33,7 +33,7 @@ For details on what OpenPoseR can (and can't) do, respectively, how you can use ## Installation -For now, OpenPoseR (current version: 1.0) can be installed using the following commands (you will need to have the ``devtools`` package installed): +For now, OpenPoseR (current version: 1.0.1) can be installed using the following commands (you will need to have the ``devtools`` package installed): ```r # Install devtools from CRAN (if not already installed)