diff --git a/.all-contributorsrc b/.all-contributorsrc
index 5912b36ba..ba8816707 100644
--- a/.all-contributorsrc
+++ b/.all-contributorsrc
@@ -649,6 +649,16 @@
"ideas",
"example"
]
+ },
+ {
+ "login": "w3cj",
+ "name": "CJ R.",
+ "avatar_url": "https://avatars0.githubusercontent.com/u/14241866?v=4",
+ "profile": "https://coding.garden",
+ "contributions": [
+ "doc",
+ "content"
+ ]
}
],
"contributorsPerLine": 7,
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index df592d8d5..ea8fd1b3c 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -249,7 +249,7 @@ We're still rolling out all of our unit tests, but if you want to contribute t
```npm run test:single```
- To run a test on a single model
- ```npm run test -- --model:YourModelNameHere```
+ ```npm run test -- --model=YourModelNameHere```
This last one is case sensitive!
diff --git a/README.md b/README.md
index 2629983ae..90abc93d1 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,5 @@
# ![ml5](https://user-images.githubusercontent.com/10605821/41332516-2ee26714-6eac-11e8-83e4-a40b8761e764.png)
-[![All Contributors](https://img.shields.io/badge/all_contributors-60-orange.svg?style=flat-square)](#contributors)
+[![All Contributors](https://img.shields.io/badge/all_contributors-61-orange.svg?style=flat-square)](#contributors)
[![BrowserStack Status](https://www.browserstack.com/automate/badge.svg?badge_key=QVNDdlkvMzNYSmhRRWlweXlIOTBENkd0MDBCOUJlbmFVZFRneFIzNlh4az0tLXA4S0loSGNlVUc2V2I3cVdLdXBKdGc9PQ==--8a5e5bfd3eafbba0702c02ec57ffec9d627a78ef)](https://www.browserstack.com/automate/public-build/QVNDdlkvMzNYSmhRRWlweXlIOTBENkd0MDBCOUJlbmFVZFRneFIzNlh4az0tLXA4S0loSGNlVUc2V2I3cVdLdXBKdGc9PQ==--8a5e5bfd3eafbba0702c02ec57ffec9d627a78ef)[![Version](https://img.shields.io/npm/v/ml5.svg?style=flat-square)](https://www.npmjs.com/package/ml5)
@@ -26,19 +26,21 @@ There are several ways you can use the ml5.js library:
+
-* You can use the latest version (0.4.0) by adding it to the head section of your HTML document:
+* You can use the latest version (0.4.1) by adding it to the head section of your HTML document:
-**v0.4.0**
+**v0.4.1**
-<script src="https://unpkg.com/ml5@0.4.0/dist/ml5.min.js"></script>
+<script src="https://unpkg.com/ml5@0.4.1/dist/ml5.min.js"></script>
+
* If you need to use an earlier version for any reason, you can change the version number. The [previous versions of ml5 can be found here](https://www.npmjs.com/package/ml5). You can use those previous versions by replacing `<version>` with the ml5 version of interest:
@@ -49,7 +51,7 @@ There are several ways you can use the ml5.js library:
For example:
```js
-<script src="https://unpkg.com/ml5@0.4.0/dist/ml5.min.js"></script>
+<script src="https://unpkg.com/ml5@0.4.1/dist/ml5.min.js"></script>
```
* You can also reference "latest", but we do not recommend this as your code may break as we update ml5.
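Once one of the script tags above is in place, the library is exposed as a global `ml5` object. A minimal smoke test (the MobileNet classifier is just one of the models documented below):

```js
// Verify the CDN build loaded, then warm up a model.
console.log('ml5 is', typeof ml5); // "object" once the script has loaded
const classifier = ml5.imageClassifier('MobileNet', () => {
  console.log('MobileNet is ready to classify images');
});
```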
@@ -170,6 +172,7 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d
 ![Emma Goodliffe](https://avatars1.githubusercontent.com/u/32788926?v=4) Emma Goodliffe 🤔 💬 🚧 |
 ![Yang](https://avatars3.githubusercontent.com/u/9332910?v=4) Yang 💻 🐛 |
 ![Lydia Jessup](https://avatars3.githubusercontent.com/u/26204298?v=4) Lydia Jessup 💻 🤔 💡 |
+ ![CJ R.](https://avatars0.githubusercontent.com/u/14241866?v=4) CJ R. 📖 🖋 |
diff --git a/docs/README.md b/docs/README.md
index a860570a9..ef1ae3900 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -19,13 +19,13 @@ The fastest way to get started exploring the creative possibilities of ml5.js ar
3. You can also copy and paste the cdn link to the ml5 library here:
```
-<script src="https://unpkg.com/ml5@0.4.0/dist/ml5.min.js"></script>
+<script src="https://unpkg.com/ml5@0.4.1/dist/ml5.min.js"></script>
```
***
#### Quickstart: Plain JavaScript
-Reference the [latest version](https://unpkg.com/ml5@0.4.0/dist/ml5.min.js) of ml5.js using a script tag in an HTML file as below:
+Reference the [latest version](https://unpkg.com/ml5@0.4.1/dist/ml5.min.js) of ml5.js using a script tag in an HTML file as below:
In an **index.html** file, copy and paste the following and open up that file in your web browser.
@@ -34,15 +34,15 @@ In an **index.html** file, copy and paste the following and open up that file in
-    <title>Getting Started with ml5.js and p5.js</title>
+    <title>Getting Started with ml5.js</title>
-    <script src="https://unpkg.com/ml5@0.4.0/dist/ml5.min.js"></script>
+    <script src="https://unpkg.com/ml5@0.4.1/dist/ml5.min.js"></script>
@@ -71,7 +71,7 @@ In an **index.html** file, copy and paste the following and open up that file in
-    <script src="https://unpkg.com/ml5@0.4.0/dist/ml5.min.js"></script>
+    <script src="https://unpkg.com/ml5@0.4.1/dist/ml5.min.js"></script>
diff --git a/docs/_sidebar.md b/docs/_sidebar.md
index 48f37dc7d..745d730ca 100644
--- a/docs/_sidebar.md
+++ b/docs/_sidebar.md
@@ -52,7 +52,6 @@
* [CharRNN](/reference/charrnn.md)
* [Sentiment](/reference/sentiment.md)
* [Word2Vec](/reference/word2vec.md)
- * [SketchRNN](/reference/sketchrnn.md)
* **Contributing** π
@@ -62,4 +61,4 @@
* Style Guide
* [Reference guidelines](/styleguide/reference-guidelines.md)
* [Development guidelines](/styleguide/development-guidelines.md)
- * [Design guidelines](/styleguide/design-guidelines.md)
\ No newline at end of file
+ * [Design guidelines](/styleguide/design-guidelines.md)
diff --git a/docs/reference/bodypix.md b/docs/reference/bodypix.md
index 134907a86..c34bf74e9 100644
--- a/docs/reference/bodypix.md
+++ b/docs/reference/bodypix.md
@@ -234,7 +234,10 @@ bodyPix.segmentWithParts(?input, ?options, callback)
* [BodyPix_p5Instance](https://github.com/ml5js/ml5-examples/tree/development/p5js/BodyPix/BodyPix_p5Instance)
**p5 web editor**
-* none yet
+* [BodyPix_Image](https://editor.p5js.org/ml5/sketches/BodyPix_Image)
+* [BodyPix_Webcam](https://editor.p5js.org/ml5/sketches/BodyPix_Webcam)
+* [BodyPix_Webcam_Parts](https://editor.p5js.org/ml5/sketches/BodyPix_Webcam_Parts)
+* [BodyPix_p5Instance](https://editor.p5js.org/ml5/sketches/BodyPix_p5Instance)
**plain javascript**
* [BodyPix_Image](https://github.com/ml5js/ml5-examples/tree/development/javascript/BodyPix/BodyPix_Image)
diff --git a/docs/reference/charrnn.md b/docs/reference/charrnn.md
index 7e9535a56..59478d5c9 100644
--- a/docs/reference/charrnn.md
+++ b/docs/reference/charrnn.md
@@ -159,9 +159,9 @@ charrnn.reset()
**p5 web editor**
-* [CharRNN_Interactive](https://editor.p5js.org/ml5/sketches/u7FVGffzX)
-* [CharRNN_Text](https://editor.p5js.org/ml5/sketches/fAzNrafbpa)
-* [CharRNN_Text_Stateful](https://editor.p5js.org/ml5/sketches/AmPtRcgoX7)
+* [CharRNN_Interactive](https://editor.p5js.org/ml5/sketches/CharRNN_Interactive)
+* [CharRNN_Text](https://editor.p5js.org/ml5/sketches/CharRNN_Text)
+* [CharRNN_Text_Stateful](https://editor.p5js.org/ml5/sketches/CharRNN_Text_Stateful)
**plain javascript**
* [CharRNN_Interactive](https://github.com/ml5js/ml5-examples/tree/development/javascript/CharRNN/CharRNN_Interactive)
diff --git a/docs/reference/cvae.md b/docs/reference/cvae.md
index ff32601a7..7514e5f5e 100644
--- a/docs/reference/cvae.md
+++ b/docs/reference/cvae.md
@@ -88,7 +88,7 @@ cvae.generate(label, callback);
* [CVAE_QuickDraw](https://github.com/ml5js/ml5-examples/tree/development/p5js/CVAE/CVAE_QuickDraw)
**p5 web editor**
-* [CVAE_QuickDraw]()
+* [CVAE_QuickDraw](https://editor.p5js.org/ml5/sketches/CVAE_QuickDraw)
**plain javascript**
* [CVAE_QuickDraw](https://github.com/ml5js/ml5-examples/tree/development/javascript/CVAE/CVAE_QuickDraw)
diff --git a/docs/reference/dcgan.md b/docs/reference/dcgan.md
index 033edfa80..0f907513e 100644
--- a/docs/reference/dcgan.md
+++ b/docs/reference/dcgan.md
@@ -101,13 +101,6 @@ dcgan.generate(callback, ?latentVector);
## Examples
-**plain javascript**
-* [DCGAN_LatentVector](https://github.com/ml5js/ml5-examples/tree/development/javascript/DCGAN/DCGAN_LatentVector)
-* [DCGAN_LatentVector_RandomWalk](https://github.com/ml5js/ml5-examples/tree/development/javascript/DCGAN/DCGAN_LatentVector_RandomWalk)
-* [DCGAN_LatentVector_Slider](https://github.com/ml5js/ml5-examples/tree/development/javascript/DCGAN/DCGAN_LatentVector_Slider)
-* [DCGAN_Random](https://github.com/ml5js/ml5-examples/tree/development/javascript/DCGAN/DCGAN_Random)
-
-
**p5.js**
* [DCGAN_LatentVector](https://github.com/ml5js/ml5-examples/tree/development/p5js/DCGAN/DCGAN_LatentVector)
* [DCGAN_LatentVector_RandomWalk](https://github.com/ml5js/ml5-examples/tree/development/p5js/DCGAN/DCGAN_LatentVector_RandomWalk)
@@ -115,10 +108,19 @@ dcgan.generate(callback, ?latentVector);
* [DCGAN_Random](https://github.com/ml5js/ml5-examples/tree/development/p5js/DCGAN/DCGAN_Random)
**p5 web editor**
-* [DCGAN_LatentVector]()
-* [DCGAN_LatentVector_RandomWalk]()
-* [DCGAN_LatentVector_Slider]()
-* [DCGAN_Random]()
+* [DCGAN_LatentVector](https://editor.p5js.org/ml5/sketches/DCGAN_LatentVector)
+* [DCGAN_LatentVector_RandomWalk](https://editor.p5js.org/ml5/sketches/DCGAN_LatentVector_RandomWalk)
+* [DCGAN_LatentVector_Slider](https://editor.p5js.org/ml5/sketches/DCGAN_LatentVector_Slider)
+* [DCGAN_Random](https://editor.p5js.org/ml5/sketches/DCGAN_Random)
+
+
+**plain javascript**
+* [DCGAN_LatentVector](https://github.com/ml5js/ml5-examples/tree/development/javascript/DCGAN/DCGAN_LatentVector)
+* [DCGAN_LatentVector_RandomWalk](https://github.com/ml5js/ml5-examples/tree/development/javascript/DCGAN/DCGAN_LatentVector_RandomWalk)
+* [DCGAN_LatentVector_Slider](https://github.com/ml5js/ml5-examples/tree/development/javascript/DCGAN/DCGAN_LatentVector_Slider)
+* [DCGAN_Random](https://github.com/ml5js/ml5-examples/tree/development/javascript/DCGAN/DCGAN_Random)
+
+
## Demo
diff --git a/docs/reference/face-api.md b/docs/reference/face-api.md
index d63e87f57..a07ceb6df 100644
--- a/docs/reference/face-api.md
+++ b/docs/reference/face-api.md
@@ -147,8 +147,8 @@ faceapi.detectSingle(optionsOrCallback, configOrCallback, cb)
* [FaceApi_Video_Landmarks_LocalModels](https://github.com/ml5js/ml5-examples/tree/development/p5js/FaceApi/FaceApi_Video_Landmarks_LocalModels)
**p5 web editor**
-* [FaceApi_Image_Landmarks]() - coming soon
-* [FaceApi_Video_Landmarks]() - coming soon
+* [FaceApi_Image_Landmarks](https://editor.p5js.org/ml5/sketches/FaceApi_Image_Landmarks)
+* [FaceApi_Video_Landmarks](https://editor.p5js.org/ml5/sketches/FaceApi_Video_Landmarks)
**plain javascript**
* [FaceApi_Image_Landmarks](https://github.com/ml5js/ml5-examples/tree/development/javascript/FaceApi/FaceApi_Image_Landmarks/)
diff --git a/docs/reference/feature-extractor.md b/docs/reference/feature-extractor.md
index f1a33a8c8..ebb5c2af7 100644
--- a/docs/reference/feature-extractor.md
+++ b/docs/reference/feature-extractor.md
@@ -225,8 +225,9 @@ featureExtractor.predict(input, ?callback)
* [FeatureExtractor_Image_Classification](https://github.com/ml5js/ml5-examples/tree/development/p5js/FeatureExtractor/FeatureExtractor_Image_Classification)
**p5 web editor**
-* [FeatureExtractor_Image_Classification](https://editor.p5js.org/ml5/sketches/4AWAKkeZBx)
-* [FeatureExtractor_Image_Regression](https://editor.p5js.org/ml5/sketches/AAA54W1ajd)
+* [FeatureExtractor_Image_Classification](https://editor.p5js.org/ml5/sketches/FeatureExtractor_Image_Classification)
+* [FeatureExtractor_Image_Regression](https://editor.p5js.org/ml5/sketches/FeatureExtractor_Image_Regression)
+
**plain javascript**
* [FeatureExtractor_Image_Regression](https://github.com/ml5js/ml5-examples/tree/development/javascript/FeatureExtractor/FeatureExtractor_Image_Regression)
diff --git a/docs/reference/image-classifier.md b/docs/reference/image-classifier.md
index 77a7b27f3..3f986ec82 100644
--- a/docs/reference/image-classifier.md
+++ b/docs/reference/image-classifier.md
@@ -137,14 +137,15 @@ classifier.classify(?numberOfClasses ,?callback)
**p5.js Web Editor**
-* [ImageClassification](https://editor.p5js.org/ml5/sketches/DUxe1Z0DXG)
-* [ImageClassification_DoodleNet_Canvas]() - coming soon
-* [ImageClassification_DoodleNet_Video]() - coming soon
-* [ImageClassification_MultipleImages](https://editor.p5js.org/ml5/sketches/f3rqIqNey5)
-* [ImageClassification_Teachable-Machine]() - coming soon
-* [ImageClassification_Video](https://editor.p5js.org/ml5/sketches/IlF1JFvWjc)
-* [ImageClassification_VideoScavengerHunt](https://editor.p5js.org/ml5/sketches/APzpeXOuEQ)
-* [ImageClassification_VideoSound](https://editor.p5js.org/ml5/sketches/Ry7EL4JvA3)
+* [ImageClassification](https://editor.p5js.org/ml5/sketches/ImageClassification)
+* [ImageClassification_DoodleNet_Canvas](https://editor.p5js.org/ml5/sketches/ImageClassification_DoodleNet_Canvas)
+* [ImageClassification_DoodleNet_Video](https://editor.p5js.org/ml5/sketches/ImageClassification_DoodleNet_Video)
+* [ImageClassification_MultipleImages](https://editor.p5js.org/ml5/sketches/ImageClassification_MultipleImages)
+* [ImageClassification_Teachable-Machine](https://editor.p5js.org/ml5/sketches/ImageClassification_Teachable-Machine)
+* [ImageClassification_Video](https://editor.p5js.org/ml5/sketches/ImageClassification_Video)
+* [ImageClassification_VideoScavengerHunt](https://editor.p5js.org/ml5/sketches/ImageClassification_VideoScavengerHunt)
+* [ImageClassification_VideoSound](https://editor.p5js.org/ml5/sketches/ImageClassification_VideoSound)
+* [ImageClassification_VideoSoundTranslate](https://editor.p5js.org/ml5/sketches/ImageClassification_VideoSoundTranslate)
**plain javascript**
* [ImageClassification](https://github.com/ml5js/ml5-examples/tree/development/javascript/ImageClassification/ImageClassification)
diff --git a/docs/reference/kmeans.md b/docs/reference/kmeans.md
index 5f7677151..056dcd4ea 100644
--- a/docs/reference/kmeans.md
+++ b/docs/reference/kmeans.md
@@ -93,7 +93,7 @@ const kmeans = ml5.kmeans(data, ?options, ?callback);
* [KMeans_imageSegmentation](https://github.com/ml5js/ml5-examples/tree/development/p5js/KMeans/KMeans_imageSegmentation/)
**p5 web editor**
-* coming soon
+* [KMeans_imageSegmentation](https://editor.p5js.org/ml5/sketches/KMeans_imageSegmentation/)
**plain javascript**
* coming soon
diff --git a/docs/reference/knn-classifier.md b/docs/reference/knn-classifier.md
index c9a968462..0dc2628a5 100644
--- a/docs/reference/knn-classifier.md
+++ b/docs/reference/knn-classifier.md
@@ -233,10 +233,10 @@ knnClassifier.load(path, callback?)
* [KNNClassification_VideoSquare](https://github.com/ml5js/ml5-examples/tree/development/p5js/KNNClassification/KNNClassification_VideoSquare)
**p5 web editor**
-* [KNNClassification_PoseNet](https://editor.p5js.org/ml5/sketches/4c7Fgs7baz)
-* [KNNClassification_Video](https://editor.p5js.org/ml5/sketches/ZIDZyjW5LM)
-* [KNNClassification_VideoSound](https://editor.p5js.org/ml5/sketches/It5-jyEY2R)
-* [KNNClassification_VideoSquare](https://editor.p5js.org/ml5/sketches/S3JfKStZ8I)
+* [KNNClassification_PoseNet](https://editor.p5js.org/ml5/sketches/KNNClassification_PoseNet)
+* [KNNClassification_Video](https://editor.p5js.org/ml5/sketches/KNNClassification_Video)
+* [KNNClassification_VideoSound](https://editor.p5js.org/ml5/sketches/KNNClassification_VideoSound)
+* [KNNClassification_VideoSquare](https://editor.p5js.org/ml5/sketches/KNNClassification_VideoSquare)
**plain javascript**
diff --git a/docs/reference/neural-network.md b/docs/reference/neural-network.md
index b257adbab..0e795473b 100644
--- a/docs/reference/neural-network.md
+++ b/docs/reference/neural-network.md
@@ -454,7 +454,21 @@ neuralNetwork.load(?filesOrPath, ?callback)
- [NeuralNetwork_xy_classifier](https://github.com/ml5js/ml5-examples/tree/development/p5js/NeuralNetwork/NeuralNetwork_xy_classifier)
**p5 web editor**
-* coming soon
+- [NeuralNetwork_Simple-Classification](https://editor.p5js.org/ml5/sketches/NeuralNetwork_Simple-Classification)
+- [NeuralNetwork_Simple-Regression](https://editor.p5js.org/ml5/sketches/NeuralNetwork_Simple-Regression)
+- [NeuralNetwork_XOR](https://editor.p5js.org/ml5/sketches/NeuralNetwork_XOR)
+- [NeuralNetwork_basics](https://editor.p5js.org/ml5/sketches/NeuralNetwork_basics)
+- [NeuralNetwork_co2net](https://editor.p5js.org/ml5/sketches/NeuralNetwork_co2net)
+- [NeuralNetwork_color_classifier](https://editor.p5js.org/ml5/sketches/NeuralNetwork_color_classifier)
+- [NeuralNetwork_load_model](https://editor.p5js.org/ml5/sketches/NeuralNetwork_load_model)
+- [NeuralNetwork_load_saved_data](https://editor.p5js.org/ml5/sketches/NeuralNetwork_load_saved_data)
+- [NeuralNetwork_lowres_pixels](https://editor.p5js.org/ml5/sketches/NeuralNetwork_lowres_pixels)
+- [NeuralNetwork_multiple-layers](https://editor.p5js.org/ml5/sketches/NeuralNetwork_multiple-layers)
+- [NeuralNetwork_musical_face](https://editor.p5js.org/ml5/sketches/NeuralNetwork_musical_face)
+- [NeuralNetwork_musical_mouse](https://editor.p5js.org/ml5/sketches/NeuralNetwork_musical_mouse)
+- [NeuralNetwork_pose_classifier](https://editor.p5js.org/ml5/sketches/NeuralNetwork_pose_classifier)
+- [NeuralNetwork_titanic](https://editor.p5js.org/ml5/sketches/NeuralNetwork_titanic)
+- [NeuralNetwork_xy_classifier](https://editor.p5js.org/ml5/sketches/NeuralNetwork_xy_classifier)
**plain javascript**
diff --git a/docs/reference/pitch-detection.md b/docs/reference/pitch-detection.md
index ee24954ea..45c80d50b 100644
--- a/docs/reference/pitch-detection.md
+++ b/docs/reference/pitch-detection.md
@@ -109,9 +109,9 @@ detector.getPitch(?callback)
* [PitchDetection_Piano](https://github.com/ml5js/ml5-examples/tree/development/p5js/PitchDetection/PitchDetection_Piano)
**p5 web editor**
-* [PitchDetection](https://editor.p5js.org/ml5/sketches/RmX1EsBzKi)
-* [PitchDetection_Game](https://editor.p5js.org/ml5/sketches/kQ-qvPySiw-)
-* [PitchDetection_Piano](https://editor.p5js.org/ml5/sketches/fzc0dAdUpMs)
+* [PitchDetection](https://editor.p5js.org/ml5/sketches/PitchDetection)
+* [PitchDetection_Game](https://editor.p5js.org/ml5/sketches/PitchDetection_Game)
+* [PitchDetection_Piano](https://editor.p5js.org/ml5/sketches/PitchDetection_Piano)
**plain javascript**
* [PitchDetection](https://github.com/ml5js/ml5-examples/tree/development/javascript/PitchDetection/PitchDetection)
diff --git a/docs/reference/pix2pix.md b/docs/reference/pix2pix.md
index 64716d2a0..e2d411456 100644
--- a/docs/reference/pix2pix.md
+++ b/docs/reference/pix2pix.md
@@ -87,8 +87,8 @@ styleTransfer.transfer(canvas, ?callback)
* [Pix2Pix_promise](https://github.com/ml5js/ml5-examples/tree/development/p5js/Pix2Pix/Pix2Pix_promise)
**p5 web editor**
-* [Pix2Pix_callback](https://editor.p5js.org/ml5/sketches/WIvXUJj5fz9)
-* [Pix2Pix_promise](https://editor.p5js.org/ml5/sketches/6TX98ozmRf-)
+* [Pix2Pix_callback](https://editor.p5js.org/ml5/sketches/Pix2Pix_callback)
+* [Pix2Pix_promise](https://editor.p5js.org/ml5/sketches/Pix2Pix_promise)
**plain javascript**
* [Pix2Pix_callback](https://github.com/ml5js/ml5-examples/tree/development/javascript/Pix2Pix/Pix2Pix_callback)
diff --git a/docs/reference/posenet.md b/docs/reference/posenet.md
index aaff83daa..f98c6485b 100644
--- a/docs/reference/posenet.md
+++ b/docs/reference/posenet.md
@@ -247,9 +247,9 @@ poseNet.multiPose(?input)
* [PoseNet_webcam](https://github.com/ml5js/ml5-examples/tree/development/p5js/PoseNet/PoseNet_webcam)
**p5 web editor**
-* [PoseNet_image_single](https://editor.p5js.org/ml5/sketches/MmUoz2_thEa)
-* [PoseNet_part_selection](https://editor.p5js.org/ml5/sketches/I-alLCVhX3S)
-* [PoseNet_webcam](https://editor.p5js.org/ml5/sketches/5FZeotHo76R)
+* [PoseNet_image_single](https://editor.p5js.org/ml5/sketches/PoseNet_image_single)
+* [PoseNet_part_selection](https://editor.p5js.org/ml5/sketches/PoseNet_part_selection)
+* [PoseNet_webcam](https://editor.p5js.org/ml5/sketches/PoseNet_webcam)
**plain javascript**
* [PoseNet_image_single](https://github.com/ml5js/ml5-examples/tree/development/javascript/PoseNet/PoseNet_image_single)
diff --git a/docs/reference/sentiment.md b/docs/reference/sentiment.md
index 265a7d6c6..ddf702a3c 100644
--- a/docs/reference/sentiment.md
+++ b/docs/reference/sentiment.md
@@ -83,7 +83,7 @@ sentiment.predict(text)
* [Sentiment_Interactive](https://github.com/ml5js/ml5-examples/tree/development/p5js/Sentiment/Sentiment_Interactive)
**p5 web editor**
-* [Sentiment_Interactive]() - coming soon
+* [Sentiment_Interactive](https://editor.p5js.org/ml5/sketches/Sentiment_Interactive)
**plain javascript**
* [Sentiment_Interactive](https://github.com/ml5js/ml5-examples/tree/development/javascript/Sentiment/Sentiment_Interactive)
diff --git a/docs/reference/sketchrnn.md b/docs/reference/sketchrnn.md
index f335249d8..fc075d801 100644
--- a/docs/reference/sketchrnn.md
+++ b/docs/reference/sketchrnn.md
@@ -111,8 +111,8 @@ sketchrnn.generate(?seed, ?options, ?callback)
* [SketchRNN_interactive](https://github.com/ml5js/ml5-examples/tree/development/p5js/SketchRNN/SketchRNN_interactive)
**p5 web editor**
-* [SketchRNN_basic](https://editor.p5js.org/ml5/sketches/vSQRE1Sl7F_)
-* [SketchRNN_interactive](https://editor.p5js.org/ml5/sketches/uk4JsSRQgIY)
+* [SketchRNN_basic](https://editor.p5js.org/ml5/sketches/SketchRNN_basic)
+* [SketchRNN_interactive](https://editor.p5js.org/ml5/sketches/SketchRNN_interactive)
**plain javascript**
* [SketchRNN_basic](https://github.com/ml5js/ml5-examples/tree/development/javascript/SketchRNN/_basic)
diff --git a/docs/reference/sound-classifier.md b/docs/reference/sound-classifier.md
index 026a288a5..741a553f4 100644
--- a/docs/reference/sound-classifier.md
+++ b/docs/reference/sound-classifier.md
@@ -100,8 +100,8 @@ soundclassifier.classify(callback);
* [SoundClassification_speechcommand_load](https://github.com/ml5js/ml5-examples/tree/development/p5js/SoundClassification/SoundClassification_speechcommand_load)
**p5 web editor**
-* [SoundClassification_speechcommand]() - coming soon
-* [SoundClassification_speechcommand_load]() - coming soon
+* [SoundClassification_speechcommand](https://editor.p5js.org/ml5/sketches/SoundClassification_speechcommand)
+* [SoundClassification_speechcommand_load](https://editor.p5js.org/ml5/sketches/SoundClassification_speechcommand_load)
**plain javascript**
* [SoundClassification_speechcommand](https://github.com/ml5js/ml5-examples/tree/development/javascript/SoundClassification/SoundClassification_speechcommand)
diff --git a/docs/reference/style-transfer.md b/docs/reference/style-transfer.md
index 48623580c..08bb273c0 100644
--- a/docs/reference/style-transfer.md
+++ b/docs/reference/style-transfer.md
@@ -10,7 +10,7 @@
Style Transfer is a machine learning technique that allows you to transfer the style of one image onto another. This is a two-step process: first you train a model on one particular style, then you apply that style to another image.
-You can train your own images following [this tutorial](/docs/training-styletransfer).
+You can train your own style transfer model by following [this tutorial](https://github.com/ml5js/training-styletransfer).
This implementation is heavily based on [fast-style-transfer-deeplearnjs](https://github.com/reiinakano/fast-style-transfer-deeplearnjs) by [Reiichiro Nakano](https://github.com/reiinakano).
The [original TensorFlow implementation](https://github.com/lengstrom/fast-style-transfer) was developed by [Logan Engstrom](https://github.com/lengstrom)
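For context on the hunk below, the apply step looks roughly like this (a sketch: the `models/wave` path and the `#input` image element are hypothetical, and the model folder must come from the training tutorial above):

```js
// Step 2 of the process: apply a trained style to an image.
const img = document.getElementById('input'); // hypothetical <img> element
const style = ml5.styleTransfer('models/wave', () => {
  style.transfer(img, (err, result) => {
    if (err) return console.error(err);
    const out = new Image();
    out.src = result.src; // the styled output arrives as a data URL
    document.body.appendChild(out);
  });
});
```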
@@ -90,8 +90,8 @@ styletransfer.transfer(input, ?callback)
* [StyleTransfer_Video](https://github.com/ml5js/ml5-examples/tree/development/p5js/StyleTransfer/StyleTransfer_Video)
**p5 web editor**
-* [StyleTransfer_Image](https://editor.p5js.org/ml5/sketches/BgZzKWNk9)
-* [StyleTransfer_Video](https://editor.p5js.org/ml5/sketches/J5NL3u4LJ)
+* [StyleTransfer_Image](https://editor.p5js.org/ml5/sketches/StyleTransfer_Image)
+* [StyleTransfer_Video](https://editor.p5js.org/ml5/sketches/StyleTransfer_Video)
**plain javascript**
* [StyleTransfer_Image](https://github.com/ml5js/ml5-examples/tree/development/javascript/StyleTransfer/StyleTransfer_Image)
diff --git a/docs/reference/unet.md b/docs/reference/unet.md
index 3e7825139..40ecf86dd 100644
--- a/docs/reference/unet.md
+++ b/docs/reference/unet.md
@@ -92,7 +92,7 @@ unet.segment(?video, ?callback);
* [UNET_webcam](https://github.com/ml5js/ml5-examples/tree/development/p5js/UNET/UNET_webcam)
**p5 web editor**
-* [UNET_webcam]()
+* [UNET_webcam](https://editor.p5js.org/ml5/sketches/UNET_webcam)
**plain javascript**
* [UNET_webcam](https://github.com/ml5js/ml5-examples/tree/development/javascript/UNET/UNET_webcam)
diff --git a/docs/reference/word2vec.md b/docs/reference/word2vec.md
index 01214df94..cc8816a8d 100644
--- a/docs/reference/word2vec.md
+++ b/docs/reference/word2vec.md
@@ -183,7 +183,7 @@ word2vec.getRandomWord(?callback)
* [Word2Vec_Interactive](https://github.com/ml5js/ml5-examples/tree/development/p5js/Word2Vec/Word2Vec_Interactive)
**p5 web editor**
-* [Word2Vec_Interactive]()
+* [Word2Vec_Interactive](https://editor.p5js.org/ml5/sketches/Word2Vec_Interactive)
**plain javascript**
* [Word2Vec_Interactive](https://github.com/ml5js/ml5-examples/tree/development/javascript/Word2Vec/Word2Vec_Interactive)
diff --git a/docs/reference/yolo.md b/docs/reference/yolo.md
index 5eb8f507e..2962ab35e 100644
--- a/docs/reference/yolo.md
+++ b/docs/reference/yolo.md
@@ -106,8 +106,8 @@ yolo.detect(?callback)
* [YOLO_webcam](https://github.com/ml5js/ml5-examples/tree/development/p5js/YOLO/YOLO_webcam)
**p5 web editor**
-* [YOLO_single_image](https://editor.p5js.org/ml5/sketches/3cfHZs_0HcL)
-* [YOLO_webcam](https://editor.p5js.org/ml5/sketches/IE_P4q2m0LV)
+* [YOLO_single_image](https://editor.p5js.org/ml5/sketches/YOLO_single_image)
+* [YOLO_webcam](https://editor.p5js.org/ml5/sketches/YOLO_webcam)
**plain javascript**
* [YOLO_single_image](https://github.com/ml5js/ml5-examples/tree/development/javascript/YOLO/YOLO_single_image)
diff --git a/docs/tutorials/hello-ml5.md b/docs/tutorials/hello-ml5.md
index 45d44f311..ab58d6aeb 100644
--- a/docs/tutorials/hello-ml5.md
+++ b/docs/tutorials/hello-ml5.md
@@ -72,7 +72,7 @@ Here you can see that we read in the javascript libraries. This includes our ml5
-<script src="https://unpkg.com/ml5@0.4.0/dist/ml5.min.js"></script>
+<script src="https://unpkg.com/ml5@0.4.1/dist/ml5.min.js"></script>
diff --git a/package-lock.json b/package-lock.json
index e6b6d470c..04628e48a 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -1,6 +1,6 @@
{
"name": "ml5",
- "version": "0.4.0",
+ "version": "0.3.1",
"lockfileVersion": 1,
"requires": true,
"dependencies": {
diff --git a/package.json b/package.json
index 4cf791a3f..753837126 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
{
"name": "ml5",
- "version": "0.4.0",
+ "version": "0.4.1",
"description": "A friendly machine learning library for the web.",
"main": "dist/ml5.min.js",
"directories": {
@@ -16,17 +16,9 @@
"test": "./node_modules/karma/bin/karma start karma.conf.js ",
"test:single": "./node_modules/karma/bin/karma start karma.conf.js --single-run",
"test-travis": "./scripts/test-travis.sh",
- "dev:docs": "docsify serve docs",
- "update:packageVersion": "node ./scripts/updatePackageVersion.js",
- "checkout:latest": "git checkout -b v$npm_package_version",
- "release:prep": "npm-run-all --sequential update:packageVersion checkout:latest",
+ "serve:docs": "docsify serve docs",
"update:readme": "node ./scripts/updateReadme.js",
- "release:build": "npm install && npm run build",
- "release:commitAndPush": "git add . && git commit -m 'bumps version and adds latest build' && git push origin v$npm_package_version",
- "release:tag": "git tag v$npm_package_version && git push --tags",
- "development:sync": "git checkout development && git fetch && git pull",
- "release:sync": "git checkout release && git fetch && git pull",
- "publish:npm": "npm publish"
+ "publish:npm": "npm run build && npm publish"
},
"repository": {
"type": "git",
diff --git a/src/BodyPix/index_test.js b/src/BodyPix/index_test.js
index b1c767a31..bc4f6e29f 100644
--- a/src/BodyPix/index_test.js
+++ b/src/BodyPix/index_test.js
@@ -47,7 +47,7 @@ describe('bodyPix', () => {
return img;
}
- beforeEach(async () => {
+ beforeAll(async () => {
jasmine.DEFAULT_TIMEOUT_INTERVAL = 5000;
bp = await bodyPix();
});
diff --git a/src/CharRNN/index_test.js b/src/CharRNN/index_test.js
index 573c64e2c..5f9dde22e 100644
--- a/src/CharRNN/index_test.js
+++ b/src/CharRNN/index_test.js
@@ -33,20 +33,20 @@ describe('charRnn', () => {
rnn = await charRNN(RNN_MODEL_URL, undefined);
});
- it('instantiates an rnn with all the defaults', async () => {
- expect(rnn.ready).toBeTruthy();
- expect(rnn.defaults.seed).toBe(RNN_DEFAULTS.seed);
- expect(rnn.defaults.length).toBe(RNN_DEFAULTS.length);
- expect(rnn.defaults.temperature).toBe(RNN_DEFAULTS.temperature);
- expect(rnn.defaults.stateful).toBe(RNN_DEFAULTS.stateful);
- });
-
// it('loads the model with all the defaults', async () => {
// expect(rnn.cellsAmount).toBe(RNN_MODEL_DEFAULTS.cellsAmount);
// expect(rnn.vocabSize).toBe(RNN_MODEL_DEFAULTS.vocabSize);
// });
describe('generate', () => {
+ it('instantiates an rnn with all the defaults', async () => {
+ expect(rnn.ready).toBeTruthy();
+ expect(rnn.defaults.seed).toBe(RNN_DEFAULTS.seed);
+ expect(rnn.defaults.length).toBe(RNN_DEFAULTS.length);
+ expect(rnn.defaults.temperature).toBe(RNN_DEFAULTS.temperature);
+ expect(rnn.defaults.stateful).toBe(RNN_DEFAULTS.stateful);
+ });
+
it('Should generate content that follows default options if given an empty object', async() => {
const result = await rnn.generate({});
expect(result.sample.length).toBe(20);
diff --git a/src/FaceApi/index.js b/src/FaceApi/index.js
index 4294eb50c..02c1aa978 100644
--- a/src/FaceApi/index.js
+++ b/src/FaceApi/index.js
@@ -78,13 +78,15 @@ class FaceApiBase {
} = this.config.MODEL_URLS;
this.model = faceapi;
-
- const SsdMobilenetv1Options = this.model.SsdMobilenetv1Options({ minConfidence: this.minConfidence })
+
+ const SsdMobilenetv1Options = this.model.SsdMobilenetv1Options({
+ minConfidence: this.minConfidence
+ })
await this.model.loadSsdMobilenetv1Model(Mobilenetv1Model, SsdMobilenetv1Options)
await this.model.loadFaceLandmarkModel(FaceLandmarkModel)
// await this.model.loadFaceLandmarkTinyModel(FaceLandmark68TinyNet)
await this.model.loadFaceRecognitionModel(FaceRecognitionModel)
-
+
this.modelReady = true;
return this;
}
@@ -101,18 +103,20 @@ class FaceApiBase {
let callback;
let faceApiOptions = this.config;
- // Handle the image to predict
- if (typeof optionsOrCallback === 'function') {
+ // Handle the image to predict
+ if (typeof optionsOrCallback === 'function') {
imgToClassify = this.video;
callback = optionsOrCallback;
// clean the following conditional statement up!
- } else if (optionsOrCallback instanceof HTMLImageElement
- || optionsOrCallback instanceof HTMLCanvasElement
- || optionsOrCallback instanceof ImageData) {
- imgToClassify = optionsOrCallback;
- } else if (typeof optionsOrCallback === 'object' && (optionsOrCallback.elt instanceof HTMLImageElement
- || optionsOrCallback.elt instanceof HTMLCanvasElement
- || optionsOrCallback.elt instanceof ImageData)){
+ } else if (optionsOrCallback instanceof HTMLImageElement ||
+ optionsOrCallback instanceof HTMLCanvasElement ||
+ optionsOrCallback instanceof HTMLVideoElement ||
+ optionsOrCallback instanceof ImageData) {
+ imgToClassify = optionsOrCallback;
+ } else if (typeof optionsOrCallback === 'object' && (optionsOrCallback.elt instanceof HTMLImageElement ||
+ optionsOrCallback.elt instanceof HTMLCanvasElement ||
+ optionsOrCallback.elt instanceof HTMLVideoElement ||
+ optionsOrCallback.elt instanceof ImageData)) {
imgToClassify = optionsOrCallback.elt; // Handle p5.js image
} else if (typeof optionsOrCallback === 'object' && optionsOrCallback.canvas instanceof HTMLCanvasElement) {
imgToClassify = optionsOrCallback.canvas; // Handle p5.js image
@@ -199,13 +203,15 @@ class FaceApiBase {
imgToClassify = this.video;
callback = optionsOrCallback;
// clean the following conditional statement up!
- } else if (optionsOrCallback instanceof HTMLImageElement
- || optionsOrCallback instanceof HTMLCanvasElement
- || optionsOrCallback instanceof ImageData) {
- imgToClassify = optionsOrCallback;
- } else if (typeof optionsOrCallback === 'object' && (optionsOrCallback.elt instanceof HTMLImageElement
- || optionsOrCallback.elt instanceof HTMLCanvasElement
- || optionsOrCallback.elt instanceof ImageData)){
+ } else if (optionsOrCallback instanceof HTMLImageElement ||
+ optionsOrCallback instanceof HTMLCanvasElement ||
+ optionsOrCallback instanceof HTMLVideoElement ||
+ optionsOrCallback instanceof ImageData) {
+ imgToClassify = optionsOrCallback;
+ } else if (typeof optionsOrCallback === 'object' && (optionsOrCallback.elt instanceof HTMLImageElement ||
+ optionsOrCallback.elt instanceof HTMLCanvasElement ||
+ optionsOrCallback.elt instanceof HTMLVideoElement ||
+ optionsOrCallback.elt instanceof ImageData)) {
imgToClassify = optionsOrCallback.elt; // Handle p5.js image
} else if (typeof optionsOrCallback === 'object' && optionsOrCallback.canvas instanceof HTMLCanvasElement) {
imgToClassify = optionsOrCallback.canvas; // Handle p5.js image
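The new `HTMLVideoElement` branches above mean `detect()` and `detectSingle()` now accept a raw `<video>` element in addition to images, canvases, and p5 media. A sketch of the newly supported call (the element id and option values are illustrative):

```js
// A plain <video> element now passes the instanceof checks directly.
const video = document.querySelector('#webcam'); // hypothetical video element
const faceapi = ml5.faceApi(video, { withLandmarks: true, withDescriptors: false }, () => {
  faceapi.detect(video, (err, results) => {
    if (err) return console.error(err);
    console.log(results); // face detections with landmarks
  });
});
```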
diff --git a/src/FaceApi/index_test.js b/src/FaceApi/index_test.js
index 35b965733..5dfa6fe25 100644
--- a/src/FaceApi/index_test.js
+++ b/src/FaceApi/index_test.js
@@ -41,17 +41,18 @@ describe('faceApi', () => {
// return canvas;
// }
- beforeEach(async () => {
+ beforeAll(async () => {
jasmine.DEFAULT_TIMEOUT_INTERVAL = 15000;
faceapi = await faceApi();
});
-
- it('Should create faceApi with all the defaults', async () => {
- expect(faceapi.config.withLandmarks).toBe(FACEAPI_DEFAULTS.withLandmarks);
- expect(faceapi.config.withDescriptors).toBe(FACEAPI_DEFAULTS.withDescriptors);
- });
describe('landmarks', () => {
+
+ it('Should create faceApi with all the defaults', async () => {
+ expect(faceapi.config.withLandmarks).toBe(FACEAPI_DEFAULTS.withLandmarks);
+ expect(faceapi.config.withDescriptors).toBe(FACEAPI_DEFAULTS.withDescriptors);
+ });
+
it('Should get landmarks for Frida', async () => {
const img = await getImage();
await faceapi.detectSingle(img)
diff --git a/src/ImageClassifier/index.js b/src/ImageClassifier/index.js
index 1359ca68b..8cf509b74 100644
--- a/src/ImageClassifier/index.js
+++ b/src/ImageClassifier/index.js
@@ -76,46 +76,46 @@ class ImageClassifier {
*/
async loadModel(modelUrl) {
if (modelUrl) this.model = await this.loadModelFrom(modelUrl);
- else this.model = await this.modelToUse.load({version: this.version, alpha: this.alpha});
+ else this.model = await this.modelToUse.load({ version: this.version, alpha: this.alpha });
return this;
}
async loadModelFrom(path = null) {
fetch(path)
- .then(r => r.json())
- .then((r) => {
- if (r.ml5Specs) {
- this.mapStringToIndex = r.ml5Specs.mapStringToIndex;
- }
- })
-      // When loading a model generated by Teachable Machine 2.0, r.ml5Specs is missing,
-      // which causes imageClassifier to fail to display labels.
-      // In this case, labels are stored alongside the model in metadata.json.
-      // Therefore, I'm fetching the metadata and feeding the labels into this.mapStringToIndex
-      // by Yang Yang, yy2473@nyu.edu, Oct 2, 2019
- .then(()=>{
- if (this.mapStringToIndex.length === 0) {
- const split = path.split("/");
- const prefix = split.slice(0, split.length - 1).join("/");
- const metadataUrl = `${prefix}/metadata.json`;
- fetch(metadataUrl)
- .then((res) => {
- if (!res.ok) {
- console.log("Tried to fetch metadata.json, but it seems to be missing.");
- throw Error(res.statusText);
- }
- return res;
- })
- .then(metadataJson => metadataJson.json())
- .then((metadataJson)=> {
- if (metadataJson.labels){
- this.mapStringToIndex = metadataJson.labels;
- }
- })
- .catch(() => console.log("Error when loading metadata.json"));
- }
- });
+ .then(r => r.json())
+ .then((r) => {
+ if (r.ml5Specs) {
+ this.mapStringToIndex = r.ml5Specs.mapStringToIndex;
+ }
+ })
+      // When loading a model generated by Teachable Machine 2.0, r.ml5Specs is missing,
+      // which causes imageClassifier to fail to display labels.
+      // In this case, labels are stored alongside the model in metadata.json.
+      // Therefore, I'm fetching the metadata and feeding the labels into this.mapStringToIndex
+      // by Yang Yang, yy2473@nyu.edu, Oct 2, 2019
+ .then(() => {
+ if (this.mapStringToIndex.length === 0) {
+ const split = path.split("/");
+ const prefix = split.slice(0, split.length - 1).join("/");
+ const metadataUrl = `${prefix}/metadata.json`;
+ fetch(metadataUrl)
+ .then((res) => {
+ if (!res.ok) {
+ console.log("Tried to fetch metadata.json, but it seems to be missing.");
+ throw Error(res.statusText);
+ }
+ return res;
+ })
+ .then(metadataJson => metadataJson.json())
+ .then((metadataJson) => {
+ if (metadataJson.labels) {
+ this.mapStringToIndex = metadataJson.labels;
+ }
+ })
+ .catch(() => console.log("Error when loading metadata.json"));
+ }
+ });
// end of the Oct 2, 2019 fix
this.model = await tf.loadLayersModel(path);
return this.model;
@@ -206,7 +206,7 @@ class ImageClassifier {
|| inputNumOrCallback.elt instanceof HTMLCanvasElement
|| inputNumOrCallback.elt instanceof ImageData)
) {
- imgToPredict = inputNumOrCallback.elt; // Handle p5.js image
+ imgToPredict = inputNumOrCallback.elt; // Handle p5.js image
} else if (typeof inputNumOrCallback === 'object' && inputNumOrCallback.canvas instanceof HTMLCanvasElement) {
imgToPredict = inputNumOrCallback.canvas; // Handle p5.js image
} else if (!(this.video instanceof HTMLVideoElement)) {
@@ -242,15 +242,15 @@ class ImageClassifier {
}
const imageClassifier = (modelName, videoOrOptionsOrCallback, optionsOrCallback, cb) => {
- let model;
let video;
let options = {};
let callback = cb;
- if (typeof modelName === 'string') {
- model = modelName.toLowerCase();
- } else {
+ let model = modelName;
+ if (typeof model !== 'string') {
throw new Error('Please specify a model to use. E.g: "MobileNet"');
+ } else if (model.indexOf('http') === -1) {
+ model = modelName.toLowerCase();
}
if (videoOrOptionsOrCallback instanceof HTMLVideoElement) {
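The reworked factory above keeps the case of model URLs intact while still lowercasing built-in names, which is what lets hosted models such as Teachable Machine exports load (the URL shown is the `TM_URL` used in the tests below):

```js
// Built-in names are case-insensitive; URLs keep their original case.
const mobilenet = ml5.imageClassifier('MobileNet'); // normalized to 'mobilenet'
const teachable = ml5.imageClassifier(
  'https://storage.googleapis.com/tm-models/WfgKPytY/model.json' // contains 'http', so left untouched
);
```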
diff --git a/src/ImageClassifier/index_test.js b/src/ImageClassifier/index_test.js
index 31b0b4bdb..93984cdd2 100644
--- a/src/ImageClassifier/index_test.js
+++ b/src/ImageClassifier/index_test.js
@@ -3,7 +3,11 @@
// This software is released under the MIT License.
// https://opensource.org/licenses/MIT
-const { imageClassifier } = ml5;
+const {
+ imageClassifier
+} = ml5;
+
+const TM_URL = 'https://storage.googleapis.com/tm-models/WfgKPytY/model.json';
const DEFAULTS = {
learningRate: 0.0001,
@@ -16,67 +20,108 @@ const DEFAULTS = {
version: 2,
};
+async function getImage() {
+ const img = new Image();
+ img.crossOrigin = true;
+ img.src = 'https://cdn.jsdelivr.net/gh/ml5js/ml5-library@development/assets/bird.jpg';
+ await new Promise((resolve) => {
+ img.onload = resolve;
+ });
+ return img;
+}
+
+async function getCanvas() {
+ const img = await getImage();
+ const canvas = document.createElement('canvas');
+ canvas.width = img.width;
+ canvas.height = img.height;
+ canvas.getContext('2d').drawImage(img, 0, 0);
+ return canvas;
+}
+
describe('imageClassifier', () => {
let classifier;
- async function getImage() {
- const img = new Image();
- img.crossOrigin = true;
- img.src = 'https://cdn.jsdelivr.net/gh/ml5js/ml5-library@development/assets/bird.jpg';
- await new Promise((resolve) => { img.onload = resolve; });
- return img;
- }
-
- async function getCanvas() {
- const img = await getImage();
- const canvas = document.createElement('canvas');
- canvas.width = img.width;
- canvas.height = img.height;
- canvas.getContext('2d').drawImage(img, 0, 0);
- return canvas;
- }
-
- beforeEach(async () => {
- jasmine.DEFAULT_TIMEOUT_INTERVAL = 15000;
- classifier = await imageClassifier('MobileNet', undefined, {});
- });
+ /**
+ * Test imageClassifier with teachable machine
+ */
+ // Teachable machine model
+ describe('with Teachable Machine model', () => {
+
+ beforeAll(async () => {
+ jasmine.DEFAULT_TIMEOUT_INTERVAL = 15000;
+ classifier = await imageClassifier(TM_URL, undefined, {});
+ });
+
+ describe('instantiate', () => {
+ it('Should create a classifier with all the defaults', async () => {
+ expect(classifier.modelUrl).toBe(TM_URL);
+ });
+ });
- it('Should create a classifier with all the defaults', async () => {
- expect(classifier.version).toBe(DEFAULTS.version);
- expect(classifier.alpha).toBe(DEFAULTS.alpha);
- expect(classifier.topk).toBe(DEFAULTS.topk);
- expect(classifier.ready).toBeTruthy();
});
- describe('classify', () => {
- it('Should classify an image of a Robin', async () => {
- const img = await getImage();
- await classifier.classify(img)
- .then(results => expect(results[0].label).toBe('robin, American robin, Turdus migratorius'));
- });
- it('Should support p5 elements with an image on .elt', async () => {
- const img = await getImage();
- await classifier.classify({ elt: img })
- .then(results => expect(results[0].label).toBe('robin, American robin, Turdus migratorius'));
- });
- it('Should support HTMLCanvasElement', async () => {
- const canvas = await getCanvas();
- await classifier.classify(canvas)
- .then(results => expect(results[0].label).toBe('robin, American robin, Turdus migratorius'));
- });
+ /**
+ * Test imageClassifier with Mobilenet
+ */
+ describe('imageClassifier with Mobilenet', () => {
- it('Should support p5 elements with canvas on .canvas', async () => {
- const canvas = await getCanvas();
- await classifier.classify({ canvas })
- .then(results => expect(results[0].label).toBe('robin, American robin, Turdus migratorius'));
+ beforeAll(async () => {
+ jasmine.DEFAULT_TIMEOUT_INTERVAL = 15000;
+ classifier = await imageClassifier('MobileNet', undefined, {});
});
- it('Should support p5 elements with canvas on .elt', async () => {
- const canvas = await getCanvas();
- await classifier.classify({ elt: canvas })
- .then(results => expect(results[0].label).toBe('robin, American robin, Turdus migratorius'));
+ describe('instantiate', () => {
+
+ it('Should create a classifier with all the defaults', async () => {
+ expect(classifier.version).toBe(DEFAULTS.version);
+ expect(classifier.alpha).toBe(DEFAULTS.alpha);
+ expect(classifier.topk).toBe(DEFAULTS.topk);
+ expect(classifier.ready).toBeTruthy();
+ });
+ })
+
+ describe('classify', () => {
+
+ it('Should classify an image of a Robin', async () => {
+ const img = await getImage();
+ await classifier.classify(img)
+ .then(results => expect(results[0].label).toBe('robin, American robin, Turdus migratorius'));
+ });
+
+ it('Should support p5 elements with an image on .elt', async () => {
+ const img = await getImage();
+ await classifier.classify({
+ elt: img
+ })
+ .then(results => expect(results[0].label).toBe('robin, American robin, Turdus migratorius'));
+ });
+
+ it('Should support HTMLCanvasElement', async () => {
+ const canvas = await getCanvas();
+ await classifier.classify(canvas)
+ .then(results => expect(results[0].label).toBe('robin, American robin, Turdus migratorius'));
+ });
+
+ it('Should support p5 elements with canvas on .canvas', async () => {
+ const canvas = await getCanvas();
+ await classifier.classify({
+ canvas
+ })
+ .then(results => expect(results[0].label).toBe('robin, American robin, Turdus migratorius'));
+ });
+
+ it('Should support p5 elements with canvas on .elt', async () => {
+ const canvas = await getCanvas();
+ await classifier.classify({
+ elt: canvas
+ })
+ .then(results => expect(results[0].label).toBe('robin, American robin, Turdus migratorius'));
+ });
});
+
});
-});
+
+})
\ No newline at end of file
diff --git a/src/KMeans/index_test.js b/src/KMeans/index_test.js
index c90cb960c..a14bdecac 100644
--- a/src/KMeans/index_test.js
+++ b/src/KMeans/index_test.js
@@ -16,7 +16,8 @@ const KMEANS_DEFAULTS = {
describe('kMeans', () => {
let kmeansModel;
const dataurl = 'https://raw.githubusercontent.com/ml5js/ml5-examples/development/d3/KMeans/KMeans_GaussianClusterDemo/data/gaussian2d_2clusters.csv'
- beforeEach(async () => {
+
+ beforeAll(async () => {
jasmine.DEFAULT_TIMEOUT_INTERVAL = 10000;
kmeansModel = await kmeans(dataurl, KMEANS_DEFAULTS, (err, result) => {
return;
@@ -43,11 +44,4 @@ describe('kMeans', () => {
expect(unique).toBe(2);
});
-
-
-
-
-
-
-
});
\ No newline at end of file
diff --git a/src/NeuralNetwork/NeuralNetworkData.js b/src/NeuralNetwork/NeuralNetworkData.js
index 2b58b4bfc..ad137dcc2 100644
--- a/src/NeuralNetwork/NeuralNetworkData.js
+++ b/src/NeuralNetwork/NeuralNetworkData.js
@@ -135,6 +135,10 @@ class NeuralNetworkData {
if (outputLabels.includes(prop)) {
output.ys[prop] = item[prop]
+ // convert ys into strings, if the task is classification
+          if (this.config.architecture.task === "classification" && typeof output.ys[prop] !== "string") {
+ output.ys[prop] += "";
+ }
}
})
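The coercion added above matters when a classification dataset uses numeric labels. A sketch of the case it protects against (option names follow the neural network reference; the data values are made up):

```js
// With task 'classification', the numeric label 1 is stored as the string "1",
// so it is treated as a class name rather than a regression target.
const nn = ml5.neuralNetwork({ inputs: ['x', 'y'], outputs: ['label'], task: 'classification' });
nn.addData({ x: 0.2, y: 0.4 }, { label: 1 }); // internally becomes ys.label === "1"
```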
diff --git a/src/NeuralNetwork/index_test.js b/src/NeuralNetwork/index_test.js
index fdb079270..ca09f47ce 100644
--- a/src/NeuralNetwork/index_test.js
+++ b/src/NeuralNetwork/index_test.js
@@ -9,27 +9,27 @@ const {
} = ml5;
const NN_DEFAULTS = {
- task: 'regression',
- activationHidden: 'sigmoid',
- activationOutput: 'sigmoid',
- debug: false,
- learningRate: 0.25,
- inputs: 2,
- outputs: 1,
- noVal: null,
- hiddenUnits: 16,
- modelMetrics: ['accuracy'],
- modelLoss: 'meanSquaredError',
- modelOptimizer: null,
- batchSize: 64,
- epochs: 32,
+ task: 'regression',
+ activationHidden: 'sigmoid',
+ activationOutput: 'sigmoid',
+ debug: false,
+ learningRate: 0.25,
+ inputs: 2,
+ outputs: 1,
+ noVal: null,
+ hiddenUnits: 16,
+ modelMetrics: ['accuracy'],
+ modelLoss: 'meanSquaredError',
+ modelOptimizer: null,
+ batchSize: 64,
+ epochs: 32,
}
describe('neuralNetwork', () => {
let nn;
- beforeEach(async () => {
+ beforeAll(async () => {
jasmine.DEFAULT_TIMEOUT_INTERVAL = 15000;
nn = await neuralNetwork();
});
@@ -50,7 +50,7 @@ describe('neuralNetwork', () => {
// expect(nn.config.training.modelMetrics).toBe(NN_DEFAULTS.modelMetrics);
expect(nn.config.training.modelLoss).toBe(NN_DEFAULTS.modelLoss);
// expect(nn.config.training.modelOptimizer).toBe();
-
+
// data defaults
// expect(nn.config.dataOptions.dataUrl).toBe();
// expect(nn.config.dataOptions.inputs).toBe(NN_DEFAULTS.inputs);
@@ -60,4 +60,4 @@ describe('neuralNetwork', () => {
});
-});
+});
\ No newline at end of file
diff --git a/src/Sentiment/index.js b/src/Sentiment/index.js
index a16f030b2..05bfc501e 100644
--- a/src/Sentiment/index.js
+++ b/src/Sentiment/index.js
@@ -42,7 +42,6 @@ class Sentiment {
* @param {function} callback - Optional. A callback function that is called once the model has loaded. If no callback is provided, it will return a promise that will be resolved once the model has loaded.
*/
constructor(modelName, callback) {
- console.log('constructor');
/**
* Boolean value that specifies if the model has loaded.
* @type {boolean}
diff --git a/src/Sentiment/index_test.js b/src/Sentiment/index_test.js
new file mode 100644
index 000000000..aac09367e
--- /dev/null
+++ b/src/Sentiment/index_test.js
@@ -0,0 +1,20 @@
+const { sentiment } = ml5;
+
+describe('Sentiment', () => {
+ let model;
+
+ beforeAll(async () => {
+ jasmine.DEFAULT_TIMEOUT_INTERVAL = 10000;
+ model = await sentiment('moviereviews').ready;
+ });
+
+ it("Model should be ready",()=> expect(model.ready).toBeTruthy());
+
+ it("Happy has a sentiment score greater than 0.5", ()=>{
+ expect(model.predict('Happy').score).toBeGreaterThan(0.5);
+ });
+
+ it("Terrible has a sentiment score less than 0.5", ()=>{
+ expect(model.predict('Terrible').score).toBeLessThan(0.5);
+ });
+});
diff --git a/src/SoundClassifier/index.js b/src/SoundClassifier/index.js
index e9a0a5334..b42449f34 100644
--- a/src/SoundClassifier/index.js
+++ b/src/SoundClassifier/index.js
@@ -59,7 +59,7 @@ class SoundClassifier {
/**
* Classifies the audio from microphone and takes a callback to handle the results
- * @param {function | number} numOrCallback -
+ * @param {function | number} numOrCallback -
* takes any of the following params
* @param {function} cb - a callback function that handles the results of the function.
* @return {function} a promise or the results of a given callback, cb.
@@ -82,14 +82,14 @@ class SoundClassifier {
}
const soundClassifier = (modelName, optionsOrCallback, cb) => {
- let model;
let options = {};
let callback = cb;
- if (typeof modelName === 'string') {
- model = modelName.toLowerCase();
- } else {
+ let model = modelName;
+ if (typeof model !== 'string') {
throw new Error('Please specify a model to use. E.g: "SpeechCommands18w"');
+ } else if (model.indexOf('http') === -1) {
+ model = modelName.toLowerCase();
}
if (typeof optionsOrCallback === 'object') {
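This mirrors the `imageClassifier` factory change above: built-in names like `'SpeechCommands18w'` are lowercased, while model URLs pass through unchanged (the second URL here is hypothetical):

```js
// Either a known model name or a URL to a hosted model.json works.
const byName = ml5.soundClassifier('SpeechCommands18w');
const byUrl = ml5.soundClassifier('https://example.com/my-sound-model/model.json'); // hypothetical URL
```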
diff --git a/src/utils/imageUtilities.js b/src/utils/imageUtilities.js
index 6122c6189..42d331ad6 100644
--- a/src/utils/imageUtilities.js
+++ b/src/utils/imageUtilities.js
@@ -4,6 +4,7 @@
// https://opensource.org/licenses/MIT
import * as tf from '@tensorflow/tfjs';
+import p5Utils from './p5Utils';
// Resize video elements
const processVideo = (input, size, callback = () => {}) => {
@@ -61,6 +62,64 @@ const cropImage = (img) => {
return img.slice([beginHeight, beginWidth, 0], [size, size, 3]);
};
+const flipImage = (img) => {
+ // image image, bitmap, or canvas
+ let imgWidth;
+ let imgHeight;
+ let inputImg;
+
+ if (img instanceof HTMLImageElement ||
+ img instanceof HTMLCanvasElement ||
+ img instanceof HTMLVideoElement ||
+ img instanceof ImageData) {
+ inputImg = img;
+ } else if (typeof img === 'object' &&
+ (img.elt instanceof HTMLImageElement ||
+ img.elt instanceof HTMLCanvasElement ||
+ img.elt instanceof HTMLVideoElement ||
+ img.elt instanceof ImageData)) {
+
+ inputImg = img.elt; // Handle p5.js image
+ } else if (typeof img === 'object' &&
+ img.canvas instanceof HTMLCanvasElement) {
+ inputImg = img.canvas; // Handle p5.js image
+ } else {
+ inputImg = img;
+ }
+
+ if (inputImg instanceof HTMLVideoElement) {
+ // should be videoWidth, videoHeight?
+ imgWidth = inputImg.width;
+ imgHeight = inputImg.height;
+ } else {
+ imgWidth = inputImg.width;
+ imgHeight = inputImg.height;
+ }
+
+
+ if (p5Utils.checkP5()) {
+ const p5Canvas = p5Utils.p5Instance.createGraphics(imgWidth, imgHeight);
+ p5Canvas.push()
+ p5Canvas.translate(imgWidth, 0);
+ p5Canvas.scale(-1, 1);
+ p5Canvas.image(img, 0, 0, imgWidth, imgHeight);
+ p5Canvas.pop()
+
+ return p5Canvas;
+ }
+ const canvas = document.createElement('canvas');
+ canvas.width = imgWidth;
+ canvas.height = imgHeight;
+
+ const ctx = canvas.getContext('2d');
+ ctx.drawImage(inputImg, 0, 0, imgWidth, imgHeight);
+ ctx.translate(imgWidth, 0);
+ ctx.scale(-1, 1);
+ ctx.drawImage(canvas, imgWidth * -1, 0, imgWidth, imgHeight);
+ return canvas;
+
+}
+
// Static Method: image to tf tensor
function imgToTensor(input, size = null) {
return tf.tidy(() => {
@@ -79,4 +138,5 @@ export {
processVideo,
cropImage,
imgToTensor,
-};
+ flipImage
+};
\ No newline at end of file
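Finally, a note on the new `flipImage` helper added above: it accepts an image, canvas, video, or p5 media object and returns a horizontally mirrored copy, as a p5 graphics object when p5 is present and a plain canvas otherwise. A hypothetical use from inside the library:

```js
// Mirror a webcam frame before running it through a model.
import { flipImage } from './utils/imageUtilities';

const video = document.querySelector('video'); // hypothetical source element
const flipped = flipImage(video); // canvas, or p5.Graphics when p5 is available
```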