@book{Hastie2009,
author = {Hastie, Trevor and Tibshirani, Robert and Friedman, Jerome},
edition = {2},
publisher = {Springer},
title = {{The Elements of Statistical Learning: Data Mining, Inference and Prediction}},
year = {2009}
}
@book{Breiman1984,
author = {Breiman, Leo and Friedman, Jerome H. and Olshen, Richard A. and Stone, Charles J.},
doi = {10.1201/9781315139470},
isbn = {9781315139470},
month = {oct},
publisher = {Routledge},
title = {{Classification And Regression Trees}},
url = {https://www.taylorfrancis.com/books/9781351460491},
year = {1984}
}
@article{Breiman1996,
author = {Breiman, Leo},
doi = {10.1007/BF00058655},
issn = {0885-6125},
journal = {Machine Learning},
month = {aug},
number = {2},
pages = {123--140},
publisher = {Kluwer Academic Publishers},
title = {{Bagging predictors}},
url = {http://link.springer.com/10.1007/BF00058655},
volume = {24},
year = {1996}
}
@inproceedings{TinKamHo1995,
abstract = {Decision trees are attractive classifiers due to their high execution speed. But trees derived with traditional methods often cannot be grown to arbitrary complexity for possible loss of generalization accuracy on unseen data. The limitation on complexity usually means suboptimal accuracy on training data. Following the principles of stochastic modeling, we propose a method to construct tree-based classifiers whose capacity can be arbitrarily expanded for increases in accuracy for both training and unseen data. The essence of the method is to build multiple trees in randomly selected subspaces of the feature space. Trees in different subspaces generalize their classification in complementary ways, and their combined classification can be monotonically improved. The validity of the method is demonstrated through experiments on the recognition of handwritten digits.},
author = {Ho, Tin Kam},
booktitle = {Proceedings of 3rd International Conference on Document Analysis and Recognition},
doi = {10.1109/icdar.1995.598994},
title = {{Random decision forests}},
year = {1995}
}
@article{Breiman2001,
author = {Breiman, Leo},
doi = {10.1023/A:1010933404324},
issn = {0885-6125},
journal = {Machine Learning},
number = {1},
pages = {5--32},
publisher = {Kluwer Academic Publishers},
title = {{Random Forests}},
url = {http://link.springer.com/10.1023/A:1010933404324},
volume = {45},
year = {2001}
}
@article{scikit-learn,
author = {Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal = {Journal of Machine Learning Research},
pages = {2825--2830},
title = {{Scikit-learn: Machine Learning in Python}},
volume = {12},
year = {2011}
}
@misc{breiman_cutler,
author = {Breiman, Leo and Cutler, Adele},
howpublished = {University of California, Berkeley},
title = {{Random Forests}},
url = {https://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm},
year = {2019}
}
@article{Fernandez-Delgado2014,
abstract = {We evaluate 179 classifiers arising from 17 families (discriminant analysis, Bayesian, neural networks, support vector machines, decision trees, rule-based classifiers, boosting, bagging, stacking, random forests and other ensembles, generalized linear models, nearest-neighbors, partial least squares and principal component regression, logistic and multinomial regression, multiple adaptive regression splines and other methods), implemented in Weka, R (with and without the caret package), C and Matlab, including all the relevant classifiers available today. We use 121 data sets, which represent the whole UCI data base (excluding the large-scale problems) and other own real problems, in order to achieve significant conclusions about the classifier behavior, not dependent on the data set collection. The classifiers most likely to be the bests are the random forest (RF) versions, the best of which (implemented in R and accessed via caret) achieves 94.1{\%} of the maximum accuracy overcoming 90{\%} in the 84.3{\%} of the data sets. However, the difference is not statistically significant with the second best, the SVM with Gaussian kernel implemented in C using LibSVM, which achieves 92.3{\%} of the maximum accuracy. A few models are clearly better than the remaining ones: random forest, SVM with Gaussian and polynomial kernels, extreme learning machine with Gaussian kernel, C5.0 and avNNet (a committee of multi-layer perceptrons implemented in R with the caret package). The random forest is clearly the best family of classifiers (3 out of 5 bests classifiers are RF), followed by SVM (4 classifiers in the top-10), neural networks and boosting ensembles (5 and 3 members in the top-20, respectively).},
author = {Fern{\'{a}}ndez-Delgado, Manuel and Cernadas, Eva and Barro, Sen{\'{e}}n and Amorim, Dinani},
journal = {Journal of Machine Learning Research},
keywords = {Bayesian classifiers,UCI data base,classification,decision trees,discriminant analysis,ensembles,generalized linear models,logistic and multinomial regression,multiple adaptive regression splines,nearest-neighbors,neural networks,partial least squares and principal component regression,random forest,rule-based classifiers,support vector machine},
pages = {3133--3181},
title = {{Do we Need Hundreds of Classifiers to Solve Real World Classification Problems?}},
volume = {15},
year = {2014}
}
@article{opencv_library,
author = {Bradski, G.},
journal = {Dr. Dobb's Journal of Software Tools},
title = {{The OpenCV Library}},
year = {2000}
}
@article{King2009,
abstract = {There are many excellent toolkits which provide support for developing machine learning software in Python, R, Matlab, and similar environments. Dlib-ml is an open source library, targeted at both engineers and research scientists, which aims to provide a similarly rich environment for developing machine learning software in the C++ language. Towards this end, dlib-ml contains an extensible linear algebra toolkit with built in BLAS support. It also houses implementations of algorithms for performing inference in Bayesian networks and kernel-based methods for classification, regression, clustering, anomaly detection, and feature ranking. To enable easy use of these tools, the entire library has been developed with contract programming, which provides complete and precise documentation as well as powerful debugging tools.},
author = {King, Davis E.},
journal = {Journal of Machine Learning Research},
keywords = {Bayesian networks,C++,kernel clustering,kernel-methods,rvm,svm},
pages = {1755--1758},
title = {{Dlib-ml: A Machine Learning Toolkit}},
volume = {10},
year = {2009}
}
@article{Sagonas2016,
abstract = {Computer Vision has recently witnessed great research advance towards automatic facial points detection. Numerous methodologies have been proposed during the last few years that achieve accurate and efficient performance. However, fair comparison between these methodologies is infeasible mainly due to two issues. (a) Most existing databases, captured under both constrained and unconstrained (in-the-wild) conditions have been annotated using different markups and, in most cases, the accuracy of the annotations is low. (b) Most published works report experimental results using different training/testing sets, different error metrics and, of course, landmark points with semantically different locations. In this paper, we aim to overcome the aforementioned problems by (a) proposing a semi-automatic annotation technique that was employed to re-annotate most existing facial databases under a unified protocol, and (b) presenting the 300 Faces In-The-Wild Challenge (300-W), the first facial landmark localization challenge that was organized twice, in 2013 and 2015. To the best of our knowledge, this is the first effort towards a unified annotation scheme of massive databases and a fair experimental comparison of existing facial landmark localization systems. The images and annotations of the new testing database that was used in the 300-W challenge are available from http://ibug.doc.ic.ac.uk/resources/300-W{\_}IMAVIS/.},
author = {Sagonas, Christos and Antonakos, Epameinondas and Tzimiropoulos, Georgios and Zafeiriou, Stefanos and Pantic, Maja},
doi = {10.1016/j.imavis.2016.01.002},
journal = {Image and Vision Computing},
keywords = {Challenge,Facial database,Facial landmark localization,Semi-automatic annotation tool},
pages = {3--18},
title = {{300 Faces In-The-Wild Challenge: database and results}},
volume = {47},
year = {2016}
}
@inproceedings{Sagonas,
abstract = {Automatic facial point detection plays arguably the most important role in face analysis. Several methods have been proposed which reported their results on databases of both constrained and unconstrained conditions. Most of these databases provide annotations with different markups and in some cases there are problems related to the accuracy of the fiducial points. The aforementioned issues as well as the lack of an evaluation protocol make it difficult to compare performance between different systems. In this paper, we present the 300 Faces in-the-Wild Challenge: The first facial landmark localization Challenge which is held in conjunction with the International Conference on Computer Vision 2013, Sydney, Australia. The main goal of this challenge is to compare the performance of different methods on a new-collected dataset using the same evaluation protocol and the same markup and hence to develop the first standardized benchmark for facial landmark localization.},
author = {Sagonas, Christos and Tzimiropoulos, Georgios and Zafeiriou, Stefanos and Pantic, Maja},
booktitle = {Proceedings of the IEEE International Conference on Computer Vision Workshops},
title = {{300 Faces in-the-Wild Challenge: The first facial landmark localization Challenge}},
url = {http://ibug.doc.ic.ac.uk/resources/300-W/},
year = {2013}
}
@misc{Rosebrock2017,
author = {Rosebrock, Adrian},
howpublished = {PyImageSearch},
title = {{Drowsiness detection with OpenCV}},
url = {https://www.pyimagesearch.com/2017/05/08/drowsiness-detection-opencv/},
urldate = {2019-05-06},
year = {2017}
}