diff --git a/CHANGELOG b/CHANGELOG
index efa2252..2a387a3 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,3 +1,14 @@
+-- Note --
+As of the move to github on Oct 2013, we will not mention
+minor changes in this changelog since minor changes
+can be viewed automatically using github's versioning tools
+----------
+
+Oct 29 2013, v1.3 release 2
+    Renaming solver_OrderedLASSO.m to solver_SLOPE.m
+    Renaming prox_Ol1.m to prox_Sl1.m
+    Renaming examples/smallscale/test_OrderedLASSO.m to ...test_SLOPE.m
+
 Oct 10 2013, v1.3
     License changed! See LICENSE for details
     New:
diff --git a/Contents.m b/Contents.m
index 8b0a581..443d4aa 100644
--- a/Contents.m
+++ b/Contents.m
@@ -57,7 +57,7 @@
 % prox_hinge - Hinge-loss function.
 % prox_hingeDual - Dual function of the Hinge-loss function.
 % prox_l1 - L1 norm.
-% prox_Ol1 - Ordered L1 norm.
+% prox_Sl1 - Sorted (aka ordered) L1 norm.
 % prox_l1l2 - L1-L2 block norm: sum of L2 norms of rows.
 % prox_l1linf - L1-LInf block norm: sum of L2 norms of rows.
 % prox_l1pos - L1 norm, restricted to x >= 0
@@ -87,7 +87,7 @@
 % Premade solvers for specific problems (vector variables)
 % solver_L1RLS - l1-regularized least squares problem, sometimes called the LASSO.
 % solver_LASSO - Minimize residual subject to l1-norm constraints.
-% solver_OrderedLASSO - LASSO using ordered l1-norm.
+% solver_SLOPE - Sorted L One Penalized Estimation (LASSO using sorted/ordered l1 norm)
 % solver_sBP - Basis pursuit (l1-norm with equality constraints). Uses smoothing.
 % solver_sBPDN - Basis pursuit de-noising. BP with relaxed constraints. Uses smoothing.
 % solver_sBPDN_W - Weighted BPDN problem. Uses smoothing.
diff --git a/examples/smallscale/reference_solutions/ordered_asso_problem1_noisy.mat b/examples/smallscale/reference_solutions/ordered_lasso_problem1_noisy.mat
similarity index 100%
rename from examples/smallscale/reference_solutions/ordered_asso_problem1_noisy.mat
rename to examples/smallscale/reference_solutions/ordered_lasso_problem1_noisy.mat
diff --git a/examples/smallscale/test_OrderedLASSO.m b/examples/smallscale/test_SLOPE.m
similarity index 90%
rename from examples/smallscale/test_OrderedLASSO.m
rename to examples/smallscale/test_SLOPE.m
index f87eaa3..2b12b8d 100644
--- a/examples/smallscale/test_OrderedLASSO.m
+++ b/examples/smallscale/test_SLOPE.m
@@ -1,5 +1,6 @@
 %{
-    Tests the Ordered LASSO ( aka Ordered L1 regularized Least Squares) problem
+    Tests the Sorted/Ordered LASSO ( aka Ordered L1 regularized Least Squares) problem
+    Also known as SLOPE for Sorted L-One Penalized Estimation

       min_x sum(lambda*sort(abs(x),'descend')) + .5||A(x)-b||_2^2

@@ -23,7 +24,7 @@ min_x sum(lambda*sort(abs(x),'descend')) + .5||A(x)-b||_2^2
 %%
 % Try to load the problem from disk
 fileName = fullfile(tfocs_where,...
-    'examples','smallscale','reference_solutions','ordered_asso_problem1_noisy');
+    'examples','smallscale','reference_solutions','ordered_lasso_problem1_noisy');
 randn('state',34324);
 rand('state',34324);
 N = 1024;
@@ -57,7 +58,7 @@ min_x sum(lambda*sort(abs(x),'descend')) + .5||A(x)-b||_2^2

 % We cannot get the solution via CVX easily, so solve using our method
 opts = struct('restart',-Inf,'tol',1e-13,'maxits',1000, 'printEvery',10);
-[ x, out, optsOut ] = solver_OrderedLASSO( A, b, lambda, x0, opts );
+[ x, out, optsOut ] = solver_SLOPE( A, b, lambda, x0, opts );
 x_ref = x;
 obj_ref = norm(A*x-b)^2/2 + sum(lambda(:).*sort(abs(x),'descend'));

@@ -85,7 +86,7 @@ min_x sum(lambda*sort(abs(x),'descend')) + .5||A(x)-b||_2^2
 opts.errFcn = { @(f,primal) er(primal), ...
                 @(f,primal) f - obj_ref };
 tic;
-[ x, out, optsOut ] = solver_OrderedLASSO( A, b, lambda, x0, opts );
+[ x, out, optsOut ] = solver_SLOPE( A, b, lambda, x0, opts );
 time_TFOCS = toc;

 fprintf('Solution has %d nonzeros. Error vs. reference solution is %.2e\n',...
diff --git a/prox_Ol1.m b/prox_Sl1.m
similarity index 91%
rename from prox_Ol1.m
rename to prox_Sl1.m
index 738d983..ab4420e 100644
--- a/prox_Ol1.m
+++ b/prox_Sl1.m
@@ -1,5 +1,5 @@
-function op = prox_Ol1( lambda )
-%PROX_OL1 Ordered L1 norm.
+function op = prox_Sl1( lambda )
+%PROX_SL1 Sorted/Ordered L1 norm.
 % OP = PROX_L1( lambda ) implements the nonsmooth function
 %    OP(X) = sum(lambda.*sort(abs(X),'descend'))
 % where lambda is strictly positive and sorted in decreasing order,
@@ -28,7 +28,7 @@
     error( 'Argument lambda must be sorted in decreasing order.');
 end
 if numel(lambda)==1
-    warning('TFOCS:prox_OL1','When lambda is a scalar, we recommend prox_l1.m instead pf prox_OL1.m');
+    warning('TFOCS:prox_SL1','When lambda is a scalar, we recommend prox_l1.m instead of prox_SL1.m');
 end

@@ -39,7 +39,7 @@
     makeMex;
     % check that it worked
     if 3 ~= exist('proxAdaptiveL1Mex','file')
-        disp('Compilation of mex files for prox_OL1.m failed; please report this error');
+        disp('Compilation of mex files for prox_SL1.m failed; please report this error');
     end
 end
diff --git a/solver_OrderedLASSO.m b/solver_SLOPE.m
similarity index 71%
rename from solver_OrderedLASSO.m
rename to solver_SLOPE.m
index e300af0..fe55f5f 100644
--- a/solver_OrderedLASSO.m
+++ b/solver_SLOPE.m
@@ -1,18 +1,21 @@
-function [ x, odata, opts ] = solver_OrderedLASSO( A, b, lambda, x0, opts )
-% SOLVER_ORDEREDLASSO l1-regularized least squares problem, sometimes called the LASSO,
-% [ beta, odata, opts ] = solver_L1RLS( X, y, lambda, beta0, opts )
-% Solves the l1-regularized least squares problem, using the ordered l1 norm,
+function [ x, odata, opts ] = solver_SLOPE( A, b, lambda, x0, opts )
+% SOLVER_SLOPE Sorted l1-regularized least squares problem,
+% [ beta, odata, opts ] = solver_SLOPE( X, y, lambda, beta0, opts )
+% Solves the l1-regularized least squares problem, using the sorted/ordered l1 norm,
 %    minimize (1/2)*norm( A * x - b )^2 + norm( lasso.*sort(abs(x),'descend'), 1 )
 % using the Auslender/Teboulle variant with restart. X must be a matrix
 % or a linear operator, y must be a vector, and lambda must be a real
 % positive vector in decreasing order.
 % The initial point beta0 and option structure opts are both optional.
+%
+% SLOPE stands for Sorted L-One Penalized Estimation
+%
 % Reference:
 % "Statistical Estimation and Testing via the Ordered l1 Norm"
 % by M. Bogdan, E. van den Berg, W. Su, and E. J. Candès, 2013
 % http://www-stat.stanford.edu/~candes/OrderedL1/
 %
-% See also solver_L1RLS.m, solver_LASSO.m, prox_Ol1.m
+% See also solver_L1RLS.m, solver_LASSO.m, prox_Sl1.m

 error(nargchk(3,5,nargin));
 if nargin < 4, x0 = []; end
@@ -21,7 +24,7 @@
     opts.restart = 100;
 end

-[x,odata,opts] = tfocs( smooth_quad, { A, -b }, prox_Ol1( lambda ), x0, opts );
+[x,odata,opts] = tfocs( smooth_quad, { A, -b }, prox_Sl1( lambda ), x0, opts );

 % TFOCS v1.3 by Stephen Becker, Emmanuel Candes, and Michael Grant.
 % Copyright 2013 California Institute of Technology and CVX Research.
diff --git a/userguide.tex b/userguide.tex
index d557ac6..2390d41 100644
--- a/userguide.tex
+++ b/userguide.tex
@@ -1729,7 +1729,7 @@ \section{Appendix: list of TFOCS functions}
 \verb@prox_hinge@ & Hinge-loss function. \\
 \verb@prox_hingeDual@ & Dual function of the Hinge-loss function. \\
 \verb@prox_l1@ & L1 norm. \\
-\verb@prox_Ol1@ & Ordered L1 norm. \\
+\verb@prox_Sl1@ & Sorted (aka ordered) L1 norm. \\
 \verb@prox_l1l2@ & L1-L2 block norm: sum of L2 norms of rows. \\
 \verb@prox_l1linf@ & L1-LInf block norm: sum of L2 norms of rows. \\
 \verb@prox_l1pos@ & L1 norm, restricted to $x \ge 0$ \\
@@ -1759,7 +1759,7 @@ \section{Appendix: list of TFOCS functions}
 \multicolumn{2}{l}{\bf Premade solvers for specific problems (vector variables)}\\
 \verb@solver_L1RLS@ & l1-regularized least squares problem, sometimes called the LASSO. \\
 \verb@solver_LASSO@ & Minimize residual subject to l1-norm constraints. \\
-\verb@solver_OrderedLASSO@ & Like LASSO but with an ordered l1 norm; see documentation. \\
+\verb@solver_SLOPE@ & Sorted L One Penalized Estimation; like LASSO but with an ordered l1 norm; see documentation. \\
 \verb@solver_sBP@ & Basis pursuit (l1-norm with equality constraints). Uses smoothing. \\
 \verb@solver_sBPDN@ & Basis pursuit de-noising. BP with relaxed constraints. Uses smoothing. \\
 \verb@solver_sBPDN_W@ & Weighted BPDN problem. Uses smoothing. \\
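
-- Usage sketch --
A minimal sketch of calling the renamed functions, based only on the signatures shown in
solver_SLOPE.m and test_SLOPE.m above. The problem sizes, noise level, and lambda sequence
here are illustrative assumptions, not the package's reference data.

% Minimal sketch (illustrative data): solve
%   min_x  0.5*||A*x - b||_2^2 + sum( lambda .* sort(abs(x),'descend') )
% lambda must be strictly positive and sorted in decreasing order.
randn('state',1); rand('state',1);      % legacy seeding syntax, as used in test_SLOPE.m
N  = 512;  M = 256;                     % illustrative sizes
A  = randn(M,N);
x_true       = zeros(N,1);
x_true(1:20) = randn(20,1);             % sparse ground truth
b  = A*x_true + 0.05*randn(M,1);        % noisy measurements
lambda = linspace(2,1,N)';              % strictly positive, decreasing weights
x0   = zeros(N,1);
opts = struct('tol',1e-10,'printEvery',50);

[x, out, optsOut] = solver_SLOPE( A, b, lambda, x0, opts );   % formerly solver_OrderedLASSO

% Equivalently, the renamed prox can be passed to tfocs directly,
% as solver_SLOPE.m does internally:
% [x,odata,opts] = tfocs( smooth_quad, { A, -b }, prox_Sl1( lambda ), x0, opts );
----------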