diff --git a/PoleFigureAnalysis/@PoleFigure/calcErrorPF.m b/PoleFigureAnalysis/@PoleFigure/calcErrorPF.m
index daa93576d..c969e9270 100644
--- a/PoleFigureAnalysis/@PoleFigure/calcErrorPF.m
+++ b/PoleFigureAnalysis/@PoleFigure/calcErrorPF.m
@@ -30,7 +30,14 @@
   pfcalc = calcPoleFigure(pfcalc,pfmeas.allH,pfmeas.allR,'superposition',pfmeas.c);
 end
-progress(0,pfmeas.numPF);
+
+if check_option(varargin,'silent')
+  varg = {'progress','silent'};
+else
+  varg = {};
+end
+
+progress(0,pfmeas.numPF,varg{:});
 
 for i = 1:pfmeas.numPF
   
   % normalization
@@ -54,5 +61,5 @@
     %d = abs(d1-d2)./(d1+epsilon*alpha);
   end
   pfcalc.allI{i} = d;
-  progress(i,pfmeas.numPF);
+  progress(i,pfmeas.numPF,varg{:});
 end
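
With this change the progress output of calcErrorPF can be suppressed by passing the 'silent' flag, as the refinement demo below does. A minimal usage sketch (pf_measured and odf_recalc are the demo script's variables):

    pf_err = calcErrorPF(pf_measured,odf_recalc,'l1','silent');  % error pole figures without progress output
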
diff --git a/PoleFigureAnalysis/@PoleFigure/calcODFIterative.m b/PoleFigureAnalysis/@PoleFigure/calcODFIterative.m
new file mode 100644
index 000000000..083da87fc
--- /dev/null
+++ b/PoleFigureAnalysis/@PoleFigure/calcODFIterative.m
@@ -0,0 +1,120 @@
+function [odf,alpha] = calcODFIterative(pf,varargin)
+% iterative PDF to ODF inversion
+%
+% *calcODFIterative* solves the PF to ODF inversion problem by iteratively
+% adjusting the kernel width, starting with a uniform ODF.
+%
+% Iteratively adjusting the kernel width makes it possible to model ODFs from
+% highly irregularly sampled data, as missing information is modelled by
+% assuming volume portions along tori (fibres) justified by a coarser model.
+% The method essentially provides an educated initial guess for
+% |PoleFigure.calcODF| and may be seen as a form of regularization.
+%
+% Syntax
+%
+%   odf = calcODFIterative(pf)
+%   odf = calcODFIterative(pf,'halfwidth',5*degree)
+%
+% Input
+%  pf - @PoleFigure
+%
+% Options
+%  kernel     - the ansatz functions (default = de la Vallee Poussin)
+%  halfwidth  - halfwidth of the ansatz functions (default = resolution)
+%  resolution - localization grid for the ansatz functions (default = resolution(pf))
+%  thinning   - corresponds to zero range, removes nodes with low volume portion (default = 0.025, i.e. 2.5% of uniform)
+%
+% Flags
+%  nothinning - omit reduction of nodes
+%  silent     - no output
+%
+% Output
+%  odf   - reconstructed @SO3Fun
+%  alpha - scaling factors, calculated during reconstruction
+%
+% References
+%
+% Bachmann, F. (2016). Texturbestimmung aus Beugungsbildern. In: Optimierung
+% der Goniometrie zur Texturbestimmung aus Röntgenbeugungsbildern.
+% Springer Spektrum, Wiesbaden.
+%
+% See also
+% PoleFigure/calcODF PoleFigureRefinement
+
+
+% target resolution
+% targetS3G = getClass(varargin,'SO3Grid');
+if pf.allR{1}.isOption('resolution')
+  targetres = min(cellfun(@(r) r.resolution,pf.allR));
+else
+  targetres = 5*degree;
+end
+
+targetres = get_option(varargin,'resolution',targetres);
+targethw = get_option(varargin,{'HALFWIDTH','KERNELWIDTH'},targetres,'double');
+
+psi = SO3DeLaValleePoussinKernel('halfwidth', ...
+  get_option(varargin,{'HALFWIDTH','KERNELWIDTH'},targethw,'double'));
+psi = getClass(varargin,'SO3Kernel',psi);
+targethw = psi.halfwidth;
+
+% ensure that the halfwidth is larger than the resolution
+% we could choose hw = 3/2*res
+targetres = min(targetres,targethw);
+
+% parameterize the kernel by its halfwidth
+psi = @(hw) feval(class(psi),'halfwidth',hw);
+nsteps = get_option(varargin,'MaxSteps',5);
+steps = @(res) (sqrt(exp(1)).^((nsteps:-1:1)-1))*res;
+hw  = steps(targethw);
+res = steps(targetres);
+
+% remove irrelevantly coarse resolutions
+rm = res>45*degree;
+hw(rm)  = [];
+res(rm) = [];
+
+format = [' %s : %s |' repmat(' %1.2f',1,pf.numPF) '\n'];
+
+% initialize with a uniform ODF
+odf = uniformODF(pf.CS,pf.SS);
+for k=1:numel(hw)
+  % discretization of the ODF space
+  grid = equispacedSO3Grid(pf.CS,pf.SS,'resolution',res(k));
+  psik = psi(hw(k));
+
+  % naive initial weights
+  % c0 = eval(odf,grid);
+  % c0 = c0./sum(c0);
+
+  % better initial weights
+  warning('off','mlsq:itermax')
+  SO3F = SO3FunRBF.approximation(grid,odf.eval(grid),'kernel',psik,'mlsq','odf','nothinning');
+  warning('on','mlsq:itermax')
+
+  % adjust the grid
+  [grid,c0] = deal(SO3F.center,SO3F.weights);
+  if ~check_option(varargin,'nothinning')
+    % the default unimodalODF thinning is quite strict
+    % id = weights./sum(weights(:)) > 1e-2 / psi.eval(1) / numel(weights);
+
+    id = c0./sum(c0(:)) > min(1.0,get_option(varargin,'thinning',0.025)) / numel(c0);
+    try
+      grid = grid.subGrid(id);
+    catch
+      grid = grid(id);
+    end
+    c0 = c0(id);
+  end
+
+  % or use MLSSolver directly?
+  [odf,alpha] = calcODF(pf,grid,psik,'c0',c0(:),'silent');
+
+  if ~check_option(varargin,'silent') && ~getMTEXpref('generatingHelpMode')
+    e = calcError(pf,odf,'silent'); % calcError is not fully silent yet
+    fprintf(format,xnum2str(k,'fixedWidth',2),xnum2str(numel(grid),'fixedWidth',7),e);
+  end
+
+end
+
+end
\ No newline at end of file
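
A minimal usage sketch of the new function (not part of the patch), using only calls that appear in the demo script below:

    mtexdata dubna                                    % load the example pole figures into pf
    odf = calcODFIterative(pf,'halfwidth',5*degree);  % coarse-to-fine kernel refinement
    calcError(pf,odf)                                 % fit between recalculated and measured pole figures
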
diff --git a/doc/PoleFigureAnalysis/PoleFigureRefinement.m b/doc/PoleFigureAnalysis/PoleFigureRefinement.m
new file mode 100644
index 000000000..1c99b1373
--- /dev/null
+++ b/doc/PoleFigureAnalysis/PoleFigureRefinement.m
@@ -0,0 +1,148 @@
+%% Successive Refinement Demo
+
+%% Open in Editor
+%
+
+%% Regular ODF estimation
+% Please refer to the |PoleFigure2ODF| tutorial first. The regular way of
+% estimating an ODF:
+
+mtexdata dubna
+odf_naive = calcODF(pf);
+
+calcError(pf,odf_naive)
+
+%%
+% visual inspection
+
+odf_naive.plot(pf.allH)
+
+%%
+% Another way to regularize the inversion problem is to iteratively
+% adjust the kernel width during ODF estimation.
+
+odf_iter = calcODFIterative(pf,'nothinning');
+
+calcError(pf,odf_iter)
+
+%%
+% visual inspection
+
+odf_iter.plot(pf.allH)
+
+%%
+% volume portion that is distributed differently between the two methods
+
+calcError(odf_iter,odf_naive,'l1')
+
+%%
+% visual inspection indicates that the uniform portion is distributed
+% differently
+
+plot(calcPoleFigure(pf,odf_naive-odf_iter))
+%plotDiff(odf_naive,odf_iter)
+
+%% An arbitrary model ODF with a somewhat sharp texture
+% We demonstrate the iterative ODF estimation with a synthetic data set.
+
+cs = crystalSymmetry('cubic');
+ss = specimenSymmetry;
+
+q  = rotation.byEuler(10*degree,10*degree,10*degree,'ABG');
+q2 = rotation.byEuler(10*degree,30*degree,10*degree,'ABG');
+
+odf_true = .6*unimodalODF(q,cs,ss,'halfwidth',5*degree) + ...
+  .4*unimodalODF(q2,cs,ss,'halfwidth',4*degree);
+
+%% Pole figures to measure
+
+h = [ ...
+  Miller(1,1,1,cs), ...
+  Miller(1,0,0,cs), ...
+  Miller(1,1,0,cs), ...
+  ];
+
+figure, odf_true.plotPDF(h)
+
+%% Initial measurement grid
+
+r = equispacedS2Grid('resolution',15*degree,'maxtheta',80*degree);
+
+figure
+plot(r,'markersize',12)
+
+%% Refinement
+% For selected directions r we perform a point-like measurement.
+
+r = equispacedS2Grid('resolution',15*degree,'maxtheta',80*degree);
+r = repcell(r,size(h));
+pf_measured  = [];
+pf_simulated = [];
+pf_sim_h = {};
+% number of refinement steps
+nsteps = 5;
+
+for k=1:nsteps
+
+  % perform a measurement for every pole figure
+  pf_simulated = calcPoleFigure(odf_true,h,r,'silent');
+
+  % merge the new measurements with the old ones
+  pf_measured = union(pf_simulated,pf_measured);
+  plot(pf_measured,'silent')
+  drawnow
+
+  fprintf('- at resolution : %f\n', mean(cellfun(@(r) r.resolution, pf_measured.allR))/degree);
+
+  if k < nsteps
+    % ODF modelling
+    odf_recalc = calcODF(pf_measured,'zeroRange','silent');
+
+    % error pole figures of the current model
+    pf_recalcerror = calcErrorPF(pf_measured,odf_recalc,'l1','silent');
+
+    % we could also initialize the weights with the previous estimate
+    odf_recalcerror = calcODF(pf_recalcerror,'silent');
+
+    % the true error, which is unknown in a real experiment
+    fprintf(' error true -- estimated odf : %f\n', calcError(odf_true,odf_recalc,'silent'))
+
+    % refine the grid for every pole figure
+    for l=1:length(h)
+      r_old = pf_measured{l}.r;
+      [newS2G, r_new] = refine( r_old(:) );
+
+      % selection of points of interest, naive criterion
+      pf_sim_h = calcPoleFigure(odf_recalc, h(l), r_new,'silent');
+      th = quantile(pf_sim_h.intensities,0.75);
+      r{l} = pf_sim_h.r(pf_sim_h.intensities > th);
+    end
+  end
+end
+
+%% Measured pole figures
+
+pf_measured
+plot(pf_measured,'silent');
+
+%% Final model
+% The default ODF estimation distributes volume onto nodes that have no
+% corresponding data.
+
+odf_recalc = calcODF(pf_measured,'zeroRange','halfwidth',2.5*degree);
+fprintf(' error true -- estimated odf : %f\n', calcError(odf_true,odf_recalc))
+
+%% Compare with the iterative ODF estimation
+% The iterative ODF estimation models the volume portions of nodes without
+% corresponding data by assuming volume portions coming from a broader
+% halfwidth; hence the error with respect to the true ODF is significantly
+% smaller.
+
+odf_recalc_iterative = calcODFIterative(pf_measured,'halfwidth',2.5*degree);
+fprintf(' error true -- iter. est. odf : %f\n', calcError(odf_true,odf_recalc_iterative))
+
+%%
+% volume portion that is distributed differently between the two estimates
+
+calcError(odf_recalc,odf_recalc_iterative,'l1')
+
diff --git a/geometry/@vector3d/refine.m b/geometry/@vector3d/refine.m
index 339c81054..95a1c77f7 100644
--- a/geometry/@vector3d/refine.m
+++ b/geometry/@vector3d/refine.m
@@ -16,6 +16,6 @@
 r = sum(vector3d(v.subSet(tri)),2);
 r = r./norm(r);
 
-r(t.theta > max(v.theta(:))) = [];
+r(r.theta > max(v.theta(:))) = [];
 
 v = [v; r(:)];
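
The fix above tests the newly created midpoints r themselves against the maximal polar angle of the input directions, so refinement never produces directions outside the measured polar range. A minimal sketch of the intended behaviour (mirroring the two-output call used in the demo script; names are illustrative):

    r_old = equispacedS2Grid('resolution',15*degree,'maxtheta',80*degree);
    [~,r_new] = refine(r_old(:));           % midpoint refinement of the measured grid
    max(r_new.theta) <= max(r_old.theta)    % should now hold for every refined direction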