Skip to content

Commit

Permalink
Remove a side effect where a stopping criterion accidentally updated …
Browse files Browse the repository at this point in the history
…a gradient.
  • Loading branch information
kellertuer committed Oct 30, 2023
1 parent b4b5603 commit 7002e01
Show file tree
Hide file tree
Showing 5 changed files with 15 additions and 11 deletions.
11 changes: 9 additions & 2 deletions src/plans/adabtive_regularization_with_cubics_plan.jl
Original file line number Diff line number Diff line change
Expand Up @@ -71,10 +71,13 @@ function get_cost(
)
M = base_manifold(TpM)
p = TpM.point
c = get_objective_cocst(M, arcmo, p)
c = get_objective_cost(M, arcmo, p)
G = get_objective_gradient(M, arcmo, p)
Y = get_objective_hessian(M, arcmo, p, X)
return c + inner(M, p, G, X) + 1 / 2 * inner(M, p, Y, X) + σ / 3 * norm(M, p, X)^3
return c + inner(M, p, G, X) + 1 / 2 * inner(M, p, Y, X) + arcmo.σ / 3 * norm(M, p, X)^3
end
function get_cost_function(arcmo::AdaptiveRagularizationWithCubicsModelObjective)
    # Return a closure over `arcmo` that evaluates the model cost at a
    # tangent vector `X` on the tangent-space manifold `TpM`.
    model_cost = (TpM, X) -> get_cost(TpM, arcmo, X)
    return model_cost
end
@doc raw"""
get_gradient(TpM, trmo::AdaptiveRagularizationWithCubicsModelObjective, X)
Expand Down Expand Up @@ -105,4 +108,8 @@ function get_gradient!(
Y .= Y + get_objective_gradient(M, arcmo, p) + arcmo.σ * norm(M, p, X) * X
return Y
end
function get_gradient_function(arcmo::AdaptiveRagularizationWithCubicsModelObjective)
    # Return a closure over `arcmo` that evaluates the (allocating) model
    # gradient at a tangent vector `X` on the tangent-space manifold `TpM`.
    model_gradient = (TpM, X) -> get_gradient(TpM, arcmo, X)
    return model_gradient
end

# Also Implement the Hessian for Newton subsubsolver?
4 changes: 2 additions & 2 deletions src/plans/subsolver_plan.jl
Original file line number Diff line number Diff line change
Expand Up @@ -32,8 +32,8 @@ get_objective(amso::AbstractManifoldSubObjective)
Evaluate the cost of the (original) objective stored within the subobjective.
"""
function get_objective_cost(
M::AbstractManifold, amso::AbstractManifoldSubObjective{O}, p
) where {O<:AbstractManifoldCostObjective}
M::AbstractManifold, amso::AbstractManifoldSubObjective{E,O}, p
) where {E,O<:AbstractManifoldCostObjective}
return get_cost(M, get_objective(amso), p)
end

Expand Down
4 changes: 1 addition & 3 deletions src/solvers/Lanczos.jl
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,6 @@
#
# Lanczos sub solver
#

@doc raw"""
LanczosState{P,T,SC,B,I,R,TM,V,Y} <: AbstractManoptSolverState
Expand Down Expand Up @@ -263,8 +262,7 @@ function (c::StopWhenFirstOrderProgress)(
TpM = get_manifold(dmp)
p = TpM.point
M = base_manifold(TpM)
get_gradient!(dmp, ls.X, p)
nX = norm(M, p, ls.X)
nX = norm(M, p, get_gradient(dmp, p))
y = @view(ls.coefficients[1:(i - 1)])
Ty = @view(ls.tridig_matrix[1:i, 1:(i - 1)]) * y
ny = norm(y)
Expand Down
1 change: 0 additions & 1 deletion src/solvers/adaptive_regularization_with_cubics.jl
Original file line number Diff line number Diff line change
Expand Up @@ -456,7 +456,6 @@ function step_solver!(dmp::AbstractManoptProblem, arcs::AdaptiveRegularizationSt
set_manopt_parameter!(arcs.sub_problem, :Objective, :σ, arcs.σ)
set_iterate!(arcs.sub_state, M, copy(M, arcs.p, arcs.X))
set_manopt_parameter!(arcs.sub_state, :σ, arcs.σ)
set_manopt_parameter!(arcs.sub_state, :p, copy(M, arcs.p))
#Solve the sub_problem – via dispatch depending on type
solve_arc_subproblem!(M, arcs.S, arcs.sub_problem, arcs.sub_state, arcs.p)
# Compute ρ
Expand Down
6 changes: 3 additions & 3 deletions test/solvers/test_adaptive_regularization_with_cubics.jl
Original file line number Diff line number Diff line change
Expand Up @@ -159,12 +159,12 @@ include("../utils/example_tasks.jl")

# test both inplace and allocating variants of grad_g
X0 = grad_f(M, p0)
X1 = grad_g(M2, X0)
X1 = get_gradient(M2, arcmo, X0)
X2 = zero_vector(M, p0)
grad_g(M2, X2, X0)
get_gradient!(M2, X2, arcmo, X0)
@test isapprox(M, p0, X1, X2)

sub_problem = DefaultManoptProblem(M2, ManifoldGradientObjective(g, grad_g))
sub_problem = DefaultManoptProblem(M2, arcmo)
sub_state = GradientDescentState(
M2,
zero_vector(M, p0);
Expand Down

0 comments on commit 7002e01

Please sign in to comment.