From b0e7e85558edb93f5747190caec502bdd7ef9cc4 Mon Sep 17 00:00:00 2001
From: Jeremiah Lewis <4462211+jeremiahpslewis@users.noreply.github.com>
Date: Tue, 17 Dec 2024 19:08:54 +0100
Subject: [PATCH] Simplify FluxApproximator's optimise! method by using a
 single-line function definition

---
 .../src/policies/learners/flux_approximator.jl | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/src/ReinforcementLearningCore/src/policies/learners/flux_approximator.jl b/src/ReinforcementLearningCore/src/policies/learners/flux_approximator.jl
index 6657e6bc2..a6f0cb5b9 100644
--- a/src/ReinforcementLearningCore/src/policies/learners/flux_approximator.jl
+++ b/src/ReinforcementLearningCore/src/policies/learners/flux_approximator.jl
@@ -43,8 +43,5 @@ Flux.@layer FluxApproximator trainable=(model,)
 forward(A::FluxApproximator, args...; kwargs...) = A.model(args...; kwargs...)
 forward(A::FluxApproximator, env::E, player::AbstractPlayer=current_player(env)) where {E <: AbstractEnv} = env |> (x -> state(x, player)) |> (x -> forward(A, x))

-function RLBase.optimise!(A::FluxApproximator, grad::NamedTuple)
-
-    Flux.Optimise.update!(A.optimiser_state, A.model, grad.model)
-end
+RLBase.optimise!(A::FluxApproximator, grad::NamedTuple) = Flux.Optimise.update!(A.optimiser_state, A.model, grad.model)
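
Note (not part of the patch): a minimal sketch of how the refactored `RLBase.optimise!` is exercised. The one-line method only reads `grad.model`, so any `NamedTuple` whose `model` field mirrors `A.model` is accepted. The `FluxApproximator` keyword constructor and the loss below are illustrative assumptions, not taken from this diff.

```julia
# Illustrative usage sketch; constructor keywords are assumed, not from the patch.
using Flux
using ReinforcementLearningCore
import ReinforcementLearningBase as RLBase

model  = Chain(Dense(4 => 32, relu), Dense(32 => 2))
approx = FluxApproximator(model = model, optimiser = Adam())  # assumed keyword constructor

# Compute a gradient for the wrapped model only, then pass it wrapped in a
# NamedTuple so that grad.model matches the layout of approx.model.
x, y = rand(Float32, 4, 8), rand(Float32, 2, 8)
grad = Flux.gradient(m -> sum(abs2, m(x) .- y), approx.model)[1]

RLBase.optimise!(approx, (model = grad,))  # dispatches to the new single-line method
```

Behaviour is unchanged by the patch; the `function ... end` block is simply collapsed into an equivalent single-line method definition.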