diff --git a/Project.toml b/Project.toml
index 5a5202d..909a2ea 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
 name = "FourierFilterFlux"
 uuid = "3d7dfd45-6c90-4c9b-b697-194a05757159"
 authors = ["dsweber2"]
-version = "0.3.5"
+version = "0.3.7"
 
 [deps]
 AbstractFFTs = "621f4979-c628-5d54-868e-fcf4e3e8185c"
diff --git a/src/FourierFilterFlux.jl b/src/FourierFilterFlux.jl
index 4bf331e..bff1762 100644
--- a/src/FourierFilterFlux.jl
+++ b/src/FourierFilterFlux.jl
@@ -1,13 +1,15 @@
 module FourierFilterFlux
 using Reexport
-# @reexport using CUDA
-using CUDA
 using Zygote, Flux, Adapt, LinearAlgebra
 using AbstractFFTs, FFTW # TODO: check the license on FFTW and such
 using ContinuousWavelets
 using RecipesBase
+using CUDA
 const use_cuda = Ref(false)
+if CUDA.functional()
+    use_cuda[] = true
+end
 
 import Adapt: adapt
 export pad, originalDomain, formatJLD, getBatchSize
diff --git a/src/Utils.jl b/src/Utils.jl
index 64f2d9c..58fa0be 100644
--- a/src/Utils.jl
+++ b/src/Utils.jl
@@ -1,7 +1,7 @@
 import NNlib.relu
 # just a little bit of type piracy used internally TODO maybe don't...
 relu(x::C) where {C<:Complex} = real(x) > 0 ? x : C(0)
-
+import CUDA: CuArray
 # ways to convert between gpu and cpu
 import Adapt.adapt
 function adapt(to, cft::ConvFFT{D,OT,F,A,V,PD,P,T,An}) where {D,OT,F,A,V,PD,P,T,An}
@@ -38,16 +38,23 @@ function cu(cft::ConvFFT{D,OT,F,A,V,PD,P,T,An}) where {D,OT,F,A,V,PD,P,T,An}
         cft.analytic)
 end
 
-# TODO this is somewhat kludgy, not sure why cu was converting these back
-#function CUDA.cu(P::FFTW.rFFTWPlan)
-#    return plan_rfft(cu(zeros(real(eltype(P)), P.sz)), P.region)
-#end
-#CUDA.cu(P::CUFFT.rCuFFTPlan) = P
+# Only convert FFTW plans to CUFFT plans if CUDA is actually functional
+function CUDA.cu(P::FFTW.rFFTWPlan)
+    if CUDA.functional()
+        return plan_rfft(CUDA.cu(zeros(real(eltype(P)), P.sz)), P.region)
+    else
+        return P # fallback to CPU FFTW plan
+    end
+end
 
-#function CUDA.cu(P::FFTW.cFFTWPlan)
-#    return plan_fft(cu(zeros(eltype(P), P.sz)), P.region)
-#end
-#CUDA.cu(P::CUFFT.cCuFFTPlan) = P
+function CUDA.cu(P::FFTW.cFFTWPlan)
+    if CUDA.functional()
+        return plan_fft(CUDA.cu(zeros(eltype(P), P.sz)), P.region)
+    else
+        return P # fallback to CPU FFTW plan
+    end
+end
+CUDA.cu(P::CUDA.CUFFT.Plan) = P
 
 Adapt.adapt(::Type{Array{T}}, P::FFTW.FFTWPlan{T}) where {T} = P
 function Adapt.adapt(::Type{Array{T}}, P::FFTW.rFFTWPlan) where {T}
@@ -62,8 +69,8 @@ adapt(::Type{<:CuArray}, x::T) where {T<:CUDA.CUFFT.CuFFTPlan} = x
 # is actually converting
 function adapt(::Union{Type{<:Array},Flux.FluxCPUAdaptor},
                x::T) where {T<:CUDA.CUFFT.CuFFTPlan}
-    transformSize = x.osz
-    dataSize = x.sz
+    transformSize = x.output_size
+    dataSize = x.input_size
     if dataSize != transformSize
         # this is an rfft, since the dimension isn't preserved
        return plan_rfft(zeros(dataSize), x.region)
diff --git a/src/boundaries.jl b/src/boundaries.jl
index 32ac357..a873209 100644
--- a/src/boundaries.jl
+++ b/src/boundaries.jl
@@ -11,7 +11,7 @@ end
 N gives the number of dimensions of convolution, while `x` gives the specific amount to pad in each dimension (done on both sides).
 If the values in `x` are negative, then the support of the filters will be determined automatically
 """
-Pad(x::Vararg{<:Integer,N}) where {N} = Pad{N}(x)
+Pad(x::Vararg{Integer,N}) where {N} = Pad{N}(x)
 
 import Base.ndims
 ndims(p::Pad{N}) where {N} = N
diff --git a/src/convFFTConstructors.jl b/src/convFFTConstructors.jl
index 68eaa55..d1959c5 100644
--- a/src/convFFTConstructors.jl
+++ b/src/convFFTConstructors.jl
@@ -78,7 +78,7 @@ function waveletLayer(inputSize::Union{T,NTuple{N,T}};
             averagingStyle = RealWaveletComplexSignal
         end
         An = map(ii -> ((ii in An) ? averagingStyle() :
-                        AnalyticWavelet()), (1:size(wavelets, 2)[end]...,))
+                        AnalyticWavelet()), 1:size(wavelets, 2))
     end
     if bias
        bias = dType.(init(inputSize[2:end-1]..., size(wavelets, 2)))
diff --git a/src/paramCollection.jl b/src/paramCollection.jl
index a8ba978..22afc00 100644
--- a/src/paramCollection.jl
+++ b/src/paramCollection.jl
@@ -1,4 +1,4 @@
-Flux.@functor ConvFFT
+Flux.@layer ConvFFT
 
 function Flux.trainable(CFT::ConvFFT{A,B,C,D,E,F,G,true}) where {A,B,C,D,E,F,G}
     (CFT.weight, CFT.bias)
diff --git a/src/transforms.jl b/src/transforms.jl
index 2fbac19..076582a 100644
--- a/src/transforms.jl
+++ b/src/transforms.jl
@@ -1,5 +1,5 @@
 # TODO: version that doesn't have an fft built in
-
+import CUDA: CuArray
 function (shears::ConvFFT)(x)
     if typeof(shears.weight) <: CuArray && !(typeof(x) <: CuArray)
         error("don't try to apply a gpu transform to a non-CuArray")
diff --git a/test/CUDATests.jl b/test/CUDATests.jl
index c0b1092..70cdc6e 100644
--- a/test/CUDATests.jl
+++ b/test/CUDATests.jl
@@ -1,13 +1,13 @@
 if CUDA.functional()
     @testset "CUDA methods" begin
         w = ConvFFT((100,), nConvDims = 1)
-        @test cu(w.fftPlan) isa CUFFT.rCuFFTPlan # does cu work on the fft plans when applied directly?
+        @test cu(w.fftPlan) isa CUDA.CUFFT.CuFFTPlan # does cu work on the fft plans when applied directly?
         cw = cu(w)
         @test cw.weight isa NTuple{N,CuArray} where {N} # does cu work on the weights?
-        @test cw.fftPlan isa CUFFT.rCuFFTPlan # does cu work on the fftPlan?
+        @test cw.fftPlan isa CUDA.CUFFT.CuFFTPlan # does cu work on the fftPlan?
         cw1 = gpu(w)
         @test cw1.weight isa NTuple{N,CuArray} where {N} # does gpu work on the weights?
-        @test cw1.fftPlan isa CUFFT.rCuFFTPlan # does gpu work on the fftPlan?
+        @test cw1.fftPlan isa CUDA.CUFFT.CuFFTPlan # does gpu work on the fftPlan?
         w1 = cpu(cw)
         @test w1.weight isa NTuple{N,Array} where {N} # does cpu work on the weights?
         @test w1.fftPlan isa FFTW.rFFTWPlan # does cpu work on the fftPlan?
@@ -16,15 +16,15 @@ if CUDA.functional()
         @test cw(cx) isa CuArray
         @test cw(cx) ≈ cu(w(x)) # CUDA and cpu version get the same result approximately
         cw(cx)
-        ∇cu = gradient(t -> sum(cw(t)), cx)[1]
-        ∇ = gradient(t -> sum(w(t)), x)[1]
+        ∇cu = Flux.gradient(t -> sum(cw(t)), cx)[1]
+        ∇ = Flux.gradient(t -> sum(w(t)), x)[1]
         @test ∇ ≈ cpu(∇cu)
 
         w1 = waveletLayer((100, 1, 1))
         cw1 = cu(w1)
         @test cw1(cx) ≈ cu(w1(x))
-        CUDA.@allowscalar ∇cu = gradient(t -> abs(cw1(t)[1]), cx)[1]
-        CUDA.@allowscalar ∇ = gradient(t -> abs(w1(t)[1]), x)[1]
+        CUDA.@allowscalar ∇cu = Flux.gradient(t -> abs(cw1(t)[1]), cx)[1]
+        CUDA.@allowscalar ∇ = Flux.gradient(t -> abs(w1(t)[1]), x)[1]
         @test ∇ ≈ cpu(∇cu)
     end
 end
diff --git a/test/ConvFFTConstructors.jl b/test/ConvFFTConstructors.jl
index 7369e49..6ca9331 100644
--- a/test/ConvFFTConstructors.jl
+++ b/test/ConvFFTConstructors.jl
@@ -1,6 +1,7 @@
 # TODO: add some checks for different boundary conditions
 # TODO: add checks for analytic wavelets
 # ConvFFT constructor tests
+using FourierFilterFlux: applyWeight, applyBC, internalConvFFT
 @testset "ConvFFT constructors" begin
     @testset "Utils" begin
         explicit = [1 0 0; 0 1 0; 0 0 1; zeros(2, 3)]
@@ -22,16 +23,17 @@
         shears = ConvFFT(weightMatrix, nothing, originalSize, abs, plan = true,
                          boundary = Pad(padding), trainable = true)
-        @test Flux.params(shears).order[1] == shears.weight[1]
-        @test length(Flux.params(shears).order) == 1
+        trn = Flux.trainable(shears)
+        @test trn[1][1] == shears.weight[1]
+        @test length(trn) >= 1
 
         shears = ConvFFT(weightMatrix, nothing, originalSize, abs,
                          boundary = Pad(padding), trainable = false)
-        @test isempty(Flux.params(shears))
+        @test isempty(Flux.trainable(shears))
         x = randn(21, 11, 1, 10)
-        ∇ = gradient((x) -> shears(x)[1, 1, 1, 1, 3], x)
-        @test minimum(∇[1][:, :, :, [1:2..., 4:10...]] .≈ 0)
+        ∇ = Flux.gradient((x) -> shears(x)[1, 1, 1, 1, 3], x)
+        @test all(∇[1][:, :, :, [1:2..., 4:10...]] .≈ 0)
 
         # check that the identity ConvFFT is, in fact, an identity
         weightMatrix = ones(Float32, (21 + 10) >> 1 + 1, 11 + 10, 1)
@@ -59,7 +61,7 @@
         nextLayer = FourierFilterFlux.internalConvFFT(x̂, shears.weight, usedInds,
                                                       shears.fftPlan, shears.bias,
                                                       shears.analytic)
-        ∇ = gradient((x̂) -> FourierFilterFlux.internalConvFFT(x̂,
+        ∇ = Flux.gradient((x̂) -> FourierFilterFlux.internalConvFFT(x̂,
                                                                shears.weight,
                                                                usedInds,
                                                                shears.fftPlan,
@@ -69,20 +71,20 @@
                                                                1,
                                                                1], x̂)
-        @test minimum(abs.(diag(∇[1][:, :, 1, 1])) .≈ 2.0f0 / 31 / 21)
+        @test all(abs.(diag(∇[1][:, :, 1, 1])) .≈ 2.0f0 / 31 / 21)
 
         ax = axes(x̂)[3:end-1]
-        ∇ = gradient((x̂) -> FourierFilterFlux.applyWeight(x̂, shears.weight[1], usedInds,
+        ∇ = Flux.gradient((x̂) -> FourierFilterFlux.applyWeight(x̂, shears.weight[1], usedInds,
                                                            shears.fftPlan,
                                                            shears.bias,
                                                            FourierFilterFlux.NonAnalyticMatching())[1,
                                                                                                     1,
                                                                                                     1,
                                                                                                     1,
                                                                                                     1], x̂)
-        @test minimum(abs.(diag(∇[1][:, :, 1, 1])) .≈ 2.0f0 / 31 / 21)
+        @test all(abs.(diag(∇[1][:, :, 1, 1])) .≈ 2.0f0 / 31 / 21)
 
-        ∇ = gradient((x̂) -> (shears.fftPlan\(x̂.*shears.weight[1]))[1, 1, 1, 1], x̂)
-        @test minimum(abs.(diag(∇[1][:, :, 1, 1])) .≈ 1.0f0 / 31 * 2 / 21)
+        ∇ = Flux.gradient((x̂) -> (shears.fftPlan\(x̂.*shears.weight[1]))[1, 1, 1, 1], x̂)
+        @test all(abs.(diag(∇[1][:, :, 1, 1])) .≈ 1.0f0 / 31 * 2 / 21)
 
         sheared = shears(x)
         @test size(sheared) == (21, 11, 1, 1, 10)
@@ -97,11 +99,11 @@
         if CUDA.functional()
             gpuVer = shears |> gpu
             @test gpuVer.weight[1] isa CuArray
-            @test gpuVer.fftPlan isa CUFFT.rCuFFTPlan
+            @test gpuVer.fftPlan isa CUDA.CUFFT.CuFFTPlan
             if !(gpuVer.weight[1] isa CuArray)
                 println("gpuVer.weight is of type $(typeof(gpuVer.weight))")
             end
-            if !(gpuVer.fftPlan isa CUFFT.rCuFFTPlan)
+            if !(gpuVer.fftPlan isa CUDA.CUFFT.CuFFTPlan)
                 println("gpuVer.fftPlan is of type $(typeof(gpuVer.fftPlan))")
             end
         end
@@ -147,15 +149,17 @@
         @test shears.σ == abs
         @test shears.bias == nothing
         @test shears.bc.padBy == (5,)
-        @test Flux.params(shears).order[1] == shears.weight[1]
+        trn = Flux.trainable(shears)
+        @test trn[1][1] == shears.weight[1]
+        @test length(trn) >= 1
 
         shears = ConvFFT(weightMatrix, nothing, originalSize, abs, plan = true,
                          boundary = Pad(padding), trainable = false)
-        @test isempty(Flux.params(shears))
+        @test isempty(Flux.trainable(shears))
         x = randn(21, 1, 10)
-        ∇ = gradient((x) -> shears(x)[1, 1, 1, 3], x)
-        @test minimum(∇[1][:, :, [1:2..., 4:10...]] .≈ 0)
+        ∇ = Flux.gradient((x) -> shears(x)[1, 1, 1, 3], x)
+        @test all(∇[1][:, :, [1:2..., 4:10...]] .≈ 0)
 
         # Sym test
         weightMatrix = randn(Float32, (21 + 1), 1)
@@ -165,11 +169,13 @@
         @test shears.σ == abs
         @test shears.bias == nothing
         @test typeof(shears.bc) <: Sym
-        @test Flux.params(shears).order[1] == shears.weight[1]
+        trn = Flux.trainable(shears)
+        @test trn[1][1] == shears.weight[1]
+        @test length(trn) >= 1
         x = randn(21, 1, 10)
-        ∇ = gradient((x) -> shears(x)[1, 1, 1, 3], x)
-        @test minimum(∇[1][:, :, [1:2..., 4:10...]] .≈ 0)
-        @test minimum(abs.(∇[1][:, 1, 3])) > 0
+        ∇ = Flux.gradient((x) -> shears(x)[1, 1, 1, 3], x)
+        @test all(∇[1][:, :, [1:2..., 4:10...]] .≈ 0)
+        @test all(abs.(∇[1][:, 1, 3]) .> 0)
         weightMatrix = randn(Float32, 21 >> 1 + 1, 1)
         shears = ConvFFT(weightMatrix, nothing, originalSize, abs, plan = true,
                          boundary = FourierFilterFlux.Periodic())
@@ -177,11 +183,13 @@
         @test shears.σ == abs
         @test shears.bias == nothing
         @test typeof(shears.bc) <: FourierFilterFlux.Periodic
-        @test Flux.params(shears).order[1] == shears.weight[1]
+        trn = Flux.trainable(shears)
+        @test trn[1][1] == shears.weight[1]
+        @test length(trn) >= 1
         x = randn(21, 1, 10)
-        ∇ = gradient((x) -> shears(x)[1, 1, 1, 3], x)
-        @test minimum(∇[1][:, :, [1:2..., 4:10...]] .≈ 0)
-        @test minimum(abs.(∇[1][:, 1, 3])) > 0
+        ∇ = Flux.gradient((x) -> shears(x)[1, 1, 1, 3], x)
+        @test all(∇[1][:, :, [1:2..., 4:10...]] .≈ 0)
+        @test all(abs.(∇[1][:, 1, 3]) .> 0)
     end
 
     # check that the identity ConvFFT is, in fact, an identity
@@ -235,7 +243,6 @@
     end
 
-    using FourierFilterFlux: applyWeight, applyBC, internalConvFFT
     weight = (2 .* ones(Complex{Float32}, (21 + 10) >> 1 + 1),)
     bc = Pad(5)
     x = randn(Float32, 21, 1, 10)
@@ -244,13 +251,13 @@
     fftPlan = plan_rfft(xbc, (1,))
     An = map(x -> FourierFilterFlux.NonAnalyticMatching(), (1:length(weight)...,))
     nextLayer = internalConvFFT(x̂, weight, usedInds, fftPlan, nothing, An)
-    ∇ = gradient((x̂) -> internalConvFFT(x̂, weight, usedInds, fftPlan, nothing, An)[1,
+    ∇ = Flux.gradient((x̂) -> internalConvFFT(x̂, weight, usedInds, fftPlan, nothing, An)[1,
                                                                                     1,
                                                                                     1,
                                                                                     1,
                                                                                     1], x̂)
-    y, ∂ = pullback((x̂) -> internalConvFFT(x̂, weight, usedInds, fftPlan, nothing, An)[1,
+    y, ∂ = Zygote.pullback((x̂) -> internalConvFFT(x̂, weight, usedInds, fftPlan, nothing, An)[1,
                                                                                        1,
                                                                                        1,
                                                                                        1,
@@ -259,12 +266,12 @@
     ∂(y)
     ∂(y) # repeated calls to the derivative were causing errors while argWrapper
     # was in use
-    @test minimum(abs.(∇[1][:, 1, 1]) .≈ 2.0f0 / 31)
+    @test all(abs.(∇[1][:, 1, 1]) .≈ 2.0f0 / 31)
     # no bias, not analytic and real valued output
 
     # no bias, analytic (so complex valued)
     fftPlan = plan_fft(xbc, (1,))
-    ∇ = gradient((x̂) -> abs(applyWeight(x̂,
+    ∇ = Flux.gradient((x̂) -> abs(applyWeight(x̂,
                                         weight[1],
                                         usedInds,
                                         fftPlan,
@@ -274,7 +281,7 @@
                                         1,
                                         1]), x̂)
-    @test minimum(abs.(∇[1][:, 1, 1]) .≈ 2.0f0 / 31)
+    @test all(abs.(∇[1][:, 1, 1]) .≈ 2.0f0 / 31)
 
     # no bias, not analytic, complex valued, but still symmetric
     real(applyWeight(x̂,
@@ -284,7 +291,7 @@
                      nothing,
                      FourierFilterFlux.RealWaveletRealSignal()))
     fftPlan = plan_fft(xbc, (1,))
-    ∇ = gradient((x̂) -> real(applyWeight(x̂,
+    ∇ = Flux.gradient((x̂) -> real(applyWeight(x̂,
                                          weight[1],
                                          usedInds,
                                          fftPlan,
@@ -294,7 +301,7 @@
                                          1,
                                          1]), x̂)
-    @test minimum(abs.(∇[1][2:end, 1, 1]) .≈ 2 * 2.0f0 / 31)
+    @test all(abs.(∇[1][2:end, 1, 1]) .≈ 2 * 2.0f0 / 31)
     @test abs(∇[1][1, 1, 1]) ≈ 2.0f0 / 31
 
     # internal methods tests
@@ -309,7 +316,7 @@
     nextLayer = FourierFilterFlux.internalConvFFT(x̂, shears.weight, usedInds,
                                                   shears.fftPlan, shears.bias,
                                                   shears.analytic)
-    ∇ = gradient((x̂) -> FourierFilterFlux.internalConvFFT(x̂,
+    ∇ = Flux.gradient((x̂) -> FourierFilterFlux.internalConvFFT(x̂,
                                                            shears.weight,
                                                            usedInds,
                                                            shears.fftPlan,
@@ -319,7 +326,7 @@
                                                            1,
                                                            1], x̂)
-    @test minimum(abs.(∇[1][:, 1, 1]) .≈ 2.0f0 / 31)
+    @test all(abs.(∇[1][:, 1, 1]) .≈ 2.0f0 / 31)
     #
 
     # no bias, not analytic and real valued output
@@ -327,8 +334,8 @@
     # no bias, not analytic, complex valued, but still symmetric
 
     # biased (and one of the others, doesn't matter which)
-    ∇ = gradient((x̂) -> (shears.fftPlan\(x̂.*shears.weight[1]))[1, 1, 1, 1], x̂)
-    @test minimum(abs.(∇[1][:, :, 1, 1]) .≈ 1.0f0 / 31 * 2)
+    ∇ = Flux.gradient((x̂) -> (shears.fftPlan\(x̂.*shears.weight[1]))[1, 1, 1, 1], x̂)
+    @test all(abs.(∇[1][:, :, 1, 1]) .≈ 1.0f0 / 31 * 2)
     sheared = shears(x)
     @test size(sheared) == (21, 1, 1, 10)
@@ -336,7 +343,7 @@
     if CUDA.functional()
         gpuVer = shears |> gpu
         @test gpuVer.weight[1] isa CuArray
-        @test gpuVer.fftPlan isa CUFFT.rCuFFTPlan
+        @test gpuVer.fftPlan isa CUDA.CUFFT.CuFFTPlan
     end
     # extra channel dimension
     originalSize = (20, 16, 1, 10)
diff --git a/test/ConvFFTtransform.jl b/test/ConvFFTtransform.jl
index b0ef468..afb6273 100644
--- a/test/ConvFFTtransform.jl
+++ b/test/ConvFFTtransform.jl
@@ -1,57 +1,92 @@
-if CUDA.functional()
-    onGpu = gpu
-else
-    onGpu = identity
-end
 @testset "ConvFFT transform" begin
-    @testset "ConvFFT 2D" begin
+    @testset "ConvFFT 2D - CPU" begin
         originalSize = (10, 10, 1, 2)
         tmp = zeros(originalSize)
-        init = zeros(originalSize)
+        init = zeros(Float32, originalSize)
         init[5, 5, 1, 2] = Float32(1)
-        init = init |> onGpu
-        shears = ConvFFT(originalSize) |> onGpu
+        shears = ConvFFT(originalSize)
         res = shears(init)
         @test size(res) == (10, 10, 5, 1, 2)
-        # TODO: for the other boundary conditions. This is just periodic
+
         function minimalTransform(shears, init)
             equivalent = zeros(10, 10, 5, 1, 2)
             for i = 1:5
                 equivalent[:, :, i, :, :] = irfft(rfft(init, (1, 2)) .*
-                                                  cpu(shears.weight[i]), 10, (1, 2)) .+ cpu(shears.bias[i])
+                                                  shears.weight[i], 10, (1, 2)) .+ shears.bias[i]
             end
-            equivalent = equivalent |> onGpu
             return equivalent
         end
-        #@info "" minimum(res), minimum(minimalTransform(shears, init))
-        @test minimalTransform(shears, cpu(init)) ≈ res
-
-        shears = ConvFFT(originalSize, 5, abs) |> onGpu
+        @test minimalTransform(shears, init) ≈ res
+
+        shears = ConvFFT(originalSize, 5, abs)
         res = shears(init)
-        @test abs.(minimalTransform(shears, cpu(init))) ≈ res
+        @test abs.(minimalTransform(shears, init)) ≈ res
     end
-    @testset "ConvFFT 1D" begin
+
+    # Run tests on both CPU and GPU (if available)
+    # Begin with 2D test, then 1D later.
+    if CUDA.functional()
+        @testset "ConvFFT 2D - GPU" begin
+            originalSize = (10, 10, 1, 2)
+            init = zeros(Float32, originalSize)
+            init[5, 5, 1, 2] = Float32(1)
+
+            # Create CPU version first
+            shears_cpu = ConvFFT(originalSize)
+            res_cpu = shears_cpu(init)
+
+            # Move SAME model to GPU (not creating a new one)
+            init_gpu = CuArray(init)
+            shears_gpu = gpu(shears_cpu)
+            res_gpu = shears_gpu(init_gpu)
+
+            @test res_gpu isa CuArray
+            @test size(res_gpu) == (10, 10, 5, 1, 2)
+            @test cpu(res_gpu) ≈ res_cpu rtol=1e-5
+        end
+    end
+
+    @testset "ConvFFT 1D - CPU" begin
         originalSize = (10, 1, 2)
-        init = zeros(originalSize)
+        init = zeros(Float32, originalSize)
         init[5, 1, 2] = Float32(1)
         shears = ConvFFT(originalSize, nConvDims = 1, boundary = Pad(-1))
         res = shears(init)
         @test size(res) == (10, 5, 1, 2)
-        # TODO: this is only padded
+
         function minimalTransform(shears, init)
             equivalent = zeros(16, 5, 1, 2)
             for i = 1:5
                 equivalent[:, i, :, :] = irfft(rfft(pad(init, shears.bc.padBy), (1,)) .*
-                                               shears.weight[i],
-                                               16,
-                                               (1,)) .+ shears.bias[i]
+                                               shears.weight[i], 16, (1,)) .+ shears.bias[i]
             end
             return equivalent[4:13, :, :, :]
         end
-        @test minimalTransform(shears, cpu(init)) ≈ res
-
+        @test minimalTransform(shears, init) ≈ res
+
         shears = ConvFFT(originalSize, 5, abs, nConvDims = 1, boundary = Pad(-1))
         res = shears(init)
         @test abs.(minimalTransform(shears, init)) ≈ res
     end
+
+    if CUDA.functional()
+        @testset "ConvFFT 1D - GPU" begin
+            originalSize = (10, 1, 2)
+            init = zeros(Float32, originalSize)
+            init[5, 1, 2] = Float32(1)
+
+            # Create CPU version first
+            shears_cpu = ConvFFT(originalSize, nConvDims = 1, boundary = Pad(-1))
+            res_cpu = shears_cpu(init)
+
+            # Move SAME model to GPU
+            init_gpu = CuArray(init)
+            shears_gpu = gpu(shears_cpu)
+            res_gpu = shears_gpu(init_gpu)
+
+            @test res_gpu isa CuArray
+            @test size(res_gpu) == (10, 5, 1, 2)
+            @test cpu(res_gpu) ≈ res_cpu rtol=1e-5
+        end
+    end
 end
diff --git a/test/boundaryTests.jl b/test/boundaryTests.jl
index 4d52d4e..c643783 100644
--- a/test/boundaryTests.jl
+++ b/test/boundaryTests.jl
@@ -14,21 +14,21 @@
         input = ones(Float64, 10, 2)
         padd = pad(input, 5)
         @test size(padd) == (20, 2)
-        @test minimum(padd[1, :] .== 0.0)
-        @test minimum(padd[6, :] .== 1.0)
+        @test all(padd[1, :] .== 0.0)
+        @test all(padd[6, :] .== 1.0)
         @test eltype(padd) == eltype(input)
 
         # two meta dims, different type
         input = ones(ComplexF64, 10, 5, 3)
         padd = pad(input, 5)
         @test size(padd) == (20, 5, 3)
-        @test minimum(padd[1, :, :] .== 0.0)
-        @test minimum(padd[6, :, :] .== 1.0)
+        @test all(padd[1, :, :] .== 0.0)
+        @test all(padd[6, :, :] .== 1.0)
         @test eltype(padd) == eltype(input)
 
         # test the gradient is just an indicator with the right offset
         i = 6
-        grad = gradient(x -> pad(x, 5)[i, 1, 1], randn(5, 5, 2))[1]
+        grad = Flux.gradient(x -> pad(x, 5)[i, 1, 1], randn(5, 5, 2))[1]
         @test size(grad) == (5, 5, 2)
         @test grad[1, 1, 1] == 1
     end
@@ -46,22 +46,22 @@
         input = ones(Float64, 10, 10, 2)
         padd = pad(input, (5, 5))
         @test size(padd) == (20, 20, 2)
-        @test minimum(padd[1, 1, :] .== 0.0)
-        @test minimum(padd[6, 6, :] .== 1.0)
+        @test all(padd[1, 1, :] .== 0.0)
+        @test all(padd[6, 6, :] .== 1.0)
        @test eltype(padd) == eltype(input)
 
         # two meta dims, different type
         input = ones(ComplexF64, 10, 10, 5, 3)
         padd = pad(input, (5, 5))
         @test size(padd) == (20, 20, 5, 3)
-        @test minimum(padd[1, 1, :, :] .== 0.0)
-        @test minimum(padd[6, 6, :, :] .== 1.0)
+        @test all(padd[1, 1, :, :] .== 0.0)
+        @test all(padd[6, 6, :, :] .== 1.0)
         @test eltype(padd) == eltype(input)
 
         # test the gradient is just an indicator with the right offset
         i = 6
         j = 7
-        grad = gradient(x -> pad(x, (5, 6))[i, j, 1, 1], randn(5, 5, 5, 2))[1]
+        grad = Flux.gradient(x -> pad(x, (5, 6))[i, j, 1, 1], randn(5, 5, 5, 2))[1]
         @test size(grad) == (5, 5, 5, 2)
         @test grad[1, 1, 1, 1] == 1
     end