Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
24 commits
Select commit Hold shift + click to select a range
18bef70
Added testvalue for LinearAlgebra.Diagonal
JordiManyer Apr 29, 2026
7ebd3d2
Updated news
JordiManyer Apr 29, 2026
4418a72
Added a bunch of tests that fail
JordiManyer Apr 29, 2026
ba57cf7
More tests that fail
JordiManyer Apr 29, 2026
322a427
Reverted most changes from #1277 in favour of a less wonky implementa…
JordiManyer Apr 30, 2026
8668967
Merge branch 'master' of github.com:gridap/Gridap.jl into bugfix-empt…
JordiManyer Apr 30, 2026
0f89b2a
Fixed the consistency of `testvalue(::Type{LinearCombinationFieldVect…
JordiManyer Apr 30, 2026
9b07a85
Started fixing the diagonal mess
JordiManyer Apr 30, 2026
7ff4920
fix for tests introduced in #1277 and #1290
Antoinemarteau Apr 30, 2026
96bed20
Making types consistent across evaluate and return_type
JordiManyer May 1, 2026
488b383
Minor
JordiManyer May 1, 2026
b1fa79d
Fixed type instability in tensor operations
JordiManyer May 1, 2026
277fe82
Updated news
JordiManyer May 1, 2026
313cfd4
Add more meaningful error messages for type inference functions
JordiManyer May 1, 2026
bac7201
Merge branch 'bugfix-tensor-type-instability' of github.com:gridap/Gr…
JordiManyer May 1, 2026
a4c66a6
Bugfix where non-concrete types would arise in bases
JordiManyer May 1, 2026
f357352
Updated NEWS
JordiManyer May 1, 2026
36275ef
Merge branch 'bugfix-basis-type-instability' of github.com:gridap/Gri…
JordiManyer May 1, 2026
dbef316
Made the feature a new function make_concretetype
JordiManyer May 1, 2026
8088455
Merge branch 'bugfix-basis-type-instability' of github.com:gridap/Gri…
JordiManyer May 1, 2026
4139e31
Cleanup and more tests
JordiManyer May 1, 2026
ea8b881
Minor
JordiManyer May 1, 2026
49a83be
Minor
JordiManyer May 4, 2026
cef3e53
Minor
JordiManyer May 4, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 7 additions & 0 deletions NEWS.md
Original file line number Diff line number Diff line change
Expand Up @@ -8,8 +8,15 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [Unreleased]

### Added

- Make `compute_facet_owners(...)` more flexible, allowing the user to provide a function to select the owner from neighboring cells. Since PR[#1291](https://github.com/gridap/Gridap.jl/pull/1291).

### Fixed

- Fixed type instability for tensor operations between `MultiValue` and scalars. Since PR[#1293](https://github.com/gridap/Gridap.jl/pull/1293).
- Fixed type instability in basis construction when user gives a non-concrete output type. Since PR[#1294](https://github.com/gridap/Gridap.jl/pull/1294).
- Reverted most changes from PR[#1277](https://github.com/gridap/Gridap.jl/pull/1277), which were having unexpected consequences, in favour of simpler changes. Since PR[#1290](https://github.com/gridap/Gridap.jl/pull/1290).

## [0.20.5] - 2026-04-28

### Fixed
Expand Down
8 changes: 0 additions & 8 deletions src/Arrays/ArrayBlocks.jl
Original file line number Diff line number Diff line change
Expand Up @@ -219,14 +219,6 @@ function testvalue(::Type{ArrayBlock{A,N}}) where {A,N}
ArrayBlock(array,touched)
end

# Representative item of a `Fill` of ArrayBlocks, valid even for zero-length
# arrays: a non-empty `Fill` yields its single stored value, while the empty
# case falls back to `testvalue` to build a placeholder of the eltype.
# The trailing `::T` assertion keeps the return type stable.
function testitem(a::Fill{T}) where T <: ArrayBlock{<:AbstractArray{<:Number}}
if length(a) > 0
a.value
else
testvalue(T)
end::T
end

# CachedArray methods

function CachedArray(a::ArrayBlock)
Expand Down
9 changes: 0 additions & 9 deletions src/Arrays/CompressedArrays.jl
Original file line number Diff line number Diff line change
Expand Up @@ -47,15 +47,6 @@ function testitem(a::CompressedArray{T}) where T
end::T
end

# Representative item of a `CompressedArray` of evaluated quantities, valid
# even for zero-sized arrays: with no pointers, fall back to `testvalue` to
# build a placeholder of the eltype instead of indexing into `a.values`.
function testitem(a::CompressedArray{T}) where T <: Union{AbstractArray{<:Number}, ArrayBlock{<:Number}, ArrayBlock{<:AbstractArray{<:Number}}}
if length(a.ptrs) == 0
testvalue(T)
else
a.values[first(a.ptrs)]
end::T
end

size(a::CompressedArray) = size(a.ptrs)

@propagate_inbounds function getindex(a::CompressedArray,i::Integer)
Expand Down
21 changes: 11 additions & 10 deletions src/Arrays/Interface.jl
Original file line number Diff line number Diff line change
Expand Up @@ -181,15 +181,6 @@ function testitem(a::Fill)
a.value
end

# Representative item of a `Fill` of numeric arrays, valid even for
# zero-length arrays: non-empty fills yield the stored value; empty fills
# fall back to `testvalue` so callers always receive an item of type `T`.
function testitem(a::Fill{T}) where T <: AbstractArray{<:Number}
if length(a) > 0
a.value
else
testvalue(T)
end::T
end

# A bare number is its own representative test item.
testitem(a::Number) = a
Expand Down Expand Up @@ -226,12 +217,22 @@ function testvalue(::Type{T}) where T<:Transpose{E,A} where {E,A}
Transpose(a)
end

# Placeholder `Diagonal`: wrap a placeholder instance of the stored
# diagonal-vector type `A`.
testvalue(::Type{T}) where T<:Diagonal{E,A} where {E,A} = Diagonal(testvalue(A))

# Placeholder `Base.OneTo`: the empty range `Base.OneTo(0)` for the given eltype.
function testvalue(::Type{Base.OneTo{T}}) where T
return Base.OneTo(zero(T))
end

# Placeholder `UnitRange`: the empty range `one(T):zero(T)`.
function testvalue(::Type{Base.UnitRange{T}}) where T
return UnitRange(one(T), zero(T))
end

function testvalue(::Type{T}) where T<:Fill{E,N,A} where {E,N,A}
Fill(zero(E),testvalue(A))
Fill(testvalue(E),testvalue(A))
#Fill(zero(E),testvalue(A))
end

# Generic `AbstractFill` subtypes are not supported: fail loudly instead of
# silently building a wrong placeholder (only the concrete `Fill` case is
# implemented).
function testvalue(::Type{T}) where T<:FillArrays.AbstractFill{E,N,A} where {E,N,A}
@notimplemented
end

function testvalue(::Type{T}) where T<:Tuple
Expand Down
37 changes: 21 additions & 16 deletions src/Arrays/LazyArrays.jl
Original file line number Diff line number Diff line change
Expand Up @@ -255,6 +255,7 @@ Base.size(a::LazyArray) = size(a.maps)
Base.size(a::LazyArray{G,T,1} where {G,T}) = (length(a.maps),)

function Base.sum(a::LazyArray)
isempty(a) && return zero(testitem(a))
cache = array_cache(a)
lazy_sum(cache,a)
end
Expand All @@ -270,8 +271,13 @@ end

# A plain array is already materialized, so collecting it is the identity.
function lazy_collect(a::AbstractArray)
return a
end

function lazy_collect(a::LazyArray{A,T} where A) where T
function lazy_collect(a::LazyArray)
isempty(a) && return fill(testitem(a),size(a))
cache = array_cache(a)
lazy_collect(cache,a)
end

function lazy_collect(cache, a::LazyArray{A,T} where A) where T
r = Array{T}(undef,size(a))
@check axes(a) == axes(r)
@inbounds for i in eachindex(a)
Expand All @@ -291,35 +297,34 @@ function testitem(a::LazyArray{A,T} where A) where T
end::T
end

# Representative item of a `LazyArray` of evaluated quantities, valid even
# for zero-sized arrays: when empty, fall back to `testvalue` to build a
# placeholder of the eltype instead of indexing out of bounds.
function testitem(a::LazyArray{A,T} where A) where T <: Union{AbstractArray{<:Number},ArrayBlock{<:Number},ArrayBlock{<:AbstractArray{<:Number}}}
if length(a) > 0
first(a)
else
testvalue(T)
end::T
end

# Particular implementations for Fill

function lazy_map(::typeof(evaluate),f::Fill, a::Fill...)
ai = map(ai->ai.value,a)
r = evaluate(f.value, ai...)
s = _common_size(f, a...)
Fill(r, s)
if prod(s) > 0
r = evaluate(f.value, ai...)
else
r = return_value(f.value, ai...)
end
return Fill(r, s)
end

function lazy_map(::typeof(evaluate),::Type{T}, f::Fill, a::Fill...) where T
ai = map(ai->ai.value,a)
r = evaluate(f.value, ai...)
s = _common_size(f, a...)
Fill(r, s)
if prod(s) > 0
r = evaluate(f.value, ai...)
else
r = return_value(f.value, ai...)
end :: T
return Fill(r, s)
end

function _common_size(a::AbstractArray...)
a1, = a
@check all(map(ai->length(a1) == length(ai),a)) "Array sizes $(map(size,a)) are not compatible."
if all( map(ai->size(a1) == size(ai),a) )
@check all(ai -> length(a1) == length(ai), a) "Array sizes $(map(size,a)) are not compatible."
if all(ai -> size(a1) == size(ai), a)
size(a1)
else
(length(a1),)
Expand Down
106 changes: 41 additions & 65 deletions src/Arrays/Maps.jl
Original file line number Diff line number Diff line change
Expand Up @@ -146,7 +146,9 @@ macro.
function test_map(y,f,x...;cmp=(==))
z = evaluate(f,x...)
@test cmp(z,y)
@test typeof(z) == return_type(f,x...)
T = return_type(f,x...)
@test typeof(z) == T
@test typeof(return_value(f,x...)) == T
cache = return_cache(f,x...)
z = evaluate!(cache,f,x...)
@test cmp(z,y)
Expand Down Expand Up @@ -185,90 +187,64 @@ struct Broadcasting{F} <: Map
f::F
end

return_cache(f::Broadcasting,x...) = nothing

evaluate!(cache,f::Broadcasting,x...) = broadcast(f.f,x...)

# Generic fallback for the value of a broadcasted map: broadcast the wrapped
# function over placeholder arguments built by `testargs`, so no real data is
# consumed. NOTE(review): exact `testargs` contract assumed from its use
# elsewhere in the file — confirm.
function return_value(f::Broadcasting,x...)
broadcast( (y...) -> f.f(testargs(f.f,y...)...), x... )
end
return_cache(f::Broadcasting,x...) = nothing
evaluate!(cache,f::Broadcasting,x...) = broadcast(f.f,x...)

# In-place broadcast for mixed number/array arguments: resize the cached
# output to the common broadcast shape, then `broadcast!` into it to avoid
# allocating a fresh result on every call.
function evaluate!(cache,f::Broadcasting,x::Union{Number,AbstractArray{<:Number}}...)
r = _bcast_setsize!(cache,x...)
a = r.array
broadcast!(f.f,a,x...)
a
end
# Scalar-only case: broadcasting over plain numbers is just a function call,
# so no cache is needed and evaluation applies `f.f` directly.
return_value(f::Broadcasting,x::Number...) = return_value(f.f,x...)
return_cache(f::Broadcasting,x::Number...) = nothing
evaluate!(cache,f::Broadcasting,args::Number...) = f.f(args...)

# Single-array case: resize the cached output to `size(x)` and fill it with
# an explicit elementwise loop; `@inbounds` is safe because the axes were
# just checked to match.
function evaluate!(cache,f::Broadcasting,x::AbstractArray{<:Number})
setsize!(cache,size(x))
a = cache.array
@check axes(a) == axes(x)
@inbounds for i in eachindex(x)
a[i] = f.f(x[i])
end
a
end
# Shape an argument contributes to a broadcast. Scalars count as
# 0-dimensional (shape `()`); this helper exists because our TensorValues do
# not report `size(x) == ()`.
_bcast_size(::Number) = ()
_bcast_size(a) = size(a)

# Scalar-only evaluation: no cache is used, the wrapped function is applied
# directly to the number arguments.
function evaluate!(cache,f::Broadcasting,args::Number...)
f.f(args...)
end
# Like `_bcast_size`, but every extent other than singleton dimensions
# collapses to zero, yielding a zero-sized shape that still combines
# correctly under `Base.Broadcast.broadcast_shape`.
_bcast_size_zero(::Number) = ()
_bcast_size_zero(a) = map(d -> isone(d) ? d : 0, size(a))

function return_value(f::Broadcasting,x::Number...)
return_value(f.f,x...)
# Resize `cache` to the common broadcast shape of the arguments. Scalars
# contribute the 0-dim shape `()` via `_bcast_size`, so they never force an
# extra dimension. Returns the (mutated) cache.
function _bcast_setsize!(cache,x...)
s = map(_bcast_size,x)
bs = Base.Broadcast.broadcast_shape(s...)
setsize!(cache,bs)
return cache
end

function return_cache(f::Broadcasting,x::Number...)
nothing
# Value of a broadcasted map over one array, built without running `f.f` on
# real data: a fill of placeholder values of the inferred return eltype.
# `_bcast_size_zero` keeps singleton dims but zeroes the rest, so the result
# is zero-sized yet shape-compatible.
function return_value(f::Broadcasting,x::AbstractArray{<:Number})
T = return_type(f.f,testitem(x))
return fill(testvalue(T),_bcast_size_zero(x))
end
# Cache for the single-array case: a CachedArray sized like `x`, pre-filled
# with placeholders of the inferred return eltype; `evaluate!` resizes and
# overwrites it.
function return_cache(f::Broadcasting,x::AbstractArray{<:Number})
T = return_type(f.f,testitem(x))
r = fill(testvalue(T),size(x))
return CachedArray(r)
end
# Single-array evaluation: resize the cache to `size(x)` and `broadcast!` the
# wrapped function into the cached storage, avoiding allocation.
function evaluate!(cache,f::Broadcasting,x::AbstractArray{<:Number})
setsize!(cache,size(x))
r = cache.array
broadcast!(f.f,r,x)
return r
end

function return_value(f::Broadcasting,x::Union{Number,AbstractArray{<:Number}}...)
s = map(_bcast_size,x)
s = map(_bcast_size_zero,x)
bs = Base.Broadcast.broadcast_shape(s...)
T = return_type(f.f,map(testitem,x)...)
r = fill(testvalue(T),bs)
r
return r
end

function return_cache(f::Broadcasting,x::Union{Number,AbstractArray{<:Number}}...)
s = map(_bcast_size,x)
bs = Base.Broadcast.broadcast_shape(s...)
T = return_type(f.f,map(testitem,x)...)
r = fill(testvalue(T),bs)
cache = CachedArray(r)
_bcast_setsize!(cache,x...)
cache
r = return_value(f,x...)
return CachedArray(r)
end

function _bcast_setsize!(c,x...)
s = map(_bcast_size,x)
bs = Base.Broadcast.broadcast_shape(s...)
if bs != size(c)
setsize!(c,bs)
end
c
function evaluate!(cache,f::Broadcasting,x::Union{Number,AbstractArray{<:Number}}...)
_bcast_setsize!(cache,x...)
r = cache.array
broadcast!(f.f,r,x...)
return r
end

# `_bcast_size` would be `size` if our TensorValues would return size(x) = (), which they do not...
_bcast_size(x) = size(x)
_bcast_size(::Number) = ()

# These two have been replaced by size
#
# _size(a) = size(a)
# _size(a::Number) = (1,)
#
# function _size_zero(a)
# s = size(a)
# if length(a) == 0
# r = map(i-> (i==0 ? 1 : i) ,s)
# else
# r = s
# end
# r
# end
# _size_zero(a::Number) = (1,)

"""
OperationMap(f,args)

Expand Down
7 changes: 6 additions & 1 deletion src/Arrays/Reindex.jl
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,12 @@ function testargs(k::Reindex,i::Integer...)
map(one,i)
end
function return_value(k::Reindex,i...)
length(k.values)!=0 ? evaluate(k,testargs(k,i...)...) : testitem(k.values)
T = eltype(k.values)
if !isempty(k.values)
evaluate(k,testargs(k,i...)...)
else
testitem(k.values)
end :: T
end
return_cache(k::Reindex,i...) = array_cache(k.values)
evaluate!(cache,k::Reindex,i...) = getindex!(cache,k.values,i...)
Expand Down
22 changes: 22 additions & 0 deletions src/Fields/DiffOperators.jl
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,10 @@ Abstract divergence operator, formally equivalent to `f -> ∇⋅f`.
"""
divergence(f) = Operation(tr)(∇(f))

# Value of a broadcasted `divergence`: the same lazy rewrite as `evaluate!`
# (trace of the broadcasted gradient), computed without a cache.
function return_value(::Broadcasting{typeof(divergence)},f)
Broadcasting(Operation(tr))(Broadcasting(∇)(f))
end

# Broadcasting `divergence` rewrites to tr∘∇ applied elementwise; the cache
# is unused because the result is built lazily.
function evaluate!(cache,::Broadcasting{typeof(divergence)},f)
Broadcasting(Operation(tr))(Broadcasting(∇)(f))
end
Expand All @@ -28,6 +32,10 @@ Abstract symmetric gradient operator, formally equivalent to `f -> ½(∇f + (
"""
symmetric_gradient(f) = Operation(symmetric_part)(gradient(f))

# Value of a broadcasted `symmetric_gradient`: same lazy rewrite as
# `evaluate!` (symmetric part of the broadcasted gradient), no cache needed.
function return_value(::Broadcasting{typeof(symmetric_gradient)},f)
Broadcasting(Operation(symmetric_part))(Broadcasting(∇)(f))
end

# Broadcasting `symmetric_gradient` rewrites to symmetric_part∘∇ applied
# elementwise; the cache is unused because the result is built lazily.
function evaluate!(cache,::Broadcasting{typeof(symmetric_gradient)},f)
Broadcasting(Operation(symmetric_part))(Broadcasting(∇)(f))
end
Expand All @@ -46,6 +54,10 @@ Abstract skew symmetric gradient operator, formally equivalent to `f -> ½(∇f
"""
skew_symmetric_gradient(f) = Operation(skew_symmetric_part)(gradient(f))

# Value of a broadcasted `skew_symmetric_gradient`: same lazy rewrite as
# `evaluate!` (skew-symmetric part of the broadcasted gradient), no cache.
function return_value(::Broadcasting{typeof(skew_symmetric_gradient)},f)
Broadcasting(Operation(skew_symmetric_part))(Broadcasting(∇)(f))
end

# Broadcasting `skew_symmetric_gradient` rewrites to skew_symmetric_part∘∇
# applied elementwise; the cache is unused because the result is built lazily.
function evaluate!(cache,::Broadcasting{typeof(skew_symmetric_gradient)},f)
Broadcasting(Operation(skew_symmetric_part))(Broadcasting(∇)(f))
end
Expand All @@ -59,6 +71,10 @@ Abstract curl operator, formally equivalent to
"""
curl(f) = Operation(grad2curl)(∇(f))

# Value of a broadcasted `curl`: same lazy rewrite as `evaluate!`
# (grad2curl of the broadcasted gradient), computed without a cache.
function return_value(::Broadcasting{typeof(curl)},f)
Broadcasting(Operation(grad2curl))(Broadcasting(∇)(f))
end

# Broadcasting `curl` rewrites to grad2curl∘∇ applied elementwise; the cache
# is unused because the result is built lazily.
function evaluate!(cache,::Broadcasting{typeof(curl)},f)
Broadcasting(Operation(grad2curl))(Broadcasting(∇)(f))
end
Expand Down Expand Up @@ -172,6 +188,12 @@ end

# Applying a ShiftedNabla to a raw `Function` wraps it as a `GenericField`
# first, then applies the field method.
(s::ShiftedNabla)(f::Function) = s(GenericField(f))

# Value of a broadcasted `ShiftedNabla`: the broadcasted gradient plus the
# rank-one shift `s.v ⊗ f`, mirroring the corresponding `evaluate!` method.
function return_value(k::Broadcasting{<:ShiftedNabla},f)
s = k.f
g = Broadcasting(∇)(f)
Broadcasting(Operation((a,b)->a+s.v⊗b))(g,f)
end

function evaluate!(cache,k::Broadcasting{<:ShiftedNabla},f)
s = k.f
g = Broadcasting(∇)(f)
Expand Down
Loading