diff --git a/NEWS.md b/NEWS.md index 618bb6ab9..cc66c8e93 100644 --- a/NEWS.md +++ b/NEWS.md @@ -8,8 +8,15 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] ### Added + - Make `compute_facet_owners(...)` more flexible, allowing the user to provide a function to select the owner from neighboring cells. Since PR[#1291](https://github.com/gridap/Gridap.jl/pull/1291). +### Fixed + +- Fixed type instability for tensor operations between `MultiValue` and scalars. Since PR[#1293](https://github.com/gridap/Gridap.jl/pull/1293). +- Fixed type instability in basis construction when user gives a non-concrete output type. Since PR[#1294](https://github.com/gridap/Gridap.jl/pull/1294). +- Reverted most changes from PR[#1277](https://github.com/gridap/Gridap.jl/pull/1277), which were having unexpected consequences, in favour of simpler changes. Since PR[#1290](https://github.com/gridap/Gridap.jl/pull/1290). + ## [0.20.5] - 2026-04-28 ### Fixed diff --git a/src/Arrays/ArrayBlocks.jl b/src/Arrays/ArrayBlocks.jl index 2105b3994..cb419985e 100644 --- a/src/Arrays/ArrayBlocks.jl +++ b/src/Arrays/ArrayBlocks.jl @@ -219,14 +219,6 @@ function testvalue(::Type{ArrayBlock{A,N}}) where {A,N} ArrayBlock(array,touched) end -function testitem(a::Fill{T}) where T <: ArrayBlock{<:AbstractArray{<:Number}} - if length(a) > 0 - a.value - else - testvalue(T) - end::T -end - # CachedArray methods function CachedArray(a::ArrayBlock) diff --git a/src/Arrays/CompressedArrays.jl b/src/Arrays/CompressedArrays.jl index 48e012627..45864f61d 100644 --- a/src/Arrays/CompressedArrays.jl +++ b/src/Arrays/CompressedArrays.jl @@ -47,15 +47,6 @@ function testitem(a::CompressedArray{T}) where T end::T end -# This is needed for zero-sized arrays of evaluated quantities. 
-function testitem(a::CompressedArray{T}) where T <: Union{AbstractArray{<:Number}, ArrayBlock{<:Number}, ArrayBlock{<:AbstractArray{<:Number}}} - if length(a.ptrs) == 0 - testvalue(T) - else - a.values[first(a.ptrs)] - end::T -end - size(a::CompressedArray) = size(a.ptrs) @propagate_inbounds function getindex(a::CompressedArray,i::Integer) diff --git a/src/Arrays/Interface.jl b/src/Arrays/Interface.jl index 529e52a22..f0eb99c8b 100644 --- a/src/Arrays/Interface.jl +++ b/src/Arrays/Interface.jl @@ -181,15 +181,6 @@ function testitem(a::Fill) a.value end -# This is needed for zero-sized arrays of evaluated quantities. -function testitem(a::Fill{T}) where T <: AbstractArray{<:Number} - if length(a) > 0 - a.value - else - testvalue(T) - end::T -end - function testitem(a::Number) a end @@ -226,12 +217,22 @@ function testvalue(::Type{T}) where T<:Transpose{E,A} where {E,A} Transpose(a) end +function testvalue(::Type{T}) where T<:Diagonal{E,A} where {E,A} + a = testvalue(A) + Diagonal(a) +end + testvalue(::Type{Base.OneTo{T}}) where T = Base.OneTo(zero(T)) testvalue(::Type{Base.UnitRange{T}}) where T = UnitRange(one(T),zero(T)) function testvalue(::Type{T}) where T<:Fill{E,N,A} where {E,N,A} - Fill(zero(E),testvalue(A)) + Fill(testvalue(E),testvalue(A)) + #Fill(zero(E),testvalue(A)) +end + +function testvalue(::Type{T}) where T<:FillArrays.AbstractFill{E,N,A} where {E,N,A} + @notimplemented end function testvalue(::Type{T}) where T<:Tuple diff --git a/src/Arrays/LazyArrays.jl b/src/Arrays/LazyArrays.jl index 159ee006d..fa5c92495 100644 --- a/src/Arrays/LazyArrays.jl +++ b/src/Arrays/LazyArrays.jl @@ -255,6 +255,7 @@ Base.size(a::LazyArray) = size(a.maps) Base.size(a::LazyArray{G,T,1} where {G,T}) = (length(a.maps),) function Base.sum(a::LazyArray) + isempty(a) && return zero(testitem(a)) cache = array_cache(a) lazy_sum(cache,a) end @@ -270,8 +271,13 @@ end lazy_collect(a::AbstractArray) = a -function lazy_collect(a::LazyArray{A,T} where A) where T +function 
lazy_collect(a::LazyArray) + isempty(a) && return fill(testitem(a),size(a)) cache = array_cache(a) + lazy_collect(cache,a) +end + +function lazy_collect(cache, a::LazyArray{A,T} where A) where T r = Array{T}(undef,size(a)) @check axes(a) == axes(r) @inbounds for i in eachindex(a) @@ -291,35 +297,34 @@ function testitem(a::LazyArray{A,T} where A) where T end::T end -# This is needed for zero-sized arrays of evaluated quantities. -function testitem(a::LazyArray{A,T} where A) where T <: Union{AbstractArray{<:Number},ArrayBlock{<:Number},ArrayBlock{<:AbstractArray{<:Number}}} - if length(a) > 0 - first(a) - else - testvalue(T) - end::T -end - # Particular implementations for Fill function lazy_map(::typeof(evaluate),f::Fill, a::Fill...) ai = map(ai->ai.value,a) - r = evaluate(f.value, ai...) s = _common_size(f, a...) - Fill(r, s) + if prod(s) > 0 + r = evaluate(f.value, ai...) + else + r = return_value(f.value, ai...) + end + return Fill(r, s) end function lazy_map(::typeof(evaluate),::Type{T}, f::Fill, a::Fill...) where T ai = map(ai->ai.value,a) - r = evaluate(f.value, ai...) s = _common_size(f, a...) - Fill(r, s) + if prod(s) > 0 + r = evaluate(f.value, ai...) + else + r = return_value(f.value, ai...) + end :: T + return Fill(r, s) end function _common_size(a::AbstractArray...) a1, = a - @check all(map(ai->length(a1) == length(ai),a)) "Array sizes $(map(size,a)) are not compatible." - if all( map(ai->size(a1) == size(ai),a) ) + @check all(ai -> length(a1) == length(ai), a) "Array sizes $(map(size,a)) are not compatible." + if all(ai -> size(a1) == size(ai), a) size(a1) else (length(a1),) diff --git a/src/Arrays/Maps.jl b/src/Arrays/Maps.jl index 78e37ee87..ebf167b2a 100644 --- a/src/Arrays/Maps.jl +++ b/src/Arrays/Maps.jl @@ -146,7 +146,9 @@ macro. function test_map(y,f,x...;cmp=(==)) z = evaluate(f,x...) @test cmp(z,y) - @test typeof(z) == return_type(f,x...) + T = return_type(f,x...) 
+ @test typeof(z) == T + @test typeof(return_value(f,x...)) == T cache = return_cache(f,x...) z = evaluate!(cache,f,x...) @test cmp(z,y) @@ -185,90 +187,64 @@ struct Broadcasting{F} <: Map f::F end -return_cache(f::Broadcasting,x...) = nothing - -evaluate!(cache,f::Broadcasting,x...) = broadcast(f.f,x...) - function return_value(f::Broadcasting,x...) broadcast( (y...) -> f.f(testargs(f.f,y...)...), x... ) end +return_cache(f::Broadcasting,x...) = nothing +evaluate!(cache,f::Broadcasting,x...) = broadcast(f.f,x...) -function evaluate!(cache,f::Broadcasting,x::Union{Number,AbstractArray{<:Number}}...) - r = _bcast_setsize!(cache,x...) - a = r.array - broadcast!(f.f,a,x...) - a -end +return_value(f::Broadcasting,x::Number...) = return_value(f.f,x...) +return_cache(f::Broadcasting,x::Number...) = nothing +evaluate!(cache,f::Broadcasting,args::Number...) = f.f(args...) -function evaluate!(cache,f::Broadcasting,x::AbstractArray{<:Number}) - setsize!(cache,size(x)) - a = cache.array - @check axes(a) == axes(x) - @inbounds for i in eachindex(x) - a[i] = f.f(x[i]) - end - a -end +# `_bcast_size` would be `size` if our TensorValues would return size(x) = (), which they do not... +_bcast_size(x) = size(x) +_bcast_size(::Number) = () -function evaluate!(cache,f::Broadcasting,args::Number...) - f.f(args...) -end +_bcast_size_zero(x) = map(i -> ifelse(isone(i),i,0), size(x)) +_bcast_size_zero(::Number) = () -function return_value(f::Broadcasting,x::Number...) - return_value(f.f,x...) +function _bcast_setsize!(cache,x...) + s = map(_bcast_size,x) + bs = Base.Broadcast.broadcast_shape(s...) + setsize!(cache,bs) + return cache end -function return_cache(f::Broadcasting,x::Number...) 
- nothing +function return_value(f::Broadcasting,x::AbstractArray{<:Number}) + T = return_type(f.f,testitem(x)) + return fill(testvalue(T),_bcast_size_zero(x)) +end +function return_cache(f::Broadcasting,x::AbstractArray{<:Number}) + T = return_type(f.f,testitem(x)) + r = fill(testvalue(T),size(x)) + return CachedArray(r) +end +function evaluate!(cache,f::Broadcasting,x::AbstractArray{<:Number}) + setsize!(cache,size(x)) + r = cache.array + broadcast!(f.f,r,x) + return r end function return_value(f::Broadcasting,x::Union{Number,AbstractArray{<:Number}}...) - s = map(_bcast_size,x) + s = map(_bcast_size_zero,x) bs = Base.Broadcast.broadcast_shape(s...) T = return_type(f.f,map(testitem,x)...) r = fill(testvalue(T),bs) - r + return r end - function return_cache(f::Broadcasting,x::Union{Number,AbstractArray{<:Number}}...) - s = map(_bcast_size,x) - bs = Base.Broadcast.broadcast_shape(s...) - T = return_type(f.f,map(testitem,x)...) - r = fill(testvalue(T),bs) - cache = CachedArray(r) - _bcast_setsize!(cache,x...) - cache + r = return_value(f,x...) + return CachedArray(r) end - -function _bcast_setsize!(c,x...) - s = map(_bcast_size,x) - bs = Base.Broadcast.broadcast_shape(s...) - if bs != size(c) - setsize!(c,bs) - end - c +function evaluate!(cache,f::Broadcasting,x::Union{Number,AbstractArray{<:Number}}...) + _bcast_setsize!(cache,x...) + r = cache.array + broadcast!(f.f,r,x...) + return r end -# `_bcast_size` would be `size` if our TensorValues would return size(x) = (), which they do not... -_bcast_size(x) = size(x) -_bcast_size(::Number) = () - -# These two have been replaced by size -# -# _size(a) = size(a) -# _size(a::Number) = (1,) -# -# function _size_zero(a) -# s = size(a) -# if length(a) == 0 -# r = map(i-> (i==0 ? 
1 : i) ,s) -# else -# r = s -# end -# r -# end -# _size_zero(a::Number) = (1,) - """ OperationMap(f,args) diff --git a/src/Arrays/Reindex.jl b/src/Arrays/Reindex.jl index 7c6442790..84421cbe2 100644 --- a/src/Arrays/Reindex.jl +++ b/src/Arrays/Reindex.jl @@ -15,7 +15,12 @@ function testargs(k::Reindex,i::Integer...) map(one,i) end function return_value(k::Reindex,i...) - length(k.values)!=0 ? evaluate(k,testargs(k,i...)...) : testitem(k.values) + T = eltype(k.values) + if !isempty(k.values) + evaluate(k,testargs(k,i...)...) + else + testitem(k.values) + end :: T end return_cache(k::Reindex,i...) = array_cache(k.values) evaluate!(cache,k::Reindex,i...) = getindex!(cache,k.values,i...) diff --git a/src/Fields/DiffOperators.jl b/src/Fields/DiffOperators.jl index f760f6a76..8a46fbc37 100644 --- a/src/Fields/DiffOperators.jl +++ b/src/Fields/DiffOperators.jl @@ -6,6 +6,10 @@ Abstract divergence operator, formally equivalent to `f -> ∇⋅f`. """ divergence(f) = Operation(tr)(∇(f)) +function return_value(::Broadcasting{typeof(divergence)},f) + Broadcasting(Operation(tr))(Broadcasting(∇)(f)) +end + function evaluate!(cache,::Broadcasting{typeof(divergence)},f) Broadcasting(Operation(tr))(Broadcasting(∇)(f)) end @@ -28,6 +32,10 @@ Abstract symmetric gradient operator, formally equivalent to `f -> ½(∇f + ( """ symmetric_gradient(f) = Operation(symmetric_part)(gradient(f)) +function return_value(::Broadcasting{typeof(symmetric_gradient)},f) + Broadcasting(Operation(symmetric_part))(Broadcasting(∇)(f)) +end + function evaluate!(cache,::Broadcasting{typeof(symmetric_gradient)},f) Broadcasting(Operation(symmetric_part))(Broadcasting(∇)(f)) end @@ -46,6 +54,10 @@ Abstract skew symmetric gradient operator, formally equivalent to `f -> ½(∇f """ skew_symmetric_gradient(f) = Operation(skew_symmetric_part)(gradient(f)) +function return_value(::Broadcasting{typeof(skew_symmetric_gradient)},f) + Broadcasting(Operation(skew_symmetric_part))(Broadcasting(∇)(f)) +end + function 
evaluate!(cache,::Broadcasting{typeof(skew_symmetric_gradient)},f) Broadcasting(Operation(skew_symmetric_part))(Broadcasting(∇)(f)) end @@ -59,6 +71,10 @@ Abstract curl operator, formally equivalent to """ curl(f) = Operation(grad2curl)(∇(f)) +function return_value(::Broadcasting{typeof(curl)},f) + Broadcasting(Operation(grad2curl))(Broadcasting(∇)(f)) +end + function evaluate!(cache,::Broadcasting{typeof(curl)},f) Broadcasting(Operation(grad2curl))(Broadcasting(∇)(f)) end @@ -172,6 +188,12 @@ end (s::ShiftedNabla)(f::Function) = s(GenericField(f)) +function return_value(k::Broadcasting{<:ShiftedNabla},f) + s = k.f + g = Broadcasting(∇)(f) + Broadcasting(Operation((a,b)->a+s.v⊗b))(g,f) +end + function evaluate!(cache,k::Broadcasting{<:ShiftedNabla},f) s = k.f g = Broadcasting(∇)(f) diff --git a/src/Fields/FieldArrays.jl b/src/Fields/FieldArrays.jl index cd8fca7b0..255e2ae95 100644 --- a/src/Fields/FieldArrays.jl +++ b/src/Fields/FieldArrays.jl @@ -88,6 +88,8 @@ function test_field_array(f::AbstractArray{<:Field}, x, v, cmp=(==); grad=nothin if gradgrad != nothing test_map(gradgrad,Broadcasting(∇∇)(f),x;cmp=cmp) end + @test typeof(testvalue(typeof(f))) == typeof(f) + @test typeof(testitem(f)) == eltype(f) true end @@ -202,7 +204,7 @@ end function testvalue(::Type{LinearCombinationField{V,F}}) where {V,F} fields = testvalue(F) - values = testvalue(V) + values = zeros(eltype(V), length(fields), 0) LinearCombinationField(values,fields,1) end @@ -246,7 +248,11 @@ Base.IndexStyle(::Type{<:LinearCombinationField}) = IndexLinear() function testvalue(::Type{LinearCombinationFieldVector{V,F}}) where {V,F} fields = testvalue(F) - values = zeros(eltype(V), length(fields), 0) + if V <: Diagonal + values = Diagonal(zeros(eltype(V), length(fields))) + else + values = zeros(eltype(V), length(fields), 0) + end::V LinearCombinationFieldVector(values,fields) end @@ -310,8 +316,10 @@ struct LinearCombinationMap{T} <: Map end @inline function linear_combination_eltype(v,fx) - T = 
Base.promote_op(⊗, eltype(fx), eltype(v)) - return Base.promote_op(+,T,T) + T = Base.promote_op(outer, eltype(fx), eltype(v)) + TT = Base.promote_op(+,T,T) + @check isconcretetype(TT) "linear_combination_eltype($(eltype(v)), $(eltype(fx))) is not concrete." + return TT end function return_value(k::LinearCombinationMap{<:Integer},v::AbstractVector,fx::AbstractVector) @@ -324,31 +332,24 @@ function return_cache(k::LinearCombinationMap{<:Integer},v::AbstractVector,fx::A end function evaluate!(cache,k::LinearCombinationMap{<:Integer},v::AbstractArray,fx::AbstractVector) - z = zero(return_type(outer,testitem(fx),testitem(v))) + z = zero(linear_combination_eltype(v,fx)) @check length(fx) == size(v,1) @inbounds for i in eachindex(fx) # We need to do the product in this way # so that the gradient also works z += outer(fx[i],v[i,k.column]) end - z + return z end function return_value(k::LinearCombinationMap{<:Integer},v::AbstractArray,fx::AbstractMatrix) - if size(fx,2) == size(v,1) - evaluate(k,v,fx) - else - c = return_cache(k,v,fx) - c.array - end + T = linear_combination_eltype(v,fx) + return zeros(T,0) end function return_cache(k::LinearCombinationMap{<:Integer},v::AbstractArray,fx::AbstractMatrix) - vf = testitem(fx) - vv = testitem(v) - T = typeof( vf⊗vv + vf⊗vv ) - r = zeros(T,size(fx,1)) - CachedArray(r) + T = linear_combination_eltype(v,fx) + return CachedArray(zeros(T,size(fx,1))) end function evaluate!(cache,k::LinearCombinationMap{<:Integer},v::AbstractArray,fx::AbstractMatrix) @@ -363,7 +364,7 @@ function evaluate!(cache,k::LinearCombinationMap{<:Integer},v::AbstractArray,fx: end r[p] = rp end - r + return r end function evaluate!(cache,k::LinearCombinationMap{Colon},v::AbstractVector,fx::AbstractVector) @@ -386,12 +387,14 @@ function evaluate!(cache,k::LinearCombinationMap{Colon},v::AbstractVector,fx::Ab evaluate!(cache,LinearCombinationMap(1),v,fx) end +function return_value(k::LinearCombinationMap{Colon},v::AbstractMatrix,fx::AbstractVector) + T = 
linear_combination_eltype(v,fx) + return zeros(T,0) +end + function return_cache(k::LinearCombinationMap{Colon},v::AbstractMatrix,fx::AbstractVector) - vf = testitem(fx) - vv = testitem(v) - T = typeof( vf⊗vv + vf⊗vv ) - r = zeros(T,size(v,2)) - CachedArray(r) + T = linear_combination_eltype(v,fx) + return CachedArray(zeros(T,size(v,2))) end function evaluate!(cache,k::LinearCombinationMap{Colon},v::AbstractMatrix,fx::AbstractVector) @@ -405,7 +408,7 @@ function evaluate!(cache,k::LinearCombinationMap{Colon},v::AbstractMatrix,fx::Ab end r[j] = rj end - r + return r end function evaluate!(cache,k::LinearCombinationMap{Colon},v::LinearAlgebra.Diagonal,fx::AbstractVector) @@ -415,15 +418,18 @@ function evaluate!(cache,k::LinearCombinationMap{Colon},v::LinearAlgebra.Diagona @inbounds for j in eachindex(fx) r[j] = outer(fx[j],v.diag[j]) end - r + return r +end + +function return_value(k::LinearCombinationMap{Colon},v::AbstractMatrix,fx::AbstractMatrix) + T = linear_combination_eltype(v,fx) + return zeros(T,0,0) end function return_cache(k::LinearCombinationMap{Colon},v::AbstractMatrix,fx::AbstractMatrix) - vf = testitem(fx) - vv = testitem(v) - T = typeof( vf⊗vv + vf⊗vv ) + T = linear_combination_eltype(v,fx) r = zeros(T,size(fx,1),size(v,2)) - CachedArray(r) + return CachedArray(r) end # r = fx * v i.e. 
r[p,j] = Σᵢ fx[p,i]*v[i,j] @@ -440,7 +446,7 @@ function evaluate!(cache,k::LinearCombinationMap{Colon},v::AbstractMatrix,fx::Ab r[p,j] = rj end end - r + return r end function evaluate!(cache,k::LinearCombinationMap{Colon},v::LinearAlgebra.Diagonal,fx::AbstractMatrix) @@ -452,7 +458,7 @@ function evaluate!(cache,k::LinearCombinationMap{Colon},v::LinearAlgebra.Diagona r[p,j] = outer(fx[p,j],v.diag[j]) end end - r + return r end # Optimizing transpose @@ -588,7 +594,7 @@ for T in (:(Point),:(AbstractArray{<:Point})) function return_cache(f::BroadcastOpFieldArray,x::$T) cfs = map(fi -> return_cache(fi,x),f.args) - rs = map(fi -> return_value(fi,x),f.args) + rs = map((ci, fi) -> evaluate!(ci,fi,x), cfs, f.args) bm = BroadcastingFieldOpMap(f.op) r = return_cache(bm,rs...) r, cfs @@ -641,7 +647,7 @@ return_value(a::BroadcastingFieldOpMap,args::AbstractArray...) = return_value(Br return_cache(a::BroadcastingFieldOpMap,args::AbstractArray...) = return_cache(Broadcasting(a.op),args...) evaluate!(cache,a::BroadcastingFieldOpMap,args::AbstractArray...) = evaluate!(cache,Broadcasting(a.op),args...) 
-# Follow optimizations are very important to achieve performance +# The following optimizations are very important to achieve performance function evaluate!( cache, diff --git a/src/Fields/FieldsInterfaces.jl b/src/Fields/FieldsInterfaces.jl index 8841d911f..367bb3202 100644 --- a/src/Fields/FieldsInterfaces.jl +++ b/src/Fields/FieldsInterfaces.jl @@ -200,7 +200,6 @@ struct GenericField{T} <: Field object::T end -#Field(f) = GenericField(f) GenericField(f::Field) = f testargs(a::GenericField,x::Point) = testargs(a.object,x) @@ -208,6 +207,15 @@ return_value(a::GenericField,x::Point) = return_value(a.object,x) return_cache(a::GenericField,x::Point) = return_cache(a.object,x) evaluate!(cache,a::GenericField,x::Point) = evaluate!(cache,a.object,x) +function testvalue(::Type{GenericField{T}}) where T + if hasproperty(T,:instance) + # This is because typeof(function) does not have a singleton constructor O() + GenericField(T.instance) + else + GenericField(T()) + end :: GenericField{T} +end + function return_cache(f::FieldGradient{N,<:GenericField},x::Point) where N return_cache(FieldGradient{N}(f.object.object),x) end @@ -386,46 +394,38 @@ end function return_value(c::OperationField,x::Point) fx = map(f -> return_value(f,x),c.fields) - return_value(c.op,fx...) + return return_value(c.op,fx...) end function return_cache(c::OperationField,x::Point) cl = map(fi -> return_cache(fi,x),c.fields) lx = map(fi -> return_value(fi,x),c.fields) ck = return_cache(c.op,lx...) - ck, cl + return ck, cl end function evaluate!(cache,c::OperationField,x::Point) ck, cf = cache lx = map((ci,fi) -> evaluate!(ci,fi,x),cf,c.fields) - evaluate!(ck,c.op,lx...) + return evaluate!(ck,c.op,lx...) end function return_value(c::OperationField,x::AbstractArray{<:Point}) fx = map(f -> return_value(f,x),c.fields) - c.op.(fx...) + return return_value(Broadcasting(c.op),fx...) 
end function return_cache(c::OperationField,x::AbstractArray{<:Point}) cf = map(fi -> return_cache(fi,x),c.fields) lx = map((ci,fi) -> evaluate!(ci,fi,x),cf,c.fields) - ck = return_cache(c.op,map(testitem,lx)...) - r = c.op.(lx...) - ca = CachedArray(r) - ca, ck, cf + ck = return_cache(Broadcasting(c.op),lx...) + return cf, ck end function evaluate!(cache,c::OperationField,x::AbstractArray{<:Point}) - ca, ck, cf = cache - sx = size(x) - setsize!(ca,sx) + cf, ck = cache lx = map((ci,fi) -> evaluate!(ci,fi,x),cf,c.fields) - r = ca.array - for i in eachindex(x) - @inbounds r[i] = evaluate!(ck,c.op,map(lxi -> lxi[i], lx)...) - end - r + return evaluate!(ck,Broadcasting(c.op),lx...) end evaluate!(cache,op::Operation,x::Field...) = OperationField(op.op,x) @@ -628,13 +628,17 @@ struct IntegrationMap <: Map end @inline function integrate_eltype(aq,w) T = Base.promote_op(*,eltype(aq),eltype(w)) - return Base.promote_op(+,T,T) + TT = Base.promote_op(+,T,T) + @check isconcretetype(TT) "integrate_eltype($(eltype(aq)), $(eltype(w))) is not concrete." + return TT end @inline function integrate_eltype(aq,w,jq) T1 = Base.promote_op(*,eltype(aq),eltype(w)) Tj = Base.promote_op(meas,eltype(jq)) T2 = Base.promote_op(*,T1,Tj) - return Base.promote_op(+,T2,T2) + TT = Base.promote_op(+,T2,T2) + @check isconcretetype(TT) "integrate_eltype($(eltype(aq)), $(eltype(w)), $(eltype(jq))) is not concrete." 
+ return TT end return_value(::IntegrationMap,aq::AbstractVector,w) = zero(integrate_eltype(aq,w)) diff --git a/src/Fields/MockFields.jl b/src/Fields/MockFields.jl index e8f61ae9d..7c89cfb90 100644 --- a/src/Fields/MockFields.jl +++ b/src/Fields/MockFields.jl @@ -41,6 +41,10 @@ Base.size(a::MockFieldArray) = size(a.values) Base.IndexStyle(::Type{<:MockFieldArray}) = IndexLinear() Base.getindex(a::MockFieldArray,i::Integer) = GenericField(nothing) +function testvalue(::Type{MockFieldArray{N,A}}) where {N,A} + MockFieldArray(testvalue(A)) +end + function return_cache(f::MockFieldArray,x::Point) nothing end diff --git a/src/MultiField/MultiFieldFESpaces.jl b/src/MultiField/MultiFieldFESpaces.jl index b23c79927..d20dad22e 100644 --- a/src/MultiField/MultiFieldFESpaces.jl +++ b/src/MultiField/MultiFieldFESpaces.jl @@ -490,6 +490,11 @@ end function Arrays.return_value(k::Broadcasting{typeof(_sum_if_first_positive)},dofs::VectorBlock,o) T = return_type(k,testitem(dofs.array),o) array = Vector{T}(undef,length(dofs.array)) + for i in 1:length(dofs.array) + if dofs.touched[i] + array[i] = testvalue(T) + end + end ArrayBlock(array,dofs.touched) end diff --git a/src/Polynomials/BernsteinBases.jl b/src/Polynomials/BernsteinBases.jl index d300abde3..79cdede84 100644 --- a/src/Polynomials/BernsteinBases.jl +++ b/src/Polynomials/BernsteinBases.jl @@ -247,10 +247,11 @@ struct BernsteinBasisOnSimplex{D,V,M,K} <: PolynomialBasis{D,V,Bernstein} function BernsteinBasisOnSimplex{D}(::Type{V},order::Int,vertices=nothing) where {D,V} _simplex_vertices_checks(Val(D), vertices) + VV = make_concretetype(V) cart_to_bary_matrix = _compute_cart_to_bary_matrix(vertices, Val(D+1)) M = typeof(cart_to_bary_matrix) # Nothing or SMatrix K = order - new{D,V,M,K}(cart_to_bary_matrix) + new{D,VV,M,K}(cart_to_bary_matrix) end end diff --git a/src/Polynomials/CartProdPolyBases.jl b/src/Polynomials/CartProdPolyBases.jl index 343753e88..7fb0d5d5b 100644 --- a/src/Polynomials/CartProdPolyBases.jl +++ 
b/src/Polynomials/CartProdPolyBases.jl @@ -39,12 +39,13 @@ struct CartProdPolyBasis{D,V,PT} <: PolynomialBasis{D,V,PT} orders::NTuple{D,Int}, terms::Vector{CartesianIndex{D}}) where {D,V,PT<:Polynomial} + VV = make_concretetype(V) @check isconcretetype(PT) "PT needs to be a concrete <:Polynomial type" K = maximum(orders; init=0) msg = "Some term contain a higher index than the maximum degree + 1." @check all( term -> (maximum(Tuple(term), init=0) <= K+1), terms) msg - new{D,V,PT}(K,orders,terms) + new{D,VV,PT}(K,orders,terms) end end diff --git a/src/Polynomials/CompWiseTensorPolyBases.jl b/src/Polynomials/CompWiseTensorPolyBases.jl index 1aa9da975..7653afbcd 100644 --- a/src/Polynomials/CompWiseTensorPolyBases.jl +++ b/src/Polynomials/CompWiseTensorPolyBases.jl @@ -59,12 +59,13 @@ struct CompWiseTensorPolyBasis{D,V,PT} <: PolynomialBasis{D,V,PT} @check size(orders,2) == D msg3 @check isconcretetype(PT) "PT needs to be a concrete <:Polynomial type" + VV = make_concretetype(V) K = maximum(orders) num_poly = mapreduce(length, +, comp_terms) #TODO check orders in `orders` greater or equal than max index in terms - new{D,V,PT}(num_poly,K,orders,comp_terms) + new{D,VV,PT}(num_poly,K,orders,comp_terms) end end diff --git a/src/Polynomials/ModalC0Bases.jl b/src/Polynomials/ModalC0Bases.jl index 08f4e9132..dce1b0a81 100644 --- a/src/Polynomials/ModalC0Bases.jl +++ b/src/Polynomials/ModalC0Bases.jl @@ -43,9 +43,10 @@ struct ModalC0Basis{D,V,T} <: PolynomialBasis{D,V,ModalC0} _msg = "The number of bounding box points in a and b should match the number of terms" @check length(terms) == length(a) == length(b) _msg @check T == eltype(V) "Point and polynomial values should have the same scalar body" + VV = make_concretetype(V) K = maximum(orders, init=0) - new{D,V,T}(K,orders,terms,a,b) + new{D,VV,T}(K,orders,terms,a,b) end end diff --git a/src/ReferenceFEs/GeometricDecompositions.jl b/src/ReferenceFEs/GeometricDecompositions.jl index 4fa2a6bb6..b2dbf1b9b 100644 --- 
a/src/ReferenceFEs/GeometricDecompositions.jl +++ b/src/ReferenceFEs/GeometricDecompositions.jl @@ -197,7 +197,7 @@ function apply_face_signflip( facet_range = get_dimrange(p,D-1) face_own_funs = get_face_own_funs(b,p,conf) - sign_flip = MVector(tfill(1, Val(length(b)))...) + sign_flip = fill(1, length(b)) for (face, own_funs) in enumerate(face_own_funs) if face ∈ facet_range @@ -396,7 +396,7 @@ function _CompWiseTensorPolyBasis_facet_signflip( facet_range = get_dimrange(p,D-1) face_own_funs = get_face_own_funs(b,p,conf) - sign_flip = MVector(tfill(1, Val(length(b)))...) + sign_flip = fill(1, length(b)) for (face, own_funs) in enumerate(face_own_funs) if face ∈ facet_range diff --git a/src/ReferenceFEs/MomentBasedReferenceFEs.jl b/src/ReferenceFEs/MomentBasedReferenceFEs.jl index 98594ced0..f20e1ea69 100644 --- a/src/ReferenceFEs/MomentBasedReferenceFEs.jl +++ b/src/ReferenceFEs/MomentBasedReferenceFEs.jl @@ -283,7 +283,6 @@ function MomentBasedDofBasis( # vals : (nN, nμ, nφ), coords : (nN) vals, coords = evaluate!(cache,σ,op_φ,μ,ds) - # test_moment(σ,prebasis,μ,ds) mom_offset = face_n_moms[face] node_offset = first(face_nodes[face]) + face_n_nodes[face] - 1 diff --git a/src/ReferenceFEs/ReferenceFEInterfaces.jl b/src/ReferenceFEs/ReferenceFEInterfaces.jl index 88ba87b1a..29da76a2a 100644 --- a/src/ReferenceFEs/ReferenceFEInterfaces.jl +++ b/src/ReferenceFEs/ReferenceFEInterfaces.jl @@ -693,7 +693,9 @@ instead extract the required fields before and pass them to the computationally face_own_dofs, # Trick to be able to eval dofs af shapefuns in physical space # cf /test/FESpacesTests/PhysicalFESpacesTests.jl - linear_combination(Eye{Int}(ndofs), shapefuns)) + #linear_combination(Eye{Int}(ndofs), shapefuns) + linear_combination(Diagonal(ones(ndofs)), shapefuns) + ) end end diff --git a/src/TensorValues/MultiValueTypes.jl b/src/TensorValues/MultiValueTypes.jl index 47999353e..e67f740f3 100644 --- a/src/TensorValues/MultiValueTypes.jl +++ 
b/src/TensorValues/MultiValueTypes.jl @@ -71,6 +71,8 @@ has 6 and a `SymTracelessTensorValue{3}` has 5. But they all have 9 (non indepen num_indep_components(::Type{T}) where T<:Number = num_components(T) num_indep_components(::T) where T<:Number = num_indep_components(T) +get_indep_components(a::MultiValue) = Tuple(a) + """ !!! warning Deprecated in favor on [`num_components`](@ref). @@ -95,6 +97,11 @@ function rand(rng::AbstractRNG,::Random.SamplerType{V}) where V<:MultiValue{D,T} V(Tuple(vrand)) end +function make_concretetype(::Type{T}) where T <: Number + TT = ifelse(isconcretetype(T),T,typeof(zero(T))) + @check isconcretetype(TT) "Type $(T) cannot be made concrete." + return TT +end ## ATM it is not possible to implement array like axes because lazy_mapping ## operations / broadcast rely on axes(::MultiValue) adopting the Number convention to return (). diff --git a/src/TensorValues/Operations.jl b/src/TensorValues/Operations.jl index 65ad3f53f..768857fce 100644 --- a/src/TensorValues/Operations.jl +++ b/src/TensorValues/Operations.jl @@ -86,14 +86,12 @@ for op in (:+,:-) @eval begin function ($op)(a::T) where T<:MultiValue - Li = num_indep_components(T) - r = map($op, Tuple(a)[1:Li]) + r = map($op, get_indep_components(a)) T(r) end function ($op)(a::V, b::V) where V<:MultiValue - Li = num_indep_components(V) - r = map(($op), Tuple(a)[1:Li], Tuple(b)[1:Li]) + r = map(($op), get_indep_components(a), get_indep_components(b)) V(r) end end @@ -134,16 +132,14 @@ end for op in (:+,:-,:*) @eval begin function ($op)(a::MultiValue, b::_Scalar) - Li = num_indep_components(a) - r = _bc($op, Tuple(a)[1:Li], b) + r = _bc($op, get_indep_components(a), b) T = _eltype($op, r, a, b) M = change_eltype(a, T) M(r) end function ($op)(a::_Scalar, b::MultiValue) - Li = num_indep_components(b) - r = _bc($op, a, Tuple(b)[1:Li]) + r = _bc($op, a, get_indep_components(b)) T = _eltype($op, r, a, b) M = change_eltype(b, T) M(r) @@ -159,8 +155,7 @@ _err = "This operation is undefined for 
traceless tensors" (-)(::_Scalar, ::_AbstractTracelessTensor) = error(_err) function (/)(a::MultiValue, b::_Scalar) - Li = num_indep_components(a) - r = _bc(/, Tuple(a)[1:Li], b) + r = _bc(/, get_indep_components(a), b) T = _eltype(/, r, a, b) P = change_eltype(a, T) P(r) diff --git a/src/TensorValues/SymTracelessTensorValueTypes.jl b/src/TensorValues/SymTracelessTensorValueTypes.jl index 8ddb80ffd..eba6c9787 100644 --- a/src/TensorValues/SymTracelessTensorValueTypes.jl +++ b/src/TensorValues/SymTracelessTensorValueTypes.jl @@ -141,3 +141,5 @@ change_eltype(::Type{SymTracelessTensorValue{D,T1,L}},::Type{T2}) where {D,T1,T2 num_indep_components(::Type{<:SymTracelessTensorValue{0}}) = 0 num_indep_components(::Type{<:SymTracelessTensorValue{D}}) where {D} = D*(D+1)÷2-1 +get_indep_components(a::SymTracelessTensorValue) = Base.front(Tuple(a)) + diff --git a/src/TensorValues/TensorValues.jl b/src/TensorValues/TensorValues.jl index 5fd2121f9..7b970ea63 100644 --- a/src/TensorValues/TensorValues.jl +++ b/src/TensorValues/TensorValues.jl @@ -66,6 +66,7 @@ export skew_symmetric_part export num_components export num_indep_components export change_eltype +export make_concretetype export diagonal_tensor export ⊙ export ⊗ diff --git a/test/ArraysTests/InterfaceTests.jl b/test/ArraysTests/InterfaceTests.jl index 1317a860e..63b9f9539 100644 --- a/test/ArraysTests/InterfaceTests.jl +++ b/test/ArraysTests/InterfaceTests.jl @@ -1,6 +1,7 @@ module InterfaceTests using Test +using LinearAlgebra using Gridap.Arrays import Gridap.Arrays: getindex! 
@@ -122,6 +123,7 @@ Arrays.getindex!(cache, cm::IndexcartMatrix, i::Integer, j::Integer) = begin @wa @test_nowarn Arrays.getindex!(nothing, cm, 1:2,2) # cache skipped test_array(m, cm) +# testvalue for complex types t = (1,2.0,1im,[3,4]) @test testvalue(t) == (0,0.0,0im,Int[]) @@ -131,4 +133,10 @@ for n in 0:10 @test testvalue(t) == ntuple(i->0.0,n) end +t = transpose(rand(3,3)) +@test testvalue(t) == transpose(zeros(0,0)) + +d = Diagonal([1,2,3]) +@test testvalue(d) == Diagonal(Int[]) + end # module diff --git a/test/FESpacesTests/DivConformingFESpacesTests.jl b/test/FESpacesTests/DivConformingFESpacesTests.jl index d6706f6f0..233427e89 100644 --- a/test/FESpacesTests/DivConformingFESpacesTests.jl +++ b/test/FESpacesTests/DivConformingFESpacesTests.jl @@ -77,6 +77,9 @@ end test_div_v_q_equiv(U,V,P,Q,Ω) + v = get_fe_basis(V) + dv = DIV(v) + #using Gridap.Visualization # #writevtk(Ω,"trian",nsubcells=10,cellfields=["uh"=>uh]) diff --git a/test/FieldsTests/DiffOperatorsTests.jl b/test/FieldsTests/DiffOperatorsTests.jl index 2d22a2b2b..29c1ec011 100644 --- a/test/FieldsTests/DiffOperatorsTests.jl +++ b/test/FieldsTests/DiffOperatorsTests.jl @@ -4,7 +4,9 @@ using Test using Gridap.Arrays using Gridap.TensorValues using Gridap.Fields -using Gridap.Fields: MockField +using Gridap.Fields: MockField, MockFieldArray +using Gridap.Fields: skew_symmetric_gradient, ShiftedNabla + using LinearAlgebra using FillArrays @@ -230,4 +232,18 @@ for x in xs @test Δ(u)(x) == (∇⋅∇u)(x) end +# FieldArray diff ops + +same_type(x,y) = typeof(x) == typeof(y) + +x = [ Point(0.,0.), Point(1.,0.), Point(0.,1.), Point(1.,1.) 
] +f = MockFieldArray( [ VectorValue(1.0,2.0), VectorValue(3.0,4.0), VectorValue(5.0,6.0) ] ) + +@test same_type(evaluate(Broadcasting(gradient),f), return_value(Broadcasting(gradient),f)) +@test same_type(evaluate(Broadcasting(divergence),f), return_value(Broadcasting(divergence),f)) +@test same_type(evaluate(Broadcasting(curl),f), return_value(Broadcasting(curl),f)) +@test same_type(evaluate(Broadcasting(symmetric_gradient),f), return_value(Broadcasting(symmetric_gradient),f)) +@test same_type(evaluate(Broadcasting(laplacian),f), return_value(Broadcasting(laplacian),f)) +@test same_type(evaluate(Broadcasting(skew_symmetric_gradient),f), return_value(Broadcasting(skew_symmetric_gradient),f)) + end # module diff --git a/test/FieldsTests/FieldArraysTests.jl b/test/FieldsTests/FieldArraysTests.jl index d0b591dcb..046ae0654 100644 --- a/test/FieldsTests/FieldArraysTests.jl +++ b/test/FieldsTests/FieldArraysTests.jl @@ -1,9 +1,12 @@ module FieldArraysTests +using Gridap using Gridap.Fields using Gridap.Arrays using Gridap.TensorValues +using Gridap.Polynomials +using LinearAlgebra using Test # Testing the default interface for field arrays @@ -214,7 +217,6 @@ v = VectorValue{2,Float64}[(1,1),(4,2),(3,5)] b = MockField.(v) f = Broadcasting(Operation(*))(a,b) - fp = Broadcasting(*)(a(p),v) ∇fp = Broadcasting(⊗)(∇(a)(p),v) @@ -356,6 +358,15 @@ avals = rand(4) a = ConstantField.(avals) b = zeros(VectorValue{2,Float64},4,3) f = linear_combination(b,a) +fp = transpose(b)*avals +∇fp = zeros(TensorValue{2,2,Float64,4},3) + +test_field_array(f,p,fp) +test_field_array(f,p,fp,grad=∇fp) +test_field_array(f,x,result(f,x)) +test_field_array(f,x,result(f,x),grad=result(∇.(f),x)) +test_field_array(f,z,result(f,z)) +test_field_array(f,z,result(f,z),grad=result(∇.(f),z)) ff = Broadcasting(Operation(meas))(f) evaluate(ff,p) @@ -366,6 +377,13 @@ ff = evaluate(Broadcasting(∘),f,g) evaluate(ff,p) @test isa(testvalue(ff),typeof(ff)) +avals = rand(4) +a = MockFieldArray(avals) +b = 
Diagonal(ones(4)) +f = linear_combination(b,a) +fp = transpose(b)*evaluate(a,p) +test_field_array(f,p,fp) + ## Test MockField # #np = 4 diff --git a/test/GridapTests/EmptyDomainsTests.jl b/test/GridapTests/EmptyDomainsTests.jl index bbb5c3dd7..5af494dcb 100644 --- a/test/GridapTests/EmptyDomainsTests.jl +++ b/test/GridapTests/EmptyDomainsTests.jl @@ -2,7 +2,7 @@ module EmptyDomainsTests using Gridap import Gridap: ∇ -using Gridap.Geometry +using Gridap.Geometry, Gridap.CellData using Test # Analytical functions @@ -18,9 +18,7 @@ g(x) = tr(∇u(x)) ∇(::typeof(p)) = ∇p # Mesh -cells = (10,10) -domain = (0,1,0,1) -model = CartesianDiscreteModel(domain,cells) +model = CartesianDiscreteModel((0,1,0,1),(10,10)) labels = get_face_labeling(model) add_tag_from_tags!(labels,"dirichlet",[1,2,5]) add_tag_from_tags!(labels,"neumann",[6,7,8]) @@ -93,6 +91,28 @@ dΛ = Measure(Λ,2) a((u,p),(v,q)) = ∫(jump(∇(u)) ⊙ jump(∇(v)))dΛ assemble_matrix(a,W,W) +############################################### +# Multidomain + +Ωa = Triangulation(model,collect(Int32,1:50)) +Ωb = Triangulation(model,collect(Int32,51:100)) +Ωc = Triangulation(model,Int32[]) +Γ = Interface(Ωa,Ωb) + +Va = TestFESpace(Ωa, ReferenceFE(lagrangian, Float64, 1)) +Vb = TestFESpace(Ωb, ReferenceFE(lagrangian, Float64, 1)) +X = MultiFieldFESpace([Va,Vb]) + +Π(u,Ω) = change_domain(u,Ω,DomainStyle(u)) +dΩa = Measure(Ωa,2) +dΩb = Measure(Ωb,2) +dΩc = Measure(Ωc,2) +dΓ = Measure(Γ,2) +a1((ua,ub),(va,vb)) = ∫( ∇(va)⊙∇(ub) + vb*ub )*dΩa + ∫( ∇(vb)⊙∇(ua) + va*(ua+ub))*dΩb + ∫( mean(va*ub) + mean(vb*ua) )*dΓ +assemble_matrix(a1,X,X) +a2((ua,ub),(va,vb)) = ∫(Π(ua,Ωb)*Π(vb,Ωa))dΩc +assemble_matrix(a2,X,X) + ############################################### # Autodiff when the target domain is empty diff --git a/test/PolynomialsTests/BernsteinBasesTests.jl b/test/PolynomialsTests/BernsteinBasesTests.jl index 07c2699b5..e87a9aa30 100644 --- a/test/PolynomialsTests/BernsteinBasesTests.jl +++ b/test/PolynomialsTests/BernsteinBasesTests.jl 
@@ -345,4 +345,11 @@ Hbx = _Hbx(D,order,x,H,x2λ) test_field_array(b,x,bx,≈, grad=Gbx, gradgrad=Hbx) test_field_array(b,x1,bx[1,:],≈,grad=Gbx[1,:],gradgrad=Hbx[1,:]) +# value_type must be concrete even when user passes a non-concrete tensor type +for V in (TensorValue{2,2,Float64}, SymTensorValue{2,Float64}, + SkewSymTensorValue{2,Float64}, SymFourthOrderTensorValue{2,Float64}) + @test isconcretetype(value_type(BernsteinBasis(Val(2), V, 1))) + @test isconcretetype(value_type(BernsteinBasisOnSimplex{2}(V, 1))) +end + end # module diff --git a/test/PolynomialsTests/CompWiseTensorPolyBasesTests.jl b/test/PolynomialsTests/CompWiseTensorPolyBasesTests.jl index b7b92f704..44d3be7d3 100644 --- a/test/PolynomialsTests/CompWiseTensorPolyBasesTests.jl +++ b/test/PolynomialsTests/CompWiseTensorPolyBasesTests.jl @@ -59,5 +59,10 @@ Gbx = hcat( [ getindex.(bi,1) .* VectorValue(1.,0)⊗V(1.,0) for bi in evaluate( test_field_array(b,x,bx,≈, grad=Gbx) +# value_type must be concrete even when user passes a non-concrete tensor type +for V in (TensorValue{2,2,Float64}, SymTensorValue{2,Float64}, SymFourthOrderTensorValue{2,Float64}) + L = num_indep_components(V) + @test isconcretetype(value_type(CompWiseTensorPolyBasis{2}(Monomial, V, ones(Int, L, 2)))) +end end # module diff --git a/test/PolynomialsTests/ModalC0BasesTests.jl b/test/PolynomialsTests/ModalC0BasesTests.jl index f03024561..1f6960a19 100644 --- a/test/PolynomialsTests/ModalC0BasesTests.jl +++ b/test/PolynomialsTests/ModalC0BasesTests.jl @@ -141,5 +141,10 @@ G = gradient_type(V,x1) r = zeros(G, (1,1)) @test_throws ErrorException Polynomials._set_derivative_mc0!(r,1,s,0,0,V) +# value_type must be concrete even when user passes a non-concrete tensor type +for V in (TensorValue{2,2,Float64}, SymTensorValue{2,Float64}, + SkewSymTensorValue{2,Float64}, SymFourthOrderTensorValue{2,Float64}) + @test isconcretetype(value_type(ModalC0Basis{2}(V, 1))) +end end # module diff --git a/test/PolynomialsTests/MonomialBasesTests.jl 
b/test/PolynomialsTests/MonomialBasesTests.jl index 1edd4f042..12d3e404c 100644 --- a/test/PolynomialsTests/MonomialBasesTests.jl +++ b/test/PolynomialsTests/MonomialBasesTests.jl @@ -278,4 +278,10 @@ order = 2 @test _ph_filter( (1,1) ,order) == true @test _ph_filter( (3,1) ,order) == false +# value_type must be concrete even when user passes a non-concrete tensor type +for V in (TensorValue{2,2,Float64}, SymTensorValue{2,Float64}, + SkewSymTensorValue{2,Float64}, SymFourthOrderTensorValue{2,Float64}) + @test isconcretetype(value_type(MonomialBasis{2}(V, (1,1)))) +end + end # module diff --git a/test/ReferenceFEsTests/RaviartThomasRefFEsTests.jl b/test/ReferenceFEsTests/RaviartThomasRefFEsTests.jl index 81b96e610..b2f4e1e81 100644 --- a/test/ReferenceFEsTests/RaviartThomasRefFEsTests.jl +++ b/test/ReferenceFEsTests/RaviartThomasRefFEsTests.jl @@ -14,6 +14,9 @@ using Gridap.Visualization using Gridap using Gridap.FESpaces +using LinearAlgebra +using FillArrays + p = QUAD D = num_dims(QUAD) et = Float64 @@ -110,7 +113,6 @@ test_reference_fe(reffe) @test get_order(get_prebasis(reffe)) == 1 @test Conformity(reffe) == DivConformity() - p = TET D = num_dims(p) et = Float64 @@ -166,4 +168,22 @@ reffe = ReferenceFE(TET,raviart_thomas,0) @test RaviartThomas() == raviart_thomas +# Pullback +x = Point{2,Float64}[(0.0,0.0), (1.0,0.0), (0.0,1.0), (1.0,1.0)] +J = ConstantField(TensorValue{2,2,Float64}((1.0,0.0,0.0,1.0))) + +reffe = ReferenceFE(QUAD,raviart_thomas,Float64,1) +basis = get_shapefuns(reffe) +test_field_array(basis,x,evaluate(basis,x)) + +pb_basis = evaluate(Broadcasting(Operation(ContraVariantPiolaMap())),basis,J) +test_field_array(pb_basis,x,evaluate(pb_basis,x)) + +lcpb_basis = linear_combination(Diagonal(ones(num_dofs(reffe))),pb_basis) +test_field_array(lcpb_basis,x,evaluate(lcpb_basis,x)) + +ncells = 0 +arr = lazy_map(linear_combination, Fill(Diagonal(ones(num_dofs(reffe))), ncells), Fill(basis, ncells)) +arr = lazy_map(linear_combination, 
Fill(zeros(num_dofs(reffe),2), ncells), Fill(basis, ncells)) + end # module diff --git a/test/TensorValuesTests/OperationsTests.jl b/test/TensorValuesTests/OperationsTests.jl index b0b4f4e16..d7c38d9e5 100644 --- a/test/TensorValuesTests/OperationsTests.jl +++ b/test/TensorValuesTests/OperationsTests.jl @@ -1585,4 +1585,51 @@ ref4 = [sum(T3b[i,j,k]*B[i,k] for i in 1:3, k in 1:3) for j in 1:2] @test_throws ArgumentError tensor_contraction(VectorValue(1,2), VectorValue(1,2), (3,), (1,)) @test_throws ArgumentError tensor_contraction(TensorValue{2,2}(1:4...), VectorValue(1,2), (1,1), (1,1)) + +# promote_op + +function test_op_promote(T, op, args...) + @test Base.promote_op(op, map(typeof, args)...) == T +end + +v = VectorValue(1, 2, 3) +t = TensorValue(1, 2, 3, 4, 5, 6, 7, 8, 9) +st = SymTensorValue(1, 2, 3, 5, 6, 9) +qt = SymTracelessTensorValue(1, 2, 3, 5, 6) +sk = SkewSymTensorValue(1, 2, 3) +fo = one(SymFourthOrderTensorValue{2, Int}) + +test_op_promote(typeof(v * 2), *, v, 2) +test_op_promote(typeof(v * 2.0), *, v, 2.0) +test_op_promote(typeof(2 * v), *, 2, v) +test_op_promote(typeof(2.0 * v), *, 2.0, v) +test_op_promote(typeof(t * 2), *, t, 2) +test_op_promote(typeof(t * 2.0), *, t, 2.0) +test_op_promote(typeof(st * 2), *, st, 2) +test_op_promote(typeof(st * 2.0), *, st, 2.0) +test_op_promote(typeof(qt * 2), *, qt, 2) +test_op_promote(typeof(qt * 2.0), *, qt, 2.0) +test_op_promote(typeof(sk * 2), *, sk, 2) +test_op_promote(typeof(sk * 2.0), *, sk, 2.0) +test_op_promote(typeof(fo * 2), *, fo, 2) +test_op_promote(typeof(fo * 2.0), *, fo, 2.0) + +test_op_promote(typeof(v / 2.0), /, v, 2.0) +test_op_promote(typeof(t / 2.0), /, t, 2.0) +test_op_promote(typeof(st / 2.0), /, st, 2.0) +test_op_promote(typeof(qt / 2.0), /, qt, 2.0) +test_op_promote(typeof(sk / 2.0), /, sk, 2.0) +test_op_promote(typeof(fo / 2.0), /, fo, 2.0) + +test_op_promote(typeof(v ⊗ 2), ⊗, v, 2) +test_op_promote(typeof(v ⊗ 2.0), ⊗, v, 2.0) +test_op_promote(typeof(st ⊗ 2.0), ⊗, st, 2.0) 
+test_op_promote(typeof(qt ⊗ 2.0), ⊗, qt, 2.0) +test_op_promote(typeof(sk ⊗ 2.0), ⊗, sk, 2.0) + +test_op_promote(typeof(v ⊗ v), ⊗, v, v) +test_op_promote(typeof(v ⊗ st), ⊗, v, st) +test_op_promote(typeof(st ⊗ st), ⊗, st, st) +test_op_promote(typeof(qt ⊗ qt), ⊗, qt, qt) + end # module OperationsTests