
Multivariate functions #185

Merged: 31 commits, merged on Nov 28, 2022
Changes from 23 commits

Commits (31)
9199067
first version for multivariate functions of type trivialtensorizer
benjione Sep 1, 2022
4e6a8ab
performance improvements
benjione Sep 5, 2022
00c8118
added simple test cases
benjione Sep 5, 2022
bb0f8f6
fix test and extension
benjione Sep 5, 2022
2758897
comment broken test out for old Julia versions
benjione Sep 8, 2022
568c3f5
fixed ordering of tensorization
benjione Sep 8, 2022
00d75e3
simd change and vector type
benjione Sep 12, 2022
a821c96
small changes
benjione Sep 15, 2022
9263003
enabled broken test
benjione Sep 15, 2022
03e123a
u
benjione Oct 12, 2022
2f23248
fixed unbound parameters
benjione Oct 12, 2022
775ace3
type fix
benjione Oct 12, 2022
64c9b54
Merge branch 'master' of github.com:JuliaApproximation/ApproxFunBase.jl
benjione Oct 17, 2022
2d61874
type fixes
benjione Oct 17, 2022
2cfe115
add Polynomials for testing in Project toml
benjione Oct 18, 2022
e9a0032
Merge branch 'JuliaApproximation:master' into master
benjione Nov 16, 2022
236eab4
invoked 2D case
benjione Nov 17, 2022
98eaa53
move multivariate tests to orthogonal polynomial repository
benjione Nov 17, 2022
c9430a8
Merge branch 'master' of github.com:benjione/ApproxFunBase.jl
benjione Nov 17, 2022
f25efa7
Merge branch 'JuliaApproximation:master' into master
benjione Nov 17, 2022
2033c82
delete multivariate
benjione Nov 17, 2022
effb5a4
Merge branch 'master' of github.com:benjione/ApproxFunBase.jl
benjione Nov 17, 2022
582c232
Merge branch 'master' into master
jishnub Nov 19, 2022
5adac34
fixed block type and rewritten trivialtensor iterator
benjione Nov 19, 2022
d08dcb8
Merge branch 'master' of github.com:benjione/ApproxFunBase.jl
benjione Nov 19, 2022
dd3cb6b
remove Orthogonal polynomials from Project toml
benjione Nov 19, 2022
d7313fc
adding TensorIteratorFun
benjione Nov 21, 2022
e1fb701
changed project toml
benjione Nov 21, 2022
6640875
rename TrivialTensorFun and add check if all spaces in Tensorspace ar…
benjione Nov 21, 2022
656a717
performance optimization
benjione Nov 21, 2022
36d9411
added ProductFun which is not working as a comment
benjione Nov 23, 2022
Project.toml (5 changes: 4 additions & 1 deletion)
@@ -8,6 +8,7 @@ BandedMatrices = "aae01518-5342-5314-be14-df237901396f"
BlockArrays = "8e7c35d0-a365-5155-bbbb-fb81a777f24e"
BlockBandedMatrices = "ffab5731-97b5-5995-9138-79e8c1846df0"
Calculus = "49dc2e85-a5d0-5ad3-a950-438e2897f1b9"
Combinatorics = "861a8166-3701-5b0c-9a16-15d98fcdc6aa"
DSP = "717857b8-e6f2-59f4-9121-6e50c889abd2"
DomainSets = "5b8099bc-c8ec-5219-889f-1d9e522a28bf"
DualNumbers = "fa6b7ba4-c1ee-5f82-b5fc-ecf0adba8f74"
@@ -32,6 +33,7 @@ BandedMatrices = "0.16, 0.17"
BlockArrays = "0.14, 0.15, 0.16"
BlockBandedMatrices = "0.10, 0.11"
Calculus = "0.5"
Combinatorics = "1.0.2"
DSP = "0.6, 0.7"
DomainSets = "0.5"
DualNumbers = "0.6.2"
@@ -47,8 +49,9 @@ StaticArrays = "0.12, 1.0"
julia = "1.6"

[extras]
ApproxFunOrthogonalPolynomials = "b70543e2-c0d9-56b8-a290-0d4d6d4de211"
Aqua = "4c88cf16-eb10-579e-8560-4a9242c79595"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"

[targets]
test = ["Aqua", "Random"]
test = ["Aqua", "Random", "ApproxFunOrthogonalPolynomials"]
src/ApproxFunBase.jl (2 changes: 2 additions & 0 deletions)
@@ -35,6 +35,8 @@ import Base.Broadcast: BroadcastStyle, Broadcasted, AbstractArrayStyle,

import Statistics: mean

import Combinatorics: multiexponents

import LinearAlgebra: BlasInt, BlasFloat, norm, ldiv!, mul!, det, cross,
qr, qr!, rank, isdiag, istril, istriu, issymmetric,
Tridiagonal, diagm, diagm_container, factorize,
src/Multivariate/ProductFun.jl (50 changes: 48 additions & 2 deletions)
@@ -5,6 +5,15 @@

export ProductFun

## TODO:
## In a newer version, an abstract type for ProductFun is needed, so that different implementations are possible;
## however, refactoring this is a lot of effort...
struct TensorIteratorFun{d, SS<:TensorSpace{<:NTuple{d, <:UnivariateSpace}}, T<:Number} <: MultivariateFun{T, d}
space::SS
coefficients::Vector{T}
iterator::TrivialTensorizer{d}
orders::Block
end
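
For orientation, the struct pairs a flat, degree-graded coefficient vector with the tensorizer that maps positions in that vector to multi-indices, plus the block up to which coefficients are stored. A minimal sketch of that pairing (hypothetical, assuming some f::TensorIteratorFun is in scope):

# each stored coefficient belongs to the multi-index the tensorizer
# yields at the same position
for (k, idx) in enumerate(f.iterator)
    k > length(f.coefficients) && break
    # f.coefficients[k] multiplies the product basis function indexed by idx
end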
"""
ProductFun(f, space::TensorSpace; [tol=eps()])

@@ -28,6 +37,7 @@ julia> coefficients(P) # power only at the (1,1) Chebyshev mode
0.0 1.0
```
"""

struct ProductFun{S<:UnivariateSpace,V<:UnivariateSpace,SS<:AbstractProductSpace,T} <: BivariateFun{T}
coefficients::Vector{VFun{S,T}} # coefficients are in x
space::SS
@@ -85,6 +95,15 @@ function ProductFun(cfs::AbstractMatrix{T},sp::AbstractProductSpace{Tuple{S,V},D
end
end

## TODO: This ProductFun constructor does not actually return a ProductFun; dirty, but it was the easiest to implement. An abstract type for ProductFuns
# is probably needed in the future.
function ProductFun(iter::TrivialTensorizer{d},cfs::Vector{T},blk::Block, sp::AbstractProductSpace{<:NTuple{d, <:UnivariateSpace}}) where {T<:Number,d}

@assert d>2

TensorIteratorFun(sp, cfs, iter, blk) # This is not a ProductFun
end

## Construction in a ProductSpace via a Vector of Funs
"""
ProductFun(M::AbstractVector{<:Fun{<:UnivariateSpace}}, sp::UnivariateSpace)
@@ -241,7 +260,8 @@ function coefficients(f::ProductFun,ox::Space,oy::Space)
end

(f::ProductFun)(x,y) = evaluate(f,x,y)
(f::ProductFun)(x,y,z) = evaluate(f,x,y,z)

(f::TensorIteratorFun)(x...) = evaluate(f, x...)

coefficients(f::ProductFun,ox::TensorSpace) = coefficients(f,ox[1],ox[2])

@@ -276,7 +296,6 @@ canonicalevaluate(f::ProductFun,xx::AbstractVector,yy::AbstractVector) =


evaluate(f::ProductFun,x,y) = canonicalevaluate(f,tocanonical(f,x,y)...)
evaluate(f::ProductFun,x,y,z) = canonicalevaluate(f,tocanonical(f,x,y,z)...)

# TensorSpace does not use map
evaluate(f::ProductFun{S,V,SS,T},x::Number,::Colon) where {S<:UnivariateSpace,V<:UnivariateSpace,SS<:TensorSpace,T} =
@@ -286,6 +305,33 @@ evaluate(f::ProductFun{S,V,SS,T},x::Number,y::Number) where {S<:UnivariateSpace,
evaluate(f,x,:)(y)


# TensorSpace evaluation
function evaluate(f::TensorIteratorFun{d, SS, T},x...) where {d, SS, T}
highest_order = f.orders.n[1]
n = length(f.coefficients)

# this could be evaluated lazily for the sparse case
A = [Fun(f.space.spaces[i], [zeros(k);1])(x[i]) for k=0:highest_order, i=1:d]
result::T = 0
coef_counter::Int = 1
for i in f.iterator
tmp = f.coefficients[coef_counter]
if tmp != 0
tmp_res = 1
for k=1:d
tmp_res *= A[i[k], k]
end
result += tmp * tmp_res
end
coef_counter += 1
if coef_counter > n
break
end
end
return result
end
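
In effect the loop computes the degree-graded sum f(x) = sum_k c_k * prod_m phi_{i_m}(x_m), where phi_j is the j-th basis function of the m-th factor space. A dense reference version of the same sum, as a sketch (hypothetical, assuming Chebyshev from ApproxFunOrthogonalPolynomials):

using ApproxFunBase, ApproxFunOrthogonalPolynomials
S = Chebyshev()^3
c = [1.0, 0.5, -0.25]    # a few degree-graded coefficients
x = (0.1, 0.2, 0.3)
# pair each coefficient with its multi-index and accumulate the tensor products
val = sum(c[k] * prod(Fun(S.spaces[m], [zeros(idx[m]-1); 1])(x[m]) for m in 1:3)
          for (k, idx) in enumerate(Iterators.take(ApproxFunBase.tensorizer(S), length(c))))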


evaluate(f::ProductFun,x) = evaluate(f,x...)

*(c::Number,f::F) where {F<:ProductFun} = F(c*f.coefficients,f.space)
src/Multivariate/TensorSpace.jl (125 changes: 119 additions & 6 deletions)
@@ -24,17 +24,105 @@ struct Tensorizer{DMS<:Tuple}
blocks::DMS
end

const Tensorizer2D{AA, BB} = Tensorizer{Tuple{AA, BB}}
const TrivialTensorizer{d} = Tensorizer{NTuple{d,Ones{Int,1,Tuple{OneToInf{Int}}}}}
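
For a TrivialTensorizer every block in every dimension has length one, so the iterator effectively enumerates the d-dimensional indices graded by total degree. A hypothetical construction (assuming Ones from FillArrays and ∞ from InfiniteArrays, matching the alias above):

using FillArrays, InfiniteArrays
it3 = Tensorizer(ntuple(_ -> Ones{Int}(∞), 3))    # a TrivialTensorizer{3}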

Base.eltype(a::Tensorizer) = NTuple{length(a.blocks),Int}
Base.eltype(::Tensorizer{<:NTuple{d}}) where {d} = NTuple{d,Int}
dimensions(a::Tensorizer) = map(sum,a.blocks)
Base.length(a::Tensorizer) = mapreduce(sum,*,a.blocks)


function start(a::TrivialTensorizer{d}) where {d}
if d==2
return invoke(start, Tuple{Tensorizer2D}, a)
else
# ((block_dim_1, block_dim_2,...), (iteration_number, iterator, iterator_state)), (sub_dim_1, sub_dim_2,...), (shift_dim_1, shift_dim_2,...), (numblock_1, numblock_2,...), (itemssofar, length)
return (ntuple(i->1, d),(0, nothing, nothing)), ntuple(i->1, d), ntuple(i->0, d), ntuple(i->a.blocks[i][1], d), (0,length(a))
end
end

function next(a::TrivialTensorizer{d}, iterator_tuple) where {d}

if d==2
return invoke(next, Tuple{Tensorizer2D, Tuple}, a, iterator_tuple)
end

(block, (j, iterator, iter_state)), subblock, shift, numblock, (i,tot) = iterator_tuple

# increase subblock like an odometer: bump the last position that still has
# room and reset every later position back to 1
# (tuples are immutable, so an updated copy is returned)
function increase_subblock(subblock)
    c = d
    while c > 0
        if subblock[c] < numblock[c]
            return ntuple(i -> i < c ? subblock[i] : (i == c ? subblock[i]+1 : 1), d)
        end
        c -= 1
    end
    # should never happen, since numblock == subblock is checked first
    error("Should not have happened!")
end

function increase_block(j, iterator, iter_state)
res, iter_state = iterate(iterator, iter_state)
block = tuple((res.+1)...)
j = j+1
return block, j, iter_state
end

@inline function check_block_finished()
if iterator === nothing
return true
end
# there are binomial(N-1, d-1) combinations in a block, where N = sum(block)
amount_combinations_block = binomial(sum(block)-1, d-1)
# check if all combinations have been iterated over
amount_combinations_block <= j
end

ret = ntuple(i->subblock[i]+shift[i], d)
ret = reverse(ret)

# check if reached end of current block (subblock == numblock)
if numblock == subblock # end of block
if check_block_finished() # current block fully iterated; prepare the next one

# set up iterator for new block
current_sum = sum(block)
iterator = multiexponents(d, current_sum+1-d)
iter_state = nothing
j = 0
end

# increase block, or initialize new block
block, j, iter_state = increase_block(j, iterator, iter_state)

subblock = ntuple(i->1, d) # set all subblocks back to 1

if i+1 < tot # set new shifts and limits if not done yet
numblock = ntuple(i->a.blocks[i][block[i]], d) # I think for the TrivialTensorizer this is always 1, so this could be simplified
shift = ntuple(i->sum(a.blocks[i][1:(block[i]-1)]), d)
end
else
subblock = increase_subblock(subblock)
end
ret, ((block, (j, iterator, iter_state)), subblock, shift, numblock, (i,tot))
end


function done(a::TrivialTensorizer{d}, iterator_tuple) where {d}
if d==2
return invoke(done, Tuple{Tensorizer2D, Tuple}, a, iterator_tuple)
end
(_, _, _, _, (i,tot)) = iterator_tuple
return i ≥ tot
end
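
Taken together, start/next/done walk the d-dimensional indices block by block, i.e. total degree by total degree, with the order inside a block delegated to Combinatorics.multiexponents. A quick way to inspect the first few indices, as a sketch (hypothetical; the within-block order is whatever multiexponents produces):

using FillArrays, InfiniteArrays
it3 = Tensorizer(ntuple(_ -> Ones{Int}(∞), 3))
collect(Iterators.take(it3, 4))
# block 1 is (1,1,1); block 2 holds the three total-degree-1 indices,
# i.e. the permutations of (2,1,1)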


# (blockrow,blockcol), (subrow,subcol), (rowshift,colshift), (numblockrows,numblockcols), (itemssofar, length)
start(a::Tensorizer{Tuple{AA,BB}}) where {AA,BB} = (1,1), (1,1), (0,0), (a.blocks[1][1],a.blocks[2][1]), (0,length(a))
start(a::Tensorizer2D{AA, BB}) where {AA,BB} = (1,1), (1,1), (0,0), (a.blocks[1][1],a.blocks[2][1]), (0,length(a))

function next(a::Tensorizer{Tuple{AA,BB}}, ((K,J), (k,j), (rsh,csh), (n,m), (i,tot))) where {AA,BB}
function next(a::Tensorizer2D{AA, BB}, ((K,J), (k,j), (rsh,csh), (n,m), (i,tot))) where {AA,BB}
ret = k+rsh,j+csh
if k==n && j==m # end of block
if J == 1 || K == length(a.blocks[1]) # end of new block
@@ -59,7 +147,7 @@ function next(a::Tensorizer{Tuple{AA,BB}}, ((K,J), (k,j), (rsh,csh), (n,m), (i,t
end


done(a::Tensorizer, ((K,J), (k,j), (rsh,csh), (n,m), (i,tot))) = i ≥ tot
done(a::Tensorizer2D, ((K,J), (k,j), (rsh,csh), (n,m), (i,tot))) = i ≥ tot

iterate(a::Tensorizer) = next(a, start(a))
function iterate(a::Tensorizer, st)
@@ -104,6 +192,14 @@ block(ci::CachedIterator{T,TrivialTensorizer{2}},k::Int) where {T} =
block(::TrivialTensorizer{2},n::Int) =
Block(floor(Integer,sqrt(2n) + 1/2))

function block(::TrivialTensorizer{d},n::Int) where {d}
order::Int = 0
while binomial(order+d, d) < n
order = order + 1
end
return Block(order+1)
end
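
The loop inverts the cumulative block-size count: the first m blocks of a d-dimensional trivial tensorizer contain every multi-index of total degree at most m-1, which is binomial(m-1+d, d) entries. A small arithmetic check (not from the PR):

d = 3
binomial(0 + d, d) == 1             # block 1: only (1,1,1)
binomial(1 + d, d) == 1 + 3         # blocks 1 and 2
binomial(2 + d, d) == 1 + 3 + 6     # blocks 1 to 3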

block(sp::Tensorizer{<:Tuple{<:AbstractFill{S},<:AbstractFill{T}}},n::Int) where {S,T} =
Block(floor(Integer,sqrt(2floor(Integer,(n-1)/(getindex_value(sp.blocks[1])*getindex_value(sp.blocks[2])))+1) + 1/2))
_cumsum(x) = cumsum(x)
@@ -473,6 +569,8 @@ end

fromtensor(S::Space,M::AbstractMatrix) = fromtensor(tensorizer(S),M)
totensor(S::Space,M::AbstractVector) = totensor(tensorizer(S),M)
totensor(SS::TensorSpace{<:NTuple{d}},M::AbstractVector) where {d} =
    d > 2 ? totensoriterator(tensorizer(SS),M) : totensor(tensorizer(SS),M)

function fromtensor(it::Tensorizer,M::AbstractMatrix)
n,m=size(M)
@@ -496,19 +594,27 @@ function totensor(it::Tensorizer,M::AbstractVector)
B=block(it,n)
ds = dimensions(it)

#ret=zeros(eltype(M),[sum(it.blocks[i][1:min(B.n[1],length(it.blocks[i]))]) for i=1:length(it.blocks)]...)

ret=zeros(eltype(M),sum(it.blocks[1][1:min(B.n[1],length(it.blocks[1]))]),
sum(it.blocks[2][1:min(B.n[1],length(it.blocks[2]))]))

k=1
for (K,J) in it
for index in it
if k > n
break
end
ret[K,J] = M[k]
ret[index...] = M[k]
k += 1
end
ret
end

@inline function totensoriterator(it::TrivialTensorizer{d},M::AbstractVector) where {d}
B=block(it,length(M))
return it, M, B
end
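
Unlike the 2D totensor, which materializes a dense coefficient matrix, totensoriterator only bundles what a TensorIteratorFun needs: a dense rank-d array would cost memory growing like n^d in the polynomial order. Downstream the tuple is splatted straight into ProductFun, roughly (a sketch, assuming it, coeffs, and sp as in the surrounding code):

t = totensoriterator(it, coeffs)    # (iterator, coefficients, Block)
F = ProductFun(t..., sp)            # hits the d > 2 method, returns a TensorIteratorFun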

for OP in (:block,:blockstart,:blockstop)
@eval begin
$OP(s::TensorSpace, ::PosInfinity) = ℵ₀
@@ -542,7 +648,14 @@ end

itransform(sp::TensorSpace,cfs::AbstractVector) = vec(itransform!(sp,coefficientmatrix(Fun(sp,cfs))))

evaluate(f::AbstractVector,S::AbstractProductSpace,x) = ProductFun(totensor(S,f),S)(x...)
function evaluate(f::AbstractVector,S::AbstractProductSpace,x)
    t = totensor(S,f)
    if t isa Tuple
        # d > 2: totensor returns (iterator, coefficients, block);
        # splatting dispatches to the TensorIteratorFun path above
        return ProductFun(t..., S)(x...)
    else
        return ProductFun(t, S)(x...)
    end
end
evaluate(f::AbstractVector,S::AbstractProductSpace,x,y) = ProductFun(totensor(S,f),S)(x,y)
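
An end-to-end sketch of the d > 2 path this enables (hypothetical, assuming Chebyshev from ApproxFunOrthogonalPolynomials):

using ApproxFunBase, ApproxFunOrthogonalPolynomials
S = Chebyshev()^3                   # a 3D TensorSpace
f = Fun(S, [1.0, 2.0, 3.0, 4.0])    # degree-graded coefficient vector
f(0.1, 0.2, 0.3)                    # routed through totensor and TensorIteratorFun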

