From e322d2c637f4bcd67d3f82451c6715a48cc9373a Mon Sep 17 00:00:00 2001 From: Will Tebbutt Date: Sun, 30 Aug 2020 12:17:13 +0100 Subject: [PATCH 01/35] Fix style --- src/KernelFunctions.jl | 58 ++++++++++++++++++++------------ src/matrix/kernelkroneckermat.jl | 14 +++----- src/matrix/kernelmatrix.jl | 20 +++-------- src/matrix/kernelpdmat.jl | 32 ++++++++++-------- 4 files changed, 63 insertions(+), 61 deletions(-) diff --git a/src/KernelFunctions.jl b/src/KernelFunctions.jl index 156f50f10..15bf5ce51 100644 --- a/src/KernelFunctions.jl +++ b/src/KernelFunctions.jl @@ -53,33 +53,49 @@ abstract type Kernel end abstract type SimpleKernel <: Kernel end include("utils.jl") -include("distances/pairwise.jl") -include("distances/dotproduct.jl") -include("distances/delta.jl") -include("distances/sinus.jl") -include("transform/transform.jl") - -for f in readdir(joinpath(@__DIR__, "basekernels")) - endswith(f, ".jl") && include(joinpath("basekernels", f)) -end - -include("kernels/transformedkernel.jl") -include("kernels/scaledkernel.jl") -include("matrix/kernelmatrix.jl") -include("kernels/kernelsum.jl") -include("kernels/kernelproduct.jl") -include("kernels/tensorproduct.jl") -include("approximations/nystrom.jl") +include(joinpath("distances", "pairwise.jl")) +include(joinpath("distances", "dotproduct.jl")) +include(joinpath("distances", "delta.jl")) +include(joinpath("distances", "sinus.jl")) +include(joinpath("transform", "transform.jl")) + +include(joinpath("basekernels", "constant.jl")) +include(joinpath("basekernels", "cosine.jl")) +include(joinpath("basekernels", "exponential.jl")) +include(joinpath("basekernels", "exponentiated.jl")) +include(joinpath("basekernels", "fbm.jl")) +include(joinpath("basekernels", "gabor.jl")) +include(joinpath("basekernels", "maha.jl")) +include(joinpath("basekernels", "matern.jl")) +include(joinpath("basekernels", "nn.jl")) +include(joinpath("basekernels", "periodic.jl")) +include(joinpath("basekernels", "piecewisepolynomial.jl")) +include(joinpath("basekernels", "polynomial.jl")) +include(joinpath("basekernels", "rationalquad.jl")) +include(joinpath("basekernels", "sm.jl")) +include(joinpath("basekernels", "wiener.jl")) + +include(joinpath("kernels", "transformedkernel.jl")) +include(joinpath("kernels", "scaledkernel.jl")) +include(joinpath("matrix", "kernelmatrix.jl")) +include(joinpath("kernels", "kernelsum.jl")) +include(joinpath("kernels", "kernelproduct.jl")) +include(joinpath("kernels", "tensorproduct.jl")) +include(joinpath("approximations", "nystrom.jl")) include("generic.jl") -include("mokernels/moinput.jl") -include("mokernels/independent.jl") +include(joinpath("mokernels", "moinput.jl")) +include(joinpath("mokernels", "independent.jl")) include("zygote_adjoints.jl") function __init__() - @require Kronecker="2c470bb0-bcc8-11e8-3dad-c9649493f05e" include("matrix/kernelkroneckermat.jl") - @require PDMats="90014a1f-27ba-587c-ab20-58faa44d9150" include("matrix/kernelpdmat.jl") + @require Kronecker="2c470bb0-bcc8-11e8-3dad-c9649493f05e" begin + include(joinpath("matrix", "kernelkroneckermat.jl")) + end + @require PDMats="90014a1f-27ba-587c-ab20-58faa44d9150" begin + include(joinpath("matrix", "kernelpdmat.jl")) + end end end diff --git a/src/matrix/kernelkroneckermat.jl b/src/matrix/kernelkroneckermat.jl index 0ccf4c182..38a805c7c 100644 --- a/src/matrix/kernelkroneckermat.jl +++ b/src/matrix/kernelkroneckermat.jl @@ -2,22 +2,16 @@ using .Kronecker export kernelkronmat -function kernelkronmat( - κ::Kernel, - X::AbstractVector, - dims::Int - ) +function 
kernelkronmat(κ::Kernel, X::AbstractVector, dims::Int) @assert iskroncompatible(κ) "The chosen kernel is not compatible for kroenecker matrices (see [`iskroncompatible`](@ref))" k = kernelmatrix(κ, X) kronecker(k, dims) end function kernelkronmat( - κ::Kernel, - X::AbstractVector{<:AbstractVector}; - obsdim::Int=defaultobs - ) - @assert iskroncompatible(κ) "The chosen kernel is not compatible for kroenecker matrices" + κ::Kernel, X::AbstractVector{<:AbstractVector}; obsdim::Int=defaultobs, +) + @assert iskroncompatible(κ) "The chosen kernel is not compatible for Kronecker matrices" Ks = kernelmatrix.(κ, X) K = reduce(⊗, Ks) end diff --git a/src/matrix/kernelmatrix.jl b/src/matrix/kernelmatrix.jl index cb3208244..a0101c7da 100644 --- a/src/matrix/kernelmatrix.jl +++ b/src/matrix/kernelmatrix.jl @@ -74,10 +74,7 @@ function kernelmatrix!(K::AbstractMatrix, κ::SimpleKernel, x::AbstractVector) end function kernelmatrix!( - K::AbstractMatrix, - κ::SimpleKernel, - x::AbstractVector, - y::AbstractVector, + K::AbstractMatrix, κ::SimpleKernel, x::AbstractVector, y::AbstractVector, ) validate_inplace_dims(K, x, y) pairwise!(K, metric(κ), x, y) @@ -102,19 +99,13 @@ end const defaultobs = 2 function kernelmatrix!( - K::AbstractMatrix, - κ::Kernel, - X::AbstractMatrix; - obsdim::Int = defaultobs + K::AbstractMatrix, κ::Kernel, X::AbstractMatrix; obsdim::Int = defaultobs ) return kernelmatrix!(K, κ, vec_of_vecs(X; obsdim=obsdim)) end function kernelmatrix!( - K::AbstractMatrix, - κ::Kernel, - X::AbstractMatrix, - Y::AbstractMatrix; + K::AbstractMatrix, κ::Kernel, X::AbstractMatrix, Y::AbstractMatrix; obsdim::Int = defaultobs ) x = vec_of_vecs(X; obsdim=obsdim) @@ -133,10 +124,7 @@ function kernelmatrix(κ::Kernel, X::AbstractMatrix, Y::AbstractMatrix; obsdim=d end function kerneldiagmatrix!( - K::AbstractVector, - κ::Kernel, - X::AbstractMatrix; - obsdim::Int = defaultobs + K::AbstractVector, κ::Kernel, X::AbstractMatrix; obsdim::Int = defaultobs ) return kerneldiagmatrix!(K, κ, vec_of_vecs(X; obsdim=obsdim)) end diff --git a/src/matrix/kernelpdmat.jl b/src/matrix/kernelpdmat.jl index fa617eaaf..55879b087 100644 --- a/src/matrix/kernelpdmat.jl +++ b/src/matrix/kernelpdmat.jl @@ -3,24 +3,28 @@ using .PDMats: PDMat export kernelpdmat """ - Compute a positive-definite matrix in the form of a `PDMat` matrix see [PDMats.jl]() with the cholesky decomposition precomputed - The algorithm recursively tries to add recursively a diagonal nugget until positive definiteness is achieved or that the noise is too big + Compute a positive-definite matrix in the form of a `PDMat` matrix see [PDMats.jl]() + with the cholesky decomposition precomputed. + The algorithm recursively tries to add recursively a diagonal nugget until positive + definiteness is achieved or that the noise is too big. 
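+
+    A minimal usage sketch (the names `k`, `X`, and `K` below are illustrative only;
+    it assumes `PDMats` has been loaded so that `kernelpdmat` is available, and that
+    each column of `X` is one observation, i.e. `obsdim=2`):
+
+    ```julia
+    using KernelFunctions, PDMats
+    k = SqExponentialKernel()
+    X = rand(5, 10)                  # 10 observations in 5 dimensions (one per column)
+    K = kernelpdmat(k, X; obsdim=2)  # `PDMat` with the Cholesky factorization precomputed
+    ```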
""" -function kernelpdmat( - κ::Kernel, - X::AbstractMatrix; - obsdim::Int = defaultobs - ) - K = kernelmatrix(κ,X,obsdim=obsdim) - Kmax =maximum(K) +function kernelpdmat(κ::Kernel, X::AbstractMatrix; obsdim::Int=defaultobs) + K = kernelmatrix(κ, X; obsdim=obsdim) + Kmax = maximum(K) α = eps(eltype(K)) - while !isposdef(K+α*I) && α < 0.01*Kmax + while !isposdef(K + α * I) && α < 0.01 * Kmax α *= 2.0 end - if α >= 0.01*Kmax - throw(ErrorException("Adding noise on the diagonal was not sufficient to build a positive-definite matrix:\n\t- Check that your kernel parameters are not extreme\n\t- Check that your data is sufficiently sparse\n\t- Maybe use a different kernel")) + if α >= 0.01 * Kmax + error( + "Adding noise on the diagonal was not sufficient to build a positive-definite" * + " matrix:\n\t- Check that your kernel parameters are not extreme\n\t- Check" * + " that your data is sufficiently sparse\n\t- Maybe use a different kernel", + ) end - return PDMat(K+α*I) + return PDMat(K + α * I) end -kernelpdmat(κ::Kernel,X::AbstractVector{<:Real};obsdim=defaultobs) = kernelpdmat(κ,reshape(X,1,:),obsdim=2) +function kernelpdmat(κ::Kernel, X::AbstractVector{<:Real}; obsdim=defaultobs) + return kernelpdmat(κ, reshape(X, 1, :); obsdim=2) +end From 81b2bb8e3be254214531cd2054086ca0d8f8fa7a Mon Sep 17 00:00:00 2001 From: Will Tebbutt Date: Sun, 30 Aug 2020 14:21:07 +0100 Subject: [PATCH 02/35] Fix convention --- src/basekernels/exponential.jl | 14 ++++++++------ src/basekernels/gabor.jl | 25 +++++++++---------------- test/basekernels/exponential.jl | 25 +++++++++++++++---------- test/basekernels/gabor.jl | 2 +- test/basekernels/sm.jl | 13 +++++++++---- test/kernels/custom.jl | 11 ----------- test/matrix/kernelmatrix.jl | 2 +- test/runtests.jl | 6 +----- 8 files changed, 44 insertions(+), 54 deletions(-) delete mode 100644 test/kernels/custom.jl diff --git a/src/basekernels/exponential.jl b/src/basekernels/exponential.jl index e9af3fd2c..731fb6acc 100644 --- a/src/basekernels/exponential.jl +++ b/src/basekernels/exponential.jl @@ -11,7 +11,7 @@ related form of the kernel or [`GammaExponentialKernel`](@ref) for a generalizat """ struct SqExponentialKernel <: SimpleKernel end -kappa(κ::SqExponentialKernel, d²::Real) = exp(-d²) +kappa(κ::SqExponentialKernel, d²::Real) = exp(-d² / 2) metric(::SqExponentialKernel) = SqEuclidean() @@ -50,10 +50,10 @@ const LaplacianKernel = ExponentialKernel The γ-exponential kernel is an isotropic Mercer kernel given by the formula: ``` - κ(x,y) = exp(-‖x-y‖^(2γ)) + κ(x,y) = exp(-‖x-y‖^γ) ``` Where `γ > 0`, (the keyword `γ` can be replaced by `gamma`) -For `γ = 1`, see `SqExponentialKernel` and `γ = 0.5`, see `ExponentialKernel` +For `γ = 2`, see `SqExponentialKernel` and `γ = 1`, see `ExponentialKernel` """ struct GammaExponentialKernel{Tγ<:Real} <: SimpleKernel γ::Vector{Tγ} @@ -65,10 +65,12 @@ end @functor GammaExponentialKernel -kappa(κ::GammaExponentialKernel, d²::Real) = exp(-d²^first(κ.γ)) +kappa(κ::GammaExponentialKernel, d::Real) = exp(-d^first(κ.γ)) -metric(::GammaExponentialKernel) = SqEuclidean() +metric(::GammaExponentialKernel) = Euclidean() iskroncompatible(::GammaExponentialKernel) = true -Base.show(io::IO, κ::GammaExponentialKernel) = print(io, "Gamma Exponential Kernel (γ = ", first(κ.γ), ")") +function Base.show(io::IO, κ::GammaExponentialKernel) + print(io, "Gamma Exponential Kernel (γ = ", first(κ.γ), ")") +end diff --git a/src/basekernels/gabor.jl b/src/basekernels/gabor.jl index 1798d12a6..bf3cf115e 100644 --- a/src/basekernels/gabor.jl +++ 
b/src/basekernels/gabor.jl @@ -11,7 +11,7 @@ struct GaborKernel{K<:Kernel} <: Kernel kernel::K function GaborKernel(;ell=nothing, p=nothing) k = _gabor(ell=ell, p=p) - new{typeof(k)}(k) + return new{typeof(k)}(k) end end @@ -57,24 +57,17 @@ end Base.show(io::IO, κ::GaborKernel) = print(io, "Gabor Kernel (ell = ", κ.ell, ", p = ", κ.p, ")") -function kernelmatrix( - κ::GaborKernel, - X::AbstractMatrix; - obsdim::Int=defaultobs) - kernelmatrix(κ.kernel, X; obsdim=obsdim) +function kernelmatrix(κ::GaborKernel, X::AbstractMatrix; obsdim::Int=defaultobs) + return kernelmatrix(κ.kernel, X; obsdim=obsdim) end function kernelmatrix( - κ::GaborKernel, - X::AbstractMatrix, - Y::AbstractMatrix; - obsdim::Int=defaultobs) - kernelmatrix(κ.kernel, X, Y; obsdim=obsdim) + κ::GaborKernel, X::AbstractMatrix, Y::AbstractMatrix; + obsdim::Int=defaultobs, +) + return kernelmatrix(κ.kernel, X, Y; obsdim=obsdim) end -function kerneldiagmatrix( - κ::GaborKernel, - X::AbstractMatrix; - obsdim::Int=defaultobs) #TODO Add test - kerneldiagmatrix(κ.kernel, X; obsdim=obsdim) +function kerneldiagmatrix(κ::GaborKernel, X::AbstractMatrix; obsdim::Int=defaultobs) #TODO Add test + return kerneldiagmatrix(κ.kernel, X; obsdim=obsdim) end diff --git a/test/basekernels/exponential.jl b/test/basekernels/exponential.jl index 692e0983c..b385fb088 100644 --- a/test/basekernels/exponential.jl +++ b/test/basekernels/exponential.jl @@ -5,8 +5,8 @@ v2 = rand(rng, 3) @testset "SqExponentialKernel" begin k = SqExponentialKernel() - @test kappa(k,x) ≈ exp(-x) - @test k(v1,v2) ≈ exp(-norm(v1-v2)^2) + @test kappa(k,x) ≈ exp(-x / 2) + @test k(v1,v2) ≈ exp(-norm(v1-v2)^2 / 2) @test kappa(SqExponentialKernel(),x) == kappa(k,x) @test metric(SqExponentialKernel()) == SqEuclidean() @test RBFKernel == SqExponentialKernel @@ -30,19 +30,24 @@ @testset "GammaExponentialKernel" begin γ = 2.0 k = GammaExponentialKernel(γ=γ) - @test kappa(k,x) ≈ exp(-(x)^(γ)) - @test k(v1,v2) ≈ exp(-norm(v1-v2)^(2γ)) - @test kappa(GammaExponentialKernel(),x) == kappa(k,x) + @test k(v1, v2) ≈ exp(-norm(v1 - v2)^γ) + @test kappa(GammaExponentialKernel(), x) == kappa(k, x) @test GammaExponentialKernel(gamma=γ).γ == [γ] - @test metric(GammaExponentialKernel()) == SqEuclidean() - @test metric(GammaExponentialKernel(γ=2.0)) == SqEuclidean() + @test metric(GammaExponentialKernel()) == Euclidean() + @test metric(GammaExponentialKernel(γ=2.0)) == Euclidean() @test repr(k) == "Gamma Exponential Kernel (γ = $(γ))" @test KernelFunctions.iskroncompatible(k) == true - test_ADs(γ -> GammaExponentialKernel(gamma=first(γ)), [γ], ADs = [:ForwardDiff, :ReverseDiff]) + test_ADs( + γ -> GammaExponentialKernel(gamma=first(γ)), [1.0]; + ADs = [:ForwardDiff, :ReverseDiff], + ) @test_broken "Zygote gradient given γ" test_params(k, ([γ],)) #Coherence : - @test GammaExponentialKernel(γ=1.0)(v1,v2) ≈ SqExponentialKernel()(v1,v2) - @test GammaExponentialKernel(γ=0.5)(v1,v2) ≈ ExponentialKernel()(v1,v2) + @test isapprox( + GammaExponentialKernel(γ=2.0)(sqrt(0.5) * v1, sqrt(0.5) * v2), + SqExponentialKernel()(v1,v2), + ) + @test GammaExponentialKernel(γ=1.0)(v1, v2) ≈ ExponentialKernel()(v1, v2) end end diff --git a/test/basekernels/gabor.jl b/test/basekernels/gabor.jl index 052a53eac..1b3fdf7a6 100644 --- a/test/basekernels/gabor.jl +++ b/test/basekernels/gabor.jl @@ -6,7 +6,7 @@ @test k.ell ≈ ell atol=1e-5 @test k.p ≈ p atol=1e-5 - k_manual = exp(-sqeuclidean(v1, v2) / (k.ell^2)) * cospi(euclidean(v1, v2) / k.p) + k_manual = exp(-sqeuclidean(v1, v2) / (2 * k.ell^2)) * cospi(euclidean(v1, v2) / k.p) 
@test k(v1,v2) ≈ k_manual atol=1e-5 lhs_manual = transform(SqExponentialKernel(), 1/k.ell)(v1,v2) diff --git a/test/basekernels/sm.jl b/test/basekernels/sm.jl index daef2bd62..91e55f52b 100644 --- a/test/basekernels/sm.jl +++ b/test/basekernels/sm.jl @@ -12,11 +12,16 @@ t = v1 - v2 - @test k1(v1, v2) ≈ sum(αs₁ .* exp.(-(t' * γs)'.^2) .* - cospi.((t' * ωs)')) atol=1e-5 + @test k1(v1, v2) ≈ sum(αs₁ .* exp.(-(t' * γs)'.^2 ./ 2) .* cospi.((t' * ωs)')) atol=1e-5 - @test k2(v1, v2) ≈ prod(sum(αs₂[i,:]' .* exp.(-(γs[i,:]' * t[i]).^2) .* - cospi.(ωs[i,:]' * t[i])) for i in 1:length(t)) atol=1e-5 + @test isapprox( + k2(v1, v2), + prod( + [sum(αs₂[i,:]' .* exp.(-(γs[i,:]' * t[i]).^2 ./ 2) .* + cospi.(ωs[i,:]' * t[i])) for i in 1:length(t)], + ); + atol=1e-5, + ) @test_throws DimensionMismatch spectral_mixture_kernel(rand(5) ,rand(4,3), rand(4,3)) @test_throws DimensionMismatch spectral_mixture_kernel(rand(3) ,rand(4,3), rand(5,3)) diff --git a/test/kernels/custom.jl b/test/kernels/custom.jl deleted file mode 100644 index 27fb5b6c9..000000000 --- a/test/kernels/custom.jl +++ /dev/null @@ -1,11 +0,0 @@ -# minimal definition of a custom kernel -struct MyKernel <: SimpleKernel end - -KernelFunctions.kappa(::MyKernel, d2::Real) = exp(-d2) -KernelFunctions.metric(::MyKernel) = SqEuclidean() - -@testset "custom" begin - @test kappa(MyKernel(), 3) == kappa(SqExponentialKernel(), 3) - @test kernelmatrix(MyKernel(), [1 2; 3 4], [5 6; 7 8]) == kernelmatrix(SqExponentialKernel(), [1 2; 3 4], [5 6; 7 8]) - @test kernelmatrix(MyKernel(), [1 2; 3 4]) == kernelmatrix(SqExponentialKernel(), [1 2; 3 4]) -end diff --git a/test/matrix/kernelmatrix.jl b/test/matrix/kernelmatrix.jl index 0f0c4882d..2f825c1ae 100644 --- a/test/matrix/kernelmatrix.jl +++ b/test/matrix/kernelmatrix.jl @@ -1,7 +1,7 @@ # Custom Kernel implementation that only defines how to evaluate itself. This is used to # test that fallback kernelmatrix / kerneldiagmatrix methods work properly. struct BaseSE <: KernelFunctions.Kernel end -(k::BaseSE)(x, y) = exp(-evaluate(SqEuclidean(), x, y)) +(k::BaseSE)(x, y) = exp(-evaluate(SqEuclidean(), x, y) / 2) # Custom kernel to test `SimpleKernel` interface on, independently the `SimpleKernel`s that # are implemented in the package. That this happens to be an exponentiated quadratic kernel diff --git a/test/runtests.jl b/test/runtests.jl index 82f90107b..f3d1020f3 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -86,8 +86,8 @@ include("test_utils.jl") include(joinpath("basekernels", "matern.jl")) include(joinpath("basekernels", "nn.jl")) include(joinpath("basekernels", "periodic.jl")) - include(joinpath("basekernels", "polynomial.jl")) include(joinpath("basekernels", "piecewisepolynomial.jl")) + include(joinpath("basekernels", "polynomial.jl")) include(joinpath("basekernels", "rationalquad.jl")) include(joinpath("basekernels", "sm.jl")) include(joinpath("basekernels", "wiener.jl")) @@ -100,10 +100,6 @@ include("test_utils.jl") include(joinpath("kernels", "scaledkernel.jl")) include(joinpath("kernels", "tensorproduct.jl")) include(joinpath("kernels", "transformedkernel.jl")) - - # Legacy tests that don't correspond to anything meaningful in src. Unclear how - # helpful these are. 
- include(joinpath("kernels", "custom.jl")) end @info "Ran tests on Kernel" From c795cdb0840869837246a8650016303f4c515dfa Mon Sep 17 00:00:00 2001 From: Will Tebbutt Date: Sun, 30 Aug 2020 15:58:27 +0100 Subject: [PATCH 03/35] First pass over test set implementation --- src/KernelFunctions.jl | 2 + src/test_utils.jl | 113 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 115 insertions(+) create mode 100644 src/test_utils.jl diff --git a/src/KernelFunctions.jl b/src/KernelFunctions.jl index 15bf5ce51..0a6bcd7c3 100644 --- a/src/KernelFunctions.jl +++ b/src/KernelFunctions.jl @@ -89,6 +89,8 @@ include(joinpath("mokernels", "independent.jl")) include("zygote_adjoints.jl") +include("test_utils.jl") + function __init__() @require Kronecker="2c470bb0-bcc8-11e8-3dad-c9649493f05e" begin include(joinpath("matrix", "kernelkroneckermat.jl")) diff --git a/src/test_utils.jl b/src/test_utils.jl new file mode 100644 index 000000000..bd6cefbbc --- /dev/null +++ b/src/test_utils.jl @@ -0,0 +1,113 @@ +module TestUtils + +const __ATOL = 1e-9 + +using KernelFunctions + +""" + test_interface(k::Kernel, x0::AV, x1::AV, x2::AV; atol=__ATOL) + +Run various consistency checks on `k` at the inputs `x0`, `x1`, and `x2`. +`x0` and `x1` should be of the same length with different values, while `x0` and `x2` should +be of different lengths. + + test_interface([rng::AbstractRNG], k::Kernel, T::Type{<:AbstractVector}; atol=__ATOL) + +`test_interface` offers certain types of test data generation to make running these tests +require less code for common input types. For example, `Vector{<:Real}`, `ColVecs{<:Real}`, +and `RowVecs{<:Real}` are supported. For other input vector types, please provide the data +manually. +""" +function test_interface(k::Kernel, x0::AV, x1::AV, x2::AV; atol=__ATOL) + + # TODO: uncomment the tests of ternary kerneldiagmatrix. + + # Ensure that we have the required inputs. + @assert length(x0) == length(x1) + @assert length(x0) ≠ length(x2) + @assert length(x0) == length(x1) + @assert length(x0) ≠ length(x2) + + # Check that kerneldiagmatrix basically works. + # @test kerneldiagmatrix(k, x0, x1) isa AbstractVector + # @test length(kerneldiagmatrix(k, x0, x1)) == length(x0) + + # Check that pairwise basically works. + @test kernelmatrix(k, x0, x2) isa AbstractMatrix + @test size(kernelmatrix(k, x0, x2)) == (length(x0), length(x2)) + + # Check that elementwise is consistent with pairwise. + # @test kerneldiagmatrix(k, x0, x1) ≈ diag(kernelmatrix(k, x0, x1)) atol=atol + + # Check additional binary elementwise properties for kernels. + # @test kerneldiagmatrix(k, x0, x1) ≈ kerneldiagmatrix(k, x1, x0) + @test kernelmatrix(k, x0, x2) ≈ kernelmatrix(k, x2, x0)' atol=atol + + # Check that unary elementwise basically works. + @test kerneldiagmatrix(k, x0) isa AbstractVector + @test length(kerneldiagmatrix(k, x0)) == length(x0) + + # Check that unary pairwise basically works. + @test kernelmatrix(k, x0) isa AbstractMatrix + @test size(kernelmatrix(k, x0)) == (length(x0), length(x0)) + @test kernelmatrix(k, x0) ≈ kernelmatrix(k, x0)' atol=atol + + # Check that unary elementwise is consistent with unary pairwise. + @test kerneldiagmatrix(k, x0) ≈ diag(kernelmatrix(k, x0)) atol=atol + + # Check that unary pairwise produces a positive definite matrix (approximately). + @test all(eigvals(Matrix(kernelmatrix(k, x0))) .> -atol) + + # Check that unary elementwise / pairwise are consistent with the binary versions. 
+ # @test kerneldiagmatrix(k, x0) ≈ kerneldiagmatrix(k, x0, x0) atol=atol + @test kernelmatrix(k, x0) ≈ kernelmatrix(k, x0, x0) atol=atol + + # Check that basic kernel evaluation succeeds and is consistent with `kernelmatrix`. + @test kernelmatrix(k, x0, x2) ≈ [k(xl, xr) for xl in x0, xr in x2] +end + +function test_interface( + rng::AbstractRNG, k::Kernel, ::Type{Vector{T}}; atol=__ATOL, +) where {T<:Real} + test_interface(k, randn(rng, T, 3), randn(rng, T, 3), randn(rng, T, 2); atol=atol) +end + +function test_interface( + rng::AbstractRNG, k::Kernel, ::Type{<:ColVecs{T}}; atol=__ATOL, +) where {T<:Real} + test_interface( + k, + ColVecs(randn(rng, T, 2, 3)), + ColVecs(randn(rng, T, 2, 3)), + ColVecs(randn(rng, T, 2, 2)); + atol=atol, + ) +end + +function test_interface( + rng::AbstractRNG, k::Kernel, ::Type{<:RowVecs{T}}; atol=__ATOL, +) where {T<:Real} + test_interface( + k, + RowVecs(randn(rng, T, 3, 2)), + RowVecs(randn(rng, T, 3, 2)), + RowVecs(randn(rng, T, 2, 2)); + atol=atol, + ) +end + +function test_interface(k::Kernel, T::Type{<:AbstractVector}; atol=__ATOL) + test_interface(Random.GLOBAL_RNG, k, T) +end + +function test_interface(rng::AbstractRNG, k::Kernel, T::Type{<:Real}; atol=__ATOL) + test_interface(rng, k, Vector{T}; atol=atol) + test_interface(rng, k, ColVecs{T} atol=atol) + test_interface(rng, k, RowVecs{T} atol=atol) +end + +function test_interface(k::Kernel, T::Type{<:Real}; atol=__ATOL) + test_interface(Random.GLOBAL_RNG, k, T; atol=atol) +end + +end # module From 24b0422b9492cf2024a7012506ed32421f635aa8 Mon Sep 17 00:00:00 2001 From: Will Tebbutt Date: Sun, 30 Aug 2020 16:10:48 +0100 Subject: [PATCH 04/35] Add standardised tests to BaseKernels --- src/test_utils.jl | 2 ++ test/basekernels/constant.jl | 19 +++++++++--- test/basekernels/cosine.jl | 9 ++++-- test/basekernels/exponential.jl | 8 +++++ test/basekernels/exponentiated.jl | 5 ++- test/basekernels/fbm.jl | 25 ++++----------- test/basekernels/gabor.jl | 6 +++- test/basekernels/maha.jl | 1 + test/basekernels/matern.jl | 15 +++++++-- test/basekernels/nn.jl | 41 ++----------------------- test/basekernels/periodic.jl | 7 ++++- test/basekernels/piecewisepolynomial.jl | 21 ++----------- test/basekernels/polynomial.jl | 9 +++++- test/basekernels/rationalquad.jl | 6 ++++ test/basekernels/sm.jl | 3 ++ test/basekernels/wiener.jl | 30 ++++-------------- 16 files changed, 92 insertions(+), 115 deletions(-) diff --git a/src/test_utils.jl b/src/test_utils.jl index bd6cefbbc..b2c5b4589 100644 --- a/src/test_utils.jl +++ b/src/test_utils.jl @@ -21,6 +21,7 @@ manually. function test_interface(k::Kernel, x0::AV, x1::AV, x2::AV; atol=__ATOL) # TODO: uncomment the tests of ternary kerneldiagmatrix. + # TODO: add in-place tests. # Ensure that we have the required inputs. @assert length(x0) == length(x1) @@ -63,6 +64,7 @@ function test_interface(k::Kernel, x0::AV, x1::AV, x2::AV; atol=__ATOL) @test kernelmatrix(k, x0) ≈ kernelmatrix(k, x0, x0) atol=atol # Check that basic kernel evaluation succeeds and is consistent with `kernelmatrix`. 
+ @test k(first(x0), first(x1)) isa Real @test kernelmatrix(k, x0, x2) ≈ [k(xl, xr) for xl in x0, xr in x2] end diff --git a/test/basekernels/constant.jl b/test/basekernels/constant.jl index 5a2049675..a98f3e514 100644 --- a/test/basekernels/constant.jl +++ b/test/basekernels/constant.jl @@ -2,31 +2,40 @@ @testset "ZeroKernel" begin k = ZeroKernel() @test eltype(k) == Any - @test kappa(k,2.0) == 0.0 + @test kappa(k, 2.0) == 0.0 @test KernelFunctions.metric(ZeroKernel()) == KernelFunctions.Delta() @test repr(k) == "Zero Kernel" + + # Standardised tests. + TestUtils.test_interface(k, Float64) test_ADs(ZeroKernel) end @testset "WhiteKernel" begin k = WhiteKernel() @test eltype(k) == Any - @test kappa(k,1.0) == 1.0 - @test kappa(k,0.0) == 0.0 + @test kappa(k, 1.0) == 1.0 + @test kappa(k, 0.0) == 0.0 @test EyeKernel == WhiteKernel @test metric(WhiteKernel()) == KernelFunctions.Delta() @test repr(k) == "White Kernel" + + # Standardised tests. + TestUtils.test_interface(k, Float64) test_ADs(WhiteKernel) end @testset "ConstantKernel" begin c = 2.0 k = ConstantKernel(c=c) @test eltype(k) == Any - @test kappa(k,1.0) == c - @test kappa(k,0.5) == c + @test kappa(k, 1.0) == c + @test kappa(k, 0.5) == c @test metric(ConstantKernel()) == KernelFunctions.Delta() @test metric(ConstantKernel(c=2.0)) == KernelFunctions.Delta() @test repr(k) == "Constant Kernel (c = $(c))" test_params(k, ([c],)) + + # Standardised tests. + TestUtils.test_interface(k, Float64) test_ADs(c->ConstantKernel(c=first(c)), [c]) end end diff --git a/test/basekernels/cosine.jl b/test/basekernels/cosine.jl index bf4c060b4..fe318fd93 100644 --- a/test/basekernels/cosine.jl +++ b/test/basekernels/cosine.jl @@ -1,6 +1,6 @@ @testset "cosine" begin rng = MersenneTwister(123456) - x = rand(rng)*2 + x = rand(rng) * 2 v1 = rand(rng, 3) v2 = rand(rng, 3) @@ -9,8 +9,11 @@ @test kappa(k, 1.0) ≈ -1.0 atol=1e-5 @test kappa(k, 2.0) ≈ 1.0 atol=1e-5 @test kappa(k, 1.5) ≈ 0.0 atol=1e-5 - @test kappa(k,x) ≈ cospi(x) atol=1e-5 - @test k(v1, v2) ≈ cospi(sqrt(sum(abs2.(v1-v2)))) atol=1e-5 + @test kappa(k, x) ≈ cospi(x) atol=1e-5 + @test k(v1, v2) ≈ cospi(sqrt(sum(abs2.(v1 - v2)))) atol=1e-5 @test repr(k) == "Cosine Kernel" + + # Standardised tests. + TestUtils.test_interface(k, Float64) test_ADs(CosineKernel) end diff --git a/test/basekernels/exponential.jl b/test/basekernels/exponential.jl index b385fb088..d6f6ba180 100644 --- a/test/basekernels/exponential.jl +++ b/test/basekernels/exponential.jl @@ -14,6 +14,9 @@ @test SEKernel == SqExponentialKernel @test repr(k) == "Squared Exponential Kernel" @test KernelFunctions.iskroncompatible(k) == true + + # Standardised tests. + TestUtils.test_interface(k, Float64) test_ADs(SEKernel) end @testset "ExponentialKernel" begin @@ -25,6 +28,9 @@ @test repr(k) == "Exponential Kernel" @test LaplacianKernel == ExponentialKernel @test KernelFunctions.iskroncompatible(k) == true + + # Standardised tests. 
+ TestUtils.test_interface(k, Float64) test_ADs(ExponentialKernel) end @testset "GammaExponentialKernel" begin @@ -37,6 +43,8 @@ @test metric(GammaExponentialKernel(γ=2.0)) == Euclidean() @test repr(k) == "Gamma Exponential Kernel (γ = $(γ))" @test KernelFunctions.iskroncompatible(k) == true + + TestUtils.test_interface(k, Float64) test_ADs( γ -> GammaExponentialKernel(gamma=first(γ)), [1.0]; ADs = [:ForwardDiff, :ReverseDiff], diff --git a/test/basekernels/exponentiated.jl b/test/basekernels/exponentiated.jl index a8c117b3b..5e608e7ca 100644 --- a/test/basekernels/exponentiated.jl +++ b/test/basekernels/exponentiated.jl @@ -1,6 +1,6 @@ @testset "exponentiated" begin rng = MersenneTwister(123456) - x = rand(rng)*2 + x = rand(rng) * 2 v1 = rand(rng, 3) v2 = rand(rng, 3) @@ -10,5 +10,8 @@ @test k(v1,v2) ≈ exp(dot(v1,v2)) @test metric(ExponentiatedKernel()) == KernelFunctions.DotProduct() @test repr(k) == "Exponentiated Kernel" + + # Standardised tests. + TestUtils.test_interface(k, Float64) test_ADs(ExponentiatedKernel) end diff --git a/test/basekernels/fbm.jl b/test/basekernels/fbm.jl index 0428d8c80..867201d9c 100644 --- a/test/basekernels/fbm.jl +++ b/test/basekernels/fbm.jl @@ -2,28 +2,15 @@ rng = MersenneTwister(42) h = 0.3 k = FBMKernel(h = h) - v1 = rand(rng, 3); v2 = rand(rng, 3) + v1 = rand(rng, 3) + v2 = rand(rng, 3) @test k(v1,v2) ≈ (sqeuclidean(v1, zero(v1))^h + sqeuclidean(v2, zero(v2))^h - sqeuclidean(v1-v2, zero(v1-v2))^h)/2 atol=1e-5 + @test repr(k) == "Fractional Brownian Motion Kernel (h = $(h))" - # kernelmatrix tests - m1 = rand(rng, 3, 3) - m2 = rand(rng, 3, 3) - Kref = kernelmatrix(k, m1, m1) - @test kernelmatrix(k, m1) ≈ Kref atol=1e-5 - K = zeros(3, 3) - kernelmatrix!(K, k, m1, m1) - @test K ≈ Kref atol=1e-5 - fill!(K, 0) - kernelmatrix!(K, k, m1) - @test K ≈ Kref atol=1e-5 - - x1 = rand(rng) - x2 = rand(rng) - @test kernelmatrix(k, x1*ones(1,1), x2*ones(1,1))[1] ≈ k(x1, x2) atol=1e-5 - @test repr(k) == "Fractional Brownian Motion Kernel (h = $(h))" - test_ADs(FBMKernel, ADs = [:ReverseDiff]) + # Standardised tests. + TestUtils.test_interface(k, Float64) + test_ADs(FBMKernel; ADs = [:ReverseDiff]) @test_broken "Tests failing for kernelmatrix(k, x) for ForwardDiff and Zygote" - test_params(k, ([h],)) end diff --git a/test/basekernels/gabor.jl b/test/basekernels/gabor.jl index 1b3fdf7a6..a74226940 100644 --- a/test/basekernels/gabor.jl +++ b/test/basekernels/gabor.jl @@ -1,5 +1,6 @@ @testset "Gabor" begin - v1 = rand(3); v2 = rand(3) + v1 = rand(3) + v2 = rand(3) ell = abs(rand()) p = abs(rand()) k = GaborKernel(ell=ell, p=p) @@ -17,6 +18,9 @@ @test k.ell ≈ 1.0 atol=1e-5 @test k.p ≈ 1.0 atol=1e-5 @test repr(k) == "Gabor Kernel (ell = 1.0, p = 1.0)" + + # Standardised tests. 
+ TestUtils.test_interface(k, Float64) #test_ADs(x -> GaborKernel(ell = x[1], p = x[2]), [ell, p])#, ADs = [:ForwardDiff, :ReverseDiff]) @test_broken "Tests failing for Zygote on differentiating through ell and p" # Tests are also failing randomly for ForwardDiff and ReverseDiff but randomly diff --git a/test/basekernels/maha.jl b/test/basekernels/maha.jl index 898df7b6e..c4351d2ac 100644 --- a/test/basekernels/maha.jl +++ b/test/basekernels/maha.jl @@ -14,5 +14,6 @@ # test_ADs(P -> MahalanobisKernel(P=P), P) @test_broken "Nothing passes (problem with Mahalanobis distance in Distances)" + TestUtils.test_interface(k, Float64) test_params(k, (P,)) end diff --git a/test/basekernels/matern.jl b/test/basekernels/matern.jl index 332e20cb6..061601b3e 100644 --- a/test/basekernels/matern.jl +++ b/test/basekernels/matern.jl @@ -16,6 +16,9 @@ @test repr(k) == "Matern Kernel (ν = $(ν))" # test_ADs(x->MaternKernel(nu=first(x)),[ν]) @test_broken "All fails (because of logabsgamma for ForwardDiff and ReverseDiff and because of nu for Zygote)" + + # Standardised tests. + TestUtils.test_interface(k, Float64) test_params(k, ([ν],)) end @testset "Matern32Kernel" begin @@ -25,6 +28,9 @@ @test kappa(Matern32Kernel(),x) == kappa(k,x) @test metric(Matern32Kernel()) == Euclidean() @test repr(k) == "Matern 3/2 Kernel" + + # Standardised tests. + TestUtils.test_interface(k, Float64) test_ADs(Matern32Kernel) end @testset "Matern52Kernel" begin @@ -34,11 +40,14 @@ @test kappa(Matern52Kernel(),x) == kappa(k,x) @test metric(Matern52Kernel()) == Euclidean() @test repr(k) == "Matern 5/2 Kernel" + + # Standardised tests. + TestUtils.test_interface(k, Float64) test_ADs(Matern52Kernel) end @testset "Coherence Materns" begin - @test kappa(MaternKernel(ν=0.5),x) ≈ kappa(ExponentialKernel(),x) - @test kappa(MaternKernel(ν=1.5),x) ≈ kappa(Matern32Kernel(),x) - @test kappa(MaternKernel(ν=2.5),x) ≈ kappa(Matern52Kernel(),x) + @test kappa(MaternKernel(ν=0.5), x) ≈ kappa(ExponentialKernel(), x) + @test kappa(MaternKernel(ν=1.5), x) ≈ kappa(Matern32Kernel(), x) + @test kappa(MaternKernel(ν=2.5), x) ≈ kappa(Matern52Kernel(), x) end end diff --git a/test/basekernels/nn.jl b/test/basekernels/nn.jl index 6d6bb272c..bad33511e 100644 --- a/test/basekernels/nn.jl +++ b/test/basekernels/nn.jl @@ -4,45 +4,8 @@ v1 = rand(3); v2 = rand(3) @test k(v1,v2) ≈ asin(v1' * v2 / sqrt((1 + v1' * v1) * (1 + v2' * v2))) atol=1e-5 - # kernelmatrix tests - m1 = rand(3,4) - m2 = rand(3,4) - @test kernelmatrix(k, m1, m1) ≈ kernelmatrix(k, m1) atol=1e-5 - @test_broken kernelmatrix(k, m1, m2) ≈ k(m1, m2) atol=1e-5 - - - x1 = rand() - x2 = rand() - @test kernelmatrix(k, x1*ones(1,1), x2*ones(1,1))[1] ≈ k(x1, x2) atol=1e-5 - - @test k(v1, v2) ≈ k(v1, v2) atol=1e-5 - @test typeof(k(v1, v2)) <: Real - - @test_broken size(k(m1, m2)) == (4, 4) - @test_broken size(k(m1)) == (4, 4) - - A1 = ones(4, 4) - kernelmatrix!(A1, k, m1, m2) - @test A1 ≈ kernelmatrix(k, m1, m2) atol=1e-5 - - A2 = ones(4, 4) - kernelmatrix!(A2, k, m1) - @test A2 ≈ kernelmatrix(k, m1) atol=1e-5 - - @test size(kerneldiagmatrix(k, m1)) == (4,) - A3 = kernelmatrix(k, m1) - @test kerneldiagmatrix(k, m1) ≈ [A3[i, i] for i in 1:LinearAlgebra.checksquare(A3)] atol=1e-5 - - A4 = ones(4) - kerneldiagmatrix!(A4, k, m1) - @test kerneldiagmatrix(k, m1) ≈ A4 atol=1e-5 - - A5 = ones(4,4) - @test_throws AssertionError kernelmatrix!(A5, k, m1, m2, obsdim=3) - @test_throws AssertionError kernelmatrix!(A5, k, m1, obsdim=3) - @test_throws DimensionMismatch kernelmatrix!(A5, k, ones(4,3), ones(3,4)) - - @test 
k([x1], [x2]) ≈ k(x1, x2) atol=1e-5 + # Standardised tests. + TestUtils.test_interface(k, Float64) test_ADs(NeuralNetworkKernel, ADs = [:ForwardDiff, :ReverseDiff]) @test_broken "Zygote uncompatible with BaseKernel" end diff --git a/test/basekernels/periodic.jl b/test/basekernels/periodic.jl index 901a359f0..b11553bc6 100644 --- a/test/basekernels/periodic.jl +++ b/test/basekernels/periodic.jl @@ -1,5 +1,7 @@ @testset "Periodic Kernel" begin - x = rand()*2; v1 = rand(3); v2 = rand(3); + x = rand()*2 + v1 = rand(3) + v2 = rand(3) r = rand(3) k = PeriodicKernel(r = r) @test kappa(k, x) ≈ exp(-0.5x) @@ -7,6 +9,9 @@ @test k(v1, v2) == k(v2, v1) @test PeriodicKernel(3)(v1, v2) == PeriodicKernel(r = ones(3))(v1, v2) @test repr(k) == "Periodic Kernel, length(r) = $(length(r)))" + + # Standardised tests. + TestUtils.test_interface(k, Vector{Float64}) # test_ADs(r->PeriodicKernel(r =exp.(r)), log.(r), ADs = [:ForwardDiff, :ReverseDiff]) @test_broken "Undefined adjoint for Sinus metric, and failing randomly for ForwardDiff and ReverseDiff" test_params(k, (r,)) diff --git a/test/basekernels/piecewisepolynomial.jl b/test/basekernels/piecewisepolynomial.jl index 7aa71e8b4..d6d4e6ed5 100644 --- a/test/basekernels/piecewisepolynomial.jl +++ b/test/basekernels/piecewisepolynomial.jl @@ -11,27 +11,12 @@ @test k2(v1, v2) ≈ k(v1, v2) atol=1e-5 - @test typeof(k(v1, v2)) <: Real - @test size(kernelmatrix(k, m1, m2)) == (4, 4) - @test size(kernelmatrix(k, m1)) == (4, 4) - - A1 = ones(4, 4) - kernelmatrix!(A1, k, m1, m2) - @test A1 ≈ kernelmatrix(k, m1, m2) atol=1e-5 - - A2 = ones(4, 4) - kernelmatrix!(A2, k, m1) - @test A2 ≈ kernelmatrix(k, m1) atol=1e-5 - - @test size(kerneldiagmatrix(k, m1)) == (4,) - @test kerneldiagmatrix(k, m1) == ones(4) - A3 = ones(4) - kerneldiagmatrix!(A3, k, m1) - @test A3 == kerneldiagmatrix(k, m1) - @test_throws ErrorException PiecewisePolynomialKernel{4}(maha) @test repr(k) == "Piecewise Polynomial Kernel (v = $(v), size(maha) = $(size(maha)))" + + # Standardised tests. + TestUtils.test_interface(k, Float64) # test_ADs(maha-> PiecewisePolynomialKernel(v=2, maha = maha), maha) @test_broken "Nothing passes (problem with Mahalanobis distance in Distances)" diff --git a/test/basekernels/polynomial.jl b/test/basekernels/polynomial.jl index 5c76f21eb..a9e6d8e5d 100644 --- a/test/basekernels/polynomial.jl +++ b/test/basekernels/polynomial.jl @@ -12,6 +12,9 @@ @test metric(LinearKernel()) == KernelFunctions.DotProduct() @test metric(LinearKernel(c=2.0)) == KernelFunctions.DotProduct() @test repr(k) == "Linear Kernel (c = 0.0)" + + # Standardised tests. + TestUtils.test_interface(k, Float64) test_ADs(x->LinearKernel(c=x[1]), [c]) test_params(LinearKernel(; c=c), ([c],)) end @@ -21,11 +24,15 @@ @test k(v1,v2) ≈ dot(v1,v2)^2 @test kappa(PolynomialKernel(),x) == kappa(k,x) @test repr(k) == "Polynomial Kernel (c = 0.0, d = 2.0)" - #Coherence test + + # Coherence tests. @test kappa(PolynomialKernel(d=1.0,c=c),x) ≈ kappa(LinearKernel(c=c),x) @test metric(PolynomialKernel()) == KernelFunctions.DotProduct() @test metric(PolynomialKernel(d=3.0)) == KernelFunctions.DotProduct() @test metric(PolynomialKernel(d=3.0,c=2.0)) == KernelFunctions.DotProduct() + + # Standardised tests. 
+ TestUtils.test_interface(k, Float64) # test_ADs(x->PolynomialKernel(d=x[1], c=x[2]),[2.0, c]) @test_broken "All, because of the power" test_params(PolynomialKernel(; d=x, c=c), ([x], [c])) diff --git a/test/basekernels/rationalquad.jl b/test/basekernels/rationalquad.jl index 6df59ae81..0f2a27bab 100644 --- a/test/basekernels/rationalquad.jl +++ b/test/basekernels/rationalquad.jl @@ -13,6 +13,9 @@ @test metric(RationalQuadraticKernel()) == SqEuclidean() @test metric(RationalQuadraticKernel(α=2.0)) == SqEuclidean() @test repr(k) == "Rational Quadratic Kernel (α = $(α))" + + # Standardised tests. + TestUtils.test_interface(k, Float64) test_ADs(x->RationalQuadraticKernel(alpha=x[1]),[α]) test_params(k, ([α],)) end @@ -29,6 +32,9 @@ @test metric(GammaRationalQuadraticKernel()) == SqEuclidean() @test metric(GammaRationalQuadraticKernel(γ=2.0)) == SqEuclidean() @test metric(GammaRationalQuadraticKernel(γ=2.0, α=3.0)) == SqEuclidean() + + # Standardised tests. + TestUtils.test_interface(k, Float64) # test_ADs(x->GammaRationalQuadraticKernel(α=x[1], γ=x[2]), [a, 2.0]) @test_broken "All (problem with power operation)" test_params(GammaRationalQuadraticKernel(; α=a, γ=x), ([a], [x])) diff --git a/test/basekernels/sm.jl b/test/basekernels/sm.jl index 91e55f52b..7ccccd2b6 100644 --- a/test/basekernels/sm.jl +++ b/test/basekernels/sm.jl @@ -26,6 +26,9 @@ @test_throws DimensionMismatch spectral_mixture_kernel(rand(5) ,rand(4,3), rand(4,3)) @test_throws DimensionMismatch spectral_mixture_kernel(rand(3) ,rand(4,3), rand(5,3)) @test_throws DimensionMismatch spectral_mixture_product_kernel(rand(5,3) ,rand(4,3), rand(5,3)) + + # Standardised tests. + TestUtils.test_interface(k, Float64) # test_ADs(x->spectral_mixture_kernel(exp.(x[1:3]), reshape(x[4:18], 5, 3), reshape(x[19:end], 5, 3)), vcat(log.(αs₁), γs[:], ωs[:]), dims = [5,5]) @test_broken "No tests passing (BaseKernel)" end diff --git a/test/basekernels/wiener.jl b/test/basekernels/wiener.jl index 624837b8c..9d269f7ba 100644 --- a/test/basekernels/wiener.jl +++ b/test/basekernels/wiener.jl @@ -3,16 +3,16 @@ @test typeof(k_1) <: WhiteKernel k0 = WienerKernel() - @test typeof(k0) <: WienerKernel{0} + @test k0 isa WienerKernel{0} k1 = WienerKernel(i=1) - @test typeof(k1) <: WienerKernel{1} + @test k1 isa WienerKernel{1} k2 = WienerKernel(i=2) - @test typeof(k2) <: WienerKernel{2} + @test k2 isa WienerKernel{2} k3 = WienerKernel(i=3) - @test typeof(k3) <: WienerKernel{3} + @test k3 isa WienerKernel{3} @test_throws AssertionError WienerKernel(i=4) @test_throws AssertionError WienerKernel(i=-2) @@ -31,26 +31,8 @@ @test k3(v1, v2) ≈ 1 / 252 * minXY^7 + 1 / 720 * minXY^4 * euclidean(v1, v2) * ( 5 * max(X, Y)^2 + 2 * X * Y + 3 * minXY^2 ) - # kernelmatrix tests - m1 = rand(3,4) - m2 = rand(3,4) - @test kernelmatrix(k0, m1, m1) ≈ kernelmatrix(k0, m1) atol=1e-5 - - K = zeros(4,4) - kernelmatrix!(K,k0,m1,m2) - @test K ≈ kernelmatrix(k0, m1, m2) atol=1e-5 - - V = zeros(4) - kerneldiagmatrix!(V,k0,m1) - @test V ≈ kerneldiagmatrix(k0,m1) atol=1e-5 - - x1 = rand() - x2 = rand() - @test kernelmatrix(k0, x1*ones(1,1), x2*ones(1,1))[1] ≈ k0(x1, x2) atol=1e-5 - @test kernelmatrix(k1, x1*ones(1,1), x2*ones(1,1))[1] ≈ k1(x1, x2) atol=1e-5 - @test kernelmatrix(k2, x1*ones(1,1), x2*ones(1,1))[1] ≈ k2(x1, x2) atol=1e-5 - @test kernelmatrix(k3, x1*ones(1,1), x2*ones(1,1))[1] ≈ k3(x1, x2) atol=1e-5 - + # Standardised tests. 
+ TestUtils.test_interface(k, Float64) # test_ADs(()->WienerKernel(i=1)) @test_broken "No tests passing" end From 32877de64c34a48de82fd77eb17c462278b7da0b Mon Sep 17 00:00:00 2001 From: Will Tebbutt Date: Sun, 30 Aug 2020 16:13:49 +0100 Subject: [PATCH 05/35] Test composite kernels --- test/kernels/kernelproduct.jl | 41 +++------------ test/kernels/kernelsum.jl | 40 +++------------ test/kernels/scaledkernel.jl | 33 +----------- test/kernels/tensorproduct.jl | 84 ++++--------------------------- test/kernels/transformedkernel.jl | 32 +----------- 5 files changed, 24 insertions(+), 206 deletions(-) diff --git a/test/kernels/kernelproduct.jl b/test/kernels/kernelproduct.jl index 3304a15a8..4d9afc065 100644 --- a/test/kernels/kernelproduct.jl +++ b/test/kernels/kernelproduct.jl @@ -32,41 +32,12 @@ @test (KernelProduct((k1, k2)) * k3).kernels == (k1, k2, k3) @test (k3 * KernelProduct((k1, k2))).kernels == (k3, k1, k2) - @testset "kernelmatrix" begin - rng = MersenneTwister(123456) - - Nx = 5 - Ny = 4 - D = 3 - - w1 = rand(rng) + 1e-3 - w2 = rand(rng) + 1e-3 - k1 = w1 * SqExponentialKernel() - k2 = w2 * LinearKernel() - k = k1 * k2 - - @testset "$(typeof(x))" for (x, y) in [ - (ColVecs(randn(rng, D, Nx)), ColVecs(randn(rng, D, Ny))), - (RowVecs(randn(rng, Nx, D)), RowVecs(randn(rng, Ny, D))), - ] - @test kernelmatrix(k, x, y) ≈ kernelmatrix(k1, x, y) .* kernelmatrix(k2, x, y) - - @test kernelmatrix(k, x) ≈ kernelmatrix(k1, x) .* kernelmatrix(k2, x) - - K_diag_manual = kerneldiagmatrix(k1, x) .* kerneldiagmatrix(k2, x) - @test kerneldiagmatrix(k, x) ≈ K_diag_manual - - tmp = Matrix{Float64}(undef, length(x), length(y)) - @test kernelmatrix!(tmp, k, x, y) ≈ kernelmatrix(k, x, y) - - tmp_square = Matrix{Float64}(undef, length(x), length(x)) - @test kernelmatrix!(tmp_square, k, x) ≈ kernelmatrix(k, x) - - tmp_diag = Vector{Float64}(undef, length(x)) - @test kerneldiagmatrix!(tmp_diag, k, x) ≈ kerneldiagmatrix(k, x) - end - end - test_ADs(x->SqExponentialKernel() * LinearKernel(c= x[1]), rand(1), ADs = [:ForwardDiff, :ReverseDiff, :Zygote]) + # Standardised tests. 
+ TestUtils.test_interface(k, Float64) + test_ADs( + x->SqExponentialKernel() * LinearKernel(c= x[1]), rand(1); + ADs = [:ForwardDiff, :ReverseDiff, :Zygote], + ) test_params(k1 * k2, (k1, k2)) end diff --git a/test/kernels/kernelsum.jl b/test/kernels/kernelsum.jl index 0e05978ae..13aaa2143 100644 --- a/test/kernels/kernelsum.jl +++ b/test/kernels/kernelsum.jl @@ -32,40 +32,12 @@ @test (KernelSum((k1, k2)) + k3).kernels == (k1, k2, k3) @test (k3 + KernelSum((k1, k2))).kernels == (k3, k1, k2) - @testset "kernelmatrix" begin - rng = MersenneTwister(123456) - - Nx = 5 - Ny = 4 - D = 3 - - w1 = rand(rng) + 1e-3 - w2 = rand(rng) + 1e-3 - k1 = w1 * SqExponentialKernel() - k2 = w2 * LinearKernel() - k = k1 + k2 - - @testset "$(typeof(x))" for (x, y) in [ - (ColVecs(randn(rng, D, Nx)), ColVecs(randn(rng, D, Ny))), - (RowVecs(randn(rng, Nx, D)), RowVecs(randn(rng, Ny, D))), - ] - @test kernelmatrix(k, x, y) ≈ kernelmatrix(k1, x, y) + kernelmatrix(k2, x, y) - - @test kernelmatrix(k, x) ≈ kernelmatrix(k1, x) + kernelmatrix(k2, x) - - @test kerneldiagmatrix(k, x) ≈ kerneldiagmatrix(k1, x) + kerneldiagmatrix(k2, x) - - tmp = Matrix{Float64}(undef, length(x), length(y)) - @test kernelmatrix!(tmp, k, x, y) ≈ kernelmatrix(k, x, y) - - tmp_square = Matrix{Float64}(undef, length(x), length(x)) - @test kernelmatrix!(tmp_square, k, x) ≈ kernelmatrix(k, x) - - tmp_diag = Vector{Float64}(undef, length(x)) - @test kerneldiagmatrix!(tmp_diag, k, x) ≈ kerneldiagmatrix(k, x) - end - end - test_ADs(x->KernelSum(SqExponentialKernel(),LinearKernel(c= x[1])), rand(1), ADs = [:ForwardDiff, :ReverseDiff, :Zygote]) + # Standardised tests. + TestUtils.test_interface(k, Float64) + test_ADs( + x->KernelSum(SqExponentialKernel(),LinearKernel(c= x[1])), rand(1); + ADs = [:ForwardDiff, :ReverseDiff, :Zygote], + ) test_params(k1 + k2, (k1, k2)) end diff --git a/test/kernels/scaledkernel.jl b/test/kernels/scaledkernel.jl index 151d37190..99bc71a18 100644 --- a/test/kernels/scaledkernel.jl +++ b/test/kernels/scaledkernel.jl @@ -9,37 +9,8 @@ @test ks(x, y) == s * k(x, y) @test ks(x, y) == (s * k)(x, y) - @testset "kernelmatrix" begin - rng = MersenneTwister(123456) - - Nx = 5 - Ny = 4 - D = 3 - - k = SqExponentialKernel() - s = rand(rng) + 1e-3 - ks = s * k - - @testset "$(typeof(x))" for (x, y) in [ - (ColVecs(randn(rng, D, Nx)), ColVecs(randn(rng, D, Ny))), - (RowVecs(randn(rng, Nx, D)), RowVecs(randn(rng, Ny, D))), - ] - @test kernelmatrix(ks, x, y) ≈ s .* kernelmatrix(k, x, y) - - @test kernelmatrix(ks, x) ≈ s .* kernelmatrix(k, x) - - @test kerneldiagmatrix(ks, x) ≈ s .* kerneldiagmatrix(k, x) - - tmp = Matrix{Float64}(undef, length(x), length(y)) - @test_broken kernelmatrix!(tmp, ks, x, y) ≈ kernelmatrix(ks, x, y) - - tmp_square = Matrix{Float64}(undef, length(x), length(x)) - @test_broken kernelmatrix!(tmp_square, ks, x) ≈ kernelmatrix(ks, x) - - tmp_diag = Vector{Float64}(undef, length(x)) - @test_broken kerneldiagmatrix!(tmp_diag, ks, x) ≈ kerneldiagmatrix(ks, x) - end - end + # Standardised tests. 
+ TestUtils.test_interface(k, Float64) test_ADs(x->exp(x[1]) * SqExponentialKernel(), rand(1)) test_params(s * k, (k, [s])) diff --git a/test/kernels/tensorproduct.jl b/test/kernels/tensorproduct.jl index 78ed81e26..a81332690 100644 --- a/test/kernels/tensorproduct.jl +++ b/test/kernels/tensorproduct.jl @@ -24,39 +24,13 @@ end end - @testset "kernelmatrix and kerneldiagmatrix" begin - X = rand(rng, 2, 10) - x_cols = ColVecs(X) - x_rows = RowVecs(X') - Y = rand(rng, 2, 10) - y_cols = ColVecs(Y) - y_rows = RowVecs(Y') - - trueX = kernelmatrix(k1, X[1, :]) .* kernelmatrix(k2, X[2, :]) - trueXY = kernelmatrix(k1, X[1, :], Y[1, :]) .* kernelmatrix(k2, X[2, :], Y[2, :]) - tmp = Matrix{Float64}(undef, 10, 10) - tmp_diag = Vector{Float64}(undef, 10) - - for kernel in (kernel1, kernel2), (x, y) in ((x_cols, y_cols), (x_rows, y_rows)) - @test kernelmatrix(kernel, x) ≈ trueX - - @test kernelmatrix(kernel, x, y) ≈ trueXY - - fill!(tmp, 0) - kernelmatrix!(tmp, kernel, x) - @test tmp ≈ trueX - - fill!(tmp, 0) - kernelmatrix!(tmp, kernel, x, y) - @test tmp ≈ trueXY - - @test kerneldiagmatrix(kernel, x) ≈ diag(kernelmatrix(kernel, x)) - - fill!(tmp_diag, 0) - kerneldiagmatrix!(tmp_diag, kernel, x) - @test tmp_diag ≈ diag(kernelmatrix(kernel, x)) - end - end + # Standardised tests. + TestUtils.test_interface(k, Float64) + test_ADs( + ()->TensorProduct(SqExponentialKernel(), LinearKernel()); + dims = [2, 2], + ) # ADs = [:ForwardDiff, :ReverseDiff]) + test_params(TensorProduct(k1, k2), (k1, k2)) @testset "single kernel" begin kernel = TensorProduct(k1) @@ -70,48 +44,8 @@ end end - @testset "kernelmatrix" begin - N = 10 - - x = randn(rng, N) - y = randn(rng, N) - vectors = (x, y) - - X = reshape(x, 1, :) - x_cols = ColVecs(X) - x_rows = RowVecs(X') - Y = reshape(y, 1, :) - y_cols = ColVecs(Y) - y_rows = RowVecs(Y') - - trueX = kernelmatrix(k1, x) - trueXY = kernelmatrix(k1, x, y) - tmp = Matrix{Float64}(undef, N, N) - tmp_diag = Vector{Float64}(undef, N) - - for (x, y) in ((x, y), (x_cols, y_cols), (x_rows, y_rows)) - - @test kernelmatrix(kernel, x) ≈ trueX - - @test kernelmatrix(kernel, x, y) ≈ trueXY - - fill!(tmp, 0) - kernelmatrix!(tmp, kernel, x) - @test tmp ≈ trueX - - fill!(tmp, 0) - kernelmatrix!(tmp, kernel, x, y) - @test tmp ≈ trueXY - - @test kerneldiagmatrix(kernel, x) ≈ diag(kernelmatrix(kernel, x)) - - fill!(tmp_diag, 0) - kerneldiagmatrix!(tmp_diag, kernel, x) - @test tmp_diag ≈ diag(kernelmatrix(kernel, x)) - end + # Standardised tests. 
+ TestUtils.test_interface(k, Float64) end end - test_ADs(()->TensorProduct(SqExponentialKernel(), LinearKernel()), dims = [2, 2]) # ADs = [:ForwardDiff, :ReverseDiff]) - - test_params(TensorProduct(k1, k2), (k1, k2)) end diff --git a/test/kernels/transformedkernel.jl b/test/kernels/transformedkernel.jl index 8332a22b9..fc93aa635 100644 --- a/test/kernels/transformedkernel.jl +++ b/test/kernels/transformedkernel.jl @@ -20,37 +20,7 @@ @test KernelFunctions.kernel(kt) == k @test repr(kt) == repr(k) * "\n\t- " * repr(ScaleTransform(s)) - @testset "kernelmatrix" begin - rng = MersenneTwister(123456) - - Nx = 5 - Ny = 4 - D = 3 - - k = SqExponentialKernel() - t = ScaleTransform(randn(rng)) - kt = TransformedKernel(k, t) - - @testset "$(typeof(x))" for (x, y) in [ - (ColVecs(randn(rng, D, Nx)), ColVecs(randn(rng, D, Ny))), - (RowVecs(randn(rng, Nx, D)), RowVecs(randn(rng, Ny, D))), - ] - @test kernelmatrix(kt, x, y) ≈ kernelmatrix(k, map(t, x), map(t, y)) - - @test kernelmatrix(kt, x) ≈ kernelmatrix(k, map(t, x)) - - @test kerneldiagmatrix(kt, x) ≈ kerneldiagmatrix(k, map(t, x)) - - tmp = Matrix{Float64}(undef, length(x), length(y)) - @test kernelmatrix!(tmp, kt, x, y) ≈ kernelmatrix(kt, x, y) - - tmp_square = Matrix{Float64}(undef, length(x), length(x)) - @test kernelmatrix!(tmp_square, kt, x) ≈ kernelmatrix(kt, x) - - tmp_diag = Vector{Float64}(undef, length(x)) - @test kerneldiagmatrix!(tmp_diag, kt, x) ≈ kerneldiagmatrix(kt, x) - end - end + TestUtils.test_interface(k, Float64) test_ADs(x->transform(SqExponentialKernel(), x[1]), rand(1))# ADs = [:ForwardDiff, :ReverseDiff]) # Test implicit gradients @testset "Implicit gradients" begin From a32eae68a97b47ac8e98f7f8ec05a8fe8fa3ba77 Mon Sep 17 00:00:00 2001 From: Will Tebbutt Date: Sun, 30 Aug 2020 16:56:19 +0100 Subject: [PATCH 06/35] Fix some tests --- Project.toml | 2 + src/KernelFunctions.jl | 2 + src/test_utils.jl | 69 ++++++++++++++++--------- test/basekernels/cosine.jl | 2 +- test/basekernels/periodic.jl | 4 +- test/basekernels/piecewisepolynomial.jl | 5 +- test/basekernels/wiener.jl | 10 +++- 7 files changed, 65 insertions(+), 29 deletions(-) diff --git a/Project.toml b/Project.toml index 5c2167326..6c13ec4f9 100644 --- a/Project.toml +++ b/Project.toml @@ -8,10 +8,12 @@ Distances = "b4f34e82-e78d-54a5-968a-f98e89d6e8f7" Functors = "d9f16b24-f501-4c13-a1f2-28368ffc5196" InteractiveUtils = "b77e0a4c-d291-57a0-90e8-8db25a27a240" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" +Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" Requires = "ae029012-a4dd-5104-9daa-d747884805df" SpecialFunctions = "276daf66-3868-5448-9aa4-cd146d93841b" StatsBase = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91" StatsFuns = "4c63d2b9-4356-54db-8cca-17b64c39e42c" +Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" ZygoteRules = "700de1a5-db45-46bc-99cf-38207098b444" [compat] diff --git a/src/KernelFunctions.jl b/src/KernelFunctions.jl index 0a6bcd7c3..7acd39a25 100644 --- a/src/KernelFunctions.jl +++ b/src/KernelFunctions.jl @@ -31,6 +31,8 @@ export NystromFact, nystrom export spectral_mixture_kernel, spectral_mixture_product_kernel +export ColVecs, RowVecs + export MOInput export IndependentMOKernel diff --git a/src/test_utils.jl b/src/test_utils.jl index b2c5b4589..6610462ed 100644 --- a/src/test_utils.jl +++ b/src/test_utils.jl @@ -2,10 +2,19 @@ module TestUtils const __ATOL = 1e-9 +using LinearAlgebra using KernelFunctions +using Random +using Test """ - test_interface(k::Kernel, x0::AV, x1::AV, x2::AV; atol=__ATOL) + test_interface( + k::Kernel, + 
x0::AbstractVector, + x1::AbstractVector, + x2::AbstractVector; + atol=__ATOL, + ) Run various consistency checks on `k` at the inputs `x0`, `x1`, and `x2`. `x0` and `x1` should be of the same length with different values, while `x0` and `x2` should @@ -18,8 +27,13 @@ require less code for common input types. For example, `Vector{<:Real}`, `ColVec and `RowVecs{<:Real}` are supported. For other input vector types, please provide the data manually. """ -function test_interface(k::Kernel, x0::AV, x1::AV, x2::AV; atol=__ATOL) - +function test_interface( + k::Kernel, + x0::AbstractVector, + x1::AbstractVector, + x2::AbstractVector; + atol=__ATOL, +) # TODO: uncomment the tests of ternary kerneldiagmatrix. # TODO: add in-place tests. @@ -66,50 +80,59 @@ function test_interface(k::Kernel, x0::AV, x1::AV, x2::AV; atol=__ATOL) # Check that basic kernel evaluation succeeds and is consistent with `kernelmatrix`. @test k(first(x0), first(x1)) isa Real @test kernelmatrix(k, x0, x2) ≈ [k(xl, xr) for xl in x0, xr in x2] + + tmp = Matrix{Float64}(undef, length(x0), length(x2)) + @test kernelmatrix!(tmp, k, x0, x2) ≈ kernelmatrix(k, x0, x2) + + tmp_square = Matrix{Float64}(undef, length(x0), length(x0)) + @test kernelmatrix!(tmp_square, k, x0) ≈ kernelmatrix(k, x0) + + tmp_diag = Vector{Float64}(undef, length(x0)) + @test kerneldiagmatrix!(tmp_diag, k, x0) ≈ kerneldiagmatrix(k, x0) end function test_interface( - rng::AbstractRNG, k::Kernel, ::Type{Vector{T}}; atol=__ATOL, + rng::AbstractRNG, k::Kernel, ::Type{Vector{T}}; kwargs... ) where {T<:Real} - test_interface(k, randn(rng, T, 3), randn(rng, T, 3), randn(rng, T, 2); atol=atol) + test_interface(k, randn(rng, T, 3), randn(rng, T, 3), randn(rng, T, 2); kwargs...) end function test_interface( - rng::AbstractRNG, k::Kernel, ::Type{<:ColVecs{T}}; atol=__ATOL, + rng::AbstractRNG, k::Kernel, ::Type{<:ColVecs{T}}; dim_in=2, kwargs..., ) where {T<:Real} test_interface( k, - ColVecs(randn(rng, T, 2, 3)), - ColVecs(randn(rng, T, 2, 3)), - ColVecs(randn(rng, T, 2, 2)); - atol=atol, + ColVecs(randn(rng, T, dim_in, 3)), + ColVecs(randn(rng, T, dim_in, 3)), + ColVecs(randn(rng, T, dim_in, 2)); + kwargs..., ) end function test_interface( - rng::AbstractRNG, k::Kernel, ::Type{<:RowVecs{T}}; atol=__ATOL, + rng::AbstractRNG, k::Kernel, ::Type{<:RowVecs{T}}; dim_in=2, kwargs..., ) where {T<:Real} test_interface( k, - RowVecs(randn(rng, T, 3, 2)), - RowVecs(randn(rng, T, 3, 2)), - RowVecs(randn(rng, T, 2, 2)); - atol=atol, + RowVecs(randn(rng, T, 3, dim_in)), + RowVecs(randn(rng, T, 3, dim_in)), + RowVecs(randn(rng, T, 2, dim_in)); + kwargs..., ) end -function test_interface(k::Kernel, T::Type{<:AbstractVector}; atol=__ATOL) - test_interface(Random.GLOBAL_RNG, k, T) +function test_interface(k::Kernel, T::Type{<:AbstractVector}; kwargs...) + test_interface(Random.GLOBAL_RNG, k, T; kwargs...) end -function test_interface(rng::AbstractRNG, k::Kernel, T::Type{<:Real}; atol=__ATOL) - test_interface(rng, k, Vector{T}; atol=atol) - test_interface(rng, k, ColVecs{T} atol=atol) - test_interface(rng, k, RowVecs{T} atol=atol) +function test_interface(rng::AbstractRNG, k::Kernel, T::Type{<:Real}; kwargs...) + test_interface(rng, k, Vector{T}; kwargs...) + test_interface(rng, k, ColVecs{T}; kwargs...) + test_interface(rng, k, RowVecs{T}; kwargs...) end -function test_interface(k::Kernel, T::Type{<:Real}; atol=__ATOL) - test_interface(Random.GLOBAL_RNG, k, T; atol=atol) +function test_interface(k::Kernel, T::Type{<:Real}; kwargs...) + test_interface(Random.GLOBAL_RNG, k, T; kwargs...) 
end end # module diff --git a/test/basekernels/cosine.jl b/test/basekernels/cosine.jl index fe318fd93..0c6792767 100644 --- a/test/basekernels/cosine.jl +++ b/test/basekernels/cosine.jl @@ -14,6 +14,6 @@ @test repr(k) == "Cosine Kernel" # Standardised tests. - TestUtils.test_interface(k, Float64) + TestUtils.test_interface(k, Vector{Float64}) test_ADs(CosineKernel) end diff --git a/test/basekernels/periodic.jl b/test/basekernels/periodic.jl index b11553bc6..b5f1eb11c 100644 --- a/test/basekernels/periodic.jl +++ b/test/basekernels/periodic.jl @@ -11,7 +11,9 @@ @test repr(k) == "Periodic Kernel, length(r) = $(length(r)))" # Standardised tests. - TestUtils.test_interface(k, Vector{Float64}) + TestUtils.test_interface(PeriodicKernel(r=[0.9]), Vector{Float64}) + TestUtils.test_interface(PeriodicKernel(r=[0.9, 0.9]), ColVecs{Float64}) + TestUtils.test_interface(PeriodicKernel(r=[0.8, 0.7]), RowVecs{Float64}) # test_ADs(r->PeriodicKernel(r =exp.(r)), log.(r), ADs = [:ForwardDiff, :ReverseDiff]) @test_broken "Undefined adjoint for Sinus metric, and failing randomly for ForwardDiff and ReverseDiff" test_params(k, (r,)) diff --git a/test/basekernels/piecewisepolynomial.jl b/test/basekernels/piecewisepolynomial.jl index d6d4e6ed5..adaea47e6 100644 --- a/test/basekernels/piecewisepolynomial.jl +++ b/test/basekernels/piecewisepolynomial.jl @@ -3,7 +3,7 @@ v2 = rand(3) m1 = rand(3, 4) m2 = rand(3, 4) - maha = ones(3, 3) + maha = Matrix{Float64}(I, 3, 3) v = 3 k = PiecewisePolynomialKernel{v}(maha) @@ -16,7 +16,8 @@ @test repr(k) == "Piecewise Polynomial Kernel (v = $(v), size(maha) = $(size(maha)))" # Standardised tests. - TestUtils.test_interface(k, Float64) + TestUtils.test_interface(k, ColVecs{Float64}; dim_in=3) + TestUtils.test_interface(k, RowVecs{Float64}; dim_in=3) # test_ADs(maha-> PiecewisePolynomialKernel(v=2, maha = maha), maha) @test_broken "Nothing passes (problem with Mahalanobis distance in Distances)" diff --git a/test/basekernels/wiener.jl b/test/basekernels/wiener.jl index 9d269f7ba..55218483a 100644 --- a/test/basekernels/wiener.jl +++ b/test/basekernels/wiener.jl @@ -31,8 +31,14 @@ @test k3(v1, v2) ≈ 1 / 252 * minXY^7 + 1 / 720 * minXY^4 * euclidean(v1, v2) * ( 5 * max(X, Y)^2 + 2 * X * Y + 3 * minXY^2 ) - # Standardised tests. - TestUtils.test_interface(k, Float64) + # Standardised tests. Requires careful input choice. 
+ x0 = rand(3) + x1 = rand(3) + x2 = rand(2) + TestUtils.test_interface(k0, x0, x1, x2) + TestUtils.test_interface(k1, x0, x1, x2) + TestUtils.test_interface(k2, x0, x1, x2) + TestUtils.test_interface(k3, x0, x1, x2) # test_ADs(()->WienerKernel(i=1)) @test_broken "No tests passing" end From 5815b4193155db9bbd4a290682820246242ededf Mon Sep 17 00:00:00 2001 From: Will Tebbutt Date: Sun, 30 Aug 2020 17:31:35 +0100 Subject: [PATCH 07/35] Fix maha --- src/KernelFunctions.jl | 1 + src/distances/mahalanobis.jl | 2 ++ test/basekernels/maha.jl | 22 ++++++++++++++++++---- test/distances/mahalanobis.jl | 3 +++ 4 files changed, 24 insertions(+), 4 deletions(-) create mode 100644 src/distances/mahalanobis.jl create mode 100644 test/distances/mahalanobis.jl diff --git a/src/KernelFunctions.jl b/src/KernelFunctions.jl index 7acd39a25..9919b4694 100644 --- a/src/KernelFunctions.jl +++ b/src/KernelFunctions.jl @@ -58,6 +58,7 @@ include("utils.jl") include(joinpath("distances", "pairwise.jl")) include(joinpath("distances", "dotproduct.jl")) include(joinpath("distances", "delta.jl")) +include(joinpath("distances", "mahalanobis")) include(joinpath("distances", "sinus.jl")) include(joinpath("transform", "transform.jl")) diff --git a/src/distances/mahalanobis.jl b/src/distances/mahalanobis.jl new file mode 100644 index 000000000..b11a1f4de --- /dev/null +++ b/src/distances/mahalanobis.jl @@ -0,0 +1,2 @@ +Distances.pairwise(d::SqMahalanobis, x::ColVecs, y::ColVecs) = pairwise(d, x.X, y.X; dims=2) +Distances.pairwise(d::SqMahalanobis, x::RowVecs, y::RowVecs) = pairwise(d, x.X, y.X; dims=1) diff --git a/test/basekernels/maha.jl b/test/basekernels/maha.jl index c4351d2ac..08638578a 100644 --- a/test/basekernels/maha.jl +++ b/test/basekernels/maha.jl @@ -1,10 +1,12 @@ @testset "maha" begin rng = MersenneTwister(123456) x = 2 * rand(rng) - v1 = rand(rng, 3) - v2 = rand(rng, 3) + D_in = 3 + v1 = rand(rng, D_in) + v2 = rand(rng, D_in) - P = rand(rng, 3, 3) + P_ = randn(3, 3) + P = collect(Symmetric(P_ * P_' + I)) k = MahalanobisKernel(P=P) @test kappa(k, x) == exp(-x) @@ -14,6 +16,18 @@ # test_ADs(P -> MahalanobisKernel(P=P), P) @test_broken "Nothing passes (problem with Mahalanobis distance in Distances)" - TestUtils.test_interface(k, Float64) + # Standardised tests. + @testset "ColVecs" begin + x0 = ColVecs(randn(D_in, 3)) + x1 = ColVecs(randn(D_in, 3)) + x2 = ColVecs(randn(D_in, 2)) + TestUtils.test_interface(k, Float64) + end + @testset "RowVecs" begin + x0 = ColVecs(randn(3, D_in)) + x1 = ColVecs(randn(3, D_in)) + x2 = ColVecs(randn(2, D_in)) + TestUtils.test_interface(k, Float64) + end test_params(k, (P,)) end diff --git a/test/distances/mahalanobis.jl b/test/distances/mahalanobis.jl new file mode 100644 index 000000000..90b4004af --- /dev/null +++ b/test/distances/mahalanobis.jl @@ -0,0 +1,3 @@ +@testset "mahalanobis" begin + # Couldn't find any tests to write that aren't just copying the code. 
+end From 14178ce4c20b599a6ef35ef6398a83532d220348 Mon Sep 17 00:00:00 2001 From: Will Tebbutt Date: Sun, 30 Aug 2020 17:49:16 +0100 Subject: [PATCH 08/35] Fix sm --- src/basekernels/sm.jl | 6 +++--- src/utils.jl | 2 ++ test/basekernels/sm.jl | 29 ++++++++++++++++++++++------- 3 files changed, 27 insertions(+), 10 deletions(-) diff --git a/src/basekernels/sm.jl b/src/basekernels/sm.jl index 1b4875ecf..cfb8bf2e0 100644 --- a/src/basekernels/sm.jl +++ b/src/basekernels/sm.jl @@ -54,7 +54,7 @@ function spectral_mixture_kernel( γs::AbstractMatrix{<:Real}, ωs::AbstractMatrix{<:Real} ) - spectral_mixture_kernel(SqExponentialKernel(), αs, γs, ωs) + return spectral_mixture_kernel(SqExponentialKernel(), αs, γs, ωs) end """ @@ -95,7 +95,7 @@ function spectral_mixture_product_kernel( throw(DimensionMismatch("The dimensions of αs, γs, ans ωs do not match")) end return TensorProduct(spectral_mixture_kernel(h, α, reshape(γ, 1, :), reshape(ω, 1, :)) - for (α, γ, ω) in zip(eachrow(αs), eachrow(γs), eachrow(ωs))) + for (α, γ, ω) in zip(eachrow(αs), eachrow(γs), eachrow(ωs))) end function spectral_mixture_product_kernel( @@ -103,6 +103,6 @@ function spectral_mixture_product_kernel( γs::AbstractMatrix{<:Real}, ωs::AbstractMatrix{<:Real} ) - spectral_mixture_product_kernel(SqExponentialKernel(), αs, γs, ωs) + return spectral_mixture_product_kernel(SqExponentialKernel(), αs, γs, ωs) end diff --git a/src/utils.jl b/src/utils.jl index b550d9f1e..cf8a7994f 100644 --- a/src/utils.jl +++ b/src/utils.jl @@ -64,6 +64,8 @@ struct RowVecs{T, TX<:AbstractMatrix{T}, S} <: AbstractVector{S} end end +RowVecs(x::AbstractVector) = RowVecs(reshape(x, :, 1)) + Base.size(D::RowVecs) = (size(D.X, 1),) Base.getindex(D::RowVecs, i::Int) = view(D.X, i, :) Base.getindex(D::RowVecs, i::CartesianIndex{1}) = view(D.X, i, :) diff --git a/test/basekernels/sm.jl b/test/basekernels/sm.jl index 7ccccd2b6..0505f6560 100644 --- a/test/basekernels/sm.jl +++ b/test/basekernels/sm.jl @@ -1,11 +1,13 @@ @testset "sm" begin - v1 = rand(5) - v2 = rand(5) + + D_in = 5 + v1 = rand(D_in) + v2 = rand(D_in) αs₁ = rand(3) - αs₂ = rand(5, 3) - γs = rand(5, 3) - ωs = rand(5, 3) + αs₂ = rand(D_in, 3) + γs = rand(D_in, 3) + ωs = rand(D_in, 3) k1 = spectral_mixture_kernel(αs₁, γs, ωs) k2 = spectral_mixture_product_kernel(αs₂, γs, ωs) @@ -27,8 +29,21 @@ @test_throws DimensionMismatch spectral_mixture_kernel(rand(3) ,rand(4,3), rand(5,3)) @test_throws DimensionMismatch spectral_mixture_product_kernel(rand(5,3) ,rand(4,3), rand(5,3)) - # Standardised tests. - TestUtils.test_interface(k, Float64) + # Standardised tests. Choose input dims carefully. 
+ @testset "ColVecs" begin + x0 = ColVecs(randn(D_in, 3)) + x1 = ColVecs(randn(D_in, 3)) + x2 = ColVecs(randn(D_in, 2)) + TestUtils.test_interface(k1, x0, x1, x2) + TestUtils.test_interface(k2, x0, x1, x2) + end + @testset "RowVecs" begin + x0 = RowVecs(randn(3, D_in)) + x1 = RowVecs(randn(3, D_in)) + x2 = RowVecs(randn(2, D_in)) + TestUtils.test_interface(k1, x0, x1, x2) + TestUtils.test_interface(k2, x0, x1, x2) + end # test_ADs(x->spectral_mixture_kernel(exp.(x[1:3]), reshape(x[4:18], 5, 3), reshape(x[19:end], 5, 3)), vcat(log.(αs₁), γs[:], ωs[:]), dims = [5,5]) @test_broken "No tests passing (BaseKernel)" end From 7ab9c5211ef0b0a006bfedf1d806d322b823c74e Mon Sep 17 00:00:00 2001 From: Will Tebbutt Date: Sun, 30 Aug 2020 19:19:03 +0100 Subject: [PATCH 09/35] Fix up maha --- src/KernelFunctions.jl | 1 - src/test_utils.jl | 1 - src/transform/lineartransform.jl | 2 +- test/basekernels/maha.jl | 12 ++++++------ test/distances/mahalanobis.jl | 3 --- test/kernels/tensorproduct.jl | 7 ++----- test/runtests.jl | 2 +- 7 files changed, 10 insertions(+), 18 deletions(-) delete mode 100644 test/distances/mahalanobis.jl diff --git a/src/KernelFunctions.jl b/src/KernelFunctions.jl index 9919b4694..7acd39a25 100644 --- a/src/KernelFunctions.jl +++ b/src/KernelFunctions.jl @@ -58,7 +58,6 @@ include("utils.jl") include(joinpath("distances", "pairwise.jl")) include(joinpath("distances", "dotproduct.jl")) include(joinpath("distances", "delta.jl")) -include(joinpath("distances", "mahalanobis")) include(joinpath("distances", "sinus.jl")) include(joinpath("transform", "transform.jl")) diff --git a/src/test_utils.jl b/src/test_utils.jl index 6610462ed..b187750e1 100644 --- a/src/test_utils.jl +++ b/src/test_utils.jl @@ -35,7 +35,6 @@ function test_interface( atol=__ATOL, ) # TODO: uncomment the tests of ternary kerneldiagmatrix. - # TODO: add in-place tests. # Ensure that we have the required inputs. 
@assert length(x0) == length(x1) diff --git a/src/transform/lineartransform.jl b/src/transform/lineartransform.jl index a58a6457f..dd1e4db1b 100644 --- a/src/transform/lineartransform.jl +++ b/src/transform/lineartransform.jl @@ -29,7 +29,7 @@ end (t::LinearTransform)(x::Real) = vec(t.A * x) (t::LinearTransform)(x::AbstractVector{<:Real}) = t.A * x -_map(t::LinearTransform, x::AbstractVector{<:Real}) = ColVecs(t.A * x') +_map(t::LinearTransform, x::AbstractVector{<:Real}) = ColVecs(t.A * collect(x')) _map(t::LinearTransform, x::ColVecs) = ColVecs(t.A * x.X) _map(t::LinearTransform, x::RowVecs) = RowVecs(x.X * t.A') diff --git a/test/basekernels/maha.jl b/test/basekernels/maha.jl index 08638578a..d018ed031 100644 --- a/test/basekernels/maha.jl +++ b/test/basekernels/maha.jl @@ -5,7 +5,7 @@ v1 = rand(rng, D_in) v2 = rand(rng, D_in) - P_ = randn(3, 3) + P_ = randn(D_in, D_in) P = collect(Symmetric(P_ * P_' + I)) k = MahalanobisKernel(P=P) @@ -21,13 +21,13 @@ x0 = ColVecs(randn(D_in, 3)) x1 = ColVecs(randn(D_in, 3)) x2 = ColVecs(randn(D_in, 2)) - TestUtils.test_interface(k, Float64) + TestUtils.test_interface(k, x0, x1, x2) end @testset "RowVecs" begin - x0 = ColVecs(randn(3, D_in)) - x1 = ColVecs(randn(3, D_in)) - x2 = ColVecs(randn(2, D_in)) - TestUtils.test_interface(k, Float64) + x0 = RowVecs(randn(3, D_in)) + x1 = RowVecs(randn(3, D_in)) + x2 = RowVecs(randn(2, D_in)) + TestUtils.test_interface(k, x0, x1, x2) end test_params(k, (P,)) end diff --git a/test/distances/mahalanobis.jl b/test/distances/mahalanobis.jl deleted file mode 100644 index 90b4004af..000000000 --- a/test/distances/mahalanobis.jl +++ /dev/null @@ -1,3 +0,0 @@ -@testset "mahalanobis" begin - # Couldn't find any tests to write that aren't just copying the code. -end diff --git a/test/kernels/tensorproduct.jl b/test/kernels/tensorproduct.jl index a81332690..9896950c4 100644 --- a/test/kernels/tensorproduct.jl +++ b/test/kernels/tensorproduct.jl @@ -25,7 +25,8 @@ end # Standardised tests. - TestUtils.test_interface(k, Float64) + TestUtils.test_interface(kernel1, ColVecs{Float64}) + TestUtils.test_interface(kernel1, RowVecs{Float64}) test_ADs( ()->TensorProduct(SqExponentialKernel(), LinearKernel()); dims = [2, 2], @@ -43,9 +44,5 @@ @test kernel(x, y) == val end end - - # Standardised tests. - TestUtils.test_interface(k, Float64) - end end end diff --git a/test/runtests.jl b/test/runtests.jl index f3d1020f3..07ee04876 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -10,7 +10,7 @@ using Test using Flux import Zygote, ForwardDiff, ReverseDiff, FiniteDifferences -using KernelFunctions: SimpleKernel, metric, kappa, ColVecs, RowVecs +using KernelFunctions: SimpleKernel, metric, kappa, ColVecs, RowVecs, TestUtils # Writing tests: # 1. The file structure of the test should match precisely the file structure of src. 
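The pattern these test fixes converge on is to hand `TestUtils.test_interface` three explicitly constructed inputs rather than an element type: `x0` and `x1` of equal length, and `x2` of a different length, matching the assertions `length(x0) == length(x1)` and `length(x0) ≠ length(x2)` in `src/test_utils.jl`. A minimal sketch of that calling convention follows; the kernel choice, observation counts, and the `D_in` name are illustrative, not taken from the test suite, and it assumes the package's `ColVecs`/`RowVecs` wrappers (observations stored columnwise and rowwise, respectively).

    using KernelFunctions
    using KernelFunctions: ColVecs, RowVecs, TestUtils

    k = SqExponentialKernel()   # any kernel defined on D_in-dimensional inputs
    D_in = 3                    # illustrative input dimension

    # x0 and x1 must have equal length; x2 must have a different length,
    # so that the binary kernelmatrix code paths see mismatched inputs.
    x0 = ColVecs(randn(D_in, 5))    # 5 observations stored as columns
    x1 = ColVecs(randn(D_in, 5))
    x2 = ColVecs(randn(D_in, 4))
    TestUtils.test_interface(k, x0, x1, x2)

    # RowVecs stores observations as the rows of the underlying matrix.
    y0 = RowVecs(randn(5, D_in))
    y1 = RowVecs(randn(5, D_in))
    y2 = RowVecs(randn(4, D_in))
    TestUtils.test_interface(k, y0, y1, y2)

Passing concrete inputs makes it possible to fix the dimensionality a kernel requires (as the Mahalanobis and spectral-mixture kernels above do), which the element-type convenience methods cannot know.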
From 180c934ea290bfe1a782154444fce2a70200e434 Mon Sep 17 00:00:00 2001 From: Will Tebbutt Date: Sun, 30 Aug 2020 19:19:20 +0100 Subject: [PATCH 10/35] Remove redundant file --- src/distances/mahalanobis.jl | 2 -- 1 file changed, 2 deletions(-) delete mode 100644 src/distances/mahalanobis.jl diff --git a/src/distances/mahalanobis.jl b/src/distances/mahalanobis.jl deleted file mode 100644 index b11a1f4de..000000000 --- a/src/distances/mahalanobis.jl +++ /dev/null @@ -1,2 +0,0 @@ -Distances.pairwise(d::SqMahalanobis, x::ColVecs, y::ColVecs) = pairwise(d, x.X, y.X; dims=2) -Distances.pairwise(d::SqMahalanobis, x::RowVecs, y::RowVecs) = pairwise(d, x.X, y.X; dims=1) From 49545755dda8246a6e686c46ce87910742aec4f7 Mon Sep 17 00:00:00 2001 From: Will Tebbutt Date: Sun, 30 Aug 2020 19:25:38 +0100 Subject: [PATCH 11/35] Move existing test utils over to module --- src/test_utils.jl | 160 +++++++++++++++++++++++++++++++++++++++++++++ test/runtests.jl | 4 +- test/test_utils.jl | 159 -------------------------------------------- 3 files changed, 162 insertions(+), 161 deletions(-) delete mode 100644 test/test_utils.jl diff --git a/src/test_utils.jl b/src/test_utils.jl index b187750e1..2bf057e65 100644 --- a/src/test_utils.jl +++ b/src/test_utils.jl @@ -134,4 +134,164 @@ function test_interface(k::Kernel, T::Type{<:Real}; kwargs...) test_interface(Random.GLOBAL_RNG, k, T; kwargs...) end +# Check parameters of kernels + +function test_params(kernel, reference) + params_kernel = params(kernel) + params_reference = params(reference) + + @test length(params_kernel) == length(params_reference) + @test all(p == q for (p, q) in zip(params_kernel, params_reference)) +end + +# AD utilities + +const FDM = FiniteDifferences.central_fdm(5, 1) + +gradient(f, s::Symbol, args) = gradient(f, Val(s), args) + +function gradient(f, ::Val{:Zygote}, args) + g = first(Zygote.gradient(f, args)) + if isnothing(g) + if args isa AbstractArray{<:Real} + return zeros(size(args)) # To respect the same output as other ADs + else + return zeros.(size.(args)) + end + else + return g + end +end + +function gradient(f, ::Val{:ForwardDiff}, args) + ForwardDiff.gradient(f, args) +end + +function gradient(f, ::Val{:ReverseDiff}, args) + ReverseDiff.gradient(f, args) +end + +function gradient(f, ::Val{:FiniteDiff}, args) + first(FiniteDifferences.grad(FDM, f, args)) +end + +function compare_gradient(f, AD::Symbol, args) + grad_AD = gradient(f, AD, args) + grad_FD = gradient(f, :FiniteDiff, args) + @test grad_AD ≈ grad_FD atol=1e-8 rtol=1e-5 +end + +testfunction(k, A, B, dim) = sum(kernelmatrix(k, A, B, obsdim = dim)) +testfunction(k, A, dim) = sum(kernelmatrix(k, A, obsdim = dim)) + +function test_ADs(kernelfunction, args = nothing; ADs = [:Zygote, :ForwardDiff, :ReverseDiff], dims = [3, 3]) + test_fd = test_FiniteDiff(kernelfunction, args, dims) + if !test_fd.anynonpass + for AD in ADs + test_AD(AD, kernelfunction, args, dims) + end + end +end + +function test_FiniteDiff(kernelfunction, args = nothing, dims = [3, 3]) + # Init arguments : + k = if args === nothing + kernelfunction() + else + kernelfunction(args) + end + rng = MersenneTwister(42) + @testset "FiniteDifferences" begin + if k isa SimpleKernel + for d in log.([eps(), rand(rng)]) + @test_nowarn gradient(:FiniteDiff, [d]) do x + kappa(k, exp(first(x))) + end + end + end + ## Testing Kernel Functions + x = rand(rng, dims[1]) + y = rand(rng, dims[1]) + @test_nowarn gradient(:FiniteDiff, x) do x + k(x, y) + end + if !(args === nothing) + @test_nowarn gradient(:FiniteDiff, args) do p + 
kernelfunction(p)(x, y) + end + end + ## Testing Kernel Matrices + A = rand(rng, dims...) + B = rand(rng, dims...) + for dim in 1:2 + @test_nowarn gradient(:FiniteDiff, A) do a + testfunction(k, a, dim) + end + @test_nowarn gradient(:FiniteDiff , A) do a + testfunction(k, a, B, dim) + end + @test_nowarn gradient(:FiniteDiff, B) do b + testfunction(k, A, b, dim) + end + if !(args === nothing) + @test_nowarn gradient(:FiniteDiff, args) do p + testfunction(kernelfunction(p), A, B, dim) + end + end + end + end +end + +function test_AD(AD::Symbol, kernelfunction, args = nothing, dims = [3, 3]) + @testset "$(AD)" begin + # Test kappa function + k = if args === nothing + kernelfunction() + else + kernelfunction(args) + end + rng = MersenneTwister(42) + if k isa SimpleKernel + for d in log.([eps(), rand(rng)]) + compare_gradient(AD, [d]) do x + kappa(k, exp(x[1])) + end + end + end + # Testing kernel evaluations + x = rand(rng, dims[1]) + y = rand(rng, dims[1]) + compare_gradient(AD, x) do x + k(x, y) + end + compare_gradient(AD, y) do y + k(x, y) + end + if !(args === nothing) + compare_gradient(AD, args) do p + kernelfunction(p)(x,y) + end + end + # Testing kernel matrices + A = rand(rng, dims...) + B = rand(rng, dims...) + for dim in 1:2 + compare_gradient(AD, A) do a + testfunction(k, a, dim) + end + compare_gradient(AD, A) do a + testfunction(k, a, B, dim) + end + compare_gradient(AD, B) do b + testfunction(k, A, b, dim) + end + if !(args === nothing) + compare_gradient(AD, args) do p + testfunction(kernelfunction(p), A, dim) + end + end + end + end +end + end # module diff --git a/test/runtests.jl b/test/runtests.jl index 07ee04876..fe5d5517d 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -12,6 +12,8 @@ import Zygote, ForwardDiff, ReverseDiff, FiniteDifferences using KernelFunctions: SimpleKernel, metric, kappa, ColVecs, RowVecs, TestUtils +using TestUtils: test_params, test_AD + # Writing tests: # 1. The file structure of the test should match precisely the file structure of src. # Amongst other things, this means that there should be exactly 1 test file per src file. @@ -43,8 +45,6 @@ using KernelFunctions: SimpleKernel, metric, kappa, ColVecs, RowVecs, TestUtils # 10. If utility files are required. 
@info "Packages Loaded" -include("test_utils.jl") - @testset "KernelFunctions" begin include("utils.jl") diff --git a/test/test_utils.jl b/test/test_utils.jl deleted file mode 100644 index f6fd39400..000000000 --- a/test/test_utils.jl +++ /dev/null @@ -1,159 +0,0 @@ -# Check parameters of kernels - -function test_params(kernel, reference) - params_kernel = params(kernel) - params_reference = params(reference) - - @test length(params_kernel) == length(params_reference) - @test all(p == q for (p, q) in zip(params_kernel, params_reference)) -end - -# AD utilities - -const FDM = FiniteDifferences.central_fdm(5, 1) - -gradient(f, s::Symbol, args) = gradient(f, Val(s), args) - -function gradient(f, ::Val{:Zygote}, args) - g = first(Zygote.gradient(f, args)) - if isnothing(g) - if args isa AbstractArray{<:Real} - return zeros(size(args)) # To respect the same output as other ADs - else - return zeros.(size.(args)) - end - else - return g - end -end - -function gradient(f, ::Val{:ForwardDiff}, args) - ForwardDiff.gradient(f, args) -end - -function gradient(f, ::Val{:ReverseDiff}, args) - ReverseDiff.gradient(f, args) -end - -function gradient(f, ::Val{:FiniteDiff}, args) - first(FiniteDifferences.grad(FDM, f, args)) -end - -function compare_gradient(f, AD::Symbol, args) - grad_AD = gradient(f, AD, args) - grad_FD = gradient(f, :FiniteDiff, args) - @test grad_AD ≈ grad_FD atol=1e-8 rtol=1e-5 -end - -testfunction(k, A, B, dim) = sum(kernelmatrix(k, A, B, obsdim = dim)) -testfunction(k, A, dim) = sum(kernelmatrix(k, A, obsdim = dim)) - -function test_ADs(kernelfunction, args = nothing; ADs = [:Zygote, :ForwardDiff, :ReverseDiff], dims = [3, 3]) - test_fd = test_FiniteDiff(kernelfunction, args, dims) - if !test_fd.anynonpass - for AD in ADs - test_AD(AD, kernelfunction, args, dims) - end - end -end - -function test_FiniteDiff(kernelfunction, args = nothing, dims = [3, 3]) - # Init arguments : - k = if args === nothing - kernelfunction() - else - kernelfunction(args) - end - rng = MersenneTwister(42) - @testset "FiniteDifferences" begin - if k isa SimpleKernel - for d in log.([eps(), rand(rng)]) - @test_nowarn gradient(:FiniteDiff, [d]) do x - kappa(k, exp(first(x))) - end - end - end - ## Testing Kernel Functions - x = rand(rng, dims[1]) - y = rand(rng, dims[1]) - @test_nowarn gradient(:FiniteDiff, x) do x - k(x, y) - end - if !(args === nothing) - @test_nowarn gradient(:FiniteDiff, args) do p - kernelfunction(p)(x, y) - end - end - ## Testing Kernel Matrices - A = rand(rng, dims...) - B = rand(rng, dims...) 
- for dim in 1:2 - @test_nowarn gradient(:FiniteDiff, A) do a - testfunction(k, a, dim) - end - @test_nowarn gradient(:FiniteDiff , A) do a - testfunction(k, a, B, dim) - end - @test_nowarn gradient(:FiniteDiff, B) do b - testfunction(k, A, b, dim) - end - if !(args === nothing) - @test_nowarn gradient(:FiniteDiff, args) do p - testfunction(kernelfunction(p), A, B, dim) - end - end - end - end -end - -function test_AD(AD::Symbol, kernelfunction, args = nothing, dims = [3, 3]) - @testset "$(AD)" begin - # Test kappa function - k = if args === nothing - kernelfunction() - else - kernelfunction(args) - end - rng = MersenneTwister(42) - if k isa SimpleKernel - for d in log.([eps(), rand(rng)]) - compare_gradient(AD, [d]) do x - kappa(k, exp(x[1])) - end - end - end - # Testing kernel evaluations - x = rand(rng, dims[1]) - y = rand(rng, dims[1]) - compare_gradient(AD, x) do x - k(x, y) - end - compare_gradient(AD, y) do y - k(x, y) - end - if !(args === nothing) - compare_gradient(AD, args) do p - kernelfunction(p)(x,y) - end - end - # Testing kernel matrices - A = rand(rng, dims...) - B = rand(rng, dims...) - for dim in 1:2 - compare_gradient(AD, A) do a - testfunction(k, a, dim) - end - compare_gradient(AD, A) do a - testfunction(k, a, B, dim) - end - compare_gradient(AD, B) do b - testfunction(k, A, b, dim) - end - if !(args === nothing) - compare_gradient(AD, args) do p - testfunction(kernelfunction(p), A, dim) - end - end - end - end -end From a79de4674e4b5da42bd416d2510d26194c3187dc Mon Sep 17 00:00:00 2001 From: Will Tebbutt Date: Mon, 31 Aug 2020 11:01:55 +0100 Subject: [PATCH 12/35] Add Gamma Exponential kernel reference --- src/basekernels/exponential.jl | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/basekernels/exponential.jl b/src/basekernels/exponential.jl index 731fb6acc..40daa3de2 100644 --- a/src/basekernels/exponential.jl +++ b/src/basekernels/exponential.jl @@ -48,12 +48,15 @@ const LaplacianKernel = ExponentialKernel """ GammaExponentialKernel(; γ = 2.0) -The γ-exponential kernel is an isotropic Mercer kernel given by the formula: +The γ-exponential kernel [1] is an isotropic Mercer kernel given by the formula: ``` κ(x,y) = exp(-‖x-y‖^γ) ``` Where `γ > 0`, (the keyword `γ` can be replaced by `gamma`) -For `γ = 2`, see `SqExponentialKernel` and `γ = 1`, see `ExponentialKernel` +For `γ = 2`, see `SqExponentialKernel` and `γ = 1`, see `ExponentialKernel`. + +[1] - Gaussian Processes for Machine Learning, Carl Edward Rasmussen and Christopher K. I. + Williams, MIT Press, 2006. """ struct GammaExponentialKernel{Tγ<:Real} <: SimpleKernel γ::Vector{Tγ} From 4ba9c35bad0bab72b5e36bef8a6c5f3fa9d26fe8 Mon Sep 17 00:00:00 2001 From: willtebbutt Date: Mon, 31 Aug 2020 11:15:08 +0100 Subject: [PATCH 13/35] Update src/matrix/kernelpdmat.jl MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Théo Galy-Fajou --- src/matrix/kernelpdmat.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/matrix/kernelpdmat.jl b/src/matrix/kernelpdmat.jl index 55879b087..a7f798a8f 100644 --- a/src/matrix/kernelpdmat.jl +++ b/src/matrix/kernelpdmat.jl @@ -3,7 +3,7 @@ using .PDMats: PDMat export kernelpdmat """ - Compute a positive-definite matrix in the form of a `PDMat` matrix see [PDMats.jl]() + Compute a positive-definite matrix in the form of a `PDMat` matrix see [PDMats.jl](https://github.com/JuliaStats/PDMats.jl) with the cholesky decomposition precomputed. 
The algorithm recursively tries to add recursively a diagonal nugget until positive definiteness is achieved or that the noise is too big. From ce11e1da38fe8c8b95da06563252f2910bc9abd9 Mon Sep 17 00:00:00 2001 From: Will Tebbutt Date: Wed, 2 Sep 2020 14:39:59 +0100 Subject: [PATCH 14/35] Remove repeated code --- src/test_utils.jl | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/test_utils.jl b/src/test_utils.jl index 2bf057e65..b396a869f 100644 --- a/src/test_utils.jl +++ b/src/test_utils.jl @@ -39,8 +39,6 @@ function test_interface( # Ensure that we have the required inputs. @assert length(x0) == length(x1) @assert length(x0) ≠ length(x2) - @assert length(x0) == length(x1) - @assert length(x0) ≠ length(x2) # Check that kerneldiagmatrix basically works. # @test kerneldiagmatrix(k, x0, x1) isa AbstractVector From 10862413d84fa1b66b88ab330c519b2b1f574191 Mon Sep 17 00:00:00 2001 From: wt Date: Mon, 7 Sep 2020 19:49:54 +0100 Subject: [PATCH 15/35] Warn about breaking change --- Project.toml | 2 +- src/KernelFunctions.jl | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 5c2167326..38a1aea85 100644 --- a/Project.toml +++ b/Project.toml @@ -1,6 +1,6 @@ name = "KernelFunctions" uuid = "ec8451be-7e33-11e9-00cf-bbf324bd1392" -version = "0.7.0" +version = "0.8.0" [deps] Compat = "34da2185-b29b-5c13-b0c7-acf172513d20" diff --git a/src/KernelFunctions.jl b/src/KernelFunctions.jl index 15bf5ce51..ee7d37c9d 100644 --- a/src/KernelFunctions.jl +++ b/src/KernelFunctions.jl @@ -4,6 +4,10 @@ KernelFunctions. [Github](https://github.com/JuliaGaussianProcesses/KernelFuncti """ module KernelFunctions +@warn "SqExponentialKernel changed convention in version 0.8.0. This kernel now divides the + squared distance by 2 to better align itself with standard practice. This warning will + be removed in 0.9.0." + export kernelmatrix, kernelmatrix!, kerneldiagmatrix, kerneldiagmatrix! export transform export duplicate, set! 
# Helpers From 22295f64490f8aa35678254c711908a7031e9eb4 Mon Sep 17 00:00:00 2001 From: willtebbutt Date: Mon, 7 Sep 2020 21:39:07 +0100 Subject: [PATCH 16/35] Update src/test_utils.jl Co-authored-by: David Widmann --- src/test_utils.jl | 1 + 1 file changed, 1 insertion(+) diff --git a/src/test_utils.jl b/src/test_utils.jl index b396a869f..279c400ac 100644 --- a/src/test_utils.jl +++ b/src/test_utils.jl @@ -2,6 +2,7 @@ module TestUtils const __ATOL = 1e-9 +using FiniteDifferences using LinearAlgebra using KernelFunctions using Random From b58c649c0053e9f3afdc1f007a70dec0c53b9b4a Mon Sep 17 00:00:00 2001 From: Will Tebbutt Date: Mon, 21 Sep 2020 15:46:39 +0100 Subject: [PATCH 17/35] Bump patch --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 6e7dba09c..23508c026 100644 --- a/Project.toml +++ b/Project.toml @@ -1,6 +1,6 @@ name = "KernelFunctions" uuid = "ec8451be-7e33-11e9-00cf-bbf324bd1392" -version = "0.8.0" +version = "0.8.1" [deps] Compat = "34da2185-b29b-5c13-b0c7-acf172513d20" From 2e508e917db53875b682cbead67d8da5629a625e Mon Sep 17 00:00:00 2001 From: wt Date: Mon, 21 Sep 2020 17:44:01 +0100 Subject: [PATCH 18/35] Fix up tests --- src/KernelFunctions.jl | 2 +- src/test_utils.jl | 161 ---------------------------------------- test/runtests.jl | 7 +- test/test_utils.jl | 162 +++++++++++++++++++++++++++++++++++++++++ 4 files changed, 168 insertions(+), 164 deletions(-) create mode 100644 test/test_utils.jl diff --git a/src/KernelFunctions.jl b/src/KernelFunctions.jl index 885c158d7..3990f584c 100644 --- a/src/KernelFunctions.jl +++ b/src/KernelFunctions.jl @@ -16,7 +16,7 @@ if !isfile(joinpath(@__DIR__, "update_v0.8.0")) ) touch(joinpath(@__DIR__, "update_v0.8.0")) end - + export kernelmatrix, kernelmatrix!, kerneldiagmatrix, kerneldiagmatrix! export transform export duplicate, set! # Helpers diff --git a/src/test_utils.jl b/src/test_utils.jl index 279c400ac..9e70653ee 100644 --- a/src/test_utils.jl +++ b/src/test_utils.jl @@ -2,7 +2,6 @@ module TestUtils const __ATOL = 1e-9 -using FiniteDifferences using LinearAlgebra using KernelFunctions using Random @@ -133,164 +132,4 @@ function test_interface(k::Kernel, T::Type{<:Real}; kwargs...) test_interface(Random.GLOBAL_RNG, k, T; kwargs...) 
end -# Check parameters of kernels - -function test_params(kernel, reference) - params_kernel = params(kernel) - params_reference = params(reference) - - @test length(params_kernel) == length(params_reference) - @test all(p == q for (p, q) in zip(params_kernel, params_reference)) -end - -# AD utilities - -const FDM = FiniteDifferences.central_fdm(5, 1) - -gradient(f, s::Symbol, args) = gradient(f, Val(s), args) - -function gradient(f, ::Val{:Zygote}, args) - g = first(Zygote.gradient(f, args)) - if isnothing(g) - if args isa AbstractArray{<:Real} - return zeros(size(args)) # To respect the same output as other ADs - else - return zeros.(size.(args)) - end - else - return g - end -end - -function gradient(f, ::Val{:ForwardDiff}, args) - ForwardDiff.gradient(f, args) -end - -function gradient(f, ::Val{:ReverseDiff}, args) - ReverseDiff.gradient(f, args) -end - -function gradient(f, ::Val{:FiniteDiff}, args) - first(FiniteDifferences.grad(FDM, f, args)) -end - -function compare_gradient(f, AD::Symbol, args) - grad_AD = gradient(f, AD, args) - grad_FD = gradient(f, :FiniteDiff, args) - @test grad_AD ≈ grad_FD atol=1e-8 rtol=1e-5 -end - -testfunction(k, A, B, dim) = sum(kernelmatrix(k, A, B, obsdim = dim)) -testfunction(k, A, dim) = sum(kernelmatrix(k, A, obsdim = dim)) - -function test_ADs(kernelfunction, args = nothing; ADs = [:Zygote, :ForwardDiff, :ReverseDiff], dims = [3, 3]) - test_fd = test_FiniteDiff(kernelfunction, args, dims) - if !test_fd.anynonpass - for AD in ADs - test_AD(AD, kernelfunction, args, dims) - end - end -end - -function test_FiniteDiff(kernelfunction, args = nothing, dims = [3, 3]) - # Init arguments : - k = if args === nothing - kernelfunction() - else - kernelfunction(args) - end - rng = MersenneTwister(42) - @testset "FiniteDifferences" begin - if k isa SimpleKernel - for d in log.([eps(), rand(rng)]) - @test_nowarn gradient(:FiniteDiff, [d]) do x - kappa(k, exp(first(x))) - end - end - end - ## Testing Kernel Functions - x = rand(rng, dims[1]) - y = rand(rng, dims[1]) - @test_nowarn gradient(:FiniteDiff, x) do x - k(x, y) - end - if !(args === nothing) - @test_nowarn gradient(:FiniteDiff, args) do p - kernelfunction(p)(x, y) - end - end - ## Testing Kernel Matrices - A = rand(rng, dims...) - B = rand(rng, dims...) - for dim in 1:2 - @test_nowarn gradient(:FiniteDiff, A) do a - testfunction(k, a, dim) - end - @test_nowarn gradient(:FiniteDiff , A) do a - testfunction(k, a, B, dim) - end - @test_nowarn gradient(:FiniteDiff, B) do b - testfunction(k, A, b, dim) - end - if !(args === nothing) - @test_nowarn gradient(:FiniteDiff, args) do p - testfunction(kernelfunction(p), A, B, dim) - end - end - end - end -end - -function test_AD(AD::Symbol, kernelfunction, args = nothing, dims = [3, 3]) - @testset "$(AD)" begin - # Test kappa function - k = if args === nothing - kernelfunction() - else - kernelfunction(args) - end - rng = MersenneTwister(42) - if k isa SimpleKernel - for d in log.([eps(), rand(rng)]) - compare_gradient(AD, [d]) do x - kappa(k, exp(x[1])) - end - end - end - # Testing kernel evaluations - x = rand(rng, dims[1]) - y = rand(rng, dims[1]) - compare_gradient(AD, x) do x - k(x, y) - end - compare_gradient(AD, y) do y - k(x, y) - end - if !(args === nothing) - compare_gradient(AD, args) do p - kernelfunction(p)(x,y) - end - end - # Testing kernel matrices - A = rand(rng, dims...) - B = rand(rng, dims...) 
- for dim in 1:2 - compare_gradient(AD, A) do a - testfunction(k, a, dim) - end - compare_gradient(AD, A) do a - testfunction(k, a, B, dim) - end - compare_gradient(AD, B) do b - testfunction(k, A, b, dim) - end - if !(args === nothing) - compare_gradient(AD, args) do p - testfunction(kernelfunction(p), A, dim) - end - end - end - end -end - end # module diff --git a/test/runtests.jl b/test/runtests.jl index 07fc97c1f..6b450470f 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -12,7 +12,7 @@ import Zygote, ForwardDiff, ReverseDiff, FiniteDifferences using KernelFunctions: SimpleKernel, metric, kappa, ColVecs, RowVecs, TestUtils -using TestUtils: test_params, test_AD +using KernelFunctions.TestUtils: test_interface # Writing tests: # 1. The file structure of the test should match precisely the file structure of src. @@ -42,9 +42,12 @@ using TestUtils: test_params, test_AD # 9. List out all test files explicitly (eg. don't loop over them). This makes it easy to # disable tests by simply commenting them out, and makes it very clear which tests are not # currently being run. -# 10. If utility files are required. +# 10. If utility functionality is required, it should be placed in `src/test_utils.jl` so +# that other packages can benefit from it when implementing new kernels. @info "Packages Loaded" +include("test_utils.jl") + @testset "KernelFunctions" begin include("utils.jl") diff --git a/test/test_utils.jl b/test/test_utils.jl new file mode 100644 index 000000000..a94f1c54b --- /dev/null +++ b/test/test_utils.jl @@ -0,0 +1,162 @@ +# More test utilities. Can't be included in KernelFunctions because they introduce a number +# of additional deps that we don't want to have in the main package. + +# Check parameters of kernels + +function test_params(kernel, reference) + params_kernel = params(kernel) + params_reference = params(reference) + + @test length(params_kernel) == length(params_reference) + @test all(p == q for (p, q) in zip(params_kernel, params_reference)) +end + +# AD utilities + +const FDM = FiniteDifferences.central_fdm(5, 1) + +gradient(f, s::Symbol, args) = gradient(f, Val(s), args) + +function gradient(f, ::Val{:Zygote}, args) + g = first(Zygote.gradient(f, args)) + if isnothing(g) + if args isa AbstractArray{<:Real} + return zeros(size(args)) # To respect the same output as other ADs + else + return zeros.(size.(args)) + end + else + return g + end +end + +function gradient(f, ::Val{:ForwardDiff}, args) + ForwardDiff.gradient(f, args) +end + +function gradient(f, ::Val{:ReverseDiff}, args) + ReverseDiff.gradient(f, args) +end + +function gradient(f, ::Val{:FiniteDiff}, args) + first(FiniteDifferences.grad(FDM, f, args)) +end + +function compare_gradient(f, AD::Symbol, args) + grad_AD = gradient(f, AD, args) + grad_FD = gradient(f, :FiniteDiff, args) + @test grad_AD ≈ grad_FD atol=1e-8 rtol=1e-5 +end + +testfunction(k, A, B, dim) = sum(kernelmatrix(k, A, B, obsdim = dim)) +testfunction(k, A, dim) = sum(kernelmatrix(k, A, obsdim = dim)) + +function test_ADs(kernelfunction, args = nothing; ADs = [:Zygote, :ForwardDiff, :ReverseDiff], dims = [3, 3]) + test_fd = test_FiniteDiff(kernelfunction, args, dims) + if !test_fd.anynonpass + for AD in ADs + test_AD(AD, kernelfunction, args, dims) + end + end +end + +function test_FiniteDiff(kernelfunction, args = nothing, dims = [3, 3]) + # Init arguments : + k = if args === nothing + kernelfunction() + else + kernelfunction(args) + end + rng = MersenneTwister(42) + @testset "FiniteDifferences" begin + if k isa SimpleKernel + for d in 
log.([eps(), rand(rng)]) + @test_nowarn gradient(:FiniteDiff, [d]) do x + kappa(k, exp(first(x))) + end + end + end + ## Testing Kernel Functions + x = rand(rng, dims[1]) + y = rand(rng, dims[1]) + @test_nowarn gradient(:FiniteDiff, x) do x + k(x, y) + end + if !(args === nothing) + @test_nowarn gradient(:FiniteDiff, args) do p + kernelfunction(p)(x, y) + end + end + ## Testing Kernel Matrices + A = rand(rng, dims...) + B = rand(rng, dims...) + for dim in 1:2 + @test_nowarn gradient(:FiniteDiff, A) do a + testfunction(k, a, dim) + end + @test_nowarn gradient(:FiniteDiff , A) do a + testfunction(k, a, B, dim) + end + @test_nowarn gradient(:FiniteDiff, B) do b + testfunction(k, A, b, dim) + end + if !(args === nothing) + @test_nowarn gradient(:FiniteDiff, args) do p + testfunction(kernelfunction(p), A, B, dim) + end + end + end + end +end + +function test_AD(AD::Symbol, kernelfunction, args = nothing, dims = [3, 3]) + @testset "$(AD)" begin + # Test kappa function + k = if args === nothing + kernelfunction() + else + kernelfunction(args) + end + rng = MersenneTwister(42) + if k isa SimpleKernel + for d in log.([eps(), rand(rng)]) + compare_gradient(AD, [d]) do x + kappa(k, exp(x[1])) + end + end + end + # Testing kernel evaluations + x = rand(rng, dims[1]) + y = rand(rng, dims[1]) + compare_gradient(AD, x) do x + k(x, y) + end + compare_gradient(AD, y) do y + k(x, y) + end + if !(args === nothing) + compare_gradient(AD, args) do p + kernelfunction(p)(x,y) + end + end + # Testing kernel matrices + A = rand(rng, dims...) + B = rand(rng, dims...) + for dim in 1:2 + compare_gradient(AD, A) do a + testfunction(k, a, dim) + end + compare_gradient(AD, A) do a + testfunction(k, a, B, dim) + end + compare_gradient(AD, B) do b + testfunction(k, A, b, dim) + end + if !(args === nothing) + compare_gradient(AD, args) do p + testfunction(kernelfunction(p), A, dim) + end + end + end + end +end From e821f1a308432211d24133f8c74b9b7fb448c6a4 Mon Sep 17 00:00:00 2001 From: wt Date: Tue, 22 Sep 2020 21:24:24 +0100 Subject: [PATCH 19/35] Remove dead space --- src/KernelFunctions.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/KernelFunctions.jl b/src/KernelFunctions.jl index 3990f584c..885c158d7 100644 --- a/src/KernelFunctions.jl +++ b/src/KernelFunctions.jl @@ -16,7 +16,7 @@ if !isfile(joinpath(@__DIR__, "update_v0.8.0")) ) touch(joinpath(@__DIR__, "update_v0.8.0")) end - + export kernelmatrix, kernelmatrix!, kerneldiagmatrix, kerneldiagmatrix! export transform export duplicate, set! # Helpers From 09efe1af6c01a2add95711866ffe0f4a1a431aeb Mon Sep 17 00:00:00 2001 From: wt Date: Tue, 22 Sep 2020 21:26:30 +0100 Subject: [PATCH 20/35] Fix rational quadratic parameter test --- src/basekernels/rationalquad.jl | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/src/basekernels/rationalquad.jl b/src/basekernels/rationalquad.jl index 519de706e..9d79b684b 100644 --- a/src/basekernels/rationalquad.jl +++ b/src/basekernels/rationalquad.jl @@ -3,21 +3,25 @@ The rational-quadratic kernel is a Mercer kernel given by the formula: ``` - κ(x,y)=(1+||x−y||²/α)^(-α) + κ(x,y)=(1 + ||x − y||² / α)^(-α) ``` -where `α` is a shape parameter of the Euclidean distance. Check [`GammaRationalQuadraticKernel`](@ref) for a generalization. +where `α` is a shape parameter of the Euclidean distance. Check +[`GammaRationalQuadraticKernel`](@ref) for a generalization. 
""" struct RationalQuadraticKernel{Tα<:Real} <: SimpleKernel α::Vector{Tα} function RationalQuadraticKernel(;alpha::T=2.0, α::T=alpha) where {T} - @check_args(RationalQuadraticKernel, α, α > zero(T), "α > 1") + @check_args(RationalQuadraticKernel, α, α > one(T), "α > 1") return new{T}([α]) end end @functor RationalQuadraticKernel -kappa(κ::RationalQuadraticKernel, d²::T) where {T<:Real} = (one(T)+d²/first(κ.α))^(-first(κ.α)) +function kappa(κ::RationalQuadraticKernel, d²::T) where {T<:Real} + return (one(T) + d² / first(κ.α))^(-first(κ.α)) +end + metric(::RationalQuadraticKernel) = SqEuclidean() Base.show(io::IO, κ::RationalQuadraticKernel) = print(io, "Rational Quadratic Kernel (α = ", first(κ.α), ")") From 7eaae6453fc2deaafb804ecc53e698545ba4072a Mon Sep 17 00:00:00 2001 From: wt Date: Tue, 22 Sep 2020 21:35:46 +0100 Subject: [PATCH 21/35] Fix some style issues --- src/basekernels/rationalquad.jl | 26 ++++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/src/basekernels/rationalquad.jl b/src/basekernels/rationalquad.jl index 9d79b684b..983c0da8c 100644 --- a/src/basekernels/rationalquad.jl +++ b/src/basekernels/rationalquad.jl @@ -1,9 +1,9 @@ """ - RationalQuadraticKernel(; α = 2.0) + RationalQuadraticKernel(; α=2.0) The rational-quadratic kernel is a Mercer kernel given by the formula: ``` - κ(x,y)=(1 + ||x − y||² / α)^(-α) + κ(x, y) = (1 + ||x − y||² / α)^(-α) ``` where `α` is a shape parameter of the Euclidean distance. Check [`GammaRationalQuadraticKernel`](@ref) for a generalization. @@ -24,20 +24,25 @@ end metric(::RationalQuadraticKernel) = SqEuclidean() -Base.show(io::IO, κ::RationalQuadraticKernel) = print(io, "Rational Quadratic Kernel (α = ", first(κ.α), ")") +function Base.show(io::IO, κ::RationalQuadraticKernel) + print(io, "Rational Quadratic Kernel (α = $(first(κ.α)))") +end """ -`GammaRationalQuadraticKernel([ρ=1.0[,α=2.0[,γ=2.0]]])` +`GammaRationalQuadraticKernel([α=2.0 [, γ=2.0]])` + The Gamma-rational-quadratic kernel is an isotropic Mercer kernel given by the formula: ``` - κ(x,y)=(1+ρ^(2γ)||x−y||^(2γ)/α)^(-α) + κ(x, y) = (1 + ||x−y||^(2γ) / α)^(-α) ``` where `α` is a shape parameter of the Euclidean distance and `γ` is another shape parameter. 
""" struct GammaRationalQuadraticKernel{Tα<:Real, Tγ<:Real} <: SimpleKernel α::Vector{Tα} γ::Vector{Tγ} - function GammaRationalQuadraticKernel(;alpha::Tα=2.0, gamma::Tγ=2.0, α::Tα=alpha, γ::Tγ=gamma) where {Tα<:Real, Tγ<:Real} + function GammaRationalQuadraticKernel( + ;alpha::Tα=2.0, gamma::Tγ=2.0, α::Tα=alpha, γ::Tγ=gamma, + ) where {Tα<:Real, Tγ<:Real} @check_args(GammaRationalQuadraticKernel, α, α > one(Tα), "α > 1") @check_args(GammaRationalQuadraticKernel, γ, γ >= one(Tγ), "γ >= 1") return new{Tα, Tγ}([α], [γ]) @@ -46,7 +51,12 @@ end @functor GammaRationalQuadraticKernel -kappa(κ::GammaRationalQuadraticKernel, d²::T) where {T<:Real} = (one(T)+d²^first(κ.γ)/first(κ.α))^(-first(κ.α)) +function kappa(κ::GammaRationalQuadraticKernel, d²::Real) + return (one(d²) + d²^first(κ.γ) / first(κ.α))^(-first(κ.α)) +end + metric(::GammaRationalQuadraticKernel) = SqEuclidean() -Base.show(io::IO, κ::GammaRationalQuadraticKernel) = print(io, "Gamma Rational Quadratic Kernel (α = ", first(κ.α), ", γ = ", first(κ.γ), ")") +function Base.show(io::IO, κ::GammaRationalQuadraticKernel) + print(io, "Gamma Rational Quadratic Kernel (α = $(first(κ.α)), γ = $(first(κ.γ)))") +end From 13772f9b80b944e3bcf1a10f45b100fc5838a425 Mon Sep 17 00:00:00 2001 From: wt Date: Tue, 22 Sep 2020 21:42:07 +0100 Subject: [PATCH 22/35] Add extra parameter check --- src/basekernels/rationalquad.jl | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/basekernels/rationalquad.jl b/src/basekernels/rationalquad.jl index 983c0da8c..a302b1f00 100644 --- a/src/basekernels/rationalquad.jl +++ b/src/basekernels/rationalquad.jl @@ -33,7 +33,7 @@ end The Gamma-rational-quadratic kernel is an isotropic Mercer kernel given by the formula: ``` - κ(x, y) = (1 + ||x−y||^(2γ) / α)^(-α) + κ(x, y) = (1 + ||x−y||^γ / α)^(-α) ``` where `α` is a shape parameter of the Euclidean distance and `γ` is another shape parameter. 
""" @@ -44,7 +44,8 @@ struct GammaRationalQuadraticKernel{Tα<:Real, Tγ<:Real} <: SimpleKernel ;alpha::Tα=2.0, gamma::Tγ=2.0, α::Tα=alpha, γ::Tγ=gamma, ) where {Tα<:Real, Tγ<:Real} @check_args(GammaRationalQuadraticKernel, α, α > one(Tα), "α > 1") - @check_args(GammaRationalQuadraticKernel, γ, γ >= one(Tγ), "γ >= 1") + @check_args(GammaRationalQuadraticKernel, γ, γ <= 2, "γ <= 2") + @check_args(GammaRationalQuadraticKernel, γ, γ > zero(Tγ), "γ > 0") return new{Tα, Tγ}([α], [γ]) end end @@ -52,7 +53,7 @@ end @functor GammaRationalQuadraticKernel function kappa(κ::GammaRationalQuadraticKernel, d²::Real) - return (one(d²) + d²^first(κ.γ) / first(κ.α))^(-first(κ.α)) + return (one(d²) + d²^(first(κ.γ) / 2) / first(κ.α))^(-first(κ.α)) end metric(::GammaRationalQuadraticKernel) = SqEuclidean() From 7a7fdf1dd7c320325d4e1690d9bc02a93d3a2345 Mon Sep 17 00:00:00 2001 From: willtebbutt Date: Tue, 22 Sep 2020 21:51:13 +0100 Subject: [PATCH 23/35] Update src/basekernels/rationalquad.jl Co-authored-by: David Widmann --- src/basekernels/rationalquad.jl | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/basekernels/rationalquad.jl b/src/basekernels/rationalquad.jl index a302b1f00..be4fbe5ed 100644 --- a/src/basekernels/rationalquad.jl +++ b/src/basekernels/rationalquad.jl @@ -44,8 +44,7 @@ struct GammaRationalQuadraticKernel{Tα<:Real, Tγ<:Real} <: SimpleKernel ;alpha::Tα=2.0, gamma::Tγ=2.0, α::Tα=alpha, γ::Tγ=gamma, ) where {Tα<:Real, Tγ<:Real} @check_args(GammaRationalQuadraticKernel, α, α > one(Tα), "α > 1") - @check_args(GammaRationalQuadraticKernel, γ, γ <= 2, "γ <= 2") - @check_args(GammaRationalQuadraticKernel, γ, γ > zero(Tγ), "γ > 0") + @check_args(GammaRationalQuadraticKernel, γ, zero(γ) < γ <= 2 * one(γ), "0 < γ <= 2") return new{Tα, Tγ}([α], [γ]) end end From c8965ac55578873b3addb7b6c6cbf8ed1b9b59e2 Mon Sep 17 00:00:00 2001 From: wt Date: Tue, 22 Sep 2020 21:53:56 +0100 Subject: [PATCH 24/35] Tweak check --- src/basekernels/rationalquad.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/basekernels/rationalquad.jl b/src/basekernels/rationalquad.jl index be4fbe5ed..7636dccfa 100644 --- a/src/basekernels/rationalquad.jl +++ b/src/basekernels/rationalquad.jl @@ -44,7 +44,7 @@ struct GammaRationalQuadraticKernel{Tα<:Real, Tγ<:Real} <: SimpleKernel ;alpha::Tα=2.0, gamma::Tγ=2.0, α::Tα=alpha, γ::Tγ=gamma, ) where {Tα<:Real, Tγ<:Real} @check_args(GammaRationalQuadraticKernel, α, α > one(Tα), "α > 1") - @check_args(GammaRationalQuadraticKernel, γ, zero(γ) < γ <= 2 * one(γ), "0 < γ <= 2") + @check_args(GammaRationalQuadraticKernel, γ, zero(γ) < γ <= 2, "0 < γ <= 2") return new{Tα, Tγ}([α], [γ]) end end From cc559eb05c0494c1f7295452687919ab82dcfc79 Mon Sep 17 00:00:00 2001 From: wt Date: Tue, 22 Sep 2020 22:34:10 +0100 Subject: [PATCH 25/35] Fix RQ convention to match EQ --- src/basekernels/rationalquad.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/basekernels/rationalquad.jl b/src/basekernels/rationalquad.jl index 7636dccfa..9788958de 100644 --- a/src/basekernels/rationalquad.jl +++ b/src/basekernels/rationalquad.jl @@ -19,7 +19,7 @@ end @functor RationalQuadraticKernel function kappa(κ::RationalQuadraticKernel, d²::T) where {T<:Real} - return (one(T) + d² / first(κ.α))^(-first(κ.α)) + return (one(T) + d² / (2 * first(κ.α)))^(-first(κ.α)) end metric(::RationalQuadraticKernel) = SqEuclidean() From 8c079a5113195b2df96622498e918b21a1348cda Mon Sep 17 00:00:00 2001 From: wt Date: Tue, 22 Sep 2020 22:34:28 +0100 Subject: [PATCH 26/35] 
Refactor tests --- test/basekernels/rationalquad.jl | 68 ++++++++++++++++++++++++++------ 1 file changed, 57 insertions(+), 11 deletions(-) diff --git a/test/basekernels/rationalquad.jl b/test/basekernels/rationalquad.jl index 0f2a27bab..90b877308 100644 --- a/test/basekernels/rationalquad.jl +++ b/test/basekernels/rationalquad.jl @@ -3,13 +3,19 @@ x = rand(rng)*2 v1 = rand(rng, 3) v2 = rand(rng, 3) + @testset "RationalQuadraticKernel" begin α = 2.0 k = RationalQuadraticKernel(α=α) - @test RationalQuadraticKernel(alpha=α).α == [α] - @test kappa(k,x) ≈ (1.0+x/2.0)^-2 - @test k(v1,v2) ≈ (1.0+norm(v1-v2)^2/2.0)^-2 - @test kappa(RationalQuadraticKernel(α=α),x) == kappa(k,x) + + @testset "RQ ≈ EQ for large α" begin + @test isapprox( + RationalQuadraticKernel(α=1e9)(v1, v2), + SqExponentialKernel()(v1, v2); + atol=1e-6, rtol=1e-6, + ) + end + @test metric(RationalQuadraticKernel()) == SqEuclidean() @test metric(RationalQuadraticKernel(α=2.0)) == SqEuclidean() @test repr(k) == "Rational Quadratic Kernel (α = $(α))" @@ -19,16 +25,55 @@ test_ADs(x->RationalQuadraticKernel(alpha=x[1]),[α]) test_params(k, ([α],)) end + @testset "GammaRationalQuadraticKernel" begin k = GammaRationalQuadraticKernel() - @test kappa(k,x) ≈ (1.0+x^2.0/2.0)^-2 - @test k(v1,v2) ≈ (1.0+norm(v1-v2)^4.0/2.0)^-2 - @test kappa(GammaRationalQuadraticKernel(),x) == kappa(k,x) - a = 1.0 + rand() - @test GammaRationalQuadraticKernel(alpha=a).α == [a] + @test repr(k) == "Gamma Rational Quadratic Kernel (α = 2.0, γ = 2.0)" - #Coherence test - @test kappa(GammaRationalQuadraticKernel(α=a, γ=1.0), x) ≈ kappa(RationalQuadraticKernel(α=a), x) + + @testset "Default GammaRQ ≈ RQ for large α with rescaled inputs" begin + @test isapprox( + GammaRationalQuadraticKernel()(v1 ./ sqrt(2), v2 ./ sqrt(2)), + RationalQuadraticKernel()(v1, v2), + ) + a = 1.0 + rand() + @test isapprox( + GammaRationalQuadraticKernel(α=a)(v1 ./ sqrt(2), v2 ./ sqrt(2)), + RationalQuadraticKernel(α=a)(v1, v2), + ) + end + + @testset "GammaRQ ≈ EQ for large α with rescaled inputs" begin + v1 = randn(2) + v2 = randn(2) + @test isapprox( + GammaRationalQuadraticKernel(α=1e9)(v1 ./ sqrt(2), v2 ./ sqrt(2)), + SqExponentialKernel()(v1, v2); + atol=1e-6, rtol=1e-6, + ) + end + + @testset "GammaRQ(γ=1) ≈ Exponential with rescaled inputs for large α" begin + v1 = randn(4) + v2 = randn(4) + @test isapprox( + GammaRationalQuadraticKernel(α=1e9, γ=1.0)(v1, v2), + ExponentialKernel()(v1, v2); + atol=1e-6, rtol=1e-6, + ) + end + + @testset "GammaRQ ≈ GammaExponential for same γ and large α" begin + v1 = randn(3) + v2 = randn(3) + γ = rand() + 0.5 + @test isapprox( + GammaRationalQuadraticKernel(α=1e9, γ=γ)(v1, v2), + GammaExponentialKernel(γ=γ)(v1, v2); + atol=1e-6, rtol=1e-6, + ) + end + @test metric(GammaRationalQuadraticKernel()) == SqEuclidean() @test metric(GammaRationalQuadraticKernel(γ=2.0)) == SqEuclidean() @test metric(GammaRationalQuadraticKernel(γ=2.0, α=3.0)) == SqEuclidean() @@ -37,6 +82,7 @@ TestUtils.test_interface(k, Float64) # test_ADs(x->GammaRationalQuadraticKernel(α=x[1], γ=x[2]), [a, 2.0]) @test_broken "All (problem with power operation)" + a = 1.0 + rand() test_params(GammaRationalQuadraticKernel(; α=a, γ=x), ([a], [x])) end end From dbd0c164460a1809f7481793b28c3f7afc14da72 Mon Sep 17 00:00:00 2001 From: wt Date: Tue, 22 Sep 2020 22:38:56 +0100 Subject: [PATCH 27/35] Fix nn issues --- src/basekernels/nn.jl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/basekernels/nn.jl b/src/basekernels/nn.jl index be1d35361..afc9d5a24 100644 --- 
a/src/basekernels/nn.jl +++ b/src/basekernels/nn.jl @@ -42,13 +42,13 @@ function kernelmatrix(::NeuralNetworkKernel, x::RowVecs, y::RowVecs) X_2 = sum(x.X .* x.X; dims=2) Y_2 = sum(y.X .* y.X; dims=2) XY = x.X * y.X' - return asin.(XY ./ sqrt.((X_2 .+ 1)' * (Y_2 .+ 1))) + return asin.(XY ./ sqrt.((X_2 .+ 1) * (Y_2 .+ 1)')) end function kernelmatrix(::NeuralNetworkKernel, x::RowVecs) X_2_1 = sum(x.X .* x.X; dims=2) .+ 1 XX = x.X * x.X' - return asin.(XX ./ sqrt.(X_2_1' * X_2_1)) + return asin.(XX ./ sqrt.(X_2_1 * X_2_1')) end Base.show(io::IO, κ::NeuralNetworkKernel) = print(io, "Neural Network Kernel") From efb18bea874f195f438974e01eed3db18ac51942 Mon Sep 17 00:00:00 2001 From: wt Date: Wed, 23 Sep 2020 17:36:23 +0100 Subject: [PATCH 28/35] Fix weird printing issue --- src/basekernels/periodic.jl | 4 +++- test/basekernels/periodic.jl | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/src/basekernels/periodic.jl b/src/basekernels/periodic.jl index 63140f061..6c19518e8 100644 --- a/src/basekernels/periodic.jl +++ b/src/basekernels/periodic.jl @@ -26,4 +26,6 @@ metric(κ::PeriodicKernel) = Sinus(κ.r) kappa(κ::PeriodicKernel, d::Real) = exp(- 0.5d) -Base.show(io::IO, κ::PeriodicKernel) = print(io, "Periodic Kernel (length(r) = ", length(κ.r), ")") +function Base.show(io::IO, κ::PeriodicKernel) + print(io, "Periodic Kernel, length(r) = $(length(κ.r))") +end diff --git a/test/basekernels/periodic.jl b/test/basekernels/periodic.jl index ce373be94..b41ef88b2 100644 --- a/test/basekernels/periodic.jl +++ b/test/basekernels/periodic.jl @@ -8,7 +8,7 @@ @test k(v1, v2) ≈ exp(-0.5 * sum(abs2, sinpi.(v1 - v2) ./ r)) @test k(v1, v2) == k(v2, v1) @test PeriodicKernel(3)(v1, v2) == PeriodicKernel(r = ones(3))(v1, v2) - @test repr(k) == "Periodic Kernel, length(r) = $(length(r)))" + @test repr(k) == "Periodic Kernel, length(r) = $(length(r))" # Standardised tests. TestUtils.test_interface(PeriodicKernel(r=[0.9]), Vector{Float64}) From 93ec40d9577ffc1e6754e8f056f0cec20ac2e427 Mon Sep 17 00:00:00 2001 From: willtebbutt Date: Thu, 24 Sep 2020 12:53:28 +0100 Subject: [PATCH 29/35] Update src/test_utils.jl Co-authored-by: David Widmann --- src/test_utils.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/test_utils.jl b/src/test_utils.jl index 9e70653ee..75f45c5ec 100644 --- a/src/test_utils.jl +++ b/src/test_utils.jl @@ -68,7 +68,7 @@ function test_interface( @test kerneldiagmatrix(k, x0) ≈ diag(kernelmatrix(k, x0)) atol=atol # Check that unary pairwise produces a positive definite matrix (approximately). - @test all(eigvals(Matrix(kernelmatrix(k, x0))) .> -atol) + @test eigmin(Matrix(kernelmatrix(k, x0))) > -atol # Check that unary elementwise / pairwise are consistent with the binary versions. # @test kerneldiagmatrix(k, x0) ≈ kerneldiagmatrix(k, x0, x0) atol=atol From c7f2490480643ba3c8434b131905fa9e3d5d2b0b Mon Sep 17 00:00:00 2001 From: willtebbutt Date: Thu, 24 Sep 2020 16:18:53 +0100 Subject: [PATCH 30/35] Update test/kernels/kernelsum.jl MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Théo Galy-Fajou --- test/kernels/kernelsum.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/kernels/kernelsum.jl b/test/kernels/kernelsum.jl index 13aaa2143..3cd59a5b0 100644 --- a/test/kernels/kernelsum.jl +++ b/test/kernels/kernelsum.jl @@ -35,7 +35,7 @@ # Standardised tests. 
TestUtils.test_interface(k, Float64) test_ADs( - x->KernelSum(SqExponentialKernel(),LinearKernel(c= x[1])), rand(1); + x->KernelSum(SqExponentialKernel(), LinearKernel(c=x[1])), rand(1); ADs = [:ForwardDiff, :ReverseDiff, :Zygote], ) From bc345f620a379fe6689f07c799837ab72495c0b7 Mon Sep 17 00:00:00 2001 From: willtebbutt Date: Thu, 24 Sep 2020 16:21:11 +0100 Subject: [PATCH 31/35] Update src/test_utils.jl MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Théo Galy-Fajou --- src/test_utils.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/test_utils.jl b/src/test_utils.jl index 75f45c5ec..996cea498 100644 --- a/src/test_utils.jl +++ b/src/test_utils.jl @@ -128,7 +128,7 @@ function test_interface(rng::AbstractRNG, k::Kernel, T::Type{<:Real}; kwargs...) test_interface(rng, k, RowVecs{T}; kwargs...) end -function test_interface(k::Kernel, T::Type{<:Real}; kwargs...) +function test_interface(k::Kernel, T::Type{<:Real}=Float64; kwargs...) test_interface(Random.GLOBAL_RNG, k, T; kwargs...) end From ab492c0c4c9e4e1fcc8e3715a7349efef6d0229b Mon Sep 17 00:00:00 2001 From: wt Date: Thu, 24 Sep 2020 16:32:09 +0100 Subject: [PATCH 32/35] Test FBM kernel --- test/basekernels/fbm.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/basekernels/fbm.jl b/test/basekernels/fbm.jl index e81ed0854..5f651cce0 100644 --- a/test/basekernels/fbm.jl +++ b/test/basekernels/fbm.jl @@ -7,7 +7,7 @@ @test k(v1,v2) ≈ (sqeuclidean(v1, zero(v1))^h + sqeuclidean(v2, zero(v2))^h - sqeuclidean(v1-v2, zero(v1-v2))^h)/2 atol=1e-5 @test repr(k) == "Fractional Brownian Motion Kernel (h = $(h))" - + test_interface(k) @test repr(k) == "Fractional Brownian Motion Kernel (h = $(h))" test_ADs(FBMKernel, ADs = [:ReverseDiff, :Zygote]) @test_broken "Tests failing for kernelmatrix(k, x) for ForwardDiff" From e2bb5b54bc812377c886ecca0b4c915c44a91508 Mon Sep 17 00:00:00 2001 From: wt Date: Thu, 24 Sep 2020 16:41:00 +0100 Subject: [PATCH 33/35] Fix up Gabor --- src/basekernels/gabor.jl | 15 ++++----------- test/basekernels/gabor.jl | 2 ++ 2 files changed, 6 insertions(+), 11 deletions(-) diff --git a/src/basekernels/gabor.jl b/src/basekernels/gabor.jl index bf3cf115e..a72f4eb8f 100644 --- a/src/basekernels/gabor.jl +++ b/src/basekernels/gabor.jl @@ -57,17 +57,10 @@ end Base.show(io::IO, κ::GaborKernel) = print(io, "Gabor Kernel (ell = ", κ.ell, ", p = ", κ.p, ")") -function kernelmatrix(κ::GaborKernel, X::AbstractMatrix; obsdim::Int=defaultobs) - return kernelmatrix(κ.kernel, X; obsdim=obsdim) -end +kernelmatrix(κ::GaborKernel, x::AbstractVector) = kernelmatrix(κ.kernel, x) -function kernelmatrix( - κ::GaborKernel, X::AbstractMatrix, Y::AbstractMatrix; - obsdim::Int=defaultobs, -) - return kernelmatrix(κ.kernel, X, Y; obsdim=obsdim) +function kernelmatrix(κ::GaborKernel, x::AbstractVector, y::AbstractVector) + return kernelmatrix(κ.kernel, x, y) end -function kerneldiagmatrix(κ::GaborKernel, X::AbstractMatrix; obsdim::Int=defaultobs) #TODO Add test - return kerneldiagmatrix(κ.kernel, X; obsdim=obsdim) -end +kerneldiagmatrix(κ::GaborKernel, x::AbstractVector) = kerneldiagmatrix(κ.kernel, x) diff --git a/test/basekernels/gabor.jl b/test/basekernels/gabor.jl index 6efe50f2a..2d8ccfc94 100644 --- a/test/basekernels/gabor.jl +++ b/test/basekernels/gabor.jl @@ -19,6 +19,8 @@ @test k.p ≈ 1.0 atol=1e-5 @test repr(k) == "Gabor Kernel (ell = 1.0, p = 1.0)" + test_interface(k) + test_ADs(x -> GaborKernel(ell = x[1], p = x[2]), [ell, p], ADs = [:Zygote]) 
# Tests are also failing randomly for ForwardDiff and ReverseDiff but randomly From ef044ea8612e13d266f678e84ec7f22afb13be54 Mon Sep 17 00:00:00 2001 From: wt Date: Fri, 25 Sep 2020 18:44:46 +0100 Subject: [PATCH 34/35] Loosen dof bound --- src/basekernels/rationalquad.jl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/basekernels/rationalquad.jl b/src/basekernels/rationalquad.jl index 9788958de..f59bccd56 100644 --- a/src/basekernels/rationalquad.jl +++ b/src/basekernels/rationalquad.jl @@ -3,7 +3,7 @@ The rational-quadratic kernel is a Mercer kernel given by the formula: ``` - κ(x, y) = (1 + ||x − y||² / α)^(-α) + κ(x, y) = (1 + ||x − y||² / (2α))^(-α) ``` where `α` is a shape parameter of the Euclidean distance. Check [`GammaRationalQuadraticKernel`](@ref) for a generalization. @@ -11,7 +11,7 @@ where `α` is a shape parameter of the Euclidean distance. Check struct RationalQuadraticKernel{Tα<:Real} <: SimpleKernel α::Vector{Tα} function RationalQuadraticKernel(;alpha::T=2.0, α::T=alpha) where {T} - @check_args(RationalQuadraticKernel, α, α > one(T), "α > 1") + @check_args(RationalQuadraticKernel, α, α > zero(T), "α > 0") return new{T}([α]) end end @@ -43,7 +43,7 @@ struct GammaRationalQuadraticKernel{Tα<:Real, Tγ<:Real} <: SimpleKernel function GammaRationalQuadraticKernel( ;alpha::Tα=2.0, gamma::Tγ=2.0, α::Tα=alpha, γ::Tγ=gamma, ) where {Tα<:Real, Tγ<:Real} - @check_args(GammaRationalQuadraticKernel, α, α > one(Tα), "α > 1") + @check_args(GammaRationalQuadraticKernel, α, α > zero(Tα), "α > 0") @check_args(GammaRationalQuadraticKernel, γ, zero(γ) < γ <= 2, "0 < γ <= 2") return new{Tα, Tγ}([α], [γ]) end From e63002e653f349a120cee3781bd527b6812f3000 Mon Sep 17 00:00:00 2001 From: wt Date: Fri, 25 Sep 2020 19:45:41 +0100 Subject: [PATCH 35/35] Perturb test --- test/basekernels/piecewisepolynomial.jl | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/test/basekernels/piecewisepolynomial.jl b/test/basekernels/piecewisepolynomial.jl index adaea47e6..617dffefe 100644 --- a/test/basekernels/piecewisepolynomial.jl +++ b/test/basekernels/piecewisepolynomial.jl @@ -1,9 +1,8 @@ @testset "piecewisepolynomial" begin - v1 = rand(3) - v2 = rand(3) - m1 = rand(3, 4) - m2 = rand(3, 4) - maha = Matrix{Float64}(I, 3, 3) + D = 2 + v1 = rand(D) + v2 = rand(D) + maha = Matrix{Float64}(I, D, D) v = 3 k = PiecewisePolynomialKernel{v}(maha) @@ -16,8 +15,8 @@ @test repr(k) == "Piecewise Polynomial Kernel (v = $(v), size(maha) = $(size(maha)))" # Standardised tests. - TestUtils.test_interface(k, ColVecs{Float64}; dim_in=3) - TestUtils.test_interface(k, RowVecs{Float64}; dim_in=3) + TestUtils.test_interface(k, ColVecs{Float64}; dim_in=2) + TestUtils.test_interface(k, RowVecs{Float64}; dim_in=2) # test_ADs(maha-> PiecewisePolynomialKernel(v=2, maha = maha), maha) @test_broken "Nothing passes (problem with Mahalanobis distance in Distances)"
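The large-α checks added to `test/basekernels/rationalquad.jl` lean on two small identities. First, with the 0.8.0 convention the rational quadratic kernel is κ(x, y) = (1 + d² / (2α))^(-α) with d² = ||x − y||², and (1 + d² / (2α))^(-α) → exp(-d² / 2) as α → ∞, which is the squared exponential under its rescaled convention. Second, the default `GammaRationalQuadraticKernel` (γ = 2) divides by α rather than 2α, so rescaling its inputs by 1/√2 halves d² and recovers `RationalQuadraticKernel`; that is why the √2 factors appear in those tests. A small numerical sketch of both facts (the tolerances are illustrative, not the ones used in the test suite):

    using KernelFunctions
    using LinearAlgebra: norm

    x, y = randn(3), randn(3)
    d² = norm(x - y)^2

    # (1 + d² / (2α))^(-α) approaches exp(-d² / 2) for large α.
    rq = RationalQuadraticKernel(α=1e9)(x, y)
    eq = SqExponentialKernel()(x, y)
    @assert isapprox(rq, eq; atol=1e-6)
    @assert isapprox(eq, exp(-d² / 2); atol=1e-9)

    # With γ = 2 the Gamma-RQ divides by α, not 2α, so halving d² by
    # rescaling the inputs by 1/√2 matches the plain RQ kernel.
    grq = GammaRationalQuadraticKernel()(x ./ sqrt(2), y ./ sqrt(2))
    @assert isapprox(grq, RationalQuadraticKernel()(x, y); atol=1e-9)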