Skip to content

Commit 53a494c

Browse files
tmigot and amontoison authored
Add GPU tests (#222)
* Add GPU tests * Add CUDA version compatible with 1.6 * Update the GPU tests to use buildkite --------- Co-authored-by: Alexis Montoison <[email protected]>
1 parent be41bb6 commit 53a494c

26 files changed

+161
-95
lines changed

.buildkite/pipeline.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,6 @@ steps:
77
queue: "juliagpu"
88
cuda: "*"
99
command: |
10-
julia --color=yes --project -e 'using Pkg; Pkg.add("CUDA"); Pkg.instantiate(); using CUDA'
10+
julia --color=yes --project -e 'using Pkg; Pkg.add("CUDA"); Pkg.add("NLPModels"); Pkg.add("NLPModelsTest"); Pkg.instantiate()'
1111
julia --color=yes --project -e 'include("test/gpu.jl")'
1212
timeout_in_minutes: 30

src/ad_api.jl

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -139,10 +139,10 @@ Jtprod!(nlp::AbstractNLPModel, Jtv, c, x, v, ::Val{:F}) = jtprod_residual!(nlp,
139139
function Hvprod!(nlp::AbstractNLPModel, Hv, x, v, ℓ, ::Val{:obj}, obj_weight)
140140
return hprod!(nlp, x, v, Hv, obj_weight = obj_weight)
141141
end
142-
function Hvprod!(nlp::AbstractNLPModel, Hv, x, v, ℓ, ::Val{:lag}, y, obj_weight)
142+
function Hvprod!(nlp::AbstractNLPModel, Hv, x::S, v, ℓ, ::Val{:lag}, y, obj_weight) where {S}
143143
if nlp.meta.nlin > 0
144144
# y is of length nnln, and hprod expectes ncon...
145-
yfull = zeros(eltype(x), nlp.meta.ncon)
145+
yfull = fill!(S(undef, nlp.meta.ncon), 0)
146146
k = 0
147147
for i in nlp.meta.nln
148148
k += 1
@@ -200,14 +200,14 @@ end
200200
function NLPModels.hess_coord!(
201201
nlp::AbstractNLPModel,
202202
::ADModel,
203-
x::AbstractVector,
203+
x::S,
204204
y::AbstractVector,
205205
obj_weight::Real,
206206
vals::AbstractVector,
207-
)
207+
) where {S}
208208
if nlp.meta.nlin > 0
209209
# y is of length nnln, and hess expectes ncon...
210-
yfull = zeros(eltype(x), nlp.meta.ncon)
210+
yfull = fill!(S(undef, nlp.meta.ncon), 0)
211211
k = 0
212212
for i in nlp.meta.nln
213213
k += 1

src/forward.jl

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -195,9 +195,10 @@ function ForwardDiffADHvprod(
195195
f,
196196
ncon::Integer = 0,
197197
c!::Function = (args...) -> [];
198-
x0::AbstractVector{T} = rand(nvar),
198+
x0::S = rand(nvar),
199199
kwargs...,
200-
) where {T}
200+
) where {S}
201+
T = eltype(S)
201202
function lag(z; nvar = nvar, ncon = ncon, f = f, c! = c!)
202203
cx, x, y, ob = view(z, 1:ncon),
203204
view(z, (ncon + 1):(nvar + ncon)),
@@ -221,8 +222,8 @@ function ForwardDiffADHvprod(
221222
ForwardDiff.gradient!(gz, lag, z, cfg)
222223
return gz
223224
end
224-
longv = zeros(T, ntotal)
225-
Hvp = zeros(T, ntotal)
225+
longv = fill!(S(undef, ntotal), 0)
226+
Hvp = fill!(S(undef, ntotal), 0)
226227

227228
# unconstrained Hessian
228229
tagf = ForwardDiff.Tag{typeof(f), T}

src/sparse_diff_tools.jl

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -13,16 +13,17 @@
1313
f,
1414
ncon,
1515
c!;
16-
x0::AbstractVector{T} = rand(nvar),
16+
x0::S = rand(nvar),
1717
alg::SparseDiffTools.SparseDiffToolsColoringAlgorithm = SparseDiffTools.GreedyD1Color(),
1818
kwargs...,
19-
) where {T}
19+
) where {S}
20+
T = eltype(S)
2021
output = similar(x0, ncon)
2122
J = compute_jacobian_sparsity(c!, output, x0)
2223
colors = sparse_matrix_colors(J, alg)
2324
jac = SparseMatrixCSC{T, Int}(J.m, J.n, J.colptr, J.rowval, T.(J.nzval))
2425

25-
dx = zeros(T, ncon)
26+
dx = fill!(S(undef, ncon), 0)
2627
cfJ = SparseDiffTools.ForwardColorJacCache(c!, x0, colorvec = colors, dx = dx, sparsity = jac)
2728
SDTSparseADJacobian(cfJ)
2829
end

src/sparse_hessian.jl

Lines changed: 11 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -19,12 +19,13 @@ function SparseADHessian(
1919
f,
2020
ncon,
2121
c!;
22-
x0::AbstractVector{T} = rand(nvar),
22+
x0::S = rand(nvar),
2323
alg = ColPackColoration(),
2424
kwargs...,
25-
) where {T}
26-
S = compute_hessian_sparsity(f, nvar, c!, ncon)
27-
H = ncon == 0 ? S : S[1:nvar, 1:nvar]
25+
) where {S}
26+
T = eltype(S)
27+
Hs = compute_hessian_sparsity(f, nvar, c!, ncon)
28+
H = ncon == 0 ? Hs : Hs[1:nvar, 1:nvar]
2829

2930
colors = sparse_matrix_colors(H, alg)
3031
ncolors = maximum(colors)
@@ -59,10 +60,9 @@ function SparseADHessian(
5960
ForwardDiff.gradient!(gz, lag, z, cfg)
6061
return gz
6162
end
62-
longv = zeros(T, ntotal)
63-
Hvp = zeros(T, ntotal)
64-
65-
y = zeros(T, ncon)
63+
longv = fill!(S(undef, ntotal), 0)
64+
Hvp = fill!(S(undef, ntotal), 0)
65+
y = fill!(S(undef, ncon), 0)
6666

6767
return SparseADHessian(d, rowval, colptr, colors, ncolors, res, lz, glz, sol, longv, Hvp, ∇φ!, y)
6868
end
@@ -95,8 +95,8 @@ function SparseReverseADHessian(
9595
alg = ColPackColoration(),
9696
kwargs...,
9797
) where {T}
98-
S = compute_hessian_sparsity(f, nvar, c!, ncon)
99-
H = ncon == 0 ? S : S[1:nvar, 1:nvar]
98+
Hs = compute_hessian_sparsity(f, nvar, c!, ncon)
99+
H = ncon == 0 ? Hs : Hs[1:nvar, 1:nvar]
100100

101101
colors = sparse_matrix_colors(H, alg)
102102
ncolors = maximum(colors)
@@ -138,7 +138,7 @@ function SparseReverseADHessian(
138138
end
139139
Hv_temp = similar(x0)
140140

141-
y = zeros(T, ncon)
141+
y = similar(x0, ncon)
142142
return SparseReverseADHessian(
143143
d,
144144
rowval,

src/sparse_sym.jl

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -102,9 +102,9 @@ function SparseSymbolicsADHessian(
102102
f,
103103
ncon,
104104
c!;
105-
x0::AbstractVector{T} = rand(nvar),
105+
x0::S = rand(nvar),
106106
kwargs...,
107-
) where {T}
107+
) where {S}
108108
Symbolics.@variables xs[1:nvar], μs
109109
xsi = Symbolics.scalarize(xs)
110110
fun = μs * f(xsi)
@@ -122,7 +122,7 @@ function SparseSymbolicsADHessian(
122122
# cfH is a Tuple{Expr, Expr}, cfH[2] is the in-place function
123123
# that we need to update a vector `vals` with the nonzeros of ∇²ℓ(x, y, μ).
124124
cfH = Symbolics.build_function(vals, xsi, ysi, μs, expression = Val{false})
125-
y = zeros(T, ncon)
125+
y = fill!(S(undef, ncon), 0)
126126
return SparseSymbolicsADHessian(nnzh, rows, cols, y, cfH[2])
127127
end
128128

test/Project.toml

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
11
[deps]
2+
CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba"
23
Enzyme = "7da242da-08ed-463a-9acd-ee780be4f1d9"
34
ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210"
45
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
@@ -14,6 +15,7 @@ Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
1415
Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"
1516

1617
[compat]
18+
CUDA = "4, 5"
1719
Enzyme = "0.10, 0.11, 0.12"
1820
ForwardDiff = "0.10"
1921
ManualNLPModels = "0.1"

test/gpu.jl

Lines changed: 29 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,31 @@
1-
using CUDA, Test
1+
using CUDA, LinearAlgebra, SparseArrays, Test
2+
using ADNLPModels, NLPModels, NLPModelsTest
3+
4+
for problem in NLPModelsTest.nlp_problems ["GENROSE"]
5+
include("nlp/problems/$(lowercase(problem)).jl")
6+
end
7+
for problem in NLPModelsTest.nls_problems
8+
include("nls/problems/$(lowercase(problem)).jl")
9+
end
210

311
@test CUDA.functional()
12+
13+
@testset "Checking NLPModelsTest (NLP) tests with $backend - GPU multiple precision" for backend in keys(ADNLPModels.predefined_backend)
14+
@testset "Checking GPU multiple precision on problem $problem" for problem in NLPModelsTest.nlp_problems
15+
nlp_from_T = eval(Meta.parse(lowercase(problem) * "_autodiff"))
16+
CUDA.allowscalar() do
17+
# sparse Jacobian/Hessian doesn't work here
18+
multiple_precision_nlp_array(T -> nlp_from_T(T; jacobian_backend = ADNLPModels.ForwardDiffADJacobian, hessian_backend = ADNLPModels.ForwardDiffADHessian), CuArray, exclude = [jth_hprod, hprod, jprod], linear_api = true)
19+
end
20+
end
21+
end
22+
23+
@testset "Checking NLPModelsTest (NLS) tests with $backend - GPU multiple precision" for backend in keys(ADNLPModels.predefined_backend)
24+
@testset "Checking GPU multiple precision on problem $problem" for problem in NLPModelsTest.nls_problems
25+
nls_from_T = eval(Meta.parse(lowercase(problem) * "_autodiff"))
26+
CUDA.allowscalar() do
27+
# sparse Jacobian/Hessian doesn't work here
28+
multiple_precision_nls_array(T -> nls_from_T(T; jacobian_backend = ADNLPModels.ForwardDiffADJacobian, hessian_backend = ADNLPModels.ForwardDiffADHessian, jacobian_residual_backend = ADNLPModels.ForwardDiffADJacobian, hessian_residual_backend = ADNLPModels.ForwardDiffADHessian), CuArray, exclude = [jprod, jprod_residual, hprod_residual], linear_api = true)
29+
end
30+
end
31+
end

test/nlp/nlpmodelstest.jl

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,14 @@
1818
@testset "Check multiple precision" begin
1919
multiple_precision_nlp(nlp_from_T, exclude = [], linear_api = true)
2020
end
21+
@testset "Check multiple precision GPU" begin
22+
if CUDA.functional()
23+
CUDA.allowscalar() do
24+
# sparse Jacobian/Hessian doesn't work here
25+
multiple_precision_nlp_array(T -> nlp_from_T(T; jacobian_backend = ADNLPModels.ForwardDiffADJacobian, hessian_backend = ADNLPModels.ForwardDiffADHessian), CuArray, exclude = [jth_hprod, hprod, jprod], linear_api = true)
26+
end
27+
end
28+
end
2129
@testset "Check view subarray" begin
2230
view_subarray_nlp(nlp_ad, exclude = [])
2331
end

test/nlp/problems/brownden.jl

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,9 @@
11
export brownden_autodiff
22

3-
function brownden_autodiff(::Type{T} = Float64; kwargs...) where {T}
4-
x0 = T[25.0; 5.0; -5.0; -1.0]
3+
brownden_autodiff(::Type{T}; kwargs...) where {T <: Number} = brownden_autodiff(Vector{T}; kwargs...)
4+
function brownden_autodiff(::Type{S} = Vector{Float64}; kwargs...) where {S}
5+
T = eltype(S)
6+
x0 = S([25.0; 5.0; -5.0; -1.0])
57
f(x) = begin
68
s = zero(T)
79
for i = 1:20

0 commit comments

Comments
 (0)