Commit 2f10876 (1 parent: cbeff59)

Add benchmark analyzer (#259)

* Review benchmark local script and add analyzer
* add analyzer env
* test analyzer

20 files changed: +125 −30 lines

benchmark/Manifest.toml (2 additions & 2 deletions)

@@ -2,11 +2,11 @@
 julia_version = "1.9.1"
 manifest_format = "2.0"
-project_hash = "14fe4b55e0aa680d5c90f646c1a87c8fc8737479"
+project_hash = "0cb0756144aac73ae8e2d06d9a0f6567a7a2f964"

 [[deps.ADNLPModels]]
 deps = ["ADTypes", "ForwardDiff", "LinearAlgebra", "NLPModels", "Requires", "ReverseDiff", "SparseArrays", "SparseConnectivityTracer", "SparseMatrixColorings"]
-git-tree-sha1 = "ad4682ad3f6da4246a5a5408593e5824d949e5a0"
+git-tree-sha1 = "2b582670fb51216d8d000c6de72934d1f68c4e7c"
 repo-rev = "main"
 repo-url = "https://github.com/JuliaSmoothOptimizers/ADNLPModels.jl"
 uuid = "54578032-b7ea-4c30-94aa-7cbd1cce6c9a"
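
The Manifest tracks ADNLPModels on its main branch (repo-rev = "main"), so the recorded git-tree-sha1 moves with the branch tip, and project_hash changes because benchmark/Project.toml is edited below. A minimal sketch of how such a branch-tracking entry is typically set up and refreshed with Pkg (the environment path is illustrative, not part of this commit):

    using Pkg

    # Activate the benchmark environment and track ADNLPModels on its main branch.
    Pkg.activate("benchmark")
    Pkg.add(url = "https://github.com/JuliaSmoothOptimizers/ADNLPModels.jl", rev = "main")

    # Pulling the latest commit of the tracked branch refreshes git-tree-sha1
    # (and project_hash, whenever Project.toml changed) in Manifest.toml.
    Pkg.update("ADNLPModels")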

benchmark/Project.toml (0 additions & 2 deletions)

@@ -1,6 +1,5 @@
 [deps]
 ADNLPModels = "54578032-b7ea-4c30-94aa-7cbd1cce6c9a"
-BenchmarkProfiles = "ecbce9bc-3e5e-569d-9e29-55181f61f8d0"
 BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
 DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
 Dates = "ade2ca70-3891-5945-98fb-dc099432e06a"
@@ -13,7 +12,6 @@ LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
 NLPModels = "a4795742-8479-5a88-8948-cc11e1c8c1a6"
 NLPModelsJuMP = "792afdf1-32c1-5681-94e0-d7bf7a5df49e"
 OptimizationProblems = "5049e819-d29b-5fba-b941-0eee7e64c1c6"
-Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80"
 Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7"
 ReverseDiff = "37e2e3b7-166d-5795-8a7a-e32c996b4267"
 SolverBenchmark = "581a75fa-a23a-52d0-a590-d6201de2218a"

New Project.toml (file path not shown in this extract) (9 additions & 0 deletions)

@@ -0,0 +1,9 @@
+[deps]
+BenchmarkProfiles = "ecbce9bc-3e5e-569d-9e29-55181f61f8d0"
+BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
+DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
+Dates = "ade2ca70-3891-5945-98fb-dc099432e06a"
+JLD2 = "033835bb-8acc-5ee8-8aae-3f567f8a3819"
+JSON = "682c06a0-de6a-54ab-a142-c8b1cf79cde6"
+SolverBenchmark = "581a75fa-a23a-52d0-a590-d6201de2218a"
+StatsPlots = "f3b207a7-027a-5e70-b257-86293d7955fd"
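
This new environment, presumably the analyzer's, collects the post-processing dependencies: BenchmarkProfiles and StatsPlots for plotting, JLD2 and JSON for reading saved results, so the benchmark environment itself no longer carries them. A minimal sketch of using it, assuming results were written with BenchmarkTools.save; the file name and location are placeholders:

    using Pkg
    Pkg.activate(@__DIR__)   # activate the analyzer environment next to the script (assumption)
    Pkg.instantiate()

    using BenchmarkTools
    # BenchmarkTools.load returns a vector of the stored objects; take the saved suite results.
    results = BenchmarkTools.load("results.json")[1]
    best = minimum(results)  # per-benchmark minimum-time estimates, grouped like SUITE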

benchmark/benchmarks.jl (3 additions & 2 deletions)

@@ -1,13 +1,14 @@
 # Include useful packages
 using ADNLPModels
 using Dates, DelimitedFiles, JLD2, LinearAlgebra, Printf, SparseArrays
-using BenchmarkTools, DataFrames, Plots
+using BenchmarkTools, DataFrames
 #JSO packages
-using NLPModels, BenchmarkProfiles, OptimizationProblems, SolverBenchmark
+using NLPModels, OptimizationProblems, SolverBenchmark
 # Most likely benchmark with JuMP as well
 using JuMP, NLPModelsJuMP

 include("problems_sets.jl")
+verbose_subbenchmark = false

 # Run locally with `tune!(SUITE)` and then `run(SUITE)`
 const SUITE = BenchmarkGroup()
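
As the comment in benchmarks.jl says, the suite is meant to be tuned and then run locally; the new verbose_subbenchmark flag defaults to false so the per-problem @info lines stay quiet, and flipping it to true in this file restores them. A minimal sketch of a local run, with the output file name as a placeholder:

    using BenchmarkTools

    include("benchmark/benchmarks.jl")   # defines SUITE (path relative to the repository root)

    tune!(SUITE)                         # pick sample/evaluation parameters per benchmark
    results = run(SUITE, verbose = true)

    # Save the results so they can be post-processed in the analyzer environment.
    BenchmarkTools.save("results.json", results)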

benchmark/gradient/benchmarks_gradient.jl (1 addition & 1 deletion)

@@ -51,7 +51,7 @@ for f in benchmark_list
 for pb in problem_sets[s]
 n = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nvar(n = $(nscal))"))
 m = eval(Meta.parse("OptimizationProblems.get_" * pb * "_ncon(n = $(nscal))"))
-@info " $(pb): $T with $n vars and $m cons"
+verbose_subbenchmark && @info " $(pb): $T with $n vars and $m cons"
 g = zeros(T, n)
 SUITE["$(fun)"][f][T][s][b][pb] = @benchmarkable $fun(nlp, get_x0(nlp), $g) setup =
   (nlp = set_adnlp($pb, $(name_backend), $(backend), $nscal, $T))
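
The same one-line change appears in each sub-benchmark file below: the per-problem @info call is now guarded by a short-circuit &&, so the message (and its string interpolation) is only evaluated when verbose_subbenchmark is true. A standalone illustration of the pattern, with made-up values:

    verbose_subbenchmark = false
    pb, T, n, m = "arglina", Float64, 100, 0   # illustrative values only

    # `a && b` evaluates `b` only when `a` is true, so nothing is logged here.
    verbose_subbenchmark && @info " $(pb): $T with $n vars and $m cons"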

benchmark/hessian/benchmarks_coloring.jl (1 addition & 1 deletion)

@@ -49,7 +49,7 @@ for f in benchmark_list
 for pb in problem_sets[s]
 n = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nvar(n = $(nscal))"))
 m = eval(Meta.parse("OptimizationProblems.get_" * pb * "_ncon(n = $(nscal))"))
-@info " $(pb): $T with $n vars and $m cons"
+verbose_subbenchmark && @info " $(pb): $T with $n vars and $m cons"
 SUITE["$(fun)"][f][T][s][b][pb] =
   @benchmarkable set_adnlp($pb, $(name_backend), $backend, $nscal, $T)
 end

benchmark/hessian/benchmarks_hessian.jl (1 addition & 1 deletion)

@@ -40,7 +40,7 @@ for f in benchmark_list
 for pb in problem_sets[s]
 n = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nvar(n = $(nscal))"))
 m = eval(Meta.parse("OptimizationProblems.get_" * pb * "_ncon(n = $(nscal))"))
-@info " $(pb): $T with $n vars"
+verbose_subbenchmark && @info " $(pb): $T with $n vars"
 SUITE["$(fun)"][f][T][s][b][pb] = @benchmarkable $fun(nlp, get_x0(nlp)) setup =
   (nlp = set_adnlp($pb, $(name_backend), $backend, $nscal, $T))
 end

benchmark/hessian/benchmarks_hessian_lagrangian.jl (1 addition & 1 deletion)

@@ -40,7 +40,7 @@ for f in benchmark_list
 for pb in problem_sets[s]
 n = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nvar(n = $(nscal))"))
 m = eval(Meta.parse("OptimizationProblems.get_" * pb * "_ncon(n = $(nscal))"))
-@info " $(pb): $T with $n vars and $m cons"
+verbose_subbenchmark && @info " $(pb): $T with $n vars and $m cons"
 y = 10 * T[-(-1.0)^i for i = 1:m]
 SUITE["$(fun)"][f][T][s][b][pb] = @benchmarkable $fun(nlp, get_x0(nlp), $y) setup =
   (nlp = set_adnlp($pb, $(name_backend), $backend, $nscal, $T))
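
For the Lagrangian Hessian benchmarks the multiplier vector y alternates in sign and is scaled by 10; the benchmarked call corresponds to the NLPModels API hess(nlp, x, y). A minimal sketch outside the suite, with a tiny hand-written constrained model (the problem itself is illustrative, not one of the OptimizationProblems sets):

    using ADNLPModels, NLPModels

    # Tiny constrained model: minimize sum(x.^2) subject to x[1] + x[2] = 0.
    nlp = ADNLPModel(x -> sum(x .^ 2), ones(2), x -> [x[1] + x[2]], [0.0], [0.0])

    T = Float64
    m = nlp.meta.ncon
    y = 10 * T[-(-1.0)^i for i = 1:m]   # alternating multipliers, as in the suite
    H = hess(nlp, get_x0(nlp), y)       # Hessian of the Lagrangian at the starting point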

benchmark/hessian/benchmarks_hessian_residual.jl (1 addition & 1 deletion)

@@ -41,7 +41,7 @@ for f in benchmark_list
 n = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nvar(n = $(nscal))"))
 m = eval(Meta.parse("OptimizationProblems.get_" * pb * "_ncon(n = $(nscal))"))
 nequ = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nls_nequ(n = $(nscal))"))
-@info " $(pb): $T with $n vars, $nequ residuals and $m cons"
+verbose_subbenchmark && @info " $(pb): $T with $n vars, $nequ residuals and $m cons"
 v = 10 * T[-(-1.0)^i for i = 1:nequ]
 SUITE["$(fun)"][f][T][s][b][pb] = @benchmarkable $fun(nls, get_x0(nls), $v) setup =
   (nls = set_adnls($pb, $(name_backend), $backend, $nscal, $T))
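
The residual variant works on a nonlinear least-squares model, where v carries one weight per residual equation; the benchmarked call corresponds to hess_residual(nls, x, v) from NLPModels. A minimal sketch with a tiny ADNLSModel (the residual function is illustrative):

    using ADNLPModels, NLPModels

    T = Float64
    # Two-equation residual: F(x) = [x[1] - 1, 10 * (x[2] - x[1]^2)].
    nls = ADNLSModel(x -> [x[1] - 1, 10 * (x[2] - x[1]^2)], ones(T, 2), 2)

    nequ = nls_meta(nls).nequ
    v = 10 * T[-(-1.0)^i for i = 1:nequ]      # alternating weights, as in the suite
    H = hess_residual(nls, get_x0(nls), v)    # weighted sum of residual Hessians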

benchmark/hessian/benchmarks_hprod.jl (1 addition & 1 deletion)

@@ -38,7 +38,7 @@ for f in benchmark_list
 for pb in problem_sets[s]
 n = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nvar(n = $(nscal))"))
 m = eval(Meta.parse("OptimizationProblems.get_" * pb * "_ncon(n = $(nscal))"))
-@info " $(pb): $T with $n vars"
+verbose_subbenchmark && @info " $(pb): $T with $n vars"
 v = [sin(T(i) / 10) for i = 1:n]
 Hv = Vector{T}(undef, n)
 SUITE["$(fun)"][f][T][s][b][pb] = @benchmarkable $fun(nlp, get_x0(nlp), $v, $Hv) setup =
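
The Hessian-vector-product benchmarks preallocate the output vector Hv and time the in-place product; outside the suite the corresponding NLPModels call is hprod!(nlp, x, v, Hv). A minimal sketch with an illustrative unconstrained model:

    using ADNLPModels, NLPModels

    T = Float64
    n = 10
    nlp = ADNLPModel(x -> sum(100 * (x[i + 1] - x[i]^2)^2 + (x[i] - 1)^2 for i = 1:n-1), ones(T, n))

    v  = [sin(T(i) / 10) for i = 1:n]   # same direction vector as in the suite
    Hv = Vector{T}(undef, n)            # preallocated output, reused across evaluations
    hprod!(nlp, get_x0(nlp), v, Hv)     # in-place Hessian-vector product at x0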
