From 2f8536326148947ac85f29246c7072013b56badb Mon Sep 17 00:00:00 2001
From: tmigot
Date: Fri, 28 May 2021 22:59:44 +0200
Subject: [PATCH 1/4] add allocs and improve hess

---
 src/model-Fletcherpenaltynlp.jl | 152 ++++++++++++++++++++++++++++++--
 src/solve_linear_system.jl      |   6 +-
 src/solve_two_systems_struct.jl |   4 +-
 3 files changed, 151 insertions(+), 11 deletions(-)

diff --git a/src/model-Fletcherpenaltynlp.jl b/src/model-Fletcherpenaltynlp.jl
index ca03eed..241e565 100644
--- a/src/model-Fletcherpenaltynlp.jl
+++ b/src/model-Fletcherpenaltynlp.jl
@@ -15,6 +15,10 @@ import NLPModels:
 
 include("solve_two_systems_struct.jl")
 
+#=
+We need to create a workspace structure for Val(1), Val(2), and matrix-free Val(2).
+=#
+
 """
 We consider here the implementation of Fletcher's exact penalty method for
 the minimization problem:
@@ -77,6 +81,17 @@ mutable struct FletcherPenaltyNLP{
   Jcρ::T
   Jv::T
   Ss::Array{S, 2} # only when Val(1)
+  Arows
+  Acols
+  Avals
+  Hrows
+  Hcols
+  Hvals
+  Ax # jacobian matrix (remove for a matrix-free implementation) (ncon x nvar)
+  F
+  Pt # dense projector (nvar x nvar)
+  invAtA # dense pseudo-inverse of AA' (ncon x ncon)
+  Hs # = Symmetric(hess(nlp.nlp, x, -ys), :L) (nvar x nvar)
 
   # Problem parameter
   σ::P
@@ -113,6 +128,18 @@ function FletcherPenaltyNLP(
     name = "Fletcher penalization of $(nlp.meta.name)",
   )
 
+  Arows, Acols = jac_structure(nlp)
+  Avals = Vector{S}(undef, nlp.meta.nnzj)
+  Hrows, Hcols = hess_structure(nlp)
+  Hvals = Vector{S}(undef, nlp.meta.nnzh)
+
+  invAtA = spdiagm(ones(nlp.meta.ncon))
+  F = if nlp.meta.ncon > 0
+    lu(invAtA)
+  else
+    nothing
+  end
+
   return FletcherPenaltyNLP(
     meta,
     Counters(),
@@ -134,6 +161,17 @@ function FletcherPenaltyNLP(
     Vector{S}(undef, nlp.meta.nvar),
     Vector{S}(undef, nlp.meta.ncon),
     Array{S, 2}(undef, nlp.meta.ncon, nlp.meta.nvar),
+    Arows,
+    Acols,
+    Avals,
+    Hrows,
+    Hcols,
+    Hvals,
+    Array{S, 2}(undef, nlp.meta.ncon, nlp.meta.nvar),
+    F,
+    Array{S, 2}(undef, nlp.meta.nvar, nlp.meta.nvar),
+    invAtA,
+    Array{S, 2}(undef, nlp.meta.nvar, nlp.meta.nvar),
     σ,
     zero(typeof(σ)),
     zero(typeof(σ)),
@@ -169,6 +207,19 @@ function FletcherPenaltyNLP(
     name = "Fletcher penalization of $(nlp.meta.name)",
   )
   counters = Counters()
+
+  Arows, Acols = jac_structure(nlp)
+  Avals = Vector{S}(undef, nlp.meta.nnzj)
+  Hrows, Hcols = hess_structure(nlp)
+  Hvals = Vector{S}(undef, nlp.meta.nnzh)
+
+  invAtA = spdiagm(ones(nlp.meta.ncon))
+  F = if nlp.meta.ncon > 0
+    lu(invAtA)
+  else
+    nothing
+  end
+
   return FletcherPenaltyNLP(
     meta,
     counters,
@@ -190,6 +241,17 @@ function FletcherPenaltyNLP(
     Vector{S}(undef, nlp.meta.nvar),
     Vector{S}(undef, nlp.meta.ncon),
     Array{S, 2}(undef, nlp.meta.ncon, nlp.meta.nvar),
+    Arows,
+    Acols,
+    Avals,
+    Hrows,
+    Hcols,
+    Hvals,
+    Array{S, 2}(undef, nlp.meta.ncon, nlp.meta.nvar),
+    F,
+    Array{S, 2}(undef, nlp.meta.nvar, nlp.meta.nvar),
+    invAtA,
+    Array{S, 2}(undef, nlp.meta.nvar, nlp.meta.nvar),
     σ,
     ρ,
     δ,
@@ -363,12 +425,14 @@ function hess_structure!(
   return rows, cols
 end
 
+# function _compute_pseudo_inverse!(nlp, x)
+
 function hess_coord!(
-  nlp::FletcherPenaltyNLP,
+  nlp::FletcherPenaltyNLP{S, Tt, Val{1}, P, QDS},
   x::AbstractVector{T},
   vals::AbstractVector{T};
   obj_weight::Real = one(T),
-) where {T}
+) where {T, S, Tt, P, QDS}
   @lencheck nlp.meta.nvar x
   @lencheck nlp.meta.nnzh vals
   increment!(nlp, :neval_hess)
@@ -381,9 +445,13 @@ function hess_coord!(
   g = nlp.gx
   c = nlp.cx
   A = jac(nlp.nlp, x) # if used, this should probably be preallocated
+  # nlp.Ax .= jac(nlp.nlp, x) # do in-place?
+  # A = nlp.Ax
   σ, ρ, δ = nlp.σ, nlp.ρ, nlp.δ
 
   Hs = Symmetric(hess(nlp.nlp, x, -ys), :L)
+  # nlp.Hs .= Symmetric(hess(nlp.nlp, x, -ys), :L) # do in-place?
+  # Hs = nlp.Hs
   Im = Matrix(I, ncon, ncon)
   τ = T(max(nlp.δ, 1e-14))
   invAtA = pinv(Matrix(A * A') + τ * Im) #inv(Matrix(A*A') + τ * Im) # Uh... wait!
@@ -397,11 +465,83 @@ function hess_coord!(
     Hx += Hc + T(ρ) * A' * A
   end
 
-  if nlp.hessian_approx == Val(1)
-    for j = 1:ncon
-      nlp.Ss[j, :] = gs' * Symmetric(jth_hess(nlp.nlp, x, j), :L)
+  # for the 1st Hessian approximation only
+  for j = 1:ncon
+    nlp.Ss[j, :] = gs' * Symmetric(jth_hess(nlp.nlp, x, j), :L) # could we use hprod?
+  end
+  Hx += -AinvAtA * nlp.Ss - nlp.Ss' * invAtA * A
+
+  #=
+  if nlp.η > 0.0
+    In = Matrix(I, nvar, nvar)
+    Hx += T(nlp.η) * In
+  end
+  =#
+
+  k = 1
+  for j = 1:nvar
+    for i = j:nvar
+      vals[k] = obj_weight * Hx[i, j]
+      if i == j && nlp.η > 0.0
+        vals[k] += obj_weight * T(nlp.η)
+      end
+      k += 1
     end
-    Hx += -AinvAtA * nlp.Ss - nlp.Ss' * invAtA * A
+  end
+
+  return vals
+end
+
+function hess_coord!(
+  nlp::FletcherPenaltyNLP{S, Tt, Val{2}, P, QDS},
+  x::AbstractVector{T},
+  vals::AbstractVector{T};
+  obj_weight::Real = one(T),
+) where {T, S, Tt, P, QDS}
+  @lencheck nlp.meta.nvar x
+  @lencheck nlp.meta.nnzh vals
+  increment!(nlp, :neval_hess)
+
+  nvar = nlp.meta.nvar
+  ncon = nlp.nlp.meta.ncon
+
+  gs, ys, _, _ = _compute_ys_gs!(nlp, x)
+  f = nlp.fx
+  g = nlp.gx
+  c = nlp.cx
+  A = jac(nlp.nlp, x) # if used, this should probably be preallocated
+  # nlp.Ax .= jac(nlp.nlp, x) # do in-place?
+  # A = nlp.Ax
+  σ, ρ, δ = nlp.σ, nlp.ρ, nlp.δ
+
+  Hs = Symmetric(hess(nlp.nlp, x, -ys), :L) # do in-place?
+  # Hs = nlp.Hs
+
+  # put in a separate function
+  # Im = Matrix(I, ncon, ncon)
+  τ = T(max(nlp.δ, 1e-14))
+  # invAtA = pinv(Matrix(A * A') + τ * Im) #inv(Matrix(A*A') + τ * Im) # Uh... wait!
+  nlp.invAtA .= Matrix(I, ncon, ncon) # spdiagm(ones(ncon)) #
+  mul!(nlp.invAtA, A, A', 1, τ)
+  # lu!(nlp.F, nlp.invAtA)
+  nlp.invAtA = pinv(Matrix(nlp.invAtA)) # in-place
+  # AinvAtA = A' * invAtA
+  # Pt = AinvAtA * A
+  mul!(nlp.Ax, nlp.invAtA, A)
+  # ldiv!(nlp.Ax, nlp.F, A)
+  mul!(nlp.Pt, A', nlp.Ax)
+  # end of separate function
+
+  # mul!(C, A, B, α, β) -> C = A * B * α + C * β
+  # Hx = Hs - nlp.Pt * Hs - Hs * nlp.Pt + 2 * T(σ) * nlp.Pt
+  mul!(nlp.Hs, nlp.Pt, Hs, -1, 0) # - nlp.Pt * Hs
+  mul!(nlp.Hs, Hs, nlp.Pt, -1, 1) # nlp.Hs -= Hs * nlp.Pt
+  @. nlp.Hs += Hs + 2 * T(σ) * nlp.Pt
+  Hx = nlp.Hs
 
+  # regularization term
+  if ρ > 0.0
+    Hc = hess(nlp.nlp, x, c * T(ρ), obj_weight = zero(T))
+    Hx += Hc + T(ρ) * A' * A
   end
 
   #=
diff --git a/src/solve_linear_system.jl b/src/solve_linear_system.jl
index 6873954..37347e1 100644
--- a/src/solve_linear_system.jl
+++ b/src/solve_linear_system.jl
@@ -115,7 +115,7 @@ function solve_two_mixed(
   if !stats1.solved
     @warn "Failed solving 1st linear system lsqr in mixed."
   end
-  
+
   if nlp.δ != 0.0
     (p2, q2, stats2) = craig!(
       nlp.qdsolver.solver_struct_least_norm,
@@ -131,8 +131,8 @@ function solve_two_mixed(
     )
   else
     (p2, q2, stats2) = craig!(
-      nlp.qdsolver.solver_struct_least_norm, 
-      nlp.Aop, 
+      nlp.qdsolver.solver_struct_least_norm,
+      nlp.Aop,
       -rhs2,
       atol = nlp.qdsolver.ln_atol,
       rtol = nlp.qdsolver.ln_rtol,
diff --git a/src/solve_two_systems_struct.jl b/src/solve_two_systems_struct.jl
index 828d5a8..3b0ba1a 100644
--- a/src/solve_two_systems_struct.jl
+++ b/src/solve_two_systems_struct.jl
@@ -90,8 +90,8 @@ function IterativeSolver(
   ln_btol::T = √eps(T),
   ln_conlim::T = 1 / √eps(T),
   ln_itmax::Integer = 5 * (nlp.meta.ncon + nlp.meta.nvar),
-  ne_atol::T = √eps(T)/100,
-  ne_rtol::T = √eps(T)/100,
+  ne_atol::T = √eps(T) / 100,
+  ne_rtol::T = √eps(T) / 100,
   ne_ratol::T = zero(T),
   ne_rrtol::T = zero(T),
   ne_etol::T = √eps(T),
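# Side note on the 5-arg mul! that the new Val(2) hess_coord! above relies on:
# mul!(C, A, B, α, β) computes C = α * A * B + β * C without allocating. A
# minimal, self-contained sketch (illustrative names, not the package API) of
# the in-place update Hx = Hs - Pt * Hs - Hs * Pt + 2σ * Pt assembled in the patch:
using LinearAlgebra
n = 5
Hs = Symmetric(rand(n, n))
Pt = rand(n, n)
σ = 2.0
Hx = zeros(n, n)
mul!(Hx, Pt, Hs, -1, 0)   # Hx  = -Pt * Hs
mul!(Hx, Hs, Pt, -1, 1)   # Hx -= Hs * Pt
@. Hx += Hs + 2 * σ * Pt  # Hx += Hs + 2σ * Pt
@assert Hx ≈ Hs - Pt * Hs - Hs * Pt + 2 * σ * Pt
# Caveat mirrored from the patch: the commented "Hs = nlp.Hs" variant would
# make Hs alias the output buffer nlp.Hs, and 5-arg mul! does not allow the
# destination to alias its inputs.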
From bc465292594a1cdde178d4812968dd6f2127a761 Mon Sep 17 00:00:00 2001
From: tmigot
Date: Fri, 28 May 2021 23:20:01 +0200
Subject: [PATCH 2/4] skip if ncon = 0 in hess

---
 src/model-Fletcherpenaltynlp.jl | 64 ++++++++++++++++++---------------
 1 file changed, 36 insertions(+), 28 deletions(-)

diff --git a/src/model-Fletcherpenaltynlp.jl b/src/model-Fletcherpenaltynlp.jl
index 241e565..2f8d51b 100644
--- a/src/model-Fletcherpenaltynlp.jl
+++ b/src/model-Fletcherpenaltynlp.jl
@@ -509,39 +509,47 @@ function hess_coord!(
   f = nlp.fx
   g = nlp.gx
   c = nlp.cx
-  A = jac(nlp.nlp, x) # if used, this should probably be preallocated
-  # nlp.Ax .= jac(nlp.nlp, x) # do in-place?
-  # A = nlp.Ax
   σ, ρ, δ = nlp.σ, nlp.ρ, nlp.δ
 
   Hs = Symmetric(hess(nlp.nlp, x, -ys), :L) # do in-place?
   # Hs = nlp.Hs
 
-  # put in a separate function
-  # Im = Matrix(I, ncon, ncon)
-  τ = T(max(nlp.δ, 1e-14))
-  # invAtA = pinv(Matrix(A * A') + τ * Im) #inv(Matrix(A*A') + τ * Im) # Uh... wait!
-  nlp.invAtA .= Matrix(I, ncon, ncon) # spdiagm(ones(ncon)) #
-  mul!(nlp.invAtA, A, A', 1, τ)
-  # lu!(nlp.F, nlp.invAtA)
-  nlp.invAtA = pinv(Matrix(nlp.invAtA)) # in-place
-  # AinvAtA = A' * invAtA
-  # Pt = AinvAtA * A
-  mul!(nlp.Ax, nlp.invAtA, A)
-  # ldiv!(nlp.Ax, nlp.F, A)
-  mul!(nlp.Pt, A', nlp.Ax)
-  # end of separate function
-
-  # mul!(C, A, B, α, β) -> C = A * B * α + C * β
-  # Hx = Hs - nlp.Pt * Hs - Hs * nlp.Pt + 2 * T(σ) * nlp.Pt
-  mul!(nlp.Hs, nlp.Pt, Hs, -1, 0) # - nlp.Pt * Hs
-  mul!(nlp.Hs, Hs, nlp.Pt, -1, 1) # nlp.Hs -= Hs * nlp.Pt
-  @. nlp.Hs += Hs + 2 * T(σ) * nlp.Pt
-  Hx = nlp.Hs
-  # regularization term
-  if ρ > 0.0
-    Hc = hess(nlp.nlp, x, c * T(ρ), obj_weight = zero(T))
-    Hx += Hc + T(ρ) * A' * A
+  if ncon > 0
+    A = jac(nlp.nlp, x) # if used, this should probably be preallocated
+    # nlp.Ax .= jac(nlp.nlp, x) # do in-place?
+    # A = nlp.Ax
+    # put in a separate function
+    # Im = Matrix(I, ncon, ncon)
+    τ = T(max(nlp.δ, 1e-14))
+    # invAtA = pinv(Matrix(A * A') + τ * Im) #inv(Matrix(A*A') + τ * Im) # Uh... wait!
+    nlp.invAtA .= Matrix(I, ncon, ncon) # spdiagm(ones(ncon)) #
+    mul!(nlp.invAtA, A, A', 1, τ)
+    # lu!(nlp.F, nlp.invAtA)
+    nlp.invAtA = pinv(Matrix(nlp.invAtA)) # in-place
+    # AinvAtA = A' * invAtA
+    # Pt = AinvAtA * A
+    mul!(nlp.Ax, nlp.invAtA, A)
+    # ldiv!(nlp.Ax, nlp.F, A)
+    mul!(nlp.Pt, A', nlp.Ax)
+    # end of separate function
+
+    # mul!(C, A, B, α, β) -> C = A * B * α + C * β
+    # Hx = Hs - nlp.Pt * Hs - Hs * nlp.Pt + 2 * T(σ) * nlp.Pt
+    mul!(nlp.Hs, nlp.Pt, Hs, -1, 0) # - nlp.Pt * Hs
+    mul!(nlp.Hs, Hs, nlp.Pt, -1, 1) # nlp.Hs -= Hs * nlp.Pt
+    @. nlp.Hs += Hs + 2 * T(σ) * nlp.Pt
+    Hx = nlp.Hs
+    # regularization term
+    if ρ > 0.0
+      Hc = hess(nlp.nlp, x, c * T(ρ), obj_weight = zero(T))
+      Hx += Hc + T(ρ) * A' * A
+    end
+  else
+    Hx = Hs
+    if ρ > 0.0
+      Hc = hess(nlp.nlp, x, c * T(ρ), obj_weight = zero(T))
+      Hx += Hc
+    end
   end
 
   #=
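# Side note on the ρ-branch kept in both cases above: it is the exact Hessian
# of the quadratic penalty term. For c : Rⁿ → Rᵐ with Jacobian A(x),
#
#   ∇²ₓ[ (ρ/2) ‖c(x)‖² ] = ρ ∑ᵢ cᵢ(x) ∇²cᵢ(x) + ρ A(x)' A(x),
#
# and hess(nlp.nlp, x, c * T(ρ), obj_weight = zero(T)) returns exactly the
# first sum: a Lagrangian Hessian with multipliers ρ·c and no objective term.
# The ncon == 0 branch therefore only drops the A'A product, which does not
# exist without constraints.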
From b99b9ee53f2bebef6d4a4f837f99a94badd7e4f2 Mon Sep 17 00:00:00 2001
From: tmigot
Date: Fri, 28 May 2021 23:43:37 +0200
Subject: [PATCH 3/4] lu [doesn't work]

---
 src/model-Fletcherpenaltynlp.jl | 25 ++++++++++++++-----------
 1 file changed, 14 insertions(+), 11 deletions(-)

diff --git a/src/model-Fletcherpenaltynlp.jl b/src/model-Fletcherpenaltynlp.jl
index 2f8d51b..7035c74 100644
--- a/src/model-Fletcherpenaltynlp.jl
+++ b/src/model-Fletcherpenaltynlp.jl
@@ -115,7 +115,7 @@ function FletcherPenaltyNLP(
   x0::AbstractVector{S};
   qds = LDLtSolver(nlp, S(0)),
 ) where {S}
-  nvar = nlp.meta.nvar
+  nvar, ncon = nlp.meta.nvar, nlp.meta.ncon
 
   meta = NLPModelMeta(
     nvar,
@@ -129,11 +129,12 @@ function FletcherPenaltyNLP(
   )
 
   Arows, Acols = jac_structure(nlp)
-  Avals = Vector{S}(undef, nlp.meta.nnzj)
+  Avals = zeros(S, nlp.meta.nnzj)
   Hrows, Hcols = hess_structure(nlp)
   Hvals = Vector{S}(undef, nlp.meta.nnzh)
 
-  invAtA = spdiagm(ones(nlp.meta.ncon))
+  tempA = sparse(Arows, Acols, Avals, ncon, nvar)
+  invAtA = sparse(tempA * tempA' + 1e-14 * Matrix(I, ncon, ncon))
   F = if nlp.meta.ncon > 0
     lu(invAtA)
   else
@@ -194,7 +195,7 @@ function FletcherPenaltyNLP(
   x0::AbstractVector{S};
   qds = LDLtSolver(nlp, S(0)), #IterativeSolver(nlp, S(NaN)),
 ) where {S}
-  nvar = nlp.meta.nvar
+  nvar, ncon = nlp.meta.nvar, nlp.meta.ncon
 
   meta = NLPModelMeta(
     nvar,
@@ -209,11 +210,12 @@ function FletcherPenaltyNLP(
   counters = Counters()
 
   Arows, Acols = jac_structure(nlp)
-  Avals = Vector{S}(undef, nlp.meta.nnzj)
+  Avals = zeros(S, nlp.meta.nnzj)
   Hrows, Hcols = hess_structure(nlp)
   Hvals = Vector{S}(undef, nlp.meta.nnzh)
 
-  invAtA = spdiagm(ones(nlp.meta.ncon))
+  tempA = sparse(Arows, Acols, Avals, ncon, nvar)
+  invAtA = sparse(tempA * tempA' + 1e-14 * Matrix(I, ncon, ncon))
   F = if nlp.meta.ncon > 0
     lu(invAtA)
   else
@@ -515,7 +517,8 @@ function hess_coord!(
   # Hs = nlp.Hs
 
   if ncon > 0
-    A = jac(nlp.nlp, x) # if used, this should probably be preallocated
+    # A = jac(nlp.nlp, x) # if used, this should probably be preallocated
+    A = sparse(nlp.Arows, nlp.Acols, jac_coord!(nlp.nlp, x, nlp.Avals), ncon, nvar)
     # nlp.Ax .= jac(nlp.nlp, x) # do in-place?
     # A = nlp.Ax
     # put in a separate function
     # Im = Matrix(I, ncon, ncon)
     τ = T(max(nlp.δ, 1e-14))
@@ -524,12 +527,12 @@ function hess_coord!(
     # invAtA = pinv(Matrix(A * A') + τ * Im) #inv(Matrix(A*A') + τ * Im) # Uh... wait!
     nlp.invAtA .= Matrix(I, ncon, ncon) # spdiagm(ones(ncon)) #
     mul!(nlp.invAtA, A, A', 1, τ)
-    # lu!(nlp.F, nlp.invAtA)
-    nlp.invAtA = pinv(Matrix(nlp.invAtA)) # in-place
+    lu!(nlp.F, nlp.invAtA)
+    # nlp.invAtA = pinv(Matrix(nlp.invAtA)) # in-place
     # AinvAtA = A' * invAtA
     # Pt = AinvAtA * A
-    mul!(nlp.Ax, nlp.invAtA, A)
-    # ldiv!(nlp.Ax, nlp.F, A)
+    # mul!(nlp.Ax, nlp.invAtA, A)
+    ldiv!(nlp.Ax, nlp.F, A)
     mul!(nlp.Pt, A', nlp.Ax)
     # end of separate function
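# Side note on this commit, flagged [doesn't work]: for a SparseMatrixCSC,
# lu!(F, S) reuses the symbolic factorization stored in F, so S must keep
# exactly the sparsity pattern F was computed from. One plausible failure mode
# here is that nlp.invAtA is reset with a dense identity and refilled via
# 5-arg mul!, so its pattern need not match the one lu saw at construction.
# A hedged sketch of the intended reuse pattern (illustrative data, not the
# package code):
using LinearAlgebra, SparseArrays
A = sprand(3, 10, 0.8)
τ = 1e-14
S = sparse(A * A' + τ * I)  # the sparsity pattern of S is fixed here
F = lu(S)                   # symbolic + numeric factorization
y = F \ rand(3)             # solves (A * A' + τI) y = b
# Later, with new values but the *same* structure:
S.nzval .*= 2.0             # stand-in for "recompute values in place"
lu!(F, S)                   # refactorize, reusing the symbolic analysis
y2 = F \ rand(3)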
From 281837e960e7afe954892413411573fb367c94b0 Mon Sep 17 00:00:00 2001
From: tmigot
Date: Sat, 29 May 2021 06:19:09 +0200
Subject: [PATCH 4/4] add new functions [warning: tests fail]

---
 benchmark/figures_benchmark_no_tex.jl |  8 ++--
 src/model-Fletcherpenaltynlp.jl       | 62 ++++++++++++++++++++++-----
 test/problems/controlinvestment.jl    | 18 ++++++++
 test/speedtest-val1.jl                | 47 ++++++++++----------
 test/speedtest.jl                     | 62 +++++++++++++++++++++++----
 5 files changed, 150 insertions(+), 47 deletions(-)
 create mode 100644 test/problems/controlinvestment.jl

diff --git a/benchmark/figures_benchmark_no_tex.jl b/benchmark/figures_benchmark_no_tex.jl
index de8378a..12181be 100644
--- a/benchmark/figures_benchmark_no_tex.jl
+++ b/benchmark/figures_benchmark_no_tex.jl
@@ -13,7 +13,7 @@ using Random
 gr() #pgfplots()
 
 function figure()
-  names = ["2021-05-21__FPS_FPSFF_knitro_ipopt_45"]
+  names = ["2021-05-27_all__FPS_FPSFF_knitro_ipopt_105"]
   tod = string(today()) * ""
 
   dsolvers = [:ipopt, :knitro, :FPS, :FPSFF]
@@ -53,10 +53,10 @@ function figure()
       #p = performance_profile(stats, cost)
       p = performance_profile(
         stats,
-        cost, 
+        cost,
         title = perf_title(col),
         legend = :bottomright,
-        linestyles=styles,
+        linestyles = styles,
       )
       #savefig("$(tod)_$(list)_perf-$col.tex")
       png("$(tod)_$(list)_perf-$col")
@@ -70,7 +70,7 @@ function figure()
     ipairs = 0
     # combinations of solvers 2 by 2
     for i = 2:nsolvers
-      for j = 1:(i-1)
+      for j = 1:(i - 1)
         ipairs += 1
         pair = [solvers[i], solvers[j]]
         dfs = (stats[solver] for solver in pair)
diff --git a/src/model-Fletcherpenaltynlp.jl b/src/model-Fletcherpenaltynlp.jl
index 7035c74..f2c9a86 100644
--- a/src/model-Fletcherpenaltynlp.jl
+++ b/src/model-Fletcherpenaltynlp.jl
@@ -129,7 +129,7 @@ function FletcherPenaltyNLP(
   )
 
   Arows, Acols = jac_structure(nlp)
-  Avals = zeros(S, nlp.meta.nnzj)
+  Avals = rand(S, nlp.meta.nnzj)
   Hrows, Hcols = hess_structure(nlp)
   Hvals = Vector{S}(undef, nlp.meta.nnzh)
 
@@ -210,7 +210,7 @@ function FletcherPenaltyNLP(
   counters = Counters()
 
   Arows, Acols = jac_structure(nlp)
-  Avals = zeros(S, nlp.meta.nnzj)
+  Avals = rand(S, nlp.meta.nnzj)
   Hrows, Hcols = hess_structure(nlp)
   Hvals = Vector{S}(undef, nlp.meta.nnzh)
 
@@ -427,8 +427,6 @@ function hess_structure!(
   return rows, cols
 end
 
-# function _compute_pseudo_inverse!(nlp, x)
-
 function hess_coord!(
   nlp::FletcherPenaltyNLP{S, Tt, Val{1}, P, QDS},
   x::AbstractVector{T},
@@ -494,6 +492,47 @@ function hess_coord!(
   return vals
 end
 
+function _compute_pseudo_inverse_dense!(nlp, x::AbstractVector{T}) where {T}
+  nvar = nlp.meta.nvar
+  ncon = nlp.nlp.meta.ncon
+  A = jac(nlp.nlp, x)
+  # put in a separate function
+  # Im = Matrix(I, ncon, ncon)
+  τ = T(max(nlp.δ, 1e-14))
+  # invAtA = pinv(Matrix(A * A') + τ * Im) #inv(Matrix(A*A') + τ * Im) # Uh... wait!
+  nlp.invAtA .= Matrix(I, ncon, ncon) # spdiagm(ones(ncon)) #
+  mul!(nlp.invAtA, A, A', 1, τ)
+  # lu!(nlp.F, nlp.invAtA)
+  nlp.invAtA = pinv(Matrix(nlp.invAtA)) # in-place
+  # AinvAtA = A' * invAtA
+  # Pt = AinvAtA * A
+  mul!(nlp.Ax, nlp.invAtA, A)
+  # ldiv!(nlp.Ax, nlp.F, A)
+  mul!(nlp.Pt, A', nlp.Ax)
+  return nlp.Pt
+end
+
+function _compute_pseudo_inverse_sparse!(nlp, x::AbstractVector{T}) where {T}
+  nvar = nlp.meta.nvar
+  ncon = nlp.nlp.meta.ncon
+  jac_coord!(nlp.nlp, x, nlp.Avals)
+  A = sparse(nlp.Arows, nlp.Acols, nlp.Avals, ncon, nvar)
+  # nlp.Ax .= jac(nlp.nlp, x) # do in-place?
+  # A = nlp.Ax
+  # put in a separate function
+  # Im = Matrix(I, ncon, ncon)
+  τ = T(max(nlp.δ, 1e-14))
+  # invAtA = pinv(Matrix(A * A') + τ * Im) #inv(Matrix(A*A') + τ * Im) # Uh... wait!
+  nlp.invAtA .= Matrix(I, ncon, ncon) # spdiagm(ones(ncon)) #
+  mul!(nlp.invAtA, A, A', 1, τ)
+  lu!(nlp.F, nlp.invAtA)
+  # AinvAtA = A' * invAtA
+  # Pt = AinvAtA * A
+  ldiv!(nlp.Ax, nlp.F, A)
+  mul!(nlp.Pt, A', nlp.Ax)
+  return A, nlp.Pt
+end
+
 function hess_coord!(
   nlp::FletcherPenaltyNLP{S, Tt, Val{2}, P, QDS},
   x::AbstractVector{T},
@@ -517,8 +556,9 @@ function hess_coord!(
   # Hs = nlp.Hs
 
   if ncon > 0
-    # A = jac(nlp.nlp, x) # if used, this should probably be preallocated
-    A = sparse(nlp.Arows, nlp.Acols, jac_coord!(nlp.nlp, x, nlp.Avals), ncon, nvar)
+    A = jac(nlp.nlp, x) # if used, this should probably be preallocated
+    # jac_coord!(nlp.nlp, x, nlp.Avals)
+    # A = sparse(nlp.Arows, nlp.Acols, nlp.Avals, ncon, nvar)
     # nlp.Ax .= jac(nlp.nlp, x) # do in-place?
     # A = nlp.Ax
     # put in a separate function
@@ -527,14 +567,16 @@ function hess_coord!(
     # invAtA = pinv(Matrix(A * A') + τ * Im) #inv(Matrix(A*A') + τ * Im) # Uh... wait!
     nlp.invAtA .= Matrix(I, ncon, ncon) # spdiagm(ones(ncon)) #
     mul!(nlp.invAtA, A, A', 1, τ)
-    lu!(nlp.F, nlp.invAtA)
-    # nlp.invAtA = pinv(Matrix(nlp.invAtA)) # in-place
+    # lu!(nlp.F, nlp.invAtA)
+    nlp.invAtA = pinv(Matrix(nlp.invAtA)) # in-place
     # AinvAtA = A' * invAtA
     # Pt = AinvAtA * A
-    # mul!(nlp.Ax, nlp.invAtA, A)
-    ldiv!(nlp.Ax, nlp.F, A)
+    mul!(nlp.Ax, nlp.invAtA, A)
+    # ldiv!(nlp.Ax, nlp.F, A)
     mul!(nlp.Pt, A', nlp.Ax)
     # end of separate function
+    # A, nlp.Pt = _compute_pseudo_inverse_dense!(nlp, x) # doesn't work...
+    # A, Pt = _compute_pseudo_inverse_sparse!(nlp, x)
 
     # mul!(C, A, B, α, β) -> C = A * B * α + C * β
diff --git a/test/problems/controlinvestment.jl b/test/problems/controlinvestment.jl
new file mode 100644
index 0000000..d7e03fb
--- /dev/null
+++ b/test/problems/controlinvestment.jl
@@ -0,0 +1,18 @@
+function controlinvestment_autodiff(args...; n::Int = 200, type::Val{T} = Val(Float64), kwargs...) where T
+  N = div(n, 2)
+  h = 1 / N
+  x0 = 1.0
+  gamma = 3
+  function f(y)
+    x, u = y[1:N], y[(N + 1):end]
+    return 0.5 * h * sum((u[k] - 1) * x[k] + (u[k + 1] - 1) * x[k + 1] for k = 1:(N - 1))
+  end
+  function c(y)
+    x, u = y[1:N], y[(N + 1):end]
+    return [x[k + 1] - x[k] - 0.5 * h * gamma * (u[k] * x[k] + u[k + 1] * x[k + 1]) for k = 1:(N - 1)]
+  end
+  lvar = vcat(x0, -Inf * ones(T, N - 1), zeros(T, N))
+  uvar = vcat(x0, Inf * ones(T, N - 1), ones(T, N))
+  xi = vcat(ones(T, N), zeros(T, N))
+  return ADNLPModel(f, xi, lvar, uvar, c, zeros(T, N - 1), zeros(T, N - 1), name = "controlinvestment_autodiff")
+end
\ No newline at end of file
diff --git a/test/speedtest-val1.jl b/test/speedtest-val1.jl
index cb56684..5641a48 100644
--- a/test/speedtest-val1.jl
+++ b/test/speedtest-val1.jl
@@ -101,46 +101,45 @@ rhs2 = cons(fpnlp.nlp, xr)
 #=
 Iterative
 [ Info: 3 Optml 4.0e-02 6.3e-09 2.6e-08 4.0e+00 Optimal 0.0e+00
-  80.473073 seconds (116.98 M allocations: 5.798 GiB, 7.36% gc time, 0.06% compilation time)
+  61.021040 seconds (123.37 M allocations: 6.023 GiB, 8.16% gc time, 44.67% compilation time)
   Counters:
      obj: ███████⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 74 grad: ███████⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 74 cons: ███████⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 80
     jcon: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 jgrad: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 jac: ██⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 16
    jprod: ███████████████████⋅ 231 jtprod: ████████████████████ 245 hess: ███⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 32
    hprod: ███⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 36 jhess: ██⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 16 jhprod: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0
- 453.888 ns (3 allocations: 752 bytes)
- 11.843 μs (42 allocations: 4.22 KiB)
- 8.695 μs (45 allocations: 4.97 KiB)
- 28.684 μs (143 allocations: 14.69 KiB)
- 44.942 μs (253 allocations: 20.31 KiB)
+ 340.990 ns (3 allocations: 752 bytes)
+ 6.955 μs (42 allocations: 4.22 KiB)
+ 7.171 μs (45 allocations: 4.97 KiB)
+ 22.516 μs (143 allocations: 14.62 KiB)
+ 36.162 μs (242 allocations: 18.55 KiB)
  Counters:
   obj: █⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 1 grad: █⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 1 cons: █⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 1
- jcon: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 jgrad: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 jac: ██⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 58830
- jprod: █████████⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 307527 jtprod: █████████⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 307527 hess: ██⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 58830
- hprod: ████████████████████ 750118 jhess: ██⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 58830 jhprod: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0
- 23.608 μs (91 allocations: 6.14 KiB)
- 10.482 μs (76 allocations: 4.86 KiB)
- 
+ jcon: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 jgrad: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 jac: ██⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 126405
+ jprod: ██████⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 571434 jtprod: ██████⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 571434 hess: ██⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 126405
+ hprod: ████████████████████ 2251494 jhess: ██⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 126405 jhprod: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0
+ 10.956 μs (91 allocations: 6.14 KiB)
+ 8.645 μs (76 allocations: 4.86 KiB)
+
 =#
 
 #=
 LDLt
 [ Info: 3 Optml 4.0e-02 6.3e-09 2.6e-08 4.0e+00 Optimal 0.0e+00
-  9.716252 seconds (6.88 M allocations: 397.899 MiB, 4.25% gc time, 83.01% compilation time)
+  54.176399 seconds (123.43 M allocations: 6.026 GiB, 8.68% gc time, 42.33% compilation time)
   Counters:
      obj: ███████⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 74 grad: ███████⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 74 cons: ███████⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 80
     jcon: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 jgrad: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 jac: ██⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 16
    jprod: ███████████████████⋅ 231 jtprod: ████████████████████ 245 hess: ███⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 32
    hprod: ███⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 36 jhess: ██⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 16 jhprod: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0
- 439.389 ns (3 allocations: 752 bytes)
- 9.328 μs (42 allocations: 4.22 KiB)
- 9.090 μs (45 allocations: 4.97 KiB)
- 30.408 μs (143 allocations: 14.69 KiB)
- 39.356 μs (197 allocations: 17.80 KiB)
+ 351.673 ns (3 allocations: 752 bytes)
+ 7.003 μs (42 allocations: 4.22 KiB)
+ 7.152 μs (45 allocations: 4.97 KiB)
+ 22.705 μs (144 allocations: 14.73 KiB)
+ 30.640 μs (199 allocations: 18.05 KiB)
  Counters:
   obj: █⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 1 grad: █⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 1 cons: █⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 1
- jcon: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 jgrad: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 jac: ██⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 54853
- jprod: ████⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 159219 jtprod: ████⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 159219 hess: ██⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 54852
- hprod: ████████████████████ 818082 jhess: ██⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 54852 jhprod: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0
- 1.608 μs (27 allocations: 3.27 KiB)
- 11.167 μs (64 allocations: 5.77 KiB)
- 
+ jcon: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 jgrad: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 jac: ██⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 128831
+ jprod: ████⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 369156 jtprod: ████⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 369156 hess: ██⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 128830
+ hprod: ████████████████████ 2245498 jhess: ██⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 128830 jhprod: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0
+ 1.277 μs (27 allocations: 3.27 KiB)
+ 5.765 μs (64 allocations: 5.77 KiB)
 =#
diff --git a/test/speedtest.jl b/test/speedtest.jl
index 9a071f4..7157ba9 100644
--- a/test/speedtest.jl
+++ b/test/speedtest.jl
@@ -26,7 +26,12 @@ n = 300
 nlp = ADNLPModel(x -> dot(x, x), zeros(n), x -> [sum(x) - 1], zeros(1), zeros(1))
 =#
 
+include("problems/controlinvestment.jl")
+
+nlp = controlinvestment_autodiff(n = 1000)
+
 # problem with the 2nd approximation:
+#=
 @time fps_solve(
   nlp,
   nlp.meta.x0,
@@ -41,6 +46,7 @@ n = 300
 )
 print(nlp.counters)
 reset!(nlp)
+=#
 #=
 @time fps_solve(
   nlp,
@@ -72,6 +78,7 @@ reset!(nlp)
 print(nlp.counters)
 reset!(nlp)
 =#
+
 qds = FletcherPenaltyNLPSolver.IterativeSolver(nlp, 0.0)
 # qds = FletcherPenaltyNLPSolver.LDLtSolver(nlp, 0.0)
 fpnlp = FletcherPenaltyNLP(
@@ -97,25 +104,40 @@ rhs2 = cons(fpnlp.nlp, xr)
 @btime FletcherPenaltyNLPSolver.solve_two_mixed(fpnlp, xr, rhs1, rhs2);
 # 19.561 μs (76 allocations: 4.86 KiB)
 
+@show "Compare jacobian formation"
+using SparseArrays
+x = rand(fpnlp.meta.nvar);
+@btime fpnlp.Ax = jac(fpnlp.nlp, x);
+@btime fpnlp.Ax = sparse(fpnlp.Arows, fpnlp.Acols, jac_coord!(fpnlp.nlp, x, fpnlp.Avals), fpnlp.nlp.meta.ncon, fpnlp.meta.nvar);
+x2 = rand(fpnlp.meta.nvar);
+@btime jac_coord!(fpnlp.nlp, x2, fpnlp.Ax.nzval)
+
+nothing
 #=
 Iterative
 [ Info: 3 Optml 4.0e-02 6.9e-09 2.7e-08 4.0e+00 Optimal 0.0e+00
-  31.888646 seconds (111.09 M allocations: 5.458 GiB, 9.57% gc time, 0.07% compilation time)
+  68.432747 seconds (128.60 M allocations: 6.658 GiB, 7.80% gc time, 41.55% compilation time)
   Counters:
      obj: ███████⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 108 grad: ███████⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 108 cons: ███████⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 114
     jcon: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 jgrad: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 jac: ██⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 22
    jprod: ███████████████████⋅ 333 jtprod: ████████████████████ 354 hess: ███⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 44
    hprod: ███⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 50 jhess: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 jhprod: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0
- 340.710 ns (3 allocations: 752 bytes)
- 6.863 μs (42 allocations: 4.22 KiB)
- 7.126 μs (45 allocations: 4.97 KiB)
- 11.411 μs (81 allocations: 8.36 KiB)
- 20.745 μs (135 allocations: 11.08 KiB)
+ 345.986 ns (3 allocations: 752 bytes)
+ 6.803 μs (42 allocations: 4.22 KiB)
+ 7.026 μs (45 allocations: 4.97 KiB)
+ 14.578 μs (79 allocations: 7.30 KiB)
+ 20.583 μs (135 allocations: 11.08 KiB)
  Counters:
   obj: █⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 1 grad: █⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 1 cons: █⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 1
- jcon: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 jgrad: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 jac: ███⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 361543
- jprod: ███████⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 845191 jtprod: ███████⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 845191 hess: ███⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 361543
- hprod: ████████████████████ 2604602 jhess: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 jhprod: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0
+ jcon: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 jgrad: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 jac: ███⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 233448
+ jprod: █████⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 557155 jtprod: █████⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 557155 hess: ███⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 233448
+ hprod: ████████████████████ 2301784 jhess: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 jhprod: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0
+ 11.581 μs (91 allocations: 6.14 KiB)
+ 9.142 μs (76 allocations: 4.86 KiB)
+"Compare jacobian formation" = "Compare jacobian formation"
+ 907.122 ns (5 allocations: 592 bytes)
+ 2.490 μs (24 allocations: 2.75 KiB)
+
=#
 #=
 LDLt
 ...
   jprod: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 jtprod: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 hess: ███⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 383682
   hprod: ████████████████████ 2842812 jhess: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 jhprod: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0
 =#
+
+#=
+ControlInvestment n = 1000
+Iterative:
+=#
+#=
+10.253 μs (6 allocations: 800 bytes)
+ 24.298 ms (2641 allocations: 84.01 MiB)
+ 25.348 ms (2645 allocations: 84.01 MiB)
+ 15.623 s (3354500 allocations: 22.36 GiB)
+ 3.282 s (515864 allocations: 12.28 GiB)
+ Counters:
+  obj: █⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 1 grad: █⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 1 cons: █⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 1
+ jcon: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 jgrad: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 jac: █⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 4
+ jprod: ████████████████████ 7894 jtprod: ████████████████████ 7894 hess: █⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 4
+ hprod: ████⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 1378 jhess: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 jhprod: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0
+ 3.450 s (521033 allocations: 12.38 GiB)
+ 3.140 s (520511 allocations: 12.37 GiB)
+"Compare jacobian formation" = "Compare jacobian formation"
+ 3.624 ms (511 allocations: 16.42 MiB)
+ 23.906 ms (535 allocations: 35.48 MiB)
+=#
\ No newline at end of file
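# Side note on the "Compare jacobian formation" timings above: the pattern
# being measured builds the sparse Jacobian once from its structure, then
# refreshes only the stored values at a new point. A hedged sketch with a toy
# ADNLPModel (illustrative problem, not the test's model):
using ADNLPModels, NLPModels, SparseArrays
nlp = ADNLPModel(x -> sum(x .^ 2), ones(4), x -> [sum(x) - 1.0], zeros(1), zeros(1))
rows, cols = jac_structure(nlp)
vals = jac_coord(nlp, nlp.meta.x0)
Ax = sparse(rows, cols, vals, nlp.meta.ncon, nlp.meta.nvar)
# Cheap refresh at a new point: write straight into the CSC value buffer.
# This is only valid if the (rows, cols) order matches the CSC storage order
# of Ax and no entries were merged or dropped when Ax was built -- presumably
# why PATCH 4/4 initializes Avals with rand instead of zeros, since exact
# zeros can vanish when the matrix is re-sparsified.
jac_coord!(nlp, rand(nlp.meta.nvar), Ax.nzval)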