diff --git a/Project.toml b/Project.toml
index 66d986a..23b0616 100644
--- a/Project.toml
+++ b/Project.toml
@@ -37,14 +37,11 @@ julia = "^1.6.0"
 
 [extras]
 ADNLPModels = "54578032-b7ea-4c30-94aa-7cbd1cce6c9a"
-BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
 ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210"
-NLPModelsIpopt = "f4238b75-b362-5c4c-b852-0801c9a21d71"
 NLPModelsTest = "7998695d-6960-4d3a-85c4-e1bceb8cd856"
-OptimizationProblems = "5049e819-d29b-5fba-b941-0eee7e64c1c6"
 Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
 SolverTest = "4343dc35-3317-4c6e-8877-f0cc8502c90e"
 Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
 
 [targets]
-test = ["ADNLPModels", "BenchmarkTools", "ForwardDiff", "NLPModelsIpopt", "NLPModelsTest", "OptimizationProblems", "Random", "SolverTest", "Test"]
+test = ["ADNLPModels", "ForwardDiff", "NLPModelsTest", "Random", "SolverTest", "Test"]
diff --git a/test/nlpmodelstest.jl b/test/nlpmodelstest.jl
index e993df7..a412be3 100644
--- a/test/nlpmodelstest.jl
+++ b/test/nlpmodelstest.jl
@@ -1,11 +1,11 @@
 function test_fps_model(T, σ, vivi, qds_type)
-  nlp1 = ADNLPModel(x -> dot(x, x), zeros(T, 10), x -> [sum(x)], ones(T, 1), ones(T, 1))
+  nlp1 = ADNLPModel(x -> dot(x, x), zeros(T, 2), x -> [sum(x)], ones(T, 1), ones(T, 1))
   qds = FletcherPenaltySolver.eval(qds_type)(nlp1, T(0))
   return FletcherPenaltyNLP(nlp1, T(σ), vivi, qds = qds)
 end
 
 function test_fps_lin_model(T, σ, vivi, qds_type, use_linear = false)
-  nvar = 10
+  nvar = 2
   nlp1 =
     ADNLPModel(x -> dot(x, x), zeros(T, nvar), sparse(ones(T, 1, nvar)), ones(T, 1), ones(T, 1))
   qds = FletcherPenaltySolver.eval(qds_type)(nlp1, T(0), explicit_linear_constraints = use_linear)
diff --git a/test/runtests.jl b/test/runtests.jl
index b3895c6..fe8774f 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -1,16 +1,10 @@
-using LinearAlgebra, LinearOperators, Random, SparseArrays
+using LinearAlgebra, Logging, Random, SparseArrays, Stopping, Test
 
 #JSO packages
-using ADNLPModels, Logging, NLPModels, NLPModelsTest, SolverCore, SolverTest
-using JSOSolvers, NLPModelsIpopt, NLPModelsKnitro
+using ADNLPModels, JSOSolvers, NLPModels, NLPModelsTest, SolverCore, SolverTest
 
 Random.seed!(1234)
 
-using Stopping, StoppingInterface
-
-#For Test only
-using Test
-
 #This package
 using FletcherPenaltySolver
 
@@ -38,11 +32,15 @@ using FletcherPenaltySolver
   @test stats.iter == 4
 end
 
-include("restart.jl")
+@testset "Test restart" begin
+  include("restart.jl")
+end
 
 include("nlpmodelstest.jl")
 
-include("unit-test.jl")
+@testset "Unit tests" begin
+  include("unit-test.jl")
+end
 
 #Test the solvers:
 mutable struct DummyModel{S, T} <: AbstractNLPModel{S, T}
@@ -65,70 +63,33 @@ end
   )
 end
 
-@testset "Problem using KKT optimality" begin
-  f(x) = (x[1] - 1)^2 + 4 * (x[2] - x[1]^2)^2
-  c(x) = [x[1]^2 + x[2]^2 - 2]
-  T = Float64
-  x0 = T[-1.2; 1.0]
-  ℓ, u = zeros(T, 2), 2 * ones(T, 2)
-  nlp = ADNLPModel(f, x0, ℓ, u, c, zeros(1), zeros(1))
-
-  ϵ = eps(T)^T(1 / 4)
-
-  ng0 = norm(grad(nlp, nlp.meta.x0))
-
-  stp = NLPStopping(nlp)
-  stats = fps_solve(stp)
+@testset "Test usual problems and difficult cases" begin
+  @testset "Problem using KKT optimality" begin
+    f(x) = (x[1] - 1)^2 + 4 * (x[2] - x[1]^2)^2
+    c(x) = [x[1]^2 + x[2]^2 - 2]
+    T = Float64
+    x0 = T[-1.2; 1.0]
+    ℓ, u = zeros(T, 2), 2 * ones(T, 2)
+    nlp = ADNLPModel(f, x0, ℓ, u, c, zeros(1), zeros(1))
+
+    ϵ = eps(T)^T(1 / 4)
+
+    ng0 = norm(grad(nlp, nlp.meta.x0))
+
+    stp = NLPStopping(nlp)
+    stats = fps_solve(stp)
+
+    @test eltype(stats.solution) == T
+    @test stats.objective isa T
+    @test stats.dual_feas isa T
+    @test stats.primal_feas isa T
+    @test stats.dual_feas < ϵ * ng0 + ϵ
+    @test isapprox(stats.solution, ones(T, 2), atol = ϵ * ng0 * 10)
+  end
 
-  @test eltype(stats.solution) == T
-  @test stats.objective isa T
-  @test stats.dual_feas isa T
-  @test stats.primal_feas isa T
-  @test stats.dual_feas < ϵ * ng0 + ϵ
-  @test isapprox(stats.solution, ones(T, 2), atol = ϵ * ng0 * 10)
+  include("test-2.jl")
+  include("rank-deficient.jl")
 end
 
-#On usual test problems
-include("test-2.jl")
-
-#Rank-deficient problems
-include("rank-deficient.jl")
-
 # Solver tests
 include("solvertest.jl")
-
-@testset "Problems with explicit linear constraints" begin
-  nlp = ADNLPModels.ADNLPModel(
-    x -> 0.0,
-    [-1.2; 1.0],
-    [1],
-    [1],
-    [-1.0],
-    x -> [10 * (x[2] - x[1]^2)],
-    [-1.0, 0.0],
-    [-1.0, 0.0],
-    name = "mgh01feas";
-  )
-  stats = fps_solve(nlp, explicit_linear_constraints = true)
-  @test norm(cons(nlp, stats.solution) - get_lcon(nlp)) ≤ 1e-10
-  @test stats.dual_feas ≤ 1e-10
-  @test stats.primal_feas ≤ 1e-10
-  @test stats.status == :first_order
-
-  nlp = ADNLPModels.ADNLPModel(
-    x -> 0.0,
-    [-1.2; 1.0],
-    [1],
-    [1],
-    [-1.0],
-    x -> [10 * (x[2] - x[1]^2)],
-    [-1.0, 1.0],
-    [-1.0, 1.0],
-    name = "mgh01feas-bis";
-  )
-  stats = fps_solve(nlp, explicit_linear_constraints = true)
-  @test norm(cons(nlp, stats.solution) - get_lcon(nlp)) ≤ 1e-10
-  @test stats.dual_feas ≤ 1e-10
-  @test stats.primal_feas ≤ 1e-10
-  @test stats.status == :first_order
-end
diff --git a/test/test-2.jl b/test/test-2.jl
index 6cba43a..e27c0bd 100644
--- a/test/test-2.jl
+++ b/test/test-2.jl
@@ -286,3 +286,39 @@ CUTEst FLT problem
   @test primal < 1e-6 * max(norm(nlp.meta.x0), 1.0)
   @test status == :first_order
 end
+
+@testset "Problems with explicit linear constraints" begin
+  nlp = ADNLPModels.ADNLPModel(
+    x -> 0.0,
+    [-1.2; 1.0],
+    [1],
+    [1],
+    [-1.0],
+    x -> [10 * (x[2] - x[1]^2)],
+    [-1.0, 0.0],
+    [-1.0, 0.0],
+    name = "mgh01feas";
+  )
+  stats = fps_solve(nlp, explicit_linear_constraints = true)
+  @test norm(cons(nlp, stats.solution) - get_lcon(nlp)) ≤ 1e-10
+  @test stats.dual_feas ≤ 1e-10
+  @test stats.primal_feas ≤ 1e-10
+  @test stats.status == :first_order
+
+  nlp = ADNLPModels.ADNLPModel(
+    x -> 0.0,
+    [-1.2; 1.0],
+    [1],
+    [1],
+    [-1.0],
+    x -> [10 * (x[2] - x[1]^2)],
+    [-1.0, 1.0],
+    [-1.0, 1.0],
+    name = "mgh01feas-bis";
+  )
+  stats = fps_solve(nlp, explicit_linear_constraints = true)
+  @test norm(cons(nlp, stats.solution) - get_lcon(nlp)) ≤ 1e-10
+  @test stats.dual_feas ≤ 1e-10
+  @test stats.primal_feas ≤ 1e-10
+  @test stats.status == :first_order
+end
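
Note (not part of the patch): for readers unfamiliar with the call pattern the relocated "mgh01feas" testset exercises, here is a minimal standalone sketch, assuming the test environment above (ADNLPModels, NLPModels, FletcherPenaltySolver) is installed. It reuses only constructs appearing in the diff: the ADNLPModel constructor that takes the linear constraint block in COO form (rows, cols, vals) ahead of the nonlinear constraint function, and the fps_solve keyword explicit_linear_constraints.

using ADNLPModels, FletcherPenaltySolver, LinearAlgebra, NLPModels

# Feasibility problem: constant objective, one linear constraint -x[1] = -1
# (COO triplet rows = [1], cols = [1], vals = [-1.0]) and one nonlinear
# constraint 10 * (x[2] - x[1]^2) = 0; bounds list the linear row first.
nlp = ADNLPModels.ADNLPModel(
  x -> 0.0,                     # constant objective: pure feasibility
  [-1.2; 1.0],                  # starting point
  [1],                          # linear constraint rows
  [1],                          # linear constraint cols
  [-1.0],                       # linear constraint values
  x -> [10 * (x[2] - x[1]^2)],  # nonlinear constraint
  [-1.0, 0.0],                  # lower bounds (linear row, then nonlinear)
  [-1.0, 0.0],                  # upper bounds
  name = "mgh01feas",
)

# The keyword tells fps_solve to treat the declared linear rows explicitly.
stats = fps_solve(nlp, explicit_linear_constraints = true)
stats.status == :first_order && norm(cons(nlp, stats.solution) - get_lcon(nlp)) ≤ 1e-10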