Skip to content

Commit

Permalink
🤖 Format .jl files (#138)
Browse files Browse the repository at this point in the history
Co-authored-by: tmigot <tmigot@users.noreply.github.com>
  • Loading branch information
github-actions[bot] and tmigot authored Nov 2, 2024
1 parent 0722837 commit 2f52220
Show file tree
Hide file tree
Showing 5 changed files with 33 additions and 55 deletions.
36 changes: 18 additions & 18 deletions .breakage/get_jso_users.jl
Original file line number Diff line number Diff line change
@@ -1,18 +1,18 @@
# List the JuliaSmoothOptimizers (JSO) packages that depend on the package
# named by the first command-line argument, and print them.
import GitHub, PkgDeps # both export users()

length(ARGS) >= 1 || error("specify at least one JSO package as argument")

# Collect the (extension-stripped) names of every repository in the JSO organization.
jso_repos, _ = GitHub.repos("JuliaSmoothOptimizers")
jso_names = [splitext(x.name)[1] for x in jso_repos]

# Validate that the requested package is one of the JSO repositories.
name = splitext(ARGS[1])[1]
name in jso_names || error("argument should be one of ", jso_names)

dependents = String[]
try
  # `global` is required: `try` introduces a new scope at the top level.
  # Keep only dependents that are themselves JSO packages.
  global dependents = filter(x -> x in jso_names, PkgDeps.users(name))
catch e
  # package not registered; don't insert into dependents
end

println(dependents)
# List the JuliaSmoothOptimizers (JSO) packages that depend on the package
# named by the first command-line argument, and print them.
import GitHub, PkgDeps # both export users()

length(ARGS) >= 1 || error("specify at least one JSO package as argument")

# Collect the (extension-stripped) names of every repository in the JSO organization.
jso_repos, _ = GitHub.repos("JuliaSmoothOptimizers")
jso_names = [splitext(x.name)[1] for x in jso_repos]

# Validate that the requested package is one of the JSO repositories.
name = splitext(ARGS[1])[1]
name in jso_names || error("argument should be one of ", jso_names)

dependents = String[]
try
  # `global` is required: `try` introduces a new scope at the top level.
  # Keep only dependents that are themselves JSO packages.
  global dependents = filter(x -> x in jso_names, PkgDeps.users(name))
catch e
  # package not registered; don't insert into dependents
end

println(dependents)
10 changes: 1 addition & 9 deletions src/feasibility.jl
Original file line number Diff line number Diff line change
Expand Up @@ -205,15 +205,7 @@ using `lsmr` method from `Krylov.jl`.
- `infeasible`: `true` if the problem is infeasible.
- `solved`: `true` if the problem has been successfully solved.
"""
function TR_lsmr(
solver,
cz,
Jz,
ctol::AbstractFloat,
Δ::T,
normcz::AbstractFloat,
Jd,
) where {T}
function TR_lsmr(solver, cz, Jz, ctol::AbstractFloat, Δ::T, normcz::AbstractFloat, Jd) where {T}
Krylov.solve!(
solver,
Jz,
Expand Down
28 changes: 9 additions & 19 deletions src/model-Fletcherpenaltynlp.jl
Original file line number Diff line number Diff line change
Expand Up @@ -284,14 +284,7 @@ end
Redefine the NLPModel function `hprod` to account for Lagrange multiplier of size < ncon.
"""
function hprod_nln!(
nlp::FletcherPenaltyNLP{S, T},
x,
y,
v,
Hv;
obj_weight = one(S),
) where {S, T}
function hprod_nln!(nlp::FletcherPenaltyNLP{S, T}, x, y, v, Hv; obj_weight = one(S)) where {S, T}
return if nlp.explicit_linear_constraints & (nlp.meta.ncon > 0)
nlp.lag_mul .= zero(S)
nlp.lag_mul[nlp.meta.nln] .= y
Expand Down Expand Up @@ -327,7 +320,12 @@ end
Redefine the NLPModel function `hprod` to account for Lagrange multiplier of size < ncon.
"""
function hess_nln(nlp::FletcherPenaltyNLP{S, T}, x::AbstractVector, y; obj_weight = one(S)) where {S, T}
function hess_nln(
nlp::FletcherPenaltyNLP{S, T},
x::AbstractVector,
y;
obj_weight = one(S),
) where {S, T}
return if nlp.explicit_linear_constraints & (nlp.meta.ncon > 0)
nlp.lag_mul .= zero(S)
nlp.lag_mul[nlp.meta.nln] .= y
Expand Down Expand Up @@ -371,11 +369,7 @@ function obj(nlp::FletcherPenaltyNLP{T, S}, x::AbstractVector) where {T, S}
return fx
end

function grad!(
nlp::FletcherPenaltyNLP{T, S},
x::AbstractVector,
gx::AbstractVector,
) where {T, S}
function grad!(nlp::FletcherPenaltyNLP{T, S}, x::AbstractVector, gx::AbstractVector) where {T, S}
nvar = get_nvar(nlp)
@lencheck nvar x gx
increment!(nlp, :neval_grad)
Expand Down Expand Up @@ -406,11 +400,7 @@ function grad!(
return gx
end

function objgrad!(
nlp::FletcherPenaltyNLP{T, S},
x::AbstractVector,
gx::AbstractVector,
) where {T, S}
function objgrad!(nlp::FletcherPenaltyNLP{T, S}, x::AbstractVector, gx::AbstractVector) where {T, S}
nvar = get_nvar(nlp)
@lencheck nvar x gx
increment!(nlp, :neval_obj)
Expand Down
6 changes: 1 addition & 5 deletions src/parameters.jl
Original file line number Diff line number Diff line change
Expand Up @@ -261,11 +261,7 @@ mutable struct FPSSSolver{
}
end

function FPSSSolver(
nlp::AbstractNLPModel{T, S},
x0::S = nlp.meta.x0;
kwargs...,
) where {T, S}
function FPSSSolver(nlp::AbstractNLPModel{T, S}, x0::S = nlp.meta.x0; kwargs...) where {T, S}
cx0, gx0 = cons(nlp, x0), grad(nlp, x0)
#Tanj: how to handle stopping criteria where tol_check depends on the State?
Fptc(atol, rtol, opt0) =
Expand Down
8 changes: 4 additions & 4 deletions test/runtests.jl
Original file line number Diff line number Diff line change
Expand Up @@ -71,14 +71,14 @@ end
x0 = T[-1.2; 1.0]
ℓ, u = zeros(T, 2), 2 * ones(T, 2)
nlp = ADNLPModel(f, x0, ℓ, u, c, zeros(1), zeros(1))

ϵ = eps(T)^T(1 / 4)

ng0 = norm(grad(nlp, nlp.meta.x0))

stp = NLPStopping(nlp)
stats = fps_solve(stp)

@test eltype(stats.solution) == T
@test stats.objective isa T
@test stats.dual_feas isa T
Expand Down

0 comments on commit 2f52220

Please sign in to comment.