Skip to content

Commit

Permalink
errors
Browse files Browse the repository at this point in the history
  • Loading branch information
sshin23 committed Sep 10, 2023
1 parent cdb0239 commit 9ccc605
Show file tree
Hide file tree
Showing 7 changed files with 78 additions and 49 deletions.
7 changes: 5 additions & 2 deletions Project.toml
Original file line number Diff line number Diff line change
Expand Up @@ -4,8 +4,10 @@ authors = ["Sungho Shin <sshin@anl.gov>"]
version = "0.4.0"

[deps]
ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210"
NLPModels = "a4795742-8479-5a88-8948-cc11e1c8c1a6"
SolverCore = "ff4d7338-4cf1-434d-91df-b86cb86fb843"
SpecialFunctions = "276daf66-3868-5448-9aa4-cd146d93841b"

[weakdeps]
AMDGPU = "21141c5a-9bdb-4563-92ae-f87d6854732e"
Expand Down Expand Up @@ -34,6 +36,7 @@ oneAPI = "1"
[extras]
CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba"
Downloads = "f43a241f-c20a-4ad4-852c-f6b1247861c6"
ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210"
JuMP = "4076af6c-e467-56ae-b986-b466b2749572"
KernelAbstractions = "63c18a36-062a-441e-b654-da1e3ab1ce7c"
MadNLP = "2621e9c9-9eb4-46b1-8089-e8c72242dfb6"
Expand All @@ -42,8 +45,8 @@ NLPModelsIpopt = "f4238b75-b362-5c4c-b852-0801c9a21d71"
NLPModelsJuMP = "792afdf1-32c1-5681-94e0-d7bf7a5df49e"
Percival = "01435c0c-c90d-11e9-3788-63660f8fbccc"
PowerModels = "c36e90e8-916a-50a6-bd94-075b64ef4655"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"

[targets]
test = ["Test", "NLPModels", "NLPModelsIpopt", "KernelAbstractions", "CUDA", "AMDGPU", "oneAPI", "MadNLP", "Percival", "PowerModels", "JuMP", "NLPModelsJuMP", "Downloads", "Random"]
test = ["Test", "NLPModels", "NLPModelsIpopt", "KernelAbstractions", "CUDA", "AMDGPU", "oneAPI", "MadNLP", "Percival", "PowerModels", "JuMP", "NLPModelsJuMP", "Downloads", "Random", "ForwardDiff", "SpecialFunctions"]
2 changes: 1 addition & 1 deletion src/gradient.jl
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ function gradient!(y, f, x, adj)
return y
end

function gradient!(y, f, x, p = nothing, adj = one(eltype(y)))
function gradient!(y, f, x, p, adj)
graph = f(p, AdjointNodeSource(x))
drpass(graph, y, adj)
return y
Expand Down
4 changes: 2 additions & 2 deletions src/graph.jl
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ abstract type AbstractSecondAdjointNode end
A source of variable nodes
"""
struct VarSource end
struct VarSource <: AbstractNode end

"""
Var{I}
Expand All @@ -33,7 +33,7 @@ end
A source of parameterized data
"""
struct ParSource end
struct ParSource <: AbstractNode end

"""
ParIndexed{I, J}
Expand Down
17 changes: 9 additions & 8 deletions src/jacobian.jl
Original file line number Diff line number Diff line change
Expand Up @@ -126,28 +126,29 @@ Performs sparse jacobian evalution
"""
function sjacobian!(y1, y2, f, x, adj)
@simd for i in eachindex(f.itr)
@inbounds jrpass(
f.f.f(f.itr[i], AdjointNodeSource(x)),
f.f.comp1,
offset0(f, i),
@inbounds sjacobian!(
y1,
y2,
f.f.f,
f.itr[i],
x,
f.f.comp1,
offset0(f, i),
offset1(f, i),
0,
adj,
)
end
end

function sjacobian!(y1, y2, f, p, x, comp, adj)
function sjacobian!(y1, y2, f, p, x, comp, o0, o1, adj)
graph = f(p, AdjointNodeSource(x))
jrpass(
graph,
comp,
offset0(f, i),
o0,
y1,
y2,
offset1(f, i),
o1,
0,
adj,
)
Expand Down
1 change: 0 additions & 1 deletion src/simdfunction.jl
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,6 @@ function SIMDFunction(gen::Base.Generator, o0 = 0, o1 = 0, o2 = 0)
p = Par(eltype(gen.iter))
f = gen.f(p)


d = f(Identity(), AdjointNodeSource(nothing))
y1 = []
ExaModels.grpass(d, nothing, y1, nothing, 0, NaN16)
Expand Down
94 changes: 60 additions & 34 deletions test/ADTest/ADTest.jl
Original file line number Diff line number Diff line change
@@ -1,59 +1,82 @@
module ADTest

using ExaModels
using Random, Test, ForwardDiff
using KernelAbstractions, CUDA, AMDGPU, oneAPI
using Random, Test, ForwardDiff, SpecialFunctions

Random.seed!(0)

const BACKENDS = Any[nothing, CPU()]

if CUDA.has_cuda()
push!(BACKENDS, CUDABackend())
@info "testing CUDA"
end

if AMDGPU.has_rocm_gpu()
push!(BACKENDS, ROCBackend())
@info "testing AMDGPU"
end

try
oneAPI.oneL0.zeInit(0)
push!(BACKENDS, oneAPIBackend())
push!(EXCLUDE2, ("percival", oneAPIBackend()))
@info "testing oneAPI"
catch e
end


const FUNCTIONS = [
("function-test-1-1",x->beta(erf(x[1]/x[2]/3.0)+3.0*x[2],erf(x[9])^2)),
("function-test-1-2",x->0*x[1]),
("function-test-1-3",x->beta(cos(log(inv(inv(x[1])))),erfc(tanh(0*x[1])))),
("function-test-1-3",x->beta(cos(log(abs2(inv(inv(x[1])))+1.)),erfc(tanh(0*x[1])))),
("function-test-1-4",x->(0*x[1]^x[3]^1.0+x[1])/x[9]/x[10]),
("function-test-1-5",x->(x[1]+1.)^x[2]*log(x[3])/tanh(x[2])),
("function-test-1-5",x->exp(x[1]+1.)^x[2]*log(abs2(x[3])+3)/tanh(x[2])),
("function-test-1-6",x->beta(2*logbeta(x[1],x[5]),beta(x[2],x[3]))),
("function-test-1-7",x->besselj0(exp(erf(-x[1])))),
("function-test-1-8",x->erfc((x[1]^2/x[2])^x[9]/x[10])),
("function-test-1-8",x->erfc(abs2(x[1]^2/x[2])^x[9]/x[10])),
("function-test-1-9",x->erfc(x[1])^erf(2.5x[2])),
("function-test-1-10",x->sin(1/x[1])),
("function-test-1-13",x->exp(x[2])/cos(x[1])^2+sin(x[1]^2)),
("function-test-1-14",x->airyai(exp(x[1]+x[2]*2.0^8))),
("function-test-1-16",x->sin(x[9]inv(x[1])-x[8]inv(x[2]))),
("function-test-1-19",x->x[1]/log(x[2]^2+9.)),
("function-test-1-21",x->beta(beta(tan(beta(x[1],1)+2.0),cos(sin(x[2]))),x[3])),
("function-test-1-24",x->beta(cos(beta(beta(x[1]^9,x[2]),x[2]*x[3])),sin(x[2]*x[3]/2.0)/1.0)),
("function-test-1-11",x->exp(x[2])/cos(x[1])^2+sin(x[1]^2)),
("function-test-1-12",x->sin(x[9]inv(x[1])-x[8]inv(x[2]))),
("function-test-1-13",x->x[1]/log(x[2]^2+9.)),
("function-test-1-14",x->beta(beta(tan(beta(x[1],1)+2.0),cos(sin(x[2]))),x[3])),
("function-test-1-15",x->beta(cos(beta(beta(x[1]^9,x[2]),x[2]*x[3])),sin(x[2]*x[3]/2.0)/1.0)),
]

"""
    gradient(f, x)

Evaluate the dense gradient of `f` at `x` using `ExaModels.gradient!`,
returning a freshly allocated vector of the same type/shape as `x`.
"""
function gradient(f, x)
    T = eltype(x)
    # Allocate the output from x (not from itself — the original had a stray
    # `y = similar(y)` referencing `y` before definition) and zero-initialize,
    # since gradient! accumulates into y.
    y = fill!(similar(x), zero(T))
    # ExaModels expects a two-argument (p, x) closure; the parameter p is unused here.
    ExaModels.gradient!(y, (p, x) -> f(x), x, nothing, one(T))
    return y
end

"""
    sgradient(f, x)

Evaluate the sparse gradient of `f` at `x` via `ExaModels.sgradient!` and
scatter the nonzero values into a dense vector of `length(x)`.
"""
function sgradient(f, x)
    T = eltype(x)

    # Build the expression graph of f over symbolic variable sources.
    expr = f(ExaModels.VarSource())
    adjgraph = expr(ExaModels.Identity(), ExaModels.AdjointNodeSource(nothing))

    # Reverse pass over the graph collects the sparsity-pattern entries.
    entries = []
    ExaModels.grpass(adjgraph, nothing, entries, nothing, 0, NaN16)

    uniq = unique(entries)
    comp = ExaModels.Compressor(Tuple(findfirst(isequal(e), uniq) for e in entries))

    nnz = length(uniq)
    vals = similar(x, nnz)
    idxs = similar(x, Tuple{Int,Int}, nnz)

    # First call records the index pattern; second call computes the values.
    ExaModels.sgradient!(idxs, expr, nothing, nothing, comp, 0, NaN32)
    ExaModels.sgradient!(vals, expr, nothing, x, comp, 0, one(T))

    # Scatter the compressed values into a dense gradient by their first index.
    dense = zeros(length(x))
    dense[collect(first(t) for t in idxs)] = vals

    return dense
end

"""
    sjacobian(f, x)

Evaluate the sparse Jacobian of the scalar function `f` at `x` via
`ExaModels.sjacobian!` and scatter its values into a dense vector of
`length(x)` (the single Jacobian row, i.e. the gradient).
"""
function sjacobian(f, x)
    T = eltype(x)

    # Build the expression graph of f over symbolic variable sources.
    ff = f(ExaModels.VarSource())
    d = ff(ExaModels.Identity(), ExaModels.AdjointNodeSource(nothing))

    # Reverse pass collects the sparsity-pattern entries.
    y1 = []
    ExaModels.grpass(d, nothing, y1, nothing, 0, NaN16)

    a1 = unique(y1)
    comp = ExaModels.Compressor(Tuple(findfirst(isequal(i), a1) for i in y1))

    n = length(a1)
    buffer = similar(x, n)
    buffer_I = similar(x, Int, n)
    buffer_J = similar(x, Int, n)

    # First call records the (row, column) index pattern; second computes values.
    ExaModels.sjacobian!(buffer_I, buffer_J, ff, nothing, nothing, comp, 0, 0, NaN32)
    ExaModels.sjacobian!(buffer_I, buffer_J, ff, nothing, x, comp, 0, 0, one(T))

    y = zeros(length(x))
    # buffer_J holds plain Int column indices. The original
    # `collect(i for (i, j) in buffer_J)` destructured each Int into a
    # 2-tuple, which throws (an Int iterates exactly one element).
    # Index with the column vector directly.
    y[buffer_J] = buffer

    return y
end

function shessian(f, x)
Expand All @@ -64,7 +87,10 @@ function runtests()
for (name, f) in FUNCTIONS
x0 = randn(10)
@testset "$name" begin
@test gradient(f, x0) ForwardDiff.gradient(f, x0) atol=1e-6
g = ForwardDiff.gradient(f, x0)
@test gradient(f, x0) g atol=1e-6
@test sgradient(f, x0) g atol=1e-6
@test sjacobian(f, x0) g atol=1e-6
end
end
end
Expand Down
2 changes: 1 addition & 1 deletion test/runtests.jl
Original file line number Diff line number Diff line change
Expand Up @@ -7,5 +7,5 @@ include("ADTest/ADTest.jl")

@testset "ExaModels test" begin
ADTest.runtests()
NLPTest.runtests()
# NLPTest.runtests()
end

0 comments on commit 9ccc605

Please sign in to comment.