From 0abcf8c5e198f3eb16ec1fe7eb1808794e0accf9 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 26 Jan 2024 22:21:25 -0600 Subject: [PATCH] Format .jl files (#53) Co-authored-by: sshin23 --- docs/make.jl | 13 +- docs/src/jump.jl | 5 +- ext/ExaModelsJuMP.jl | 5 +- ext/ExaModelsMOI.jl | 265 +++++++++++++++++------------------- ext/ExaModelsOneAPI.jl | 16 ++- src/gradient.jl | 9 +- src/graph.jl | 4 +- src/hessian.jl | 13 +- src/jacobian.jl | 22 +-- src/nlp.jl | 14 +- src/simdfunction.jl | 2 +- src/utils.jl | 205 +++++++++++++--------------- test/JuMPTest/JuMPTest.jl | 24 +--- test/NLPTest/power.jl | 1 - test/UtilsTest/UtilsTest.jl | 20 +-- test/backends.jl | 1 - test/runtests.jl | 2 +- 17 files changed, 282 insertions(+), 339 deletions(-) diff --git a/docs/make.jl b/docs/make.jl index 6cf79c37..e9732fbd 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -24,8 +24,15 @@ if !(@isdefined _PAGES) end if !(@isdefined _JL_FILENAMES) - const _JL_FILENAMES = - ["guide.jl", "jump.jl", "quad.jl", "distillation.jl", "opf.jl", "gpu.jl", "performance.jl"] + const _JL_FILENAMES = [ + "guide.jl", + "jump.jl", + "quad.jl", + "distillation.jl", + "opf.jl", + "gpu.jl", + "performance.jl", + ] end for jl_filename in _JL_FILENAMES @@ -53,7 +60,7 @@ bib = CitationBibliography(joinpath(@__DIR__, "src", "refs.bib")) makedocs( - plugins=[bib], + plugins = [bib], sitename = "ExaModels.jl", modules = [ExaModels], authors = "Sungho Shin", diff --git a/docs/src/jump.jl b/docs/src/jump.jl index 8ff59f35..efdc2a76 100644 --- a/docs/src/jump.jl +++ b/docs/src/jump.jl @@ -12,7 +12,7 @@ jm = Model() jm, s[i = 1:N-2], 3x[i+1]^3 + 2x[i+2] - 5 + sin(x[i+1] - x[i+2])sin(x[i+1] + x[i+2]) + 4x[i+1] - - x[i]exp(x[i] - x[i+1]) - 3 == 0.0 + x[i]exp(x[i] - x[i+1]) - 3 == 0.0 ) @objective(jm, Min, sum(100(x[i-1]^2 - x[i])^2 + (x[i-1] - 1)^2 for i = 2:N)) @@ -24,6 +24,3 @@ em = ExaModel(jm) using NLPModelsIpopt result = ipopt(em) - - - diff --git a/ext/ExaModelsJuMP.jl b/ext/ExaModelsJuMP.jl index 5bb2978d..0312e4b1 100644 --- a/ext/ExaModelsJuMP.jl +++ b/ext/ExaModelsJuMP.jl @@ -3,11 +3,8 @@ module ExaModelsJuMP import ExaModels import JuMP -function ExaModels.ExaModel(jm::JuMP.GenericModel{T}; backend = nothing) where T +function ExaModels.ExaModel(jm::JuMP.GenericModel{T}; backend = nothing) where {T} return ExaModels.ExaModel(jm.moi_backend; backend = backend) end end # module ExaModelsJuMP - - - diff --git a/ext/ExaModelsMOI.jl b/ext/ExaModelsMOI.jl index 3c2e2e49..ac75004e 100644 --- a/ext/ExaModelsMOI.jl +++ b/ext/ExaModelsMOI.jl @@ -6,49 +6,38 @@ import MathOptInterface const MOI = MathOptInterface const MOIU = MathOptInterface.Utilities -const SUPPORTED_OBJ_TYPE = [ - :scalar_nonlinear, - :scalar_affine, - :scalar_quadratic, - :single_variable -] -const UNSUPPORTED_OBJ_TYPE = [ - :vector_nonlinear, - :vector_affine, - :vector_quadratic, - :vector_variables, -] - -const SUPPORTED_CONS_TYPE = [ - :moi_scalarnonlinearfunction, - :moi_scalaraffinefunction, - :moi_scalarquadraticfunction -] +const SUPPORTED_OBJ_TYPE = + [:scalar_nonlinear, :scalar_affine, :scalar_quadratic, :single_variable] +const UNSUPPORTED_OBJ_TYPE = + [:vector_nonlinear, :vector_affine, :vector_quadratic, :vector_variables] + +const SUPPORTED_CONS_TYPE = + [:moi_scalarnonlinearfunction, :moi_scalaraffinefunction, :moi_scalarquadraticfunction] const UNSUPPORTED_CONS_TYPE = [ :moi_vectoraffinefunction, :moi_vectornonlinearfunction, :moi_vectorquadraticfunction, - :moi_vectorofvariables + 
:moi_vectorofvariables, ] """ Abstract data structure for storing expression tree and data arrays -""" +""" abstract type AbstractBin end struct Bin{E,P,I} <: AbstractBin head::E data::P - inner::I + inner::I end struct BinNull <: AbstractBin end function update_bin!(bin, e, p) - if _update_bin!(bin,e,p) # if update succeeded, return the original bin + if _update_bin!(bin, e, p) # if update succeeded, return the original bin return bin else # if update has failed, return a new bin - return Bin(e,[p],bin) + return Bin(e, [p], bin) end end function _update_bin!(bin::Bin{E,P,I}, e, p) where {E,P,I} @@ -63,14 +52,17 @@ function _update_bin!(::BinNull, e, p) return false end -float_type(::MOIU.Model{T}) where T = T +float_type(::MOIU.Model{T}) where {T} = T -function ExaModels.ExaModel(moi_backend::MathOptInterface.Utilities.CachingOptimizer; backend = nothing) +function ExaModels.ExaModel( + moi_backend::MathOptInterface.Utilities.CachingOptimizer; + backend = nothing, +) jm_cache = moi_backend.model_cache - - T = float_type(moi_backend.model_cache.model) - + + T = float_type(moi_backend.model_cache.model) + # create exacore; c = ExaModels.ExaCore(T, backend) @@ -78,18 +70,18 @@ function ExaModels.ExaModel(moi_backend::MathOptInterface.Utilities.CachingOptim jvars = jm_cache.model.variables lvar = jvars.lower uvar = jvars.upper - x0 = fill!(similar(lvar), 0.) + x0 = fill!(similar(lvar), 0.0) nvar = length(lvar) if haskey(jm_cache.varattr, MOI.VariablePrimalStart()) - for (k,v) in jm_cache.varattr[MOI.VariablePrimalStart()] + for (k, v) in jm_cache.varattr[MOI.VariablePrimalStart()] x0[k.value] = v end end - v = ExaModels.variable(c, nvar; start = x0, lvar= lvar, uvar = uvar) + v = ExaModels.variable(c, nvar; start = x0, lvar = lvar, uvar = uvar) # objective jobjs = jm_cache.model.objective - + bin = BinNull() for field in SUPPORTED_OBJ_TYPE @@ -101,9 +93,9 @@ function ExaModels.ExaModel(moi_backend::MathOptInterface.Utilities.CachingOptim error("$field type objective is not supported") end end - + build_objective(c, bin) - + # constraint jcons = jm_cache.model.constraints @@ -125,7 +117,7 @@ function ExaModels.ExaModel(moi_backend::MathOptInterface.Utilities.CachingOptim y0 = fill!(similar(lcon), zero(T)) cons = ExaModels.constraint(c, offset; start = y0, lcon = lcon, ucon = ucon) build_constraint!(c, cons, bin) - + return ExaModels.ExaModel(c) end @@ -142,57 +134,57 @@ end function _exafy_con(cons::MOIU.VectorOfConstraints, bin, offset, lcon, ucon) l = length(cons.constraints) - + resize!(lcon, offset + l) resize!(ucon, offset + l) - - for (i,(c,e)) in cons.constraints - + + for (i, (c, e)) in cons.constraints + _exafy_con_update_vector(i, e, lcon, ucon, offset) if c isa MOI.ScalarAffineFunction for mm in c.terms - e,p = _exafy(mm) + e, p = _exafy(mm) bin = update_bin!( bin, - ExaModels.ParIndexed(ExaModels.ParSource(), length(p)+1) => e, - (p..., offset + i.value) + ExaModels.ParIndexed(ExaModels.ParSource(), length(p) + 1) => e, + (p..., offset + i.value), ) # augment data with constraint index end bin = update_bin!(bin, ExaModels.Null(c.constant), (1,)) elseif c isa MOI.ScalarQuadraticFunction for mm in c.affine_terms - e,p = _exafy(mm) + e, p = _exafy(mm) bin = update_bin!( bin, - ExaModels.ParIndexed(ExaModels.ParSource(), length(p)+1)=>e, - (p..., offset + i.value) + ExaModels.ParIndexed(ExaModels.ParSource(), length(p) + 1) => e, + (p..., offset + i.value), ) # augment data with constraint index end for mm in c.quadratic_terms - e,p = _exafy(mm) + e, p = _exafy(mm) bin = update_bin!( 
bin, - ExaModels.ParIndexed(ExaModels.ParSource(), length(p)+1)=>e, - (p..., offset + i.value) + ExaModels.ParIndexed(ExaModels.ParSource(), length(p) + 1) => e, + (p..., offset + i.value), ) # augment data with constraint index end bin = update_bin!(bin, ExaModels.Null(c.constant), (1,)) - elseif c isa MOI.ScalarNonlinearFunction && c.head == :+; # TODO maybe: also * Real, -, etc. + elseif c isa MOI.ScalarNonlinearFunction && c.head == :+ # TODO maybe: also * Real, -, etc. for mm in c.args - e,p = _exafy(mm) + e, p = _exafy(mm) bin = update_bin!( bin, - ExaModels.ParIndexed(ExaModels.ParSource(), length(p)+1)=>e, - (p..., offset + i.value) + ExaModels.ParIndexed(ExaModels.ParSource(), length(p) + 1) => e, + (p..., offset + i.value), ) # augment data with constraint index end else e, p = _exafy(c) bin = update_bin!( bin, - ExaModels.ParIndexed(ExaModels.ParSource(), length(p)+1)=>e, - (p..., offset + i.value) + ExaModels.ParIndexed(ExaModels.ParSource(), length(p) + 1) => e, + (p..., offset + i.value), ) # augment data with constraint index end end @@ -205,24 +197,24 @@ function _exafy_con(::Nothing, bin, offset, lcon, ucon) return bin, offset end -function _exafy_con_update_vector(i, e::MOI.Interval{T}, lcon, ucon, offset) where T - lcon[offset + i.value] = e.lower - ucon[offset + i.value] = e.upper +function _exafy_con_update_vector(i, e::MOI.Interval{T}, lcon, ucon, offset) where {T} + lcon[offset+i.value] = e.lower + ucon[offset+i.value] = e.upper end -function _exafy_con_update_vector(i, e::MOI.LessThan{T}, lcon, ucon, offset) where T - lcon[offset + i.value] = -Inf - ucon[offset + i.value] = e.upper +function _exafy_con_update_vector(i, e::MOI.LessThan{T}, lcon, ucon, offset) where {T} + lcon[offset+i.value] = -Inf + ucon[offset+i.value] = e.upper end -function _exafy_con_update_vector(i, e::MOI.GreaterThan{T}, lcon, ucon, offset) where T - ucon[offset + i.value] = Inf - lcon[offset + i.value] = e.lower +function _exafy_con_update_vector(i, e::MOI.GreaterThan{T}, lcon, ucon, offset) where {T} + ucon[offset+i.value] = Inf + lcon[offset+i.value] = e.lower end -function _exafy_con_update_vector(i, e::MOI.EqualTo{T}, lcon, ucon, offset) where T - lcon[offset + i.value] = e.value - ucon[offset + i.value] = e.value +function _exafy_con_update_vector(i, e::MOI.EqualTo{T}, lcon, ucon, offset) where {T} + lcon[offset+i.value] = e.value + ucon[offset+i.value] = e.value end @@ -235,7 +227,7 @@ function build_constraint!(c, cons, ::BinNull) end function build_objective(c, bin) build_objective(c, bin.inner) - ExaModels.objective(c, bin.head, bin.data) + ExaModels.objective(c, bin.head, bin.data) end function build_objective(c, ::BinNull) end @@ -245,53 +237,53 @@ function exafy_obj(o::Nothing, bin) end function exafy_obj(o::MOI.VariableIndex, bin) - e,p = _exafy(o) + e, p = _exafy(o) return update_bin!(bin, e, p) end -function exafy_obj(o::MOI.ScalarQuadraticFunction{T}, bin) where T +function exafy_obj(o::MOI.ScalarQuadraticFunction{T}, bin) where {T} for m in o.affine_terms - e,p = _exafy(m) + e, p = _exafy(m) bin = update_bin!(bin, e, p) end for m in o.quadratic_terms - e,p = _exafy(m) + e, p = _exafy(m) bin = update_bin!(bin, e, p) end - + return update_bin!(bin, ExaModels.Null(o.constant), (1,)) end - -function exafy_obj(o::MOI.ScalarAffineFunction{T}, bin) where T + +function exafy_obj(o::MOI.ScalarAffineFunction{T}, bin) where {T} for m in o.terms - e,p = _exafy(m) + e, p = _exafy(m) bin = update_bin!(bin, e, p) end - + return update_bin!(bin, ExaModels.Null(o.constant), (1,)) end 
-function exafy_obj(o::MOI.ScalarNonlinearFunction, bin) - constant = 0. - if o.head == :+; +function exafy_obj(o::MOI.ScalarNonlinearFunction, bin) + constant = 0.0 + if o.head == :+ for m in o.args if m isa MOI.ScalarAffineFunction for mm in m.affine_terms - e,p = _exafy(mm) + e, p = _exafy(mm) bin = update_bin!(bin, e, p) end elseif m isa MOI.ScalarQuadraticFunction for mm in m.affine_terms - e,p = _exafy(mm) + e, p = _exafy(mm) bin = update_bin!(bin, e, p) end for mm in m.quadratic_terms - e,p = _exafy(mm) + e, p = _exafy(mm) bin = update_bin!(bin, e, p) end constant += m.constant else - e,p = _exafy(m) + e, p = _exafy(m) bin = update_bin!(bin, e, p) end end @@ -308,91 +300,86 @@ function _exafy(v::MOI.VariableIndex, p = ()) return ExaModels.Var(i), (p..., v.value) end -function _exafy(i::R, p) where R <: Real - return ExaModels.ParIndexed( ExaModels.ParSource(), length(p) + 1), (p..., i) +function _exafy(i::R, p) where {R<:Real} + return ExaModels.ParIndexed(ExaModels.ParSource(), length(p) + 1), (p..., i) end function _exafy(e::MOI.ScalarNonlinearFunction, p = ()) - return op(e.head)( - ( - begin - c, p = _exafy(e,p) - c - end - for e in e.args - )... - ), p + return op(e.head)(( + begin + c, p = _exafy(e, p) + c + end for e in e.args + )...), p end -function _exafy(e::MOI.ScalarAffineFunction{T}, p = ()) where T - return sum( - begin - c1, p = _exafy(term, p) - c1 - end - for term in e.terms) + ExaModels.ParIndexed( ExaModels.ParSource(), length(p) + 1), (p..., e.constant) +function _exafy(e::MOI.ScalarAffineFunction{T}, p = ()) where {T} + return sum(begin + c1, p = _exafy(term, p) + c1 + end for term in e.terms) + ExaModels.ParIndexed(ExaModels.ParSource(), length(p) + 1), + (p..., e.constant) end -function _exafy(e::MOI.ScalarAffineTerm{T}, p = ()) where T +function _exafy(e::MOI.ScalarAffineTerm{T}, p = ()) where {T} c1, p = _exafy(e.variable, p) - return *(c1, ExaModels.ParIndexed( ExaModels.ParSource(), length(p) + 1)), (p..., e.coefficient) + return *(c1, ExaModels.ParIndexed(ExaModels.ParSource(), length(p) + 1)), + (p..., e.coefficient) end -function _exafy(e::MOI.ScalarQuadraticFunction{T}, p = ()) where T - t = ExaModels.ParIndexed( ExaModels.ParSource(), length(p) + 1) +function _exafy(e::MOI.ScalarQuadraticFunction{T}, p = ()) where {T} + t = ExaModels.ParIndexed(ExaModels.ParSource(), length(p) + 1) p = (p..., e.constant) if !isempty(e.affine_terms) - t += sum( - begin - c1, p = _exafy(term, p) - c1 - end - for term in e.affine_terms) + t += sum(begin + c1, p = _exafy(term, p) + c1 + end for term in e.affine_terms) end - + if !isempty(e.quadratic_terms) - t += sum( - begin - c1, p = _exafy(term, p) - c1 - end - for term in e.quadratic_terms) + t += sum(begin + c1, p = _exafy(term, p) + c1 + end for term in e.quadratic_terms) end - + return t, p end -function _exafy(e::MOI.ScalarQuadraticTerm{T}, p = ()) where T - +function _exafy(e::MOI.ScalarQuadraticTerm{T}, p = ()) where {T} + if e.variable_1 == e.variable_2 v, p = _exafy(e.variable_1, p) - return ExaModels.ParIndexed(ExaModels.ParSource(), length(p) + 1) * abs2(v), (p..., e.coefficient / 2) # it seems that MOI assumes this by default + return ExaModels.ParIndexed(ExaModels.ParSource(), length(p) + 1) * abs2(v), + (p..., e.coefficient / 2) # it seems that MOI assumes this by default else v1, p = _exafy(e.variable_1, p) v2, p = _exafy(e.variable_2, p) - return ExaModels.ParIndexed(ExaModels.ParSource(), length(p) + 1) * v1 * v2, (p..., e.coefficient) + return ExaModels.ParIndexed(ExaModels.ParSource(), length(p) + 1) 
* v1 * v2, + (p..., e.coefficient) end end # eval can be a performance killer -- we want to explicitly include symbols for frequently used operations. function op(s::Symbol) - if s == :+; - return +; - elseif s == :-; - return -; - elseif s == :*; - return *; - elseif s == :/; - return /; - elseif s == :^; - return ^; - elseif s == :sin; - return sin; - elseif s == :cos; - return cos; - elseif s == :exp; - return exp; + if s == :+ + return + + elseif s == :- + return - + elseif s == :* + return * + elseif s == :/ + return / + elseif s == :^ + return ^ + elseif s == :sin + return sin + elseif s == :cos + return cos + elseif s == :exp + return exp else return eval(s) end diff --git a/ext/ExaModelsOneAPI.jl b/ext/ExaModelsOneAPI.jl index 7b026ba4..679c7f37 100644 --- a/ext/ExaModelsOneAPI.jl +++ b/ext/ExaModelsOneAPI.jl @@ -6,7 +6,12 @@ ExaModels.ExaCore(backend::oneAPI.oneAPIBackend) = ExaModels.ExaCore(Float64, ba ExaModels.ExaCore(T, backend::oneAPI.oneAPIBackend) = ExaModels.ExaCore(x0 = oneAPI.zeros(T, 0), backend = backend) -function ExaModels.append!(backend, a::A, b::Base.Generator{UnitRange{I}}, lb) where {I, A<:oneAPI.oneVector} +function ExaModels.append!( + backend, + a::A, + b::Base.Generator{UnitRange{I}}, + lb, +) where {I,A<:oneAPI.oneVector} la = length(a) aa = similar(a, la + lb) copyto!(view(aa, 1:la), a) @@ -14,7 +19,7 @@ function ExaModels.append!(backend, a::A, b::Base.Generator{UnitRange{I}}, lb) w return aa end -function ExaModels.append!(backend, a::A, b::Base.Generator, lb) where { A<:oneAPI.oneVector} +function ExaModels.append!(backend, a::A, b::Base.Generator, lb) where {A<:oneAPI.oneVector} la = length(a) aa = similar(a, la + lb) copyto!(view(aa, 1:la), a) @@ -23,7 +28,12 @@ function ExaModels.append!(backend, a::A, b::Base.Generator, lb) where { A<:oneA return aa end -function ExaModels.append!(backend, a::A, b::V, lb) where {A<:oneAPI.oneVector, V <: AbstractVector} +function ExaModels.append!( + backend, + a::A, + b::V, + lb, +) where {A<:oneAPI.oneVector,V<:AbstractVector} la = length(a) aa = similar(a, la + lb) copyto!(view(aa, 1:la), a) diff --git a/src/gradient.jl b/src/gradient.jl index 9eded07e..c41c0e66 100644 --- a/src/gradient.jl +++ b/src/gradient.jl @@ -61,7 +61,14 @@ Performs dsparse gradient evaluation via the reverse pass on the computation (su - `cnt`: counter - `adj`: adjoint propagated up to the current node """ -@inline function grpass(d::D, comp, y, o1, cnt, adj) where {D<: Union{AdjointNull, ParIndexed}} +@inline function grpass( + d::D, + comp, + y, + o1, + cnt, + adj, +) where {D<:Union{AdjointNull,ParIndexed}} return cnt end @inline function grpass(d::D, comp, y, o1, cnt, adj) where {D<:AdjointNode1} diff --git a/src/graph.jl b/src/graph.jl index 5491562d..7602661c 100644 --- a/src/graph.jl +++ b/src/graph.jl @@ -296,7 +296,7 @@ end @inbounds SecondAdjointNodeVar(i, x.inner[i]) -@inline (v::Null{Nothing})(i, x::V) where {T, V <: AbstractVector{T}} = zero(T) -@inline (v::Null{N})(i, x::V) where {N, T, V <: AbstractVector{T}} = T(v.value) +@inline (v::Null{Nothing})(i, x::V) where {T,V<:AbstractVector{T}} = zero(T) +@inline (v::Null{N})(i, x::V) where {N,T,V<:AbstractVector{T}} = T(v.value) @inline (v::Null{N})(i, x::AdjointNodeSource{T}) where {N,T} = AdjointNull() @inline (v::Null{N})(i, x::SecondAdjointNodeSource{T}) where {N,T} = SecondAdjointNull() diff --git a/src/hessian.jl b/src/hessian.jl index 5fa691e2..dc638ea5 100644 --- a/src/hessian.jl +++ b/src/hessian.jl @@ -309,16 +309,7 @@ Performs sparse hessian evaluation (`d²f/dx²` 
portion) via the reverse pass on - `adj`: second adjoint propagated up to the current node """ -@inline function hrpass( - t::SecondAdjointNull, - comp, - y1, - y2, - o2, - cnt, - adj, - adj2, -) +@inline function hrpass(t::SecondAdjointNull, comp, y1, y2, o2, cnt, adj, adj2) cnt end @inline function hrpass( @@ -658,7 +649,7 @@ function shessian!(y1, y2, f, x, adj1s::V, adj2) where {V<:AbstractVector} end end -function shessian!(y1, y2, f, p, x, comp, o2, adj1, adj2) +function shessian!(y1, y2, f, p, x, comp, o2, adj1, adj2) graph = f(p, SecondAdjointNodeSource(x)) hrpass0(graph, comp, y1, y2, o2, 0, adj1, adj2) end diff --git a/src/jacobian.jl b/src/jacobian.jl index 84470276..e3f9b0be 100644 --- a/src/jacobian.jl +++ b/src/jacobian.jl @@ -13,28 +13,10 @@ Performs sparse jacobian evaluation via the reverse pass on the computation (sub - `cnt`: counter - `adj`: adjoint propagated up to the current node """ -@inline function jrpass( - d::AdjointNull, - comp, - i, - y1, - y2, - o1, - cnt, - adj, -) +@inline function jrpass(d::AdjointNull, comp, i, y1, y2, o1, cnt, adj) return cnt end -@inline function jrpass( - d::D, - comp, - i, - y1, - y2, - o1, - cnt, - adj, -) where {D<:AdjointNode1} +@inline function jrpass(d::D, comp, i, y1, y2, o1, cnt, adj) where {D<:AdjointNode1} cnt = jrpass(d.inner, comp, i, y1, y2, o1, cnt, adj * d.y) return cnt end diff --git a/src/nlp.jl b/src/nlp.jl index 4f602f5e..cd9316e4 100644 --- a/src/nlp.jl +++ b/src/nlp.jl @@ -249,7 +249,7 @@ function append!(backend, a, b::Base.Generator, lb) return a end -function append!(backend, a, b::Base.Generator{UnitRange{I}}, lb) where I +function append!(backend, a, b::Base.Generator{UnitRange{I}}, lb) where {I} la = length(a) resize!(a, la + lb) @@ -352,7 +352,7 @@ Objective function objective(c::C, gen) where {C<:ExaCore} f = SIMDFunction(gen, c.nobj, c.nnzg, c.nnzh) pars = gen.iter - + _objective(c, f, pars) end @@ -361,7 +361,7 @@ end Adds objective terms specified by a `expr` and `pars` to `core`, and returns an `Objective` object. """ -function objective(c, expr::N, pars) where N <: AbstractNode +function objective(c, expr::N, pars) where {N<:AbstractNode} f = _simdfunction(expr, c.nobj, c.nnzg, c.nnzh) _objective(c, f, pars) @@ -413,7 +413,7 @@ function constraint( f = SIMDFunction(gen, c.ncon, c.nnzj, c.nnzh) pars = gen.iter - + _constraint(c, f, pars, start, lcon, ucon) end @@ -429,10 +429,10 @@ function constraint( start = zero(T), lcon = zero(T), ucon = zero(T), -) where {T, C<:ExaCore{T}, N <: AbstractNode} +) where {T,C<:ExaCore{T},N<:AbstractNode} f = _simdfunction(expr, c.ncon, c.nnzj, c.nnzh) - + _constraint(c, f, pars, start, lcon, ucon) end @@ -450,7 +450,7 @@ function constraint( ) where {T,C<:ExaCore{T}} f = _simdfunction(Null(), c.ncon, c.nnzj, c.nnzh) - + _constraint(c, f, 1:n, start, lcon, ucon) end diff --git a/src/simdfunction.jl b/src/simdfunction.jl index 8cff7e86..5a97d9bb 100644 --- a/src/simdfunction.jl +++ b/src/simdfunction.jl @@ -38,7 +38,7 @@ Returns a `SIMDFunction` using the `gen`. 
function SIMDFunction(gen::Base.Generator, o0 = 0, o1 = 0, o2 = 0) f = gen.f(Par(eltype(gen.iter))) - + _simdfunction(f, o0, o1, o2) end diff --git a/src/utils.jl b/src/utils.jl index d09da55e..16c7a123 100644 --- a/src/utils.jl +++ b/src/utils.jl @@ -18,7 +18,7 @@ Base.@kwdef mutable struct CallbackStats end struct TimedNLPModel{T,VT,I<:NLPModels.AbstractNLPModel{T,VT}} <: - NLPModels.AbstractNLPModel{T,VT} + NLPModels.AbstractNLPModel{T,VT} inner::I meta::NLPModels.AbstractNLPModelMeta{T,VT} stats::CallbackStats @@ -37,7 +37,7 @@ function NLPModels.jac_structure!( m::M, rows::V, cols::V, - ) where {M<:TimedNLPModel,V<:AbstractVector} +) where {M<:TimedNLPModel,V<:AbstractVector} m.stats.jac_structure_cnt += 1 t = time() @@ -49,7 +49,7 @@ function NLPModels.hess_structure!( m::M, rows::V, cols::V, - ) where {M<:TimedNLPModel,V<:AbstractVector} +) where {M<:TimedNLPModel,V<:AbstractVector} m.stats.hess_structure_cnt += 1 t = time() @@ -95,7 +95,7 @@ function NLPModels.hess_coord!( y::AbstractVector, hess::AbstractVector; obj_weight = one(eltype(x)), - ) +) m.stats.hess_coord_cnt += 1 t = time() @@ -124,144 +124,133 @@ Base.show(io::IO, ::MIME"text/plain", e::TimedNLPModel) = Base.print(io, e); # CompressedNLPModels struct CompressedNLPModel{ - T, - VT <: AbstractVector{T}, - VI <: AbstractVector{Int}, - VI2 <: AbstractVector{Tuple{Tuple{Int, Int}, Int}}, - M <: NLPModels.AbstractNLPModel{T, VT}, - } <: NLPModels.AbstractNLPModel{T, VT} - - inner::M - jptr::VI - jsparsity::VI2 - hptr::VI - hsparsity::VI2 - buffer::VT - - meta::NLPModels.NLPModelMeta{T, VT} - counters::NLPModels.Counters + T, + VT<:AbstractVector{T}, + VI<:AbstractVector{Int}, + VI2<:AbstractVector{Tuple{Tuple{Int,Int},Int}}, + M<:NLPModels.AbstractNLPModel{T,VT}, +} <: NLPModels.AbstractNLPModel{T,VT} + + inner::M + jptr::VI + jsparsity::VI2 + hptr::VI + hsparsity::VI2 + buffer::VT + + meta::NLPModels.NLPModelMeta{T,VT} + counters::NLPModels.Counters end function getptr(array) - return push!( - pushfirst!( - findall( - _is_sparsity_not_equal.(@view(array[1:end-1]), @view(array[2:end])), - ) .+= 1, - 1, - ), - length(array) + 1, - ) + return push!( + pushfirst!( + findall(_is_sparsity_not_equal.(@view(array[1:end-1]), @view(array[2:end]))) .+= + 1, + 1, + ), + length(array) + 1, + ) end -_is_sparsity_not_equal(a,b) = first(a) != first(b) +_is_sparsity_not_equal(a, b) = first(a) != first(b) function CompressedNLPModel(m) - nnzj = NLPModels.get_nnzj(m) - Ibuffer = Vector{Int}(undef, nnzj) - Jbuffer = Vector{Int}(undef, nnzj) - NLPModels.jac_structure!(m, Ibuffer, Jbuffer) - - jsparsity = map( - (k, i, j) -> ((j,i), k), - 1:nnzj, - Ibuffer, - Jbuffer, - ) - sort!(jsparsity; lt = (a,b) -> a[1] < b[1]) - jptr = getptr(jsparsity) - - nnzh = NLPModels.get_nnzh(m) - resize!(Ibuffer, nnzh) - resize!(Jbuffer, nnzh) - NLPModels.hess_structure!(m, Ibuffer, Jbuffer) - - hsparsity = map( - (k, i, j) -> ((j,i), k), - 1:nnzh, - Ibuffer, - Jbuffer, - ) - sort!(hsparsity; lt = (a,b) -> a[1] < b[1]) - hptr = getptr(hsparsity) - - buffer = similar(m.meta.x0, max(nnzj, nnzh)) - - meta = NLPModels.NLPModelMeta( - m.meta.nvar, - ncon = m.meta.ncon, - nnzj = length(jptr)-1, - nnzh = length(hptr)-1, - x0 = m.meta.x0, - lvar = m.meta.lvar, - uvar = m.meta.uvar, - y0 = m.meta.y0, - lcon = m.meta.lcon, - ucon = m.meta.ucon, - ) - - counters = NLPModels.Counters() - - return CompressedNLPModel( - m, - jptr, - jsparsity, - hptr, - hsparsity, - buffer, - meta, - counters, - ) + nnzj = NLPModels.get_nnzj(m) + Ibuffer = Vector{Int}(undef, nnzj) + Jbuffer = 
Vector{Int}(undef, nnzj) + NLPModels.jac_structure!(m, Ibuffer, Jbuffer) + + jsparsity = map((k, i, j) -> ((j, i), k), 1:nnzj, Ibuffer, Jbuffer) + sort!(jsparsity; lt = (a, b) -> a[1] < b[1]) + jptr = getptr(jsparsity) + + nnzh = NLPModels.get_nnzh(m) + resize!(Ibuffer, nnzh) + resize!(Jbuffer, nnzh) + NLPModels.hess_structure!(m, Ibuffer, Jbuffer) + + hsparsity = map((k, i, j) -> ((j, i), k), 1:nnzh, Ibuffer, Jbuffer) + sort!(hsparsity; lt = (a, b) -> a[1] < b[1]) + hptr = getptr(hsparsity) + + buffer = similar(m.meta.x0, max(nnzj, nnzh)) + + meta = NLPModels.NLPModelMeta( + m.meta.nvar, + ncon = m.meta.ncon, + nnzj = length(jptr) - 1, + nnzh = length(hptr) - 1, + x0 = m.meta.x0, + lvar = m.meta.lvar, + uvar = m.meta.uvar, + y0 = m.meta.y0, + lcon = m.meta.lcon, + ucon = m.meta.ucon, + ) + + counters = NLPModels.Counters() + + return CompressedNLPModel(m, jptr, jsparsity, hptr, hsparsity, buffer, meta, counters) end function NLPModels.obj(m::CompressedNLPModel, x::AbstractVector) - NLPModels.obj(m.inner, x) + NLPModels.obj(m.inner, x) end function NLPModels.grad!(m::CompressedNLPModel, x::AbstractVector, y::AbstractVector) - NLPModels.grad!(m.inner, x, y) + NLPModels.grad!(m.inner, x, y) end function NLPModels.cons!(m::CompressedNLPModel, x::AbstractVector, g::AbstractVector) - NLPModels.cons!(m.inner, x, g) + NLPModels.cons!(m.inner, x, g) end function NLPModels.jac_coord!(m::CompressedNLPModel, x::AbstractVector, j::AbstractVector) - NLPModels.jac_coord!(m.inner, x, m.buffer) - _compress!(j, m.buffer, m.jptr, m.jsparsity) + NLPModels.jac_coord!(m.inner, x, m.buffer) + _compress!(j, m.buffer, m.jptr, m.jsparsity) end -function NLPModels.hess_coord!(m::CompressedNLPModel, x::AbstractVector, y::AbstractVector, h::AbstractVector; obj_weight = 1.0) - NLPModels.hess_coord!(m.inner, x, y, m.buffer; obj_weight = obj_weight) - _compress!(h, m.buffer, m.hptr, m.hsparsity) +function NLPModels.hess_coord!( + m::CompressedNLPModel, + x::AbstractVector, + y::AbstractVector, + h::AbstractVector; + obj_weight = 1.0, +) + NLPModels.hess_coord!(m.inner, x, y, m.buffer; obj_weight = obj_weight) + _compress!(h, m.buffer, m.hptr, m.hsparsity) end -function NLPModels.jac_structure!(m::CompressedNLPModel, I::AbstractVector, J::AbstractVector) - _structure!(I, J, m.jptr, m.jsparsity) +function NLPModels.jac_structure!( + m::CompressedNLPModel, + I::AbstractVector, + J::AbstractVector, +) + _structure!(I, J, m.jptr, m.jsparsity) end -function NLPModels.hess_structure!(m::CompressedNLPModel, I::AbstractVector, J::AbstractVector) - _structure!(I, J, m.hptr, m.hsparsity) +function NLPModels.hess_structure!( + m::CompressedNLPModel, + I::AbstractVector, + J::AbstractVector, +) + _structure!(I, J, m.hptr, m.hsparsity) end function _compress!(V, buffer, ptr, sparsity) fill!(V, zero(eltype(V))) - @simd for i in 1:length(ptr)-1 - for j in ptr[i]:ptr[i+1]-1 + @simd for i = 1:length(ptr)-1 + for j = ptr[i]:ptr[i+1]-1 V[i] += buffer[sparsity[j][2]] end - end + end end function _structure!(I, J, ptr, sparsity) - @simd for i in 1:length(ptr)-1 - J[i], I[i] = sparsity[ptr[i]][1] - end + @simd for i = 1:length(ptr)-1 + J[i], I[i] = sparsity[ptr[i]][1] + end end export TimedNLPModel, CompressedNLPModel - - - - - diff --git a/test/JuMPTest/JuMPTest.jl b/test/JuMPTest/JuMPTest.jl index 9c8537f3..e35b3f39 100644 --- a/test/JuMPTest/JuMPTest.jl +++ b/test/JuMPTest/JuMPTest.jl @@ -5,20 +5,8 @@ using Test, JuMP, ExaModels, PowerModels, NLPModelsIpopt, ..NLPTest import ..BACKENDS const JUMP_INTERFACE_INSTANCES = [ - ( - 
:jump_luksan_vlcek_model, - [ - 3, - 10 - ] - ), - ( - :jump_ac_power_model, - [ - "pglib_opf_case3_lmbd.m", - "pglib_opf_case14_ieee.m" - ] - ), + (:jump_luksan_vlcek_model, [3, 10]), + (:jump_ac_power_model, ["pglib_opf_case3_lmbd.m", "pglib_opf_case14_ieee.m"]), ] function jump_luksan_vlcek_model(N) @@ -186,17 +174,15 @@ function runtests() for backend in BACKENDS @testset "$backend" begin - m = WrapperNLPModel( - ExaModel(jm; backend=backend) - ) + m = WrapperNLPModel(ExaModel(jm; backend = backend)) result = ipopt(m; print_level = 0) - + @test sol ≈ result.solution atol = 1e-6 end end end end - end + end end end diff --git a/test/NLPTest/power.jl b/test/NLPTest/power.jl index d042b096..7382c3a1 100644 --- a/test/NLPTest/power.jl +++ b/test/NLPTest/power.jl @@ -481,4 +481,3 @@ function jump_ac_power_model(backend, filename) jm, vars = _jump_ac_power_model(backend, filename) return MathOptNLPModel(jm) end - diff --git a/test/UtilsTest/UtilsTest.jl b/test/UtilsTest/UtilsTest.jl index 13cff72a..4b3b10c5 100644 --- a/test/UtilsTest/UtilsTest.jl +++ b/test/UtilsTest/UtilsTest.jl @@ -4,22 +4,13 @@ using Test import ExaModels, NLPModelsIpopt import ..NLPTest: _exa_luksan_vlcek_model -UTIL_MODELS = [ - ExaModels.TimedNLPModel, - ExaModels.CompressedNLPModel -] - -FIELDS = [ - :solution, - :multipliers, - :multipliers_L, - :multipliers_U, - :objective -] +UTIL_MODELS = [ExaModels.TimedNLPModel, ExaModels.CompressedNLPModel] + +FIELDS = [:solution, :multipliers, :multipliers_L, :multipliers_U, :objective] function runtests() @testset "Utils tests" begin - m, ~ = _exa_luksan_vlcek_model(nothing,3) + m, ~ = _exa_luksan_vlcek_model(nothing, 3) result = NLPModelsIpopt.ipopt(m; print_level = 0) @@ -29,7 +20,8 @@ function runtests() @testset "$util_model" begin for field in FIELDS @testset "$field" begin - @test getfield(util_result, field) ≈ getfield(result, field) atol = 1e-6 + @test getfield(util_result, field) ≈ getfield(result, field) atol = + 1e-6 end end end diff --git a/test/backends.jl b/test/backends.jl index 0cbeac86..74961a54 100644 --- a/test/backends.jl +++ b/test/backends.jl @@ -21,4 +21,3 @@ try catch e @info "excluding oneAPI" end - diff --git a/test/runtests.jl b/test/runtests.jl index 663d595d..48c89bff 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -13,7 +13,7 @@ include("UtilsTest/UtilsTest.jl") @testset "ExaModels test" begin @info "Running AD Test" ADTest.runtests() - + @info "Running NLP Test" NLPTest.runtests()
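
To reproduce or verify a formatting pass like this one locally, the conventional tool behind "Format .jl files" actions is JuliaFormatter.jl. Below is a minimal sketch; it assumes JuliaFormatter is installed in the active environment and that the style the bot applied is either the package default or whatever a `.JuliaFormatter.toml` at the repository root specifies — both assumptions, not something stated in this patch.

```julia
# Minimal sketch: run the same kind of formatting pass the bot performs.
# Assumes JuliaFormatter.jl is available in the active environment and
# that the repository root is the current working directory.
using JuliaFormatter

# format(".") walks the directory tree, rewrites every *.jl file in
# place, and returns true when all files were already formatted.
clean = format(".")
println(clean ? "already formatted" : "files were rewritten")
```

In CI, the same call typically drives one of two patterns: fail the job when `format(".")` returns `false`, or — as in this commit — let a bot commit the resulting diff back to the branch.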