Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Refactoring of ExaPF #214

Closed
wants to merge 34 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
34 commits
Select commit Hold shift + click to select a range
ebf3921
implement network_basis kernel
frapac Oct 29, 2021
cc51a45
fix code on GPU
frapac Oct 29, 2021
230cb83
polar: better accumulation for line flow constraints
frapac Dec 22, 2021
803614a
clean code before PR
frapac Dec 23, 2021
98e0562
fix tests on GPU
frapac Dec 23, 2021
fb688c5
hotfix
frapac Dec 30, 2021
18b517c
reimplement all constraints with nonlinear basis
frapac Jan 5, 2022
600ee8a
reimplement Jacobian code
frapac Jan 6, 2022
b2bc6fa
reimplement Hessian
frapac Jan 6, 2022
48ad8a0
add proper intermediate state
frapac Jan 6, 2022
bf20c2a
update Jacobian and Hessian Autodiff to work with mapping
frapac Jan 6, 2022
cd6b960
fix wrong sign in Hessian computation
frapac Jan 6, 2022
ca72e5f
add MultiExpressions for aggregations
frapac Jan 7, 2022
70be81c
rewrite transfer function with mapping
frapac Jan 7, 2022
97061ba
update powerflow solver
frapac Jan 7, 2022
794c505
[polar] rewrite tests
frapac Jan 8, 2022
5d5572b
port new code on GPU
frapac Jan 8, 2022
35cedbc
add expressions for Matpower Hessians
frapac Jan 10, 2022
a692933
implement FullHessian with autodiff
frapac Jan 10, 2022
1c14305
fix FullHessian on GPU
frapac Jan 10, 2022
03231eb
clean definition of basis with ComposedExpressions
frapac Jan 10, 2022
99726ea
update tests
frapac Jan 11, 2022
926665b
fix Jacobian for PowerFlowBounds expression
frapac Jan 11, 2022
3fdf7ff
minor fixes
frapac Jan 12, 2022
bd62ac6
remove code
frapac Jan 12, 2022
05b8708
fix tests on CPU
frapac Jan 12, 2022
6c78a5b
[skip ci] clean CUDA deps
frapac Jan 12, 2022
81ad172
fix tests on GPU
frapac Jan 12, 2022
9878023
various performance fixes
frapac Jan 14, 2022
990eca8
add missing functions for Argos
frapac Jan 14, 2022
88de7da
clean functions in matpower.jl
frapac Jan 14, 2022
faa13c3
[skip ci] more fixes on GPU
frapac Jan 17, 2022
8bd3491
fix: properly initialize sparsity pattern for Jacobians
frapac Jan 18, 2022
d1b2dea
add support for multi-generators
frapac Jan 18, 2022
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions Project.toml
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@ version = "0.6.0"

[deps]
CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba"
CUDAKernels = "72cfdca4-0801-4ab0-bf6a-d52aa10adc57"
FiniteDiff = "6a86dc24-6348-571c-b903-95158fe2bd41"
ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210"
KernelAbstractions = "63c18a36-062a-441e-b654-da1e3ab1ce7c"
Expand Down
60 changes: 25 additions & 35 deletions benchmark/benchmarks.jl
Original file line number Diff line number Diff line change
function run_benchmark(datafile, device, linsolver)
    # Benchmark a single power-flow solve for the network in `datafile`,
    # running on `device` (CPU/GPU) with the linear-solver constructor
    # `linsolver`. Returns `(has_converged::Bool, elapsed_time_seconds)`.
    #
    # NOTE(review): the scraped diff interleaved the pre- and post-refactor
    # bodies (`jx` was assigned three times; both the old `ExaPF.powerflow`
    # and the new `ExaPF.nlsolve!` entry points were called). This is the
    # reconstructed post-refactor version — confirm against the merged PR.
    ntol = 1e-6
    pf = PowerSystem.PowerNetwork(datafile)
    polar = PolarForm(pf, device)
    mapx = ExaPF.my_map(polar, State())
    nx = length(mapx)
    stack = ExaPF.NetworkStack(polar)

    # Power-flow residual expressed through the nonlinear polar basis.
    basis = ExaPF.PolarBasis(polar)
    pflow = ExaPF.PowerFlowBalance(polar)
    jx = ExaPF.MyJacobian(polar, pflow ∘ basis, mapx)
    J = jx.J

    # Block-Jacobi preconditioner: roughly one partition per 64 rows,
    # with a minimum of two partitions.
    npartitions = max(2, ceil(Int64, size(J, 1) / 64))
    precond = ExaPF.LinearSolvers.BlockJacobiPreconditioner(J, npartitions, device)

    algo = linsolver(J; P=precond)
    powerflow_solver = NewtonRaphson(tol=ntol)
    VT = typeof(stack.input)
    pf_buffer = ExaPF.NLBuffer{VT}(nx)

    # Warm-up solve so JIT compilation is excluded from the measurement.
    ExaPF.nlsolve!(
        powerflow_solver, jx, stack; linear_solver=algo, nl_buffer=pf_buffer,
    )

    # Reset the network state and time the actual solve.
    ExaPF.init!(polar, stack)
    res = @timed ExaPF.nlsolve!(
        powerflow_solver, jx, stack; linear_solver=algo, nl_buffer=pf_buffer,
    )
    convergence = res.value

    # Make sure we are converged
    @assert(convergence.has_converged)

    return convergence.has_converged, res.time
end

function main()
    # CLI entry point. ARGS: (1) linear-solver name inside `LinearSolvers`,
    # (2) device constructor name (e.g. `CPU`), (3) case file relative to
    # this script. `eval ∘ Meta.parse` on ARGS is tolerable here because the
    # input comes from a trusted command line, not external data.
    #
    # NOTE(review): the scraped diff mixed the old 3-tuple return of
    # `run_benchmark` with the new 2-tuple one; this follows the new API.
    linsolver = eval(Meta.parse("LinearSolvers.$(ARGS[1])"))
    device = eval(Meta.parse("$(ARGS[2])()"))
    datafile = joinpath(dirname(@__FILE__), ARGS[3])

    has_converged, timer = run_benchmark(datafile, device, linsolver)
    @test has_converged

    # One CSV-style line: solver, device, case, time, converged.
    println("$(ARGS[1]), $(ARGS[2]), $(ARGS[3]),",
        timer,
        ", $(has_converged)")
end

main()
Expand Down
15 changes: 6 additions & 9 deletions src/ExaPF.jl
Original file line number Diff line number Diff line change
Expand Up @@ -6,24 +6,16 @@ using LinearAlgebra
using SparseArrays

import CUDA
import CUDA.CUBLAS
import CUDA.CUSPARSE
import CUDA.CUSOLVER

import ForwardDiff
using KernelAbstractions
const KA = KernelAbstractions
using TimerOutputs: @timeit, TimerOutput

import Base: show, get

const VERBOSE_LEVEL_HIGH = 3
const VERBOSE_LEVEL_MEDIUM = 2
const VERBOSE_LEVEL_LOW = 1
const VERBOSE_LEVEL_NONE = 0
const TIMER = TimerOutput()
export run_pf

include("utils.jl")
include("architectures.jl")

# Templates
Expand All @@ -43,4 +35,9 @@ const LS = LinearSolvers
# Polar formulation
include("Polar/polar.jl")

# CUDA extension: pull in the GPU-specific methods only when a functional
# CUDA installation is detected, so the package still loads on machines
# without a GPU stack.
if CUDA.has_cuda()
include("cuda_wrapper.jl")
end

end
31 changes: 11 additions & 20 deletions src/LinearSolvers/LinearSolvers.jl
Original file line number Diff line number Diff line change
Expand Up @@ -8,14 +8,15 @@ import Base: show

using CUDA
using KernelAbstractions
using CUDAKernels
import CUDA.CUBLAS
import CUDA.CUSOLVER
import CUDA.CUSPARSE
import Krylov
import LightGraphs
import Metis

import ..ExaPF: xnorm, csclsvqr!
import ..ExaPF: xnorm

const KA = KernelAbstractions

Expand Down Expand Up @@ -102,9 +103,12 @@ exa_factorize(J::Adjoint{T, SparseMatrixCSC{T, Int}}) where T = lu(J.parent)'
# Eager constructor: factorize `J` up front; extra keyword options are
# accepted for interface uniformity and ignored.
function DirectSolver(J; options...)
    return DirectSolver(exa_factorize(J))
end
# Lazy variant: no matrix yet, factorization deferred to solve time.
DirectSolver() = DirectSolver(nothing)

# Refresh the cached LU factors of `s` from the new matrix `J`, reusing
# the existing factorization storage (no reallocation).
function update!(s::DirectSolver, J::AbstractMatrix)
    return lu!(s.factorization, J)
end

# Reuse factorization in update
# Solve J y = x with the cached LU factorization; returns 0 on success.
# NOTE(review): `update!` above already refreshes the factors, yet this
# method calls `lu!` again on every solve — in this scraped diff the `lu!`
# line is likely pre-refactor residue; confirm against the merged PR.
function ldiv!(s::DirectSolver{<:LinearAlgebra.Factorization}, y::AbstractVector, J::AbstractMatrix, x::AbstractVector)
lu!(s.factorization, J) # Update factorization inplace
LinearAlgebra.ldiv!(y, s.factorization, x) # Forward-backward solve
return 0
end
Expand All @@ -122,10 +126,6 @@ function rdiv!(s::DirectSolver{<:LinearAlgebra.Factorization}, y::AbstractArray,
LinearAlgebra.ldiv!(y, s.factorization', x) # Forward-backward solve
return 0
end
# Right-division: solve Jᵀ y = x using the adjoint of the cached
# factorization; returns 0 on success.
function rdiv!(s::DirectSolver{<:LinearAlgebra.Factorization}, y::Array, J::SparseMatrixCSC, x::Array)
    fact = s.factorization
    LinearAlgebra.ldiv!(y, adjoint(fact), x)
    return 0
end

function ldiv!(::DirectSolver{Nothing}, y::Vector, J::AbstractMatrix, x::Vector)
F = lu(J)
Expand All @@ -143,18 +143,6 @@ function batch_ldiv!(s::DirectSolver{<:LinearAlgebra.Factorization}, Y, Js::Vect
end
end

# No cached factorization: fall back to cuSOLVER's QR-based sparse direct
# solve for CSR matrices; returns 0 on success.
function ldiv!(::DirectSolver{Nothing},
    y::CUDA.CuVector, J::CUSPARSE.CuSparseMatrixCSR, x::CUDA.CuVector,
)
    tol = 1e-8
    reorder = one(Cint)
    CUSOLVER.csrlsvqr!(J, x, y, tol, reorder, 'O')
    return 0
end
function ldiv!(::DirectSolver{Nothing},
y::CUDA.CuVector, J::CUSPARSE.CuSparseMatrixCSC, x::CUDA.CuVector,
)
# QR-based sparse direct solve for CSC storage (per the `lsvqr` naming).
# NOTE(review): the module's import was reduced to `xnorm` only in this PR,
# so `csclsvqr!` is no longer in scope — this method looks like deleted
# diff residue; confirm against the merged PR.
csclsvqr!(J, x, y, 1e-8, one(Cint), 'O')
return 0
end
function get_transpose(::DirectSolver, M::CUSPARSE.CuSparseMatrixCSR)
    # presumably exploits that a CSC representation of M shares its memory
    # layout with Mᵀ in CSR — TODO confirm the intended semantics
    return CUSPARSE.CuSparseMatrixCSC(M)
end

function rdiv!(s::DirectSolver{<:LinearAlgebra.Factorization}, y::CUDA.CuVector, J::CUSPARSE.CuSparseMatrixCSR, x::CUDA.CuVector)
Expand All @@ -163,8 +151,11 @@ function rdiv!(s::DirectSolver{<:LinearAlgebra.Factorization}, y::CUDA.CuVector,
return 0
end

function update_preconditioner!(solver::AbstractIterativeLinearSolver, J, device)
update(solver.precond, J, device)
# Host path: rebuild the iterative solver's preconditioner from a CPU
# sparse matrix.
function update!(solver::AbstractIterativeLinearSolver, J::SparseMatrixCSC)
    return update(solver.precond, J, CPU())
end
# Device path: rebuild the iterative solver's preconditioner from a GPU
# sparse matrix (CSR).
function update!(solver::AbstractIterativeLinearSolver, J::CUSPARSE.CuSparseMatrixCSR)
    return update(solver.precond, J, CUDADevice())
end

"""
Expand Down
96 changes: 0 additions & 96 deletions src/Polar/Constraints/active_power.jl

This file was deleted.

Loading