add regularization hyperparameter
kevinsung committed May 23, 2024
1 parent a52a9bb commit 3ebb55c
Showing 2 changed files with 124 additions and 7 deletions.
83 changes: 77 additions & 6 deletions python/ffsim/optimize/stochastic_reconfiguration.py
@@ -37,7 +37,9 @@ def minimize_stochastic_reconfiguration(
cond: float = 1e-4,
epsilon: float = 1e-8,
gtol: float = 1e-5,
regularization: float = 0.0,
variation: float = 1.0,
optimize_regularization: bool = True,
optimize_variation: bool = True,
optimize_kwargs: dict | None = None,
callback: Callable[[OptimizeResult], Any] | None = None,
@@ -60,9 +62,16 @@ def minimize_stochastic_reconfiguration(
epsilon: Increment to use for approximating the gradient using
finite difference.
gtol: Convergence threshold for the norm of the projected gradient.
regularization: Hyperparameter controlling regularization of the
overlap matrix. Its value must be non-negative. A larger value results in
greater regularization.
variation: Hyperparameter controlling the size of parameter variations
used in the linear expansion of the wavefunction. Its value must be
positive.
optimize_regularization: Whether to optimize the `regularization` hyperparameter
in each iteration. Optimizing hyperparameters incurs more function and
energy evaluations in each iteration, but may improve convergence.
The optimization is performed using `scipy.optimize.minimize`_.
optimize_variation: Whether to optimize the `variation` hyperparameter
in each iteration. Optimizing hyperparameters incurs more function and
energy evaluations in each iteration, but may improve convergence.
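For reference, the parameter update derived from these hyperparameters solves the regularized linear system (S + λI) x = -(η/2) g, where S is the overlap matrix, g is the energy gradient, λ is `regularization`, and η is `variation` (see `_get_param_update` below). The following self-contained sketch reproduces that step on hypothetical toy values; it is an illustration of the update, not code from this commit.

import numpy as np
import scipy.linalg

# Hypothetical toy values, for illustration only.
overlap_mat = np.array([[2.0, 1.0], [1.0, 2.0]])  # S
grad = np.array([0.3, -0.1])                      # g
regularization = 1e-4                             # lambda
variation = 0.9                                   # eta

# Solve (S + lambda * I) x = -(eta / 2) * g in the least-squares sense,
# matching the _get_param_update helper introduced in this commit.
param_update, _, _, _ = scipy.linalg.lstsq(
    overlap_mat + regularization * np.eye(overlap_mat.shape[0]),
    -0.5 * variation * grad,
    cond=1e-4,
)
print(param_update)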
@@ -117,7 +126,6 @@ def minimize_stochastic_reconfiguration(
if optimize_kwargs is None:
optimize_kwargs = dict(method="L-BFGS-B")

- variation_param = math.sqrt(variation)
params = x0.copy()
converged = False
intermediate_result = OptimizeResult(
@@ -140,27 +148,81 @@
intermediate_result.fun = energy
intermediate_result.jac = grad
intermediate_result.overlap_mat = overlap_mat
intermediate_result.regularization = regularization
intermediate_result.variation = variation
callback(intermediate_result)

if np.linalg.norm(grad) < gtol:
converged = True
break

- if optimize_variation:
+ if optimize_regularization and optimize_variation:

def f(x: np.ndarray) -> float:
(regularization_param, variation_param) = x
regularization = regularization_param**2
variation = variation_param**2
param_update = _get_param_update(
grad,
overlap_mat,
regularization=regularization,
variation=variation,
cond=cond,
)
vec = params_to_vec(params + param_update)
return np.vdot(vec, hamiltonian @ vec).real

regularization_param = math.sqrt(regularization)
variation_param = math.sqrt(variation)
result = minimize(
f,
x0=[regularization_param, variation_param],
**optimize_kwargs,
)
(regularization_param, variation_param) = result.x
regularization = regularization_param**2
variation = variation_param**2

elif optimize_regularization:

def f(x: np.ndarray) -> float:
(regularization_param,) = x
regularization = regularization_param**2
param_update = _get_param_update(
grad,
overlap_mat,
regularization=regularization,
variation=variation,
cond=cond,
)
vec = params_to_vec(params + param_update)
return np.vdot(vec, hamiltonian @ vec).real

regularization_param = math.sqrt(regularization)
result = minimize(
f,
x0=[regularization_param],
**optimize_kwargs,
)
(regularization_param,) = result.x
regularization = regularization_param**2

elif optimize_variation:

def f(x: np.ndarray) -> float:
(variation_param,) = x
variation = variation_param**2
param_update = _get_param_update(
grad,
overlap_mat,
- variation,
+ regularization=regularization,
+ variation=variation,
cond=cond,
)
vec = params_to_vec(params + param_update)
return np.vdot(vec, hamiltonian @ vec).real

variation_param = math.sqrt(variation)
result = minimize(
f,
x0=[variation_param],
Expand All @@ -172,7 +234,8 @@ def f(x: np.ndarray) -> float:
param_update = _get_param_update(
grad,
overlap_mat,
- variation,
+ regularization=regularization,
+ variation=variation,
cond=cond,
)
params = params + param_update
@@ -217,7 +280,15 @@ def _sr_matrices(


def _get_param_update(
- grad: np.ndarray, overlap_mat: np.ndarray, variation: float, cond: float
+ grad: np.ndarray,
+ overlap_mat: np.ndarray,
+ regularization: float,
+ variation: float,
+ cond: float,
) -> np.ndarray:
- x, _, _, _ = scipy.linalg.lstsq(overlap_mat, -0.5 * variation * grad, cond=cond)
+ x, _, _, _ = scipy.linalg.lstsq(
+ overlap_mat + regularization * np.eye(overlap_mat.shape[0]),
+ -0.5 * variation * grad,
+ cond=cond,
+ )
return x
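A note on the hyperparameter-optimization branches above: `scipy.optimize.minimize` is run over the square roots of the hyperparameters (`regularization_param`, `variation_param`), and the objective squares them back, which keeps both values non-negative without bound constraints. Below is a minimal standalone sketch of the same trick, with a hypothetical quadratic objective standing in for the energy evaluation.

import math
import numpy as np
from scipy.optimize import minimize

# Hypothetical stand-in for the energy as a function of one hyperparameter.
def energy_for(regularization: float) -> float:
    return (regularization - 0.3) ** 2

def f(x: np.ndarray) -> float:
    # Square the unconstrained variable to recover a non-negative value.
    (regularization_param,) = x
    return energy_for(regularization_param**2)

regularization = 1e-4
result = minimize(f, x0=[math.sqrt(regularization)], method="L-BFGS-B")
(regularization_param,) = result.x
regularization = regularization_param**2  # optimized value, still non-negative
print(regularization)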
48 changes: 47 additions & 1 deletion tests/python/optimize/stochastic_reconfiguration_test.py
@@ -63,6 +63,8 @@ def callback(intermediate_result):
)
if hasattr(intermediate_result, "jac"):
info["jac"].append(intermediate_result.jac)
if hasattr(intermediate_result, "regularization"):
info["regularization"].append(intermediate_result.regularization)
if hasattr(intermediate_result, "variation"):
info["variation"].append(intermediate_result.variation)

@@ -85,6 +87,9 @@ def callback(intermediate_result):
params_to_vec,
x0=x0,
hamiltonian=hamiltonian,
regularization=1e-4,
variation=0.9,
optimize_regularization=False,
optimize_variation=False,
callback=callback,
)
@@ -94,7 +99,48 @@ def callback(intermediate_result):
np.testing.assert_allclose(energy(params), fun)
assert result.nit <= 30
assert result.nit < result.nlinop < result.nfev
assert set(info["variation"]) == {1.0}
assert set(info["regularization"]) == {1e-4}
assert set(info["variation"]) == {0.9}

# optimization without optimizing regularization
info = defaultdict(list)
result = ffsim.optimize.minimize_stochastic_reconfiguration(
params_to_vec,
x0=x0,
hamiltonian=hamiltonian,
regularization=1e-4,
variation=0.9,
optimize_regularization=False,
callback=callback,
)
np.testing.assert_allclose(energy(result.x), result.fun)
np.testing.assert_allclose(result.fun, -0.970773)
for params, fun in zip(info["x"], info["fun"]):
np.testing.assert_allclose(energy(params), fun)
assert result.nit <= 30
assert result.nit < result.nlinop < result.nfev
assert set(info["regularization"]) == {1e-4}
assert len(set(info["variation"])) > 1

# optimization without optimizing variation
info = defaultdict(list)
result = ffsim.optimize.minimize_stochastic_reconfiguration(
params_to_vec,
x0=x0,
hamiltonian=hamiltonian,
regularization=1e-4,
variation=0.9,
optimize_variation=False,
callback=callback,
)
np.testing.assert_allclose(energy(result.x), result.fun)
np.testing.assert_allclose(result.fun, -0.970773)
for params, fun in zip(info["x"], info["fun"]):
np.testing.assert_allclose(energy(params), fun)
assert result.nit <= 30
assert result.nit < result.nlinop < result.nfev
assert set(info["regularization"]) != {1e-4}
assert set(info["variation"]) == {0.9}

# optimization with maxiter
info = defaultdict(list)
