Merge pull request #22 from NREL/gb/better_history
Gb/better history
grantbuster authored Feb 8, 2021
2 parents a2f9593 + 461f1c7 commit 44c6133
Showing 6 changed files with 286 additions and 81 deletions.
185 changes: 118 additions & 67 deletions examples/phygnn_gans.ipynb

Large diffs are not rendered by default.

17 changes: 13 additions & 4 deletions phygnn/model_interfaces/phygnn_model.py
@@ -191,6 +191,8 @@ def save_model(self, path):
         path : str
             Save phygnn model
         """
+
+        path = os.path.abspath(path)
         if path.endswith(('.json', '.pkl')):
             dir_path = os.path.dirname(path)
             if path.endswith('.pkl'):
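Because the path is now resolved with os.path.abspath before the directory check, relative and absolute spellings behave the same. A minimal hedged sketch, assuming a built PhygnnModel instance `model` (the file name is illustrative):

    # Relative path is resolved to an absolute path before saving
    model.save_model('./models/demo_model.pkl')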
@@ -235,7 +237,7 @@ def build(cls, p_fun, feature_names, label_names,
               loss_weights=(0.5, 0.5), hidden_layers=None, metric='mae',
               initializer=None, optimizer=None, learning_rate=0.01,
               history=None, kernel_reg_rate=0.0, kernel_reg_power=1,
-              bias_reg_rate=0.0, bias_reg_power=1):
+              bias_reg_rate=0.0, bias_reg_power=1, name=None):
         """
         Build phygnn model from given features, layers and kwargs
@@ -310,6 +312,8 @@ def build(cls, p_fun, feature_names, label_names,
             Bias regularization power. bias_reg_power=1 is L1
             regularization (lasso regression), and bias_reg_power=2 is L2
             regularization (ridge regression).
+        name : None | str
+            Optional model name for debugging.

         Returns
         -------
@@ -341,7 +345,8 @@ def build(cls, p_fun, feature_names, label_names,
                                     bias_reg_rate=bias_reg_rate,
                                     bias_reg_power=bias_reg_power,
                                     feature_names=feature_names,
-                                    output_names=label_names)
+                                    output_names=label_names,
+                                    name=name)

         model = cls(model, feature_names=feature_names,
                     label_names=label_names, normalize=normalize,
@@ -358,7 +363,8 @@ def build_trained(cls, p_fun, features, labels, p, normalize=(True, False),
                       bias_reg_rate=0.0, bias_reg_power=1, n_batch=16,
                       n_epoch=10, shuffle=True, validation_split=0.2,
                       run_preflight=True, return_diagnostics=False,
-                      p_kwargs=None, parse_kwargs=None, save_path=None):
+                      p_kwargs=None, parse_kwargs=None, save_path=None,
+                      name=None):
         """
         Build phygnn model from given features, layers and
         kwargs and then train with given labels and kwargs
@@ -471,6 +477,8 @@ def build_trained(cls, p_fun, features, labels, p, normalize=(True, False),
             Directory path to save model to. The tensorflow model will be
             saved to the directory while the framework parameters will be
             saved in json, by default None
+        name : None | str
+            Optional model name for debugging.

         Returns
         -------
@@ -495,7 +503,8 @@ def build_trained(cls, p_fun, features, labels, p, normalize=(True, False),
                       kernel_reg_rate=kernel_reg_rate,
                       kernel_reg_power=kernel_reg_power,
                       bias_reg_rate=bias_reg_rate,
-                      bias_reg_power=bias_reg_power)
+                      bias_reg_power=bias_reg_power,
+                      name=name)

         diagnostics = model.train_model(features, labels, p,
                                         n_batch=n_batch,
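A minimal usage sketch of the new name kwarg. Hedged: PhygnnModel is the class this module exposes; the feature/label names and the no-op p_fun below are illustrative assumptions, not from this diff — only name=None is confirmed above.

    import tensorflow as tf
    from phygnn.model_interfaces.phygnn_model import PhygnnModel


    def noop_p_fun(model, y_true, y_predicted, p):
        """Hypothetical placeholder physics loss (assumption for illustration)."""
        return tf.constant(0.0, dtype=tf.float32)


    # Build a named model; `name` flows through to PhysicsGuidedNeuralNetwork
    # and shows up in the per-epoch training log messages.
    model = PhygnnModel.build(p_fun=noop_p_fun,
                              feature_names=['x1', 'x2'],
                              label_names=['y'],
                              loss_weights=(1.0, 0.0),
                              name='demo_model')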
32 changes: 23 additions & 9 deletions phygnn/phygnn.py
@@ -314,6 +314,7 @@ def model_params(self):
                         'bias_reg_power': self.bias_reg_power,
                         'feature_names': self.feature_names,
                         'output_names': self.output_names,
+                        'name': self.name,
                         }

         return model_params
@@ -605,7 +606,7 @@ def set_loss_weights(self, loss_weights):
         assert len(loss_weights) == 2, 'loss_weights can only have two values!'
         self._loss_weights = loss_weights

-    def loss(self, y_true, y_predicted, p, p_kwargs):
+    def calc_loss(self, y_true, y_predicted, p, p_kwargs):
         """Calculate the loss function by comparing y_true to model-predicted y

         Parameters
@@ -681,17 +682,18 @@ def _get_grad(self, x, y_true, p, p_kwargs):
                 tape.watch(layer.variables)

             y_predicted = self.predict(x, to_numpy=False, training=True)
-            loss = self.loss(y_true, y_predicted, p, p_kwargs)[0]
+            loss, nn_loss, p_loss = self.calc_loss(y_true, y_predicted,
+                                                   p, p_kwargs)
             grad = tape.gradient(loss, self.weights)

-        return grad, loss
+        return grad, loss, nn_loss, p_loss

     def _run_gradient_descent(self, x, y_true, p, p_kwargs):
         """Run gradient descent for one mini-batch of (x, y_true)
         and adjust NN weights."""
-        grad, loss = self._get_grad(x, y_true, p, p_kwargs)
+        grad, loss, nn_loss, p_loss = self._get_grad(x, y_true, p, p_kwargs)
         self._optimizer.apply_gradients(zip(grad, self.weights))
-        return grad, loss
+        return loss, nn_loss, p_loss

     def fit(self, x, y, p, n_batch=16, n_epoch=10, shuffle=True,
             validation_split=0.2, p_kwargs=None, run_preflight=True,
@@ -746,7 +748,14 @@ def fit(self, x, y, p, n_batch=16, n_epoch=10, shuffle=True,

         if self._history is None:
             self._history = pd.DataFrame(
-                columns=['elapsed_time', 'training_loss', 'validation_loss'])
+                columns=['elapsed_time',
+                         'training_nn_loss',
+                         'validation_nn_loss',
+                         'training_p_loss',
+                         'validation_p_loss',
+                         'training_loss',
+                         'validation_loss',
+                         ])
             self._history.index.name = 'epoch'
         else:
             epochs += self._history.index.values[-1] + 1
@@ -765,18 +774,23 @@

             batch_iter = zip(x_batches, y_batches, p_batches)
             for x_batch, y_batch, p_batch in batch_iter:
-                tr_loss = self._run_gradient_descent(
-                    x_batch, y_batch, p_batch, p_kwargs)[1]
+                tr_loss, tr_nn_loss, tr_p_loss = self._run_gradient_descent(
+                    x_batch, y_batch, p_batch, p_kwargs)

             y_val_pred = self.predict(x_val, to_numpy=False)
-            val_loss = self.loss(y_val, y_val_pred, p_val, p_kwargs)[0]
+            val_loss, val_nn_loss, val_p_loss = self.calc_loss(
+                y_val, y_val_pred, p_val, p_kwargs)
             logger.info('Epoch {} train loss: {:.2e} '
                         'val loss: {:.2e} for "{}"'
                         .format(epoch, tr_loss, val_loss, self.name))

             self._history.at[epoch, 'elapsed_time'] = time.time() - t0
             self._history.at[epoch, 'training_loss'] = tr_loss.numpy()
+            self._history.at[epoch, 'training_nn_loss'] = tr_nn_loss.numpy()
+            self._history.at[epoch, 'training_p_loss'] = tr_p_loss.numpy()
             self._history.at[epoch, 'validation_loss'] = val_loss.numpy()
+            self._history.at[epoch, 'validation_nn_loss'] = val_nn_loss.numpy()
+            self._history.at[epoch, 'validation_p_loss'] = val_p_loss.numpy()

         diagnostics = {'x': x, 'y': y, 'p': p,
                        'x_val': x_val, 'y_val': y_val, 'p_val': p_val,
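With the expanded history above, the separate data (nn) and physics (p) loss components can be inspected per epoch after training. A minimal sketch, assuming a fitted PhysicsGuidedNeuralNetwork instance `model` and training arrays `x`, `y`, `p` already exist:

    # Hedged sketch: inspect the new per-component loss columns
    model.fit(x, y, p, n_batch=16, n_epoch=10)
    cols = ['training_nn_loss', 'training_p_loss',
            'training_loss', 'validation_loss']
    print(model.history[cols])
    # Each epoch's training_loss is expected to be the loss_weights-weighted
    # combination of training_nn_loss and training_p_loss.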
2 changes: 1 addition & 1 deletion phygnn/version.py
@@ -1,4 +1,4 @@
 # -*- coding: utf-8 -*-
 """Physics Guided Neural Network version."""

-__version__ = '0.0.9'
+__version__ = '0.0.10'
44 changes: 44 additions & 0 deletions tests/test_classifier.py
@@ -0,0 +1,44 @@
"""
Tests for basic phygnn functionality and execution.
"""
import numpy as np
import pandas as pd
from phygnn import PhysicsGuidedNeuralNetwork, p_fun_dummy


x1 = np.arange(500) - 250
x2 = np.arange(500) - 250
x1, x2 = np.meshgrid(x1, x2)
x1 = x1.flatten()
x2 = x2.flatten()
x3 = x1 * x2
features = pd.DataFrame({'x1': x1, 'x2': x2})

y = ((x1 * x2) > 0).astype(bool).astype(float)
labels = pd.DataFrame({'y': y})

hidden_layers = [{'units': 16},
                 {'activation': 'relu'},
                 {'units': 16},
                 {'activation': 'relu'},
                 ]
output_layer = [{'units': 1},
                {'activation': 'sigmoid'},
                ]


def test_classification():
    """Test the phygnn model as a classifier without the p_fun"""
    PhysicsGuidedNeuralNetwork.seed(0)
    model = PhysicsGuidedNeuralNetwork(p_fun=p_fun_dummy,
                                       hidden_layers=hidden_layers,
                                       output_layer=output_layer,
                                       loss_weights=(1.0, 0.0),
                                       metric='binary_crossentropy',
                                       learning_rate=0.05,
                                       n_features=2, n_labels=1)
    model.fit(features, labels, features, n_batch=1, n_epoch=50)

    y_pred = model.predict(features)
    accuracy = 100 * (np.round(y_pred) == labels.values).sum() / len(labels)
    assert accuracy > 99
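Since loss_weights=(1.0, 0.0) zeroes the physics term, p_fun_dummy only has to satisfy the p_fun interface here. A short hedged sketch of using the trained classifier from this test (the 0.5 threshold is an illustrative assumption, not from the diff):

    # Hard class labels from the sigmoid output of the test above
    y_prob = model.predict(features)       # probabilities in [0, 1]
    y_class = (y_prob > 0.5).astype(int)   # equivalent to np.round here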
87 changes: 87 additions & 0 deletions tests/test_loss_metrics.py
@@ -0,0 +1,87 @@
"""
Tests for basic phygnn functionality and execution.
"""
# pylint: disable=W0613
import pytest
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import InputLayer, Dense, Activation
from phygnn import PhysicsGuidedNeuralNetwork


N = 100
A = np.linspace(-1, 1, N)
B = np.linspace(-1, 1, N)
A, B = np.meshgrid(A, B)
A = np.expand_dims(A.flatten(), axis=1)
B = np.expand_dims(B.flatten(), axis=1)

Y = np.sqrt(A ** 2 + B ** 2)
X = np.hstack((A, B))
P = X.copy()
Y_NOISE = Y * (1 + (np.random.random(Y.shape) - 0.5) * 0.5) + 0.1


HIDDEN_LAYERS = [{'units': 64, 'activation': 'relu', 'name': 'relu1'},
                 {'units': 64, 'activation': 'relu', 'name': 'relu2'},
                 ]


def p_fun_pythag(model, y_true, y_predicted, p):
    """Example function for loss calculation using physical relationships.

    Parameters
    ----------
    model : PhysicsGuidedNeuralNetwork
        Instance of the phygnn model at the current point in training.
    y_true : np.ndarray
        Known y values that were given to the PhyGNN fit method.
    y_predicted : tf.Tensor
        Predicted y values in a 2D tensor based on x values in this batch.
    p : np.ndarray
        Supplemental physical feature data that can be used to calculate a
        y_physical value to compare against y_predicted. The rows in this
        array have been carried through the batching process alongside y_true
        and the features used to create y_predicted and so can be used 1-to-1
        with the rows in y_predicted and y_true.

    Returns
    -------
    p_loss : tf.Tensor
        A 0D tensor physical loss value.
    """

    p = tf.convert_to_tensor(p, dtype=tf.float32)
    y_physical = tf.sqrt(p[:, 0]**2 + p[:, 1]**2)
    y_physical = tf.expand_dims(y_physical, 1)

    p_loss = tf.math.reduce_mean(tf.math.abs(y_predicted - y_physical))

    return p_loss


@pytest.mark.parametrize('metric_name', ('mae', 'mse', 'mbe', 'relative_mae',
                                         'relative_mse', 'relative_mbe'))
def test_loss_metric(metric_name):
    """Test the operation of the PGNN with a weighting p_fun."""
    PhysicsGuidedNeuralNetwork.seed(0)
    model = PhysicsGuidedNeuralNetwork(p_fun=p_fun_pythag,
                                       metric=metric_name,
                                       hidden_layers=HIDDEN_LAYERS,
                                       loss_weights=(0.0, 1.0),
                                       n_features=2, n_labels=1)
    model.fit(X, Y_NOISE, P, n_batch=4, n_epoch=20)

    test_mae = np.mean(np.abs(model.predict(X) - Y))

    assert len(model.layers) == 6
    assert len(model.weights) == 6
    assert len(model.history) == 20
    assert isinstance(model.layers[0], InputLayer)
    assert isinstance(model.layers[1], Dense)
    assert isinstance(model.layers[2], Activation)
    assert isinstance(model.layers[3], Dense)
    assert isinstance(model.layers[4], Activation)
    assert isinstance(model.layers[5], Dense)
    assert model.history.validation_loss.values[-1] < 0.015
    assert test_mae < 0.015
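For reference, the total loss these tests exercise is assumed (per the two-element loss_weights tuple asserted above) to combine the data and physics terms as a weighted sum; with loss_weights=(0.0, 1.0) the model here trains on the physics residual alone. A one-line sketch of the assumed combination:

    # Hedged sketch of the assumed weighted combination (not from this diff)
    loss = loss_weights[0] * nn_loss + loss_weights[1] * p_loss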
