Merge pull request #38 from marcpinet/refactor-more-robust-model
Refactor more robust model
marcpinet authored Apr 24, 2024
2 parents 1242803 + 87b1513 commit 89a3dcc
Showing 8 changed files with 448 additions and 186 deletions.
86 changes: 86 additions & 0 deletions README.md
@@ -41,6 +41,92 @@ You are free to tweak the hyperparameters and the network architecture to see how

I used the [MNIST dataset](https://en.wikipedia.org/wiki/MNIST_database) to test the library, but you can use any dataset you want.

## 🚀 Quick examples (more [here](examples/))

### Binary Classification

```python
from neuralnetlib.model import Model
from neuralnetlib.layers import Input, Dense, Activation
from neuralnetlib.activations import Sigmoid
from neuralnetlib.metrics import accuracy_score

# ... Preprocess X_train, y_train, X_test, y_test if necessary (you can use neuralnetlib.preprocess and neuralnetlib.utils)

# Create a model
model = Model()
model.add(Input(10))  # 10 features
model.add(Dense(8))
model.add(Dense(1))
model.add(Activation(Sigmoid()))  # there are many ways to tell the model which activation function you'd like, see the next example

# Compile the model
model.compile(loss_function='bce', optimizer='sgd')

# Train the model
model.fit(X_train, y_train, epochs=10, batch_size=32, metrics=[accuracy_score])
```
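Once trained, you can evaluate on held-out data. A minimal sketch, assuming `model.predict` behaves like its Keras namesake and returns the raw sigmoid outputs (the library's metric functions take `(predictions, y_true)`, as the callbacks module shows):

```python
y_prob = model.predict(X_test)  # `predict` is assumed here to return probabilities
y_pred = (y_prob > 0.5).astype(int)  # threshold the sigmoid output into hard 0/1 labels
print(accuracy_score(y_pred, y_test))  # metric signature is (predictions, y_true)
```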

### Multiclass Classification

```python
from neuralnetlib.layers import Conv2D, BatchNormalization, MaxPooling2D, Flatten
from neuralnetlib.activations import Softmax
from neuralnetlib.optimizers import Adam
from neuralnetlib.metrics import accuracy_score

# ... Preprocess X_train, y_train, X_test, y_test if necessary (you can use neuralnetlib.preprocess and neuralnetlib.utils)

# Create and compile a model
model = Model()
model.add(Input(28, 28, 1))  # For example, MNIST images
model.add(Conv2D(32, kernel_size=3, padding='same'))
model.add(Activation('relu'))  # activation supports both str...
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=2))
model.add(Flatten())  # flatten the feature maps before the dense layers
model.add(Dense(64, activation='relu'))
model.add(Dense(10, activation=Softmax()))  # ... and ActivationFunction objects
model.compile(loss_function='categorical_crossentropy', optimizer=Adam())  # same goes for loss_function and optimizer: strings and objects both work

# Train the model (y_train_ohe holds one-hot encoded labels, see the sketch below)
model.fit(X_train, y_train_ohe, epochs=5, metrics=[accuracy_score])
```
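`fit` is given one-hot labels (`y_train_ohe`) to match the 10-unit softmax output. `neuralnetlib.utils` may ship its own encoder; here is a dependency-free NumPy sketch:

```python
import numpy as np

def one_hot(y, num_classes):
    """Turn integer labels of shape (n,) into one-hot vectors of shape (n, num_classes)."""
    encoded = np.zeros((y.shape[0], num_classes))
    encoded[np.arange(y.shape[0]), y] = 1.0
    return encoded

y_train_ohe = one_hot(y_train, 10)  # 10 MNIST classes
```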

### Regression

```python
# Model, Input and Dense are already imported in the examples above

# ... Preprocess X_train, y_train, X_test, y_test if necessary (you can use neuralnetlib.preprocess and neuralnetlib.utils)

# Create and compile a model
model = Model()
model.add(Input(13))  # 13 features
model.add(Dense(64, activation='leakyrelu'))
model.add(Dense(1, activation='linear'))

model.compile(loss_function='mse', optimizer='adam')  # you can use either acronyms or full names

# Train the model (accuracy is meaningless for continuous targets, so no classification metric here)
model.fit(X_train, y_train, epochs=100, batch_size=128)
```
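Standardizing the inputs usually helps a regression model converge; a plain-NumPy sketch (independent of `neuralnetlib.preprocess`), to run before `fit`:

```python
import numpy as np

# Use training-set statistics only, so nothing leaks from the test set
mean, std = X_train.mean(axis=0), X_train.std(axis=0) + 1e-8  # epsilon guards against zero variance
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
```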

You can also save and load models:

```python
# Save a model
model.save('my_model.json')

# Load a model
model = Model.load('my_model.json')
```
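A quick sanity check of the round trip, again assuming a Keras-style `predict`: if the JSON file preserves the architecture and weights, the restored model reproduces the original outputs.

```python
import numpy as np

original = model.predict(X_test)
restored = Model.load('my_model.json').predict(X_test)
assert np.allclose(original, restored)  # identical weights => identical predictions
```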

## 📜 Output of the example file

Here is an example of a model training on the MNIST dataset using the library:

Large diffs are not rendered by default.

61 changes: 29 additions & 32 deletions neuralnetlib/callbacks.py
```diff
@@ -1,6 +1,5 @@
 import numpy as np
-
 
 class EarlyStopping:
     def __init__(self, patience: int = 5, min_delta: float = 0.001, restore_best_weights: bool = True,
                  start_from_epoch: int = 0, monitor: list = None, mode: str = 'auto', baseline: float = None):
@@ -12,64 +11,62 @@ def __init__(self, patience: int = 5, min_delta: float = 0.001, restore_best_weights: bool = True,
         self.mode = mode
         self.baseline = baseline
         self.best_weights = None
-        self.best_metrics = None
+        self.best_metric = None
         self.patience_counter = 0
         self.epoch = 0
         self.stop_training = False
 
-    def on_epoch_end(self, model, metrics):
+    def on_epoch_end(self, model, loss, metrics=None):
         self.epoch += 1
         if self.epoch < self.start_from_epoch:
             return False
 
-        if self.best_metrics is None:
+        if self.best_metric is None:
             if self.monitor is None:
-                self.best_metrics = metrics
-                if np.any(np.isnan(metrics)):
-                    self.mode = 'min'
-                else:
-                    self.mode = 'auto'
+                self.best_metric = loss
+                self.mode = 'min'
             else:
-                metric_values = [metric(model.predictions, model.y_true) for metric in self.monitor]
-                self.best_metrics = [np.inf if m > 0 else -np.inf for m in metric_values]
-                self.mode = 'max'
+                if metrics is None:
+                    raise ValueError("Metric to monitor is provided, but no metrics are available.")
+                metric_value = metrics[self.monitor[0].__name__]
+                self.best_metric = metric_value
+                if self.mode == 'auto':
+                    if np.isnan(metric_value):
+                        self.mode = 'min'
+                    else:
+                        self.mode = 'max'
 
         improved = False
         if self.monitor is None:
-            current_metric = metrics[-1]
-            best_metric = self.best_metrics[-1]
-            if (self.mode == 'min' and current_metric < best_metric - self.min_delta) or \
-                    (self.mode == 'max' and current_metric > best_metric + self.min_delta) or \
-                    (self.mode == 'auto' and current_metric < best_metric - self.min_delta):
-                self.best_metrics[-1] = current_metric
+            current_metric = loss
+            if (self.mode == 'min' and current_metric < self.best_metric - self.min_delta) or \
+                    (self.mode == 'max' and current_metric > self.best_metric + self.min_delta):
+                self.best_metric = current_metric
                 improved = True
         else:
-            for i, metric in enumerate(metrics):
-                best_metric = self.best_metrics[i]
-                if (self.mode == 'max' and metric > best_metric + self.min_delta) or \
-                        (self.mode == 'min' and metric < best_metric - self.min_delta):
-                    self.best_metrics[i] = metric
-                    improved = True
+            current_metric = metrics[self.monitor[0].__name__]
+            if (self.mode == 'max' and current_metric > self.best_metric + self.min_delta) or \
+                    (self.mode == 'min' and current_metric < self.best_metric - self.min_delta):
+                self.best_metric = current_metric
+                improved = True
 
         if improved:
             self.patience_counter = 0
             if self.restore_best_weights:
                 self.best_weights = [layer.weights for layer in model.layers if hasattr(layer, 'weights')]
         else:
             self.patience_counter += 1
-
-        if self.baseline is not None:
-            if self.mode == 'max' and max(self.best_metrics) < self.baseline:
-                self.patience_counter = self.patience + 1
-            elif self.mode == 'min' and min(self.best_metrics) > self.baseline:
-                self.patience_counter = self.patience + 1
+            if self.baseline is not None:
+                if self.mode == 'max' and self.best_metric < self.baseline:
+                    self.patience_counter = self.patience + 1
+                elif self.mode == 'min' and self.best_metric > self.baseline:
+                    self.patience_counter = self.patience + 1
 
         if self.patience_counter >= self.patience:
             self.stop_training = True
             print(f"\nEarly stopping after {self.epoch} epochs.", end='')
             if self.restore_best_weights and self.best_weights is not None:
-                for layer, best_weights in zip([layer for layer in model.layers if hasattr(layer, 'weights')],
-                                               self.best_weights):
+                for layer, best_weights in zip([layer for layer in model.layers if hasattr(layer, 'weights')], self.best_weights):
                     layer.weights = best_weights
             return True
```
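The refactor replaces the per-metric `best_metrics` list with a single `best_metric`, tracked either from the epoch loss (when `monitor` is None) or from one monitored metric looked up by function name. A self-contained sketch exercising the new `on_epoch_end` signature outside of `Model.fit` (`DummyModel` is a hypothetical stand-in, not part of the library):

```python
from neuralnetlib.callbacks import EarlyStopping

class DummyModel:
    layers = []  # on_epoch_end only reads model.layers when restoring weights

early_stopping = EarlyStopping(patience=2, min_delta=0.01, restore_best_weights=False)

# The loss stops improving after the third value, so with patience=2
# the callback requests a stop on the fifth epoch (monitor=None -> the loss is tracked)
for epoch, loss in enumerate([1.0, 0.5, 0.3, 0.31, 0.30, 0.32]):
    if early_stopping.on_epoch_end(DummyModel(), loss):
        print(f"stopped at epoch {epoch}")  # prints: stopped at epoch 4
        break
```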
