Commit

Update pyrenn.py
The proposed changes help in two ways:
1. Prevent training from running forever when the error still improves, but only marginally (see the usage sketch below).
2. Stop training early, before k_max is reached, once the error no longer improves.
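For orientation, here is a minimal usage sketch of the new parameter. It is not part of the commit: CreateNN and train_LM are pyrenn's existing API, while the network shape and the sine-fitting data are made up purely for illustration.

    import numpy as np
    import pyrenn as prn

    # Toy single-input regression problem (illustrative data only)
    P = np.linspace(0, 2 * np.pi, 50)   # training inputs
    Y = np.sin(P)                       # training targets

    net = prn.CreateNN([1, 5, 1])       # 1 input, 5 hidden neurons, 1 output

    # min_E_step is the parameter added by this commit: if the error improves
    # by less than 1e-9 five times, training stops before k_max is reached.
    net = prn.train_LM(P, Y, net, k_max=200, E_stop=1e-10,
                       verbose=True, min_E_step=1e-9)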
mc10011 authored Feb 18, 2019
1 parent c332c39 commit 32a6198
Showing 1 changed file with 21 additions and 9 deletions.

python/pyrenn.py
@@ -674,7 +674,7 @@ def BPTT(net,data):


 def train_LM(P,Y,net,k_max=100,E_stop=1e-10,dampfac=3.0,dampconst=10.0,\
-        verbose = False):
+        verbose = False,min_E_step=1e-09):
     """ Implementation of the Levenberg-Marquardt-Algorithm (LM) based on:
     Levenberg, K.: A Method for the Solution of Certain Problems in Least Squares.
     Quarterly of Applied Mathematics, 2:164-168, 1944.
@@ -691,6 +691,7 @@ def train_LM(P,Y,net,k_max=100,E_stop=1e-10,dampfac=3.0,dampconst=10.0,\
         E_stop: Termination Error, Training stops when the Error <= E_stop
         dampconst: constant to adapt damping factor of LM
         dampfac: damping factor of LM
+        min_E_step: minimum step for error. When reached 5 times, training terminates.
     Returns:
         net: trained Neural Network
     """
@@ -705,15 +706,15 @@ def train_LM(P,Y,net,k_max=100,E_stop=1e-10,dampfac=3.0,dampconst=10.0,\
     if verbose:
         print('Iteration: ',k,' Error: ',E,' scale factor: ',dampfac)

+    early=0

     while True:
         #run loop until either k_max or E_stop is reached

         JJ = np.dot(J.transpose(),J) #J.transp * J
         w = net['w'] #weight vector

         while True:
             #repeat until optimizing step is successful

             #gradient
             g = np.dot(J.transpose(),e)
@@ -729,15 +730,23 @@ def train_LM(P,Y,net,k_max=100,E_stop=1e-10,dampfac=3.0,dampconst=10.0,\

             net['w'] = w + w_delta #new weight vector

-            Enew = calc_error(net,data) #calculate new Error E
-
-            if Enew<E:
+            Enew = calc_error(net,data) #calculate new Error E
+            if Enew<E and abs(E-Enew)>=min_E_step:
                 #Optimization Step successful!
-                dampfac= dampfac/dampconst #adapt scale factor
+                #if E-Enew<=1e-09:
+                dampfac= dampfac/dampconst#adapt scale factor
                 break #go to next iteration
             else:
-                #Optimization Step NOT successful!
+                #Optimization Step NOT successful!\
                 dampfac = dampfac*dampconst#adapt scale factor
+                if abs(E-Enew)<=min_E_step:
+                    if verbose:
+                        print('E-Enew<=min_E_step Encountered!!')
+                    early=early+1
+                    if early>=5.0:
+                        if verbose:
+                            print('5 Times * E-Enew<=min_E_step Encountered!!')
+                        break

         #Calculate Jacobian, Error and error vector for next iteration
         J,E,e = RTRL(net,data)
@@ -753,7 +762,10 @@ def train_LM(P,Y,net,k_max=100,E_stop=1e-10,dampfac=3.0,dampconst=10.0,\
         elif E<=E_stop:
             print('Termination Error reached')
             break
+        elif early>=5.0:
+            print('Error decreased 5 times by minimum step. Force training exit.')
+            break

     net['ErrorHistory'] = ErrorHistory[:k]
     return net
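For readers who find the counter logic hard to trace inside the diff, the following self-contained sketch reproduces the accept/reject control flow on a 1-D toy objective E(w) = w**2. The damped step stands in for pyrenn's Jacobian-based LM update; only the dampfac adaptation and the early counter mirror the committed code. Note that early is never reset after a successful step, so five sub-min_E_step improvements at any point during training trigger the exit.

    def toy_lm_loop(k_max=100, E_stop=1e-10, dampfac=3.0, dampconst=10.0,
                    min_E_step=1e-9, verbose=False):
        # Sketch of train_LM's accept/reject loop on E(w) = w**2; the damped
        # step below is a stand-in for the real Jacobian-based LM update.
        w = 5.0
        E = w ** 2
        early = 0          # counts improvements smaller than min_E_step
        k = 0
        while True:
            while True:
                g = 2.0 * w                         # gradient of E(w) = w**2
                w_delta = -g / (2.0 + dampfac)      # damped (LM-style) step
                Enew = (w + w_delta) ** 2
                if Enew < E and abs(E - Enew) >= min_E_step:
                    dampfac = dampfac / dampconst   # step accepted: relax damping
                    w = w + w_delta
                    break
                else:
                    dampfac = dampfac * dampconst   # step rejected: damp harder
                    if abs(E - Enew) <= min_E_step:
                        early += 1                  # error is stalling
                        if early >= 5:
                            break
            E = w ** 2
            k += 1
            if verbose:
                print('Iteration:', k, 'Error:', E, 'scale factor:', dampfac)
            if k >= k_max:
                print('Maximum number of iterations reached')
                break
            elif E <= E_stop:
                print('Termination Error reached')
                break
            elif early >= 5:
                print('Error decreased 5 times by minimum step. Force training exit.')
                break
        return w, E, k

With the defaults above, the loop typically exits via the Termination Error branch; raising min_E_step (for example to 1e-2) makes the improvements stall against the threshold and exercises the new forced-exit path instead.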

