Commit 59179f5
matlab and python:
Modify the initialization of a in prepare_data for a given P0 and Y0. There was an error when using P0 and Y0 while the NN had internal delays.

python:
Use int() to avoid numpy warnings while indexing.

yabata committed Nov 2, 2016
1 parent ad2d226 commit 59179f5
Showing 2 changed files with 5 additions and 5 deletions.
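The Python-side change concerns integer division: under Python 3, dividing two integers with / returns a float, and numpy warns about (older releases) or rejects (current releases) float values used as array shapes or indices. A minimal, hypothetical sketch of the pattern the commit adopts (the variable names below are invented, not taken from pyrenn):

import numpy as np

vec = np.arange(12.0)
cols = len(vec) / 3                               # 4.0 -- a float under Python 3, not an int
# np.reshape(vec, (3, cols))                      # old numpy: VisibleDeprecationWarning; new numpy: TypeError
mat = np.reshape(vec, (3, int(cols)), order='F')  # explicit cast, as done in the commit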
matlab/prepare_data.m: 1 addition & 1 deletion
@@ -45,7 +45,7 @@
 a=cell(q0,net.M); %initialise layer outputs
 for i=1:q0
     for j=1:net.M-1
-        a{i,j}=zeros(net.nn(end),1); %layer ouputs of hidden layers are unknown -> set to zero
+        a{i,j}=zeros(net.nn(j+1),1); %layer ouputs of hidden layers are unknown -> set to zero
     end
     a{i,net.M}=Y0(:,i)./net.normY; %set layer ouputs of output layer
 end
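The MATLAB change above, mirrored in the prepare_data hunk of python/pyrenn.py below, fixes how the stored hidden-layer outputs are sized when P0 and Y0 are given: the old code used net.nn(end), the width of the output layer, for every hidden layer. As soon as a hidden layer has a different width and the network has internal delays (so these stored outputs are actually fed back), the zero vectors had the wrong length, which is the error mentioned in the commit message. A minimal, hypothetical Python sketch of the corrected initialization (nn, M and q0 follow the names used in the diff; the values are invented):

import numpy as np

nn = [2, 5, 3, 1]    # example topology: 2 inputs, hidden layers with 5 and 3 neurons, 1 output
M = len(nn) - 1      # number of layers
q0 = 2               # number of previous time steps supplied via P0/Y0
a = {}               # stored layer outputs for the previous time steps
for i in range(1, q0 + 1):
    for j in range(1, M):
        a[i, j] = np.zeros(nn[j])   # each hidden layer gets a zero vector of its own width
    # a[i, M] is then filled from the normalised previous outputs Y0, as in prepare_data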
python/pyrenn.py: 4 additions & 4 deletions
@@ -220,15 +220,15 @@ def w2Wb(net):
                 w_i = inputs*layers[m-1]
                 vec =w_temp[0:w_i]
                 w_temp = w_temp[w_i:]
-                IW[m,i,d] = np.reshape(vec,(layers[m-1],len(vec)/layers[m-1]),order='F')
+                IW[m,i,d] = np.reshape(vec,(layers[m-1],int(len(vec)/layers[m-1])),order='F')

         #internal connection weights
         for l in L_f[m]:
             for d in dL[m,l]:
                 w_i = layers[l-1]*layers[m-1]
                 vec =w_temp[0:w_i]
                 w_temp = w_temp[w_i:]
-                LW[m,l,d] = np.reshape(vec,(layers[m-1],len(vec)/layers[m-1]),order='F')
+                LW[m,l,d] = np.reshape(vec,(layers[m-1],int(len(vec)/layers[m-1])),order='F')

         #bias weights
         w_i = layers[m-1]
@@ -310,7 +310,7 @@ def NNOut(P,net,P0=None,Y0=None):
     Returns:
         Y_NN: Neural Network output for input P
     """
-    Y=np.zeros((net['layers'][-1],np.size(P)/net['nn'][0]))
+    Y=np.zeros((net['layers'][-1],int(np.size(P)/net['nn'][0])))
     data,net = prepare_data(P,Y,net,P0=P0,Y0=Y0)
     IW,LW,b = w2Wb(net) #input-weight matrices,connection weight matrices, bias vectors
     Y_NN = NNOut_(data['P'],net,IW,LW,b,a=data['a'],q0=data['q0'])[0]
@@ -822,7 +822,7 @@ def prepare_data(P,Y,net,P0=None,Y0=None):
         a = {} #initialise layer outputs
         for i in range(1,q0+1):
             for j in range(1,net['M']):
-                a[i,j]=np.zeros(net['nn'][-1]) #layer ouputs of hidden layers are unknown -> set to zero
+                a[i,j]=np.zeros(net['nn'][j]) #layer ouputs of hidden layers are unknown -> set to zero
             a[i,net['M']]=Y0[:,i-1]/net['normY'] #set layer ouputs of output layer

         #add previous inputs and outputs to input/output matrices
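For completeness, a hypothetical end-to-end sketch of the situation the first fix targets, using pyrenn's public API (CreateNN, train_LM and NNOut are library functions; the topology, delays and data below are invented for illustration):

import numpy as np
import pyrenn

# recurrent network: 1 input, hidden layers of 3 and 2 neurons, 1 output,
# with an internal delay of 1 and an output feedback delay of 1
net = pyrenn.CreateNN([1, 3, 2, 1], dIn=[0], dIntern=[1], dOut=[1])

P = np.array([[0.1, 0.3, 0.5, 0.7, 0.9]])   # training inputs, shape (inputs, timesteps)
Y = np.array([[0.2, 0.4, 0.6, 0.8, 1.0]])   # training targets, shape (outputs, timesteps)
net = pyrenn.train_LM(P, Y, net, k_max=5, E_stop=1e-3, verbose=False)

Ptest = np.array([[0.2, 0.4, 0.6]])   # new inputs
P0 = np.array([[0.9]])                # input preceding Ptest
Y0 = np.array([[1.0]])                # output preceding Ptest
# with the fixed prepare_data, the stored hidden-layer outputs are sized per layer,
# so passing P0/Y0 to a network with internal delays no longer fails
Ytest = pyrenn.NNOut(Ptest, net, P0=P0, Y0=Y0)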
