Wednesday 5 December 2018

testDNN analysis 13

% CalcRmse: calculate the rmse between predictions and OUTs
%
% [rmse AveErrNum] = CalcRmse( dbn, IN, OUT )
%
%
%Output parameters:
% rmse: the rmse between predictions and OUTs
% AveErrNum: average error number after binarization
%
%
%Input parameters:
% dbn: network
% IN: input data, where # of row is # of data and # of col is # of input features
% OUT: output data, where # of row is # of data and # of col is # of output labels
%
%
%Version: 20130727

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Deep Neural Network:                                     %
%                                                          %
% Copyright (C) 2013 Masayuki Tanaka. All rights reserved. %
%                    mtanaka@ctrl.titech.ac.jp             %
%                                                          %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function [rmse AveErrNum] = CalcRmse( dbn, IN, OUT )
 out = v2h( dbn, IN );

 err = power( OUT - out, 2 );
 rmse = sqrt( sum(err(:)) / numel(err) );

 bout = out > 0.5;
 BOUT = OUT > 0.5;

 err = abs( BOUT - bout );
 AveErrNum = mean( sum(err,2) );
end
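
To make the two outputs concrete, here is a minimal standalone sketch (hypothetical toy matrices standing in for a real network's v2h output; not part of the toolbox):

out = [0.9 0.2; 0.4 0.8];  % toy predictions, as v2h would return them
OUT = [1   0  ; 1   1  ];  % toy targets
err  = power( OUT - out, 2 );
rmse = sqrt( sum(err(:)) / numel(err) )                   % sqrt(0.45/4) ~= 0.3354
AveErrNum = mean( sum( abs((OUT>0.5) - (out>0.5)), 2 ) )  % row 2 misses one bit -> 0.5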

testDNN analysis 12

% GetDroppedDBN: get dropped dbn
%
% [DropedDBN OnInd] = GetDroppedDBN(dbn, DropOutRate, strbm)
%
%
%Output parameters:
% DropedDBN: the generated dropped Deep Belief Nets (DBN) model
% OnInd: indexes of the nodes that are kept (not dropped)
%
%
%Input parameters:
% dbn: the Original Deep Belief Nets (DBN) model
% DropOutRate: 0 < DropOutRate < 1
% strbm (optional): first rbm layer to apply dropout to (Default: 1)
%
%
%Reference:
%for details of the dropout
% Hinton et al, Improving neural networks by preventing co-adaptation of feature detectors, 2012.
%
%
%Version: 20130920

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Deep Neural Network:                                     %
%                                                          %
% Copyright (C) 2013 Masayuki Tanaka. All rights reserved. %
%                    mtanaka@ctrl.titech.ac.jp             %
%                                                          %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function [DropedDBN OnInd] = GetDroppedDBN(dbn, DropOutRate, strbm)

if( ~exist('strbm', 'var') || isempty(strbm) )
strbm = 1;
end

nrbm = numel(dbn.rbm); % 3

OnInd = GetOnInd(dbn, DropOutRate, strbm); % {16 x 32, 8 x 16, 4 x 8}

DropedDBN.type = dbn.type; % BBDBN
DropedDBN.rbm = cell(nrbm,1); % 3 x 1

for n=1:nrbm-1 % 1:2
    DropedDBN.rbm{n}.type = dbn.rbm{n}.type; % BBRBM Bernoulli-Bernoulli Restricted Boltzmann Machine
    DropedDBN.rbm{n}.W = dbn.rbm{n}.W(OnInd{n},OnInd{n+1}); % 512 x 128(= 16 x 32, 8 x 16), 128 x 32
    DropedDBN.rbm{n}.b = dbn.rbm{n}.b(1,OnInd{n+1}); % 1 x 128 = 8 x 16, 1 x 32 = 4 x 8
    DropedDBN.rbm{n}.c = dbn.rbm{n}.c(1,OnInd{n}); % 1 x 512 = 16 x 32
    if( isequal(dbn.rbm{n}.type(1:2), 'GB') ) % BB
    DropedDBN.rbm{n}.sig = dbn.rbm{n}.sig(1,OnInd{n});
    end
end

n = nrbm; % 3
DropedDBN.rbm{n}.type = dbn.rbm{n}.type; % BBRBM Bernoulli-Bernoulli Restricted Boltzmann Machine
DropedDBN.rbm{n}.W = dbn.rbm{n}.W(OnInd{n},:); % 32 x 4 OnInd(4 x 8)  dbn.rbm{n}.W(8 x 4)
DropedDBN.rbm{n}.b = dbn.rbm{n}.b; % 1 x 4
DropedDBN.rbm{n}.c = dbn.rbm{n}.c(1,OnInd{n}); % 1 x 32  dbn.rbm{n}.c(1 x 8)
if( isequal(dbn.rbm{n}.type(1:2), 'GB') )
DropedDBN.rbm{n}.sig = dbn.rbm{n}.sig(1,OnInd{n});
end

%{
debug> OnInd{n}(1,:)
ans =

 Columns 1 through 19:

   2   3   3   5   1   1   1   1   8   1   1   1   1   1   3   1   3   4   1

 Columns 20 through 32:

   1   4   2   4   2   1   4   2   2   2   5   2   3

debug> OnInd{n}(1:5,1:5)
ans =

    2    3    3    5    1
   12    6    5    6    3
   13    7    8   10    4
   13    9   13   11    4
   15    9   13   11    5


debug> dbn.rbm{n}.W(OnInd{n}(1:5,1:5),OnInd{n+1}(1:3,1:3))
ans =

 Columns 1 through 6:

  -0.0518891   0.1645196  -0.3648530  -0.0518891   0.1645196   0.1203377
  -0.0509273  -0.1455587  -0.0395848  -0.0509273  -0.1455587  -0.0887734
  -0.1935831   0.0420215  -0.1951475  -0.1935831   0.0420215   0.1720987
  -0.1935831   0.0420215  -0.1951475  -0.1935831   0.0420215   0.1720987
   0.0756364   0.1362832  -0.3240471   0.0756364   0.1362832  -0.0595394
   0.0107394  -0.1234916   0.1234910   0.0107394  -0.1234916  -0.1775845
  -0.0695427  -0.0723231   0.0499915  -0.0695427  -0.0723231   0.0253774
  -0.0848943   0.0936208   0.0172495  -0.0848943   0.0936208  -0.1848024
  -0.0162922   0.1760467  -0.1882073  -0.0162922   0.1760467   0.0518954
  -0.0162922   0.1760467  -0.1882073  -0.0162922   0.1760467   0.0518954
   0.0107394  -0.1234916   0.1234910   0.0107394  -0.1234916  -0.1775845
  -0.1144845  -0.3229443   0.0193068  -0.1144845  -0.3229443  -0.0304485
  -0.0641209   0.0905255   0.3841493  -0.0641209   0.0905255   0.0108430
  -0.1935831   0.0420215  -0.1951475  -0.1935831   0.0420215   0.1720987
  -0.1935831   0.0420215  -0.1951475  -0.1935831   0.0420215   0.1720987
  -0.1144845  -0.3229443   0.0193068  -0.1144845  -0.3229443  -0.0304485
  -0.0695427  -0.0723231   0.0499915  -0.0695427  -0.0723231   0.0253774
   0.0304704   0.0205339   0.0773431   0.0304704   0.0205339  -0.0291555
   0.0077029   0.0241572  -0.0580945   0.0077029   0.0241572  -0.2857645
   0.0077029   0.0241572  -0.0580945   0.0077029   0.0241572  -0.2857645
  -0.0436215   0.1196918  -0.2066504  -0.0436215   0.1196918   0.1916691
   0.0107394  -0.1234916   0.1234910   0.0107394  -0.1234916  -0.1775845
  -0.0384129  -0.2294340  -0.1843899  -0.0384129  -0.2294340  -0.0182508
  -0.0384129  -0.2294340  -0.1843899  -0.0384129  -0.2294340  -0.0182508
  -0.1144845  -0.3229443   0.0193068  -0.1144845  -0.3229443  -0.0304485

 Columns 7 through 9:

   0.0265641   0.0265641  -0.0518891
   0.0238093   0.0238093  -0.0509273
   0.0737077   0.0737077  -0.1935831
   0.0737077   0.0737077  -0.1935831
  -0.1768547  -0.1768547   0.0756364
  -0.0719310  -0.0719310   0.0107394
   0.0053586   0.0053586  -0.0695427
  -0.0316935  -0.0316935  -0.0848943
  -0.1472840  -0.1472840  -0.0162922
  -0.1472840  -0.1472840  -0.0162922
  -0.0719310  -0.0719310   0.0107394
  -0.0654309  -0.0654309  -0.1144845
  -0.0759296  -0.0759296  -0.0641209
   0.0737077   0.0737077  -0.1935831
   0.0737077   0.0737077  -0.1935831
  -0.0654309  -0.0654309  -0.1144845
   0.0053586   0.0053586  -0.0695427
  -0.0218777  -0.0218777   0.0304704
  -0.1045122  -0.1045122   0.0077029
  -0.1045122  -0.1045122   0.0077029
  -0.0757484  -0.0757484  -0.0436215
  -0.0719310  -0.0719310   0.0107394
   0.1710117   0.1710117  -0.0384129
   0.1710117   0.1710117  -0.0384129
  -0.0654309  -0.0654309  -0.1144845



debug> ee
ee =

   1   1   1
   2   2   2

debug> ff
ff =

   3   3   3
   4   4   4


gg =

    1    2    3    4    5    6    7    8    9   10
   11   12   13   14   15   16   17   18   19   20
   21   22   23   24   25   26   27   28   29   30
   31   32   33   34   35   36   37   38   39   40
   41   42   43   44   45   46   47   48   49   50

debug> gg(ee,ff)
ans =

    3    4    3    4    3    4
   13   14   13   14   13   14
    3    4    3    4    3    4
   13   14   13   14   13   14
    3    4    3    4    3    4
   13   14   13   14   13   14

debug> hh=[5,7;6,8]
hh =

   5   7
   6   8

debug> gg(ee,hh)
ans =

    5    6    7    8
   15   16   17   18
    5    6    7    8
   15   16   17   18
    5    6    7    8
   15   16   17   18

debug> exit


C:\Users\ars>
%}
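
The ee/ff/gg session above pins down the indexing rule that produces the 512 x 128 shapes noted in GetDroppedDBN: when the subscripts are matrices, A(R,C) takes every element of R as a row index and every element of C as a column index, so the result is numel(R) x numel(C). A minimal check:

A = magic(4);
R = [1 2; 3 4];       % 2 x 2 index matrix, numel(R) = 4
size(A(R, [1 2]))     % ans = 4 2: numel(R) rows by numel([1 2]) columns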

testDNN analysis 11

% GetOnInd: get indexes which are used (not dropped) nodes
%
% OnInd = GetOnInd( dbn, DropOutRate, strbm )
%
%
%Output parameters:
% OnInd: indexes of the nodes that are kept (not dropped)
%
%
%Input parameters:
% dbn: the Original Deep Belief Nets (DBN) model
% DropOutRate: 0 < DropOutRate < 1
% strbm (optional): first rbm layer to apply dropout to (Default: 1)
%
%
%Reference:
%for details of the dropout
% Hinton et al, Improving neural networks by preventing co-adaptation of feature detectors, 2012.
%
%
%Version: 20130821

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Deep Neural Network:                                     %
%                                                          %
% Copyright (C) 2013 Masayuki Tanaka. All rights reserved. %
%                    mtanaka@ctrl.titech.ac.jp             %
%                                                          %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function OnInd = GetOnInd( dbn, DropOutRate, strbm ) % ... , 3 x 1, 1

if( ~exist('strbm', 'var') || isempty(strbm) )
strbm = 1;
end

OnInd = cell(numel(dbn.rbm),1); % 3, 1

for n=1:numel(dbn.rbm) % 1:3
    dimV = size(dbn.rbm{n}.W,1); % 32->16->8
    if( n >= strbm )
        OnNum = round(dimV*DropOutRate(n)); % 16->8->4; e.g. DropOutRate 0.7 would give OnNum=22 -> 22 x 32 as OnInd{1}
        OnInd{n} = sort(randperm(dimV, OnNum)); % OnNum permutations of 1:dimV (old Octave behavior, see below) -> 16 x 32, 8 x 16, 4 x 8
    else
        OnInd{n} = 1:dimV; %1 2 3 4 5 6 7 8
    end
end

%{
randperm (n)
randperm (n, m)
Return a row vector containing a random permutation of 1:n.
If m is supplied, return m permutations, one in each row of an MxN matrix

octave:1> randperm(8,4)
ans =

   1   6   2   8   4   5   3   7
   3   1   6   2   5   4   7   8
   4   6   8   7   5   1   2   3
   2   8   5   7   1   6   4   3

octave:2> sort(randperm(8,4))
ans =

   1   3   2   4   1   6   1   2
   2   3   5   5   2   6   1   4
   5   3   8   6   3   7   4   4
   8   6   8   7   7   7   8   5

%}
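
Note that this is old Octave behavior: in current Octave and MATLAB, randperm(n, m) returns a single 1 x m row of unique values from 1:n, so OnInd{n} would be a plain index vector instead of the OnNum x dimV matrices assumed throughout this analysis. A hypothetical helper (not in the toolbox) that reproduces the old m-rows behavior on a modern interpreter:

% old_randperm: m independent permutations of 1:n, one per row (sketch)
function P = old_randperm(n, m)
  P = zeros(m, n);
  for k = 1:m
    P(k,:) = randperm(n); % one full permutation per row
  end
end
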
%{
debug> aa=[1,2,3,4;5,6,7,8;9,10,11,12]
debug> aa
aa =

    1    2    3    4
    5    6    7    8
    9   10   11   12

debug> bb
bb =

   1   1
   2   2
   3   3
   4   4

debug> cc
cc =

   1
   1

debug> aa(cc,bb)
ans =

   1   2   3   4   1   2   3   4
   1   2   3   4   1   2   3   4

debug> aa
aa = 3 x 4

    1    2    3    4
    5    6    7    8
    9   10   11   12

debug> aa(bb)
ans =

   1   1
   5   5
   9   9
   2   2
debug> bb
bb =  4 x 2

   1   1
   2   2
   3   3
   4   4

debug> dd
dd = 2 x 1

   1
   2

debug> aa(dd,bb) 2 x 8
ans =

   1   2   3   4   1   2   3   4
   5   6   7   8   5   6   7   8

%}

testDNN analysis 10

% v2hall: to transform from visible (input) variables to all hidden (output) variables
%
% Hall = v2hall(dnn, V)
%
%
%Output parameters:
% Hall: all hidden (output) variables, where # of row is number of data and # of col is # of hidden (output) nodes
%
%
%Input parameters:
% dnn: the Deep Neural Network model (dbn, rbm)
% V: visible (input) variables, where # of row is number of data and # of col is # of visible (input) nodes
%
%
%Version: 20130727

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Deep Neural Network:                                     %
%                                                          %
% Copyright (C) 2013 Masayuki Tanaka. All rights reserved. %
%                    mtanaka@ctrl.titech.ac.jp             %
%                                                          %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function Hall = v2hall(dnn, V) %  dnn.rbm{i}(512 x 128, 128 x 32, 32 x 4), V(250 x 512)

if( isequal(dnn.type(3:5), 'RBM') ) % BBRBM
    Hall = cell(1,1);
    Hall{1} = sigmoid( bsxfun(@plus, V * dnn.W, dnn.b ) );

elseif( isequal(dnn.type(3:5), 'DBN') ) % BBDBN
    nrbm = numel( dnn.rbm ); % 3
    Hall = cell(nrbm,1); % 3 x 1
    H0 = V; % 1000 x 32  , 250 x 512

    for i=1:nrbm % 1:3
        H1 = v2h( dnn.rbm{i}, H0 ); % dnn.rbm{i}.type = BBRBM
        H0 = H1;
        Hall{i} = H1; % 1000 x 4 (i=3) <- 1000 x 16, 1000 x 8; with dropout: 250 x 128, 250 x 32, 250 x 4 (i=1..3)
    end
end


testDNN analysis 9

% v2h: to transform from visible (input) variables to hidden (output) variables
%
% H = v2h(dnn, V)
%
%
%Output parameters:
% H: hidden (output) variables, where # of row is number of data and # of col is # of hidden (output) nodes
%
%
%Input parameters:
% dnn: the Deep Neural Network model (dbn, rbm)
% V: visible (input) variables, where # of row is number of data and # of col is # of visible (input) nodes
%
%
%Example:
% datanum = 1024;
% outputnum = 16;
% inputnum = 4;
%
% inputdata = rand(datanum, inputnum);
%
% dnn = randRBM( inputnum, outputnum );
% outputdata = v2h( dnn, inputdata );
%
%
%Version: 20130727


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Deep Neural Network:                                     %
%                                                          %
% Copyright (C) 2013 Masayuki Tanaka. All rights reserved. %
%                    mtanaka@ctrl.titech.ac.jp             %
%                                                          %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function H = v2h(dnn, V) % V(1000 x 32, 250 x 32)   dnn.rbm{i}.W(32 x 16)(16 x 8) (8 x 4)

if( isequal(dnn.type, 'BBRBM') ) % dnn.type is BBDBN here; BBRBM = Bernoulli-Bernoulli Restricted Boltzmann Machine
    H = sigmoid( bsxfun(@plus, V * dnn.W, dnn.b ) ); % (250 x 32) * (32 x 16) = 250 x 16
% 250 x 512 * 512 x 128 = 250 x 128

elseif( isequal(dnn.type, 'GBRBM') ) % BBDBN Gaussian-Bernoulli Restricted Boltzmann Machine
    v = bsxfun(@rdivide, V, dnn.sig );
    H = sigmoid( bsxfun(@plus, v * dnn.W, dnn.b ) ); 

elseif( isequal(dnn.type(3:5), 'DBN') ) % BBDBN
    nrbm = numel( dnn.rbm ); % 3
    H0 = V; % 1000 x 32, 250 x 32, 250 x 512
    for i=1:nrbm
        H1 = v2h( dnn.rbm{i}, H0 ); % 1000 x 32, 250 x 32    dnn.rbm{i}.type = BBRBM dnn.rbm{i}.W(32 x 16)(16 x 8) (8 x 4)
        H0 = H1; % 1000 x 4 <- 1000x16, 1000x8 (or 250 x 4 <- 250x16, 250x8)
    end
    H = H1; % 1000 x 4
end
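
v2h and v2hall both call the toolbox's sigmoid helper. Assuming it is the standard elementwise logistic function, a minimal equivalent would be:

function y = sigmoid(x)
% elementwise logistic: maps any real x into (0,1)
  y = 1 ./ ( 1 + exp(-x) );
end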

testDNN analysis 8

% trainDBN: training the Deep Belief Nets (DBN) model by the back-propagation algorithm
%
% [dbn rmse] = trainDBN( dbn, IN, OUT, opts)
%
%
%Output parameters:
% dbn: the trained Deep Belief Nets (DBN) model
% rmse: the rmse between the teaching data and the estimates
%
%
%Input parameters:
% dbn: the initial Deep Belief Nets (DBN) model
% IN: visible (input) variables, where # of row is number of data and # of col is # of visible (input) nodes % 1000 x 32
% OUT: teaching hidden (output) variables, where # of row is number of data and # of col is # of hidden (output) nodes % 1000 x 4
% opts (optional): options
%
% options (default value):
%  opts.LayerNum: # of training RBMs counted from output layer (all layers)
%  opts.MaxIter: Maximum iteration number (100)
%  opts.InitialMomentum: Initial momentum until InitialMomentumIter (0.5)
%  opts.InitialMomentumIter: Iteration number for initial momentum (5)
%  opts.FinalMomentum: Final momentum after InitialMomentumIter (0.9)
%  opts.WeightCost: Weight cost (0.0002)
%  opts.DropOutRate: List of Dropout rates for each layer (0)
%  opts.StepRatio: Learning step size (0.01)
%  opts.BatchSize: # of mini-batch data (# of all data)
%  opts.Object: specify the object function ('Square')
%              'Square'
%              'CrossEntropy'
%  opts.Verbose: verbose or not (false)
%
%
%Example:
% datanum = 1024;
% outputnum = 16;
% hiddennum = 8;
% inputnum = 4;
%
% inputdata = rand(datanum, inputnum);
% outputdata = rand(datanum, outputnum);
%
% dbn = randDBN([inputnum, hiddennum, outputnum]);
% dbn = pretrainDBN( dbn, inputdata );
% dbn = SetLinearMapping( dbn, inputdata, outputdata );
% dbn = trainDBN( dbn, inputdata, outputdata );
%
% estimate = v2h( dbn, inputdata );
%
%
%Reference:
%for details of the dropout
% Hinton et al, Improving neural networks by preventing co-adaptation of feature detectors, 2012.
%
%
%Version: 20131204

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Deep Neural Network:                                     %
%                                                          %
% Copyright (C) 2013 Masayuki Tanaka. All rights reserved. %
%                    mtanaka@ctrl.titech.ac.jp             %
%                                                          %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function [dbn rmse] = trainDBN( dbn, IN, OUT, opts)

% Important parameters
InitialMomentum = 0.5;     % momentum for first five iterations
FinalMomentum = 0.9;       % momentum for remaining iterations
WeightCost = 0.0002;       % costs of weight update
InitialMomentumIter = 5;

MaxIter = 100;
StepRatio = 0.01;
BatchSize = 0;
Verbose = false;

Layer = 0;
strbm = 1;

nrbm = numel( dbn.rbm ); % 3
DropOutRate = zeros(nrbm,1); % 3 x 1

OBJECTSQUARE = 1;
OBJECTCROSSENTROPY = 2;
Object = OBJECTSQUARE; % 1

if( exist('opts' ) )
 if( isfield(opts,'MaxIter') )
  MaxIter = opts.MaxIter;
 end
 if( isfield(opts,'InitialMomentum') )
  InitialMomentum = opts.InitialMomentum;
 end
 if( isfield(opts,'InitialMomentumIter') )
  InitialMomentumIter = opts.InitialMomentumIter;
 end
 if( isfield(opts,'FinalMomentum') )
  FinalMomentum = opts.FinalMomentum;
 end
 if( isfield(opts,'WeightCost') )
  WeightCost = opts.WeightCost;
 end
 if( isfield(opts,'DropOutRate') )
  DropOutRate = opts.DropOutRate;
  if( numel(DropOutRate) == 1 )
      DropOutRate = ones(nrbm,1) * DropOutRate;
  end
 end
 if( isfield(opts,'StepRatio') )
  StepRatio = opts.StepRatio;
 end
 if( isfield(opts,'BatchSize') )
  BatchSize = opts.BatchSize;
 end
 if( isfield(opts,'Verbose') )
  Verbose = opts.Verbose;
 end
 if( isfield(opts,'Layer') )
  Layer = opts.Layer;
 end
 if( isfield(opts,'Object') ) % field names are case-sensitive: the test script sets opts.object (lowercase), so this branch is skipped there
  if( strcmpi( opts.object, 'Square' ) ) % note the mismatch: the field checked is 'Object' but the one read is 'object'
   Object = OBJECTSQUARE;
  elseif( strcmpi( opts.object, 'CrossEntropy' ) )
   Object = OBJECTCROSSENTROPY;
  end
 end
end

num = size(IN,1); % 1000
if( BatchSize <= 0 ) % 250
  BatchSize = num;
end

if( Layer > 0 ) % 0 <- strbm=1 (Layer is 0 here, so strbm stays 1)
    strbm = nrbm - Layer + 1;
end

deltaDbn = dbn;
for n=strbm:nrbm % 1:3
    deltaDbn.rbm{n}.W = zeros(size(dbn.rbm{n}.W)); % 32 x 16, 16 x 8, 8 x 4
    deltaDbn.rbm{n}.b = zeros(size(dbn.rbm{n}.b)); % 1x16, 1 x 8, 1 x 4
end

if( Layer > 0 ) % 0 (redundant: repeats the check above)
    strbm = nrbm - Layer + 1;
end

if( sum(DropOutRate > 0) ) % 1 x 1
    OnInd = GetOnInd( dbn, DropOutRate, strbm ); % 3 x 1 {16 x 32, 8 x 16, 4 x 8}
    for n=max([2,strbm]):nrbm % 2:3
        dbn.rbm{n}.W = dbn.rbm{n}.W / numel(OnInd{n-1}) * size(dbn.rbm{n-1}.W,2);
% n=2: W(16 x 8) / 512 (= numel of the 16 x 32 index matrix) * 16 (= size(W1(32 x 16), 2))
% n=3: W(8 x 4)  / 128 (= numel of the 8 x 16 index matrix)  * 8  (= size(W2(16 x 8), 2))
    end
end

for iter=1:MaxIter % 1:20

    % Set momentum
    if( iter <= InitialMomentumIter )
        momentum = InitialMomentum; % 0.5
    else
        momentum = FinalMomentum; % 0.9
    end
   
    ind = randperm(num); % 1 x 1000: a random permutation of 1:num
    for batch=1:BatchSize:num % 1:250:1000 -> batch = 1, 251, 501, 751
        bind = ind(batch:min([batch + BatchSize - 1, num])); % 1 x 250 <- ind(1:250) when batch==1
        % extract BatchSize items, or fewer if the final chunk runs past num
        % bind(1:10) = 166 919 119 895 754 24 500 318 249 659
        trainDBN = dbn;
        if( DropOutRate > 0 )
    % 1 x 1 ->  trainDBN.rbm{i}(16 x 32, 8 x 16, 4 x 8)
            [trainDBN OnInd] = GetDroppedDBN( trainDBN, DropOutRate, strbm );
% trainDBN.rbm{i}(512 x 128(n=1), 128 x 32(n=2), 32 x 4(n=3))

            Hall = v2hall( trainDBN, IN(bind,OnInd{1}) );
% IN(1000 x 32) bind(1 x 250) OnInd(16 x 32)
% size(IN(bind,OnInd{1}))-> 250 x 512
% Hall(1->250 x 128, 2->250 x 32, 3->250 x 4)
%  size(IN(bind,OnInd{1}))->(250 x 512)  * trainDBN(512 x 128) * = Hall->250 x 128, 250 x 32, 250 x 4(n=3)
else
             Hall = v2hall( trainDBN, IN(bind,:) ); % IN(1000 x 32) bind(1 x 250)
        end
   
        for n=nrbm:-1:strbm % 3 : -1 : 1
            derSgm = Hall{n} .* ( 1 - Hall{n} ); % derivative of sigmoid: 250 x 4 (n=3), 250 x 32 (n=2), 250 x 128 (n=1)
            if( n+1 > nrbm )
                der = ( Hall{nrbm} - OUT(bind,:) );  %der(250 x 4) Hall{nrbm}(250 x 4) 250 x 4 = OUT(1000 x 4) (bind(1 x 250),:)
                if( Object == OBJECTSQUARE )
                    der = derSgm .* der; % 250 x 4(n=3)
                end
            else
                der = derSgm .* ( der * trainDBN.rbm{n+1}.W' );  % n=2: 250 x 32 = 250 x 4 * (32 x 4)'; n=1: 250 x 128 = 250 x 32 * (128 x 32)'
            end
           
            if( n-1 > 0 )
                in = Hall{n-1}; % (n=3)250 x 32, (n=2)250 x 128, (n=1)250 x 512, 
            else
                if( DropOutRate > 0 )
                    in = IN(bind,OnInd{1}); %  IN(1000 x 32) bind(1 x 250) OnInd{1}(16 x 32) = 250 x 512(n=3,n=2,n=1)
                else
                    in = IN(bind,:); % 250 x 32(n=3), 250 x 128(n=2), 250 x 512(n=1)
                end
            end
           
            in = cat(2, ones(numel(bind),1), in); %(n=3)250 x 33,(n=2)250 x 129, ones(numel(bind),1)(250 x 1)(250 x 512) = 250 x 513
            deltaWb = in' * der / numel(bind);
% (n=3)33 x 4=250 x 33' * 250 x 4 ,
% 129 x 32 = 250 x 129' * 250 x 32,
% 513 x 128 =250 x 513' * 250 x 128  / 250
            deltab = deltaWb(1,:); % 1 x 4(n=3), 1 x 32, 1 x 128
            deltaW = deltaWb(2:end,:); % 32 x 4(n=3), 128 x 32(n=2), 512 x 128(n=1)
           
            if( strcmpi( dbn.rbm{n}.type, 'GBRBM' ) ) % BBRBM here; GBRBM = Gaussian-Bernoulli Restricted Boltzmann Machine
                deltaW = bsxfun( @rdivide, deltaW, trainDBN.rbm{n}.sig' );
            end
           
            deltaDbn.rbm{n}.W = momentum * deltaDbn.rbm{n}.W; % 0.5 * 32 x 16(n=1), 16 x 8(n=2), 8 x 4(n=3)
            deltaDbn.rbm{n}.b = momentum * deltaDbn.rbm{n}.b; % 0.5 * 1x16(n=1), 1 x 8(n=2), 1 x 4(n=3)
             
            if( DropOutRate > 0 ) %(0.5, 0.5, 0.5)'
                if( n == nrbm )
                    deltaDbn.rbm{n}.W(OnInd{n},:) = deltaDbn.rbm{n}.W(OnInd{n},:) - StepRatio * deltaW;
% 32 x 4                        32 x 4                           32 x 4
                    deltaDbn.rbm{n}.b = deltaDbn.rbm{n}.b - StepRatio * deltab; % 1 x 4
                else
                    deltaDbn.rbm{n}.W(OnInd{n},OnInd{n+1}) = deltaDbn.rbm{n}.W(OnInd{n},OnInd{n+1}) - StepRatio * deltaW;
% (n=2)128 x 32=(8 x 16,4 x 8) (n=1)512 x 128=(32x16(16x32,8x16))      (n=2)128 x 32 (n=1)512 x 128
                    deltaDbn.rbm{n}.b(1,OnInd{n+1}) = deltaDbn.rbm{n}.b(1,OnInd{n+1}) - StepRatio * deltab;
% 1 x 32(n=2) (n=1)1 x 128(onInd{n+1}=8 x 16)
                end
            else
                deltaDbn.rbm{n}.W = deltaDbn.rbm{n}.W - StepRatio * deltaW;
                deltaDbn.rbm{n}.b = deltaDbn.rbm{n}.b - StepRatio * deltab;
            end
            keyboard() % drop into the debugger; presumably the source of the debug> sessions quoted in this post
        end
           
        for n=strbm:nrbm           
            dbn.rbm{n}.W = dbn.rbm{n}.W + deltaDbn.rbm{n}.W;
            dbn.rbm{n}.b = dbn.rbm{n}.b + deltaDbn.rbm{n}.b; 
        end
    end
   
    if( Verbose )
        tdbn = dbn;
        if( sum(DropOutRate > 0) )
            OnInd = GetOnInd( tdbn, DropOutRate, strbm );
            for n=max([2,strbm]):nrbm
                tdbn.rbm{n}.W = tdbn.rbm{n}.W * numel(OnInd{n-1}) / size(tdbn.rbm{n-1}.W,2);
            end
        end
        out = v2h( tdbn, IN );
        err = power( OUT - out, 2 );
        rmse = sqrt( sum(err(:)) / numel(err) );
        fprintf( '%3d : %9.4f\n', iter, rmse );
    end
end

if( sum(DropOutRate > 0) )
    OnInd = GetOnInd( dbn, DropOutRate, strbm );
    for n=max([2,strbm]):nrbm
        dbn.rbm{n}.W = dbn.rbm{n}.W * numel(OnInd{n-1}) / size(dbn.rbm{n-1}.W,2);
    end
end
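
For the record, the dropout pre/post scaling works out as follows for n=2 with the shapes traced above (assuming the old-Octave OnInd matrices):

numelOnInd1 = 16*32;             % numel of the 16 x 32 index matrix OnInd{1}
cols1 = 16;                      % size(dbn.rbm{1}.W, 2)
prescale  = cols1 / numelOnInd1  % 1/32: applied to rbm{2}.W before the training loop
postscale = numelOnInd1 / cols1  % 32: applied after training, undoing the pre-scale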

testDNN analysis 7

% linearMapping: calculate the linear mapping matrix between the input data and the output data
%
% M = linearMapping( IN, OUT )
%
%
%Output parameters:
% M: The linear mapping matrix
%
%
%Input parameters:
% IN: input data, where # of row is # of data and # of col is # of input features
%   row 1:    input 1,    features 1-4
%   row 2:    input 2,    features 1-4
%   ...
%   row 1024: input 1024, features 1-4
% OUT: output data, where # of row is # of data and # of col is # of output labels
%   row 1:    output 1,    labels 1-16
%   row 2:    output 2,    labels 1-16
%   ...
%   row 1024: output 1024, labels 1-16
%
%Example:
% datanum = 1024;
% outputnum = 16;
% inputnum = 4;
%
% inputdata = rand(datanum, inputnum);                rand(1024,4)
% outputdata = rand(datanum, outputnum);              rand(1024,16)
%
% M = linearMapping(inputdata, outputdata);         1024 x 4, 1024 x 16
%
%
%Version: 20130727

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Deep Neural Network:                                     %
%                                                          %
% Copyright (C) 2013 Masayuki Tanaka. All rights reserved. %
%                    mtanaka@ctrl.titech.ac.jp             %
%                                                          %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function M = linearMapping( IN, OUT ) % IN 1000 x 8 OUT 1000 x 4
M = pinv(IN) * OUT; % 8 x 1000 * 1000 x 4 = 8 x 4

%OUT = IN * M;
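
pinv(IN) * OUT is the minimum-norm least-squares solution of IN * M ~= OUT; on full-column-rank data it agrees with the backslash solver. A quick standalone check (toy sizes, not the 1000 x 8 trace above):

IN  = rand(100, 8);    % toy input, full column rank with probability 1
OUT = rand(100, 4);
M1 = pinv(IN) * OUT;   % linearMapping's solution
M2 = IN \ OUT;         % QR-based least squares
norm(M1 - M2)          % ~1e-14: both minimize the Frobenius norm of IN*M - OUT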

testDNN analysis 6

% SetLinearMapping: set the RBM associated to the linear mapping to the last layer
%
% dbn = SetLinearMapping( dbn, IN, OUT )
%
%
%Input parameters:
% dbn: the Deep Belief Nets (DBN) model
% IN: visible (input) variables, where # of row is number of data and # of col is # of visible (input) nodes
% OUT: teaching data, where # of row is number of data and # of col is # of hidden (output) nodes
%
%
%Output parameters:
% dbn: the Deep Belief Nets (DBN) model with its last layer set
%
%
%Example:
% datanum = 1024;
% outputnum = 16;
% hiddennum = 8;
% inputnum = 4;
%
% inputdata = rand(datanum, inputnum);
% outputdata = rand(datanum, outputnum);
%
% dbn = randDBN([inputnum, hiddennum, outputnum]);
% dbn = pretrainDBN( dbn, inputdata );
% dbn = SetLinearMapping( dbn, inputdata, outputdata );
% dbn = trainDBN( dbn, inputdata, outputdata );
%
% estimate = v2h( dbn, inputdata );

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Deep Neural Network:                                     %
%                                                          %
% Copyright (C) 2013 Masayuki Tanaka. All rights reserved. %
%                    mtanaka@ctrl.titech.ac.jp             %
%                                                          %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function dbn = SetLinearMapping( dbn, IN, OUT ) % 1000 x 32 , 1000 x 4
nrbm = numel(dbn.rbm);
if( nrbm > 1 ) % 3
    Hall = v2hall( dbn, IN ); % 3 x 1 Hall{1}->1000 x 16, 1000 x 8, 1000 x 4
    dbn.rbm{nrbm}.W = linearMapping( Hall{nrbm-1}, OUT ); % 1000 x 8, 1000 x 4 = 8 x 4
    dbn.rbm{nrbm}.b = -0.5 * ones(size(dbn.rbm{nrbm}.b)); % 1 x 4
else
    dbn.rbm{nrbm}.W = linearMapping( IN, OUT );
    dbn.rbm{nrbm}.b = -0.5 * ones(size(dbn.rbm{nrbm}.b));
end

testDNN analysis 5

% pretrainRBM: pre-training the restricted Boltzmann machine (RBM) model by Contrastive Divergence Learning
%
% rbm = pretrainRBM(rbm, V, opts)
%
%
%Output parameters:
% rbm: the restricted Boltzmann machine (RBM) model
%
%
%Input parameters:
% rbm: the initial restricted Boltzmann machine (RBM) model
% V: visible (input) variables, where # of row is number of data and # of col is # of visible (input) nodes
% opts (optional): options
%
% options (default value):
%  opts.MaxIter: Maximum iteration number (100)
%  opts.InitialMomentum: Initial momentum until InitialMomentumIter (0.5)
%  opts.InitialMomentumIter: Iteration number for initial momentum (5)
%  opts.FinalMomentum: Final momentum after InitialMomentumIter (0.9)
%  opts.WeightCost: Weight cost (0.0002)
%  opts.DropOutRate: Dropout rate (0)
%  opts.StepRatio: Learning step size (0.01)
%  opts.BatchSize: # of mini-batch data (# of all data)
%  opts.Verbose: verbose or not (false)
%  opts.SparseQ: q parameter of sparse learning (0)
%  opts.SparseLambda: lambda parameter (weight) of sparse learning (0)
%
%
%Example:
% datanum = 1024;
% outputnum = 16;
% inputnum = 4;
%
% inputdata = rand(datanum, inputnum);
% outputdata = rand(datanum, outputnum);
%
% rbm = randRBM(inputnum, outputnum);
% rbm = pretrainRBM( rbm, inputdata );
%
%
%Reference:
%for details of the dropout
% Hinton et al, Improving neural networks by preventing co-adaptation of feature detectors, 2012.
%for details of the sparse learning
% Lee et al, Sparse deep belief net model for visual area V2, NIPS 2008.
%for implementation of contrastive divergence learning
% http://read.pudn.com/downloads103/sourcecode/math/421402/drtoolbox/techniques/train_rbm.m__.htm
%
%
%Version: 20130727


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Deep Neural Network:                                     %
%                                                          %
% Copyright (C) 2013 Masayuki Tanaka. All rights reserved. %
%                    mtanaka@ctrl.titech.ac.jp             %
%                                                          %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function rbm = pretrainRBM(rbm, V, opts )

% Important parameters
InitialMomentum = 0.5;     % momentum for first five iterations
FinalMomentum = 0.9;       % momentum for remaining iterations
WeightCost = 0.0002;       % costs of weight update
InitialMomentumIter = 5;

MaxIter = 100;
DropOutRate = 0;
StepRatio = 0.01;
BatchSize = 0;
Verbose = false;

SparseQ = 0;
SparseLambda = 0;


if( exist('opts' ) )
 if( isfield(opts,'MaxIter') )
  MaxIter = opts.MaxIter;
 end
 if( isfield(opts,'InitialMomentum') )
  InitialMomentum = opts.InitialMomentum;
 end
 if( isfield(opts,'InitialMomentumIter') )
  InitialMomentumIter = opts.InitialMomentumIter;
 end
 if( isfield(opts,'FinalMomentum') )
  FinalMomentum = opts.FinalMomentum;
 end
 if( isfield(opts,'WeightCost') )
  WeightCost = opts.WeightCost;
 end
 if( isfield(opts,'DropOutRate') )
  DropOutRate = opts.DropOutRate;
 end
 if( isfield(opts,'StepRatio') )
  StepRatio = opts.StepRatio;
 end
 if( isfield(opts,'BatchSize') )
  BatchSize = opts.BatchSize;
 end
 if( isfield(opts,'Verbose') )
  Verbose = opts.Verbose;
 end
 if( isfield(opts,'SparseQ') )
  SparseQ = opts.SparseQ;
 end
 if( isfield(opts,'SparseLambda') )
  SparseLambda = opts.SparseLambda;
 end

else
 opts = [];
end

num = size(V,1); % 1000 (V is 1000 x 32)
dimH = size(rbm.b, 2); % 16
dimV = size(rbm.c, 2); % 32

if( BatchSize <= 0 ) % 250
  BatchSize = num;
end

if( DropOutRate > 0 )
    DropOutNum = round(dimV * DropOutRate); % 32 * 0.5
    DropOutRate = DropOutNum / num;  % 0.016
end


deltaW = zeros(dimV, dimH); % 32 x 16
deltaB = zeros(1, dimH); % 1 x 16
deltaC = zeros(1, dimV); % 1 x 32

for iter=1:MaxIter % 1:20

    % Set momentum
    if( iter <= InitialMomentumIter )
        momentum = InitialMomentum; % 0.500
    else
        momentum = FinalMomentum; % 0.9XXX
    end
               
    if( SparseLambda > 0 ) % 0
        dsW = zeros(dimV, dimH); % 32 x 16
        dsB = zeros(1, dimH); % 1 x 16
           
        vis0 = V; % 1000 x 32 (all data, for the sparsity statistics)
        hid0 = v2h( rbm, vis0 ); % 250 x 16
           
        dH = hid0 .* ( 1.0 - hid0 ); %derivative hidden  % 250 x 16
        sH = sum( hid0, 1 ); % sum of columns of each row % 1 x 16
    end
                   
    if( SparseLambda > 0 ) %0
        mH = sH / num; % sH/1000 or 250   1 x 16
        sdH = sum( dH, 1 ); % sum of derivative hidden columns of each row 1 x 16
        svdH = dH' * vis0;  % derivative hidden' times vis0 16 x 250 * 250 x 32 = 16 x 32
             
        dsW = dsW + SparseLambda * 2.0 * bsxfun(@times, (SparseQ-mH)', svdH)'; % 32 x 16 + (1 x 16' 1 x 32)'
        dsB = dsB + SparseLambda * 2.0 * (SparseQ-mH) .* sdH; % 1 x 16 + 1 x 16 .* 1 x 16 = 1 x 16
    end
       
       
    ind = randperm(num); % randperm(1000)

    for batch=1:BatchSize:num % batch = 1, 251, 501, 751

        bind = ind(batch:min([batch + BatchSize - 1, num])); % 1 x 250 <- ind(1:250) on the first batch
           
        if( DropOutRate > 0 ) % 0.016
            cMat = zeros(dimV,1); % 32 x 1
            p = randperm(dimV, DropOutNum); % old Octave: a DropOutNum x dimV matrix (16 x 32) of permutations
            cMat(p) = 1; % with old randperm, p contains every index 1..32, so cMat becomes all ones and the mask drops nothing
            cMat = diag(cMat); % 32 x 32 diagonal mask
        end
       
        % Gibbs sampling step 0
        vis0 = double(V(bind,:)); % Set values of visible nodes: 250 x 32 <- V(1000 x 32) indexed by bind(1 x 250)
        if( DropOutRate > 0 ) % 0.016
            vis0 = vis0 * cMat; % 250 x 32 * 32 x 32 = 250 x 32
        end
        hid0 = v2h( rbm, vis0 );  % Compute hidden nodes 250 x 16
       
        % Gibbs sampling step 1
        bhid0 = double( rand(size(hid0)) < hid0 ); % 250 x 16
        vis1 = h2v( rbm, bhid0 );  % Compute visible nodes % 250 x 32
        if( DropOutRate > 0 )  % 0.016
            vis1 = vis1 * cMat;% 250 x 32 * 32 x 32 = 250 x 32
        end
        hid1 = v2h( rbm, vis1 );  % Compute hidden nodes 250 x 16
       
posprods = hid0' * vis0; % 250 x 16' * 250 x 32 = 16 x 32
negprods = hid1' * vis1; % 250 x 16' * 250 x 32 = 16 x 32
% Compute the weights update by contrastive divergence
       
        dW = (posprods - negprods)'; %16 x 32'
        dB = (sum(hid0, 1) - sum(hid1, 1)); % 1 x 16
        dC = (sum(vis0, 1) - sum(vis1, 1)); % 1 x 32
     
        if( strcmpi( 'GBRBM', rbm.type ) ) % BBRBM here; GBRBM = Gaussian-Bernoulli Restricted Boltzmann Machine
            dW = bsxfun(@rdivide, dW, rbm.sig');
            dC = bsxfun(@rdivide, dC, rbm.sig .* rbm.sig);
        end
       
deltaW = momentum * deltaW + (StepRatio / num) * dW; % 0.1/1000    32 x 16
deltaB = momentum * deltaB + (StepRatio / num) * dB; % 0.1/1000    1 x 16
deltaC = momentum * deltaC + (StepRatio / num) * dC; % 0.1/1000    1 x 32
         
        if( SparseLambda > 0 ) % 0
            deltaW = deltaW + numel(bind) / num * dsW;
            deltaB = deltaB + numel(bind) / num * dsB;
        end
       
% Update the network weights
rbm.W = rbm.W + deltaW - WeightCost * rbm.W; % 32 x 16   0.0002
rbm.b = rbm.b + deltaB; % 1 x 16
rbm.c = rbm.c + deltaC; % 1 x 32
     
end
       
if( SparseLambda > 0 && strcmpi( 'GBRBM', rbm.type ) )  % BBRBM here; GBRBM = Gaussian-Bernoulli Restricted Boltzmann Machine
    dsW = bsxfun(@rdivide, dsW, rbm.sig');  % sim = exp( -1 * sum((x1 - x2).^2) / (2 * (sigma ^ 2))); x1,x2 column vectors
end

    if( Verbose )
        H = v2h( rbm, V ); % 1000 x 16 <- rbm.W(32 x 16), V(1000 x 32)
        Vr = h2v( rbm, H ); % 1000 x 32 <- H(1000 x 16)
        err = power( V - Vr, 2 ); % 1000 x 32
        rmse = sqrt( sum(err(:)) / numel(err) ); % 1 x 1
        fprintf( '%3d : %9.4f %9.4f\n', iter, rmse, mean(H(:)) );
    end
   
end  %% iter 1 : maxIter
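
The Gibbs step-0/step-1 code above is one step of contrastive divergence (CD-1). A self-contained sketch of the same statistics on toy data (standalone names, not the toolbox's):

sig = @(x) 1 ./ (1 + exp(-x));           % logistic, as assumed for the toolbox's sigmoid
V0  = double(rand(6,5) > 0.5);           % 6 toy samples, 5 visible units
W = randn(5,3)*0.1; b = zeros(1,3); c = zeros(1,5);
H0 = sig(bsxfun(@plus, V0*W, b));        % step 0: v -> h, like v2h
B0 = double(rand(size(H0)) < H0);        % sample binary hidden states
V1 = sig(bsxfun(@plus, B0*W', c));       % step 1: h -> v, like h2v for a BBRBM
H1 = sig(bsxfun(@plus, V1*W, b));        % step 1: v -> h again
dW = (H0'*V0 - H1'*V1)';                 % posprods - negprods, transposed to dimV x dimH
dB = sum(H0,1) - sum(H1,1);
dC = sum(V0,1) - sum(V1,1);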

testDNN analysis 4

% pretrainDBN: pre-training the Deep Belief Nets (DBN) model by Contrastive Divergence Learning
%
% dbn = pretrainDBN(dbn, V, opts)
%
%
%Output parameters:
% dbn: the trained Deep Belief Nets (DBN) model
%
%
%Input parameters:
% dbn: the initial Deep Belief Nets (DBN) model
% V: visible (input) variables, where # of row is number of data and # of col is # of visible (input) nodes
% opts (optional): options
%
% options (default value):
%  opts.LayerNum: # of training RBMs counted from input layer (all layers)
%  opts.MaxIter: Maximum iteration number (100)
%  opts.InitialMomentum: Initial momentum until InitialMomentumIter (0.5)
%  opts.InitialMomentumIter: Iteration number for initial momentum (5)
%  opts.FinalMomentum: Final momentum after InitialMomentumIter (0.9)
%  opts.WeightCost: Weight cost (0.0002)
%  opts.DropOutRate: List of Dropout rates for each layer (0)
%  opts.StepRatio: Learning step size (0.01)
%  opts.BatchSize: # of mini-batch data (# of all data)
%  opts.Verbose: verbose or not (false)
%  opts.SparseQ: q parameter of sparse learning (0)
%  opts.SparseLambda: lambda parameter (weight) of sparse learning (0)
%
%
%Example:
% datanum = 1024;
% outputnum = 16;
% hiddennum = 8;
% inputnum = 4;
%
% inputdata = rand(datanum, inputnum);
% outputdata = rand(datanum, outputnum);
%
% dbn = randDBN([inputnum, hiddennum, outputnum]);
% dbn = pretrainDBN( dbn, inputdata );
% dbn = SetLinearMapping( dbn, inputdata, outputdata );
% dbn = trainDBN( dbn, inputdata, outputdata );
%
% estimate = v2h( dbn, inputdata );
%
%
%Reference:
%for details of the dropout
% Hinton et al, Improving neural networks by preventing co-adaptation of feature detectors, 2012.
%for details of the sparse learning
% Lee et al, Sparse deep belief net model for visual area V2, NIPS 2008.
%
%
%Version: 20130821


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Deep Neural Network:                                     %
%                                                          %
% Copyright (C) 2013 Masayuki Tanaka. All rights reserved. %
%                    mtanaka@ctrl.titech.ac.jp             %
%                                                          %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function dbn = pretrainDBN(dbn, V, opts)

LayerNum = numel( dbn.rbm );  % 3
DropOutRate = zeros(LayerNum,1); % 3 x 1

X = V;  % 1000 x 32

if( exist('opts' ) )
 if( isfield(opts,'LayerNum') ) % false
  LayerNum = opts.LayerNum;
 end
 if( isfield(opts,'DropOutRate') ) % true
  DropOutRate = opts.DropOutRate; % 0.5
  if( numel( DropOutRate ) == 1 )
   DropOutRate = ones(LayerNum,1) * DropOutRate; % 3 x 1   [0.5 0.5 0.5]'
  end
 end

else
 opts = [];
end

for i=1:LayerNum % 1:3
opts.DropOutRate = DropOutRate(i);
    dbn.rbm{i} = pretrainRBM(dbn.rbm{i}, X, opts); % i=1 X=1000 x 32 
    X0 = X; % 1000 x 32, 1000 x 16, 1000 x 8
    X = v2h( dbn.rbm{i}, X0 ); % X = sigmoid( bsxfun(@plus, X0 * dbn.rbm{i}.W, dbn.rbm{i}.b) )
    % i=1: X(1000 x 16), dbn.rbm{1}.W(32 x 16), X0(1000 x 32)
    % i=2: X(1000 x 8),  dbn.rbm{2}.W(16 x 8),  X0(1000 x 16)
    % i=3: X(1000 x 4),  dbn.rbm{3}.W(8 x 4),   X0(1000 x 8)
end

testDNN analysis 3

% randRBM: get randomized restricted Boltzmann machine (RBM) model
%
% rbm = randRBM( dimV, dimH, type )
%
%
%Output parameters:
% rbm: the randomized restricted Boltzmann machine (RBM) model
%
%
%Input parameters:
% dimV: number of visible (input) nodes
% dimH: number of hidden (output) nodes
% type (optional): (default: 'BBRBM' )
%                 'BBRBM': the Bernoulli-Bernoulli RBM
%                 'GBRBM': the Gaussian-Bernoulli RBM
%
%
%Version: 20130727


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Deep Neural Network:                                     %
%                                                          %
% Copyright (C) 2013 Masayuki Tanaka. All rights reserved. %
%                    mtanaka@ctrl.titech.ac.jp             %
%                                                          %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function rbm = randRBM( dimV, dimH, type )  % 32V 16H - 16V 8H - 8V 4H

if( ~exist('type', 'var') || isempty(type) )
type = 'BBRBM';
end

if( strcmpi( 'GB', type(1:2) ) )
    rbm.type = 'GBRBM';
    rbm.W = randn(dimV, dimH) * 0.1;
    rbm.b = zeros(1, dimH);
    rbm.c = zeros(1, dimV);
    rbm.sig = ones(1, dimV);  % variance = sigma^2, mean = mu <- normal distribution
    % the Gaussian function is the probability density function of the normal distribution
else
    rbm.type = 'BBRBM';
    rbm.W = randn(dimV, dimH) * 0.1;
    rbm.b = zeros(1, dimH);
    rbm.c = zeros(1, dimV);
end

testDNN analysis 2

% randDBN: get randomized Deep Belief Nets (DBN) model
%
% dbn = randDBN( dims, type )
%
%
%Output parameters:
% dbn: the randomized Deep Belief Nets (DBN) model
%
%
%Input parameters:
% dims: number of nodes
% type (optional): (default: 'BBDBN' )
%                 'BBDBN': all RBMs are the Bernoulli-Bernoulli RBMs
%                 'GBDBN': the input RBM is the Gaussian-Bernoulli RBM, other RBMs are the Bernoulli-Bernoulli RBMs
%
%
%Version: 20130727


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Deep Neural Network:                                     %
%                                                          %
% Copyright (C) 2013 Masayuki Tanaka. All rights reserved. %
%                    mtanaka@ctrl.titech.ac.jp             %
%                                                          %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function dbn = randDBN( dims, type )

if( ~exist('type', 'var') || isempty(type) )
type = 'BBDBN';
end


if( strcmpi( 'GB', type(1:2) ) ) % Gaussian-Bernoulli
 dbn.type = 'GBDBN';
else
 dbn.type = 'BBDBN';
end

dbn.rbm = cell( numel(dims)-1, 1 );

i = 1;
if( strcmpi( 'GB', type(1:2) ) )
 dbn.rbm{i} = randRBM( dims(i), dims(i+1), 'GBRBM' ); % Gaussian-Bernoulli Restricted Boltzmann Machine
else
 dbn.rbm{i} = randRBM( dims(i), dims(i+1), 'BBRBM' ); % Bernoulli-Bernoulli Restricted Boltzmann Machine
end

for i=2:numel(dbn.rbm) - 1
 dbn.rbm{i} = randRBM( dims(i), dims(i+1), 'BBRBM' );
end

i = numel(dbn.rbm);
dbn.rbm{i} = randRBM( dims(i), dims(i+1), 'BBRBM' );
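
A quick shape check, matching the test script and the debug dump below:

dbn = randDBN( [32 16 8 4] );
size(dbn.rbm{1}.W)   % 32 x 16
size(dbn.rbm{2}.W)   % 16 x 8
size(dbn.rbm{3}.W)   % 8 x 4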

testDNN analysis 1

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Deep Neural Network:                                     %
%                                                          %
% Copyright (C) 2013 Masayuki Tanaka. All rights reserved. %
%                    mtanaka@ctrl.titech.ac.jp             %
%                                                          %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
clear all;

num = 1000;  % num of cases
nodes = [32 16 8 4]; % neuron counts per layer, from input layer to output layer

IN = rand(num,32);  % 1000 x 32
OUT = rand(num,4);  % 1000 x 4

dnn = randDBN( nodes ); % 1 x 1 struct containing 3 rbm structs
%dnn = randDBN( nodes, 'GBDBN' );
nrbm = numel(dnn.rbm); % 3

opts.MaxIter = 20;
opts.BatchSize = num/4; % 250
opts.Verbose = true;
opts.StepRatio = 0.1;
opts.Layer = nrbm-1;  % 2
opts.DropOutRate = 0.5;
opts.object = 'CrossEntropy'; % lowercase 'object': trainDBN checks isfield(opts,'Object'), so this is silently ignored and the Square objective is used

dnn = pretrainDBN(dnn, IN, opts);

dnn= SetLinearMapping(dnn, IN, OUT);

opts.Layer = 0;
dnn = trainDBN(dnn, IN, OUT, opts);
rmse = CalcRmse(dnn, IN, OUT);
rmse

%{
debug> size(IN)
ans =

   1000     32

debug> size(OUT)
ans =

   1000      4

debug> size(dnn)
ans =

   1   1

debug> displayentries(dnn)
error: `displayentries' undefined near line 3 column 1
debug> fieldnames(dnn)
ans =
{
  [1,1] = type
  [2,1] = rbm
}
debug> size(dnn.type)
ans =

   1   5

debug> dnn.type
ans = BBDBN
debug> size(dnn.rbm)
ans =

   3   1

debug> dnn.rbm
ans =
{
  [1,1] =

    scalar structure containing the fields:  32ROWS

      type = BBRBM
      W =

       Columns 1 through 5:

        -9.5439e-002  2.2939e-002  7.2628e-002  -1.6725e-001  -1.8585e-001
        -6.3338e-002  5.5813e-002  9.9650e-002  -3.2411e-002  1.9054e-001
        -2.8721e-002  -3.0517e-001  4.2261e-002  1.8782e-002  1.7918e-002
        1.6754e-001  -6.9326e-003  8.4993e-002  1.3912e-001  1.7788e-001
        1.2296e-001  2.8321e-003  6.8273e-002  -4.1696e-002  1.8666e-001
        -7.1908e-002  7.6576e-002  1.8943e-002  6.0098e-002  -8.2744e-002
        -6.1932e-002  -1.9985e-001  1.7706e-002  -1.7833e-001  3.2422e-002
        1.4648e-001  1.3261e-001  -8.6583e-002  -5.8321e-002  -7.2068e-002
        8.9776e-002  -3.7070e-002  5.6221e-002  -4.0033e-002  9.4303e-003
        8.9454e-002  -4.9070e-002  -6.3078e-002  3.1675e-002  -3.0792e-002
        1.8290e-001  -1.8830e-001  -8.8328e-002  -1.0021e-002  1.1692e-001
        -1.5825e-002  -8.2422e-002  -4.5168e-002  -1.0671e-001  7.1336e-002
        -1.4302e-001  1.1588e-002  -1.9893e-002  -1.8817e-002  8.0709e-002
        5.8547e-002  1.0674e-001  9.2972e-002  7.3739e-004  -8.8002e-002
        -5.2856e-002  5.6408e-002  -2.7927e-002  -5.2166e-002  -2.9046e-002
        -2.3111e-002  -2.5470e-002  1.4229e-002  6.3788e-002  -1.0690e-001
        1.0172e-001  1.6693e-001  -4.2641e-002  -6.0183e-003  8.3208e-002
        9.4523e-003  -1.0553e-001  3.2596e-002  -7.3335e-002  -6.9497e-002
        5.5084e-002  -4.9742e-002  6.3967e-002  -3.0585e-002  -2.2389e-003
        8.7456e-002  -9.3202e-003  5.7180e-002  -3.3230e-002  -1.3572e-002
        1.4025e-001  1.3006e-001  -9.2925e-002  -7.1881e-002  7.9407e-002
        4.6632e-002  2.5287e-002  1.8483e-001  -1.2771e-001  4.0131e-002
        -4.6402e-002  1.8763e-002  1.2175e-001  3.1542e-002  1.1833e-001
        -6.8776e-002  9.5478e-002  1.0184e-001  -1.3870e-001  -1.8715e-002
        -1.3226e-002  4.9959e-002  1.3911e-001  -1.2609e-001  -1.1164e-001
        5.1048e-003  8.2061e-002  -1.3399e-001  1.4166e-002  4.5473e-002
        4.7427e-002  -1.6272e-002  1.2218e-003  2.0601e-002  1.5357e-001
        1.2823e-002  9.5251e-002  9.6114e-002  -5.1072e-002  -4.2220e-003
        -2.7675e-002  1.3981e-001  -6.3217e-002  1.4558e-002  -1.0971e-001
        3.4808e-002  1.0162e-001  -2.7371e-002  -9.5146e-003  -8.1046e-002
        -8.9656e-004  5.3675e-002  3.5876e-002  1.1082e-001  -2.6857e-001
        -4.4603e-002  5.2974e-002  -1.0659e-001  -8.3114e-002  3.8094e-002

      ...
       Column 16:

        1.0590e-001
        -5.0035e-002
        9.8314e-002
        3.4597e-003
        1.8471e-001
        1.1792e-002
        -8.3624e-003
        8.8980e-003
        8.6437e-002
        1.0525e-001
        -1.0155e-001
        5.0527e-002
        -1.0684e-001
        8.2844e-002
        1.3908e-001
        1.8100e-001
        -2.6154e-002
        1.7376e-002
        -1.1795e-001
        -1.4004e-001
        4.5323e-002
        -1.1169e-002
        4.8986e-002
        7.6310e-002
        2.6240e-001
        2.4331e-002
        7.2168e-003
        -1.9634e-002
        -9.0908e-002
        -1.1385e-002
        -1.5389e-001
        -8.0523e-002

      b =

         0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0

      c =

       Columns 1 through 18:

         0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0

       Columns 19 through 32:

         0   0   0   0   0   0   0   0   0   0   0   0   0   0


  [2,1] =

    scalar structure containing the fields:  16ROWS

      type = BBRBM
      W =

       Columns 1 through 5:

        2.0102e-001  4.3688e-003  1.2167e-002  3.8655e-002  -1.0856e-001
        7.6643e-003  -2.0929e-002  2.1684e-001  2.7952e-002  -1.0454e-001
        -1.8838e-001  -1.1319e-001  1.3241e-001  2.4613e-001  -4.1756e-003
        -4.1513e-002  -2.4252e-001  4.6049e-002  -7.2507e-002  9.5904e-003
        8.3872e-002  9.5719e-002  5.9736e-002  1.0170e-002  -1.9051e-001
        -1.6029e-003  -8.5765e-002  1.3034e-001  1.3020e-001  -4.4126e-002
        -1.1046e-001  -1.6009e-001  8.4373e-002  1.4629e-001  -4.9012e-002
        1.3466e-003  -3.4392e-002  -2.4080e-001  3.8773e-002  -5.2277e-002
        8.6146e-004  -2.1853e-002  1.1403e-001  -9.8974e-002  -5.0794e-002
        -3.2462e-003  1.0488e-001  3.2717e-002  6.8939e-004  7.9416e-002
        -1.1660e-001  -1.1468e-001  1.3624e-001  4.0556e-002  3.1838e-003
        4.7486e-002  -1.3841e-001  -5.5895e-002  -1.5473e-001  -4.0310e-002
        2.6561e-002  -1.2955e-003  1.6016e-001  6.6688e-002  -1.0138e-002
        -1.2804e-001  5.2227e-003  7.8851e-002  -4.8367e-002  2.6254e-004
        1.5797e-003  6.0113e-002  3.2811e-002  -1.1808e-001  7.7672e-002
        1.4736e-001  -1.7833e-001  -8.0439e-002  -3.6698e-003  -8.5635e-002

       Columns 6 through 8:

        -4.6001e-002  -6.9266e-002  -2.2738e-001
        4.7092e-002  -2.0253e-001  -3.2701e-003
        5.1861e-002  -2.1056e-001  -1.8411e-002
        -7.1343e-002  1.2337e-001  1.9276e-002
        -1.8914e-003  -1.5765e-001  -8.6933e-002
        -2.0187e-001  4.3527e-002  -1.7456e-002
        1.0685e-001  1.7238e-002  -2.8500e-002
        -3.1129e-002  -1.1785e-002  -1.8281e-001
        6.6779e-002  -1.9351e-002  1.3626e-001
        1.0567e-001  5.0517e-002  -3.2258e-002
        -6.2616e-002  -4.1738e-005  -3.2652e-003
        2.0329e-003  1.2461e-001  3.1411e-002
        8.1571e-002  8.1036e-002  6.0923e-002
        5.1120e-002  -4.4483e-002  -1.1280e-001
        5.0619e-002  1.1328e-001  1.4899e-001
        3.8269e-002  -1.4240e-001  -3.6423e-002

      b =

         0   0   0   0   0   0   0   0

      c =

         0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0


  [3,1] =

    scalar structure containing the fields:   8ROWS

      type = BBRBM
      W =

         0.0186368  -0.0967863   0.0086856  -0.0792971
        -0.0265770  -0.0345573   0.0390910  -0.2094785
         0.1285115   0.0039218   0.1239428  -0.0057840
        -0.0358651  -0.2717663  -0.0276068   0.0775830
         0.0222669   0.0783432  -0.0770633   0.0245969
         0.0052533   0.0777309  -0.0423200  -0.0332311
        -0.1173345   0.0099952  -0.0984870  -0.1093623
         0.0689255   0.0407822   0.0991716   0.0154337

      b =

         0   0   0   0

      c =

         0   0   0   0   0   0   0   0


}
debug> nrbm
nrbm =  3
%}
%{
dnn = pretrainDBN(dnn, IN, opts);
***************************************
debug> size(dnn)
ans =

   1   1

debug> fieldnames(dnn)
ans =
{
  [1,1] = type
  [2,1] = rbm
}
debug> size(dnn.type)
ans =

   1   5

debug> dnn.type
ans = BBDBN
debug> size(dnn.rbm)
ans =

   3   1

debug> dnn.rbm
scalar structure containing the fields:

      type = BBRBM
      W =

       Columns 1 through 5:

        -1.4456e-001  1.3457e-001  -1.5983e-001  -2.1078e-001  1.4789e-001
        1.9967e-001  3.1571e-002  1.3676e-001  -1.9001e-001  5.2966e-002
        -7.9528e-002  -7.3805e-002  1.3651e-001  -1.4727e-002  -5.0989e-002
        4.6101e-001  -2.0480e-001  2.2967e-002  -4.4049e-002  -1.4408e-001
        -1.5530e-002  5.8309e-002  2.7578e-002  -2.6401e-001  1.6906e-001
        -2.3830e-002  7.3771e-002  -4.0720e-002  1.0873e-001  3.2833e-002
        -1.1991e-001  -2.7482e-002  -1.7003e-001  -4.4027e-002  -1.6771e-001
        -1.4532e-001  1.4782e-001  1.6026e-001  -3.9349e-002  -2.3665e-001
        9.6552e-002  9.6118e-002  1.0965e-002  4.8927e-002  1.3123e-001
        -1.8393e-001  8.8770e-002  9.7955e-002  1.0479e-001  1.9539e-001
        1.5810e-001  8.2610e-003  7.7519e-002  -1.7944e-001  2.0037e-002
        -1.8539e-002  2.2513e-002  -1.4977e-001  -6.7766e-002  1.3686e-001
        1.3339e-001  -2.9161e-002  7.3629e-002  -7.3675e-002  -1.2211e-001
        1.9417e-002  1.4506e-001  -9.1872e-002  -4.9863e-002  -1.4877e-001
        6.8912e-002  -7.8553e-002  -1.5891e-001  1.6622e-001  -9.9249e-002
        4.6266e-002  1.2187e-001  8.3142e-002  -1.9155e-001  1.5987e-001
        1.6179e-002  6.4626e-002  -2.8579e-001  -1.4409e-002  -1.3780e-001
        -9.7421e-002  -3.9605e-002  6.2394e-002  -9.7487e-002  2.5618e-002
        -2.2313e-002  1.1810e-001  4.9739e-002  -1.9756e-001  -3.7360e-002
        1.1228e-001  -2.2252e-001  3.8728e-002  -1.6449e-001  1.3446e-001
        -1.0710e-001  2.4637e-001  -3.1594e-002  8.3380e-002  1.0684e-001
        -8.0976e-002  -2.2602e-003  1.2758e-001  1.2872e-001  -3.5073e-002
        -5.6544e-002  -2.2636e-004  -1.4674e-001  -6.3959e-002  2.7805e-002
        3.1768e-001  -6.6169e-002  -7.9164e-002  1.0105e-001  -2.5982e-001
        -7.4598e-002  1.4319e-001  2.6001e-001  -1.7961e-001  -1.2858e-001
        2.1387e-003  7.2934e-003  -2.0793e-001  -7.9268e-002  2.5656e-001
        -1.5909e-001  -1.6167e-002  3.3853e-002  -9.1057e-002  1.8289e-001
        3.3792e-002  9.1442e-002  1.0779e-001  1.3711e-001  -3.9855e-002
        5.1293e-002  -2.0965e-002  1.6927e-001  -2.1026e-001  -2.0306e-001
        -1.4660e-001  1.3455e-002  -5.2939e-002  -3.4808e-002  1.5054e-001
        1.0657e-001  -1.4081e-002  -1.0672e-001  1.0797e-001  9.5403e-002
        4.2248e-002  -1.3737e-001  -1.6574e-002  1.0592e-001  -1.7172e-001

       Columns 6 through 10:

        -8.5914e-002  1.0224e-002  2.0198e-001  -1.9529e-001  2.9960e-002
        -1.5373e-001  -1.8566e-001  -8.7281e-002  7.6317e-002  -9.4733e-002
        -1.3825e-001  6.1988e-002  -1.0458e-001  5.9529e-002  1.6094e-001
        2.2550e-002  -1.4377e-001  1.2013e-002  9.3174e-002  -6.1050e-004
        -1.1033e-001  3.0341e-001  3.9760e-002  2.7072e-002  -6.9339e-003
        2.7696e-001  -8.5717e-002  -3.9012e-002  -1.0869e-002  -4.4833e-002
        -9.3750e-003  -1.3472e-001  1.2080e-002  7.9816e-002  -9.2314e-003
        1.2384e-001  2.8670e-001  -1.7781e-002  -1.2156e-001  -9.3283e-002
        7.7685e-002  -1.8240e-001  2.3156e-002  -1.0981e-001  7.0797e-004
        4.6324e-002  1.8556e-002  -2.5338e-001  -1.1749e-001  -1.1827e-001
        1.4409e-001  1.0703e-001  -1.9620e-001  -3.1371e-002  -8.1335e-002
        -8.4576e-002  8.0624e-002  3.8084e-002  1.2493e-001  9.2895e-003
        -1.5890e-001  5.5033e-002  1.2631e-001  1.3307e-001  1.5517e-001
        -9.5496e-002  -1.7814e-002  2.5346e-001  1.6847e-001  -5.3799e-002
        2.6378e-001  1.8021e-002  -5.2915e-002  -1.3608e-001  2.7665e-002
        9.1235e-002  -8.6926e-002  -7.1771e-002  4.5970e-003  9.2774e-002
        -9.0323e-002  2.8599e-002  -3.5934e-002  -1.0172e-001  2.7208e-001
        9.0449e-002  1.1858e-001  2.3264e-001  -1.5732e-001  -4.1013e-002
        -1.1754e-001  1.3658e-001  5.4594e-002  9.1868e-002  -6.6272e-002
        -5.5522e-002  -1.4106e-002  -1.0810e-002  -2.2745e-001  1.7566e-002
        4.8655e-002  1.3820e-001  -1.2193e-001  -2.5636e-001  -4.7235e-002
        9.5951e-002  1.2619e-001  -1.9169e-001  2.1087e-001  -1.9027e-001
        3.0819e-002  1.0616e-001  1.0001e-001  1.1986e-001  -3.8117e-001
        -1.1276e-001  -5.5931e-002  9.0106e-003  4.9986e-002  -3.6583e-002
        1.5102e-001  -2.4015e-001  -5.5786e-002  -2.4883e-002  1.4220e-002
        2.2478e-002  -1.2901e-001  -2.4733e-002  -4.6572e-002  7.9630e-002
        5.8555e-002  2.7067e-002  -4.7607e-002  4.0240e-002  -4.0228e-002
        -1.8334e-001  -1.1726e-001  1.6269e-001  1.0437e-001  -3.3466e-001
        -1.3963e-001  -4.8113e-002  -2.4723e-001  1.5928e-001  7.8055e-002
        1.4487e-001  -4.0303e-002  3.6406e-002  7.6593e-002  1.0397e-001
        -1.0251e-002  -1.1552e-001  3.5737e-002  -1.2316e-001  2.1050e-001
        1.9225e-001  -3.8724e-002  -7.8417e-002  5.2468e-002  -9.5619e-002

       Columns 11 through 15:

        2.4098e-003  -2.6345e-001  -4.6159e-002  1.5794e-001  -3.3164e-003
        -1.7913e-001  -2.3133e-002  4.2701e-002  2.3828e-001  1.9139e-001
        2.2087e-002  -1.4815e-001  6.2519e-002  -2.5090e-002  8.0639e-003
        -2.6046e-001  -1.5779e-001  1.6938e-001  -6.9762e-002  1.4018e-001
        8.6579e-002  3.6462e-002  -8.0427e-002  -2.0413e-001  -1.7592e-001
        -1.2733e-001  1.0460e-001  -1.7352e-001  -1.4951e-001  -4.0147e-002
        2.5901e-002  8.6292e-002  1.3644e-001  2.2090e-001  1.3782e-001
        1.1347e-001  4.5161e-002  -4.4923e-002  -3.5492e-002  1.9974e-001
        1.7800e-002  3.5112e-002  -9.3266e-002  9.9701e-002  -2.5843e-001
        -4.9081e-002  -2.1651e-002  4.9206e-002  4.1672e-002  4.5893e-002
        -1.6028e-001  6.9965e-002  -9.9095e-002  -9.8361e-002  1.5742e-002
        -2.2755e-001  -1.3005e-001  3.2077e-001  -2.1002e-002  -1.8576e-002
        -1.0820e-001  2.2273e-002  1.4682e-001  -1.7895e-001  -2.0020e-001
        -1.0212e-001  -9.6141e-002  7.2759e-002  -5.6987e-002  -8.3177e-002
        -1.2048e-002  1.4154e-001  1.8797e-001  -7.5862e-002  -2.2842e-001
        1.9373e-001  -1.5303e-001  7.4419e-002  -2.0184e-001  -1.2280e-002
        8.8329e-002  -5.7753e-002  1.5731e-001  6.5880e-002  -2.9509e-002
        5.1196e-002  -6.7306e-002  6.8033e-002  -1.5876e-002  -1.2128e-001
        1.3562e-001  1.7572e-001  -9.5858e-002  -5.7359e-002  -4.9138e-002
        -1.6606e-002  7.4453e-002  2.2216e-001  -8.0778e-002  9.9390e-002
        -1.4711e-001  -2.0306e-002  3.7244e-002  2.0792e-001  -5.3220e-002
        -2.0906e-001  -7.5341e-002  3.2026e-002  2.1813e-002  -8.0194e-002
        -8.9684e-002  -1.3909e-001  6.9256e-002  1.8700e-001  -3.0837e-002
        2.0385e-001  1.0729e-001  5.6024e-002  -1.2941e-001  1.2849e-001
        -8.7558e-002  6.1155e-002  2.8669e-002  1.3912e-002  8.1313e-002
        -1.1198e-001  1.1722e-001  3.7843e-002  -1.3390e-001  4.9545e-002
        1.1977e-001  3.7913e-002  1.4384e-001  -1.8977e-001  -1.2410e-002
        -4.7232e-002  -3.8173e-002  5.9145e-002  -3.9736e-002  1.1931e-001
        2.6636e-001  1.2498e-001  -2.1388e-002  -2.1876e-001  -4.1982e-003
        -2.5778e-002  1.9604e-001  -2.0281e-001  -3.0428e-002  -6.2732e-002
        2.0315e-001  -1.7387e-001  1.1643e-002  -9.8135e-002  8.3186e-002
        1.8663e-002  2.0766e-002  4.0007e-002  1.4561e-001  1.3943e-002

       Column 16:

        1.2361e-001
        5.3400e-003
        3.9879e-002
        1.8711e-002
        1.3219e-001
        -5.1279e-002
        1.2698e-001
        -5.9815e-002
        6.0131e-002
        -3.3446e-002
        4.0260e-002
        -1.0416e-001
        -1.4745e-001
        -4.7535e-002
        -6.4048e-002
        -2.2644e-001
        -1.8675e-002
        6.2748e-002
        -2.2745e-001
        -7.9144e-002
        -1.1377e-001
        1.4382e-001
        1.2872e-001
        -5.1093e-002
        -1.5694e-001
        5.4393e-002
        -1.9857e-001
        4.2873e-002
        9.1268e-002
        5.2551e-002
        -1.1336e-001
        -1.1882e-001

      b =

       Columns 1 through 5:

        -1.5586e-002  -1.1496e-002  -5.8476e-003  1.5063e-002  -6.7685e-003

       Columns 6 through 10:

        -5.7987e-003  -6.9171e-003  -7.8332e-003  -1.4338e-004  -4.1969e-003

       Columns 11 through 15:

        -8.1333e-003  -9.0226e-003  -2.2251e-002  -1.5057e-003  -1.6091e-002

       Column 16:

        5.6817e-004

      c =

       Columns 1 through 6:

         0.0395916  -0.0773137   0.0312343  -0.0407844  -0.0025245   0.0896613

       Columns 7 through 12:

        -0.0425101  -0.1091811  -0.0149902   0.0279449   0.0730091  -0.0038612

       Columns 13 through 18:

         0.0314979   0.0748003   0.0157778  -0.0173519  -0.0313998  -0.0555946

       Columns 19 through 24:

        -0.0412943  -0.0241210   0.0050137   0.0062377   0.0211264  -0.0494209

       Columns 25 through 30:

         0.0124015  -0.0334159   0.0270998   0.0090238   0.0583709  -0.0652171

       Columns 31 and 32:

        -0.0365972   0.0718705


  [2,1] =

    scalar structure containing the fields:

      type = BBRBM
      W =

       Columns 1 through 6:

        -0.1580402   0.1646551   0.1544616   0.0127687   0.0475242   0.0354418
         0.1059701  -0.0584153   0.1202782   0.0424994   0.0615194   0.0647433
         0.0307732  -0.0471046  -0.2426331   0.1067414   0.0483431  -0.0798042
        -0.0731989  -0.0210007  -0.1234566  -0.1587460   0.0332187  -0.2252511
         0.0978923  -0.1253813   0.0735168  -0.1567597  -0.0283528  -0.0407749
         0.0145115   0.0742178   0.1333530  -0.0073586   0.0637584   0.0700637
         0.0969561   0.0043284  -0.1200178   0.1004060  -0.0754928   0.0167230
        -0.1466019   0.1610189  -0.1535090   0.0476767  -0.0862423  -0.0467296
        -0.0041632   0.1136615  -0.0324736  -0.0667652   0.0516534  -0.0535831
        -0.0434391  -0.1836850   0.0426363   0.0200211   0.1795177  -0.1665324
        -0.1280892  -0.1473259   0.0141963  -0.0618399  -0.0636552   0.1216574
         0.0529018  -0.0140015   0.0391559  -0.0730919   0.2005384  -0.1336324
         0.1655463   0.1814396   0.2060112   0.0534279   0.1115840  -0.0068995
        -0.2106281  -0.1304211   0.1142619  -0.1817210  -0.1178374   0.1359360
         0.0081734  -0.0306936   0.0177958   0.1006536  -0.0477942  -0.0826642
        -0.0018466  -0.0195038  -0.0971977   0.0341731  -0.1154291   0.0489353

       Columns 7 and 8:

        -0.0284126   0.0200409
        -0.0930402   0.0764000
         0.0341746  -0.0478940
        -0.1998030  -0.1460855
         0.0696642   0.0428283
         0.0259319  -0.0889552
        -0.0194931   0.1268362
         0.0665831  -0.0574145
         0.0629943  -0.0650520
        -0.0046916  -0.1639800
        -0.0519509  -0.0733316
        -0.0539535  -0.1465853
         0.2265682   0.0198238
        -0.0749331  -0.0618888
        -0.0175542   0.0231235
        -0.1475020  -0.0175232

      b =

       Columns 1 through 6:

         0.0057857  -0.0016897   0.0027805  -0.0032890  -0.0065730  -0.0010049

       Columns 7 and 8:

         0.0108362   0.0027294

      c =

       Columns 1 through 6:

         0.0519208   0.1673262   0.1023236  -0.2268309   0.0366060   0.0182441

       Columns 7 through 12:

        -0.0512072  -0.0443540   0.0091081  -0.1174627  -0.0111318  -0.0157029

       Columns 13 through 16:

         0.1704334  -0.0983854  -0.0740015  -0.1733870


  [3,1] =

    scalar structure containing the fields:

      type = BBRBM
      W =

        6.4865e-003  3.1768e-003  -4.8833e-002  -6.2117e-002
        -4.3082e-004  -9.2065e-002  -9.6846e-002  3.7618e-002
        -7.6594e-002  1.0289e-001  1.0863e-001  3.7683e-002
        -6.6581e-002  2.9303e-002  -6.4712e-002  8.4750e-002
        1.8016e-001  1.2360e-001  -1.3360e-001  -2.9996e-002
        -1.9455e-002  3.0877e-002  -9.0625e-002  -4.8582e-002
        -7.6954e-002  -1.1390e-001  4.1767e-002  1.1894e-001
        2.4895e-002  -4.6617e-003  -1.1059e-001  -6.8426e-002

      b =

        -0.0018479  -0.0032890  -0.0012351  -0.0015600

      c =

       Columns 1 through 6:

         0.0308390   0.0916032   0.0644636  -0.0433083   0.0800952  -0.0703054

       Columns 7 and 8:

         0.0054586  -0.1400794


}

%}