Detailed Analysis of MATLAB Deep Learning Code (Part 1): nnff.m
NNFF is the feedforward-pass function of the neural network. The code is as follows:
function nn = nnff(nn, x, y)
%NNFF performs a feedforward pass
% nn = nnff(nn, x, y) returns a neural network structure with updated
% layer activations, error and loss (nn.a, nn.e and nn.L).
% Inputs: nn (network structure), x (data, one sample per row), y (labels).

    n = nn.n;            % number of layers; e.g. for a [784 25 10] net, n = 3
    m = size(x, 1);      % number of samples (rows of x)

    x = [ones(m,1) x];   % prepend a column of ones for the bias term
    nn.a{1} = x;         % the input is the activation of layer 1

    % feedforward pass through the hidden layers
    for i = 2 : n-1
        switch nn.activation_function   % pick the hidden-layer activation
            case 'sigm'
                % sigmoid units; the bias is already folded into nn.W{i-1}
                % via the column of ones
                nn.a{i} = sigm(nn.a{i - 1} * nn.W{i - 1}');
            case 'tanh_opt'
                nn.a{i} = tanh_opt(nn.a{i - 1} * nn.W{i - 1}');
        end

        % dropout: randomly zero a fraction of the hidden activations
        % (a zeroed activation contributes nothing to the next layer)
        if(nn.dropoutFraction > 0)
            if(nn.testing)
                % at test time, scale activations instead of masking them
                nn.a{i} = nn.a{i}.*(1 - nn.dropoutFraction);
            else
                nn.dropOutMask{i} = (rand(size(nn.a{i})) > nn.dropoutFraction);
                nn.a{i} = nn.a{i}.*nn.dropOutMask{i};
            end
        end

        % calculate running exponential activations for use with
        % sparsity (nn.p{i} is consumed later in nnbp)
        if(nn.nonSparsityPenalty > 0)
            nn.p{i} = 0.99 * nn.p{i} + 0.01 * mean(nn.a{i}, 1);
        end

        % add the bias term: prepend ones so the next layer's bias applies
        nn.a{i} = [ones(m,1) nn.a{i}];
    end

    switch nn.output   % output-layer activation
        case 'sigm'
            nn.a{n} = sigm(nn.a{n - 1} * nn.W{n - 1}');
        case 'linear'
            nn.a{n} = nn.a{n - 1} * nn.W{n - 1}';
        case 'softmax'
            nn.a{n} = nn.a{n - 1} * nn.W{n - 1}';
            % subtract the row-wise max before exp for numerical stability
            nn.a{n} = exp(bsxfun(@minus, nn.a{n}, max(nn.a{n},[],2)));
            nn.a{n} = bsxfun(@rdivide, nn.a{n}, sum(nn.a{n}, 2));
    end

    % error and loss: targets minus predictions
    nn.e = y - nn.a{n};

    switch nn.output   % loss matching the output activation
        case {'sigm', 'linear'}
            nn.L = 1/2 * sum(sum(nn.e .^ 2)) / m;    % (half) mean squared error
        case 'softmax'
            nn.L = -sum(sum(y .* log(nn.a{n}))) / m; % mean cross-entropy
    end
end
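A few small sketches make the individual steps concrete. First, the column of ones prepended to x (and to each nn.a{i}) folds the bias into the weight matrix, so a single matrix product computes both the weighted sum and the bias. The 3/2/4 shapes below are made up purely for illustration:

% Sketch of the bias-column trick; shapes are illustrative only.
a = [ones(3,1) rand(3,2)];   % 3 samples, 2 features, plus a bias column
W = rand(4, 3);              % 4 hidden units; W(:,1) acts as the bias vector
z = a * W';                  % 3 x 4 pre-activations: W(:,2:end)*x' + W(:,1)
disp(size(z));               % [3 4]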
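The dropout branch uses the classic (non-inverted) scheme: during training a random mask zeroes each hidden activation with probability nn.dropoutFraction, and at test time activations are instead scaled by (1 - nn.dropoutFraction) so their expected value matches training. A quick sketch with made-up activations:

% Illustrative check that training-time masking and test-time scaling
% agree in expectation; all numbers here are invented.
dropoutFraction = 0.5;
a = ones(10000, 20);                     % fake hidden activations
mask = rand(size(a)) > dropoutFraction;  % training-time dropout mask
train_mean = mean(mean(a .* mask));      % ~0.5 on average
test_mean  = mean(mean(a * (1 - dropoutFraction))); % exactly 0.5
disp([train_mean test_mean]);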
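The nn.p{i} update is an exponential moving average of each unit's mean activation across mini-batches; nnbp later penalizes its deviation from the sparsity target. The toy loop below shows the estimate converging to the true mean activation (about 0.1 here, with invented activations):

% Toy demonstration of the running average p <- 0.99*p + 0.01*mean(a,1).
p = zeros(1, 5);
for batch = 1:500
    a = rand(100, 5) * 0.2;              % batch activations, true mean ~0.1
    p = 0.99 * p + 0.01 * mean(a, 1);
end
disp(p);                                 % every entry close to 0.1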
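In the softmax branch, subtracting the row-wise maximum before exponentiating leaves the result mathematically unchanged, since exp(z-c)/sum(exp(z-c)) = exp(z)/sum(exp(z)), but it prevents overflow. A sketch with deliberately large made-up logits:

% Made-up logits large enough to overflow exp without the max shift.
z = [1000 1001 1002];
naive = exp(z) ./ sum(exp(z));                   % Inf./Inf -> NaN
shifted = exp(bsxfun(@minus, z, max(z, [], 2))); % exp([-2 -1 0])
stable = bsxfun(@rdivide, shifted, sum(shifted, 2));
disp(naive);                                     % NaN NaN NaN
disp(stable);                                    % ~[0.0900 0.2447 0.6652]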
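The two loss branches pair each output activation with a matching objective: (half) mean squared error for 'sigm' and 'linear', and mean cross-entropy against one-hot labels for 'softmax'. Evaluating both with tiny hand-made predictions:

% Hand-made labels/predictions purely to evaluate the two loss formulas.
y    = [1 0 0; 0 1 0];                  % two one-hot labels
pred = [0.7 0.2 0.1; 0.3 0.6 0.1];      % two predicted rows
m = size(y, 1);
e = y - pred;
L_mse = 1/2 * sum(sum(e .^ 2)) / m;     % 0.5 * 0.40 / 2 = 0.10
L_ce  = -sum(sum(y .* log(pred))) / m;  % -(log 0.7 + log 0.6)/2 ~ 0.4338
disp([L_mse L_ce]);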
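Finally, an end-to-end sketch of calling nnff, assuming the surrounding DeepLearnToolbox functions (nnsetup, sigm) are on the path; the random data and the [784 25 10] architecture are only for illustration:

% Minimal, illustrative forward pass; random data stands in for real input.
nn = nnsetup([784 25 10]);    % nn.n = 3, weights in nn.W{1}, nn.W{2}
nn.output = 'softmax';        % take the softmax output branch
x = rand(100, 784);           % 100 samples, one per row
y = eye(10);
y = y(randi(10, 100, 1), :);  % random one-hot labels
nn = nnff(nn, x, y);
disp(size(nn.a{3}));          % [100 10] output activations
disp(nn.L);                   % mean cross-entropy loss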