
Deep Learning Algorithm

% Four m-files in total; this is the first one.
% Name it DeepLearning.m
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Purpose: demonstrate a deep learning algorithm applied to computer
% vision; trains a DBN (deep belief network) for classification.
% Environment: Win7, Matlab2012b
% Modified by: NUDT-VAP
% Date: 2014-10-10
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

function DeepLearning()

%% load training data
clear;
data = load('../data/mnist.mat');
train_x = double(data.train_x) / 255;
train_y = double(data.train_y);
test_x = double(data.test_x) / 255;
test_y = double(data.test_y);
clear('data');
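
% The exact contents of mnist.mat are an assumption inferred from how the
% fields are used below: train_x/test_x hold one flattened image per row
% (pixel values 0-255, hence the /255 scaling) and train_y/test_y hold
% one-hot label rows. For the standard MNIST split that would be
% train_x 60000x784, train_y 60000x10, test_x 10000x784, test_y 10000x10.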

%% setup DBN model
input_dim = size(train_x, 2);
output_dim = size(train_y, 2);
hidden_sz1 = 100;
hidden_sz2 = 100;
dbn.sizes = [input_dim, hidden_sz1, hidden_sz2];
dbn_opts.numepochs = 50;
dbn_opts.batchsize = 100;
dbn_opts.momentum = 0.5;
dbn_opts.alpha = 1;
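
% The loop below allocates one RBM per adjacent pair of layer sizes. For
% layer i: W is the (hidden x visible) weight matrix, b the visible bias,
% c the hidden bias, and vW/vb/vc the matching momentum velocity terms.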
for i = 1 : numel(dbn.sizes) - 1
    dbn.rbm{i}.alpha = dbn_opts.alpha;
    dbn.rbm{i}.momentum = dbn_opts.momentum;
    dbn.rbm{i}.W  = zeros(dbn.sizes(i+1), dbn.sizes(i));
    dbn.rbm{i}.vW = zeros(dbn.sizes(i+1), dbn.sizes(i));
    dbn.rbm{i}.b  = zeros(dbn.sizes(i), 1);
    dbn.rbm{i}.vb = zeros(dbn.sizes(i), 1);
    dbn.rbm{i}.c  = zeros(dbn.sizes(i+1), 1);
    dbn.rbm{i}.vc = zeros(dbn.sizes(i+1), 1);
end

%% layer-wise pretrain DBN model
x = train_x;
rng('default');
dbn.rbm{1} = rbmtrain(dbn.rbm{1}, x, dbn_opts);
for i = 2 : numel(dbn.sizes) - 1
    x = sigm(repmat(dbn.rbm{i-1}.c', size(x, 1), 1) + x * dbn.rbm{i-1}.W');
    dbn.rbm{i} = rbmtrain(dbn.rbm{i}, x, dbn_opts);
end
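
% Greedy layer-wise pretraining: after RBM i-1 is trained, its hidden-unit
% probabilities sigm(c' + x*W') become the "visible" data for RBM i, so each
% layer learns to model the features produced by the layer beneath it.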

%% use DBN to initialize NN to perform fine-tuning
nn.sizes = [dbn.sizes, output_dim];
nn.n = numel(nn.sizes);
nn.learning_rate = 0.1;
nn.momentum = 0.5;

nn_opts.numepochs = 50;
nn_opts.batchsize = 100;


rng('default');
for i = 1 : numel(dbn.rbm)
    nn.W{i}  = [dbn.rbm{i}.c, dbn.rbm{i}.W];
    nn.vW{i} = zeros(size(nn.W{i}));
end
i = nn.n - 1;
nn.W{i} = (rand(nn.sizes(i+1), nn.sizes(i)+1) - 0.5) * 2 * 4 * sqrt(6 / (nn.sizes(i+1) + nn.sizes(i)));
nn.vW{i} = zeros(size(nn.W{i}));
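
% The pretrained layers reuse the RBM weights, with the hidden bias c folded
% in as the first (bias) column of each nn.W{i}. Only the output layer is
% random: uniform in +/- 4*sqrt(6/(fan_in+fan_out)), the sigmoid-scaled
% Glorot ("normalized") initialization range; the +1 in nn.sizes(i)+1 again
% accounts for the bias column.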

nn = nntrain(nn, train_x, train_y, nn_opts);
[err_rate, ~] = nntest(nn, test_x, test_y);

% With 50 epochs, the error rate should be around 5%.
disp(['Final classification error rate: ' num2str(err_rate*100), '%.']);

end

function X = sigm(P)
X = 1./(1+exp(-P));
end
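
% Usage sketch (an assumption: run from the folder holding these four
% m-files, with mnist.mat present at ../data/mnist.mat as loaded above):
%
%   DeepLearning();
%
% Pretraining prints per-epoch RBM reconstruction errors, fine-tuning prints
% per-epoch training loss, and the final test error rate is displayed.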



% Second m-file: name it nntest.m
function [err_rate, err_num] = nntest(nn, x, y)
nn = nnff(nn, x, zeros(size(x,1), nn.sizes(end)));
[~, labels] = max(nn.a{end}, [], 2);    % predicted class = argmax over outputs
[~, expected] = max(y, [], 2);          % true class = argmax of the one-hot row
err_idx = find(labels ~= expected);     % indices of misclassified samples
err_num = numel(err_idx);               % count of misclassified samples
err_rate = err_num / size(x, 1);
end

function nn = nnff(nn, x, y)
n = nn.n;
m = size(x, 1);
x = [ones(m,1) x];
nn.a{1} = x;
%feedforward pass
for i = 2 : n-1
    nn.a{i} = sigm(nn.a{i - 1} * nn.W{i - 1}');
    % Add the bias term
    nn.a{i} = [ones(m,1) nn.a{i}];
end
nn.a{n} = sigm(nn.a{n - 1} * nn.W{n - 1}');
%error and loss
nn.error = y - nn.a{n};
nn.loss = 1/2 * sum(sum(nn.error .^ 2)) / m;
end
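
% nnff folds the bias into the weights by prepending a column of ones to each
% layer's activations; the loss is the squared error averaged over the batch,
% loss = sum((y - a_n).^2) / (2*m).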

function X = sigm(P)
X = 1./(1+exp(-P));
end
% Third m-file: name it nntrain.m
function [nn, L] = nntrain(nn, train_x, train_y, opts)
m = size(train_x, 1);
batchsize = opts.batchsize;
numepochs = opts.numepochs;
numbatches = floor(m / batchsize);
L = zeros(numepochs*numbatches,1);
n = 1;
for k = 1 : numepochs
    tic;
    kk = randperm(m);
    for j = 1 : numbatches
        batch_x = train_x(kk((j - 1) * batchsize + 1 : j * batchsize), :);
        batch_y = train_y(kk((j - 1) * batchsize + 1 : j * batchsize), :);

        nn = nnff(nn, batch_x, batch_y);
        nn = nnbp(nn);
        nn = nngrads(nn);

        L(n) = nn.loss;
        n = n + 1;
    end
    t = toc;
    nn = nnff(nn, train_x, train_y);
    str_perf = sprintf('; Full-batch train err = %f', nn.loss);
    disp(['NN train: epoch ' num2str(k) '/' num2str(numepochs) '. Took ' num2str(t) ' seconds. Mini-batch mean squared error on training set is ' num2str(mean(L((n-numbatches):(n-1)))) str_perf]);
end
end
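
% Each epoch shuffles the sample order with randperm and, because numbatches
% uses floor, silently drops the remainder samples when m is not a multiple
% of batchsize.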

function nn = nnff(nn, x, y)
n = nn.n;
m = size(x, 1);
x = [ones(m,1) x];
nn.a{1} = x;
%feedforward pass
for i = 2 : n-1
    nn.a{i} = sigm(nn.a{i - 1} * nn.W{i - 1}');
    % Add the bias term
    nn.a{i} = [ones(m,1) nn.a{i}];
end
nn.a{n} = sigm(nn.a{n - 1} * nn.W{n - 1}');
%error and loss
nn.error = y - nn.a{n};
nn.loss = 1/2 * sum(sum(nn.error .^ 2)) / m;
end

function nn = nnbp(nn)
n = nn.n;
d{n} = - nn.error .* (nn.a{n} .* (1 - nn.a{n}));
for i = (n - 1) : -1 : 2
    % Derivative of the activation function
    d_act = nn.a{i} .* (1 - nn.a{i});
    % Backpropagate first derivatives
    if i+1 == n   % d{n} is the output layer, so there is no bias column to remove
        d{i} = (d{i + 1} * nn.W{i}) .* d_act;   % Bishop (5.56)
    else          % d{i+1} carries a bias column that has to be removed
        d{i} = (d{i + 1}(:,2:end) * nn.W{i}) .* d_act;
    end
end
for i = 1 : (n - 1)
    if i+1 == n
        nn.dW{i} = (d{i + 1}' * nn.a{i}) / size(d{i + 1}, 1);
    else
        nn.dW{i} = (d{i + 1}(:,2:end)' * nn.a{i}) / size(d{i + 1}, 1);
    end
end
end
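
% Written out, the recursion implemented above (cf. Bishop eq. 5.56), for
% sigmoid units and squared-error loss:
%   d{n}  = -(y - a{n}) .* a{n} .* (1 - a{n})        % output layer
%   d{i}  = (d{i+1} * W{i}) .* a{i} .* (1 - a{i})    % hidden layers
%   dW{i} = d{i+1}' * a{i} / batchsize               % batch-averaged gradient
% with the bias column of d{i+1} stripped before it is propagated further.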

function nn = nngrads(nn)
for i = 1 : (nn.n - 1)
    dW = nn.learning_rate * nn.dW{i};
    if nn.momentum > 0
        nn.vW{i} = nn.momentum * nn.vW{i} + dW;
        dW = nn.vW{i};
    end
    nn.W{i} = nn.W{i} - dW;
end
end
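
% Classic momentum ("heavy ball") update: the velocity accumulates a decaying
% sum of past gradients,
%   vW{i} = momentum * vW{i} + learning_rate * dW{i}
%   W{i}  = W{i} - vW{i}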

function X = sigm(P)
X = 1./(1+exp(-P));
end
% Fourth m-file: name it rbmtrain.m
function rbm = rbmtrain(rbm, x, opts)
m = size(x, 1);
numbatches = floor(m / opts.batchsize);   % floor so batch indices never run past m
for i = 1 : opts.numepochs
    kk = randperm(m);
    err = 0;
    for l = 1 : numbatches
        batch = x(kk((l - 1) * opts.batchsize + 1 : l * opts.batchsize), :);

        v1 = batch;
        h1 = sigmrnd(repmat(rbm.c', opts.batchsize, 1) + v1 * rbm.W');   % sample hidden units

        v2 = sigmrnd(repmat(rbm.b', opts.batchsize, 1) + h1 * rbm.W);    % reconstruct visible units
        h2 = sigm(repmat(rbm.c', opts.batchsize, 1) + v2 * rbm.W');      % hidden probabilities

        c1 = h1' * v1;   % positive-phase statistics
        c2 = h2' * v2;   % negative-phase statistics

        rbm.vW = rbm.momentum * rbm.vW + rbm.alpha * (c1 - c2) / opts.batchsize;
        rbm.vb = rbm.momentum * rbm.vb + rbm.alpha * sum(v1 - v2)' / opts.batchsize;
        rbm.vc = rbm.momentum * rbm.vc + rbm.alpha * sum(h1 - h2)' / opts.batchsize;

        rbm.W = rbm.W + rbm.vW;
        rbm.b = rbm.b + rbm.vb;
        rbm.c = rbm.c + rbm.vc;

        err = err + sum(sum((v1 - v2) .^ 2)) / opts.batchsize;
    end
    disp(['RBM Train: epoch ' num2str(i) '/' num2str(opts.numepochs) '. Average reconstruction error is: ' num2str(err / numbatches)]);
end
end
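
% The inner loop above is one step of contrastive divergence (CD-1):
%   positive phase: h1 ~ p(h|v1), sampled from the data
%   negative phase: v2 ~ p(v|h1) reconstructs the input, h2 = p(h|v2)
%   update: dW is approximately <v*h>_data - <v*h>_recon
%         = (h1'*v1 - h2'*v2) / batchsize
% applied with momentum, exactly as in the weight updates above.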
function X = sigm(P)
X = 1./(1+exp(-P));
end

function X = sigmrnd(P)
% X = double(1./(1+exp(-P)))+1*randn(size(P));
X = double(1./(1+exp(-P)) > rand(size(P)));
end
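
% sigmrnd draws Bernoulli samples: each unit is set to 1 with probability
% sigm(P), giving the stochastic binary states used for the RBM's units.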
