Coursera Machine Learning (Andrew Ng) ex4

Download the code and assignment description
In this assignment we implement Neural Networks Learning.

The following code files need to be completed:

  • sigmoidGradient.m
  • randInitializeWeights.m
  • nnCostFunction.m

sigmoidGradient.m

```matlab
function g = sigmoidGradient(z)
%SIGMOIDGRADIENT returns the gradient of the sigmoid function
%evaluated at z
% g = SIGMOIDGRADIENT(z) computes the gradient of the sigmoid function
% evaluated at z. This should work regardless if z is a matrix or a
% vector. In particular, if z is a vector or matrix, you should return
% the gradient for each element.
g = zeros(size(z));
% ====================== YOUR CODE HERE ======================
% Instructions: Compute the gradient of the sigmoid function evaluated at
% each value of z (z can be a matrix, vector or scalar).
g = sigmoid(z) .* (1 - sigmoid(z));
% =============================================================
end
```
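
A quick sanity check (not part of the graded files): the gradient is largest at z = 0, where sigmoid(0) = 0.5 and therefore sigmoidGradient(0) = 0.5 * 0.5 = 0.25, and it approaches 0 for large |z|. A minimal sketch, assuming sigmoid.m from the assignment is on the path:

```matlab
% Evaluate the gradient at a few points; the result should be symmetric
% around z = 0 and peak at 0.25.
g = sigmoidGradient([-10 -1 0 1 10])
% expected (approximately): 0.0000  0.1966  0.2500  0.1966  0.0000
```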

randInitializeWeights.m

```matlab
function W = randInitializeWeights(L_in, L_out)
%RANDINITIALIZEWEIGHTS Randomly initialize the weights of a layer with L_in
%incoming connections and L_out outgoing connections
% W = RANDINITIALIZEWEIGHTS(L_in, L_out) randomly initializes the weights
% of a layer with L_in incoming connections and L_out outgoing
% connections.
%
% Note that W should be set to a matrix of size(L_out, 1 + L_in) as
% the first column of W handles the "bias" terms
%
% You need to return the following variables correctly
W = zeros(L_out, 1 + L_in);
% ====================== YOUR CODE HERE ======================
% Instructions: Initialize W randomly so that we break the symmetry while
% training the neural network.
%
% Note: The first column of W corresponds to the parameters for the bias unit
%
% The handout suggests epsilon_init = sqrt(6) / sqrt(L_in + L_out),
% which is roughly 0.12 for this network's layer sizes.
epsilon_init = 0.12;
W = rand(L_out, 1 + L_in) * 2 * epsilon_init - epsilon_init;
% =========================================================================
end
```
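
In ex4.m this function is used to build the initial, unrolled parameter vector. A minimal sketch, assuming the layer sizes used in this exercise (400 input units, 25 hidden units, 10 output labels):

```matlab
% Break symmetry: each weight is drawn uniformly from [-epsilon_init, epsilon_init]
% instead of starting at zero, so hidden units can learn different features.
input_layer_size  = 400;   % 20x20 input images of digits
hidden_layer_size = 25;
num_labels        = 10;

initial_Theta1 = randInitializeWeights(input_layer_size, hidden_layer_size);  % 25 x 401
initial_Theta2 = randInitializeWeights(hidden_layer_size, num_labels);        % 10 x 26

% Unroll into a single vector, as expected by nnCostFunction and fmincg
initial_nn_params = [initial_Theta1(:) ; initial_Theta2(:)];
```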

nnCostFunction.m
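
For reference, the quantity this file computes is the regularized cost of the two-layer network over all m examples and K output units (the ex4 formulation); the regularization sums below skip the first (bias) column of each weight matrix:

$$
J(\Theta) = \frac{1}{m}\sum_{i=1}^{m}\sum_{k=1}^{K}\Big[-y_k^{(i)}\log\big(h_\Theta(x^{(i)})\big)_k - \big(1-y_k^{(i)}\big)\log\big(1-(h_\Theta(x^{(i)}))_k\big)\Big] + \frac{\lambda}{2m}\Big[\sum_{j,k}\big(\Theta^{(1)}_{j,k}\big)^2 + \sum_{j,k}\big(\Theta^{(2)}_{j,k}\big)^2\Big]
$$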

```matlab
function [J, grad] = nnCostFunction(nn_params, ...
                                    input_layer_size, ...
                                    hidden_layer_size, ...
                                    num_labels, ...
                                    X, y, lambda)
%NNCOSTFUNCTION Implements the neural network cost function for a two layer
%neural network which performs classification
% [J, grad] = NNCOSTFUNCTION(nn_params, input_layer_size, hidden_layer_size, num_labels, ...
% X, y, lambda) computes the cost and gradient of the neural network. The
% parameters for the neural network are "unrolled" into the vector
% nn_params and need to be converted back into the weight matrices.
%
% The returned parameter grad should be a "unrolled" vector of the
% partial derivatives of the neural network.
%
% Reshape nn_params back into the parameters Theta1 and Theta2, the weight matrices
% for our 2 layer neural network
Theta1 = reshape(nn_params(1:hidden_layer_size * (input_layer_size + 1)), ...
                 hidden_layer_size, (input_layer_size + 1));
Theta2 = reshape(nn_params((1 + (hidden_layer_size * (input_layer_size + 1))):end), ...
                 num_labels, (hidden_layer_size + 1));
% Setup some useful variables
m = size(X, 1);
% You need to return the following variables correctly
J = 0;
Theta1_grad = zeros(size(Theta1));
Theta2_grad = zeros(size(Theta2));
% ====================== YOUR CODE HERE ======================
% Instructions: You should complete the code by working through the
% following parts.
%
% Part 1: Feedforward the neural network and return the cost in the
% variable J. After implementing Part 1, you can verify that your
% cost function computation is correct by verifying the cost
% computed in ex4.m
%
% Part 2: Implement the backpropagation algorithm to compute the gradients
% Theta1_grad and Theta2_grad. You should return the partial derivatives of
% the cost function with respect to Theta1 and Theta2 in Theta1_grad and
% Theta2_grad, respectively. After implementing Part 2, you can check
% that your implementation is correct by running checkNNGradients
%
% Note: The vector y passed into the function is a vector of labels
% containing values from 1..K. You need to map this vector into a
% binary vector of 1's and 0's to be used with the neural network
% cost function.
%
% Hint: We recommend implementing backpropagation using a for-loop
% over the training examples if you are implementing it for the
% first time.
%
% Part 3: Implement regularization with the cost function and gradients.
%
% Hint: You can implement this around the code for
% backpropagation. That is, you can compute the gradients for
% the regularization separately and then add them to Theta1_grad
% and Theta2_grad from Part 2.
%
%
% Part 1: feedforward pass and unregularized cost
a1 = [ones(m, 1) X];                         % add bias unit to the input layer
Y_tmp = zeros(m, num_labels);                % map labels 1..K to one-hot rows
for i = 1 : m
    Y_tmp(i, y(i)) = 1;
end
a2 = [ones(m, 1) sigmoid(a1 * (Theta1'))];   % hidden layer activations, with bias
h = sigmoid(a2 * (Theta2'));                 % output layer hypothesis, m x K
J = (-1 / m) * sum(sum(Y_tmp .* log(h) + (1 - Y_tmp) .* log(1 - h)));
%--------------------------------------------------------------------------
% Add the regularization term, skipping the bias column of each Theta
Theta1_tmp = Theta1(:, 2:end);
Theta2_tmp = Theta2(:, 2:end);
regular_term = (lambda / (2 * m)) * (sum(sum(Theta1_tmp .^ 2)) + ...
                                     sum(sum(Theta2_tmp .^ 2)));
J = J + regular_term;
%--------------------------------------------------------------------------
% Part 2: backpropagation. Yk is the same one-hot label matrix as Y_tmp above.
Yk = zeros(m, num_labels);
for i = 1 : m
    Yk(i, y(i)) = 1;
end
%{
Equivalent per-example (loop) implementation of backpropagation:
for i = 1 : m
    a1 = [1, X(i, :)];                       % 1 x 401
    z2 = a1 * Theta1';
    a2 = sigmoid(z2);
    a2 = [1, a2];                            % 1 x 26
    z3 = a2 * Theta2';
    a3 = sigmoid(z3);
    delta3 = a3 - Yk(i, :);                  % 1 x 10
    delta2 = delta3 * Theta2(:, 2:end) .* sigmoidGradient(z2);   % 1 x 25
    Theta1_grad = Theta1_grad + delta2' * a1;
    Theta2_grad = Theta2_grad + delta3' * a2;
end
%}
% Vectorized backpropagation over all m examples
a1 = [ones(m, 1) X];                         % 5000 x 401
z2 = a1 * Theta1';                           % (5000 x 401) * (401 x 25) -> 5000 x 25
a2 = [ones(m, 1) sigmoid(z2)];               % 5000 x 26
z3 = a2 * Theta2';                           % (5000 x 26) * (26 x 10) -> 5000 x 10
a3 = sigmoid(z3);                            % 5000 x 10
delta3 = a3 - Yk;                            % 5000 x 10
delta2 = delta3 * Theta2(:, 2:end) .* sigmoidGradient(z2);   % (5000 x 10) * (10 x 25) -> 5000 x 25
Theta1_grad = Theta1_grad + delta2' * a1;    % (25 x 5000) * (5000 x 401) -> 25 x 401
Theta2_grad = Theta2_grad + delta3' * a2;    % (10 x 5000) * (5000 x 26) -> 10 x 26
Theta1_grad = Theta1_grad ./ m;
Theta2_grad = Theta2_grad ./ m;
%----------------------------------------------------------------
% Part 3: add regularization to the gradients, excluding the bias column
Theta1_tmp = Theta1;
Theta2_tmp = Theta2;
Theta1_tmp(:, 1) = 0;
Theta2_tmp(:, 1) = 0;
Theta1_grad = Theta1_grad + (lambda / m) .* Theta1_tmp;
Theta2_grad = Theta2_grad + (lambda / m) .* Theta2_tmp;
% =========================================================================
% Unroll gradients
grad = [Theta1_grad(:) ; Theta2_grad(:)];
end
```
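
With the three files completed, the implementation can be checked and trained roughly the way ex4.m does. A sketch, assuming the checkNNGradients, fmincg and predict helpers shipped with the assignment are on the path, and that X, y and the initial parameters from the earlier snippet are already loaded:

```matlab
% Gradient check: compare backprop gradients against numerical gradients.
% The reported relative difference should be less than 1e-9.
lambda = 3;
checkNNGradients(lambda);

% Train the network with the provided fmincg optimizer.
options = optimset('MaxIter', 50);
lambda = 1;
costFunction = @(p) nnCostFunction(p, input_layer_size, hidden_layer_size, ...
                                   num_labels, X, y, lambda);
[nn_params, cost] = fmincg(costFunction, initial_nn_params, options);

% Recover the weight matrices from the unrolled solution and measure accuracy.
Theta1 = reshape(nn_params(1:hidden_layer_size * (input_layer_size + 1)), ...
                 hidden_layer_size, (input_layer_size + 1));
Theta2 = reshape(nn_params((1 + (hidden_layer_size * (input_layer_size + 1))):end), ...
                 num_labels, (hidden_layer_size + 1));
pred = predict(Theta1, Theta2, X);
fprintf('Training Set Accuracy: %f\n', mean(double(pred == y)) * 100);
```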
Author: 大巴斯基
Link: http://nieblog.me/2020/01/14/ml_ex4/
Copyright: Unless otherwise noted, all posts on this blog are licensed under CC BY-NC-SA 4.0. Please credit CodeTrainer when reposting.