Learning curve function

master
neingeist 10 years ago
parent 90f2928cee
commit 1cc58802eb

@@ -1,11 +1,11 @@
 function [error_train, error_val] = ...
     learningCurve(X, y, Xval, yval, lambda)
 %LEARNINGCURVE Generates the train and cross validation set errors needed
 %to plot a learning curve
 %   [error_train, error_val] = ...
 %       LEARNINGCURVE(X, y, Xval, yval, lambda) returns the train and
 %       cross validation set errors for a learning curve. In particular,
 %       it returns two vectors of the same length - error_train and
 %       error_val. Then, error_train(i) contains the training error for
 %       i examples (and similarly for error_val(i)).
 %
@@ -22,9 +22,9 @@ error_train = zeros(m, 1);
 error_val = zeros(m, 1);

 % ====================== YOUR CODE HERE ======================
 % Instructions: Fill in this function to return training errors in
 %               error_train and the cross validation errors in error_val.
 %               i.e., error_train(i) and
 %               error_val(i) should give you the errors
 %               obtained after training on i examples.
 %
@@ -35,25 +35,41 @@ error_val = zeros(m, 1);
 %       the _entire_ cross validation set (Xval and yval).
 %
 % Note: If you are using your cost function (linearRegCostFunction)
 %       to compute the training and cross validation error, you should
 %       call the function with the lambda argument set to 0.
 %       Do note that you will still need to use lambda when running
 %       the training to obtain the theta parameters.
 %
 % Hint: You can loop over the examples with the following:
 %
-%       for i = 1:m
-%           % Compute train/cross validation errors using training examples
-%           % X(1:i, :) and y(1:i), storing the result in
-%           % error_train(i) and error_val(i)
-%           ....
-%
-%       end
+for i = 1:m
+    % Compute train/cross validation errors using training examples
+    % X(1:i, :) and y(1:i), storing the result in
+    % error_train(i) and error_val(i)
+
+    X_ = X(1:i,:);
+    y_ = y(1:i);
+
+    % Train with regularization
+    lambda = 1;
+    theta = trainLinearReg(X_, y_, lambda);
+
+    % Compute the error with lambda = 0
+    lambda = 0;
+    error_train(i) = linearRegCostFunction(X_, y_, theta, lambda);
+    error_val(i) = linearRegCostFunction(Xval, yval, theta, lambda);
+end
 %

 % ---------------------- Sample Solution ----------------------
 % for i = 1:m
 %     % Compute train/cross validation errors using training examples
 %     % X(1:i, :) and y(1:i), storing the result in
 %     % error_train(i) and error_val(i)
 %     ....
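
For reference, a minimal sketch of how the committed learningCurve function might be driven from the exercise's main script. Only learningCurve, trainLinearReg and linearRegCostFunction come from the repository; the dataset file ex5data1.mat, the prepended column of ones, and the plotting calls are assumptions based on the surrounding programming exercise and are not part of this commit.

% Usage sketch (assumed driver script, not part of this commit).
% Assumes ex5data1.mat provides X, y, Xval, yval and that trainLinearReg
% and linearRegCostFunction are on the Octave/MATLAB path.
load('ex5data1.mat');

m = size(X, 1);
lambda = 0;    % passed through to learningCurve's lambda argument

% Prepend the bias column before calling, as the exercise scripts do
[error_train, error_val] = ...
    learningCurve([ones(m, 1) X], y, ...
                  [ones(size(Xval, 1), 1) Xval], yval, lambda);

% Plot training vs. cross validation error against the number of examples
plot(1:m, error_train, 1:m, error_val);
title('Learning curve for linear regression');
legend('Train', 'Cross Validation');
xlabel('Number of training examples');
ylabel('Error');

Inside the loop the errors are evaluated with lambda set to 0, as the template's instructions require, while the theta parameters themselves are fit with regularization, so the plotted curves compare unregularized training and cross validation errors.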