diff --git a/ex5/validationCurve.m b/ex5/validationCurve.m
index 24b56bc..6ca0710 100644
--- a/ex5/validationCurve.m
+++ b/ex5/validationCurve.m
@@ -17,36 +17,36 @@
 error_train = zeros(length(lambda_vec), 1);
 error_val = zeros(length(lambda_vec), 1);
 
 % ====================== YOUR CODE HERE ======================
-% Instructions: Fill in this function to return training errors in 
-%               error_train and the validation errors in error_val. The 
-%               vector lambda_vec contains the different lambda parameters 
-%               to use for each calculation of the errors, i.e, 
-%               error_train(i), and error_val(i) should give 
-%               you the errors obtained after training with 
+% Instructions: Fill in this function to return training errors in
+%               error_train and the validation errors in error_val. The
+%               vector lambda_vec contains the different lambda parameters
+%               to use for each calculation of the errors, i.e,
+%               error_train(i), and error_val(i) should give
+%               you the errors obtained after training with
 %               lambda = lambda_vec(i)
 %
 % Note: You can loop over lambda_vec with the following:
 %
 %       for i = 1:length(lambda_vec)
 %           lambda = lambda_vec(i);
-%           % Compute train / val errors when training linear 
+%           % Compute train / val errors when training linear
 %           % regression with regularization parameter lambda
 %           % You should store the result in error_train(i)
 %           % and error_val(i)
 %           ....
-%           
+%
 %       end
 %
 %
+for i = 1:length(lambda_vec)
+    lambda = lambda_vec(i);
+    theta = trainLinearReg(X, y, lambda);
-
-
-
-
-
-
-
+    lambda = 0;
+    error_train(i) = linearRegCostFunction(X, y, theta, lambda);
+    error_val(i) = linearRegCostFunction(Xval, yval, theta, lambda);
+end
 % =========================================================================