import exercise 1: IV. Linear Regression with Multiple Variables (Week 2)
This commit is contained in:
		
						commit
						79825e97f4
					
				
					 15 changed files with 1245 additions and 0 deletions
				
			
		
							
								
								
									
										22
									
								
								ex1/computeCost.m
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										22
									
								
								ex1/computeCost.m
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,22 @@ | |||
function J = computeCost(X, y, theta)
%COMPUTECOST Compute cost for linear regression
%   J = COMPUTECOST(X, y, theta) computes the cost of using theta as the
%   parameter for linear regression to fit the data points in X and y.
%
%   X     - m x (n+1) design matrix (first column expected to be ones)
%   y     - m x 1 vector of targets
%   theta - (n+1) x 1 parameter vector
%
%   Returns the mean squared error cost
%       J = (1 / (2*m)) * sum((X*theta - y).^2)

% Initialize some useful values
m = length(y); % number of training examples

% Vectorized cost: residuals' * residuals equals sum of squared errors.
residuals = X * theta - y;           % m x 1 prediction errors
J = (residuals' * residuals) / (2 * m);

end
							
								
								
									
										22
									
								
								ex1/computeCostMulti.m
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										22
									
								
								ex1/computeCostMulti.m
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,22 @@ | |||
function J = computeCostMulti(X, y, theta)
%COMPUTECOSTMULTI Compute cost for linear regression with multiple variables
%   J = COMPUTECOSTMULTI(X, y, theta) computes the cost of using theta as the
%   parameter for linear regression to fit the data points in X and y.
%
%   X     - m x (n+1) design matrix (first column expected to be ones)
%   y     - m x 1 vector of targets
%   theta - (n+1) x 1 parameter vector
%
%   Returns the mean squared error cost
%       J = (1 / (2*m)) * sum((X*theta - y).^2)
%   The vectorized form below works for any number of features.

% Initialize some useful values
m = length(y); % number of training examples

% Vectorized cost: residuals' * residuals equals sum of squared errors.
residuals = X * theta - y;           % m x 1 prediction errors
J = (residuals' * residuals) / (2 * m);

end
							
								
								
									
										122
									
								
								ex1/ex1.m
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										122
									
								
								ex1/ex1.m
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,122 @@ | |||
%% Machine Learning Online Class - Exercise 1: Linear Regression

%  Instructions
%  ------------
% 
%  This file contains code that helps you get started on the
%  linear exercise. You will need to complete the following functions 
%  in this exercise:
%
%     warmUpExercise.m
%     plotData.m
%     gradientDescent.m
%     computeCost.m
%     gradientDescentMulti.m
%     computeCostMulti.m
%     featureNormalize.m
%     normalEqn.m
%
%  For this exercise, you will not need to change any code in this file,
%  or any other files other than those mentioned above.
%
% x refers to the population size in 10,000s
% y refers to the profit in $10,000s
%

%% Initialization
clear ; close all; clc

%% ==================== Part 1: Basic Function ====================
% Complete warmUpExercise.m 
fprintf('Running warmUpExercise ... \n');
fprintf('5x5 Identity Matrix: \n');
warmUpExercise()

fprintf('Program paused. Press enter to continue.\n');
pause;


%% ======================= Part 2: Plotting =======================
fprintf('Plotting Data ...\n')
data = load('ex1data1.txt');
X = data(:, 1); y = data(:, 2);
m = length(y); % number of training examples

% Plot Data
% Note: You have to complete the code in plotData.m
plotData(X, y);

fprintf('Program paused. Press enter to continue.\n');
pause;

%% =================== Part 3: Gradient descent ===================
fprintf('Running Gradient Descent ...\n')

X = [ones(m, 1), data(:,1)]; % Add a column of ones to x (intercept term)
theta = zeros(2, 1); % initialize fitting parameters

% Some gradient descent settings
iterations = 1500;
alpha = 0.01;

% compute and display initial cost (no semicolon: value is echoed on purpose)
computeCost(X, y, theta)

% run gradient descent
theta = gradientDescent(X, y, theta, alpha, iterations);

% print theta to screen
fprintf('Theta found by gradient descent: ');
fprintf('%f %f \n', theta(1), theta(2));

% Plot the linear fit
hold on; % keep previous plot visible
plot(X(:,2), X*theta, '-')
legend('Training data', 'Linear regression')
hold off % don't overlay any more plots on this figure

% Predict values for population sizes of 35,000 and 70,000
% (inputs are in 10,000s of people; outputs are in $10,000s, hence *10000)
predict1 = [1, 3.5] *theta;
fprintf('For population = 35,000, we predict a profit of %f\n',...
    predict1*10000);
predict2 = [1, 7] * theta;
fprintf('For population = 70,000, we predict a profit of %f\n',...
    predict2*10000);

fprintf('Program paused. Press enter to continue.\n');
pause;

%% ============= Part 4: Visualizing J(theta_0, theta_1) =============
fprintf('Visualizing J(theta_0, theta_1) ...\n')

% Grid over which we will calculate J
theta0_vals = linspace(-10, 10, 100);
theta1_vals = linspace(-1, 4, 100);

% initialize J_vals to a matrix of 0's
J_vals = zeros(length(theta0_vals), length(theta1_vals));

% Fill out J_vals: evaluate the cost at every (theta_0, theta_1) grid point
for i = 1:length(theta0_vals)
    for j = 1:length(theta1_vals)
	  t = [theta0_vals(i); theta1_vals(j)];    
	  J_vals(i,j) = computeCost(X, y, t);
    end
end


% Because of the way meshgrids work in the surf command, we need to 
% transpose J_vals before calling surf, or else the axes will be flipped
J_vals = J_vals';
% Surface plot
figure;
surf(theta0_vals, theta1_vals, J_vals)
xlabel('\theta_0'); ylabel('\theta_1');

% Contour plot
figure;
% Plot J_vals as 20 contours spaced logarithmically between 0.01 and 1000
contour(theta0_vals, theta1_vals, J_vals, logspace(-2, 3, 20))
xlabel('\theta_0'); ylabel('\theta_1');
hold on;
plot(theta(1), theta(2), 'rx', 'MarkerSize', 10, 'LineWidth', 2);
							
								
								
									
										
											BIN
										
									
								
								ex1/ex1.pdf
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										
											BIN
										
									
								
								ex1/ex1.pdf
									
										
									
									
									
										Normal file
									
								
							
										
											Binary file not shown.
										
									
								
							
							
								
								
									
										159
									
								
								ex1/ex1_multi.m
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										159
									
								
								ex1/ex1_multi.m
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,159 @@ | |||
%% Machine Learning Online Class
%  Exercise 1: Linear regression with multiple variables
%
%  Instructions
%  ------------
% 
%  This file contains code that helps you get started on the
%  linear regression exercise. 
%
%  You will need to complete the following functions in this 
%  exercise:
%
%     warmUpExercise.m
%     plotData.m
%     gradientDescent.m
%     computeCost.m
%     gradientDescentMulti.m
%     computeCostMulti.m
%     featureNormalize.m
%     normalEqn.m
%
%  For this part of the exercise, you will need to change some
%  parts of the code below for various experiments (e.g., changing
%  learning rates).
%

%% Initialization

%% ================ Part 1: Feature Normalization ================

%% Clear and Close Figures
clear ; close all; clc

fprintf('Loading data ...\n');

%% Load Data
data = load('ex1data2.txt');
X = data(:, 1:2);
y = data(:, 3);
m = length(y);

% Print out some data points
fprintf('First 10 examples from the dataset: \n');
fprintf(' x = [%.0f %.0f], y = %.0f \n', [X(1:10,:) y(1:10,:)]');

fprintf('Program paused. Press enter to continue.\n');
pause;

% Scale features and set them to zero mean
fprintf('Normalizing Features ...\n');

% mu and sigma are kept so later predictions can apply the SAME scaling.
[X mu sigma] = featureNormalize(X);

% Add intercept term to X
X = [ones(m, 1) X];


%% ================ Part 2: Gradient Descent ================

% Run gradient descent with a particular learning rate (alpha).
% Try different values of alpha and see which one gives the best result.

fprintf('Running gradient descent ...\n');

% Choose some alpha value
alpha = 0.01;
num_iters = 400;

% Init Theta and Run Gradient Descent 
theta = zeros(3, 1);
[theta, J_history] = gradientDescentMulti(X, y, theta, alpha, num_iters);

% Plot the convergence graph
figure;
plot(1:numel(J_history), J_history, '-b', 'LineWidth', 2);
xlabel('Number of iterations');
ylabel('Cost J');

% Display gradient descent's result
fprintf('Theta computed from gradient descent: \n');
fprintf(' %f \n', theta);
fprintf('\n');

% Estimate the price of a 1650 sq-ft, 3 br house.
% Recall that the first column of X is all-ones, so the intercept term
% is not normalized; the raw features must be scaled with the same
% mu/sigma that featureNormalize produced for the training set.
price = [1, ([1650, 3] - mu) ./ sigma] * theta;

fprintf(['Predicted price of a 1650 sq-ft, 3 br house ' ...
         '(using gradient descent):\n $%f\n'], price);

fprintf('Program paused. Press enter to continue.\n');
pause;

%% ================ Part 3: Normal Equations ================

fprintf('Solving with normal equations...\n');

% The closed-form solution needs no feature scaling and no iteration,
% so the data is reloaded in raw (unnormalized) form.

%% Load Data
data = csvread('ex1data2.txt');
X = data(:, 1:2);
y = data(:, 3);
m = length(y);

% Add intercept term to X
X = [ones(m, 1) X];

% Calculate the parameters from the normal equation
theta = normalEqn(X, y);

% Display normal equation's result
fprintf('Theta computed from the normal equations: \n');
fprintf(' %f \n', theta);
fprintf('\n');


% Estimate the price of a 1650 sq-ft, 3 br house.
% theta here was fit on RAW features, so the query uses raw values too.
price = [1, 1650, 3] * theta;

fprintf(['Predicted price of a 1650 sq-ft, 3 br house ' ...
         '(using normal equations):\n $%f\n'], price);
| 
 | ||||
							
								
								
									
										97
									
								
								ex1/ex1data1.txt
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										97
									
								
								ex1/ex1data1.txt
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,97 @@ | |||
| 6.1101,17.592 | ||||
| 5.5277,9.1302 | ||||
| 8.5186,13.662 | ||||
| 7.0032,11.854 | ||||
| 5.8598,6.8233 | ||||
| 8.3829,11.886 | ||||
| 7.4764,4.3483 | ||||
| 8.5781,12 | ||||
| 6.4862,6.5987 | ||||
| 5.0546,3.8166 | ||||
| 5.7107,3.2522 | ||||
| 14.164,15.505 | ||||
| 5.734,3.1551 | ||||
| 8.4084,7.2258 | ||||
| 5.6407,0.71618 | ||||
| 5.3794,3.5129 | ||||
| 6.3654,5.3048 | ||||
| 5.1301,0.56077 | ||||
| 6.4296,3.6518 | ||||
| 7.0708,5.3893 | ||||
| 6.1891,3.1386 | ||||
| 20.27,21.767 | ||||
| 5.4901,4.263 | ||||
| 6.3261,5.1875 | ||||
| 5.5649,3.0825 | ||||
| 18.945,22.638 | ||||
| 12.828,13.501 | ||||
| 10.957,7.0467 | ||||
| 13.176,14.692 | ||||
| 22.203,24.147 | ||||
| 5.2524,-1.22 | ||||
| 6.5894,5.9966 | ||||
| 9.2482,12.134 | ||||
| 5.8918,1.8495 | ||||
| 8.2111,6.5426 | ||||
| 7.9334,4.5623 | ||||
| 8.0959,4.1164 | ||||
| 5.6063,3.3928 | ||||
| 12.836,10.117 | ||||
| 6.3534,5.4974 | ||||
| 5.4069,0.55657 | ||||
| 6.8825,3.9115 | ||||
| 11.708,5.3854 | ||||
| 5.7737,2.4406 | ||||
| 7.8247,6.7318 | ||||
| 7.0931,1.0463 | ||||
| 5.0702,5.1337 | ||||
| 5.8014,1.844 | ||||
| 11.7,8.0043 | ||||
| 5.5416,1.0179 | ||||
| 7.5402,6.7504 | ||||
| 5.3077,1.8396 | ||||
| 7.4239,4.2885 | ||||
| 7.6031,4.9981 | ||||
| 6.3328,1.4233 | ||||
| 6.3589,-1.4211 | ||||
| 6.2742,2.4756 | ||||
| 5.6397,4.6042 | ||||
| 9.3102,3.9624 | ||||
| 9.4536,5.4141 | ||||
| 8.8254,5.1694 | ||||
| 5.1793,-0.74279 | ||||
| 21.279,17.929 | ||||
| 14.908,12.054 | ||||
| 18.959,17.054 | ||||
| 7.2182,4.8852 | ||||
| 8.2951,5.7442 | ||||
| 10.236,7.7754 | ||||
| 5.4994,1.0173 | ||||
| 20.341,20.992 | ||||
| 10.136,6.6799 | ||||
| 7.3345,4.0259 | ||||
| 6.0062,1.2784 | ||||
| 7.2259,3.3411 | ||||
| 5.0269,-2.6807 | ||||
| 6.5479,0.29678 | ||||
| 7.5386,3.8845 | ||||
| 5.0365,5.7014 | ||||
| 10.274,6.7526 | ||||
| 5.1077,2.0576 | ||||
| 5.7292,0.47953 | ||||
| 5.1884,0.20421 | ||||
| 6.3557,0.67861 | ||||
| 9.7687,7.5435 | ||||
| 6.5159,5.3436 | ||||
| 8.5172,4.2415 | ||||
| 9.1802,6.7981 | ||||
| 6.002,0.92695 | ||||
| 5.5204,0.152 | ||||
| 5.0594,2.8214 | ||||
| 5.7077,1.8451 | ||||
| 7.6366,4.2959 | ||||
| 5.8707,7.2029 | ||||
| 5.3054,1.9869 | ||||
| 8.2934,0.14454 | ||||
| 13.394,9.0551 | ||||
| 5.4369,0.61705 | ||||
							
								
								
									
										47
									
								
								ex1/ex1data2.txt
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										47
									
								
								ex1/ex1data2.txt
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,47 @@ | |||
| 2104,3,399900 | ||||
| 1600,3,329900 | ||||
| 2400,3,369000 | ||||
| 1416,2,232000 | ||||
| 3000,4,539900 | ||||
| 1985,4,299900 | ||||
| 1534,3,314900 | ||||
| 1427,3,198999 | ||||
| 1380,3,212000 | ||||
| 1494,3,242500 | ||||
| 1940,4,239999 | ||||
| 2000,3,347000 | ||||
| 1890,3,329999 | ||||
| 4478,5,699900 | ||||
| 1268,3,259900 | ||||
| 2300,4,449900 | ||||
| 1320,2,299900 | ||||
| 1236,3,199900 | ||||
| 2609,4,499998 | ||||
| 3031,4,599000 | ||||
| 1767,3,252900 | ||||
| 1888,2,255000 | ||||
| 1604,3,242900 | ||||
| 1962,4,259900 | ||||
| 3890,3,573900 | ||||
| 1100,3,249900 | ||||
| 1458,3,464500 | ||||
| 2526,3,469000 | ||||
| 2200,3,475000 | ||||
| 2637,3,299900 | ||||
| 1839,2,349900 | ||||
| 1000,1,169900 | ||||
| 2040,4,314900 | ||||
| 3137,3,579900 | ||||
| 1811,4,285900 | ||||
| 1437,3,249900 | ||||
| 1239,3,229900 | ||||
| 2132,4,345000 | ||||
| 4215,4,549000 | ||||
| 2162,4,287000 | ||||
| 1664,2,368500 | ||||
| 2238,3,329900 | ||||
| 2567,4,314000 | ||||
| 1200,3,299000 | ||||
| 852,2,179900 | ||||
| 1852,4,299900 | ||||
| 1203,3,239500 | ||||
							
								
								
									
										39
									
								
								ex1/featureNormalize.m
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										39
									
								
								ex1/featureNormalize.m
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,39 @@ | |||
function [X_norm, mu, sigma] = featureNormalize(X)
%FEATURENORMALIZE Normalizes the features in X 
%   [X_norm, mu, sigma] = FEATURENORMALIZE(X) returns a normalized version
%   of X where the mean value of each feature is 0 and the standard
%   deviation is 1. This is often a good preprocessing step to do when
%   working with learning algorithms.
%
%   X       - m x n matrix; each column is a feature, each row an example
%   X_norm  - m x n matrix of z-scored features
%   mu      - 1 x n row vector of per-column means
%   sigma   - 1 x n row vector of per-column sample standard deviations
%             (std normalizes by m-1)

mu = mean(X);     % per-feature mean
sigma = std(X);   % per-feature standard deviation

% Subtract the mean and divide by the standard deviation, column-wise.
% bsxfun keeps this compatible with Octave/MATLAB versions that lack
% implicit broadcasting.
X_norm = bsxfun(@rdivide, bsxfun(@minus, X, mu), sigma);

end
							
								
								
									
										33
									
								
								ex1/gradientDescent.m
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										33
									
								
								ex1/gradientDescent.m
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,33 @@ | |||
function [theta, J_history] = gradientDescent(X, y, theta, alpha, num_iters)
%GRADIENTDESCENT Performs gradient descent to learn theta
%   theta = GRADIENTDESCENT(X, y, theta, alpha, num_iters) updates theta by 
%   taking num_iters gradient steps with learning rate alpha.
%
%   X         - m x (n+1) design matrix (first column expected to be ones)
%   y         - m x 1 target vector
%   theta     - (n+1) x 1 initial parameters
%   alpha     - scalar learning rate
%   num_iters - number of iterations to run
%
%   Also returns J_history, a num_iters x 1 vector of the cost after
%   each update, useful for checking convergence.

% Initialize some useful values
m = length(y); % number of training examples
J_history = zeros(num_iters, 1);

for iter = 1:num_iters

    % Vectorized batch update: all components of theta are updated
    % simultaneously from the same residual vector.
    %   theta := theta - (alpha/m) * X' * (X*theta - y)
    theta = theta - (alpha / m) * (X' * (X * theta - y));

    % Save the cost J in every iteration    
    J_history(iter) = computeCost(X, y, theta);

end

end
							
								
								
									
										37
									
								
								ex1/gradientDescentMulti.m
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										37
									
								
								ex1/gradientDescentMulti.m
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,37 @@ | |||
function [theta, J_history] = gradientDescentMulti(X, y, theta, alpha, num_iters)
%GRADIENTDESCENTMULTI Performs gradient descent to learn theta
%   theta = GRADIENTDESCENTMULTI(X, y, theta, alpha, num_iters) updates theta by
%   taking num_iters gradient steps with learning rate alpha.
%
%   X         - m x (n+1) design matrix (first column expected to be ones)
%   y         - m x 1 target vector
%   theta     - (n+1) x 1 initial parameters
%   alpha     - scalar learning rate
%   num_iters - number of iterations to run
%
%   Also returns J_history, a num_iters x 1 vector of the cost after
%   each update. The vectorized update below works for any number of
%   features, so it matches gradientDescent.m in the single-feature case.

% Initialize some useful values
m = length(y); % number of training examples
J_history = zeros(num_iters, 1);

for iter = 1:num_iters

    % Vectorized batch update:
    %   theta := theta - (alpha/m) * X' * (X*theta - y)
    theta = theta - (alpha / m) * (X' * (X * theta - y));

    % Save the cost J in every iteration    
    J_history(iter) = computeCostMulti(X, y, theta);

end

end
							
								
								
									
										23
									
								
								ex1/normalEqn.m
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										23
									
								
								ex1/normalEqn.m
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,23 @@ | |||
function [theta] = normalEqn(X, y)
%NORMALEQN Computes the closed-form solution to linear regression 
%   NORMALEQN(X,y) computes the closed-form solution to linear 
%   regression using the normal equations:
%       theta = (X' * X)^-1 * X' * y
%
%   X - m x (n+1) design matrix
%   y - m x 1 target vector
%
%   Returns the (n+1) x 1 least-squares parameter vector.

% pinv (Moore-Penrose pseudoinverse) is used instead of inv so the
% computation still yields a minimum-norm solution when X'*X is
% singular or near-singular (e.g. redundant features).
theta = pinv(X' * X) * X' * y;

end
							
								
								
									
										26
									
								
								ex1/plotData.m
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										26
									
								
								ex1/plotData.m
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,26 @@ | |||
function plotData(x, y)
%PLOTDATA Plots the data points x and y into a new figure 
%   PLOTDATA(x,y) plots the data points and gives the figure axes labels of
%   population and profit.
%
%   x - vector of city populations (in 10,000s)
%   y - vector of profits (in $10,000s)
%
%   NOTE(review): axis labels assume the ex1data1.txt population/profit
%   data described in ex1.m.

figure; % open a new figure window

% Red crosses ('rx') with enlarged markers make individual points visible.
plot(x, y, 'rx', 'MarkerSize', 10);
ylabel('Profit in $10,000s');
xlabel('Population of City in 10,000s');

end
							
								
								
									
										577
									
								
								ex1/submit.m
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										577
									
								
								ex1/submit.m
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,577 @@ | |||
function submit(partId, webSubmit)
%SUBMIT Submit your code and output to the ml-class servers
%   SUBMIT() will connect to the ml-class server and submit your solution.
%
%   partId    - (optional) integer part to submit; when absent or empty the
%               user is prompted. The value numel(validParts())+1 means
%               "submit all parts".
%   webSubmit - (optional) nonzero to save a base64-encoded submission file
%               for the course web form instead of submitting directly.
%
%   NOTE(review): relies on helpers defined elsewhere in this file
%   (promptPart, validParts, isValidPartId, loginPrompt, quickLogin,
%   getChallenge, challengeResponse, submitSolution, submitSolutionWeb,
%   output, source, base64encode, homework_id).

  fprintf('==\n== [ml-class] Submitting Solutions | Programming Exercise %s\n==\n', ...
          homework_id());
  if ~exist('partId', 'var') || isempty(partId)
    partId = promptPart();
  end

  if ~exist('webSubmit', 'var') || isempty(webSubmit)
    webSubmit = 0; % submit directly by default 
  end

  % Check valid partId
  partNames = validParts();
  if ~isValidPartId(partId)
    fprintf('!! Invalid homework part selected.\n');
    fprintf('!! Expected an integer from 1 to %d.\n', numel(partNames) + 1);
    fprintf('!! Submission Cancelled\n');
    return
  end

  % Cache credentials on disk so repeat submissions can skip the prompt.
  if ~exist('ml_login_data.mat','file')
    [login password] = loginPrompt();
    save('ml_login_data.mat','login','password');
  else  
    load('ml_login_data.mat');
    [login password] = quickLogin(login, password);
    save('ml_login_data.mat','login','password');
  end

  if isempty(login)
    fprintf('!! Submission Cancelled\n');
    return
  end

  fprintf('\n== Connecting to ml-class ... '); 
  if exist('OCTAVE_VERSION') 
    fflush(stdout); % Octave buffers stdout; flush so the message shows now
  end

  % Setup submit list (sentinel numel(partNames)+1 expands to all parts)
  if partId == numel(partNames) + 1
    submitParts = 1:numel(partNames);
  else
    submitParts = [partId];
  end

  for s = 1:numel(submitParts)
    thisPartId = submitParts(s);
    if (~webSubmit) % submit directly to server
      [login, ch, signature, auxstring] = getChallenge(login, thisPartId);
      if isempty(login) || isempty(ch) || isempty(signature)
        % Some error occurred; the error string is in the first return element.
        fprintf('\n!! Error: %s\n\n', login);
        return
      end

      % Attempt Submission with Challenge
      ch_resp = challengeResponse(login, password, ch);

      [result, str] = submitSolution(login, ch_resp, thisPartId, ...
             output(thisPartId, auxstring), source(thisPartId), signature);

      partName = partNames{thisPartId};

      fprintf('\n== [ml-class] Submitted Assignment %s - Part %d - %s\n', ...
        homework_id(), thisPartId, partName);
      fprintf('== %s\n', strtrim(str));

      if exist('OCTAVE_VERSION')
        fflush(stdout);
      end
    else
      % Web submission: encode the solution and write it to a local file
      % that the student uploads through the course web form.
      [result] = submitSolutionWeb(login, thisPartId, output(thisPartId), ...
                            source(thisPartId));
      result = base64encode(result);

      fprintf('\nSave as submission file [submit_ex%s_part%d.txt (enter to accept default)]:', ...
        homework_id(), thisPartId);
      saveAsFile = input('', 's');
      if (isempty(saveAsFile))
        saveAsFile = sprintf('submit_ex%s_part%d.txt', homework_id(), thisPartId);
      end

      fid = fopen(saveAsFile, 'w');
      if (fid)
        fwrite(fid, result);
        fclose(fid);
        fprintf('\nSaved your solutions to %s.\n\n', saveAsFile);
        fprintf(['You can now submit your solutions through the web \n' ...
                 'form in the programming exercises. Select the corresponding \n' ...
                 'programming exercise to access the form.\n']);

      else
        % Could not open the file; dump the encoded text so the student can
        % save it manually.
        fprintf('Unable to save to %s\n\n', saveAsFile);
        fprintf(['You can create a submission file by saving the \n' ...
                 'following text in a file: (press enter to continue)\n\n']);
        pause;
        fprintf(result);
      end
    end
  end
end
| 
 | ||||
| % ================== CONFIGURABLES FOR EACH HOMEWORK ================== | ||||
| 
 | ||||
function hw = homework_id()
%HOMEWORK_ID Identifier string for this programming exercise.
  hw = '1';
end
| 
 | ||||
function [partNames] = validParts()
%VALIDPARTS Cell array of the gradeable part names for this exercise,
%   in submission order.
  partNames = cell(1, 7);
  partNames{1} = 'Warm up exercise ';
  partNames{2} = 'Computing Cost (for one variable)';
  partNames{3} = 'Gradient Descent (for one variable)';
  partNames{4} = 'Feature Normalization';
  partNames{5} = 'Computing Cost (for multiple variables)';
  partNames{6} = 'Gradient Descent (for multiple variables)';
  partNames{7} = 'Normal Equations';
end
| 
 | ||||
function srcs = sources()
  % Source files to collect for each part: a 1x7 cell array whose i-th
  % entry is the cell array of filenames belonging to part i.
  srcs = {{'warmUpExercise.m'},       {'computeCost.m'}, ...
          {'gradientDescent.m'},      {'featureNormalize.m'}, ...
          {'computeCostMulti.m'},     {'gradientDescentMulti.m'}, ...
          {'normalEqn.m'}};
end
| 
 | ||||
function out = output(partId, auxstring)
  % Produces the graded output string for the requested part by running
  % the student's implementation on fixed, deterministic test cases.
  %   partId    - index into validParts() (1..7)
  %   auxstring - unused here; kept for interface compatibility with the
  %               grader's calling convention
  % Returns a string of space-separated values formatted to 5 decimals.

  % Random Test Cases (deterministic; must match the grading server)
  X1 = [ones(20,1) (exp(1) + exp(2) * (0.1:0.1:2))'];
  Y1 = X1(:,2) + sin(X1(:,1)) + cos(X1(:,2));
  X2 = [X1 X1(:,2).^0.5 X1(:,2).^0.25];
  Y2 = Y1.^0.5 + Y1;

  if partId == 1
    out = sprintf('%0.5f ', warmUpExercise());
  elseif partId == 2
    out = sprintf('%0.5f ', computeCost(X1, Y1, [0.5 -0.5]'));
  elseif partId == 3
    out = sprintf('%0.5f ', gradientDescent(X1, Y1, [0.5 -0.5]', 0.01, 10));
  elseif partId == 4
    out = sprintf('%0.5f ', featureNormalize(X2(:,2:4)));
  elseif partId == 5
    out = sprintf('%0.5f ', computeCostMulti(X2, Y2, [0.1 0.2 0.3 0.4]'));
  elseif partId == 6
    out = sprintf('%0.5f ', gradientDescentMulti(X2, Y2, [-0.1 -0.2 -0.3 -0.4]', 0.01, 10));
  elseif partId == 7
    out = sprintf('%0.5f ', normalEqn(X2, Y2));
  else
    % Previously an unknown part id left `out` unassigned, surfacing as a
    % confusing "undefined variable" error in the caller; fail loudly.
    error('output: unknown part id %d', partId);
  end
end
| 
 | ||||
| % ====================== SERVER CONFIGURATION =========================== | ||||
| 
 | ||||
| % ***************** REMOVE -staging WHEN YOU DEPLOY ********************* | ||||
function url = site_url()
  % Base URL of the course session; the challenge and submit endpoints
  % are built relative to this address.
  url = 'http://class.coursera.org/ml-007';
end
| 
 | ||||
function url = challenge_url()
  % Endpoint used to request a login challenge before submitting.
  url = sprintf('%s/assignment/challenge', site_url());
end
| 
 | ||||
function url = submit_url()
  % Endpoint that receives the actual solution submission.
  url = sprintf('%s/assignment/submit', site_url());
end
| 
 | ||||
| % ========================= CHALLENGE HELPERS ========================= | ||||
| 
 | ||||
function src = source(partId)
  % Concatenates the contents of every source file registered for the
  % given part, appending the '||||||||' delimiter after each file.
  % Raises an error if any registered file cannot be opened.
  src = '';
  src_files = sources();
  if partId <= numel(src_files)
      for fname = src_files{partId}
          fid = fopen(fname{1});
          if (fid == -1)
            error('Error opening %s (is it missing?)', fname{1});
          end
          % Accumulate the file line by line (fgets keeps newlines).
          contents = '';
          line = fgets(fid);
          while ischar(line)
            contents = [contents line];
            line = fgets(fid);
          end
          fclose(fid);
          src = [src contents '||||||||'];
      end
  end
end
| 
 | ||||
function ret = isValidPartId(partId)
  % A part id is valid when it is a non-empty value between 1 and the
  % number of parts plus one (the extra slot is "submit all parts").
  maxPart = numel(validParts()) + 1;
  ret = ~isempty(partId) && (partId >= 1) && (partId <= maxPart);
end
| 
 | ||||
function partId = promptPart()
  % Interactive menu: lists each part with its source file(s) plus an
  % "all of the above" option, and returns the chosen part id, or -1
  % when the answer is not a valid choice.
  fprintf('== Select which part(s) to submit:\n');
  partNames = validParts();
  srcFiles = sources();
  for i = 1:numel(partNames)
    fprintf('==   %d) %s [', i, partNames{i});
    fprintf(' %s ', srcFiles{i}{:});
    fprintf(']\n');
  end
  fprintf('==   %d) All of the above \n==\nEnter your choice [1-%d]: ', ...
          numel(partNames) + 1, numel(partNames) + 1);
  selPart = input('', 's');
  % str2double instead of str2num: str2num eval()s arbitrary console
  % input, which is unsafe and surprising (e.g. '1+1' would pass as 2).
  % Non-numeric answers become NaN, which isValidPartId rejects.
  partId = str2double(selPart);
  if ~isValidPartId(partId)
    partId = -1;
  end
end
| 
 | ||||
function [email,ch,signature,auxstring] = getChallenge(email, part)
  % Requests a login challenge from the server for one assignment part.
  % The server answers with a '|'-delimited key|value|key|value... list,
  % which is decoded into a struct and unpacked into the outputs.
  sid = [homework_id() '-' num2str(part)];
  params = {'email_address', email, 'assignment_part_sid', sid, ...
            'response_encoding', 'delim'};
  str = strtrim(urlread(challenge_url(), 'post', params));

  % Decode alternating keys and values into struct fields.
  r = struct;
  while numel(str) > 0
    [f, str] = strtok(str, '|');
    [v, str] = strtok(str, '|');
    r.(f) = v;
  end

  email     = r.email_address;
  ch        = r.challenge_key;
  signature = r.state;
  auxstring = r.challenge_aux_data;
end
| 
 | ||||
function [result, str] = submitSolutionWeb(email, part, output, source)
  % Packs the submission into the JSON payload expected by the web
  % submission form; every field value is base64-encoded without line
  % breaks. (Base64 output uses only [A-Za-z0-9+/=], so it is safe to
  % splice into a format string.)
  enc = @(s) base64encode(s, '');
  sid = [homework_id() '-' num2str(part)];
  result = sprintf('{"assignment_part_sid":"%s","email_address":"%s","submission":"%s","submission_aux":"%s"}', ...
                   enc(sid), enc(email), enc(output), enc(source));
  str = 'Web-submission';
end
| 
 | ||||
function [result, str] = submitSolution(email, ch_resp, part, output, ...
                                        source, signature)
  % Direct (non-web) submission: POSTs the encoded solution, the source
  % aux data, and the challenge response to the class server.
  %   ch_resp   - sha1 proof computed by challengeResponse()
  %   signature - opaque server state returned by getChallenge()
  % Returns the raw server response in `str`.

  params = {'assignment_part_sid', [homework_id() '-' num2str(part)], ...
            'email_address', email, ...
            'submission', base64encode(output, ''), ...
            'submission_aux', base64encode(source, ''), ...
            'challenge_response', ch_resp, ...
            'state', signature};

  str = urlread(submit_url(), 'post', params);

  % Parse str to read for success / failure
  % NOTE(review): parsing is not implemented -- `result` is always 0
  % regardless of what the server answered; callers must inspect `str`.
  result = 0;

end
| 
 | ||||
| % =========================== LOGIN HELPERS =========================== | ||||
| 
 | ||||
function [login password] = loginPrompt()
  % Asks the user for credentials. If either answer is empty, both
  % outputs are reset to [] so the caller can detect an aborted login.
  [login password] = basicPrompt();

  if isempty(login) || isempty(password)
    login = [];
    password = [];
  end
end
| 
 | ||||
| 
 | ||||
function [login password] = basicPrompt()
  % Reads login credentials from the console.
  % NOTE(review): the password is echoed in plain text -- input() has no
  % portable no-echo mode in Octave/MATLAB.
  login = input('Login (Email address): ', 's');
  password = input('Password: ', 's');
end
| 
 | ||||
function [login password] = quickLogin(login,password)
  % Offers to reuse the cached credentials. Any answer that is empty or
  % starts with 'y'/'Y' keeps them; anything else re-prompts.
  disp(['You are currently logged in as ' login '.']);
  cont_token = input('Is this you? (y/n - type n to reenter password)','s');
  keep = isempty(cont_token) || any(cont_token(1) == 'yY');
  if ~keep
    [login password] = loginPrompt();
  end
end
| 
 | ||||
function [str] = challengeResponse(email, passwd, challenge)
  % Login proof: SHA-1 of the server challenge concatenated with the
  % password. The `email` argument is unused here; it is kept for the
  % caller's uniform signature.
  str = sha1([challenge passwd]);
end
| 
 | ||||
| % =============================== SHA-1 ================================ | ||||
| 
 | ||||
function hash = sha1(str)
  % SHA-1 digest of a char vector, returned as a 40-char lowercase hex
  % string. Pure Octave/MATLAB implementation; relies on the sibling
  % helpers bitadd (mod-2^32 add) and bitrotate (32-bit left rotate).
  % NOTE(review): assumes str contains byte values (0-255); messages are
  % assumed short (length counter built from strlen*8 as a double).

  % Initialize variables
  % Standard SHA-1 initial hash values:
  % 0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0.
  h0 = uint32(1732584193);
  h1 = uint32(4023233417);
  h2 = uint32(2562383102);
  h3 = uint32(271733878);
  h4 = uint32(3285377520);
  
  % Convert to word array
  strlen = numel(str);

  % Break string into chars and append the bit 1 to the message
  % (0x80 = the mandatory '1' padding bit in the top of the next byte).
  mC = [double(str) 128];
  mC = [mC zeros(1, 4-mod(numel(mC), 4), 'uint8')];
  
  numB = strlen * 8;
  % Number of 512-bit blocks needed for message + padding + 64-bit length.
  if exist('idivide')
    numC = idivide(uint32(numB + 65), 512, 'ceil');
  else
    numC = ceil(double(numB + 65)/512);
  end
  numW = numC * 16;
  mW = zeros(numW, 1, 'uint32');
  
  % Pack bytes big-endian, four per 32-bit word; trailing words stay 0.
  idx = 1;
  for i = 1:4:strlen + 1
    mW(idx) = bitor(bitor(bitor( ...
                  bitshift(uint32(mC(i)), 24), ...
                  bitshift(uint32(mC(i+1)), 16)), ...
                  bitshift(uint32(mC(i+2)), 8)), ...
                  uint32(mC(i+3)));
    idx = idx + 1;
  end
  
  % Append length of message
  % (64-bit bit-count split into high and low 32-bit words).
  mW(numW - 1) = uint32(bitshift(uint64(numB), -32));
  mW(numW) = uint32(bitshift(bitshift(uint64(numB), 32), -32));

  % Process the message in successive 512-bit chs
  for cId = 1 : double(numC)
    cSt = (cId - 1) * 16 + 1;
    cEnd = cId * 16;
    ch = mW(cSt : cEnd);
    
    % Extend the sixteen 32-bit words into eighty 32-bit words
    for j = 17 : 80
      ch(j) = ch(j - 3);
      ch(j) = bitxor(ch(j), ch(j - 8));
      ch(j) = bitxor(ch(j), ch(j - 14));
      ch(j) = bitxor(ch(j), ch(j - 16));
      ch(j) = bitrotate(ch(j), 1);
    end
   
    % Initialize hash value for this ch
    a = h0;
    b = h1;
    c = h2;
    d = h3;
    e = h4;
    
    % Main loop
    % Per-round function f and constant k, by round group (1-20, 21-40,
    % 41-60, 61-80).
    for i = 1 : 80
      if(i >= 1 && i <= 20)
        f = bitor(bitand(b, c), bitand(bitcmp(b), d));
        k = uint32(1518500249);
      elseif(i >= 21 && i <= 40)
        f = bitxor(bitxor(b, c), d);
        k = uint32(1859775393);
      elseif(i >= 41 && i <= 60)
        f = bitor(bitor(bitand(b, c), bitand(b, d)), bitand(c, d));
        k = uint32(2400959708);
      elseif(i >= 61 && i <= 80)
        f = bitxor(bitxor(b, c), d);
        k = uint32(3395469782);
      end
      
      % t = rotl(a,5) + f + e + k + w[i], all mod 2^32.
      t = bitrotate(a, 5);
      t = bitadd(t, f);
      t = bitadd(t, e);
      t = bitadd(t, k);
      t = bitadd(t, ch(i));
      e = d;
      d = c;
      c = bitrotate(b, 30);
      b = a;
      a = t;
      
    end
    % Fold this block's result into the running hash state.
    h0 = bitadd(h0, a);
    h1 = bitadd(h1, b);
    h2 = bitadd(h2, c);
    h3 = bitadd(h3, d);
    h4 = bitadd(h4, e);

  end

  % Concatenate the five state words as 8-digit hex each -> 40 chars.
  hash = reshape(dec2hex(double([h0 h1 h2 h3 h4]), 8)', [1 40]);
  
  hash = lower(hash);

end
| 
 | ||||
function ret = bitadd(iA, iB)
  % Modular 32-bit addition: add in double precision (exact well below
  % 2^53) and wrap the sum back into uint32 range. For two uint32 inputs
  % the sum is below 2^33, so mod 2^32 matches clearing bit 33.
  total = double(iA) + double(iB);
  ret = uint32(mod(total, 2^32));
end
| 
 | ||||
function ret = bitrotate(iA, places)
  % 32-bit left rotation: bits shifted off the top re-enter at the
  % bottom. bitshift on uint32 discards overflow, so the OR of the
  % shifted halves is the rotated word.
  hi = bitshift(iA, places);           % left-shifted part (overflow dropped)
  lo = bitshift(iA, -(32 - places));   % wrapped-around top bits
  ret = bitor(hi, lo);
end
| 
 | ||||
| % =========================== Base64 Encoder ============================ | ||||
| % Thanks to Peter John Acklam | ||||
| % | ||||
| 
 | ||||
function y = base64encode(x, eol)
%BASE64ENCODE Perform base64 encoding on a string.
%
%   BASE64ENCODE(STR, EOL) encode the given string STR.  EOL is the line ending
%   sequence to use; it is optional and defaults to '\n' (ASCII decimal 10).
%   The returned encoded string is broken into lines of no more than 76
%   characters each, and each line will end with EOL unless it is empty.  Let
%   EOL be empty if you do not want the encoded string broken into lines.
%
%   STR and EOL don't have to be strings (i.e., char arrays).  The only
%   requirement is that they are vectors containing values in the range 0-255.
%
%   This function may be used to encode strings into the Base64 encoding
%   specified in RFC 2045 - MIME (Multipurpose Internet Mail Extensions).  The
%   Base64 encoding is designed to represent arbitrary sequences of octets in a
%   form that need not be humanly readable.  A 65-character subset
%   ([A-Za-z0-9+/=]) of US-ASCII is used, enabling 6 bits to be represented per
%   printable character.
%
%   Examples
%   --------
%
%   If you want to encode a large file, you should encode it in chunks that are
%   a multiple of 57 bytes.  This ensures that the base64 lines line up and
%   that you do not end up with padding in the middle.  57 bytes of data fills
%   one complete base64 line (76 == 57*4/3):
%
%   If ifid and ofid are two file identifiers opened for reading and writing,
%   respectively, then you can base64 encode the data with
%
%      while ~feof(ifid)
%         fwrite(ofid, base64encode(fread(ifid, 60*57)));
%      end
%
%   or, if you have enough memory,
%
%      fwrite(ofid, base64encode(fread(ifid)));
%
%   See also BASE64DECODE.

%   Author:      Peter John Acklam
%   Time-stamp:  2004-02-03 21:36:56 +0100
%   E-mail:      pjacklam@online.no
%   URL:         http://home.online.no/~pjacklam

   % NOTE(review): numeric input is converted to its *decimal string*
   % representation (num2str), not interpreted as raw bytes -- surprising,
   % but callers in this file only pass char data.
   if isnumeric(x)
      x = num2str(x);
   end

   % make sure we have the EOL value
   if nargin < 2
      eol = sprintf('\n');
   else
      if sum(size(eol) > 1) > 1
         error('EOL must be a vector.');
      end
      if any(eol(:) > 255)
         error('EOL can not contain values larger than 255.');
      end
   end

   if sum(size(x) > 1) > 1
      error('STR must be a vector.');
   end

   x   = uint8(x);
   eol = uint8(eol);

   ndbytes = length(x);                 % number of decoded bytes
   nchunks = ceil(ndbytes / 3);         % number of chunks/groups
   nebytes = 4 * nchunks;               % number of encoded bytes

   % add padding if necessary, to make the length of x a multiple of 3
   if rem(ndbytes, 3)
      x(end+1 : 3*nchunks) = 0;
   end

   x = reshape(x, [3, nchunks]);        % reshape the data
   y = repmat(uint8(0), 4, nchunks);    % for the encoded data

   %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
   % Split up every 3 bytes into 4 pieces
   %
   %    aaaaaabb bbbbcccc ccdddddd
   %
   % to form
   %
   %    00aaaaaa 00bbbbbb 00cccccc 00dddddd
   %
   y(1,:) = bitshift(x(1,:), -2);                  % 6 highest bits of x(1,:)

   y(2,:) = bitshift(bitand(x(1,:), 3), 4);        % 2 lowest bits of x(1,:)
   y(2,:) = bitor(y(2,:), bitshift(x(2,:), -4));   % 4 highest bits of x(2,:)

   y(3,:) = bitshift(bitand(x(2,:), 15), 2);       % 4 lowest bits of x(2,:)
   y(3,:) = bitor(y(3,:), bitshift(x(3,:), -6));   % 2 highest bits of x(3,:)

   y(4,:) = bitand(x(3,:), 63);                    % 6 lowest bits of x(3,:)

   %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
   % Now perform the following mapping
   %
   %   0  - 25  ->  A-Z
   %   26 - 51  ->  a-z
   %   52 - 61  ->  0-9
   %   62       ->  +
   %   63       ->  /
   %
   % We could use a mapping vector like
   %
   %   ['A':'Z', 'a':'z', '0':'9', '+/']
   %
   % but that would require an index vector of class double.
   %
   z = repmat(uint8(0), size(y));
   i =           y <= 25;  z(i) = 'A'      + double(y(i));
   i = 26 <= y & y <= 51;  z(i) = 'a' - 26 + double(y(i));
   i = 52 <= y & y <= 61;  z(i) = '0' - 52 + double(y(i));
   i =           y == 62;  z(i) = '+';
   i =           y == 63;  z(i) = '/';
   y = z;

   %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
   % Add padding if necessary.
   %
   npbytes = 3 * nchunks - ndbytes;     % number of padding bytes
   if npbytes
      y(end-npbytes+1 : end) = '=';     % '=' is used for padding
   end

   if isempty(eol)

      % reshape to a row vector
      y = reshape(y, [1, nebytes]);

   else

      nlines = ceil(nebytes / 76);      % number of lines
      neolbytes = length(eol);          % number of bytes in eol string

      % pad data so it becomes a multiple of 76 elements
      y = [y(:) ; zeros(76 * nlines - numel(y), 1)];
      y(nebytes + 1 : 76 * nlines) = 0;
      y = reshape(y, 76, nlines);

      % insert eol strings
      eol = eol(:);
      y(end + 1 : end + neolbytes, :) = eol(:, ones(1, nlines));

      % remove padding, but keep the last eol string
      m = nebytes + neolbytes * (nlines - 1);
      n = (76+neolbytes)*nlines - neolbytes;
      y(m+1 : n) = '';

      % extract and reshape to row vector
      y = reshape(y, 1, m+neolbytes);

   end

   % output is a character array
   y = char(y);

end
							
								
								
									
										20
									
								
								ex1/submitWeb.m
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										20
									
								
								ex1/submitWeb.m
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,20 @@ | |||
| % submitWeb Creates files from your code and output for web submission. | ||||
| % | ||||
| %   If the submit function does not work for you, use the web-submission mechanism. | ||||
| %   Call this function to produce a file for the part you wish to submit. Then, | ||||
| %   submit the file to the class servers using the "Web Submission" button on the  | ||||
| %   Programming Exercises page on the course website. | ||||
| % | ||||
| %   You should call this function without arguments (submitWeb), to receive | ||||
| %   an interactive prompt for submission; optionally you can call it with the partID | ||||
| %   if you so wish. Make sure your working directory is set to the directory  | ||||
| %   containing the submitWeb.m file and your assignment files. | ||||
| 
 | ||||
function submitWeb(partId)
  % Entry point for web submission: delegates to submit() with the
  % web-submission flag set (second argument = 1).
  % nargin < 1 is equivalent to the exist('partId','var') check for a
  % declared function argument.
  if nargin < 1 || isempty(partId)
    partId = [];
  end

  submit(partId, 1);
end
| 
 | ||||
							
								
								
									
										21
									
								
								ex1/warmUpExercise.m
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										21
									
								
								ex1/warmUpExercise.m
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,21 @@ | |||
function A = warmUpExercise()
%WARMUPEXERCISE Example function in octave
%   A = WARMUPEXERCISE() is an example function that returns the 5x5 identity matrix

% The template left A = [], which contradicts the documented contract;
% eye(5) is the built-in 5x5 identity matrix.
A = eye(5);

end
		Reference in a new issue