% ECE498 MATLAB Graph - Figure 2
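% Plots normalized training, testing, and validation error against the data
% split ratio for three training algorithms: adaptive backpropagation,
% conjugate gradient, and the quasi-Newton method (MLP).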
% Define ratio labels
Ratio = {'10/90', '20/80', '30/70', '40/60', '50/50', '60/40', '70/30', '80/20', '90/10'};
% Convert to categorical, fixing the category order to match the list above
% so the x-axis is not re-sorted alphabetically
RatioCat = categorical(Ratio, Ratio);
% Adaptive Backpropagation
TrainingError_AdaptiveBackpropagation = [0.0838717, 0.0772489, 0.0677041, 0.0685053, 0.0694665, 0.0660926, 0.0657811, 0.0646259, 0.644504];
TestingError_AdaptiveBackpropagation = [8.658576, 7.694914, 6.696497, 6.895893, 6.978141, 6.622154, 6.420111, 6.4860063, 6.266074];
ValidationError_AdaptiveBackpropagation = [8.833146, 7.8823543, 6.8417497, 7.025997, 7.08813, 6.7571335, 6.585877, 6.6701446, 6.468319];
% Conjugate Gradient
TrainingError_ConjugateGradient = [0.079843, 0.0705143, 0.0672097, 0.0694786, 0.0700395, 0.068498, 0.063214437, 0.0671455, 0.642109];
TestingError_ConjugateGradient = [8.120603, 7.2570846, 6.7983123, 6.938184, 6.901984, 6.781409, 6.42185509, 6.5806203, 6.398567];
ValidationError_ConjugateGradient = [8.465016, 7.3992871, 6.8400613, 7.0910597, 7.116482, 6.932401, 6.63991264, 6.963359, 6.468719];
% Quasi-Newton method (MLP)
TrainingError_QuasiNewton = [0.07601072, 0.06815241, 0.06670812, 0.6660671, 0.06851928, 0.0656073, 0.06382228, 0.06399301, 0.0636099];
TestingError_QuasiNewton = [7.81672, 7.0214787, 6.6731277, 6.6525407, 6.9050035, 6.564301, 6.3074484, 6.2626634, 6.4226537];
ValidationError_QuasiNewton = [8.181952, 7.1462, 6.7927256, 6.7880244, 7.03227063, 6.696533, 6.4821978, 6.489995, 6.5142446];
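% Optional sanity check: every error vector should contain one value per ratio label
assert(numel(TrainingError_AdaptiveBackpropagation) == numel(RatioCat), ...
    'Error vectors must have one entry per ratio')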
% Min-max normalize each error vector to the [0, 1] range
normalize01 = @(x) (x - min(x)) / (max(x) - min(x));
TrainingError_AdaptiveBackpropagation   = normalize01(TrainingError_AdaptiveBackpropagation);
TestingError_AdaptiveBackpropagation    = normalize01(TestingError_AdaptiveBackpropagation);
ValidationError_AdaptiveBackpropagation = normalize01(ValidationError_AdaptiveBackpropagation);
TrainingError_ConjugateGradient         = normalize01(TrainingError_ConjugateGradient);
TestingError_ConjugateGradient          = normalize01(TestingError_ConjugateGradient);
ValidationError_ConjugateGradient       = normalize01(ValidationError_ConjugateGradient);
TrainingError_QuasiNewton               = normalize01(TrainingError_QuasiNewton);
TestingError_QuasiNewton                = normalize01(TestingError_QuasiNewton);
ValidationError_QuasiNewton             = normalize01(ValidationError_QuasiNewton);
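% Note: on R2018a or newer, normalize(x, 'range') performs the same [0, 1]
% rescaling as the anonymous function above; the explicit formula is kept here
% so the script also runs on older releases.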
% Plot
figure
% Adaptive Backpropagation plot
subplot(3,1,1)
plot(RatioCat, TrainingError_AdaptiveBackpropagation, '-', 'LineWidth', 2)
hold on
plot(RatioCat, TestingError_AdaptiveBackpropagation, '-', 'LineWidth', 2)
plot(RatioCat, ValidationError_AdaptiveBackpropagation, '-', 'LineWidth', 2)
hold off
xlabel('Ratio')
ylabel('Normalized Error')
title('Adaptive Backpropagation')
legend('Training Error', 'Testing Error', 'Validation Error')
% Conjugate Gradient plot
subplot(3,1,2)
plot(RatioCat, TrainingError_ConjugateGradient, '-', 'LineWidth', 2)
hold on
plot(RatioCat, TestingError_ConjugateGradient, '-', 'LineWidth', 2)
plot(RatioCat, ValidationError_ConjugateGradient, '-', 'LineWidth', 2)
hold off
xlabel('Ratio')
ylabel('Normalized Error')
title('Conjugate Gradient')
legend('Training Error', 'Testing Error', 'Validation Error')
% Quasi-Newton method (MLP) plot
subplot(3,1,3)
plot(RatioCat, TrainingError_QuasiNewton, '-', 'LineWidth', 2)
hold on
plot(RatioCat, TestingError_QuasiNewton, '-', 'LineWidth', 2)
plot(RatioCat, ValidationError_QuasiNewton, '-', 'LineWidth', 2)
hold off
xlabel('Ratio')
ylabel('Normalized Error')
title('Quasi-Newton method (MLP)')
legend('Training Error', 'Testing Error', 'Validation Error')
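% Optional: export the composite figure; the filename below is only an example
% and can be changed as needed
saveas(gcf, 'ece498_figure2.png')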