Last active
September 26, 2020 13:21
-
-
Save Aakash-kaushik/9e12b5a0f0de6a66a7b9b1311f322ee4 to your computer and use it in GitHub Desktop.
Training the model.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
ens::Adam optimizer( | |
STEP_SIZE, // Step size of the optimizer. | |
BATCH_SIZE, // Batch size. Number of data points that are used in each iteration. | |
0.9, // Exponential decay rate for the first moment estimates. | |
0.999, // Exponential decay rate for the weighted infinity norm estimates. | |
1e-8, // Value used to initialise the mean squared gradient parameter. | |
MAX_ITERATIONS, // Max number of iterations. | |
1e-8, // Tolerance. | |
true); | |
model.Train(trainX, | |
trainY, | |
optimizer, | |
ens::PrintLoss(), | |
ens::ProgressBar(), | |
ens::EarlyStopAtMinLoss(EPOCH), | |
ens::EarlyStopAtMinLoss( | |
[&](const arma::mat& /* param */) | |
{ | |
double validationLoss = model.Evaluate(validX, validY); | |
std::cout << "Validation loss: " << validationLoss | |
<< "." << std::endl; | |
return validationLoss; | |
})); |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment