Skip to content

Instantly share code, notes, and snippets.

@mshabunin
Created February 25, 2015 10:07
Show Gist options
  • Save mshabunin/883114cfab0c10fae16c to your computer and use it in GitHub Desktop.
legacy_params.hpp
#ifndef __LEGACY_ML_PARAMS_DEFINED__
#define __LEGACY_ML_PARAMS_DEFINED__
#include "opencv2/ml.hpp"
namespace cv { namespace ml {
// Primary template: a do-nothing parameter bundle.
// Model types without a dedicated specialization below get this no-op,
// so generic code can always call params.init(model) safely.
template <typename T>
class MLParams
{
public:
    void init(Ptr<T> /*model*/) {}
};
// Legacy-style parameter bundle for cv::ml::KNearest.
// Holds the settings of the pre-3.0 CvKNearest interface; init() pushes
// the stored values onto a constructed model via its setters.
template<>
class MLParams<KNearest>
{
public:
    MLParams(int defaultK_=10, bool isclassifier_=true, int Emax_=INT_MAX, int algorithmType_=KNearest::BRUTE_FORCE)
    {
        defaultK = defaultK_;
        isclassifier = isclassifier_;
        Emax = Emax_;
        algorithmType = algorithmType_;
    }
    int defaultK;       // forwarded to setDefaultK()
    bool isclassifier;  // forwarded to setIsClassifier()
    int Emax;           // forwarded to setEmax()
    int algorithmType;  // forwarded to setAlgorithmType()
    // Apply every stored setting to the given model.
    void init(Ptr<KNearest> model)
    {
        model->setDefaultK(defaultK);
        model->setIsClassifier(isclassifier);
        model->setEmax(Emax);
        model->setAlgorithmType(algorithmType);
    }
};
template<>
class MLParams<SVM>
{
public:
MLParams()
{
svmType = SVM::C_SVC;
kernelType = SVM::RBF;
degree = 0;
gamma = 1;
coef0 = 0;
C = 1;
nu = 0;
p = 0;
termCrit = TermCriteria( TermCriteria::MAX_ITER+TermCriteria::EPS, 1000, FLT_EPSILON );
}
MLParams( int _svmType, int _kernelType,
double _degree, double _gamma, double _coef0,
double _Cvalue, double _nu, double _p,
const Mat& _classWeights, TermCriteria _termCrit )
{
svmType = _svmType;
kernelType = _kernelType;
degree = _degree;
gamma = _gamma;
coef0 = _coef0;
C = _Cvalue;
nu = _nu;
p = _p;
classWeights = _classWeights;
termCrit = _termCrit;
}
int svmType;
int kernelType;
double gamma;
double coef0;
double degree;
double C;
double nu;
double p;
Mat classWeights;
TermCriteria termCrit;
void init(Ptr<SVM> model)
{
model->setType(svmType);
model->setKernel(kernelType);
model->setGamma(gamma);
model->setCoef0(coef0);
model->setDegree(degree);
model->setC(C);
model->setNu(nu);
model->setP(p);
model->setClassWeights(classWeights);
model->setTermCriteria(termCrit);
}
};
template<>
class MLParams<EM>
{
public:
MLParams(int _nclusters=EM::DEFAULT_NCLUSTERS, int _covMatType=EM::COV_MAT_DIAGONAL,
const TermCriteria& _termCrit=TermCriteria(TermCriteria::COUNT+TermCriteria::EPS,
EM::DEFAULT_MAX_ITERS, 1e-6))
{
nclusters = _nclusters;
covMatType = _covMatType;
termCrit = _termCrit;
}
int nclusters;
int covMatType;
TermCriteria termCrit;
void init(Ptr<EM> model)
{
model->setClustersNumber(nclusters);
model->setCovarianceMatrixType(covMatType);
model->setTermCriteria(termCrit);
}
};
template<>
class MLParams<DTrees>
{
public:
MLParams()
{
maxDepth = INT_MAX;
minSampleCount = 10;
regressionAccuracy = 0.01f;
useSurrogates = false;
maxCategories = 10;
CVFolds = 10;
use1SERule = true;
truncatePrunedTree = true;
priors = Mat();
}
MLParams( int _maxDepth, int _minSampleCount,
double _regressionAccuracy, bool _useSurrogates,
int _maxCategories, int _CVFolds,
bool _use1SERule, bool _truncatePrunedTree,
const Mat& _priors )
{
maxDepth = _maxDepth;
minSampleCount = _minSampleCount;
regressionAccuracy = (float)_regressionAccuracy;
useSurrogates = _useSurrogates;
maxCategories = _maxCategories;
CVFolds = _CVFolds;
use1SERule = _use1SERule;
truncatePrunedTree = _truncatePrunedTree;
priors = _priors;
}
int maxCategories;
int maxDepth;
int minSampleCount;
int CVFolds;
bool useSurrogates;
bool use1SERule;
bool truncatePrunedTree;
float regressionAccuracy;
Mat priors;
void init(Ptr<DTrees> model)
{
model->setMaxCategories(maxCategories);
model->setMaxDepth(maxDepth);
model->setMinSampleCount(minSampleCount);
model->setCVFolds(CVFolds);
model->setUseSurrogates(useSurrogates);
model->setUse1SERule(use1SERule);
model->setTruncatePrunedTree(truncatePrunedTree);
model->setRegressionAccuracy(regressionAccuracy);
model->setPriors(priors);
}
};
template<>
class MLParams<RTrees> : public MLParams<DTrees>
{
public:
MLParams()
: MLParams<DTrees>(5, 10, 0.f, false, 10, 0, false, false, Mat())
{
calcVarImportance = false;
nactiveVars = 0;
termCrit = TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 50, 0.1);
}
MLParams( int _maxDepth, int _minSampleCount,
double _regressionAccuracy, bool _useSurrogates,
int _maxCategories, const Mat& _priors,
bool _calcVarImportance, int _nactiveVars,
TermCriteria _termCrit )
: MLParams<DTrees>(_maxDepth, _minSampleCount, _regressionAccuracy, _useSurrogates,
_maxCategories, 0, false, false, _priors)
{
calcVarImportance = _calcVarImportance;
nactiveVars = _nactiveVars;
termCrit = _termCrit;
}
bool calcVarImportance;
int nactiveVars;
TermCriteria termCrit;
void init(Ptr<RTrees> model)
{
MLParams<DTrees>::init(model);
model->setCalculateVarImportance(calcVarImportance);
model->setActiveVarCount(nactiveVars);
model->setTermCriteria(termCrit);
}
};
// Legacy-style parameter bundle for cv::ml::Boost (mirrors the old
// CvBoostParams). Inherits the tree parameters from the DTrees bundle
// and adds the boosting-specific ones.
template<>
class MLParams<Boost> : public MLParams<DTrees>
{
public:
    // Defaults match the historical CvBoostParams(): Real AdaBoost,
    // 100 weak learners, stumps (depth 1), no cross-validation pruning.
    MLParams()
    {
        boostType = Boost::REAL;
        weakCount = 100;
        weightTrimRate = 0.95;
        CVFolds = 0;
        maxDepth = 1;
    }
    MLParams( int _boostType, int _weakCount, double _weightTrimRate,
              int _maxDepth, bool _useSurrogates, const Mat& _priors )
    {
        boostType = _boostType;
        weakCount = _weakCount;
        weightTrimRate = _weightTrimRate;
        CVFolds = 0;
        maxDepth = _maxDepth;
        useSurrogates = _useSurrogates;
        priors = _priors;
    }
    int boostType;          // forwarded to setBoostType()
    int weakCount;          // forwarded to setWeakCount()
    double weightTrimRate;  // forwarded to setWeightTrimRate()
    // Apply every stored setting to the given model.
    // BUG FIX: previously there was no Boost-specific init(), so the
    // boosting fields above were stored but never applied to the model —
    // only the inherited DTrees settings took effect.
    void init(Ptr<Boost> model)
    {
        MLParams<DTrees>::init(model);
        model->setBoostType(boostType);
        model->setWeakCount(weakCount);
        model->setWeightTrimRate(weightTrimRate);
    }
};
// Legacy-style parameter bundle for cv::ml::ANN_MLP (mirrors the old
// CvANN_MLP_TrainParams). Holds settings for both training methods;
// only the fields matching trainMethod are meaningful.
template<>
class MLParams<ANN_MLP>
{
public:
// Defaults: symmetric sigmoid activation, RPROP training, and a
// 1000-iteration / 0.01-epsilon stop criterion.
MLParams()
{
layerSizes = Mat();
activateFunc = ANN_MLP::SIGMOID_SYM;
fparam1 = fparam2 = 0;
termCrit = TermCriteria( TermCriteria::COUNT + TermCriteria::EPS, 1000, 0.01 );
trainMethod = ANN_MLP::RPROP;
bpDWScale = bpMomentScale = 0.1;
rpDW0 = 0.1; rpDWPlus = 1.2; rpDWMinus = 0.5;
rpDWMin = FLT_EPSILON; rpDWMax = 50.;
}
// _param1/_param2 are interpreted according to _trainMethod:
//   RPROP:    _param1 -> rpDW0,     _param2 -> rpDWMin (clamped to >= 0)
//   BACKPROP: _param1 -> bpDWScale, _param2 -> bpMomentScale (both clamped)
// Any other _trainMethod value falls back to RPROP with the baseline
// values set below.
MLParams(const Mat& _layerSizes, int _activateFunc, double _fparam1, double _fparam2,
TermCriteria _termCrit, int _trainMethod, double _param1, double _param2=0 )
{
layerSizes = _layerSizes;
activateFunc = _activateFunc;
fparam1 = _fparam1;
fparam2 = _fparam2;
termCrit = _termCrit;
trainMethod = _trainMethod;
// Baseline values; the branch below overrides those belonging to the
// selected training method.
// NOTE(review): rpDW0 starts at 1. here but 0.1 in the default ctor —
// presumably matching the legacy CvANN_MLP_TrainParams; confirm before
// "fixing" the inconsistency.
bpDWScale = bpMomentScale = 0.1;
rpDW0 = 1.; rpDWPlus = 1.2; rpDWMinus = 0.5;
rpDWMin = FLT_EPSILON; rpDWMax = 50.;
if( trainMethod == ANN_MLP::RPROP )
{
rpDW0 = _param1;
// Reject zero/near-zero initial update values.
if( rpDW0 < FLT_EPSILON )
rpDW0 = 1.;
rpDWMin = _param2;
rpDWMin = std::max( rpDWMin, 0. );
}
else if( trainMethod == ANN_MLP::BACKPROP )
{
bpDWScale = _param1;
if( bpDWScale <= 0 )
bpDWScale = 0.1;
// Clamp the weight scale into [1e-3, 1].
bpDWScale = std::max( bpDWScale, 1e-3 );
bpDWScale = std::min( bpDWScale, 1. );
bpMomentScale = _param2;
if( bpMomentScale < 0 )
bpMomentScale = 0.1;
// Momentum is capped at 1 but, unlike bpDWScale, 0 is allowed.
bpMomentScale = std::min( bpMomentScale, 1. );
}
else
trainMethod = ANN_MLP::RPROP;
}
Mat layerSizes;        // forwarded to setLayerSizes()
int activateFunc;      // forwarded to setActivationFunction() with fparam1/2
double fparam1;
double fparam2;
TermCriteria termCrit; // forwarded to setTermCriteria()
int trainMethod;       // forwarded to setTrainMethod()
double bpDWScale;      // BACKPROP weight scale
double bpMomentScale;  // BACKPROP momentum scale
double rpDW0;          // RPROP initial update value
double rpDWPlus;       // RPROP increase factor
double rpDWMinus;      // RPROP decrease factor
double rpDWMin;        // RPROP lower bound on the update value
double rpDWMax;        // RPROP upper bound on the update value
// Apply every stored setting to the given model.
void init(Ptr<ANN_MLP> model)
{
model->setLayerSizes(layerSizes);
model->setActivationFunction(activateFunc, fparam1, fparam2);
model->setTermCriteria(termCrit);
model->setTrainMethod(trainMethod);
model->setBackpropWeightScale(bpDWScale);
model->setBackpropMomentumScale(bpMomentScale);
model->setRpropDW0(rpDW0);
model->setRpropDWMin(rpDWMin);
model->setRpropDWMax(rpDWMax);
model->setRpropDWMinus(rpDWMinus);
model->setRpropDWPlus(rpDWPlus);
}
};
template<>
class MLParams<LogisticRegression>
{
public:
MLParams(double learning_rate = 0.001,
int iters = 1000,
int method = LogisticRegression::BATCH,
int normalization = LogisticRegression::REG_L2,
int reg = 1,
int batch_size = 1)
{
alpha = learning_rate;
num_iters = iters;
norm = normalization;
regularized = reg;
train_method = method;
mini_batch_size = batch_size;
term_crit = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, num_iters, alpha);
}
double alpha;
int num_iters;
int norm;
int regularized;
int train_method;
int mini_batch_size;
TermCriteria term_crit;
void init(Ptr<LogisticRegression> model)
{
model->setLearningRate(alpha);
model->setIterations(num_iters);
if (regularized)
model->setRegularization(norm);
else
model->setRegularization(LogisticRegression::REG_NONE);
model->setTrainMethod(train_method);
model->setMiniBatchSize(mini_batch_size);
model->setTermCriteria(term_crit);
}
};
} } // cv::ml
#endif
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment