From: LaurentBerger
Date: Wed, 20 Dec 2017 14:24:46 +0000 (+0100)
Subject: Calcerror uses now weighted samples (#10346)
X-Git-Tag: accepted/tizen/6.0/unified/20201030.111113~265
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=e43997dbb56d5ce60adcde9dddb24b7f9f63922b;p=platform%2Fupstream%2Fopencv.git

Calcerror uses now weighted samples (#10346)

* Calcerror uses now sample weights

* catree comment in #10319
---

diff --git a/modules/ml/src/ann_mlp.cpp b/modules/ml/src/ann_mlp.cpp
index 5652da2..cd10c0d 100644
--- a/modules/ml/src/ann_mlp.cpp
+++ b/modules/ml/src/ann_mlp.cpp
@@ -155,7 +155,7 @@ int SimulatedAnnealingSolver::run()
         if (newEnergy < previousEnergy)
         {
             previousEnergy = newEnergy;
-            //??? exchange++;
+            exchange++;
         }
         else
         {
@@ -405,21 +405,6 @@ public:
                 param2 = 0.1;
             params.bpMomentScale = std::min(param2, 1.);
         }
-/*        else if (method == ANN_MLP::ANNEAL)
-        {
-            if (param1 <= 0)
-                param1 = 10;
-            if (param2 <= 0 || param2>param1)
-                param2 = 0.1;
-            if (param3 <= 0 || param3 >=1)
-                param3 = 0.95;
-            if (param4 <= 0)
-                param4 = 10;
-            params.initialT = param1;
-            params.finalT = param2;
-            params.coolingRatio = param3;
-            params.itePerStep = param4;
-        }*/
     }
 
     int getTrainMethod() const
diff --git a/modules/ml/src/inner_functions.cpp b/modules/ml/src/inner_functions.cpp
index dc03e38..e95674a 100644
--- a/modules/ml/src/inner_functions.cpp
+++ b/modules/ml/src/inner_functions.cpp
@@ -94,26 +94,29 @@ public:
         int idxErr = range.start;
         CV_TRACE_FUNCTION_SKIP_NESTED();
         Mat samples = data->getSamples();
+        Mat weights=testerr? data->getTestSampleWeights() : data->getTrainSampleWeights();
         int layout = data->getLayout();
         Mat sidx = testerr ? data->getTestSampleIdx() : data->getTrainSampleIdx();
         const int* sidx_ptr = sidx.ptr<int>();
         bool isclassifier = s.isClassifier();
         Mat responses = data->getResponses();
         int responses_type = responses.type();
-        double err = 0;
+
+        const float* sw = weights.empty() ? 0 : weights.ptr<float>();
         for (int i = range.start; i < range.end; i++)
         {
             int si = sidx_ptr ? sidx_ptr[i] : i;
+            double sweight = sw ? static_cast<double>(sw[i]) : 1.;
             Mat sample = layout == ROW_SAMPLE ? samples.row(si) : samples.col(si);
             float val = s.predict(sample);
             float val0 = (responses_type == CV_32S) ? (float)responses.at<int>(si) : responses.at<float>(si);
 
             if (isclassifier)
-                err += fabs(val - val0) > FLT_EPSILON;
+                err += sweight * fabs(val - val0) > FLT_EPSILON;
             else
-                err += (val - val0)*(val - val0);
+                err += sweight * (val - val0)*(val - val0);
             if (!resp.empty())
                 resp.at<float>(i) = val;
         }
@@ -133,12 +136,17 @@ float StatModel::calcError(const Ptr<TrainData>& data, bool testerr, OutputArray
     CV_TRACE_FUNCTION_SKIP_NESTED();
     Mat samples = data->getSamples();
     Mat sidx = testerr ? data->getTestSampleIdx() : data->getTrainSampleIdx();
+    Mat weights = testerr ? data->getTestSampleWeights() : data->getTrainSampleWeights();
     int n = (int)sidx.total();
     bool isclassifier = isClassifier();
     Mat responses = data->getResponses();
 
     if (n == 0)
+    {
         n = data->getNSamples();
+        weights = data->getTrainSampleWeights();
+        testerr =false;
+    }
 
     if (n == 0)
         return -FLT_MAX;
@@ -155,11 +163,11 @@ float StatModel::calcError(const Ptr<TrainData>& data, bool testerr, OutputArray
     for (size_t i = 0; i < errStrip.size(); i++)
         err += errStrip[i];
-
+    float weightSum= weights.empty() ? n: static_cast<float>(sum(weights)(0));
     if (_resp.needed())
         resp.copyTo(_resp);
 
-    return (float)(err / n * (isclassifier ? 100 : 1));
+    return (float)(err/ weightSum * (isclassifier ? 100 : 1));
 }
 
 /* Calculates upper triangular matrix S, where A is a symmetrical matrix A=S'*S */