bool UpgradeV0LayerParameter(V1LayerParameter* v0_layer_connection_,
                             V1LayerParameter* layer_param) {
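+ // Reject a null input up front; it is dereferenced just below.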
+ CV_Assert(v0_layer_connection_ != NULL);
  const V1LayerParameter& v0_layer_connection = *v0_layer_connection_;
  bool is_fully_compatible = true;
  layer_param->Clear();
bool UpgradeV1Net(NetParameter* net_param) {
  // V1LayerParameter layers -> LayerParameter layer
+ CV_Assert(net_param != NULL);
  bool is_fully_compatible = true;
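+ // New-style 'layer' fields should not be present in a net that still needs upgrading.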
  if (net_param->layer_size() > 0) {
    LOG(ERROR) << "Input NetParameter to be upgraded already specifies 'layer' "
bool UpgradeV1LayerParameter(V1LayerParameter* v1_layer_param_,
                             LayerParameter* layer_param) {
+ CV_Assert(v1_layer_param_ != NULL);
  const V1LayerParameter& v1_layer_param = *v1_layer_param_;
  layer_param->Clear();
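+ // Set to false whenever a V1 field cannot be expressed in the new format.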
  bool is_fully_compatible = true;
{
public:
  enum { VEC_ALIGN = 8, DFT_TYPE = CV_32F };
- Mat weightsMat, weightsMat_doubles;
+ Mat weightsMat;
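+ // Per-output-channel scales accumulated across weight fusions; lets each row
+ // be recomputed from the original blob instead of keeping a CV_64F copy.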
+ std::vector<double> weightsMultipliers;
  std::vector<float> biasvec;
  std::vector<float> reluslope;
  Ptr<ActivationLayer> activ;
    wm = wm_aligned;
  }
  weightsMat = wm;
- weightsMat.convertTo(weightsMat_doubles, CV_64F);
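+ // Every output channel starts with an identity scale factor.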
+ weightsMultipliers.assign(outCn, 1.0);

  Mat biasMat = hasBias() ? blobs[1].reshape(1, outCn) : Mat();
  biasvec.resize(outCn+2);

  if (!w.empty())
  {
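+   // Always scale from the untouched origin weights: if fusion runs twice with
+   // scales s1 and s2, each row is recomputed as origin * (s1 * s2) in a single
+   // float rounding step, instead of rounding the stored rows once per fusion.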
+   Mat originWeights = blobs[0].reshape(1, outCn);
    for (int i = 0; i < outCn; ++i)
    {
      double wi = w.at<float>(i);
-     cv::multiply(slice(weightsMat_doubles, i), wi, slice(weightsMat_doubles, i));
+     weightsMultipliers[i] *= wi;
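+     // Rebuild the row from the origin weights and the accumulated scale.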
+     cv::multiply(originWeights.row(i), weightsMultipliers[i], weightsMat.row(i));
      biasvec[i] *= wi;
    }
-   weightsMat_doubles.convertTo(weightsMat, weightsMat.type());
  }
  if (!b.empty())