//bool update(InputArray image, CV_OUT Rect& boundingBox) CV_OVERRIDE;
};
+class CV_EXPORTS_W TrackerDaSiamRPN : public Tracker
+{
+protected:
+ TrackerDaSiamRPN(); // use ::create()
+public:
+ virtual ~TrackerDaSiamRPN() CV_OVERRIDE;
+
+ struct CV_EXPORTS_W_SIMPLE Params
+ {
+ CV_WRAP Params();
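+ // Paths to the ONNX models (the main network and the two correlation-kernel
+ // heads) and the preferred DNN backend/target (dnn::Backend / dnn::Target values).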
+ CV_PROP_RW std::string model;
+ CV_PROP_RW std::string kernel_cls1;
+ CV_PROP_RW std::string kernel_r1;
+ CV_PROP_RW int backend;
+ CV_PROP_RW int target;
+ };
+
+ /** @brief Constructor
+ @param parameters DaSiamRPN parameters, see TrackerDaSiamRPN::Params
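+
+ A minimal usage sketch (frame/nextFrame/roi are placeholders for video frames
+ and a user-selected box; assumes the default ONNX model files are reachable):
+ @code
+ TrackerDaSiamRPN::Params params; // defaults to dasiamrpn_model.onnx and the two kernel files
+ Ptr<TrackerDaSiamRPN> tracker = TrackerDaSiamRPN::create(params);
+ tracker->init(frame, roi);
+ bool ok = tracker->update(nextFrame, roi);
+ float score = tracker->getTrackingScore();
+ @endcode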
+ */
+ static CV_WRAP
+ Ptr<TrackerDaSiamRPN> create(const TrackerDaSiamRPN::Params& parameters = TrackerDaSiamRPN::Params());
+
+ /** @brief Return tracking score
+ */
+ CV_WRAP virtual float getTrackingScore() = 0;
+
+ //void init(InputArray image, const Rect& boundingBox) CV_OVERRIDE;
+ //bool update(InputArray image, CV_OUT Rect& boundingBox) CV_OVERRIDE;
+};
//! @} video_track
#ifdef HAVE_OPENCV_VIDEO
typedef TrackerMIL::Params TrackerMIL_Params;
typedef TrackerGOTURN::Params TrackerGOTURN_Params;
+typedef TrackerDaSiamRPN::Params TrackerDaSiamRPN_Params;
#endif
--- /dev/null
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+
+#include "../precomp.hpp"
+
+#ifdef HAVE_OPENCV_DNN
+#include "opencv2/dnn.hpp"
+#endif
+
+namespace cv {
+
+TrackerDaSiamRPN::TrackerDaSiamRPN()
+{
+ // nothing
+}
+
+TrackerDaSiamRPN::~TrackerDaSiamRPN()
+{
+ // nothing
+}
+
+TrackerDaSiamRPN::Params::Params()
+{
+ model = "dasiamrpn_model.onnx";
+ kernel_cls1 = "dasiamrpn_kernel_cls1.onnx";
+ kernel_r1 = "dasiamrpn_kernel_r1.onnx";
+#ifdef HAVE_OPENCV_DNN
+ backend = dnn::DNN_BACKEND_DEFAULT;
+ target = dnn::DNN_TARGET_CPU;
+#else
+ backend = -1; // invalid value
+ target = -1; // invalid value
+#endif
+}
+
+#ifdef HAVE_OPENCV_DNN
+
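+// Context-padded equivalent size used throughout SiamRPN-style trackers:
+// sqrt((w + pad) * (h + pad)) with pad = (w + h) / 2.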
+template <typename T> static
+T sizeCal(const T& w, const T& h)
+{
+ T pad = (w + h) * T(0.5);
+ T sz2 = (w + pad) * (h + pad);
+ return sqrt(sz2);
+}
+
+template <>
+Mat sizeCal(const Mat& w, const Mat& h)
+{
+ Mat pad = (w + h) * 0.5;
+ Mat sz2 = (w + pad).mul((h + pad));
+
+ cv::sqrt(sz2, sz2);
+ return sz2;
+}
+
+class TrackerDaSiamRPNImpl : public TrackerDaSiamRPN
+{
+public:
+ TrackerDaSiamRPNImpl(const TrackerDaSiamRPN::Params& parameters)
+ : params(parameters)
+ {
+
+ siamRPN = dnn::readNet(params.model);
+ siamKernelCL1 = dnn::readNet(params.kernel_cls1);
+ siamKernelR1 = dnn::readNet(params.kernel_r1);
+
+ CV_Assert(!siamRPN.empty());
+ CV_Assert(!siamKernelCL1.empty());
+ CV_Assert(!siamKernelR1.empty());
+
+ siamRPN.setPreferableBackend(params.backend);
+ siamRPN.setPreferableTarget(params.target);
+ siamKernelR1.setPreferableBackend(params.backend);
+ siamKernelR1.setPreferableTarget(params.target);
+ siamKernelCL1.setPreferableBackend(params.backend);
+ siamKernelCL1.setPreferableTarget(params.target);
+ }
+
+ void init(InputArray image, const Rect& boundingBox) CV_OVERRIDE;
+ bool update(InputArray image, Rect& boundingBox) CV_OVERRIDE;
+ float getTrackingScore() CV_OVERRIDE;
+
+ TrackerDaSiamRPN::Params params;
+
+protected:
+ dnn::Net siamRPN, siamKernelR1, siamKernelCL1;
+ Rect boundingBox_;
+ Mat image_;
+ struct trackerConfig
+ {
+ float windowInfluence = 0.43f;
+ float lr = 0.4f;
+ int scale = 8;
+ bool swapRB = false;
+ int totalStride = 8;
+ float penaltyK = 0.055f;
+ int exemplarSize = 127;
+ int instanceSize = 271;
+ float contextAmount = 0.5f;
+ std::vector<float> ratios = { 0.33f, 0.5f, 1.0f, 2.0f, 3.0f };
+ int anchorNum = int(ratios.size());
+ Mat anchors;
+ Mat windows;
+ Scalar avgChans;
+ Size imgSize = { 0, 0 };
+ Rect2f targetBox = { 0, 0, 0, 0 };
+ int scoreSize = (instanceSize - exemplarSize) / totalStride + 1;
+ float tracking_score = 0.0f; // updated on each trackerEval() call
+
+ void update_scoreSize()
+ {
+ scoreSize = int((instanceSize - exemplarSize) / totalStride + 1);
+ }
+ };
+ trackerConfig trackState;
+
+ void softmax(const Mat& src, Mat& dst);
+ void elementMax(Mat& src);
+ Mat generateHanningWindow();
+ Mat generateAnchors();
+ Mat getSubwindow(Mat& img, const Rect2f& targetBox, float originalSize, Scalar avgChans);
+ void trackerInit(Mat img);
+ void trackerEval(Mat img);
+};
+
+void TrackerDaSiamRPNImpl::init(InputArray image, const Rect& boundingBox)
+{
+ image_ = image.getMat().clone();
+
+ trackState.update_scoreSize();
+ trackState.targetBox = Rect2f(
+ float(boundingBox.x) + float(boundingBox.width) * 0.5f, // FIXIT don't use center in Rect structures, it is confusing
+ float(boundingBox.y) + float(boundingBox.height) * 0.5f,
+ float(boundingBox.width),
+ float(boundingBox.height)
+ );
+ trackerInit(image_);
+}
+
+void TrackerDaSiamRPNImpl::trackerInit(Mat img)
+{
+ Rect2f targetBox = trackState.targetBox;
+ Mat anchors = generateAnchors();
+ trackState.anchors = anchors;
+
+ Mat windows = generateHanningWindow();
+
+ trackState.windows = windows;
+ trackState.imgSize = img.size();
+
+ trackState.avgChans = mean(img);
+ float wc = targetBox.width + trackState.contextAmount * (targetBox.width + targetBox.height);
+ float hc = targetBox.height + trackState.contextAmount * (targetBox.width + targetBox.height);
+ float sz = (float)cvRound(sqrt(wc * hc));
+
+ Mat zCrop = getSubwindow(img, targetBox, sz, trackState.avgChans);
+ Mat blob;
+
+ dnn::blobFromImage(zCrop, blob, 1.0, Size(trackState.exemplarSize, trackState.exemplarSize), Scalar(), trackState.swapRB, false, CV_32F);
+ siamRPN.setInput(blob);
+ Mat out1;
+ siamRPN.forward(out1, "63");
+
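+ // Push the template features through the two kernel networks and write their
+ // outputs back as the correlation kernels (the weights of layers "65" and "68")
+ // of the RPN head, so later forward passes correlate against this template.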
+ siamKernelCL1.setInput(out1);
+ siamKernelR1.setInput(out1);
+
+ Mat cls1 = siamKernelCL1.forward();
+ Mat r1 = siamKernelR1.forward();
+ std::vector<int> r1_shape = { 20, 256, 4, 4 }, cls1_shape = { 10, 256, 4, 4 };
+
+ siamRPN.setParam(siamRPN.getLayerId("65"), 0, r1.reshape(0, r1_shape));
+ siamRPN.setParam(siamRPN.getLayerId("68"), 0, cls1.reshape(0, cls1_shape));
+}
+
+bool TrackerDaSiamRPNImpl::update(InputArray image, Rect& boundingBox)
+{
+ image_ = image.getMat().clone();
+ trackerEval(image_);
+ boundingBox = {
+ int(trackState.targetBox.x - int(trackState.targetBox.width / 2)),
+ int(trackState.targetBox.y - int(trackState.targetBox.height / 2)),
+ int(trackState.targetBox.width),
+ int(trackState.targetBox.height)
+ };
+ return true;
+}
+
+void TrackerDaSiamRPNImpl::trackerEval(Mat img)
+{
+ Rect2f targetBox = trackState.targetBox;
+
+ float wc = targetBox.height + trackState.contextAmount * (targetBox.width + targetBox.height);
+ float hc = targetBox.width + trackState.contextAmount * (targetBox.width + targetBox.height);
+
+ float sz = sqrt(wc * hc);
+ float scaleZ = trackState.exemplarSize / sz;
+
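+ // Grow the exemplar region into the search region: pad maps the
+ // (instanceSize - exemplarSize) margin back to original-image scale.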
+ float searchSize = float((trackState.instanceSize - trackState.exemplarSize) / 2);
+ float pad = searchSize / scaleZ;
+ float sx = sz + 2 * pad;
+
+ Mat xCrop = getSubwindow(img, targetBox, (float)cvRound(sx), trackState.avgChans);
+
+ Mat blob;
+ std::vector<Mat> outs;
+ std::vector<String> outNames;
+ Mat delta, score;
+ Mat sc, rc, penalty, pscore;
+
+ dnn::blobFromImage(xCrop, blob, 1.0, Size(trackState.instanceSize, trackState.instanceSize), Scalar(), trackState.swapRB, false, CV_32F);
+
+ siamRPN.setInput(blob);
+
+ outNames = siamRPN.getUnconnectedOutLayersNames();
+ siamRPN.forward(outs, outNames);
+
+ delta = outs[0];
+ score = outs[1];
+
+ score = score.reshape(0, { 2, trackState.anchorNum, trackState.scoreSize, trackState.scoreSize });
+ delta = delta.reshape(0, { 4, trackState.anchorNum, trackState.scoreSize, trackState.scoreSize });
+
+ softmax(score, score);
+
+ targetBox.width *= scaleZ;
+ targetBox.height *= scaleZ;
+
+ score = score.row(1);
+ score = score.reshape(0, { trackState.anchorNum, trackState.scoreSize, trackState.scoreSize });
+
+ // Post processing
+ delta.row(0) = delta.row(0).mul(trackState.anchors.row(2)) + trackState.anchors.row(0);
+ delta.row(1) = delta.row(1).mul(trackState.anchors.row(3)) + trackState.anchors.row(1);
+ exp(delta.row(2), delta.row(2));
+ delta.row(2) = delta.row(2).mul(trackState.anchors.row(2));
+ exp(delta.row(3), delta.row(3));
+ delta.row(3) = delta.row(3).mul(trackState.anchors.row(3));
+
+ sc = sizeCal(delta.row(2), delta.row(3)) / sizeCal(targetBox.width, targetBox.height);
+ elementMax(sc);
+
+ rc = delta.row(2).mul(1 / delta.row(3));
+ rc = (targetBox.width / targetBox.height) / rc;
+ elementMax(rc);
+
+ // Calculating the penalty
+ exp(((rc.mul(sc) - 1.) * trackState.penaltyK * (-1.0)), penalty);
+ penalty = penalty.reshape(0, { trackState.anchorNum, trackState.scoreSize, trackState.scoreSize });
+
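+ // Blend the penalized scores with the Hanning window prior (cosine window regularization).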
+ pscore = penalty.mul(score);
+ pscore = pscore * (1.0 - trackState.windowInfluence) + trackState.windows * trackState.windowInfluence;
+
+ int bestID[] = { 0 };
+ // Find the index of best score.
+ minMaxIdx(pscore.reshape(0, { trackState.anchorNum * trackState.scoreSize * trackState.scoreSize, 1 }), 0, 0, 0, bestID);
+ delta = delta.reshape(0, { 4, trackState.anchorNum * trackState.scoreSize * trackState.scoreSize });
+ penalty = penalty.reshape(0, { trackState.anchorNum * trackState.scoreSize * trackState.scoreSize, 1 });
+ score = score.reshape(0, { trackState.anchorNum * trackState.scoreSize * trackState.scoreSize, 1 });
+
+ int index[] = { 0, bestID[0] };
+ Rect2f resBox = { 0, 0, 0, 0 };
+
+ resBox.x = delta.at<float>(index) / scaleZ;
+ index[0] = 1;
+ resBox.y = delta.at<float>(index) / scaleZ;
+ index[0] = 2;
+ resBox.width = delta.at<float>(index) / scaleZ;
+ index[0] = 3;
+ resBox.height = delta.at<float>(index) / scaleZ;
+
+ float lr = penalty.at<float>(bestID) * score.at<float>(bestID) * trackState.lr;
+
+ resBox.x = resBox.x + targetBox.x;
+ resBox.y = resBox.y + targetBox.y;
+ targetBox.width /= scaleZ;
+ targetBox.height /= scaleZ;
+
+ resBox.width = targetBox.width * (1 - lr) + resBox.width * lr;
+ resBox.height = targetBox.height * (1 - lr) + resBox.height * lr;
+
+ resBox.x = float(fmax(0., fmin(float(trackState.imgSize.width), resBox.x)));
+ resBox.y = float(fmax(0., fmin(float(trackState.imgSize.height), resBox.y)));
+ resBox.width = float(fmax(10., fmin(float(trackState.imgSize.width), resBox.width)));
+ resBox.height = float(fmax(10., fmin(float(trackState.imgSize.height), resBox.height)));
+
+ trackState.targetBox = resBox;
+ trackState.tracking_score = score.at<float>(bestID);
+}
+
+float TrackerDaSiamRPNImpl::getTrackingScore()
+{
+ return trackState.tracking_score;
+}
+
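+// Numerically stable two-class softmax over rows 0 and 1. The max subtraction
+// writes through the (shallow-const) Mat headers, so src is modified in place;
+// trackerEval() calls this with src and dst aliasing the same Mat.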
+void TrackerDaSiamRPNImpl::softmax(const Mat& src, Mat& dst)
+{
+ Mat maxVal;
+ cv::max(src.row(1), src.row(0), maxVal);
+
+ src.row(1) -= maxVal;
+ src.row(0) -= maxVal;
+
+ exp(src, dst);
+
+ Mat sumVal = dst.row(0) + dst.row(1);
+ dst.row(0) = dst.row(0) / sumVal;
+ dst.row(1) = dst.row(1) / sumVal;
+}
+
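+// Replace each element v with max(v, 1/v): the symmetric change penalty applied
+// to the size (sc) and aspect-ratio (rc) terms.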
+void TrackerDaSiamRPNImpl::elementMax(Mat& src)
+{
+ int* p = src.size.p;
+ int index[] = { 0, 0, 0, 0 };
+ for (int n = 0; n < *p; n++)
+ {
+ for (int k = 0; k < *(p + 1); k++)
+ {
+ for (int i = 0; i < *(p + 2); i++)
+ {
+ for (int j = 0; j < *(p + 3); j++)
+ {
+ index[0] = n, index[1] = k, index[2] = i, index[3] = j;
+ float& v = src.at<float>(index);
+ v = fmax(v, 1.0f / v);
+ }
+ }
+ }
+ }
+}
+
+Mat TrackerDaSiamRPNImpl::generateHanningWindow()
+{
+ Mat baseWindows, HanningWindows;
+
+ createHanningWindow(baseWindows, Size(trackState.scoreSize, trackState.scoreSize), CV_32F);
+ baseWindows = baseWindows.reshape(0, { 1, trackState.scoreSize, trackState.scoreSize });
+ HanningWindows = baseWindows.clone();
+ for (int i = 1; i < trackState.anchorNum; i++)
+ {
+ HanningWindows.push_back(baseWindows);
+ }
+
+ return HanningWindows;
+}
+
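+// Build the anchor tensor of shape {4, anchorNum, scoreSize, scoreSize}:
+// channels 0/1 hold the anchor center (x, y) on the score grid,
+// channels 2/3 the anchor width/height for each aspect ratio.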
+Mat TrackerDaSiamRPNImpl::generateAnchors()
+{
+ int totalStride = trackState.totalStride, scales = trackState.scale, scoreSize = trackState.scoreSize;
+ std::vector<float> ratios = trackState.ratios;
+ std::vector<Rect2f> baseAnchors;
+ int anchorNum = int(ratios.size());
+ int size = totalStride * totalStride;
+
+ float ori = -(float(scoreSize / 2)) * float(totalStride);
+
+ for (auto i = 0; i < anchorNum; i++)
+ {
+ int ws = int(sqrt(size / ratios[i]));
+ int hs = int(ws * ratios[i]);
+
+ float wws = float(ws) * scales;
+ float hhs = float(hs) * scales;
+ Rect2f anchor = { 0, 0, wws, hhs };
+ baseAnchors.push_back(anchor);
+ }
+
+ int anchorIndex[] = { 0, 0, 0, 0 };
+ const int sizes[] = { 4, (int)ratios.size(), scoreSize, scoreSize };
+ Mat anchors(4, sizes, CV_32F);
+
+ for (auto i = 0; i < scoreSize; i++)
+ {
+ for (auto j = 0; j < scoreSize; j++)
+ {
+ for (auto k = 0; k < anchorNum; k++)
+ {
+ anchorIndex[0] = 1, anchorIndex[1] = k, anchorIndex[2] = i, anchorIndex[3] = j;
+ anchors.at<float>(anchorIndex) = ori + totalStride * i;
+
+ anchorIndex[0] = 0;
+ anchors.at<float>(anchorIndex) = ori + totalStride * j;
+
+ anchorIndex[0] = 2;
+ anchors.at<float>(anchorIndex) = baseAnchors[k].width;
+
+ anchorIndex[0] = 3;
+ anchors.at<float>(anchorIndex) = baseAnchors[k].height;
+ }
+ }
+ }
+
+ return anchors;
+}
+
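+// Crop a square patch of side originalSize centered on targetBox (whose x/y hold
+// the box center here), padding with the mean channel values where the crop
+// extends past the image border.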
+Mat TrackerDaSiamRPNImpl::getSubwindow(Mat& img, const Rect2f& targetBox, float originalSize, Scalar avgChans)
+{
+ Mat zCrop, dst;
+ Size imgSize = img.size();
+ float c = (originalSize + 1) / 2;
+ float xMin = (float)cvRound(targetBox.x - c);
+ float xMax = xMin + originalSize - 1;
+ float yMin = (float)cvRound(targetBox.y - c);
+ float yMax = yMin + originalSize - 1;
+
+ int leftPad = (int)(fmax(0., -xMin));
+ int topPad = (int)(fmax(0., -yMin));
+ int rightPad = (int)(fmax(0., xMax - imgSize.width + 1));
+ int bottomPad = (int)(fmax(0., yMax - imgSize.height + 1));
+
+ xMin = xMin + leftPad;
+ xMax = xMax + leftPad;
+ yMax = yMax + topPad;
+ yMin = yMin + topPad;
+
+ if (topPad == 0 && bottomPad == 0 && leftPad == 0 && rightPad == 0)
+ {
+ img(Rect(int(xMin), int(yMin), int(xMax - xMin + 1), int(yMax - yMin + 1))).copyTo(zCrop);
+ }
+ else
+ {
+ copyMakeBorder(img, dst, topPad, bottomPad, leftPad, rightPad, BORDER_CONSTANT, avgChans);
+ dst(Rect(int(xMin), int(yMin), int(xMax - xMin + 1), int(yMax - yMin + 1))).copyTo(zCrop);
+ }
+
+ return zCrop;
+}
+
+Ptr<TrackerDaSiamRPN> TrackerDaSiamRPN::create(const TrackerDaSiamRPN::Params& parameters)
+{
+ return makePtr<TrackerDaSiamRPNImpl>(parameters);
+}
+
+#else // HAVE_OPENCV_DNN
+Ptr<TrackerDaSiamRPN> TrackerDaSiamRPN::create(const TrackerDaSiamRPN::Params& parameters)
+{
+ (void)(parameters);
+ CV_Error(cv::Error::StsNotImplemented, "to use DaSiamRPN, the tracking module needs to be built with opencv_dnn!");
+}
+#endif // HAVE_OPENCV_DNN
+}
}
}
+TEST(DaSiamRPN, memory_usage)
+{
+ cv::Rect roi(145, 70, 85, 85);
+
+ std::string model = cvtest::findDataFile("dnn/onnx/models/dasiamrpn_model.onnx", false);
+ std::string kernel_r1 = cvtest::findDataFile("dnn/onnx/models/dasiamrpn_kernel_r1.onnx", false);
+ std::string kernel_cls1 = cvtest::findDataFile("dnn/onnx/models/dasiamrpn_kernel_cls1.onnx", false);
+ cv::TrackerDaSiamRPN::Params params;
+ params.model = model;
+ params.kernel_r1 = kernel_r1;
+ params.kernel_cls1 = kernel_cls1;
+ cv::Ptr<Tracker> tracker = TrackerDaSiamRPN::create(params);
+
+ string inputVideo = cvtest::findDataFile("tracking/david/data/david.webm");
+ cv::VideoCapture video(inputVideo);
+ ASSERT_TRUE(video.isOpened()) << inputVideo;
+
+ cv::Mat frame;
+ video >> frame;
+ ASSERT_FALSE(frame.empty()) << inputVideo;
+ tracker->init(frame, roi);
+ for (int nframes = 0; nframes < 15; ++nframes)
+ {
+ std::cout << "Frame: " << nframes << std::endl;
+ video >> frame;
+ bool res = tracker->update(frame, roi);
+ ASSERT_TRUE(res);
+ std::cout << "Predicted ROI: " << roi << std::endl;
+ }
+}
+
}} // namespace opencv_test::
opencv_core
opencv_imgproc
opencv_dnn
+ opencv_video
opencv_imgcodecs
opencv_videoio
opencv_highgui)
#include <opencv2/dnn.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
+#include <opencv2/video.hpp>
using namespace cv;
using namespace cv::dnn;
"3: VPU }"
;
-// Initial parameters of the model
-struct trackerConfig
-{
- float windowInfluence = 0.43f;
- float lr = 0.4f;
- int scale = 8;
- bool swapRB = false;
- int totalStride = 8;
- float penaltyK = 0.055f;
- int exemplarSize = 127;
- int instanceSize = 271;
- float contextAmount = 0.5f;
- std::vector<float> ratios = { 0.33f, 0.5f, 1.0f, 2.0f, 3.0f };
- int anchorNum = int(ratios.size());
- Mat anchors;
- Mat windows;
- Scalar avgChans;
- Size imgSize = { 0, 0 };
- Rect2f targetBox = { 0, 0, 0, 0 };
- int scoreSize = (instanceSize - exemplarSize) / totalStride + 1;
-
- void update_scoreSize()
- {
- scoreSize = int((instanceSize - exemplarSize) / totalStride + 1);
- }
-};
-
-static void softmax(const Mat& src, Mat& dst);
-static void elementMax(Mat& src);
-static Mat generateHanningWindow(const trackerConfig& trackState);
-static Mat generateAnchors(trackerConfig& trackState);
-static Mat getSubwindow(Mat& img, const Rect2f& targetBox, float originalSize, Scalar avgChans);
-static float trackerEval(Mat img, trackerConfig& trackState, Net& siamRPN);
-static void trackerInit(Mat img, trackerConfig& trackState, Net& siamRPN, Net& siamKernelR1, Net& siamKernelCL1);
-
-template <typename T> static
-T sizeCal(const T& w, const T& h)
-{
- T pad = (w + h) * T(0.5);
- T sz2 = (w + pad) * (h + pad);
- return sqrt(sz2);
-}
-
-template <>
-Mat sizeCal(const Mat& w, const Mat& h)
-{
- Mat pad = (w + h) * 0.5;
- Mat sz2 = (w + pad).mul((h + pad));
-
- cv::sqrt(sz2, sz2);
- return sz2;
-}
-
static
int run(int argc, char** argv)
{
int backend = parser.get<int>("backend");
int target = parser.get<int>("target");
- // Read nets.
- Net siamRPN, siamKernelCL1, siamKernelR1;
+ Ptr<TrackerDaSiamRPN> tracker;
try
{
- siamRPN = readNet(samples::findFile(net));
- siamKernelCL1 = readNet(samples::findFile(kernel_cls1));
- siamKernelR1 = readNet(samples::findFile(kernel_r1));
+ TrackerDaSiamRPN::Params params;
+ params.model = samples::findFile(net);
+ params.kernel_cls1 = samples::findFile(kernel_cls1);
+ params.kernel_r1 = samples::findFile(kernel_r1);
+ params.backend = backend;
+ params.target = target;
+ tracker = TrackerDaSiamRPN::create(params);
}
catch (const cv::Exception& ee)
{
return 2;
}
- // Set model backend.
- siamRPN.setPreferableBackend(backend);
- siamRPN.setPreferableTarget(target);
- siamKernelR1.setPreferableBackend(backend);
- siamKernelR1.setPreferableTarget(target);
- siamKernelCL1.setPreferableBackend(backend);
- siamKernelCL1.setPreferableTarget(target);
-
const std::string winName = "DaSiamRPN";
namedWindow(winName, WINDOW_AUTOSIZE);
Rect selectRect = selectROI(winName, image_select);
std::cout << "ROI=" << selectRect << std::endl;
- trackerConfig trackState;
- trackState.update_scoreSize();
- trackState.targetBox = Rect2f(
- float(selectRect.x) + float(selectRect.width) * 0.5f, // FIXIT don't use center in Rect structures, it is confusing
- float(selectRect.y) + float(selectRect.height) * 0.5f,
- float(selectRect.width),
- float(selectRect.height)
- );
-
- // Set tracking template.
- trackerInit(image, trackState, siamRPN, siamKernelR1, siamKernelCL1);
+ tracker->init(image, selectRect);
TickMeter tickMeter;
break;
}
+ Rect rect;
+
tickMeter.start();
- float score = trackerEval(image, trackState, siamRPN);
+ bool ok = tracker->update(image, rect);
tickMeter.stop();
- Rect rect = {
- int(trackState.targetBox.x - int(trackState.targetBox.width / 2)),
- int(trackState.targetBox.y - int(trackState.targetBox.height / 2)),
- int(trackState.targetBox.width),
- int(trackState.targetBox.height)
- };
+ float score = tracker->getTrackingScore();
+
std::cout << "frame " << count <<
": predicted score=" << score <<
" rect=" << rect <<
std::endl;
Mat render_image = image.clone();
- rectangle(render_image, rect, Scalar(0, 255, 0), 2);
- std::string timeLabel = format("Inference time: %.2f ms", tickMeter.getTimeMilli());
- std::string scoreLabel = format("Score: %f", score);
- putText(render_image, timeLabel, Point(0, 15), FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0, 255, 0));
- putText(render_image, scoreLabel, Point(0, 35), FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0, 255, 0));
+ if (ok)
+ {
+ rectangle(render_image, rect, Scalar(0, 255, 0), 2);
+
+ std::string timeLabel = format("Inference time: %.2f ms", tickMeter.getTimeMilli());
+ std::string scoreLabel = format("Score: %f", score);
+ putText(render_image, timeLabel, Point(0, 15), FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0, 255, 0));
+ putText(render_image, scoreLabel, Point(0, 35), FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0, 255, 0));
+ }
imshow(winName, render_image);
return 0;
}
-Mat generateHanningWindow(const trackerConfig& trackState)
-{
- Mat baseWindows, HanningWindows;
-
- createHanningWindow(baseWindows, Size(trackState.scoreSize, trackState.scoreSize), CV_32F);
- baseWindows = baseWindows.reshape(0, { 1, trackState.scoreSize, trackState.scoreSize });
- HanningWindows = baseWindows.clone();
- for (int i = 1; i < trackState.anchorNum; i++)
- {
- HanningWindows.push_back(baseWindows);
- }
-
- return HanningWindows;
-}
-
-Mat generateAnchors(trackerConfig& trackState)
-{
- int totalStride = trackState.totalStride, scales = trackState.scale, scoreSize = trackState.scoreSize;
- std::vector<float> ratios = trackState.ratios;
- std::vector<Rect2f> baseAnchors;
- int anchorNum = int(ratios.size());
- int size = totalStride * totalStride;
-
- float ori = -(float(scoreSize / 2)) * float(totalStride);
-
- for (auto i = 0; i < anchorNum; i++)
- {
- int ws = int(sqrt(size / ratios[i]));
- int hs = int(ws * ratios[i]);
-
- float wws = float(ws) * scales;
- float hhs = float(hs) * scales;
- Rect2f anchor = { 0, 0, wws, hhs };
- baseAnchors.push_back(anchor);
- }
-
- int anchorIndex[] = { 0, 0, 0, 0 };
- const int sizes[] = { 4, (int)ratios.size(), scoreSize, scoreSize };
- Mat anchors(4, sizes, CV_32F);
-
- for (auto i = 0; i < scoreSize; i++)
- {
- for (auto j = 0; j < scoreSize; j++)
- {
- for (auto k = 0; k < anchorNum; k++)
- {
- anchorIndex[0] = 1, anchorIndex[1] = k, anchorIndex[2] = i, anchorIndex[3] = j;
- anchors.at<float>(anchorIndex) = ori + totalStride * i;
-
- anchorIndex[0] = 0;
- anchors.at<float>(anchorIndex) = ori + totalStride * j;
-
- anchorIndex[0] = 2;
- anchors.at<float>(anchorIndex) = baseAnchors[k].width;
-
- anchorIndex[0] = 3;
- anchors.at<float>(anchorIndex) = baseAnchors[k].height;
- }
- }
- }
-
- return anchors;
-}
-
-Mat getSubwindow(Mat& img, const Rect2f& targetBox, float originalSize, Scalar avgChans)
-{
- Mat zCrop, dst;
- Size imgSize = img.size();
- float c = (originalSize + 1) / 2;
- float xMin = (float)cvRound(targetBox.x - c);
- float xMax = xMin + originalSize - 1;
- float yMin = (float)cvRound(targetBox.y - c);
- float yMax = yMin + originalSize - 1;
-
- int leftPad = (int)(fmax(0., -xMin));
- int topPad = (int)(fmax(0., -yMin));
- int rightPad = (int)(fmax(0., xMax - imgSize.width + 1));
- int bottomPad = (int)(fmax(0., yMax - imgSize.height + 1));
-
- xMin = xMin + leftPad;
- xMax = xMax + leftPad;
- yMax = yMax + topPad;
- yMin = yMin + topPad;
-
- if (topPad == 0 && bottomPad == 0 && leftPad == 0 && rightPad == 0)
- {
- img(Rect(int(xMin), int(yMin), int(xMax - xMin + 1), int(yMax - yMin + 1))).copyTo(zCrop);
- }
- else
- {
- copyMakeBorder(img, dst, topPad, bottomPad, leftPad, rightPad, BORDER_CONSTANT, avgChans);
- dst(Rect(int(xMin), int(yMin), int(xMax - xMin + 1), int(yMax - yMin + 1))).copyTo(zCrop);
- }
-
- return zCrop;
-}
-
-void softmax(const Mat& src, Mat& dst)
-{
- Mat maxVal;
- cv::max(src.row(1), src.row(0), maxVal);
-
- src.row(1) -= maxVal;
- src.row(0) -= maxVal;
-
- exp(src, dst);
-
- Mat sumVal = dst.row(0) + dst.row(1);
- dst.row(0) = dst.row(0) / sumVal;
- dst.row(1) = dst.row(1) / sumVal;
-}
-
-void elementMax(Mat& src)
-{
- int* p = src.size.p;
- int index[] = { 0, 0, 0, 0 };
- for (int n = 0; n < *p; n++)
- {
- for (int k = 0; k < *(p + 1); k++)
- {
- for (int i = 0; i < *(p + 2); i++)
- {
- for (int j = 0; j < *(p + 3); j++)
- {
- index[0] = n, index[1] = k, index[2] = i, index[3] = j;
- float& v = src.at<float>(index);
- v = fmax(v, 1.0f / v);
- }
- }
- }
- }
-}
-
-float trackerEval(Mat img, trackerConfig& trackState, Net& siamRPN)
-{
- Rect2f targetBox = trackState.targetBox;
-
- float wc = targetBox.height + trackState.contextAmount * (targetBox.width + targetBox.height);
- float hc = targetBox.width + trackState.contextAmount * (targetBox.width + targetBox.height);
-
- float sz = sqrt(wc * hc);
- float scaleZ = trackState.exemplarSize / sz;
-
- float searchSize = float((trackState.instanceSize - trackState.exemplarSize) / 2);
- float pad = searchSize / scaleZ;
- float sx = sz + 2 * pad;
-
- Mat xCrop = getSubwindow(img, targetBox, (float)cvRound(sx), trackState.avgChans);
-
- static Mat blob;
- std::vector<Mat> outs;
- std::vector<String> outNames;
- Mat delta, score;
- Mat sc, rc, penalty, pscore;
-
- blobFromImage(xCrop, blob, 1.0, Size(trackState.instanceSize, trackState.instanceSize), Scalar(), trackState.swapRB, false, CV_32F);
-
- siamRPN.setInput(blob);
-
- outNames = siamRPN.getUnconnectedOutLayersNames();
- siamRPN.forward(outs, outNames);
-
- delta = outs[0];
- score = outs[1];
-
- score = score.reshape(0, { 2, trackState.anchorNum, trackState.scoreSize, trackState.scoreSize });
- delta = delta.reshape(0, { 4, trackState.anchorNum, trackState.scoreSize, trackState.scoreSize });
-
- softmax(score, score);
-
- targetBox.width *= scaleZ;
- targetBox.height *= scaleZ;
-
- score = score.row(1);
- score = score.reshape(0, { 5, 19, 19 });
-
- // Post processing
- delta.row(0) = delta.row(0).mul(trackState.anchors.row(2)) + trackState.anchors.row(0);
- delta.row(1) = delta.row(1).mul(trackState.anchors.row(3)) + trackState.anchors.row(1);
- exp(delta.row(2), delta.row(2));
- delta.row(2) = delta.row(2).mul(trackState.anchors.row(2));
- exp(delta.row(3), delta.row(3));
- delta.row(3) = delta.row(3).mul(trackState.anchors.row(3));
-
- sc = sizeCal(delta.row(2), delta.row(3)) / sizeCal(targetBox.width, targetBox.height);
- elementMax(sc);
-
- rc = delta.row(2).mul(1 / delta.row(3));
- rc = (targetBox.width / targetBox.height) / rc;
- elementMax(rc);
-
- // Calculating the penalty
- exp(((rc.mul(sc) - 1.) * trackState.penaltyK * (-1.0)), penalty);
- penalty = penalty.reshape(0, { trackState.anchorNum, trackState.scoreSize, trackState.scoreSize });
-
- pscore = penalty.mul(score);
- pscore = pscore * (1.0 - trackState.windowInfluence) + trackState.windows * trackState.windowInfluence;
-
- int bestID[] = { 0 };
- // Find the index of best score.
- minMaxIdx(pscore.reshape(0, { trackState.anchorNum * trackState.scoreSize * trackState.scoreSize, 1 }), 0, 0, 0, bestID);
- delta = delta.reshape(0, { 4, trackState.anchorNum * trackState.scoreSize * trackState.scoreSize });
- penalty = penalty.reshape(0, { trackState.anchorNum * trackState.scoreSize * trackState.scoreSize, 1 });
- score = score.reshape(0, { trackState.anchorNum * trackState.scoreSize * trackState.scoreSize, 1 });
-
- int index[] = { 0, bestID[0] };
- Rect2f resBox = { 0, 0, 0, 0 };
-
- resBox.x = delta.at<float>(index) / scaleZ;
- index[0] = 1;
- resBox.y = delta.at<float>(index) / scaleZ;
- index[0] = 2;
- resBox.width = delta.at<float>(index) / scaleZ;
- index[0] = 3;
- resBox.height = delta.at<float>(index) / scaleZ;
-
- float lr = penalty.at<float>(bestID) * score.at<float>(bestID) * trackState.lr;
-
- resBox.x = resBox.x + targetBox.x;
- resBox.y = resBox.y + targetBox.y;
- targetBox.width /= scaleZ;
- targetBox.height /= scaleZ;
-
- resBox.width = targetBox.width * (1 - lr) + resBox.width * lr;
- resBox.height = targetBox.height * (1 - lr) + resBox.height * lr;
-
- resBox.x = float(fmax(0., fmin(float(trackState.imgSize.width), resBox.x)));
- resBox.y = float(fmax(0., fmin(float(trackState.imgSize.height), resBox.y)));
- resBox.width = float(fmax(10., fmin(float(trackState.imgSize.width), resBox.width)));
- resBox.height = float(fmax(10., fmin(float(trackState.imgSize.height), resBox.height)));
-
- trackState.targetBox = resBox;
- return score.at<float>(bestID);
-}
-
-void trackerInit(Mat img, trackerConfig& trackState, Net& siamRPN, Net& siamKernelR1, Net& siamKernelCL1)
-{
- Rect2f targetBox = trackState.targetBox;
- Mat anchors = generateAnchors(trackState);
- trackState.anchors = anchors;
-
- Mat windows = generateHanningWindow(trackState);
-
- trackState.windows = windows;
- trackState.imgSize = img.size();
-
- trackState.avgChans = mean(img);
- float wc = targetBox.width + trackState.contextAmount * (targetBox.width + targetBox.height);
- float hc = targetBox.height + trackState.contextAmount * (targetBox.width + targetBox.height);
- float sz = (float)cvRound(sqrt(wc * hc));
-
- Mat zCrop = getSubwindow(img, targetBox, sz, trackState.avgChans);
- static Mat blob;
-
- blobFromImage(zCrop, blob, 1.0, Size(trackState.exemplarSize, trackState.exemplarSize), Scalar(), trackState.swapRB, false, CV_32F);
- siamRPN.setInput(blob);
- Mat out1;
- siamRPN.forward(out1, "63");
-
- siamKernelCL1.setInput(out1);
- siamKernelR1.setInput(out1);
-
- Mat cls1 = siamKernelCL1.forward();
- Mat r1 = siamKernelR1.forward();
- std::vector<int> r1_shape = { 20, 256, 4, 4 }, cls1_shape = { 10, 256, 4, 4 };
-
- siamRPN.setParam(siamRPN.getLayerId("65"), 0, r1.reshape(0, r1_shape));
- siamRPN.setParam(siamRPN.getLayerId("68"), 0, cls1.reshape(0, cls1_shape));
-}
int main(int argc, char **argv)
{
+++ /dev/null
-"""
-DaSiamRPN tracker.
-Original paper: https://arxiv.org/abs/1808.06048
-Link to original repo: https://github.com/foolwood/DaSiamRPN
-Links to onnx models:
-network: https://www.dropbox.com/s/rr1lk9355vzolqv/dasiamrpn_model.onnx?dl=0
-kernel_r1: https://www.dropbox.com/s/999cqx5zrfi7w4p/dasiamrpn_kernel_r1.onnx?dl=0
-kernel_cls1: https://www.dropbox.com/s/qvmtszx5h339a0w/dasiamrpn_kernel_cls1.onnx?dl=0
-"""
-
-import numpy as np
-import cv2 as cv
-import argparse
-import sys
-
-class DaSiamRPNTracker:
- # Initialization of used values, initial bounding box, used network
- def __init__(self, net="dasiamrpn_model.onnx", kernel_r1="dasiamrpn_kernel_r1.onnx", kernel_cls1="dasiamrpn_kernel_cls1.onnx"):
- self.windowing = "cosine"
- self.exemplar_size = 127
- self.instance_size = 271
- self.total_stride = 8
- self.score_size = (self.instance_size - self.exemplar_size) // self.total_stride + 1
- self.context_amount = 0.5
- self.ratios = [0.33, 0.5, 1, 2, 3]
- self.scales = [8, ]
- self.anchor_num = len(self.ratios) * len(self.scales)
- self.penalty_k = 0.055
- self.window_influence = 0.42
- self.lr = 0.295
- self.score = []
- if self.windowing == "cosine":
- self.window = np.outer(np.hanning(self.score_size), np.hanning(self.score_size))
- elif self.windowing == "uniform":
- self.window = np.ones((self.score_size, self.score_size))
- self.window = np.tile(self.window.flatten(), self.anchor_num)
- # Loading network`s and kernel`s models
- self.net = cv.dnn.readNet(net)
- self.kernel_r1 = cv.dnn.readNet(kernel_r1)
- self.kernel_cls1 = cv.dnn.readNet(kernel_cls1)
-
- def init(self, im, init_bb):
- target_pos, target_sz = np.array([init_bb[0], init_bb[1]]), np.array([init_bb[2], init_bb[3]])
- self.im_h = im.shape[0]
- self.im_w = im.shape[1]
- self.target_pos = target_pos
- self.target_sz = target_sz
- self.avg_chans = np.mean(im, axis=(0, 1))
-
- # When we trying to generate ONNX model from the pre-trained .pth model
- # we are using only one state of the network. In our case used state
- # with big bounding box, so we were forced to add assertion for
- # too small bounding boxes - current state of the network can not
- # work properly with such small bounding boxes
- if ((self.target_sz[0] * self.target_sz[1]) / float(self.im_h * self.im_w)) < 0.004:
- raise AssertionError(
- "Initializing BB is too small-try to restart tracker with larger BB")
-
- self.anchor = self.__generate_anchor()
- wc_z = self.target_sz[0] + self.context_amount * sum(self.target_sz)
- hc_z = self.target_sz[1] + self.context_amount * sum(self.target_sz)
- s_z = round(np.sqrt(wc_z * hc_z))
- z_crop = self.__get_subwindow_tracking(im, self.exemplar_size, s_z)
- z_crop = z_crop.transpose(2, 0, 1).reshape(1, 3, 127, 127).astype(np.float32)
- self.net.setInput(z_crop)
- z_f = self.net.forward('63')
- self.kernel_r1.setInput(z_f)
- r1 = self.kernel_r1.forward()
- self.kernel_cls1.setInput(z_f)
- cls1 = self.kernel_cls1.forward()
- r1 = r1.reshape(20, 256, 4, 4)
- cls1 = cls1.reshape(10, 256 , 4, 4)
- self.net.setParam(self.net.getLayerId('65'), 0, r1)
- self.net.setParam(self.net.getLayerId('68'), 0, cls1)
-
- # Сreating anchor for tracking bounding box
- def __generate_anchor(self):
- self.anchor = np.zeros((self.anchor_num, 4), dtype = np.float32)
- size = self.total_stride * self.total_stride
- count = 0
-
- for ratio in self.ratios:
- ws = int(np.sqrt(size / ratio))
- hs = int(ws * ratio)
- for scale in self.scales:
- wws = ws * scale
- hhs = hs * scale
- self.anchor[count] = [0, 0, wws, hhs]
- count += 1
-
- score_sz = int(self.score_size)
- self.anchor = np.tile(self.anchor, score_sz * score_sz).reshape((-1, 4))
- ori = - (score_sz / 2) * self.total_stride
- xx, yy = np.meshgrid([ori + self.total_stride * dx for dx in range(score_sz)], [ori + self.total_stride * dy for dy in range(score_sz)])
- xx, yy = np.tile(xx.flatten(), (self.anchor_num, 1)).flatten(), np.tile(yy.flatten(), (self.anchor_num, 1)).flatten()
- self.anchor[:, 0], self.anchor[:, 1] = xx.astype(np.float32), yy.astype(np.float32)
- return self.anchor
-
- # Function for updating tracker state
- def update(self, im):
- wc_z = self.target_sz[1] + self.context_amount * sum(self.target_sz)
- hc_z = self.target_sz[0] + self.context_amount * sum(self.target_sz)
- s_z = np.sqrt(wc_z * hc_z)
- scale_z = self.exemplar_size / s_z
- d_search = (self.instance_size - self.exemplar_size) / 2
- pad = d_search / scale_z
- s_x = round(s_z + 2 * pad)
-
- # Region preprocessing part
- x_crop = self.__get_subwindow_tracking(im, self.instance_size, s_x)
- x_crop = x_crop.transpose(2, 0, 1).reshape(1, 3, 271, 271).astype(np.float32)
- self.score = self.__tracker_eval(x_crop, scale_z)
- self.target_pos[0] = max(0, min(self.im_w, self.target_pos[0]))
- self.target_pos[1] = max(0, min(self.im_h, self.target_pos[1]))
- self.target_sz[0] = max(10, min(self.im_w, self.target_sz[0]))
- self.target_sz[1] = max(10, min(self.im_h, self.target_sz[1]))
-
- cx, cy = self.target_pos
- w, h = self.target_sz
- updated_bb = (cx, cy, w, h)
- return True, updated_bb
-
- # Function for updating position of the bounding box
- def __tracker_eval(self, x_crop, scale_z):
- target_size = self.target_sz * scale_z
- self.net.setInput(x_crop)
- outNames = self.net.getUnconnectedOutLayersNames()
- outNames = ['66', '68']
- delta, score = self.net.forward(outNames)
- delta = np.transpose(delta, (1, 2, 3, 0))
- delta = np.ascontiguousarray(delta, dtype = np.float32)
- delta = np.reshape(delta, (4, -1))
- score = np.transpose(score, (1, 2, 3, 0))
- score = np.ascontiguousarray(score, dtype = np.float32)
- score = np.reshape(score, (2, -1))
- score = self.__softmax(score)[1, :]
- delta[0, :] = delta[0, :] * self.anchor[:, 2] + self.anchor[:, 0]
- delta[1, :] = delta[1, :] * self.anchor[:, 3] + self.anchor[:, 1]
- delta[2, :] = np.exp(delta[2, :]) * self.anchor[:, 2]
- delta[3, :] = np.exp(delta[3, :]) * self.anchor[:, 3]
-
- def __change(r):
- return np.maximum(r, 1./r)
-
- def __sz(w, h):
- pad = (w + h) * 0.5
- sz2 = (w + pad) * (h + pad)
- return np.sqrt(sz2)
-
- def __sz_wh(wh):
- pad = (wh[0] + wh[1]) * 0.5
- sz2 = (wh[0] + pad) * (wh[1] + pad)
- return np.sqrt(sz2)
-
- s_c = __change(__sz(delta[2, :], delta[3, :]) / (__sz_wh(target_size)))
- r_c = __change((target_size[0] / target_size[1]) / (delta[2, :] / delta[3, :]))
- penalty = np.exp(-(r_c * s_c - 1.) * self.penalty_k)
- pscore = penalty * score
- pscore = pscore * (1 - self.window_influence) + self.window * self.window_influence
- best_pscore_id = np.argmax(pscore)
- target = delta[:, best_pscore_id] / scale_z
- target_size /= scale_z
- lr = penalty[best_pscore_id] * score[best_pscore_id] * self.lr
- res_x = target[0] + self.target_pos[0]
- res_y = target[1] + self.target_pos[1]
- res_w = target_size[0] * (1 - lr) + target[2] * lr
- res_h = target_size[1] * (1 - lr) + target[3] * lr
- self.target_pos = np.array([res_x, res_y])
- self.target_sz = np.array([res_w, res_h])
- return score[best_pscore_id]
-
- def __softmax(self, x):
- x_max = x.max(0)
- e_x = np.exp(x - x_max)
- y = e_x / e_x.sum(axis = 0)
- return y
-
- # Reshaping cropped image for using in the model
- def __get_subwindow_tracking(self, im, model_size, original_sz):
- im_sz = im.shape
- c = (original_sz + 1) / 2
- context_xmin = round(self.target_pos[0] - c)
- context_xmax = context_xmin + original_sz - 1
- context_ymin = round(self.target_pos[1] - c)
- context_ymax = context_ymin + original_sz - 1
- left_pad = int(max(0., -context_xmin))
- top_pad = int(max(0., -context_ymin))
- right_pad = int(max(0., context_xmax - im_sz[1] + 1))
- bot_pad = int(max(0., context_ymax - im_sz[0] + 1))
- context_xmin += left_pad
- context_xmax += left_pad
- context_ymin += top_pad
- context_ymax += top_pad
- r, c, k = im.shape
-
- if any([top_pad, bot_pad, left_pad, right_pad]):
- te_im = np.zeros((
- r + top_pad + bot_pad, c + left_pad + right_pad, k), np.uint8)
- te_im[top_pad:top_pad + r, left_pad:left_pad + c, :] = im
- if top_pad:
- te_im[0:top_pad, left_pad:left_pad + c, :] = self.avg_chans
- if bot_pad:
- te_im[r + top_pad:, left_pad:left_pad + c, :] = self.avg_chans
- if left_pad:
- te_im[:, 0:left_pad, :] = self.avg_chans
- if right_pad:
- te_im[:, c + left_pad:, :] = self.avg_chans
- im_patch_original = te_im[int(context_ymin):int(context_ymax + 1), int(context_xmin):int(context_xmax + 1), :]
- else:
- im_patch_original = im[int(context_ymin):int(context_ymax + 1), int(context_xmin):int(context_xmax + 1), :]
-
- if not np.array_equal(model_size, original_sz):
- im_patch_original = cv.resize(im_patch_original, (model_size, model_size))
- return im_patch_original
-
-# Sample for using DaSiamRPN tracker
-def main():
- parser = argparse.ArgumentParser(description="Run tracker")
- parser.add_argument("--input", type=str, help="Full path to input (empty for camera)")
- parser.add_argument("--net", type=str, default="dasiamrpn_model.onnx", help="Full path to onnx model of net")
- parser.add_argument("--kernel_r1", type=str, default="dasiamrpn_kernel_r1.onnx", help="Full path to onnx model of kernel_r1")
- parser.add_argument("--kernel_cls1", type=str, default="dasiamrpn_kernel_cls1.onnx", help="Full path to onnx model of kernel_cls1")
- args = parser.parse_args()
- point1 = ()
- point2 = ()
- mark = True
- drawing = False
- cx, cy, w, h = 0.0, 0.0, 0, 0
- # Fucntion for drawing during videostream
- def get_bb(event, x, y, flag, param):
- nonlocal point1, point2, cx, cy, w, h, drawing, mark
-
- if event == cv.EVENT_LBUTTONDOWN:
- if not drawing:
- drawing = True
- point1 = (x, y)
- else:
- drawing = False
-
- elif event == cv.EVENT_MOUSEMOVE:
- if drawing:
- point2 = (x, y)
-
- elif event == cv.EVENT_LBUTTONUP:
- cx = point1[0] - (point1[0] - point2[0]) / 2
- cy = point1[1] - (point1[1] - point2[1]) / 2
- w = abs(point1[0] - point2[0])
- h = abs(point1[1] - point2[1])
- mark = False
-
- # Creating window for visualization
- cap = cv.VideoCapture(args.input if args.input else 0)
- cv.namedWindow("DaSiamRPN")
- cv.setMouseCallback("DaSiamRPN", get_bb)
-
- whitespace_key = 32
- while cv.waitKey(40) != whitespace_key:
- has_frame, frame = cap.read()
- if not has_frame:
- sys.exit(0)
- cv.imshow("DaSiamRPN", frame)
-
- while mark:
- twin = np.copy(frame)
- if point1 and point2:
- cv.rectangle(twin, point1, point2, (0, 255, 255), 3)
- cv.imshow("DaSiamRPN", twin)
- cv.waitKey(40)
-
- init_bb = (cx, cy, w, h)
- tracker = DaSiamRPNTracker(args.net, args.kernel_r1, args.kernel_cls1)
- tracker.init(frame, init_bb)
-
- # Tracking loop
- while cap.isOpened():
- has_frame, frame = cap.read()
- if not has_frame:
- sys.exit(0)
- _, new_bb = tracker.update(frame)
- cx, cy, w, h = new_bb
- cv.rectangle(frame, (int(cx - w // 2), int(cy - h // 2)), (int(cx - w // 2) + int(w), int(cy - h // 2) + int(h)),(0, 255, 255), 3)
- cv.imshow("DaSiamRPN", frame)
- key = cv.waitKey(1)
- if key == ord("q"):
- break
-
- cap.release()
- cv.destroyAllWindows()
-
-if __name__ == "__main__":
- main()
'''
Tracker demo
+For usage download models by following links
+For GOTURN:
+ goturn.prototxt and goturn.caffemodel: https://github.com/opencv/opencv_extra/tree/c4219d5eb3105ed8e634278fad312a1a8d2c182d/testdata/tracking
+For DaSiamRPN:
+ network: https://www.dropbox.com/s/rr1lk9355vzolqv/dasiamrpn_model.onnx?dl=0
+ kernel_r1: https://www.dropbox.com/s/999cqx5zrfi7w4p/dasiamrpn_kernel_r1.onnx?dl=0
+ kernel_cls1: https://www.dropbox.com/s/qvmtszx5h339a0w/dasiamrpn_kernel_cls1.onnx?dl=0
+
USAGE:
- tracker.py [<video_source>]
+ tracker.py [-h] [--input INPUT] [--tracker_algo TRACKER_ALGO]
+ [--goturn GOTURN] [--goturn_model GOTURN_MODEL]
+ [--dasiamrpn_net DASIAMRPN_NET]
+ [--dasiamrpn_kernel_r1 DASIAMRPN_KERNEL_R1]
+ [--dasiamrpn_kernel_cls1 DASIAMRPN_KERNEL_CLS1]
+ [--dasiamrpn_backend DASIAMRPN_BACKEND]
+ [--dasiamrpn_target DASIAMRPN_TARGET]
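+
+Example (assumes the model files above have been downloaded next to the script):
+    tracker.py --input vtest.avi --tracker_algo dasiamrpn --dasiamrpn_net dasiamrpn_model.onnx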
'''
# Python 2/3 compatibility
import numpy as np
import cv2 as cv
+import argparse
from video import create_capture, presets
class App(object):
- def initializeTracker(self, image):
+ def __init__(self, args):
+ self.args = args
+
+ def initializeTracker(self, image, trackerAlgorithm):
while True:
+ if trackerAlgorithm == 'mil':
+ tracker = cv.TrackerMIL_create()
+ elif trackerAlgorithm == 'goturn':
+ params = cv.TrackerGOTURN_Params()
+ params.modelTxt = self.args.goturn
+ params.modelBin = self.args.goturn_model
+ tracker = cv.TrackerGOTURN_create(params)
+ elif trackerAlgorithm == 'dasiamrpn':
+ params = cv.TrackerDaSiamRPN_Params()
+ params.model = self.args.dasiamrpn_net
+ params.kernel_cls1 = self.args.dasiamrpn_kernel_cls1
+ params.kernel_r1 = self.args.dasiamrpn_kernel_r1
+ tracker = cv.TrackerDaSiamRPN_create(params)
+ else:
+ sys.exit("Tracker {} is not recognized. Please use one of the three available trackers: mil, goturn, dasiamrpn.".format(trackerAlgorithm))
+
print('==> Select object ROI for tracker ...')
bbox = cv.selectROI('tracking', image)
print('ROI: {}'.format(bbox))
- tracker = cv.TrackerMIL_create()
try:
tracker.init(image, bbox)
except Exception as e:
return tracker
def run(self):
- videoPath = sys.argv[1] if len(sys.argv) >= 2 else 'vtest.avi'
+ videoPath = self.args.input
+ trackerAlgorithm = self.args.tracker_algo
camera = create_capture(videoPath, presets['cube'])
if not camera.isOpened():
sys.exit("Can't open video stream: {}".format(videoPath))
assert image is not None
cv.namedWindow('tracking')
- tracker = self.initializeTracker(image)
+ tracker = self.initializeTracker(image, trackerAlgorithm)
print("==> Tracking is started. Press 'SPACE' to re-initialize tracker or 'ESC' for exit...")
if __name__ == '__main__':
print(__doc__)
- App().run()
+ parser = argparse.ArgumentParser(description="Run tracker")
+ parser.add_argument("--input", type=str, default="vtest.avi", help="Path to video source")
+ parser.add_argument("--tracker_algo", type=str, default="mil", help="One of three available tracking algorithms: mil, goturn, dasiamrpn")
+ parser.add_argument("--goturn", type=str, default="goturn.prototxt", help="Path to GOTURN architecture")
+ parser.add_argument("--goturn_model", type=str, default="goturn.caffemodel", help="Path to GOTERN model")
+ parser.add_argument("--dasiamrpn_net", type=str, default="dasiamrpn_model.onnx", help="Path to onnx model of DaSiamRPN net")
+ parser.add_argument("--dasiamrpn_kernel_r1", type=str, default="dasiamrpn_kernel_r1.onnx", help="Path to onnx model of DaSiamRPN kernel_r1")
+ parser.add_argument("--dasiamrpn_kernel_cls1", type=str, default="dasiamrpn_kernel_cls1.onnx", help="Path to onnx model of DaSiamRPN kernel_cls1")
+ parser.add_argument("--dasiamrpn_backend", type=int, default=0, help="Choose one of computation backends:\
+ 0: automatically (by default),\
+ 1: Halide language (http://halide-lang.org/),\
+ 2: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit),\
+ 3: OpenCV implementation")
+ parser.add_argument("--dasiamrpn_target", type=int, default=0, help="Choose one of target computation devices:\
+ 0: CPU target (by default),\
+ 1: OpenCL,\
+ 2: OpenCL fp16 (half-float precision),\
+ 3: VPU")
+ args = parser.parse_args()
+ App(args).run()
cv.destroyAllWindows()