{
enum { COPY_ON_MAP=1, HOST_COPY_OBSOLETE=2,
DEVICE_COPY_OBSOLETE=4, TEMP_UMAT=8, TEMP_COPIED_UMAT=24,
- USER_ALLOCATED=32, DEVICE_MEM_MAPPED=64};
+ USER_ALLOCATED=32, DEVICE_MEM_MAPPED=64,
+ ASYNC_CLEANUP=128
+ };
UMatData(const MatAllocator* allocator);
~UMatData();
#include "precomp.hpp"
#include <list>
#include <map>
+#include <deque>
#include <string>
#include <sstream>
#include <iostream> // std::cerr
if( u[i] )
{
if( CV_XADD(&u[i]->urefcount, -1) == 1 )
+ {
+ u[i]->flags |= UMatData::ASYNC_CLEANUP;
u[i]->currAllocator->deallocate(u[i]);
+ }
u[i] = 0;
}
nu = 0;
matStdAllocator = Mat::getDefaultAllocator();
}
+    ~OpenCLAllocator()
+    {
+        // Drain any UMatData releases that were deferred via the async
+        // cleanup queue, so nothing queued outlives the allocator itself.
+        flushCleanupQueue();
+    }
UMatData* defaultAllocate(int dims, const int* sizes, int type, void* data, size_t* step,
int flags, UMatUsageFlags usageFlags) const
}
Context& ctx = Context::getDefault();
+ flushCleanupQueue();
int createFlags = 0, flags0 = 0;
getBestFlags(ctx, flags, usageFlags, createFlags, flags0);
if(!u)
return false;
+ flushCleanupQueue();
+
UMatDataAutoLock lock(u);
if(u->handle == 0)
CV_Assert(u->handle != 0);
CV_Assert(u->mapcount == 0);
+
+ if (u->flags & UMatData::ASYNC_CLEANUP)
+ addToCleanupQueue(u);
+ else
+ deallocate_(u);
+ }
+
+ void deallocate_(UMatData* u) const
+ {
if(u->tempUMat())
{
CV_Assert(u->origdata);
}
MatAllocator* matStdAllocator;
+
+ mutable cv::Mutex cleanupQueueMutex;
+ mutable std::deque<UMatData*> cleanupQueue;
+
+    // Release every UMatData whose deallocation was deferred to this queue.
+    // Entries are swapped out while holding the lock, then deallocated with
+    // the lock dropped, so deallocate_() can never deadlock by re-entering
+    // a queue operation.
+    void flushCleanupQueue() const
+    {
+        std::deque<UMatData*> q;
+        {
+            // The emptiness check must happen under the mutex: a bare
+            // cleanupQueue.empty() read would race with concurrent
+            // addToCleanupQueue() pushes from other threads (formal UB).
+            // An early return keeps the common empty path cheap.
+            cv::AutoLock lock(cleanupQueueMutex);
+            if (cleanupQueue.empty())
+                return;
+            q.swap(cleanupQueue);
+        }
+        for (std::deque<UMatData*>::const_iterator i = q.begin(); i != q.end(); ++i)
+        {
+            deallocate_(*i);
+        }
+    }
+    // Defer the release of 'u' until the next flushCleanupQueue() call.
+    void addToCleanupQueue(UMatData* u) const
+    {
+        //TODO: Validation check: CV_Assert(!u->tempUMat());
+        cv::AutoLock guard(cleanupQueueMutex);
+        cleanupQueue.push_back(u);
+    }
};
MatAllocator* getOpenCLAllocator()