bo->bo_reuse = TRUE;
pthread_mutex_unlock(&table_lock);
+ VG_BO_ALLOC(bo);
+
return bo;
}
bo = bo_from_handle(dev, size, handle);
+ VG_BO_ALLOC(bo);
+
out_unlock:
pthread_mutex_unlock(&table_lock);
goto out_unlock;
bo = bo_from_handle(dev, req.size, req.handle);
- if (bo)
+ if (bo) {
set_name(bo, name);
+ VG_BO_ALLOC(bo);
+ }
out_unlock:
pthread_mutex_unlock(&table_lock);
/* Called under table_lock */
drm_private void bo_del(struct fd_bo *bo)
{
+	/* annotate the backing pages as freed before we unmap them: */
+	VG_BO_FREE(bo);
+
if (bo->map)
drm_munmap(bo->map, bo->size);
#include "freedreno_drmif.h"
#include "freedreno_priv.h"
-
drm_private void bo_del(struct fd_bo *bo);
drm_private extern pthread_mutex_t table_lock;
if (time && ((time - bo->free_time) <= 1))
break;
+		/* re-obtain first: bo_del()'s VG_BO_FREE() expects a live
+		 * malloc-like block and an accessible bo struct */
+		VG_BO_OBTAIN(bo);
list_del(&bo->list);
bo_del(bo);
}
*size = bucket->size;
bo = find_in_bucket(bucket, flags);
if (bo) {
+ VG_BO_OBTAIN(bo);
if (bo->funcs->madvise(bo, TRUE) <= 0) {
/* we've lost the backing pages, delete and try again: */
pthread_mutex_lock(&table_lock);
clock_gettime(CLOCK_MONOTONIC, &time);
bo->free_time = time.tv_sec;
+ VG_BO_RELEASE(bo);
list_addtail(&bo->list, &bucket->list);
fd_bo_cache_cleanup(cache, time.tv_sec);
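With the release path instrumented, memcheck can now catch clients that
touch a buffer after handing it back to the cache. A hypothetical sequence
(not part of the patch; dev is assumed to be an open fd_device):

	struct fd_bo *bo = fd_bo_new(dev, 0x1000, 0);
	uint32_t *map = fd_bo_map(bo);

	map[0] = 0xdeadbeef;	/* ok: range is registered malloc-like */

	fd_bo_del(bo);		/* bo_reuse is set, so the bo is parked in a
				 * cache bucket and VG_BO_RELEASE() marks the
				 * mapping free-like */

	map[0] = 0xcafebabe;	/* memcheck: "Invalid write of size 4" */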
struct fd_bo_cache bo_cache;
int closefd; /* call close(fd) upon destruction */
+
+ /* just for valgrind: */
+ int bo_size;
};
drm_private void fd_bo_cache_init(struct fd_bo_cache *cache, int coarse);
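The size is stored per-device rather than hard-coded as
sizeof(struct fd_bo) because each backend embeds fd_bo at the head of a
larger struct, and the VG_BO_RELEASE()/VG_BO_OBTAIN() annotations below
need to cover the whole thing. A one-line sketch of the expected
backend-side setup (msm_device_new() and struct msm_bo are assumptions
here; the kgsl backend would do the same with its own bo type):

	dev->bo_size = sizeof(struct msm_bo);	/* in msm_device_new() */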
return ((char *)end) - ((char *)start);
}
+#ifdef HAVE_VALGRIND
+# include <memcheck.h>
+
+/*
+ * For tracking the backing memory: if running on valgrind, we force an
+ * mmap of the bo so that there is an address range for valgrind to track.
+ */
+static inline void VG_BO_ALLOC(struct fd_bo *bo)
+{
+ if (bo && RUNNING_ON_VALGRIND) {
+ VALGRIND_MALLOCLIKE_BLOCK(fd_bo_map(bo), bo->size, 0, 1);
+ }
+}
+
+static inline void VG_BO_FREE(struct fd_bo *bo)
+{
+	/* no RUNNING_ON_VALGRIND guard needed: client requests are no-ops
+	 * when not under valgrind, and under valgrind VG_BO_ALLOC() forced
+	 * the mmap, so bo->map is valid here */
+	VALGRIND_FREELIKE_BLOCK(bo->map, 0);
+}
+
+/*
+ * For tracking bo structs that are in the buffer-cache, so that valgrind
+ * doesn't attribute ownership to the first one to allocate the recycled
+ * bo.
+ *
+ * Note that the list_head in fd_bo is used to track the buffers in the
+ * cache, so error reporting on the struct's range is disabled while a bo
+ * sits in the cache; otherwise valgrind would squawk about the list
+ * traversal itself.
+ */
+static inline void VG_BO_RELEASE(struct fd_bo *bo)
+{
+ if (RUNNING_ON_VALGRIND) {
+ VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE(bo, bo->dev->bo_size);
+ VALGRIND_MAKE_MEM_NOACCESS(bo, bo->dev->bo_size);
+ VALGRIND_FREELIKE_BLOCK(bo->map, 0);
+ }
+}
+
+static inline void VG_BO_OBTAIN(struct fd_bo *bo)
+{
+ if (RUNNING_ON_VALGRIND) {
+ VALGRIND_MAKE_MEM_DEFINED(bo, bo->dev->bo_size);
+ VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(bo, bo->dev->bo_size);
+ VALGRIND_MALLOCLIKE_BLOCK(bo->map, bo->size, 0, 1);
+ }
+}
+#else
+static inline void VG_BO_ALLOC(struct fd_bo *bo) {}
+static inline void VG_BO_FREE(struct fd_bo *bo) {}
+static inline void VG_BO_RELEASE(struct fd_bo *bo) {}
+static inline void VG_BO_OBTAIN(struct fd_bo *bo) {}
+#endif
+
#endif /* FREEDRENO_PRIV_H_ */
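For reference, a minimal sketch (not part of the patch) of how the four
hooks pair up over one recycle cycle; dev is assumed to be an open
fd_device:

	struct fd_bo *bo;

	bo = fd_bo_new(dev, 0x1000, 0);	/* fresh bo:  VG_BO_ALLOC()   */
	fd_bo_del(bo);			/* to cache:  VG_BO_RELEASE() */
	bo = fd_bo_new(dev, 0x1000, 0);	/* cache hit: VG_BO_OBTAIN()  */
	fd_bo_del(bo);			/* back to the cache ...      */
	fd_device_del(dev);		/* drains the cache; bo_del()
					 * calls VG_BO_FREE() on each
					 * remaining bo */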