tbm_error_e *error);
/**
+ * @brief allocate the bo data for GPUs which support the tiled format
+ * @param[in] bufmgr_data : The backend bufmgr data
+ * @param[in] width : the width of surface
+ * @param[in] height : the height of surface
+ * @param[in] bpp : the bpp of the surface format
+ * @param[in] format : the format of surface
+ * @param[in] flags : the flags of memory type
+ * @param[in] bo_idx : the bo index of the surface
+ * @param[out] error #TBM_ERROR_NONE if success. Otherwise, error value.
+ * @return pointer of the bo private.
+ */
+ tbm_backend_bo_data *(*bufmgr_alloc_bo_with_tiled_format)(tbm_backend_bufmgr_data *bufmgr_data,
+ int width, int height, int bpp, int format,
+ tbm_bo_memory_type flags, int bo_idx,
+ tbm_error_e *error);
+
+ /**
+ * @brief import the bo data associated with the tbm_fd(prime fd).
+ * @remarks tbm_fd must be freed by the user.
+ * @remarks It must be supported at the backend module. To share the tbm_fd(prime fd)
surf->bos[i] = bo;
/* LCOV_EXCL_STOP */
+ } else if (bufmgr->bufmgr_func->bufmgr_alloc_bo_with_tiled_format) {
+ bo = calloc(1, sizeof(struct _tbm_bo));
+ if (!bo) {
+ TBM_ERR("fail to alloc bo struct\n");
+ _tbm_set_last_result(TBM_ERROR_OUT_OF_MEMORY);
+ goto alloc_bo_fail;
+ }
+
+ bo->bufmgr = surf->bufmgr;
+
+ _tbm_bufmgr_mutex_lock();
+
+ bo_data = bufmgr->bufmgr_func->bufmgr_alloc_bo_with_tiled_format(bufmgr->bufmgr_data, width, height, surf->info.bpp/8, format, flags, i, &error);
+ if (!bo_data) {
+ TBM_ERR("fail to alloc bo priv. error(%d)\n", error);
+ _tbm_set_last_result(error);
+ free(bo);
+ _tbm_bufmgr_mutex_unlock();
+ goto alloc_bo_fail;
+ }
+ bo->bo_data = bo_data;
+
+ bo->ref_cnt = 1;
+ bo->flags = flags;
+ LIST_INITHEAD(&bo->user_data_list);
+
+ LIST_ADD(&bo->item_link, &surf->bufmgr->bo_list);
+
+ _tbm_bufmgr_mutex_unlock();
+
+ surf->bos[i] = bo;
+
} else {
surf->bos[i] = tbm_bo_alloc(bufmgr, bo_size, flags);
if (!surf->bos[i]) {