#define TBM_EXYNOS_DEBUG(...)
#endif
+#define STRERR_BUFSIZE 128
+
#define SIZE_ALIGN(value, base) (((value) + ((base) - 1)) & ~((base) - 1))
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define MAX(a, b) ((a) > (b) ? (a) : (b))
{
struct tgl_ver_data data;
int err;
+ char buf[STRERR_BUFSIZE];
err = ioctl(fd, TGL_IOCTL_GET_VERSION, &data);
if (err) {
- TBM_EXYNOS_ERROR("error(%s) %s:%d\n", strerror(errno));
+	TBM_EXYNOS_ERROR("error(%s) %s:%d\n", strerror_r(errno, buf, STRERR_BUFSIZE), __func__, __LINE__);
return 0;
}
{
struct tgl_reg_data data;
int err;
+ char buf[STRERR_BUFSIZE];
data.key = key;
data.timeout_ms = 1000;
err = ioctl(fd, TGL_IOCTL_REGISTER, &data);
if (err) {
- TBM_EXYNOS_ERROR("error(%s) key:%d\n", strerror(errno), key);
+ TBM_EXYNOS_ERROR("error(%s) key:%d\n", strerror_r(errno, buf, STRERR_BUFSIZE), key);
return 0;
}
{
struct tgl_reg_data data;
int err;
+ char buf[STRERR_BUFSIZE];
data.key = key;
err = ioctl(fd, TGL_IOCTL_UNREGISTER, &data);
if (err) {
- TBM_EXYNOS_ERROR("error(%s) key:%d\n", strerror(errno), key);
+ TBM_EXYNOS_ERROR("error(%s) key:%d\n", strerror_r(errno, buf, STRERR_BUFSIZE), key);
return 0;
}
struct tgl_lock_data data;
enum tgl_type_data tgl_type;
int err;
+ char buf[STRERR_BUFSIZE];
switch (opt) {
case TBM_OPTION_READ:
err = ioctl(fd, TGL_IOCTL_LOCK, &data);
if (err) {
TBM_EXYNOS_ERROR("error(%s) key:%d opt:%d\n",
- strerror(errno), key, opt);
+ strerror_r(errno, buf, STRERR_BUFSIZE), key, opt);
return 0;
}
{
struct tgl_lock_data data;
int err;
+ char buf[STRERR_BUFSIZE];
data.key = key;
data.type = TGL_TYPE_NONE;
err = ioctl(fd, TGL_IOCTL_UNLOCK, &data);
if (err) {
TBM_EXYNOS_ERROR("error(%s) key:%d\n",
- strerror(errno), key);
+ strerror_r(errno, buf, STRERR_BUFSIZE), key);
return 0;
}
{
struct tgl_usr_data data;
int err;
+ char buf[STRERR_BUFSIZE];
data.key = key;
data.data1 = val;
err = ioctl(fd, TGL_IOCTL_SET_DATA, &data);
if (err) {
TBM_EXYNOS_ERROR("error(%s) key:%d\n",
- strerror(errno), key);
+ strerror_r(errno, buf, STRERR_BUFSIZE), key);
return 0;
}
{
struct tgl_usr_data data = { 0, };
int err;
+ char buf[STRERR_BUFSIZE];
data.key = key;
err = ioctl(fd, TGL_IOCTL_GET_DATA, &data);
if (err) {
TBM_EXYNOS_ERROR("error(%s) key:%d\n",
- strerror(errno), key);
+ strerror_r(errno, buf, STRERR_BUFSIZE), key);
return 0;
}
{
tbm_bo_exynos bo_exynos;
tbm_bufmgr_exynos bufmgr_exynos;
+ char buf[STRERR_BUFSIZE];
if (!bo)
return;
if (bo_exynos->pBase) {
if (munmap(bo_exynos->pBase, bo_exynos->size) == -1) {
TBM_EXYNOS_ERROR("bo:%p fail to munmap(%s)\n",
- bo, strerror(errno));
+ bo, strerror_r(errno, buf, STRERR_BUFSIZE));
}
}
arg.handle = bo_exynos->gem;
if (drmIoctl(bo_exynos->fd, DRM_IOCTL_GEM_CLOSE, &arg)) {
TBM_EXYNOS_ERROR("bo:%p fail to gem close.(%s)\n",
- bo, strerror(errno));
+ bo, strerror_r(errno, buf, STRERR_BUFSIZE));
}
free(bo_exynos);
PrivGem *privGem = NULL;
unsigned int name;
int ret;
+ char buf[STRERR_BUFSIZE];
bufmgr_exynos = (tbm_bufmgr_exynos)tbm_backend_get_bufmgr_priv(bo);
EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
arg.flags = 0;
if (drmIoctl(bufmgr_exynos->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &arg)) {
TBM_EXYNOS_ERROR("bo:%p Cannot get gem handle from fd:%d (%s)\n",
- bo, arg.fd, strerror(errno));
+ bo, arg.fd, strerror_r(errno, buf, STRERR_BUFSIZE));
return NULL;
}
gem = arg.handle;
name = _get_name(bufmgr_exynos->fd, gem);
if (!name) {
TBM_EXYNOS_ERROR("bo:%p Cannot get name from gem:%d, fd:%d (%s)\n",
- bo, gem, key, strerror(errno));
+ bo, gem, key, strerror_r(errno, buf, STRERR_BUFSIZE));
return 0;
}
&info,
sizeof(struct drm_exynos_gem_info))) {
TBM_EXYNOS_ERROR("bo:%p Cannot get gem info from gem:%d, fd:%d (%s)\n",
- bo, gem, key, strerror(errno));
+ bo, gem, key, strerror_r(errno, buf, STRERR_BUFSIZE));
return 0;
}
tbm_bo_exynos bo_exynos;
int ret;
+ char buf[STRERR_BUFSIZE];
bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, -1);
ret = drmIoctl(bo_exynos->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg);
if (ret) {
TBM_EXYNOS_ERROR("bo:%p Cannot dmabuf=%d (%s)\n",
- bo, bo_exynos->gem, strerror(errno));
+ bo, bo_exynos->gem, strerror_r(errno, buf, STRERR_BUFSIZE));
return (tbm_fd) ret;
}
struct dma_buf_fence fence;
struct flock filelock;
int ret = 0;
+ char buf[STRERR_BUFSIZE];
if (device != TBM_DEVICE_3D && device != TBM_DEVICE_CPU) {
TBM_EXYNOS_DEBUG("Not support device type,\n");
/* Check if the tbm manager supports dma fence or not. */
if (!bufmgr_exynos->use_dma_fence) {
- TBM_EXYNOS_ERROR("Not support DMA FENCE(%s)\n", strerror(errno));
+ TBM_EXYNOS_ERROR("Not support DMA FENCE(%s)\n", strerror_r(errno, buf, STRERR_BUFSIZE));
return 0;
}
if (device == TBM_DEVICE_3D) {
ret = ioctl(bo_exynos->dmabuf, DMABUF_IOCTL_GET_FENCE, &fence);
if (ret < 0) {
- TBM_EXYNOS_ERROR("Cannot set GET FENCE(%s)\n", strerror(errno));
+ TBM_EXYNOS_ERROR("Cannot set GET FENCE(%s)\n", strerror_r(errno, buf, STRERR_BUFSIZE));
return 0;
}
} else {
struct flock filelock;
unsigned int dma_type = 0;
int ret = 0;
+ char buf[STRERR_BUFSIZE];
bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
if (dma_type) {
ret = ioctl(bo_exynos->dmabuf, DMABUF_IOCTL_PUT_FENCE, &fence);
if (ret < 0) {
- TBM_EXYNOS_ERROR("Can not set PUT FENCE(%s)\n", strerror(errno));
+ TBM_EXYNOS_ERROR("Can not set PUT FENCE(%s)\n", strerror_r(errno, buf, STRERR_BUFSIZE));
return 0;
}
} else {