cfg |= GSC_IN_ROT_YFLIP;
break;
case DRM_MODE_ROTATE_90:
+ cfg |= GSC_IN_ROT_90;
if (rotation & DRM_MODE_REFLECT_Y)
- cfg |= GSC_IN_ROT_90_XFLIP;
- else if (rotation & DRM_MODE_REFLECT_X)
- cfg |= GSC_IN_ROT_90_YFLIP;
- else
- cfg |= GSC_IN_ROT_90;
+ cfg |= GSC_IN_ROT_XFLIP;
+ if (rotation & DRM_MODE_REFLECT_X)
+ cfg |= GSC_IN_ROT_YFLIP;
break;
case DRM_MODE_ROTATE_180:
cfg |= GSC_IN_ROT_180;
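
The old else-if chain set at most one flip bit, so ROTATE_90 combined with both reflections was silently collapsed into a single flip. The rewrite works because of how the GSC_IN rotation field is encoded. A minimal user-space sketch, assuming the encodings below match regs-gsc.h (reproduced from memory, worth verifying against the header):

/* Sketch (user-space, not driver code): models the GSC_IN rotation
 * field to show why OR-ing flip bits into GSC_IN_ROT_90 is valid. */
#include <stdio.h>

#define GSC_IN_ROT_XFLIP	(1 << 16)
#define GSC_IN_ROT_YFLIP	(2 << 16)
#define GSC_IN_ROT_180		(3 << 16)
#define GSC_IN_ROT_90		(4 << 16)
#define GSC_IN_ROT_90_XFLIP	(5 << 16)
#define GSC_IN_ROT_90_YFLIP	(6 << 16)
#define GSC_IN_ROT_270		(7 << 16)

int main(void)
{
	/* 90 degrees plus one flip hits the dedicated encodings... */
	printf("%d\n", (GSC_IN_ROT_90 | GSC_IN_ROT_XFLIP) == GSC_IN_ROT_90_XFLIP);
	printf("%d\n", (GSC_IN_ROT_90 | GSC_IN_ROT_YFLIP) == GSC_IN_ROT_90_YFLIP);
	/* ...and 90 degrees plus both flips is exactly 270 degrees,
	 * a combination the old else-if chain could never produce. */
	printf("%d\n", (GSC_IN_ROT_90 | GSC_IN_ROT_XFLIP | GSC_IN_ROT_YFLIP) ==
		       GSC_IN_ROT_270);
	return 0;
}

OR-ing the flips into GSC_IN_ROT_90 therefore lands exactly on the combined encodings, including the 270-degree value for the double flip.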
for (i = 0; i < buf->format->num_planes; i++) {
unsigned int height = (i == 0) ? buf->buf.height :
DIV_ROUND_UP(buf->buf.height, buf->format->vsub);
- unsigned long size = height * buf->buf.pitch[i] +
- buf->buf.offset[i];
+ unsigned long size = height * buf->buf.pitch[i];
struct drm_gem_object *obj = drm_gem_object_lookup(filp,
buf->buf.gem_id[i]);
if (!obj) {
	ret = -ENOENT;
	goto gem_free;
}
buf->exynos_gem[i] = to_exynos_gem(obj);
- if (size > buf->exynos_gem[i]->size) {
+ if (size + buf->buf.offset[i] > buf->exynos_gem[i]->size) {
i++;
ret = -EINVAL;
goto gem_free;
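
The comparison is arithmetically the same; the patch separates the plane payload (size) from its start offset within the GEM object. For concreteness, here is the per-plane check applied to an NV12 1920x1080 buffer (two planes, vsub = 2); all numbers are illustrative, not taken from the driver:

/* Sketch: the per-plane size check for NV12 1920x1080 (vsub = 2). */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int height = 1080, vsub = 2;
	unsigned long pitch[2] = { 1920, 1920 };	/* bytes per line */
	unsigned long offset[2] = { 0, 1920UL * 1080 };	/* plane start */
	unsigned long gem_size = 1920UL * 1080 * 3 / 2;	/* NV12 payload */
	int i;

	for (i = 0; i < 2; i++) {
		/* chroma plane is vertically subsampled */
		unsigned int h = (i == 0) ? height : DIV_ROUND_UP(height, vsub);
		unsigned long size = h * pitch[i];

		/* same test as the patch: payload + offset must fit */
		printf("plane %d: need %lu of %lu -> %s\n", i,
		       size + offset[i], gem_size,
		       size + offset[i] > gem_size ? "-EINVAL" : "ok");
	}
	return 0;
}

Both planes fit exactly here; an offset one byte larger on plane 1 would trip the -EINVAL path above.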
unsigned int num_limits, bool swap)
{
const struct drm_exynos_ipp_limit_val *lh, *lv;
+ int dw, dh;
for (; num_limits; limits++, num_limits--)
	if ((limits->type & DRM_EXYNOS_IPP_LIMIT_TYPE_MASK) ==
	    DRM_EXYNOS_IPP_LIMIT_TYPE_SCALE)
		break;
lh = (!swap) ? &limits->h : &limits->v;
lv = (!swap) ? &limits->v : &limits->h;
+ dw = (!swap) ? dst->w : dst->h;
+ dh = (!swap) ? dst->h : dst->w;
- if (!__scale_limit_check(src->w, dst->w, lh->min, lh->max) ||
- !__scale_limit_check(src->h, dst->h, lv->min, lv->max))
+ if (!__scale_limit_check(src->w, dw, lh->min, lh->max) ||
+ !__scale_limit_check(src->h, dh, lv->min, lv->max))
return -EINVAL;
return 0;
}
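
A 90- or 270-degree rotation maps the horizontal source axis onto the vertical destination axis, so comparing src->w against dst->w checks the wrong pair once swap is set; the fix swaps the destination dimensions along with the limits. A self-contained sketch, where __scale_limit_check is an assumed reimplementation (16.16 fixed-point ratio bounds, as the min/max fields suggest) rather than the driver's verbatim helper, and one min/max pair serves both axes for brevity:

#include <stdbool.h>
#include <stdio.h>

static bool __scale_limit_check(unsigned int src, unsigned int dst,
				unsigned int min, unsigned int max)
{
	return ((unsigned long)dst << 16) >= (unsigned long)src * min &&
	       ((unsigned long)dst << 16) <= (unsigned long)src * max;
}

int main(void)
{
	/* pure 90-degree rotation: 1920x1080 -> 1080x1920, no scaling */
	unsigned int sw = 1920, sh = 1080, dw = 1080, dh = 1920;
	unsigned int min = 1 << 16, max = 1 << 16;	/* 1:1 only */

	/* old code: src->w vs dst->w looks like a 0.5625x downscale */
	printf("unswapped: %d\n", __scale_limit_check(sw, dw, min, max) &&
				  __scale_limit_check(sh, dh, min, max));
	/* fixed code: swap dst too, so each axis meets its own limit */
	printf("swapped:   %d\n", __scale_limit_check(sw, dh, min, max) &&
				  __scale_limit_check(sh, dw, min, max));
	return 0;
}

With a 1:1-only limit, the unswapped comparison wrongly rejects a pure 90-degree rotation; the swapped one accepts it.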
ret = exynos_drm_ipp_check_size_limits(dst, dst_fmt->limits,
dst_fmt->num_limits,
- rotate, swap);
+ false, swap);
if (ret)
return ret;
ret = exynos_drm_ipp_check_scale_limits(&src->rect, &dst->rect,
		src_fmt->limits, src_fmt->num_limits, swap);
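
Rotation happens on the source read path; the destination buffer is written out linearly and is never itself rotated, which is why the destination size check now passes false for rotate and keeps only the axis swap. A sketch of how the two flags are typically derived in the caller; this paraphrases the DRM_MODE_ROTATE_* bits and drm_rotation_90_or_270() from the DRM core, it is not copied from exynos_drm_ipp_task_check():

#include <stdbool.h>
#include <stdio.h>

#define DRM_MODE_ROTATE_0	(1 << 0)
#define DRM_MODE_ROTATE_90	(1 << 1)
#define DRM_MODE_ROTATE_180	(1 << 2)
#define DRM_MODE_ROTATE_270	(1 << 3)
#define DRM_MODE_ROTATE_MASK	0x0f

int main(void)
{
	unsigned int all[] = { DRM_MODE_ROTATE_0, DRM_MODE_ROTATE_90,
			       DRM_MODE_ROTATE_180, DRM_MODE_ROTATE_270 };
	int i;

	for (i = 0; i < 4; i++) {
		bool rotate = (all[i] & DRM_MODE_ROTATE_MASK) !=
			      DRM_MODE_ROTATE_0;
		bool swap = all[i] & (DRM_MODE_ROTATE_90 |
				      DRM_MODE_ROTATE_270);

		/* src check gets (rotate, false), dst gets (false, swap) */
		printf("rot 0x%x: src(%d, 0) dst(0, %d)\n", all[i],
		       rotate, swap);
	}
	return 0;
}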
struct exynos_drm_ipp *ipp = task->ipp;
unsigned long flags;
- DRM_DEBUG_DRIVER("ipp: %d, task %pK done\n", ipp->id, task);
+ DRM_DEBUG_DRIVER("ipp: %d, task %pK done: %d\n", ipp->id, task, ret);
spin_lock_irqsave(&ipp->lock, flags);
if (ipp->task == task)