int16_t mask_x, int16_t mask_y,
int16_t dst_x, int16_t dst_y,
int16_t width, int16_t height,
+ unsigned flags,
struct sna_composite_op *tmp)
{
DBG(("%s()\n", __FUNCTION__));
src_x, src_y,
dst_x, dst_y,
width, height,
- tmp, false))
+ flags, tmp))
return true;
if (gen2_composite_fallback(sna, src, mask, dst))
src_x, src_y,
dst_x, dst_y,
width, height,
- tmp, true));
+ flags | COMPOSITE_FALLBACK, tmp));
}
fastcall static void
int16_t mask_x, int16_t mask_y,
int16_t dst_x, int16_t dst_y,
int16_t width, int16_t height,
+ unsigned flags,
struct sna_composite_op *tmp)
{
DBG(("%s()\n", __FUNCTION__));
src_x, src_y,
dst_x, dst_y,
width, height,
- tmp, false))
+ flags, tmp))
return true;
if (gen3_composite_fallback(sna, op, src, mask, dst))
src_x, src_y,
dst_x, dst_y,
width, height,
- tmp, true));
+ flags | COMPOSITE_FALLBACK, tmp));
}
static void
int16_t msk_x, int16_t msk_y,
int16_t dst_x, int16_t dst_y,
int16_t width, int16_t height,
+ unsigned flags,
struct sna_composite_op *tmp)
{
DBG(("%s: %dx%d, current mode=%d\n", __FUNCTION__,
src_x, src_y,
dst_x, dst_y,
width, height,
- tmp, false))
+ flags, tmp))
return true;
if (gen4_composite_fallback(sna, src, mask, dst))
src_x, src_y,
dst_x, dst_y,
width, height,
- tmp, true));
+ flags | COMPOSITE_FALLBACK, tmp));
}
#if !NO_COMPOSITE_SPANS
int16_t msk_x, int16_t msk_y,
int16_t dst_x, int16_t dst_y,
int16_t width, int16_t height,
+ unsigned flags,
struct sna_composite_op *tmp)
{
DBG(("%s: %dx%d, current mode=%d\n", __FUNCTION__,
src_x, src_y,
dst_x, dst_y,
width, height,
- tmp, false))
+ flags, tmp))
return true;
if (gen5_composite_fallback(sna, src, mask, dst))
src_x, src_y,
dst_x, dst_y,
width, height,
- tmp, true));
+ flags | COMPOSITE_FALLBACK, tmp));
}
#if !NO_COMPOSITE_SPANS
int16_t msk_x, int16_t msk_y,
int16_t dst_x, int16_t dst_y,
int16_t width, int16_t height,
+ unsigned flags,
struct sna_composite_op *tmp)
{
if (op >= ARRAY_SIZE(gen6_blend_op))
src_x, src_y,
dst_x, dst_y,
width, height,
- tmp, false))
+ flags, tmp))
return true;
if (gen6_composite_fallback(sna, src, mask, dst))
src_x, src_y,
dst_x, dst_y,
width, height,
- tmp, true));
+ flags | COMPOSITE_FALLBACK, tmp));
}
#if !NO_COMPOSITE_SPANS
int16_t msk_x, int16_t msk_y,
int16_t dst_x, int16_t dst_y,
int16_t width, int16_t height,
+ unsigned flags,
struct sna_composite_op *tmp)
{
if (op >= ARRAY_SIZE(gen7_blend_op))
src_x, src_y,
dst_x, dst_y,
width, height,
- tmp, false))
+ flags, tmp))
return true;
if (gen7_composite_fallback(sna, src, mask, dst))
src_x, src_y,
dst_x, dst_y,
width, height,
- tmp, true));
+ flags | COMPOSITE_FALLBACK, tmp));
}
#if !NO_COMPOSITE_SPANS
int16_t msk_x, int16_t msk_y,
int16_t dst_x, int16_t dst_y,
int16_t width, int16_t height,
+ unsigned flags,
struct sna_composite_op *tmp)
{
if (op >= ARRAY_SIZE(gen8_blend_op))
src_x, src_y,
dst_x, dst_y,
width, height,
- tmp, false))
+ flags, tmp))
return true;
if (gen8_composite_fallback(sna, src, mask, dst))
src_x, src_y,
dst_x, dst_y,
width, height,
- tmp, true));
+ flags | COMPOSITE_FALLBACK, tmp));
}
#if !NO_COMPOSITE_SPANS
int16_t x, int16_t y,
int16_t dst_x, int16_t dst_y,
int16_t width, int16_t height,
- struct sna_composite_op *tmp,
- bool fallback)
+ unsigned flags,
+ struct sna_composite_op *tmp)
{
PictFormat src_format = src->format;
PixmapPtr src_pixmap;
#if DEBUG_NO_BLT || NO_BLT_COMPOSITE
return false;
#endif
-
DBG(("%s (%d, %d), (%d, %d), %dx%d\n",
__FUNCTION__, x, y, dst_x, dst_y, width, height));
hint = 0;
if (can_render(sna)) {
hint |= PREFER_GPU;
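+ /* Only apply the destination hints when the caller has not flagged
+  * this as a partial update of the drawable. */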
- if (dst->pCompositeClip->data == NULL && (width | height)) {
+ if (dst->pCompositeClip->data == NULL &&
+ (flags & COMPOSITE_PARTIAL) == 0) {
hint |= IGNORE_CPU;
- if (width == tmp->dst.pixmap->drawable.width &&
+ if (width == tmp->dst.pixmap->drawable.width &&
height == tmp->dst.pixmap->drawable.height)
hint |= REPLACES;
}
}
tmp->dst.bo = sna_drawable_use_bo(dst->pDrawable, hint,
&dst_box, &tmp->damage);
- if (tmp->dst.bo && !kgem_bo_can_blt(&sna->kgem, tmp->dst.bo)) {
- DBG(("%s: can not blit to dst, tiling? %d, pitch? %d\n",
- __FUNCTION__, tmp->dst.bo->tiling, tmp->dst.bo->pitch));
- return false;
- }
-
- if (!tmp->dst.bo) {
+ if (tmp->dst.bo) {
+ if (!kgem_bo_can_blt(&sna->kgem, tmp->dst.bo)) {
+ DBG(("%s: can not blit to dst, tiling? %d, pitch? %d\n",
+ __FUNCTION__, tmp->dst.bo->tiling, tmp->dst.bo->pitch));
+ return false;
+ }
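+ /* The destination is wholly replaced by this operation, so any pending
+  * work on the bo can presumably be discarded via kgem_bo_undo(). */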
+ if (hint & REPLACES)
+ kgem_bo_undo(&sna->kgem, tmp->dst.bo);
+ } else {
RegionRec region;
region.extents = dst_box;
if (!sna_drawable_move_region_to_cpu(dst->pDrawable, &region,
MOVE_INPLACE_HINT | MOVE_WRITE))
return false;
- } else if (hint & REPLACES)
- kgem_bo_undo(&sna->kgem, tmp->dst.bo);
+ }
return prepare_blt_clear(sna, tmp);
}
hint = 0;
if (can_render(sna)) {
hint |= PREFER_GPU;
- if (dst->pCompositeClip->data == NULL && (width | height))
+ if (dst->pCompositeClip->data == NULL &&
+ (flags & COMPOSITE_PARTIAL) == 0) {
hint |= IGNORE_CPU;
- if (width == tmp->dst.pixmap->drawable.width &&
- height == tmp->dst.pixmap->drawable.height)
- hint |= REPLACES;
+ if (width == tmp->dst.pixmap->drawable.width &&
+ height == tmp->dst.pixmap->drawable.height)
+ hint |= REPLACES;
+ }
}
tmp->dst.bo = sna_drawable_use_bo(dst->pDrawable, hint,
&dst_box, &tmp->damage);
- if (tmp->dst.bo && !kgem_bo_can_blt(&sna->kgem, tmp->dst.bo)) {
- DBG(("%s: can not blit to dst, tiling? %d, pitch? %d\n",
- __FUNCTION__, tmp->dst.bo->tiling, tmp->dst.bo->pitch));
- return false;
- }
-
- if (!tmp->dst.bo) {
+ if (tmp->dst.bo) {
+ if (!kgem_bo_can_blt(&sna->kgem, tmp->dst.bo)) {
+ DBG(("%s: can not blit to dst, tiling? %d, pitch? %d\n",
+ __FUNCTION__, tmp->dst.bo->tiling, tmp->dst.bo->pitch));
+ return false;
+ }
+ if (hint & REPLACES)
+ kgem_bo_undo(&sna->kgem, tmp->dst.bo);
+ } else {
RegionRec region;
region.extents = dst_box;
region.data = NULL;
if (!sna_drawable_move_region_to_cpu(dst->pDrawable, &region,
- MOVE_INPLACE_HINT | MOVE_WRITE))
+ MOVE_INPLACE_HINT | MOVE_WRITE))
return false;
- } else if (hint & REPLACES)
- kgem_bo_undo(&sna->kgem, tmp->dst.bo);
+ }
return prepare_blt_fill(sna, tmp, color);
}
if (src_pixmap->drawable.width <= sna->render.max_3d_size &&
src_pixmap->drawable.height <= sna->render.max_3d_size &&
bo->pitch <= sna->render.max_3d_pitch &&
- !fallback)
+ (flags & COMPOSITE_FALLBACK) == 0)
{
return false;
}
hint = 0;
if (bo || can_render(sna)) {
hint |= PREFER_GPU;
- if (dst->pCompositeClip->data == NULL && (width | height)) {
+ if (dst->pCompositeClip->data == NULL &&
+ (flags & COMPOSITE_PARTIAL) == 0) {
hint |= IGNORE_CPU;
- if (width == tmp->dst.pixmap->drawable.width &&
+ if (width == tmp->dst.pixmap->drawable.width &&
height == tmp->dst.pixmap->drawable.height)
hint |= REPLACES;
}
if (!tmp->dst.bo) {
DBG(("%s: fallback -- unaccelerated read back\n",
__FUNCTION__));
- if (fallback || !kgem_bo_is_busy(bo))
+ if (flags & COMPOSITE_FALLBACK || !kgem_bo_is_busy(bo))
goto put;
} else if (bo->snoop && tmp->dst.bo->snoop) {
DBG(("%s: fallback -- can not copy between snooped bo\n",
} else if (!kgem_bo_can_blt(&sna->kgem, tmp->dst.bo)) {
DBG(("%s: fallback -- unaccelerated upload\n",
__FUNCTION__));
- if (fallback || !kgem_bo_is_busy(bo))
+ if (flags & COMPOSITE_FALLBACK || !kgem_bo_is_busy(bo))
goto put;
} else {
ret = prepare_blt_copy(sna, tmp, bo, alpha_fixup);
- if (fallback && !ret)
+ if (flags & COMPOSITE_FALLBACK && !ret)
goto put;
}
} else {
region.extents.y1,
region.extents.x2 - region.extents.x1,
region.extents.y2 - region.extents.y1,
- memset(&tmp, 0, sizeof(tmp)))) {
+ 0, memset(&tmp, 0, sizeof(tmp)))) {
DBG(("%s: fallback due unhandled composite op\n", __FUNCTION__));
goto fallback;
}
0, 0,
0, 0,
crtc->mode.HDisplay, crtc->mode.VDisplay,
- memset(&tmp, 0, sizeof(tmp)))) {
+ 0, memset(&tmp, 0, sizeof(tmp)))) {
DBG(("%s: unsupported operation!\n", __FUNCTION__));
sna_crtc_redisplay__fallback(crtc, region, bo);
goto free_dst;
op, src, p->atlas, dst,
0, 0, 0, 0, 0, 0,
0, 0,
- &tmp))
+ COMPOSITE_PARTIAL, &tmp))
return false;
glyph_atlas = p->atlas;
op, src, p->atlas, dst,
0, 0, 0, 0, 0, 0,
0, 0,
- &tmp))
+ COMPOSITE_PARTIAL, &tmp))
return false;
glyph_atlas = p->atlas;
y - glyph->info.y,
glyph->info.width,
glyph->info.height,
- memset(&tmp, 0, sizeof(tmp))))
+ COMPOSITE_PARTIAL, memset(&tmp, 0, sizeof(tmp))))
return false;
rects = REGION_RECTS(dst->pCompositeClip);
p->atlas, NULL, mask,
0, 0, 0, 0, 0, 0,
0, 0,
- &tmp);
+ COMPOSITE_PARTIAL, &tmp);
} else {
ok = sna->render.composite(sna, PictOpAdd,
sna->render.white_picture, p->atlas, mask,
0, 0, 0, 0, 0, 0,
0, 0,
- &tmp);
+ COMPOSITE_PARTIAL, &tmp);
}
if (!ok) {
DBG(("%s: fallback -- can not handle PictOpAdd of glyph onto mask!\n",
int16_t mask_x, int16_t mask_y,
int16_t dst_x, int16_t dst_y,
int16_t width, int16_t height,
+ unsigned flags,
struct sna_composite_op *tmp)
{
DBG(("%s (op=%d, mask? %d)\n", __FUNCTION__, op, mask != NULL));
src_x, src_y,
dst_x, dst_y,
width, height,
- tmp, true);
+ flags | COMPOSITE_FALLBACK, tmp);
(void)mask_x;
(void)mask_y;
}
0, 0,
b.x1, b.y1,
b.x2 - b.x1, b.y2 - b.y1,
- &op))
+ 0, &op))
goto cleanup_src;
op.box(sna, &op, &b);
int16_t msk_x, int16_t msk_y,
int16_t dst_x, int16_t dst_y,
int16_t w, int16_t h,
+ unsigned flags,
struct sna_composite_op *tmp);
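+ /* Flags for the composite() hooks: COMPOSITE_PARTIAL marks a request that
+  * covers only part of the destination drawable; COMPOSITE_FALLBACK replaces
+  * the old trailing "bool fallback" argument on the BLT paths. */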
+#define COMPOSITE_PARTIAL 0x1
+#define COMPOSITE_FALLBACK 0x80000000
bool (*check_composite_spans)(struct sna *sna, uint8_t op,
PicturePtr dst, PicturePtr src,
int16_t src_x, int16_t src_y,
int16_t dst_x, int16_t dst_y,
int16_t width, int16_t height,
- struct sna_composite_op *tmp,
- bool fallback);
+ unsigned flags,
+ struct sna_composite_op *tmp);
bool sna_blt_composite__convert(struct sna *sna,
int x, int y,
int width, int height,
tile->mask_x + x, tile->mask_y + y,
tile->dst_x + x, tile->dst_y + y,
width, height,
- memset(&tmp, 0, sizeof(tmp)))) {
+ COMPOSITE_PARTIAL, memset(&tmp, 0, sizeof(tmp)))) {
for (n = 0; n < tile->rect_count; n++) {
const struct sna_composite_rectangles *r = &tile->rects[n];
int x1, x2, dx, y1, y2, dy;
clip.extents.x1, clip.extents.y1,
clip.extents.x2 - clip.extents.x1,
clip.extents.y2 - clip.extents.y1,
- memset(&tmp, 0, sizeof(tmp)))) {
+ COMPOSITE_PARTIAL, memset(&tmp, 0, sizeof(tmp)))) {
unsigned int flags;
pixman_box16_t *b;
int i, count;
mono.clip.extents.x1, mono.clip.extents.y1,
mono.clip.extents.x2 - mono.clip.extents.x1,
mono.clip.extents.y2 - mono.clip.extents.y1,
- memset(&mono.op, 0, sizeof(mono.op))))
+ COMPOSITE_PARTIAL, memset(&mono.op, 0, sizeof(mono.op))))
return false;
num_threads = 1;
traps[n].top, traps[n].bottom,
&traps[n].right.p1, &traps[n].right.p2, -1);
}
- memset(&mono.op, 0, sizeof(mono.op));
if (mono.sna->render.composite(mono.sna,
PictOpClear,
mono.sna->clear, NULL, dst,
mono.clip.extents.x1, mono.clip.extents.y1,
mono.clip.extents.x2 - mono.clip.extents.x1,
mono.clip.extents.y2 - mono.clip.extents.y1,
- &mono.op)) {
+ COMPOSITE_PARTIAL, memset(&mono.op, 0, sizeof(mono.op)))) {
mono_render(&mono);
mono.op.done(mono.sna, &mono.op);
}
&p1, &p2, -1);
}
- memset(&mono.op, 0, sizeof(mono.op));
if (mono.sna->render.composite(mono.sna, PictOpAdd, src, NULL, dst,
- 0, 0,
- 0, 0,
- mono.clip.extents.x1, mono.clip.extents.y1,
- mono.clip.extents.x2 - mono.clip.extents.x1,
- mono.clip.extents.y2 - mono.clip.extents.y1,
- &mono.op)) {
+ 0, 0,
+ 0, 0,
+ mono.clip.extents.x1, mono.clip.extents.y1,
+ mono.clip.extents.x2 - mono.clip.extents.x1,
+ mono.clip.extents.y2 - mono.clip.extents.y1,
+ COMPOSITE_PARTIAL, memset(&mono.op, 0, sizeof(mono.op)))) {
mono_render(&mono);
mono.op.done(mono.sna, &mono.op);
}
&tri[n].p3, &tri[n].p1, 1);
}
- memset(&mono.op, 0, sizeof(mono.op));
if (mono.sna->render.composite(mono.sna, op, src, NULL, dst,
src_x + mono.clip.extents.x1 - dst_x - dx,
src_y + mono.clip.extents.y1 - dst_y - dy,
mono.clip.extents.x1, mono.clip.extents.y1,
mono.clip.extents.x2 - mono.clip.extents.x1,
mono.clip.extents.y2 - mono.clip.extents.y1,
- &mono.op)) {
+ COMPOSITE_PARTIAL, memset(&mono.op, 0, sizeof(mono.op)))) {
if (mono.clip.data == NULL && mono.op.damage == NULL)
mono.span = mono_span__fast;
else
&tri[n].p3, &tri[n].p1, 1);
}
- memset(&mono.op, 0, sizeof(mono.op));
if (mono.sna->render.composite(mono.sna,
PictOpClear,
mono.sna->clear, NULL, dst,
mono.clip.extents.x1, mono.clip.extents.y1,
mono.clip.extents.x2 - mono.clip.extents.x1,
mono.clip.extents.y2 - mono.clip.extents.y1,
- &mono.op)) {
+ COMPOSITE_PARTIAL, memset(&mono.op, 0, sizeof(mono.op)))) {
if (mono.clip.data == NULL && mono.op.damage == NULL)
mono.span = mono_span__fast;
else