sna/gen4: Hook up composite spans

Due to the unresolved flushing bug it is no faster (so it is only enabled
when we definitely cannot do the operation in place); however, it does
eliminate a chunk of CPU overhead.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
This commit is contained in:
Chris Wilson 2012-07-19 10:50:09 +01:00
parent 5f138176bf
commit e4fce3b780
9 changed files with 535 additions and 122 deletions

View File

@ -2191,6 +2191,28 @@ gen2_render_composite_spans_done(struct sna *sna,
sna_render_composite_redirect_done(sna, &op->base);
}
/* Quick feasibility check for the gen2 span-composite path: verify the
 * blend op is representable, the composite path would not fall back,
 * and any tiled (oversized) operation has a GPU-resident target. */
static bool
gen2_check_composite_spans(struct sna *sna,
			   uint8_t op, PicturePtr src, PicturePtr dst,
			   int16_t width, int16_t height, unsigned flags)
{
	/* Blend ops beyond the gen2 table cannot be rendered here. */
	if (op >= ARRAY_SIZE(gen2_blend_op))
		return false;

	/* Anything the composite path would punt on, we punt on too. */
	if (gen2_composite_fallback(sna, src, NULL, dst))
		return false;

	/* An oversized operation must be tiled; only worth it on the GPU. */
	if (need_tiling(sna, width, height) && !is_gpu(dst->pDrawable)) {
		DBG(("%s: fallback, tiled operation not on GPU\n",
		     __FUNCTION__));
		return false;
	}

	return true;
}
static bool
gen2_render_composite_spans(struct sna *sna,
uint8_t op,
@ -2205,29 +2227,10 @@ gen2_render_composite_spans(struct sna *sna,
DBG(("%s(src=(%d, %d), dst=(%d, %d), size=(%d, %d))\n", __FUNCTION__,
src_x, src_y, dst_x, dst_y, width, height));
#if NO_COMPOSITE_SPANS
return false;
#endif
if (op >= ARRAY_SIZE(gen2_blend_op)) {
DBG(("%s: fallback due to unhandled blend op: %d\n",
__FUNCTION__, op));
return false;
}
if (gen2_composite_fallback(sna, src, NULL, dst))
return false;
assert(gen2_check_composite_spans(sna, op, src, dst, width, height, flags));
if (need_tiling(sna, width, height)) {
DBG(("%s: tiling, operation (%dx%d) too wide for pipeline\n",
__FUNCTION__, width, height));
if (!is_gpu(dst->pDrawable)) {
DBG(("%s: fallback, tiled operation not on GPU\n",
__FUNCTION__));
return false;
}
return sna_tiling_composite_spans(op, src, dst,
src_x, src_y, dst_x, dst_y,
width, height, flags, tmp);
@ -3134,7 +3137,10 @@ bool gen2_render_init(struct sna *sna)
* use the texture combiners.
*/
render->composite = gen2_render_composite;
#if !NO_COMPOSITE_SPANS
render->check_composite_spans = gen2_check_composite_spans;
render->composite_spans = gen2_render_composite_spans;
#endif
render->fill_boxes = gen2_render_fill_boxes;
render->fill = gen2_render_fill;
render->fill_one = gen2_render_fill_one;

View File

@ -3367,6 +3367,28 @@ gen3_render_composite_spans_done(struct sna *sna,
sna_render_composite_redirect_done(sna, &op->base);
}
/* Quick feasibility check for the gen3 span-composite path: verify the
 * blend op is representable, the composite path would not fall back,
 * and any tiled (oversized) operation has a GPU-resident target. */
static bool
gen3_check_composite_spans(struct sna *sna,
			   uint8_t op, PicturePtr src, PicturePtr dst,
			   int16_t width, int16_t height, unsigned flags)
{
	/* Blend ops beyond the gen3 table cannot be rendered here. */
	if (op >= ARRAY_SIZE(gen3_blend_op))
		return false;

	/* Anything the composite path would punt on, we punt on too. */
	if (gen3_composite_fallback(sna, op, src, NULL, dst))
		return false;

	/* An oversized operation must be tiled; only worth it on the GPU. */
	if (need_tiling(sna, width, height) && !is_gpu(dst->pDrawable)) {
		DBG(("%s: fallback, tiled operation not on GPU\n",
		     __FUNCTION__));
		return false;
	}

	return true;
}
static bool
gen3_render_composite_spans(struct sna *sna,
uint8_t op,
@ -3383,29 +3405,11 @@ gen3_render_composite_spans(struct sna *sna,
DBG(("%s(src=(%d, %d), dst=(%d, %d), size=(%d, %d))\n", __FUNCTION__,
src_x, src_y, dst_x, dst_y, width, height));
#if NO_COMPOSITE_SPANS
return false;
#endif
if (op >= ARRAY_SIZE(gen3_blend_op)) {
DBG(("%s: fallback due to unhandled blend op: %d\n",
__FUNCTION__, op));
return false;
}
if (gen3_composite_fallback(sna, op, src, NULL, dst))
return false;
assert(gen3_check_composite_spans(sna, op, src, dst, width, height, flags));
if (need_tiling(sna, width, height)) {
DBG(("%s: tiling, operation (%dx%d) too wide for pipeline\n",
__FUNCTION__, width, height));
if (!is_gpu(dst->pDrawable)) {
DBG(("%s: fallback, tiled operation not on GPU\n",
__FUNCTION__));
return false;
}
return sna_tiling_composite_spans(op, src, dst,
src_x, src_y, dst_x, dst_y,
width, height, flags, tmp);
@ -4666,7 +4670,10 @@ bool gen3_render_init(struct sna *sna)
struct sna_render *render = &sna->render;
render->composite = gen3_render_composite;
#if !NO_COMPOSITE_SPANS
render->check_composite_spans = gen3_check_composite_spans;
render->composite_spans = gen3_render_composite_spans;
#endif
render->video = gen3_render_video;

View File

@ -50,6 +50,7 @@
#define FLUSH_EVERY_VERTEX 1
#define NO_COMPOSITE 0
#define NO_COMPOSITE_SPANS 0
#define NO_COPY 0
#define NO_COPY_BOXES 0
#define NO_FILL 0
@ -61,8 +62,13 @@
gen4_magic_ca_pass(sna, OP); \
OUT_BATCH(MI_FLUSH | MI_INHIBIT_RENDER_CACHE_FLUSH); \
} while (0)
#define FLUSH_NOCA() do { \
gen4_vertex_flush(sna); \
OUT_BATCH(MI_FLUSH | MI_INHIBIT_RENDER_CACHE_FLUSH); \
} while (0)
#else
#define FLUSH(OP)
#define FLUSH_NOCA()
#endif
#define GEN4_GRF_BLOCKS(nreg) ((nreg + 15) / 16 - 1)
@ -2494,6 +2500,320 @@ cleanup_dst:
return false;
}
/* A poor man's span interface. But better than nothing? */
#if !NO_COMPOSITE_SPANS
/* Configure @channel to sample the shared 256x1 alpha-gradient texture
 * (presumably indexed by per-span opacity — confirm against the span
 * vertex emitters below).  Returns false if the gradient bo is
 * unavailable; note callers must check this before binding surfaces. */
static bool
gen4_composite_alpha_gradient_init(struct sna *sna,
				   struct sna_composite_channel *channel)
{
	DBG(("%s\n", __FUNCTION__));

	/* Nearest filtering with pad-repeat clamps opacity to [0, 1]. */
	channel->filter = PictFilterNearest;
	channel->repeat = RepeatPad;
	channel->is_affine = true;
	channel->is_solid = false;
	channel->transform = NULL;
	channel->width = 256;
	channel->height = 1;
	channel->card_format = GEN4_SURFACEFORMAT_B8G8R8A8_UNORM;

	/* Shared, cached bo — may be NULL on allocation failure. */
	channel->bo = sna_render_get_alpha_gradient(sna);

	/* Identity mapping: coordinates are used as-is. */
	channel->scale[0] = channel->scale[1] = 1;
	channel->offset[0] = channel->offset[1] = 0;
	return channel->bo != NULL;
}
/* Emit the texture coordinates for one vertex of @channel at destination
 * (x, y): apply the channel offset, run the picture transform, then scale.
 * Affine channels emit two floats (s, t); non-affine (projective) channels
 * emit three (s, t, w). */
inline static void
gen4_emit_composite_texcoord(struct sna *sna,
			     const struct sna_composite_channel *channel,
			     int16_t x, int16_t y)
{
	float t[3];

	if (channel->is_affine) {
		sna_get_transformed_coordinates(x + channel->offset[0],
						y + channel->offset[1],
						channel->transform,
						&t[0], &t[1]);
		OUT_VERTEX_F(t[0] * channel->scale[0]);
		OUT_VERTEX_F(t[1] * channel->scale[1]);
	} else {
		/* Initialise in case the 3d transform leaves components
		 * untouched. */
		t[0] = t[1] = 0; t[2] = 1;
		sna_get_transformed_coordinates_3d(x + channel->offset[0],
						   y + channel->offset[1],
						   channel->transform,
						   &t[0], &t[1], &t[2]);
		OUT_VERTEX_F(t[0] * channel->scale[0]);
		OUT_VERTEX_F(t[1] * channel->scale[1]);
		OUT_VERTEX_F(t[2]);
	}
}
/* Fast path of gen4_emit_composite_texcoord for channels known to be
 * affine: always emits exactly two floats (s, t), skipping the
 * per-vertex is_affine branch. */
inline static void
gen4_emit_composite_texcoord_affine(struct sna *sna,
				    const struct sna_composite_channel *channel,
				    int16_t x, int16_t y)
{
	float t[2];

	sna_get_transformed_coordinates(x + channel->offset[0],
					y + channel->offset[1],
					channel->transform,
					&t[0], &t[1]);
	OUT_VERTEX_F(t[0] * channel->scale[0]);
	OUT_VERTEX_F(t[1] * channel->scale[1]);
}
/* Emit one span vertex: screen position followed by the source-channel
 * texture coordinates for that position. */
inline static void
gen4_emit_composite_spans_vertex(struct sna *sna,
				 const struct sna_composite_spans_op *op,
				 int16_t x, int16_t y)
{
	OUT_VERTEX(x, y);
	gen4_emit_composite_texcoord(sna, &op->base.src, x, y);
}
/* Generic span emitter: three vertices per box, in the order
 * (x2,y2), (x1,y2), (x1,y1).  After each position+source-texcoord pair
 * we emit the mask coordinate: the span opacity plus a 1/0 second
 * component, and an extra 1 (w) when the source is projective.
 * NOTE(review): the opacity float is presumably the lookup coordinate
 * into the alpha-gradient mask set up in
 * gen4_composite_alpha_gradient_init — confirm against the shader. */
fastcall static void
gen4_emit_composite_spans_primitive(struct sna *sna,
				    const struct sna_composite_spans_op *op,
				    const BoxRec *box,
				    float opacity)
{
	gen4_emit_composite_spans_vertex(sna, op, box->x2, box->y2);
	OUT_VERTEX_F(opacity);
	OUT_VERTEX_F(1);
	if (!op->base.is_affine)
		OUT_VERTEX_F(1);

	gen4_emit_composite_spans_vertex(sna, op, box->x1, box->y2);
	OUT_VERTEX_F(opacity);
	OUT_VERTEX_F(1);
	if (!op->base.is_affine)
		OUT_VERTEX_F(1);

	gen4_emit_composite_spans_vertex(sna, op, box->x1, box->y1);
	OUT_VERTEX_F(opacity);
	OUT_VERTEX_F(0);
	if (!op->base.is_affine)
		OUT_VERTEX_F(1);
}
/* Fast-path span emitter for a solid source: the source texcoords are
 * the fixed corner coordinates (1,1), (0,1), (0,0), so no transform is
 * evaluated; the mask coordinate (opacity, 1/0) follows each vertex. */
fastcall static void
gen4_emit_composite_spans_solid(struct sna *sna,
				const struct sna_composite_spans_op *op,
				const BoxRec *box,
				float opacity)
{
	OUT_VERTEX(box->x2, box->y2);
	OUT_VERTEX_F(1); OUT_VERTEX_F(1);
	OUT_VERTEX_F(opacity); OUT_VERTEX_F(1);

	OUT_VERTEX(box->x1, box->y2);
	OUT_VERTEX_F(0); OUT_VERTEX_F(1);
	OUT_VERTEX_F(opacity); OUT_VERTEX_F(1);

	OUT_VERTEX(box->x1, box->y1);
	OUT_VERTEX_F(0); OUT_VERTEX_F(0);
	OUT_VERTEX_F(opacity); OUT_VERTEX_F(0);
}
/* Fast-path span emitter for an affine source: uses the two-float
 * affine texcoord helper (no projective w), followed by the mask
 * coordinate (opacity, 1/0) per vertex. */
fastcall static void
gen4_emit_composite_spans_affine(struct sna *sna,
				 const struct sna_composite_spans_op *op,
				 const BoxRec *box,
				 float opacity)
{
	OUT_VERTEX(box->x2, box->y2);
	gen4_emit_composite_texcoord_affine(sna, &op->base.src,
					    box->x2, box->y2);
	OUT_VERTEX_F(opacity);
	OUT_VERTEX_F(1);

	OUT_VERTEX(box->x1, box->y2);
	gen4_emit_composite_texcoord_affine(sna, &op->base.src,
					    box->x1, box->y2);
	OUT_VERTEX_F(opacity);
	OUT_VERTEX_F(1);

	OUT_VERTEX(box->x1, box->y1);
	gen4_emit_composite_texcoord_affine(sna, &op->base.src,
					    box->x1, box->y1);
	OUT_VERTEX_F(opacity);
	OUT_VERTEX_F(0);
}
/* Render a single span box: reserve one rectangle's worth of vertex
 * space, emit it via the chosen prim_emit, then flush.
 * NOTE(review): the FLUSH_NOCA() after every box matches the
 * FLUSH_EVERY_VERTEX workaround above — presumably for the unresolved
 * gen4 flushing bug; it is why this path is no faster than the CPU. */
fastcall static void
gen4_render_composite_spans_box(struct sna *sna,
				const struct sna_composite_spans_op *op,
				const BoxRec *box, float opacity)
{
	DBG(("%s: src=+(%d, %d), opacity=%f, dst=+(%d, %d), box=(%d, %d) x (%d, %d)\n",
	     __FUNCTION__,
	     op->base.src.offset[0], op->base.src.offset[1],
	     opacity,
	     op->base.dst.x, op->base.dst.y,
	     box->x1, box->y1,
	     box->x2 - box->x1,
	     box->y2 - box->y1));

	gen4_get_rectangles(sna, &op->base, 1, gen4_bind_surfaces);
	op->prim_emit(sna, op, box, opacity);

	FLUSH_NOCA();
}
/* Render a run of span boxes sharing one opacity.
 * The do/while requires nbox > 0 — callers must not pass zero. */
static void
gen4_render_composite_spans_boxes(struct sna *sna,
				  const struct sna_composite_spans_op *op,
				  const BoxRec *box, int nbox,
				  float opacity)
{
	DBG(("%s: nbox=%d, src=+(%d, %d), opacity=%f, dst=+(%d, %d)\n",
	     __FUNCTION__, nbox,
	     op->base.src.offset[0], op->base.src.offset[1],
	     opacity,
	     op->base.dst.x, op->base.dst.y));

	do {
		gen4_render_composite_spans_box(sna, op, box++, opacity);
	} while (--nbox);
}
/* Finish a span-composite operation: flush any pending vertices,
 * release the source bo reference, and undo any render-target
 * redirection set up at operation start. */
fastcall static void
gen4_render_composite_spans_done(struct sna *sna,
				 const struct sna_composite_spans_op *op)
{
	/* Only flush if vertices are still queued in the batch. */
	if (sna->render_state.gen4.vertex_offset)
		gen4_vertex_flush(sna);

	DBG(("%s()\n", __FUNCTION__));

	if (op->base.src.bo)
		kgem_bo_destroy(&sna->kgem, op->base.src.bo);

	sna_render_composite_redirect_done(sna, &op->base);
}
/* Feasibility check for the gen4 span path.  Unlike gen5+, gen4 accepts
 * only rectilinear spans and only GPU-resident targets: the per-box
 * flush workaround makes the path slow, so it is restricted to cases
 * where we definitely cannot operate in place. */
static bool
gen4_check_composite_spans(struct sna *sna,
			   uint8_t op, PicturePtr src, PicturePtr dst,
			   int16_t width, int16_t height,
			   unsigned flags)
{
	if ((flags & COMPOSITE_SPANS_RECTILINEAR) == 0 ||
	    op >= ARRAY_SIZE(gen4_blend_op))
		return false;

	return !gen4_composite_fallback(sna, src, NULL, dst) &&
	       is_gpu(dst->pDrawable);
}
/* Prepare a gen4 span-composite operation into @tmp: resolve the target
 * (redirecting if oversized), upload/convert the source channel, attach
 * the alpha-gradient mask, and select the vertex emitter and WM kernel.
 * Returns false (with all temporaries released) if the operation cannot
 * be set up; callers then fall back.  Feasibility must already have been
 * established via gen4_check_composite_spans(). */
static bool
gen4_render_composite_spans(struct sna *sna,
			    uint8_t op,
			    PicturePtr src,
			    PicturePtr dst,
			    int16_t src_x, int16_t src_y,
			    int16_t dst_x, int16_t dst_y,
			    int16_t width, int16_t height,
			    unsigned flags,
			    struct sna_composite_spans_op *tmp)
{
	DBG(("%s: %dx%d with flags=%x, current mode=%d\n", __FUNCTION__,
	     width, height, flags, sna->kgem.ring));

	assert(gen4_check_composite_spans(sna, op, src, dst, width, height, flags));

	/* Operations too wide for the 3D pipeline are split into tiles. */
	if (need_tiling(sna, width, height)) {
		DBG(("%s: tiling, operation (%dx%d) too wide for pipeline\n",
		     __FUNCTION__, width, height));
		return sna_tiling_composite_spans(op, src, dst,
						  src_x, src_y, dst_x, dst_y,
						  width, height, flags, tmp);
	}

	tmp->base.op = op;
	if (!gen4_composite_set_target(dst, &tmp->base))
		return false;
	sna_render_reduce_damage(&tmp->base, dst_x, dst_y, width, height);

	/* Redirect rendering through a proxy if the target exceeds
	 * hardware limits. */
	if (too_large(tmp->base.dst.width, tmp->base.dst.height)) {
		if (!sna_render_composite_redirect(sna, &tmp->base,
						   dst_x, dst_y, width, height))
			return false;
	}

	switch (gen4_composite_picture(sna, src, &tmp->base.src,
				       src_x, src_y,
				       width, height,
				       dst_x, dst_y,
				       dst->polyMode == PolyModePrecise)) {
	case -1:
		goto cleanup_dst;
	case 0:
		gen4_composite_solid_init(sna, &tmp->base.src, 0);
		/* fall through to fixup */
	case 1:
		gen4_composite_channel_convert(&tmp->base.src);
		break;
	}

	tmp->base.mask.bo = NULL;
	tmp->base.is_affine = tmp->base.src.is_affine;
	tmp->base.has_component_alpha = false;
	tmp->base.need_magic_ca_pass = false;

	/* BUGFIX: check for alpha-gradient allocation failure — the
	 * original ignored the return value, leaving mask.bo NULL and
	 * crashing later when the mask surface is bound. */
	if (!gen4_composite_alpha_gradient_init(sna, &tmp->base.mask))
		goto cleanup_src;

	/* Pick the cheapest emitter the source allows. */
	tmp->prim_emit = gen4_emit_composite_spans_primitive;
	if (tmp->base.src.is_solid)
		tmp->prim_emit = gen4_emit_composite_spans_solid;
	else if (tmp->base.is_affine)
		tmp->prim_emit = gen4_emit_composite_spans_affine;
	/* 2 position + 2 src + 2 mask floats, plus 2 more (src w + mask w)
	 * for projective sources. */
	tmp->base.floats_per_vertex = 5 + 2*!tmp->base.is_affine;
	tmp->base.floats_per_rect = 3 * tmp->base.floats_per_vertex;

	tmp->base.u.gen4.wm_kernel =
		gen4_choose_composite_kernel(tmp->base.op,
					     true, false,
					     tmp->base.is_affine);
	tmp->base.u.gen4.ve_id = 1 << 1 | tmp->base.is_affine;

	tmp->box   = gen4_render_composite_spans_box;
	tmp->boxes = gen4_render_composite_spans_boxes;
	tmp->done  = gen4_render_composite_spans_done;

	/* Make sure the batch can hold all referenced bo, submitting the
	 * current batch once to free space if needed. */
	if (!kgem_check_bo(&sna->kgem,
			   tmp->base.dst.bo, tmp->base.src.bo,
			   NULL)) {
		kgem_submit(&sna->kgem);
		if (!kgem_check_bo(&sna->kgem,
				   tmp->base.dst.bo, tmp->base.src.bo,
				   NULL))
			goto cleanup_src;
	}

	gen4_bind_surfaces(sna, &tmp->base);
	gen4_align_vertex(sna, &tmp->base);
	return true;

cleanup_src:
	if (tmp->base.src.bo)
		kgem_bo_destroy(&sna->kgem, tmp->base.src.bo);
cleanup_dst:
	if (tmp->base.redirect.real_bo)
		kgem_bo_destroy(&sna->kgem, tmp->base.dst.bo);
	return false;
}
#endif
static void
gen4_copy_bind_surfaces(struct sna *sna, const struct sna_composite_op *op)
{
@ -3472,6 +3792,10 @@ bool gen4_render_init(struct sna *sna)
return false;
sna->render.composite = gen4_render_composite;
#if !NO_COMPOSITE_SPANS
sna->render.check_composite_spans = gen4_check_composite_spans;
sna->render.composite_spans = gen4_render_composite_spans;
#endif
sna->render.video = gen4_render_video;
sna->render.copy_boxes = gen4_render_copy_boxes;

View File

@ -2728,6 +2728,31 @@ gen5_render_composite_spans_done(struct sna *sna,
sna_render_composite_redirect_done(sna, &op->base);
}
/* Feasibility check for the gen5 span path: rectilinear spans only,
 * blend op must be in the gen5 table, the composite path must not fall
 * back, and a tiled (oversized) operation needs a GPU-resident target. */
static bool
gen5_check_composite_spans(struct sna *sna,
			   uint8_t op, PicturePtr src, PicturePtr dst,
			   int16_t width, int16_t height, unsigned flags)
{
	if ((flags & COMPOSITE_SPANS_RECTILINEAR) == 0)
		return false;

	if (op >= ARRAY_SIZE(gen5_blend_op))
		return false;

	if (gen5_composite_fallback(sna, src, NULL, dst))
		return false;

	/* An oversized operation must be tiled; only worth it on the GPU. */
	if (need_tiling(sna, width, height) && !is_gpu(dst->pDrawable)) {
		DBG(("%s: fallback, tiled operation not on GPU\n",
		     __FUNCTION__));
		return false;
	}

	return true;
}
static bool
gen5_render_composite_spans(struct sna *sna,
uint8_t op,
@ -2742,25 +2767,11 @@ gen5_render_composite_spans(struct sna *sna,
DBG(("%s: %dx%d with flags=%x, current mode=%d\n", __FUNCTION__,
width, height, flags, sna->kgem.ring));
if ((flags & COMPOSITE_SPANS_RECTILINEAR) == 0)
return false;
if (op >= ARRAY_SIZE(gen5_blend_op))
return false;
if (gen5_composite_fallback(sna, src, NULL, dst))
return false;
assert(gen5_check_composite_spans(sna, op, src, dst, width, height, flags));
if (need_tiling(sna, width, height)) {
DBG(("%s: tiling, operation (%dx%d) too wide for pipeline\n",
__FUNCTION__, width, height));
if (!is_gpu(dst->pDrawable)) {
DBG(("%s: fallback, tiled operation not on GPU\n",
__FUNCTION__));
return false;
}
return sna_tiling_composite_spans(op, src, dst,
src_x, src_y, dst_x, dst_y,
width, height, flags, tmp);
@ -3924,6 +3935,7 @@ bool gen5_render_init(struct sna *sna)
sna->render.composite = gen5_render_composite;
#if !NO_COMPOSITE_SPANS
sna->render.check_composite_spans = gen5_check_composite_spans;
sna->render.composite_spans = gen5_render_composite_spans;
#endif
sna->render.video = gen5_render_video;

View File

@ -3083,6 +3083,31 @@ gen6_render_composite_spans_done(struct sna *sna,
sna_render_composite_redirect_done(sna, &op->base);
}
/* Feasibility check for the gen6 span path: rectilinear spans only,
 * blend op must be in the gen6 table, the composite path must not fall
 * back, and a tiled (oversized) operation needs a GPU-resident target. */
static bool
gen6_check_composite_spans(struct sna *sna,
			   uint8_t op, PicturePtr src, PicturePtr dst,
			   int16_t width, int16_t height, unsigned flags)
{
	if ((flags & COMPOSITE_SPANS_RECTILINEAR) == 0)
		return false;

	if (op >= ARRAY_SIZE(gen6_blend_op))
		return false;

	if (gen6_composite_fallback(sna, src, NULL, dst))
		return false;

	/* An oversized operation must be tiled; only worth it on the GPU. */
	if (need_tiling(sna, width, height) && !is_gpu(dst->pDrawable)) {
		DBG(("%s: fallback, tiled operation not on GPU\n",
		     __FUNCTION__));
		return false;
	}

	return true;
}
static bool
gen6_render_composite_spans(struct sna *sna,
uint8_t op,
@ -3097,25 +3122,11 @@ gen6_render_composite_spans(struct sna *sna,
DBG(("%s: %dx%d with flags=%x, current mode=%d\n", __FUNCTION__,
width, height, flags, sna->kgem.ring));
if ((flags & COMPOSITE_SPANS_RECTILINEAR) == 0)
return false;
if (op >= ARRAY_SIZE(gen6_blend_op))
return false;
if (gen6_composite_fallback(sna, src, NULL, dst))
return false;
assert(gen6_check_composite_spans(sna, op, src, dst, width, height, flags));
if (need_tiling(sna, width, height)) {
DBG(("%s: tiling, operation (%dx%d) too wide for pipeline\n",
__FUNCTION__, width, height));
if (!is_gpu(dst->pDrawable)) {
DBG(("%s: fallback, tiled operation not on GPU\n",
__FUNCTION__));
return false;
}
return sna_tiling_composite_spans(op, src, dst,
src_x, src_y, dst_x, dst_y,
width, height, flags, tmp);
@ -4232,6 +4243,7 @@ bool gen6_render_init(struct sna *sna)
sna->render.composite = gen6_render_composite;
#endif
#if !NO_COMPOSITE_SPANS
sna->render.check_composite_spans = gen6_check_composite_spans;
sna->render.composite_spans = gen6_render_composite_spans;
#endif
sna->render.video = gen6_render_video;

View File

@ -3165,6 +3165,28 @@ gen7_render_composite_spans_done(struct sna *sna,
sna_render_composite_redirect_done(sna, &op->base);
}
/* Feasibility check for the gen7 span path: blend op must be in the
 * gen7 table, the composite path must not fall back, and a tiled
 * (oversized) operation needs a GPU-resident target. */
static bool
gen7_check_composite_spans(struct sna *sna,
			   uint8_t op, PicturePtr src, PicturePtr dst,
			   int16_t width, int16_t height, unsigned flags)
{
	if (op >= ARRAY_SIZE(gen7_blend_op))
		return false;

	if (gen7_composite_fallback(sna, src, NULL, dst))
		return false;

	/* An oversized operation must be tiled; only worth it on the GPU. */
	if (need_tiling(sna, width, height) && !is_gpu(dst->pDrawable)) {
		DBG(("%s: fallback, tiled operation not on GPU\n",
		     __FUNCTION__));
		return false;
	}

	return true;
}
static bool
gen7_render_composite_spans(struct sna *sna,
uint8_t op,
@ -3179,22 +3201,11 @@ gen7_render_composite_spans(struct sna *sna,
DBG(("%s: %dx%d with flags=%x, current mode=%d\n", __FUNCTION__,
width, height, flags, sna->kgem.ring));
if (op >= ARRAY_SIZE(gen7_blend_op))
return false;
if (gen7_composite_fallback(sna, src, NULL, dst))
return false;
assert(gen7_check_composite_spans(sna, op, src, dst, width, height, flags));
if (need_tiling(sna, width, height)) {
DBG(("%s: tiling, operation (%dx%d) too wide for pipeline\n",
__FUNCTION__, width, height));
if (!is_gpu(dst->pDrawable)) {
DBG(("%s: fallback, tiled operation not on GPU\n",
__FUNCTION__));
return false;
}
return sna_tiling_composite_spans(op, src, dst,
src_x, src_y, dst_x, dst_y,
width, height, flags, tmp);
@ -4313,6 +4324,7 @@ bool gen7_render_init(struct sna *sna)
sna->render.composite = gen7_render_composite;
#endif
#if !NO_COMPOSITE_SPANS
sna->render.check_composite_spans = gen7_check_composite_spans;
sna->render.composite_spans = gen7_render_composite_spans;
#endif
sna->render.video = gen7_render_video;

View File

@ -91,13 +91,21 @@ no_render_composite(struct sna *sna,
dst_x, dst_y,
width, height,
tmp))
return TRUE;
return true;
return FALSE;
return false;
(void)mask_x;
(void)mask_y;
}
/* Software-fallback backend: composite spans are never accelerated. */
static bool
no_render_check_composite_spans(struct sna *sna,
				uint8_t op, PicturePtr src, PicturePtr dst,
				int16_t width, int16_t height, unsigned flags)
{
	(void)sna;
	(void)op;
	(void)src;
	(void)dst;
	(void)width;
	(void)height;
	(void)flags;
	return false;
}
static bool
no_render_copy_boxes(struct sna *sna, uint8_t alu,
PixmapPtr src, struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
@ -107,7 +115,7 @@ no_render_copy_boxes(struct sna *sna, uint8_t alu,
DBG(("%s (n=%d)\n", __FUNCTION__, n));
if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
return FALSE;
return false;
return sna_blt_copy_boxes(sna, alu,
src_bo, src_dx, src_dy,
@ -128,9 +136,9 @@ no_render_copy(struct sna *sna, uint8_t alu,
sna_blt_copy(sna, alu,
src_bo, dst_bo, dst->drawable.bitsPerPixel,
tmp))
return TRUE;
return true;
return FALSE;
return false;
}
static bool
@ -160,7 +168,7 @@ no_render_fill_boxes(struct sna *sna,
}
if (op != PictOpSrc)
return FALSE;
return false;
if (alu == GXcopy &&
!sna_get_pixel_from_rgba(&pixel,
@ -169,7 +177,7 @@ no_render_fill_boxes(struct sna *sna,
color->blue,
color->alpha,
format))
return FALSE;
return false;
return sna_blt_fill_boxes(sna, alu,
dst_bo, dst->drawable.bitsPerPixel,
@ -267,6 +275,7 @@ void no_render_init(struct sna *sna)
render->vertex_size = ARRAY_SIZE(render->vertex_data);
render->composite = no_render_composite;
render->check_composite_spans = no_render_check_composite_spans;
render->copy_boxes = no_render_copy_boxes;
render->copy = no_render_copy;
@ -1314,7 +1323,7 @@ sna_render_picture_convolve(struct sna *sna,
channel->width = w;
channel->filter = PictFilterNearest;
channel->repeat = RepeatNone;
channel->is_affine = TRUE;
channel->is_affine = true;
channel->transform = NULL;
channel->scale[0] = 1.f / w;
channel->scale[1] = 1.f / h;
@ -1381,7 +1390,7 @@ sna_render_picture_flatten(struct sna *sna,
channel->filter = PictFilterNearest;
channel->repeat = RepeatNone;
channel->pict_format = PIXMAN_a8r8g8b8;
channel->is_affine = TRUE;
channel->is_affine = true;
channel->transform = NULL;
channel->scale[0] = 1.f / w;
channel->scale[1] = 1.f / h;
@ -1440,7 +1449,7 @@ sna_render_picture_approximate_gradient(struct sna *sna,
return 0;
}
src = image_from_pict(picture, FALSE, &dx, &dy);
src = image_from_pict(picture, false, &dx, &dy);
if (src == NULL) {
pixman_image_unref(dst);
kgem_bo_destroy(&sna->kgem, channel->bo);
@ -1468,7 +1477,7 @@ sna_render_picture_approximate_gradient(struct sna *sna,
channel->filter = PictFilterNearest;
channel->repeat = RepeatNone;
channel->is_affine = TRUE;
channel->is_affine = true;
channel->scale[0] = 1.f/w;
channel->scale[1] = 1.f/h;
@ -1561,7 +1570,7 @@ do_fixup:
return 0;
}
src = image_from_pict(picture, FALSE, &dx, &dy);
src = image_from_pict(picture, false, &dx, &dy);
if (src == NULL) {
pixman_image_unref(dst);
kgem_bo_destroy(&sna->kgem, channel->bo);
@ -1606,7 +1615,7 @@ do_fixup:
channel->filter = PictFilterNearest;
channel->repeat = RepeatNone;
channel->is_affine = TRUE;
channel->is_affine = true;
channel->scale[0] = 1.f/w;
channel->scale[1] = 1.f/h;
@ -1750,7 +1759,7 @@ sna_render_composite_redirect(struct sna *sna,
struct kgem_bo *bo;
#if NO_REDIRECT
return FALSE;
return false;
#endif
DBG(("%s: target too large (%dx%d), copying to temporary %dx%d, max %d\n",
@ -1760,11 +1769,11 @@ sna_render_composite_redirect(struct sna *sna,
sna->render.max_3d_size));
if (!width || !height)
return FALSE;
return false;
if (width > sna->render.max_3d_size ||
height > sna->render.max_3d_size)
return FALSE;
return false;
if (op->dst.bo->pitch <= sna->render.max_3d_pitch) {
BoxRec box;
@ -1842,7 +1851,7 @@ sna_render_composite_redirect(struct sna *sna,
t->real_bo = NULL;
if (t->damage)
__sna_damage_destroy(t->damage);
return FALSE;
return false;
}
assert(op->dst.bo != t->real_bo);
@ -1852,7 +1861,7 @@ sna_render_composite_redirect(struct sna *sna,
op->dst.y -= box.y1;
op->dst.width = w;
op->dst.height = h;
return TRUE;
return true;
}
}
@ -1866,7 +1875,7 @@ sna_render_composite_redirect(struct sna *sna,
width, height, bpp),
CREATE_TEMPORARY);
if (!bo)
return FALSE;
return false;
t->box.x1 = x + op->dst.x;
t->box.y1 = y + op->dst.y;
@ -1881,7 +1890,7 @@ sna_render_composite_redirect(struct sna *sna,
bo, -t->box.x1, -t->box.y1,
bpp, &t->box, 1)) {
kgem_bo_destroy(&sna->kgem, bo);
return FALSE;
return false;
}
t->real_bo = op->dst.bo;
@ -1897,7 +1906,7 @@ sna_render_composite_redirect(struct sna *sna,
op->dst.y = -y;
op->dst.width = width;
op->dst.height = height;
return TRUE;
return true;
}
void

View File

@ -194,6 +194,9 @@ struct sna_render {
int16_t w, int16_t h,
struct sna_composite_op *tmp);
bool (*check_composite_spans)(struct sna *sna, uint8_t op,
PicturePtr dst, PicturePtr src,
int16_t w, int16_t h, unsigned flags);
bool (*composite_spans)(struct sna *sna, uint8_t op,
PicturePtr dst, PicturePtr src,
int16_t src_x, int16_t src_y,

View File

@ -3673,7 +3673,9 @@ composite_unaligned_boxes(struct sna *sna,
if (ntrap > 1 && maskFormat)
return false;
if (force_fallback || !sna->render.composite_spans) {
if (force_fallback ||
!sna->render.check_composite_spans(sna, op, src, dst, 0, 0,
COMPOSITE_SPANS_RECTILINEAR)) {
fallback:
return composite_unaligned_boxes_fallback(op, src, dst,
src_x, src_y,
@ -3721,6 +3723,15 @@ fallback:
return true;
}
if (!sna->render.check_composite_spans(sna, op, src, dst,
clip.extents.x2 - clip.extents.x1,
clip.extents.y2 - clip.extents.y1,
COMPOSITE_SPANS_RECTILINEAR)) {
DBG(("%s: fallback -- composite spans not supported\n",
__FUNCTION__));
goto fallback;
}
c = NULL;
if (extents.x2 - extents.x1 > clip.extents.x2 - clip.extents.x1 ||
extents.y2 - extents.y1 > clip.extents.y2 - clip.extents.y1)
@ -4016,7 +4027,7 @@ trapezoid_span_converter(CARD8 op, PicturePtr src, PicturePtr dst,
}
sna = to_sna_from_drawable(dst->pDrawable);
if (!sna->render.composite_spans) {
if (!sna->render.check_composite_spans(sna, op, src, dst, 0, 0, flags)) {
DBG(("%s: fallback -- composite spans not supported\n",
__FUNCTION__));
return false;
@ -4053,6 +4064,15 @@ trapezoid_span_converter(CARD8 op, PicturePtr src, PicturePtr dst,
return true;
}
if (!sna->render.check_composite_spans(sna, op, src, dst,
clip.extents.x2 - clip.extents.x1,
clip.extents.y2 - clip.extents.y1,
flags)) {
DBG(("%s: fallback -- composite spans not supported\n",
__FUNCTION__));
return false;
}
extents = *RegionExtents(&clip);
dx = dst->pDrawable->x;
dy = dst->pDrawable->y;
@ -5633,11 +5653,8 @@ trap_span_converter(PicturePtr dst,
struct sna_composite_spans_op tmp;
struct tor tor;
BoxRec extents;
PicturePtr src;
xRenderColor white;
pixman_region16_t *clip;
int dx, dy;
int n, error;
int dx, dy, n;
if (NO_SCAN_CONVERTER)
return false;
@ -5649,15 +5666,15 @@ trap_span_converter(PicturePtr dst,
return mono_trap_span_converter(dst, src_x, src_y, ntrap, trap);
sna = to_sna_from_drawable(dst->pDrawable);
if (!sna->render.composite_spans) {
if (!sna->render.check_composite_spans(sna, PictOpAdd, sna->render.white_picture, dst,
dst->pCompositeClip->extents.x2 - dst->pCompositeClip->extents.x1,
dst->pCompositeClip->extents.y2 - dst->pCompositeClip->extents.y1,
0)) {
DBG(("%s: fallback -- composite spans not supported\n",
__FUNCTION__));
return false;
}
DBG(("%s: extents (%d, %d), (%d, %d)\n",
__FUNCTION__, extents.x1, extents.y1, extents.x2, extents.y2));
clip = dst->pCompositeClip;
extents = *RegionExtents(clip);
dx = dst->pDrawable->x;
@ -5669,13 +5686,8 @@ trap_span_converter(PicturePtr dst,
extents.x2, extents.y2,
dx, dy));
white.red = white.green = white.blue = white.alpha = 0xffff;
src = CreateSolidPicture(0, &white, &error);
if (src == NULL)
return true;
memset(&tmp, 0, sizeof(tmp));
if (!sna->render.composite_spans(sna, PictOpAdd, src, dst,
if (!sna->render.composite_spans(sna, PictOpAdd, sna->render.white_picture, dst,
0, 0,
extents.x1, extents.y1,
extents.x2 - extents.x1,
@ -5684,7 +5696,6 @@ trap_span_converter(PicturePtr dst,
&tmp)) {
DBG(("%s: fallback -- composite spans render op not supported\n",
__FUNCTION__));
FreePicture(src, 0);
return false;
}
@ -5723,7 +5734,6 @@ trap_span_converter(PicturePtr dst,
skip:
tor_fini(&tor);
tmp.done(sna, &tmp);
FreePicture(src, 0);
return true;
}
@ -6175,7 +6185,7 @@ triangles_span_converter(CARD8 op, PicturePtr src, PicturePtr dst,
}
sna = to_sna_from_drawable(dst->pDrawable);
if (!sna->render.composite_spans) {
if (!sna->render.check_composite_spans(sna, op, src, dst, 0, 0, 0)) {
DBG(("%s: fallback -- composite spans not supported\n",
__FUNCTION__));
return false;
@ -6212,6 +6222,15 @@ triangles_span_converter(CARD8 op, PicturePtr src, PicturePtr dst,
return true;
}
if (!sna->render.check_composite_spans(sna, op, src, dst,
clip.extents.x2 - clip.extents.x1,
clip.extents.y2 - clip.extents.y1,
0)) {
DBG(("%s: fallback -- composite spans not supported\n",
__FUNCTION__));
return false;
}
extents = *RegionExtents(&clip);
dx = dst->pDrawable->x;
dy = dst->pDrawable->y;
@ -6531,7 +6550,7 @@ tristrip_span_converter(CARD8 op, PicturePtr src, PicturePtr dst,
}
sna = to_sna_from_drawable(dst->pDrawable);
if (!sna->render.composite_spans) {
if (!sna->render.check_composite_spans(sna, op, src, dst, 0, 0, 0)) {
DBG(("%s: fallback -- composite spans not supported\n",
__FUNCTION__));
return false;
@ -6568,6 +6587,15 @@ tristrip_span_converter(CARD8 op, PicturePtr src, PicturePtr dst,
return true;
}
if (!sna->render.check_composite_spans(sna, op, src, dst,
clip.extents.x2 - clip.extents.x1,
clip.extents.y2 - clip.extents.y1,
0)) {
DBG(("%s: fallback -- composite spans not supported\n",
__FUNCTION__));
return false;
}
extents = *RegionExtents(&clip);
dx = dst->pDrawable->x;
dy = dst->pDrawable->y;