sna: Only update a buffer when it becomes dirty

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
This commit is contained in:
Chris Wilson 2012-07-20 20:34:23 +01:00
parent c52d265b83
commit a0d95a9c2d
8 changed files with 21 additions and 19 deletions

View File

@ -547,7 +547,7 @@ static void gen2_emit_target(struct sna *sna, const struct sna_composite_op *op)
assert(sna->render_state.gen2.vertex_offset == 0);
if (sna->render_state.gen2.target == op->dst.bo->unique_id) {
kgem_bo_mark_dirty(op->dst.bo);
kgem_bo_mark_dirty(&sna->kgem, op->dst.bo);
return;
}

View File

@ -1373,7 +1373,7 @@ static void gen3_emit_target(struct sna *sna,
state->current_dst = bo->unique_id;
}
kgem_bo_mark_dirty(bo);
kgem_bo_mark_dirty(&sna->kgem, bo);
}
static void gen3_emit_composite_state(struct sna *sna,

View File

@ -732,7 +732,7 @@ gen4_bind_bo(struct sna *sna,
/* After the first bind, we manage the cache domains within the batch */
if (is_dst) {
domains = I915_GEM_DOMAIN_RENDER << 16 | I915_GEM_DOMAIN_RENDER;
kgem_bo_mark_dirty(bo);
kgem_bo_mark_dirty(&sna->kgem, bo);
} else
domains = I915_GEM_DOMAIN_SAMPLER << 16;
@ -1457,7 +1457,7 @@ gen4_emit_state(struct sna *sna,
kgem_bo_is_dirty(op->mask.bo)));
OUT_BATCH(MI_FLUSH);
kgem_clear_dirty(&sna->kgem);
kgem_bo_mark_dirty(op->dst.bo);
kgem_bo_mark_dirty(&sna->kgem, op->dst.bo);
}
}

View File

@ -726,7 +726,7 @@ gen5_bind_bo(struct sna *sna,
/* After the first bind, we manage the cache domains within the batch */
if (is_dst) {
domains = I915_GEM_DOMAIN_RENDER << 16 | I915_GEM_DOMAIN_RENDER;
kgem_bo_mark_dirty(bo);
kgem_bo_mark_dirty(&sna->kgem, bo);
} else
domains = I915_GEM_DOMAIN_SAMPLER << 16;
@ -1472,7 +1472,7 @@ gen5_emit_state(struct sna *sna,
if (kgem_bo_is_dirty(op->src.bo) || kgem_bo_is_dirty(op->mask.bo)) {
OUT_BATCH(MI_FLUSH);
kgem_clear_dirty(&sna->kgem);
kgem_bo_mark_dirty(op->dst.bo);
kgem_bo_mark_dirty(&sna->kgem, op->dst.bo);
}
}

View File

@ -914,7 +914,7 @@ gen6_emit_state(struct sna *sna,
if (kgem_bo_is_dirty(op->src.bo) || kgem_bo_is_dirty(op->mask.bo)) {
gen6_emit_flush(sna);
kgem_clear_dirty(&sna->kgem);
kgem_bo_mark_dirty(op->dst.bo);
kgem_bo_mark_dirty(&sna->kgem, op->dst.bo);
need_stall = false;
}
if (need_stall) {
@ -1246,7 +1246,7 @@ gen6_bind_bo(struct sna *sna,
/* After the first bind, we manage the cache domains within the batch */
if (is_dst) {
domains = I915_GEM_DOMAIN_RENDER << 16 |I915_GEM_DOMAIN_RENDER;
kgem_bo_mark_dirty(bo);
kgem_bo_mark_dirty(&sna->kgem, bo);
} else
domains = I915_GEM_DOMAIN_SAMPLER << 16;

View File

@ -1048,7 +1048,7 @@ gen7_emit_state(struct sna *sna,
if (kgem_bo_is_dirty(op->src.bo) || kgem_bo_is_dirty(op->mask.bo)) {
gen7_emit_pipe_invalidate(sna, need_stall);
kgem_clear_dirty(&sna->kgem);
kgem_bo_mark_dirty(op->dst.bo);
kgem_bo_mark_dirty(&sna->kgem, op->dst.bo);
need_stall = false;
}
if (need_stall)
@ -1355,7 +1355,7 @@ gen7_bind_bo(struct sna *sna,
/* After the first bind, we manage the cache domains within the batch */
if (is_dst) {
domains = I915_GEM_DOMAIN_RENDER << 16 |I915_GEM_DOMAIN_RENDER;
kgem_bo_mark_dirty(bo);
kgem_bo_mark_dirty(&sna->kgem, bo);
} else
domains = I915_GEM_DOMAIN_SAMPLER << 16;

View File

@ -349,9 +349,10 @@ void kgem_bo_retire(struct kgem *kgem, struct kgem_bo *bo)
assert(list_is_empty(&bo->vma));
bo->rq = NULL;
list_del(&bo->request);
bo->needs_flush = false;
}
bo->needs_flush = false;
bo->domain = DOMAIN_NONE;
}
@ -3494,12 +3495,8 @@ uint32_t kgem_add_reloc(struct kgem *kgem,
kgem->reloc[index].target_handle = bo->handle;
kgem->reloc[index].presumed_offset = bo->presumed_offset;
if (read_write_domain & 0x7fff) {
DBG(("%s: marking handle=%d dirty\n",
__FUNCTION__, bo->handle));
bo->needs_flush = bo->dirty = true;
list_move(&bo->request, &kgem->next_request->buffers);
}
if (read_write_domain & 0x7fff)
kgem_bo_mark_dirty(kgem, bo);
delta += bo->presumed_offset;
} else {

View File

@ -536,10 +536,15 @@ static inline bool kgem_bo_is_dirty(struct kgem_bo *bo)
return bo->dirty;
}
static inline void kgem_bo_mark_dirty(struct kgem_bo *bo)
static inline void kgem_bo_mark_dirty(struct kgem *kgem, struct kgem_bo *bo)
{
if (bo->dirty)
return;
DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle));
bo->dirty = true;
bo->needs_flush = bo->dirty = true;
list_move(&bo->request, &kgem->next_request->buffers);
}
void kgem_sync(struct kgem *kgem);