sna/gen6+: Keep the bo on its current ring

Track the most recent ring each bo is executed on, and prefer to keep it
on that ring for the next operation.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Author: Chris Wilson
Date:   2012-12-15 20:49:56 +00:00
Commit: ac9ef1fc60 (parent 15ccb7148d)
4 changed files with 26 additions and 14 deletions
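
The mechanism is pointer tagging: a struct kgem_request allocation is at
least 4-byte aligned, so the low two bits of bo->rq are free to carry the
ring the bo last executed on. MAKE_REQUEST() stores the tag, RQ() strips
it before dereferencing, and RQ_RING()/RQ_IS_BLT() inspect it (see the
kgem.c and kgem.h hunks below). A minimal standalone sketch of the idea,
using illustrative names and ring values rather than the driver's own
KGEM_* enum:

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

/* Illustrative ring ids; the driver defines its own KGEM_* enum. */
enum ring { RING_RENDER = 0, RING_BLT = 1 };

struct request { int serial; };	/* stand-in for struct kgem_request */

/* Store a ring id in the low bits of an aligned pointer (cf. MAKE_REQUEST). */
static struct request *make_request(struct request *rq, enum ring ring)
{
	assert(((uintptr_t)rq & 3) == 0);	/* low two bits must be free */
	return (struct request *)((uintptr_t)rq | ring);
}

/* Recover the untagged pointer (cf. RQ). */
static struct request *rq_ptr(struct request *rq)
{
	return (struct request *)((uintptr_t)rq & ~(uintptr_t)3);
}

/* Recover the ring tag (cf. RQ_RING). */
static enum ring rq_ring(struct request *rq)
{
	return (enum ring)((uintptr_t)rq & 3);
}

int main(void)
{
	struct request *rq = malloc(sizeof(*rq));
	struct request *tagged = make_request(rq, RING_BLT);

	assert(rq_ptr(tagged) == rq);		/* original pointer recovered */
	assert(rq_ring(tagged) == RING_BLT);	/* ...and the ring tag with it */

	free(rq);
	return 0;
}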

src/sna/gen6_render.c

@@ -2406,7 +2406,7 @@ static bool prefer_blt_ring(struct sna *sna)
 static bool can_switch_to_blt(struct sna *sna)
 {
-	if (sna->kgem.ring == KGEM_BLT)
+	if (sna->kgem.ring != KGEM_RENDER)
 		return true;
 
 	if (NO_RING_SWITCH)
@@ -2415,8 +2415,7 @@ static bool can_switch_to_blt(struct sna *sna)
 	if (!sna->kgem.has_semaphores)
 		return false;
 
-	return (sna->kgem.mode == KGEM_NONE ||
-		kgem_ring_is_idle(&sna->kgem, KGEM_BLT));
+	return kgem_ring_is_idle(&sna->kgem, KGEM_BLT);
 }
 
 static inline bool untiled_tlb_miss(struct kgem_bo *bo)
@@ -2426,6 +2425,9 @@ static inline bool untiled_tlb_miss(struct kgem_bo *bo)
 
 static bool prefer_blt_bo(struct sna *sna, struct kgem_bo *bo)
 {
+	if (RQ_IS_BLT(bo->rq))
+		return true;
+
 	return untiled_tlb_miss(bo) && bo->pitch < MAXSHORT;
 }

src/sna/gen7_render.c

@@ -2509,7 +2509,7 @@ gen7_composite_set_target(struct sna *sna,
 inline static bool can_switch_to_blt(struct sna *sna)
 {
-	if (sna->kgem.ring == KGEM_BLT)
+	if (sna->kgem.ring != KGEM_RENDER)
 		return true;
 
 	if (NO_RING_SWITCH)
@@ -2518,8 +2518,7 @@ inline static bool can_switch_to_blt(struct sna *sna)
 	if (!sna->kgem.has_semaphores)
 		return false;
 
-	return (sna->kgem.mode == KGEM_NONE ||
-		kgem_ring_is_idle(&sna->kgem, KGEM_BLT));
+	return kgem_ring_is_idle(&sna->kgem, KGEM_BLT);
 }
 
 static inline bool untiled_tlb_miss(struct kgem_bo *bo)
@@ -2529,6 +2528,9 @@ static inline bool untiled_tlb_miss(struct kgem_bo *bo)
 
 static bool prefer_blt_bo(struct sna *sna, struct kgem_bo *bo)
 {
+	if (RQ_IS_BLT(bo->rq))
+		return true;
+
 	return untiled_tlb_miss(bo) && bo->pitch < MAXSHORT;
 }

src/sna/kgem.c

@@ -106,6 +106,8 @@ search_snoop_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags);
 #define IS_USER_MAP(ptr) ((uintptr_t)(ptr) & 2)
 #define __MAP_TYPE(ptr) ((uintptr_t)(ptr) & 3)
 
+#define MAKE_REQUEST(rq, ring) ((struct kgem_request *)((uintptr_t)(rq) | (ring)))
+
 #define LOCAL_I915_PARAM_HAS_SEMAPHORES 20
 #define LOCAL_I915_PARAM_HAS_SECURE_BATCHES 23
 #define LOCAL_I915_PARAM_HAS_NO_RELOC 24
@@ -1332,7 +1334,7 @@ kgem_add_handle(struct kgem *kgem, struct kgem_bo *bo)
 static void kgem_add_bo(struct kgem *kgem, struct kgem_bo *bo)
 {
 	bo->exec = kgem_add_handle(kgem, bo);
-	bo->rq = kgem->next_request;
+	bo->rq = MAKE_REQUEST(kgem->next_request, kgem->ring);
 
 	list_move_tail(&bo->request, &kgem->next_request->buffers);
@@ -1845,7 +1847,7 @@ static bool kgem_retire__requests_ring(struct kgem *kgem, int ring)
 				      struct kgem_bo,
 				      request);
 
-		assert(bo->rq == rq);
+		assert(RQ(bo->rq) == rq);
 		assert(bo->exec == NULL);
 		assert(bo->domain == DOMAIN_GPU);
@@ -2005,7 +2007,7 @@ static void kgem_commit(struct kgem *kgem)
 		assert(!bo->purged);
 		assert(bo->exec);
 		assert(bo->proxy == NULL || bo->exec == &_kgem_dummy_exec);
-		assert(bo->rq == rq || (bo->proxy->rq == rq));
+		assert(RQ(bo->rq) == rq || (RQ(bo->proxy->rq) == rq));
 
 		bo->presumed_offset = bo->exec->offset;
 		bo->exec = NULL;
@@ -2125,7 +2127,7 @@ static void kgem_finish_buffers(struct kgem *kgem)
 		}
 
 		assert(bo->need_io);
-		assert(bo->base.rq == kgem->next_request);
+		assert(bo->base.rq == MAKE_REQUEST(kgem->next_request, kgem->ring));
 		assert(bo->base.domain != DOMAIN_GPU);
 
 		if (bo->base.refcnt == 1 &&
@@ -2452,7 +2454,7 @@ void _kgem_submit(struct kgem *kgem)
 		rq->bo->target_handle = kgem->has_handle_lut ? i : handle;
 		rq->bo->exec = &kgem->exec[i];
-		rq->bo->rq = rq; /* useful sanity check */
+		rq->bo->rq = MAKE_REQUEST(rq, kgem->ring); /* useful sanity check */
 		list_add(&rq->bo->request, &rq->buffers);
 
 		rq->ring = kgem->ring == KGEM_BLT;
@@ -4039,7 +4041,8 @@ uint32_t kgem_add_reloc(struct kgem *kgem,
 			if (bo->exec == NULL) {
 				list_move_tail(&bo->request,
 					       &kgem->next_request->buffers);
-				bo->rq = kgem->next_request;
+				bo->rq = MAKE_REQUEST(kgem->next_request,
+						      kgem->ring);
 				bo->exec = &_kgem_dummy_exec;
 			}
@@ -4053,7 +4056,8 @@ uint32_t kgem_add_reloc(struct kgem *kgem,
 		if (bo->exec == NULL)
 			kgem_add_bo(kgem, bo);
-		assert(bo->rq == kgem->next_request);
+		assert(bo->rq == MAKE_REQUEST(kgem->next_request, kgem->ring));
+		assert(RQ_RING(bo->rq) == kgem->ring);
 
 		if (kgem->gen < 040 && read_write_domain & KGEM_RELOC_FENCED) {
 			if (bo->tiling &&

src/sna/kgem.h

@@ -53,6 +53,10 @@ struct kgem_bo {
 #define IS_CPU_MAP(ptr) ((uintptr_t)(ptr) & 1)
 #define IS_GTT_MAP(ptr) (ptr && ((uintptr_t)(ptr) & 1) == 0)
 
 	struct kgem_request *rq;
+#define RQ(rq) ((struct kgem_request *)((uintptr_t)(rq) & ~3))
+#define RQ_RING(rq) ((uintptr_t)(rq) & 3)
+#define RQ_IS_BLT(rq) (RQ_RING(rq) == KGEM_BLT)
+
 	struct drm_i915_gem_exec_object2 *exec;
 
 	struct kgem_bo_binding {
@@ -586,7 +590,7 @@ static inline void __kgem_bo_mark_dirty(struct kgem_bo *bo)
 	bo->exec->flags |= LOCAL_EXEC_OBJECT_WRITE;
 	bo->needs_flush = bo->dirty = true;
-	list_move(&bo->request, &bo->rq->buffers);
+	list_move(&bo->request, &RQ(bo->rq)->buffers);
 }
 
 static inline void kgem_bo_mark_dirty(struct kgem_bo *bo)
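
Note the invariant the hunks above establish: every store to bo->rq goes
through MAKE_REQUEST() to tag on the current ring, and every dereference
or comparison goes through RQ() to strip the tag first, while RQ_RING()
and RQ_IS_BLT() read the tag alone. In pattern form (a sketch assembled
from the lines changed in this commit, not a contiguous excerpt):

	bo->rq = MAKE_REQUEST(kgem->next_request, kgem->ring);	/* tag on store */
	...
	list_move(&bo->request, &RQ(bo->rq)->buffers);	/* untag before deref */
	if (RQ_IS_BLT(bo->rq))	/* bo last executed on the BLT ring */
		...	/* prefer the BLT for the next operation */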