sna: Always clear the mmapped domains when reusing partial upload buffers

We need to make sure that we invalidate the caches appropriately
on reuse. Mildly paranoid, but strictly required by the spec.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Author: Chris Wilson
Date:   2012-04-20 13:21:40 +01:00
Parent: cb6a3dc2ed
Commit: aff3614efd

2 changed files with 31 additions and 11 deletions
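For context, the mechanism behind both sync helpers is the i915 set-domain ioctl: before the CPU touches a mmapped buffer again, the kernel is told which domain (CPU or GTT) the access goes through, so it can wait for the GPU and invalidate or flush stale caches. Below is a minimal standalone sketch of that handshake; it assumes an already-open DRM fd and a valid GEM handle, and the helper name set_domain_gtt() is invented for illustration only (the driver's own kgem_bo_sync__gtt() in the diff adds submit/retire bookkeeping on top).

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <i915_drm.h>

/* Illustrative only: move a GEM buffer into the GTT domain for both
 * reads and writes. 'fd' is an open DRM device fd and 'handle' a valid
 * GEM handle; both are assumed to come from elsewhere. Returns 0 on
 * success, -1 with errno set on failure. */
static int set_domain_gtt(int fd, uint32_t handle)
{
	struct drm_i915_gem_set_domain set_domain;

	memset(&set_domain, 0, sizeof(set_domain));
	set_domain.handle = handle;
	set_domain.read_domains = I915_GEM_DOMAIN_GTT;
	set_domain.write_domain = I915_GEM_DOMAIN_GTT;

	/* Blocks until the GPU is done with this bo, then performs the
	 * cache maintenance needed for coherent access via the GTT. */
	return drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
}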

src/sna/kgem.c

@@ -3414,6 +3414,29 @@ void kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo)
 	}
 }
 
+void kgem_bo_sync__gtt(struct kgem *kgem, struct kgem_bo *bo)
+{
+	assert(bo->proxy == NULL);
+
+	kgem_bo_submit(kgem, bo);
+
+	if (bo->domain != DOMAIN_GTT) {
+		struct drm_i915_gem_set_domain set_domain;
+
+		DBG(("%s: sync: needs_flush? %d, domain? %d, busy? %d\n", __FUNCTION__,
+		     bo->needs_flush, bo->domain, kgem_busy(kgem, bo->handle)));
+
+		VG_CLEAR(set_domain);
+		set_domain.handle = bo->handle;
+		set_domain.read_domains = I915_GEM_DOMAIN_GTT;
+		set_domain.write_domain = I915_GEM_DOMAIN_GTT;
+
+		if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain) == 0) {
+			kgem_bo_retire(kgem, bo);
+			bo->domain = DOMAIN_GTT;
+		}
+	}
+}
+
 void kgem_bo_set_sync(struct kgem *kgem, struct kgem_bo *bo)
 {
 	assert(!bo->reusable);
@@ -3424,7 +3447,6 @@ void kgem_bo_set_sync(struct kgem *kgem, struct kgem_bo *bo)
 
 void kgem_sync(struct kgem *kgem)
 {
-	struct drm_i915_gem_set_domain set_domain;
 	struct kgem_request *rq;
 	struct kgem_bo *bo;
@@ -3437,14 +3459,7 @@ void kgem_sync(struct kgem *kgem)
 	if (rq == kgem->next_request)
 		_kgem_submit(kgem);
 
-	VG_CLEAR(set_domain);
-	set_domain.handle = rq->bo->handle;
-	set_domain.read_domains = I915_GEM_DOMAIN_GTT;
-	set_domain.write_domain = I915_GEM_DOMAIN_GTT;
-
-	drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
-	kgem_retire(kgem);
-
+	kgem_bo_sync__gtt(kgem, rq->bo);
 	list_for_each_entry(bo, &kgem->sync_list, list)
 		kgem_bo_sync__cpu(kgem, bo);
@@ -3599,8 +3614,12 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 		bo->used = size;
 		list_move(&bo->base.list, &kgem->active_partials);
 
-		if (bo->base.vmap)
-			kgem_bo_sync__cpu(kgem, &bo->base);
+		if (bo->mmapped) {
+			if (IS_CPU_MAP(bo->base.map))
+				kgem_bo_sync__cpu(kgem, &bo->base);
+			else
+				kgem_bo_sync__gtt(kgem, &bo->base);
+		}
 
 		goto done;
 	} while (kgem_retire(kgem));

src/sna/kgem.h

@@ -365,6 +365,7 @@ uint32_t kgem_add_reloc(struct kgem *kgem,
 void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo);
 void *kgem_bo_map__gtt(struct kgem *kgem, struct kgem_bo *bo);
+void kgem_bo_sync__gtt(struct kgem *kgem, struct kgem_bo *bo);
 void *kgem_bo_map__debug(struct kgem *kgem, struct kgem_bo *bo);
 void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo);
 void kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo);