sna: Only shrink a partial buffer if it is no longer used.
The condition for being able to shrink a buffer is more severe than
just whether we are still reading from it: we also cannot swap the
handles if the existing handle remains exposed via a proxy.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
commit 978e1aecea
parent d3169154d1
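
In brief: the decision to shrink a partial buffer moves from a per-buffer
flag set at allocation time to a liveness check made when the partials are
flushed. A minimal sketch of the new gate, using stand-in types rather than
the real kgem structures (may_shrink() is illustrative; the actual check
lives inline in kgem_finish_partials()):

    #include <stdbool.h>
    #include <stdint.h>

    /* Stand-ins for the fields the check reads; see struct
     * kgem_partial_bo in the first hunk below. */
    struct sketch_partial_bo {
        int refcnt;      /* reference count on the underlying bo */
        uint32_t size;   /* allocated size of the buffer */
        uint32_t used;   /* bytes actually written */
    };

    static bool may_shrink(const struct sketch_partial_bo *bo)
    {
        /* Swapping in a smaller handle is only safe if nobody else
         * can still see the old one; refcnt > 1 means a proxy bo is
         * still exposing this buffer. */
        if (bo->refcnt != 1)
            return false;

        /* And it is only worth the copy if at least half is unused. */
        return bo->used < bo->size / 2;
    }
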
@@ -112,7 +112,6 @@ struct kgem_partial_bo {
 	uint32_t used;
 	uint32_t need_io : 1;
 	uint32_t write : 1;
-	uint32_t shrink : 1;
 	uint32_t mmapped : 1;
 };
 
@@ -294,6 +293,8 @@ static void kgem_bo_retire(struct kgem *kgem, struct kgem_bo *bo)
 		kgem_retire(kgem);
 
 	if (bo->exec == NULL) {
+		DBG(("%s: retiring bo handle=%d (needed flush? %d)\n",
+		     __FUNCTION__, bo->handle, bo->needs_flush));
 		bo->rq = NULL;
 		list_del(&bo->request);
 		bo->needs_flush = bo->flush;
@@ -1079,8 +1080,9 @@ static void kgem_commit(struct kgem *kgem)
 	struct kgem_bo *bo, *next;
 
 	list_for_each_entry_safe(bo, next, &rq->buffers, request) {
-		DBG(("%s: release handle=%d (proxy? %d)\n",
-		     __FUNCTION__, bo->handle, bo->proxy != NULL));
+		DBG(("%s: release handle=%d (proxy? %d), dirty? %d flush? %d\n",
+		     __FUNCTION__, bo->handle, bo->proxy != NULL,
+		     bo->dirty, bo->needs_flush));
 
 		assert(!bo->purged);
 		assert(bo->proxy || bo->rq == rq);
@@ -1193,7 +1195,7 @@ static void kgem_finish_partials(struct kgem *kgem)
 		}
 
 		assert(bo->base.rq == kgem->next_request);
-		if (bo->shrink && bo->used < bo->base.size / 2) {
+		if (bo->base.refcnt == 1 && bo->used < bo->base.size / 2) {
 			struct kgem_bo *shrink;
 
 			shrink = search_linear_cache(kgem,
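
The replaced condition is the heart of the fix: bo->shrink only recorded
whether the buffer was created for write-only I/O, while bo->base.refcnt == 1
also rules out live proxies, since every proxy created over the partial
buffer holds a reference to it. A rough illustration of why a proxy pins
the handle (hypothetical stand-in types, not the kgem API):

    #include <stdint.h>

    struct sketch_bo {
        int refcnt;        /* bumped by each live proxy */
        uint32_t handle;   /* kernel object the GPU addresses */
    };

    /* A proxy is a window into another bo: it carries no storage of
     * its own, just an offset into its target.  Relocations emitted
     * against the proxy resolve to target->handle, so that handle
     * must not be swapped out from under it. */
    struct sketch_proxy {
        struct sketch_bo *target;  /* ref held; pins target->handle */
        uint32_t delta;            /* byte offset within the target */
    };
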
@@ -1227,9 +1229,12 @@ static void kgem_finish_partials(struct kgem *kgem)
 				list_replace(&bo->base.request,
 					     &shrink->request);
 				list_init(&bo->base.request);
+				shrink->needs_flush = bo->base.dirty;
 
 				bo->base.exec = NULL;
 				bo->base.rq = NULL;
+				bo->base.dirty = false;
+				bo->base.needs_flush = false;
 				bo->used = 0;
 
 				bubble_sort_partial(kgem, bo);
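
When the contents are migrated into the smaller bo, the pending-write state
has to follow the data: the replacement inherits needs_flush from the old
buffer's dirty bit, and the old buffer is scrubbed so it can be recycled.
A condensed sketch of that handoff (illustrative; it mirrors the hunk above
rather than quoting kgem.c):

    #include <stdbool.h>
    #include <stdint.h>

    struct sketch_state {
        void *exec, *rq;           /* batch/request membership */
        bool dirty, needs_flush;   /* pending GPU writes */
        uint32_t used;
    };

    static void hand_off(struct sketch_state *old, struct sketch_state *shrunk)
    {
        /* The GPU writes now land in the shrunk bo, so it inherits
         * the obligation to be flushed. */
        shrunk->needs_flush = old->dirty;

        /* The old bo leaves the batch with a clean slate. */
        old->exec = NULL;
        old->rq = NULL;
        old->dirty = false;
        old->needs_flush = false;
        old->used = 0;
    }
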
@@ -2497,8 +2502,11 @@ uint32_t kgem_add_reloc(struct kgem *kgem,
 		kgem->reloc[index].target_handle = bo->handle;
 		kgem->reloc[index].presumed_offset = bo->presumed_offset;
 
-		if (read_write_domain & 0x7fff)
+		if (read_write_domain & 0x7fff) {
+			DBG(("%s: marking handle=%d dirty\n",
+			     __FUNCTION__, bo->handle));
 			bo->needs_flush = bo->dirty = true;
+		}
 
 		delta += bo->presumed_offset;
 	} else {
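
For context on the 0x7fff mask: kgem_add_reloc() takes the read and write
domains packed into one word, and a non-zero low half means the GPU will
write through this relocation, so the target must be marked dirty and
flagged for a flush. A sketch of the packing this assumes (the exact bit
layout is an assumption inferred from how the value is masked here, not
quoted from kgem.h; bit 15 is taken to be a fence flag):

    /* Assumed packing of read_write_domain: read domains in the high
     * 16 bits, write domain in the low 15 bits, bit 15 reserved. */
    #define SKETCH_RELOC_FENCED      0x8000
    #define SKETCH_WRITE_DOMAIN(rw)  ((rw) & 0x7fff)
    #define SKETCH_READ_DOMAINS(rw)  ((rw) >> 16)
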
@@ -2898,13 +2906,11 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 			DBG(("%s: reusing write buffer for read of %d bytes? used=%d, total=%d\n",
 			     __FUNCTION__, size, bo->used, bo->base.size));
 			offset = 0;
-			bo->shrink = 0;
 			goto done;
 		} else if (bo->used + size <= bo->base.size) {
 			DBG(("%s: reusing unfinished write buffer for read of %d bytes? used=%d, total=%d\n",
 			     __FUNCTION__, size, bo->used, bo->base.size));
 			offset = bo->used;
-			bo->shrink = 0;
 			goto done;
 		}
 	}
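
With the flag gone, satisfying a read from a buffer of pending writes needs
no extra bookkeeping here: the old code had to clear bo->shrink on both
reuse paths, whereas eligibility is now re-derived from the reference count
at flush time. A trimmed sketch of the append path (illustrative; the real
kgem_create_buffer() also checks I/O mode and mappings):

    #include <stdbool.h>
    #include <stdint.h>

    struct sketch_buffer { uint32_t size, used; };

    /* Place a read of 'size' bytes after the pending writes, if there
     * is room left in the partial buffer. */
    static bool reuse_for_read(struct sketch_buffer *bo, uint32_t size,
                               uint32_t *offset)
    {
        if (bo->used + size > bo->size)
            return false;
        *offset = bo->used;
        bo->used += size;
        return true;
    }
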
@@ -3046,7 +3052,6 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 
 	bo->used = size;
 	bo->write = write;
-	bo->shrink = bo->need_io;
 	offset = 0;
 
 	list_add(&bo->base.list, &kgem->partial);