sna: Map the upload buffer using an LLC bo

In order to avoid having to copy the cacheable buffer into GPU space, we
can map a bo as cacheable and write directly to its contents. This is only
a win on systems that can avoid the clflush, and we also have to take
greater measures to avoid unnecessary serialisation upon that CPU bo.
Sadly, we do not yet go to enough length to avoid negatively impacting
ShmPutImage, but that does not appear to be an artefact of stalling upon a
CPU buffer.

Note, LLC is a SandyBridge feature enabled by default in kernel 3.1 and
later. In time, we should be able to expose similar support for snoopable
buffers for other generations.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
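For context, the win described above comes from the fact that on LLC parts the CPU mmap of a GEM buffer object is cacheable and coherent with the GPU, so pixel data can be written once, directly into the bo. Below is a minimal, hypothetical sketch of that mechanism using the same libdrm ioctls the patch itself relies on (DRM_IOCTL_I915_GEM_MMAP and DRM_IOCTL_I915_GEM_SET_DOMAIN); the helper name map_bo_cpu and its bare-bones error handling are illustrative and not part of the driver:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <i915_drm.h>

static void *map_bo_cpu(int fd, uint32_t handle, uint64_t size)
{
        struct drm_i915_gem_mmap mmap_arg;
        struct drm_i915_gem_set_domain set_domain;

        /* CPU (not GTT) mmap of the bo: cacheable on LLC systems */
        memset(&mmap_arg, 0, sizeof(mmap_arg));
        mmap_arg.handle = handle;
        mmap_arg.size = size;
        if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg))
                return NULL;

        /* Move the bo to the CPU domain: on LLC hardware subsequent
         * writes through this mapping are coherent without an explicit
         * clflush; the ioctl also serialises against GPU access. */
        memset(&set_domain, 0, sizeof(set_domain));
        set_domain.handle = handle;
        set_domain.read_domains = I915_GEM_DOMAIN_CPU;
        set_domain.write_domain = I915_GEM_DOMAIN_CPU;
        drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);

        return (void *)(uintptr_t)mmap_arg.addr_ptr;
}

With such a mapping in hand, an upload buffer can be filled in place instead of being staged in malloc'ed memory and copied into the bo with pwrite, which is what the kgem_create_buffer() changes below implement for gen >= 60.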
This commit is contained in:
parent 6e47f28371
commit 8ef5d8c195
configure.ac:

@@ -262,7 +262,7 @@ fi
 if test "x$DEBUG" != xno; then
         AC_DEFINE(HAS_EXTRA_DEBUG,1,[Enable additional debugging])
         PKG_CHECK_MODULES(VALGRIND, [valgrind],
-                          AC_DEFINE([HAVE_VALGRIND], 0, [Use valgind intrinsics to suppress false warings]),)
+                          AC_DEFINE([HAVE_VALGRIND], 1, [Use valgrind intrinsics to suppress false warings]),)
 fi
 if test "x$DEBUG" = xfull; then
         AC_DEFINE(HAS_DEBUG_FULL,1,[Enable all debugging])
src/sna/gen6_render.c:

@@ -2041,10 +2041,9 @@ gen6_composite_set_target(struct sna *sna,
         op->dst.width = op->dst.pixmap->drawable.width;
         op->dst.height = op->dst.pixmap->drawable.height;
         op->dst.format = dst->format;
+        priv = sna_pixmap(op->dst.pixmap);
 
         op->dst.bo = NULL;
 #if USE_VMAP
-        priv = sna_pixmap(op->dst.pixmap);
         if (priv && priv->gpu_bo == NULL &&
             I915_TILING_NONE == kgem_choose_tiling(&sna->kgem,
                                                    I915_TILING_X,

@@ -2054,7 +2053,6 @@ gen6_composite_set_target(struct sna *sna,
                 op->dst.bo = priv->cpu_bo;
                 op->damage = &priv->cpu_damage;
         }
 #endif
         if (op->dst.bo == NULL) {
                 priv = sna_pixmap_force_to_gpu(op->dst.pixmap);
                 if (priv == NULL)

@@ -2154,7 +2152,7 @@ gen6_composite_fallback(struct sna *sna,
 
         /* If anything is on the GPU, push everything out to the GPU */
         priv = sna_pixmap(dst_pixmap);
-        if (priv && priv->gpu_damage) {
+        if (priv && (priv->gpu_damage || (priv->cpu_bo && priv->cpu_bo->gpu))) {
                 DBG(("%s: dst is already on the GPU, try to use GPU\n",
                      __FUNCTION__));
                 return FALSE;
src/sna/kgem.c (229 lines changed):

@@ -44,15 +44,20 @@
 #include <memcheck.h>
 #endif
 
-static inline void list_move(struct list *list, struct list *head)
+static inline void _list_del(struct list *list)
 {
         __list_del(list->prev, list->next);
+}
+
+static inline void list_move(struct list *list, struct list *head)
+{
+        _list_del(list);
         list_add(list, head);
 }
 
 static inline void list_move_tail(struct list *list, struct list *head)
 {
-        __list_del(list->prev, list->next);
+        _list_del(list);
         list_add_tail(list, head);
 }

@@ -94,6 +99,7 @@ static inline void list_replace(struct list *old,
 
 struct kgem_partial_bo {
         struct kgem_bo base;
+        void *mem;
         uint32_t used, alloc;
         uint32_t need_io : 1;
         uint32_t write : 1;

@@ -201,8 +207,11 @@ static int gem_read(int fd, uint32_t handle, const void *dst,
         pread.size = length;
         pread.data_ptr = (uintptr_t)dst;
         ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
-        if (ret)
+        if (ret) {
+                DBG(("%s: failed, errno=%d\n", __FUNCTION__, errno));
+                assert(0);
                 return ret;
+        }
 
         VG(VALGRIND_MAKE_MEM_DEFINED(dst, length));
         return 0;

@@ -287,8 +296,7 @@ static struct kgem_bo *__kgem_bo_init(struct kgem_bo *bo,
         bo->handle = handle;
         bo->size = size;
         bo->reusable = true;
-        bo->cpu_read = true;
-        bo->cpu_write = true;
+        bo->cpu = true;
         list_init(&bo->request);
         list_init(&bo->list);
         list_init(&bo->vma);

@@ -610,6 +618,7 @@ static void kgem_bo_free(struct kgem *kgem, struct kgem_bo *bo)
         struct kgem_bo_binding *b;
 
         DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle));
+        assert(bo->exec == NULL);
 
         b = bo->binding.next;
         while (b) {

@@ -626,13 +635,19 @@ static void kgem_bo_free(struct kgem *kgem, struct kgem_bo *bo)
                 list_del(&bo->vma);
                 kgem->vma_count--;
         }
+        assert(list_is_empty(&bo->vma));
 
-        list_del(&bo->list);
-        list_del(&bo->request);
+        _list_del(&bo->list);
+        _list_del(&bo->request);
         gem_close(kgem->fd, bo->handle);
         free(bo);
 }
 
+static bool is_mmaped_buffer(struct kgem_partial_bo *bo)
+{
+        return bo->mem != bo+1;
+}
+
 static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
 {
         DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle));

@@ -646,11 +661,20 @@ static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
                 goto destroy;
 
         if (bo->io) {
-                /* transfer the handle to a minimum bo */
-                struct kgem_bo *base = malloc(sizeof(*base));
+                struct kgem_partial_bo *io = (struct kgem_partial_bo *)bo;
+                struct kgem_bo *base;
+
+                if (is_mmaped_buffer(io))
+                        kgem_bo_unmap__cpu(kgem, bo, io->mem);
+
+                base = malloc(sizeof(*base));
                 if (base) {
+                        DBG(("%s: transferring io handle=%d to bo\n",
+                             __FUNCTION__, bo->handle));
+                        /* transfer the handle to a minimum bo */
                         memcpy(base, bo, sizeof (*base));
                         base->reusable = true;
                         base->io = false;
                         list_init(&base->list);
                         list_replace(&bo->request, &base->request);
                         list_replace(&bo->vma, &base->vma);

@@ -665,7 +689,6 @@ static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
                         goto destroy;
         }
 
-        kgem->need_expire = true;
         if (bo->rq) {
                 DBG(("%s: handle=%d -> active\n", __FUNCTION__, bo->handle));
                 list_move(&bo->list, active(kgem, bo->size));

@@ -691,7 +714,9 @@ static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
         }
 
         DBG(("%s: handle=%d -> inactive\n", __FUNCTION__, bo->handle));
+        assert(!kgem_busy(kgem, bo->handle));
         list_move(&bo->list, inactive(kgem, bo->size));
+        kgem->need_expire = true;
         }
 
         return;

@@ -795,7 +820,7 @@ bool kgem_retire(struct kgem *kgem)
                         kgem_bo_free(kgem, rq->bo);
                 }
 
-                list_del(&rq->list);
+                _list_del(&rq->list);
                 free(rq);
         }

@@ -819,8 +844,7 @@ static void kgem_commit(struct kgem *kgem)
                 bo->binding.offset = 0;
                 bo->exec = NULL;
                 bo->dirty = false;
-                bo->cpu_read = false;
-                bo->cpu_write = false;
+                bo->cpu = false;
 
                 if (!bo->refcnt && !bo->reusable) {
                         kgem_bo_free(kgem, bo);

@@ -831,6 +855,8 @@ static void kgem_commit(struct kgem *kgem)
         if (rq == &_kgem_static_request) {
                 struct drm_i915_gem_set_domain set_domain;
 
+                DBG(("%s: syncing due to allocation failure\n", __FUNCTION__));
+
                 VG_CLEAR(set_domain);
                 set_domain.handle = rq->bo->handle;
                 set_domain.read_domains = I915_GEM_DOMAIN_GTT;

@@ -886,11 +912,11 @@ static void kgem_finish_partials(struct kgem *kgem)
                              __FUNCTION__, bo->base.handle, bo->used, bo->alloc));
                         assert(!kgem_busy(kgem, bo->base.handle));
                         gem_write(kgem->fd, bo->base.handle,
-                                  0, bo->used, bo+1);
+                                  0, bo->used, bo->mem);
                         bo->need_io = 0;
                 }
 
-                VG(VALGRIND_MAKE_MEM_NOACCESS(bo+1, bo->alloc));
+                VG(VALGRIND_MAKE_MEM_NOACCESS(bo->mem, bo->alloc));
                 kgem_bo_unref(kgem, &bo->base);
         }
 }

@@ -926,7 +952,7 @@ static void kgem_cleanup(struct kgem *kgem)
                         kgem_bo_free(kgem, bo);
                 }
 
-                list_del(&rq->list);
+                _list_del(&rq->list);
                 free(rq);
         }

@@ -978,8 +1004,6 @@ void kgem_reset(struct kgem *kgem)
                 bo->binding.offset = 0;
                 bo->exec = NULL;
                 bo->dirty = false;
-                bo->cpu_read = false;
-                bo->cpu_write = false;
                 bo->rq = NULL;
 
                 list_del(&bo->request);

@@ -1155,6 +1179,8 @@ void _kgem_submit(struct kgem *kgem)
         if (DEBUG_FLUSH_SYNC) {
                 struct drm_i915_gem_set_domain set_domain;
 
+                DBG(("%s: debug sync\n", __FUNCTION__));
+
                 VG_CLEAR(set_domain);
                 set_domain.handle = handle;
                 set_domain.read_domains = I915_GEM_DOMAIN_GTT;
@@ -1175,6 +1201,8 @@ void _kgem_submit(struct kgem *kgem)
 
         kgem_reset(kgem);
         kgem->flush_now = 1;
+
+        assert(kgem->next_request != NULL);
 }
 
 void kgem_throttle(struct kgem *kgem)

@@ -1291,6 +1319,8 @@ void kgem_cleanup_cache(struct kgem *kgem)
                                       struct kgem_request,
                                       list);
 
+                DBG(("%s: sync on cleanup\n", __FUNCTION__));
+
                 VG_CLEAR(set_domain);
                 set_domain.handle = rq->bo->handle;
                 set_domain.read_domains = I915_GEM_DOMAIN_GTT;

@@ -1669,6 +1699,14 @@ skip_active_search:
                                              bo->handle,
                                              tiling, pitch))
                                 goto next_bo;
+
+                        if (bo->map) {
+                                munmap(CPU_MAP(bo->map), bo->size);
+                                bo->map = NULL;
+
+                                list_del(&bo->vma);
+                                kgem->vma_count--;
+                        }
                 }
 
                 bo->pitch = pitch;

@@ -1739,11 +1777,15 @@ void _kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
                 kgem_bo_unref(kgem, bo->proxy);
 
                 assert(bo->binding.next == NULL);
-                list_del(&bo->request);
+                assert(bo->map == NULL);
+                _list_del(&bo->request);
                 free(bo);
                 return;
         }
 
+        if (bo->vmap)
+                kgem_bo_sync__cpu(kgem, bo);
+
         __kgem_bo_destroy(kgem, bo);
 }

@@ -1910,10 +1952,14 @@ static void kgem_trim_vma_cache(struct kgem *kgem)
                 DBG(("%s: discarding %s vma cache for %d\n",
                      __FUNCTION__, IS_CPU_MAP(old->map) ? "CPU" : "GTT",
                      old->handle));
+                assert(old->map);
                 munmap(CPU_MAP(old->map), old->size);
                 old->map = NULL;
                 list_del(&old->vma);
                 kgem->vma_count--;
+
+                if (!old->gpu && old->refcnt == 0)
+                        kgem_bo_free(kgem, old);
         }
 }

@@ -1957,15 +2003,22 @@ void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo, int prot)
         if (bo->needs_flush | bo->gpu) {
                 struct drm_i915_gem_set_domain set_domain;
 
+                DBG(("%s: sync: needs_flush? %d, gpu? %d\n", __FUNCTION__,
+                     bo->needs_flush, bo->gpu));
+
+                /* XXX use PROT_READ to avoid the write flush? */
+
                 VG_CLEAR(set_domain);
                 set_domain.handle = bo->handle;
                 set_domain.read_domains = I915_GEM_DOMAIN_GTT;
-                set_domain.write_domain = prot & PROT_WRITE ? I915_GEM_DOMAIN_GTT : 0;
+                set_domain.write_domain = I915_GEM_DOMAIN_GTT;
                 drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
 
                 bo->needs_flush = false;
-                if (bo->gpu)
+                if (bo->gpu) {
+                        kgem->sync = false;
                         kgem_retire(kgem);
+                }
         }
 
         list_move_tail(&bo->vma, &kgem->vma_cache);

@@ -1986,6 +2039,7 @@ void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo)
                 list_del(&bo->vma);
                 kgem->vma_count--;
+                bo->map = NULL;
                 VG(VALGRIND_MALLOCLIKE_BLOCK(ptr, bo->size, 0, 1));
                 return ptr;
         }

@@ -2009,17 +2063,20 @@ void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo)
                 return NULL;
         }
 
-        VG(VALGRIND_MAKE_MEM_DEFINED(mmap_arg.addr_ptr, bo->size));
+        VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, bo->size, 0, 1));
         return (void *)(uintptr_t)mmap_arg.addr_ptr;
 }
 
+void kgem_bo_unmap__cpu(struct kgem *kgem, struct kgem_bo *bo, void *ptr)
+{
+        assert(bo->map == NULL);
+        assert(ptr != NULL);
+
+        bo->map = MAKE_CPU_MAP(ptr);
+        list_move(&bo->vma, &kgem->vma_cache);
+        kgem->vma_count++;
+
+        VG(VALGRIND_FREELIKE_BLOCK(ptr, 0));
+}
+
 void kgem_bo_unmap(struct kgem *kgem, struct kgem_bo *bo)

@@ -2029,6 +2086,7 @@ void kgem_bo_unmap(struct kgem *kgem, struct kgem_bo *bo)
 
         DBG(("%s: (debug) releasing vma for handle=%d, count=%d\n",
              __FUNCTION__, bo->handle, kgem->vma_count-1));
+        assert(!IS_CPU_MAP(bo->map));
 
         munmap(CPU_MAP(bo->map), bo->size);
         bo->map = NULL;

@@ -2057,8 +2115,10 @@ uint32_t kgem_bo_flink(struct kgem *kgem, struct kgem_bo *bo)
 
         /* The bo is outside of our control, so presume it is written to */
         bo->needs_flush = true;
-        bo->cpu_read = bo->cpu_write = false;
+        bo->gpu = true;
 
         /* Henceforth, we need to broadcast all updates to clients and
          * flush our rendering before doing so.
          */
         bo->flush = 1;
+        if (bo->exec)
+                kgem->flush = 1;

@@ -2132,33 +2192,37 @@ struct kgem_bo *kgem_create_map(struct kgem *kgem,
 }
 #endif
 
-void kgem_bo_sync(struct kgem *kgem, struct kgem_bo *bo, bool for_write)
+void kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo)
 {
-        struct drm_i915_gem_set_domain set_domain;
-
         kgem_bo_submit(kgem, bo);
-        if (for_write ? bo->cpu_write : bo->cpu_read)
-                return;
 
-        VG_CLEAR(set_domain);
-        set_domain.handle = bo->handle;
-        set_domain.read_domains = I915_GEM_DOMAIN_CPU;
-        set_domain.write_domain = for_write ? I915_GEM_DOMAIN_CPU : 0;
+        /* XXX assumes bo is snoopable */
+        if (!bo->cpu) {
+                struct drm_i915_gem_set_domain set_domain;
 
-        drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
-        assert(!kgem_busy(kgem, bo->handle));
-        bo->needs_flush = false;
-        if (bo->gpu) {
-                kgem->sync = false;
-                kgem_retire(kgem);
+                DBG(("%s: sync: needs_flush? %d, gpu? %d, busy? %d\n", __FUNCTION__,
+                     bo->needs_flush, bo->gpu, kgem_busy(kgem, bo->handle)));
+
+                VG_CLEAR(set_domain);
+                set_domain.handle = bo->handle;
+                set_domain.read_domains = I915_GEM_DOMAIN_CPU;
+                set_domain.write_domain = I915_GEM_DOMAIN_CPU;
+
+                drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
+                assert(!kgem_busy(kgem, bo->handle));
+                bo->needs_flush = false;
+                if (bo->gpu) {
+                        kgem->sync = false;
+                        kgem_retire(kgem);
+                }
+                bo->cpu = true;
         }
-        bo->cpu_read = true;
-        if (for_write)
-                bo->cpu_write = true;
 }
 
 void kgem_sync(struct kgem *kgem)
 {
         DBG(("%s\n", __FUNCTION__));
 
         if (!list_is_empty(&kgem->requests)) {
                 struct drm_i915_gem_set_domain set_domain;
                 struct kgem_request *rq;
@@ -2336,12 +2400,61 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
                 __kgem_bo_init(&bo->base, handle, alloc);
                 bo->base.vmap = true;
                 bo->need_io = 0;
+                bo->mem = bo + 1;
                 goto init;
         } else
                 free(bo);
         }
 
-        {
+        if (!DEBUG_NO_LLC && kgem->gen >= 60) {
+                struct kgem_bo *old;
+
+                bo = malloc(sizeof(*bo));
+                if (bo == NULL)
+                        return NULL;
+
+                /* Be a little more generous and hope to hold fewer mmappings */
+                alloc = ALIGN(size, 128*1024);
+
+                old = NULL;
+                if (!write)
+                        old = search_linear_cache(kgem, alloc, true);
+                if (old == NULL)
+                        old = search_linear_cache(kgem, alloc, false);
+                if (old) {
+                        DBG(("%s: reusing handle=%d for buffer\n",
+                             __FUNCTION__, old->handle));
+
+                        memcpy(&bo->base, old, sizeof(*old));
+                        if (old->rq)
+                                list_replace(&old->request, &bo->base.request);
+                        else
+                                list_init(&bo->base.request);
+                        list_replace(&old->vma, &bo->base.vma);
+                        free(old);
+                        bo->base.refcnt = 1;
+                } else {
+                        if (!__kgem_bo_init(&bo->base,
+                                            gem_create(kgem->fd, alloc),
+                                            alloc)) {
+                                free(bo);
+                                return NULL;
+                        }
+                        DBG(("%s: created handle=%d for buffer\n",
+                             __FUNCTION__, bo->base.handle));
+                }
+
+                bo->mem = kgem_bo_map__cpu(kgem, &bo->base);
+                if (bo->mem == NULL) {
+                        kgem_bo_free(kgem, &bo->base);
+                        return NULL;
+                }
+
+                bo->need_io = false;
+                bo->base.io = true;
+
+                alloc = bo->base.size;
+        } else {
                 struct kgem_bo *old;
 
                 old = NULL;

@@ -2355,14 +2468,16 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
                         if (bo == NULL)
                                 return NULL;
 
+                        bo->mem = bo + 1;
+                        bo->need_io = write;
+
                         memcpy(&bo->base, old, sizeof(*old));
                         if (old->rq)
                                 list_replace(&old->request,
                                              &bo->base.request);
                         else
                                 list_init(&bo->base.request);
-                        list_replace(&old->vma,
-                                     &bo->base.vma);
+                        list_replace(&old->vma, &bo->base.vma);
                         free(old);
                         bo->base.refcnt = 1;
                 } else {

@@ -2376,9 +2491,10 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
                                 free(bo);
                                 return NULL;
                         }
+                        bo->mem = bo + 1;
+                        bo->need_io = write;
                 }
-                bo->need_io = write;
-                bo->base.io = write;
+                bo->base.io = true;
         }
 init:
         bo->base.reusable = false;

@@ -2409,13 +2525,11 @@ done:
                                   struct kgem_partial_bo,
                                   base.list);
                 }
-                if (p != first) {
-                        __list_del(bo->base.list.prev, bo->base.list.next);
-                        list_add_tail(&bo->base.list, &p->base.list);
-                }
+                if (p != first)
+                        list_move_tail(&bo->base.list, &p->base.list);
                 assert(validate_partials(kgem));
         }
-        *ret = (char *)(bo+1) + offset;
+        *ret = (char *)bo->mem + offset;
         return kgem_create_proxy(&bo->base, offset, size);
 }

@@ -2511,24 +2625,29 @@ void kgem_buffer_read_sync(struct kgem *kgem, struct kgem_bo *_bo)
         struct kgem_partial_bo *bo;
         uint32_t offset = _bo->delta, length = _bo->size;
 
         assert(_bo->exec == NULL);
         if (_bo->proxy)
                 _bo = _bo->proxy;
+        assert(_bo->exec == NULL);
 
         bo = (struct kgem_partial_bo *)_bo;
 
         DBG(("%s(offset=%d, length=%d, vmap=%d)\n", __FUNCTION__,
              offset, length, bo->base.vmap));
 
-        if (!bo->base.vmap) {
+        if (!bo->base.vmap && !is_mmaped_buffer(bo)) {
                 gem_read(kgem->fd,
                          bo->base.handle, (char *)(bo+1)+offset,
                          offset, length);
                 assert(!kgem_busy(kgem, bo->base.handle));
                 bo->base.needs_flush = false;
-                if (bo->base.gpu)
+                if (bo->base.gpu) {
+                        kgem->sync = false;
                         kgem_retire(kgem);
+                }
+                assert(bo->base.gpu == false);
         } else
-                kgem_bo_sync(kgem, &bo->base, false);
+                kgem_bo_sync__cpu(kgem, &bo->base);
 }
 
 uint32_t kgem_bo_get_binding(struct kgem_bo *bo, uint32_t format)
src/sna/kgem.h:

@@ -71,9 +71,8 @@ struct kgem_bo {
         uint32_t reusable : 1;
         uint32_t dirty : 1;
         uint32_t gpu : 1;
+        uint32_t cpu : 1;
         uint32_t needs_flush : 1;
-        uint32_t cpu_read : 1;
-        uint32_t cpu_write : 1;
         uint32_t vmap : 1;
         uint32_t io : 1;
         uint32_t flush : 1;

@@ -320,6 +319,7 @@ uint32_t kgem_add_reloc(struct kgem *kgem,
 void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo, int prot);
 void kgem_bo_unmap(struct kgem *kgem, struct kgem_bo *bo);
 void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo);
+void kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo);
+void kgem_bo_unmap__cpu(struct kgem *kgem, struct kgem_bo *bo, void *ptr);
 uint32_t kgem_bo_flink(struct kgem *kgem, struct kgem_bo *bo);

@@ -352,7 +352,6 @@ static inline void kgem_bo_mark_dirty(struct kgem_bo *bo)
         bo->dirty = true;
 }
 
-void kgem_bo_sync(struct kgem *kgem, struct kgem_bo *bo, bool for_write);
 void kgem_sync(struct kgem *kgem);
 
 #define KGEM_BUFFER_WRITE 0x1
src/sna/sna.h:

@@ -94,6 +94,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 #define DEBUG_NO_RENDER 0
 #define DEBUG_NO_BLT 0
 #define DEBUG_NO_IO 0
+#define DEBUG_NO_LLC 0
 
 #define DEBUG_FLUSH_CACHE 0
 #define DEBUG_FLUSH_BATCH 0

@@ -141,7 +142,6 @@ struct sna_pixmap {
 #define SOURCE_BIAS 4
         uint16_t source_count;
         uint8_t pinned :1;
-        uint8_t gpu_only :1;
         uint8_t flush :1;
         uint8_t gpu :1;
         uint8_t freed :1;

@@ -428,19 +428,21 @@ PixmapPtr sna_pixmap_create_upload(ScreenPtr screen,
 struct sna_pixmap *sna_pixmap_move_to_gpu(PixmapPtr pixmap);
 struct sna_pixmap *sna_pixmap_force_to_gpu(PixmapPtr pixmap);
 
-bool must_check sna_pixmap_move_to_cpu(PixmapPtr pixmap, bool write);
+#define MOVE_WRITE 0x1
+#define MOVE_READ 0x2
+bool must_check sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned flags);
 bool must_check sna_drawable_move_region_to_cpu(DrawablePtr drawable,
                                                 RegionPtr region,
-                                                Bool write);
+                                                unsigned flags);
 
 static inline bool must_check
-sna_drawable_move_to_cpu(DrawablePtr drawable, bool write)
+sna_drawable_move_to_cpu(DrawablePtr drawable, unsigned flags)
 {
         RegionRec region;
 
         pixman_region_init_rect(&region,
                                 0, 0, drawable->width, drawable->height);
-        return sna_drawable_move_region_to_cpu(drawable, &region, write);
+        return sna_drawable_move_region_to_cpu(drawable, &region, flags);
 }
 
 static inline bool must_check
src/sna/sna_accel.c:

@@ -61,7 +61,6 @@
 #define USE_SPANS 0
 #define USE_ZERO_SPANS 1
 #define USE_BO_FOR_SCRATCH_PIXMAP 1
-#define USE_LLC_CPU_BO 1
 
 static int sna_font_key;

@@ -183,7 +182,10 @@ sna_pixmap_alloc_cpu(struct sna *sna,
                      PixmapPtr pixmap,
                      struct sna_pixmap *priv)
 {
-        if (USE_LLC_CPU_BO && sna->kgem.gen >= 60) {
+        assert(priv->ptr == NULL);
+        assert(pixmap->devKind);
+
+        if (!DEBUG_NO_LLC && sna->kgem.gen >= 60) {
                 DBG(("%s: allocating CPU buffer (%dx%d)\n", __FUNCTION__,
                      pixmap->drawable.width, pixmap->drawable.height));

@@ -225,7 +227,9 @@ static void sna_pixmap_free_cpu(struct sna *sna, struct sna_pixmap *priv)
                 priv->cpu_bo = NULL;
         } else
                 free(priv->ptr);
+
         priv->pixmap->devPrivate.ptr = priv->ptr = NULL;
+        list_del(&priv->list);
 }
 
 static Bool sna_destroy_private(PixmapPtr pixmap, struct sna_pixmap *priv)

@@ -246,11 +250,10 @@ static Bool sna_destroy_private(PixmapPtr pixmap, struct sna_pixmap *priv)
                 sna_pixmap_free_cpu(sna, priv);
 
         if (priv->cpu_bo) {
-                if (kgem_bo_is_busy(priv->cpu_bo)) {
+                if (priv->cpu_bo->vmap && kgem_bo_is_busy(priv->cpu_bo)) {
                         list_add_tail(&priv->list, &sna->deferred_free);
                         return false;
                 }
-                kgem_bo_sync(&sna->kgem, priv->cpu_bo, true);
                 kgem_bo_destroy(&sna->kgem, priv->cpu_bo);
         }

@@ -474,7 +477,6 @@ sna_pixmap_create_scratch(ScreenPtr screen,
                 return NullPixmap;
         }
 
-        priv->gpu_only = 1;
         priv->freed = 1;
         sna_damage_all(&priv->gpu_damage, width, height);

@@ -576,12 +578,12 @@ static inline void list_move(struct list *list, struct list *head)
 }
 
 bool
-sna_pixmap_move_to_cpu(PixmapPtr pixmap, bool write)
+sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned int flags)
 {
         struct sna *sna = to_sna_from_pixmap(pixmap);
         struct sna_pixmap *priv;
 
-        DBG(("%s(pixmap=%p, write=%d)\n", __FUNCTION__, pixmap, write));
+        DBG(("%s(pixmap=%p, flags=%x)\n", __FUNCTION__, pixmap, flags));
 
         priv = sna_pixmap(pixmap);
         if (priv == NULL) {

@@ -589,17 +591,27 @@ sna_pixmap_move_to_cpu(PixmapPtr pixmap, bool write)
                 return true;
         }
 
-        DBG(("%s: gpu_bo=%p, gpu_damage=%p, gpu_only=%d\n",
-             __FUNCTION__, priv->gpu_bo, priv->gpu_damage, priv->gpu_only));
+        DBG(("%s: gpu_bo=%p, gpu_damage=%p\n",
+             __FUNCTION__, priv->gpu_bo, priv->gpu_damage));
 
-        if (pixmap->devPrivate.ptr == NULL) {
-                assert(priv->ptr == NULL);
-                assert(pixmap->devKind);
-                assert(priv->cpu_damage == NULL);
-                if (!sna_pixmap_alloc_cpu(sna, pixmap, priv))
-                        return false;
+        if ((flags & MOVE_READ) == 0) {
+                if (priv->cpu_bo && priv->cpu_bo->gpu) {
+                        if (priv->cpu_bo->exec == NULL)
+                                kgem_retire(&sna->kgem);
+
+                        if (priv->cpu_bo->gpu) {
+                                DBG(("%s: discarding busy CPU bo\n", __FUNCTION__));
+                                sna_pixmap_free_cpu(sna, priv);
+                        }
+                }
+
+                sna_damage_destroy(&priv->gpu_damage);
         }
 
+        if (pixmap->devPrivate.ptr == NULL &&
+            !sna_pixmap_alloc_cpu(sna, pixmap, priv))
+                return false;
+
         if (priv->gpu_bo == NULL) {
                 DBG(("%s: no GPU bo\n", __FUNCTION__));
                 goto done;
@@ -638,10 +650,10 @@ sna_pixmap_move_to_cpu(PixmapPtr pixmap, bool write)
 done:
         if (priv->cpu_bo) {
                 DBG(("%s: syncing CPU bo\n", __FUNCTION__));
-                kgem_bo_sync(&sna->kgem, priv->cpu_bo, write);
+                kgem_bo_sync__cpu(&sna->kgem, priv->cpu_bo);
         }
 
-        if (write) {
+        if (flags & MOVE_WRITE) {
                 DBG(("%s: marking as damaged\n", __FUNCTION__));
                 sna_damage_all(&priv->cpu_damage,
                                pixmap->drawable.width,

@@ -670,22 +682,27 @@ region_subsumes_drawable(RegionPtr region, DrawablePtr drawable)
                extents->y2 >= drawable->height;
 }
 
+static bool sync_will_stall(struct kgem_bo *bo)
+{
+        return bo->gpu | bo->needs_flush;
+}
+
 bool
 sna_drawable_move_region_to_cpu(DrawablePtr drawable,
                                 RegionPtr region,
-                                Bool write)
+                                unsigned flags)
 {
         PixmapPtr pixmap = get_drawable_pixmap(drawable);
         struct sna *sna = to_sna_from_pixmap(pixmap);
         struct sna_pixmap *priv;
         int16_t dx, dy;
 
-        DBG(("%s(pixmap=%p (%dx%d), [(%d, %d), (%d, %d)], write=%d)\n",
+        DBG(("%s(pixmap=%p (%dx%d), [(%d, %d), (%d, %d)], flags=%d)\n",
              __FUNCTION__, pixmap,
              pixmap->drawable.width, pixmap->drawable.height,
              RegionExtents(region)->x1, RegionExtents(region)->y1,
             RegionExtents(region)->x2, RegionExtents(region)->y2,
-             write));
+             flags));
 
         priv = sna_pixmap(pixmap);
         if (priv == NULL) {

@@ -702,17 +719,25 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
                 DBG(("%s: region subsumes drawable\n", __FUNCTION__));
                 if (dx | dy)
                         RegionTranslate(region, -dx, -dy);
-                return sna_pixmap_move_to_cpu(pixmap, write);
+                return sna_pixmap_move_to_cpu(pixmap, flags);
         }
 
-        if (pixmap->devPrivate.ptr == NULL) {
-                assert(priv->ptr == NULL);
-                assert(pixmap->devKind);
-                assert(priv->cpu_damage == NULL);
-                if (!sna_pixmap_alloc_cpu(sna, pixmap, priv))
-                        return false;
+        if ((flags & MOVE_READ) == 0 && priv->cpu_bo && !priv->cpu_bo->vmap) {
+                if (sync_will_stall(priv->cpu_bo) && priv->cpu_bo->exec == NULL)
+                        kgem_retire(&sna->kgem);
+                if (sync_will_stall(priv->cpu_bo)) {
+                        sna_damage_subtract(&priv->cpu_damage, region);
+                        if (!sna_pixmap_move_to_gpu(pixmap))
+                                return false;
+
+                        sna_pixmap_free_cpu(sna, priv);
+                }
         }
 
+        if (pixmap->devPrivate.ptr == NULL &&
+            !sna_pixmap_alloc_cpu(sna, pixmap, priv))
+                return false;
+
         if (priv->gpu_bo == NULL)
                 goto done;

@@ -723,9 +748,12 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
                      region->extents.x2 - region->extents.x1,
                      region->extents.y2 - region->extents.y1));
 
-                if (!write &&
-                    region->extents.x2 - region->extents.x1 == 1 &&
-                    region->extents.y2 - region->extents.y1 == 1) {
+                if ((flags & MOVE_READ) == 0) {
+                        assert(flags == MOVE_WRITE);
+                        sna_damage_subtract(&priv->gpu_damage, region);
+                } else if ((flags & MOVE_WRITE) == 0 &&
+                           region->extents.x2 - region->extents.x1 == 1 &&
+                           region->extents.y2 - region->extents.y1 == 1) {
                         /* Often associated with synchronisation, KISS */
                         sna_read_boxes(sna,
                                        priv->gpu_bo, 0, 0,

@@ -796,10 +824,10 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 done:
         if (priv->cpu_bo) {
                 DBG(("%s: syncing cpu bo\n", __FUNCTION__));
-                kgem_bo_sync(&sna->kgem, priv->cpu_bo, write);
+                kgem_bo_sync__cpu(&sna->kgem, priv->cpu_bo);
         }
 
-        if (write) {
+        if (flags & MOVE_WRITE) {
                 DBG(("%s: applying cpu damage\n", __FUNCTION__));
                 assert_pixmap_contains_box(pixmap, RegionExtents(region));
                 sna_damage_add(&priv->cpu_damage, region);

@@ -881,7 +909,7 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, BoxPtr box)
 done:
         if (priv->cpu_damage == NULL)
                 list_del(&priv->list);
-        if (!priv->gpu_only && !priv->pinned)
+        if (!priv->pinned)
                 list_move(&priv->inactive, &sna->active_pixmaps);
 }
@@ -924,13 +952,11 @@ _sna_drawable_use_gpu_bo(DrawablePtr drawable,
 
         sna_pixmap_move_area_to_gpu(pixmap, &extents);
 done:
-        if (damage) {
-                if (sna_damage_contains_box(priv->gpu_damage,
-                                            &extents) != PIXMAN_REGION_IN)
-                        *damage = &priv->gpu_damage;
-                else
-                        *damage = NULL;
-        }
+        if (sna_damage_contains_box(priv->gpu_damage,
+                                    &extents) != PIXMAN_REGION_IN)
+                *damage = &priv->gpu_damage;
+        else
+                *damage = NULL;
 
         return TRUE;
 }

@@ -973,17 +999,15 @@ _sna_drawable_use_cpu_bo(DrawablePtr drawable,
                 goto done;
 
         if (sna_damage_contains_box(priv->gpu_damage,
-                                     &extents) != PIXMAN_REGION_OUT)
+                                    &extents) != PIXMAN_REGION_OUT)
                 return FALSE;
 
 done:
-        if (damage) {
-                if (sna_damage_contains_box(priv->cpu_damage,
-                                            &extents) != PIXMAN_REGION_IN)
-                        *damage = &priv->cpu_damage;
-                else
-                        *damage = NULL;
-        }
+        if (sna_damage_contains_box(priv->cpu_damage,
+                                    &extents) != PIXMAN_REGION_IN)
+                *damage = &priv->cpu_damage;
+        else
+                *damage = NULL;
 
         return TRUE;
 }

@@ -1053,7 +1077,6 @@ sna_pixmap_create_upload(ScreenPtr screen,
         priv->cpu_bo = NULL;
         priv->cpu_damage = priv->gpu_damage = NULL;
         priv->ptr = NULL;
-        priv->gpu_only = 0;
         priv->pinned = 0;
         priv->freed = 1;
         list_init(&priv->list);

@@ -1195,7 +1218,7 @@ done:
                        pixmap->drawable.width,
                        pixmap->drawable.height);
         list_del(&priv->list);
-        if (!priv->gpu_only && !priv->pinned)
+        if (!priv->pinned)
                 list_move(&priv->inactive, &sna->active_pixmaps);
         priv->gpu = true;
         return priv;

@@ -1209,7 +1232,7 @@ static bool must_check sna_validate_pixmap(DrawablePtr draw, PixmapPtr pixmap)
             FbEvenTile(pixmap->drawable.width *
                        pixmap->drawable.bitsPerPixel)) {
                 DBG(("%s: flushing pixmap\n", __FUNCTION__));
-                ret = sna_pixmap_move_to_cpu(pixmap, true);
+                ret = sna_pixmap_move_to_cpu(pixmap, MOVE_READ | MOVE_WRITE);
         }
 
         return ret;

@@ -1242,7 +1265,7 @@ static bool must_check sna_gc_move_to_cpu(GCPtr gc, DrawablePtr drawable)
 
         if (changes & GCStipple && gc->stipple) {
                 DBG(("%s: flushing stipple pixmap\n", __FUNCTION__));
-                if (!sna_pixmap_move_to_cpu(gc->stipple, false))
+                if (!sna_pixmap_move_to_cpu(gc->stipple, MOVE_READ))
                         return false;
         }

@@ -1255,10 +1278,10 @@ static bool must_check sna_gc_move_to_cpu(GCPtr gc, DrawablePtr drawable)
 
         switch (gc->fillStyle) {
         case FillTiled:
-                return sna_drawable_move_to_cpu(&gc->tile.pixmap->drawable, false);
+                return sna_drawable_move_to_cpu(&gc->tile.pixmap->drawable, MOVE_READ);
         case FillStippled:
         case FillOpaqueStippled:
-                return sna_drawable_move_to_cpu(&gc->stipple->drawable, false);
+                return sna_drawable_move_to_cpu(&gc->stipple->drawable, MOVE_READ);
         default:
                 return true;
         }

@@ -1431,7 +1454,6 @@ sna_put_image_upload_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
                             pixmap, src_bo, -x, -y,
                             pixmap, priv->gpu_bo, 0, 0,
                             box, nbox);
-        kgem_bo_sync(&sna->kgem, src_bo, true);
         kgem_bo_destroy(&sna->kgem, src_bo);
 }

@@ -1462,6 +1484,8 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
         int16_t dx, dy;
         int n;
 
+        assert_pixmap_contains_box(pixmap, RegionExtents(region));
+
         if (gc->alu != GXcopy)
                 return false;

@@ -1486,7 +1510,6 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
                                pixmap->drawable.width,
                                pixmap->drawable.height);
         } else {
-                assert_pixmap_contains_box(pixmap, RegionExtents(region));
                 sna_damage_subtract(&priv->cpu_damage, region);
                 sna_damage_add(&priv->gpu_damage, region);
         }

@@ -1494,8 +1517,46 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
                 return true;
         }
 
-        if (priv->cpu_bo)
-                kgem_bo_sync(&sna->kgem, priv->cpu_bo, true);
+        if (priv->cpu_bo) {
+                /* If the GPU is currently accessing the CPU pixmap, then
+                 * we will need to wait for that to finish before we can
+                 * modify the memory.
+                 *
+                 * However, we can queue some writes to the GPU bo to avoid
+                 * the wait. Or we can try to replace the CPU bo.
+                 */
+                if (sync_will_stall(priv->cpu_bo) && priv->cpu_bo->exec == NULL)
+                        kgem_retire(&sna->kgem);
+                if (sync_will_stall(priv->cpu_bo)) {
+                        if (priv->cpu_bo->vmap) {
+                                if (sna_put_image_upload_blt(drawable, gc, region,
+                                                             x, y, w, h, bits, stride)) {
+                                        if (region_subsumes_drawable(region, &pixmap->drawable)) {
+                                                sna_damage_destroy(&priv->cpu_damage);
+                                                sna_damage_all(&priv->gpu_damage,
+                                                               pixmap->drawable.width,
+                                                               pixmap->drawable.height);
+                                        } else {
+                                                sna_damage_subtract(&priv->cpu_damage, region);
+                                                sna_damage_add(&priv->gpu_damage, region);
+                                        }
+
+                                        return true;
+                                }
+                        } else {
+                                if (!region_subsumes_drawable(region, &pixmap->drawable)) {
+                                        sna_damage_subtract(&priv->cpu_damage, region);
+                                        if (!sna_pixmap_move_to_gpu(pixmap))
+                                                return false;
+                                }
+
+                                sna_pixmap_free_cpu(sna, priv);
+                        }
+                }
+
+                if (priv->cpu_bo)
+                        kgem_bo_sync__cpu(&sna->kgem, priv->cpu_bo);
+        }
 
         if (pixmap->devPrivate.ptr == NULL &&
             !sna_pixmap_alloc_cpu(sna, pixmap, priv))
@@ -1508,7 +1569,6 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
                                pixmap->drawable.height);
                 sna_pixmap_destroy_gpu_bo(sna, priv);
         } else {
-                assert_pixmap_contains_box(pixmap, RegionExtents(region));
                 sna_damage_subtract(&priv->gpu_damage, region);
                 sna_damage_add(&priv->cpu_damage, region);
                 if (priv->gpu_bo &&

@@ -1909,7 +1969,8 @@ fallback:
 
         if (!sna_gc_move_to_cpu(gc, drawable))
                 goto out;
-        if (!sna_drawable_move_region_to_cpu(drawable, &region, true))
+        if (!sna_drawable_move_region_to_cpu(drawable, &region,
+                                             MOVE_READ | MOVE_WRITE))
                 goto out;
 
         DBG(("%s: fbPutImage(%d, %d, %d, %d)\n",

@@ -1986,7 +2047,7 @@ sna_self_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 
 fallback:
         DBG(("%s: fallback", __FUNCTION__));
-        if (!sna_pixmap_move_to_cpu(pixmap, true))
+        if (!sna_pixmap_move_to_cpu(pixmap, MOVE_READ | MOVE_WRITE))
                 return;
 
         stride = pixmap->devKind;

@@ -2033,6 +2094,22 @@ fallback:
         }
 }
 
+static bool copy_use_gpu_bo(struct sna *sna,
+                            struct sna_pixmap *priv)
+{
+        if (!priv->cpu_bo)
+                return false;
+
+        if (priv->cpu_bo->gpu) {
+                if (priv->cpu_bo->exec)
+                        return true;
+
+                kgem_retire(&sna->kgem);
+        }
+
+        return priv->cpu_bo->gpu;
+}
+
 static void
 sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
                BoxPtr box, int n,

@@ -2097,7 +2174,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 
         /* Try to maintain the data on the GPU */
         if (dst_priv && dst_priv->gpu_bo == NULL &&
-            src_priv && src_priv->gpu_bo != NULL) {
+            src_priv && (src_priv->gpu_bo != NULL || (src_priv->cpu_bo && src_priv->cpu_bo->gpu))) {
                 uint32_t tiling =
                         sna_pixmap_choose_tiling(dst_pixmap,
                                                  src_priv->gpu_bo->tiling);

@@ -2118,10 +2195,9 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
         }
 
         if (dst_priv && dst_priv->gpu_bo) {
-                if (!src_priv && !dst_priv->gpu_only) {
-                        DBG(("%s: fallback - src_priv=%p but dst gpu_only=%d\n",
-                             __FUNCTION__,
-                             src_priv, dst_priv->gpu_only));
+                if (!src_priv && !copy_use_gpu_bo(sna, dst_priv)) {
+                        DBG(("%s: fallback - src_priv=%p and not use dst gpu bo\n",
+                             __FUNCTION__, src_priv));
                         goto fallback;
                 }

@@ -2143,6 +2219,31 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
                         goto fallback;
                 }
 
+                if (replaces) {
+                        sna_damage_destroy(&dst_priv->cpu_damage);
+                        sna_damage_all(&dst_priv->gpu_damage,
+                                       dst_pixmap->drawable.width,
+                                       dst_pixmap->drawable.height);
+                } else {
+                        RegionTranslate(&region, dst_dx, dst_dy);
+                        assert_pixmap_contains_box(dst_pixmap,
+                                                   RegionExtents(&region));
+                        sna_damage_add(&dst_priv->gpu_damage, &region);
+                        if (alu == GXcopy)
+                                sna_damage_subtract(&dst_priv->cpu_damage,
+                                                    &region);
+                        RegionTranslate(&region, -dst_dx, -dst_dy);
+                }
+        } else if (src_priv && src_priv->cpu_bo) {
+                if (!sna->render.copy_boxes(sna, alu,
+                                            src_pixmap, src_priv->cpu_bo, src_dx, src_dy,
+                                            dst_pixmap, dst_priv->gpu_bo, dst_dx, dst_dy,
+                                            box, n)) {
+                        DBG(("%s: fallback - accelerated copy boxes failed\n",
+                             __FUNCTION__));
+                        goto fallback;
+                }
+
                 if (replaces) {
                         sna_damage_destroy(&dst_priv->cpu_damage);
                         sna_damage_all(&dst_priv->gpu_damage,

@@ -2218,7 +2319,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
         if (src_priv) {
                 RegionTranslate(&region, src_dx, src_dy);
                 if (!sna_drawable_move_region_to_cpu(&src_pixmap->drawable,
-                                                     &region, false))
+                                                     &region, MOVE_READ))
                         goto out;
                 RegionTranslate(&region, -src_dx, -src_dy);
         }

@@ -2271,39 +2372,24 @@ fallback:
         if (src_priv) {
                 RegionTranslate(&region, src_dx, src_dy);
                 if (!sna_drawable_move_region_to_cpu(&src_pixmap->drawable,
-                                                     &region, false))
+                                                     &region, MOVE_READ))
                         goto out;
                 RegionTranslate(&region, -src_dx, -src_dy);
         }
 
         RegionTranslate(&region, dst_dx, dst_dy);
         if (dst_priv) {
-                if (alu == GXcopy) {
-                        if (replaces) {
-                                sna_damage_all(&dst_priv->cpu_damage,
-                                               dst_pixmap->drawable.width,
-                                               dst_pixmap->drawable.height);
-                                sna_pixmap_destroy_gpu_bo(sna, dst_priv);
-                        } else {
-                                assert_pixmap_contains_box(dst_pixmap,
-                                                           RegionExtents(&region));
-                                sna_damage_subtract(&dst_priv->gpu_damage,
-                                                    &region);
-                                sna_damage_add(&dst_priv->cpu_damage,
-                                               &region);
-                                if (dst_priv->flush)
-                                        list_move(&dst_priv->list,
-                                                  &sna->dirty_pixmaps);
-                        }
+                unsigned mode;
 
-                        if (dst_pixmap->devPrivate.ptr == NULL &&
-                            !sna_pixmap_alloc_cpu(sna, dst_pixmap, dst_priv))
-                                goto out;
-                } else {
-                        if (!sna_drawable_move_region_to_cpu(&dst_pixmap->drawable,
-                                                             &region, true))
-                                goto out;
-                }
+                assert_pixmap_contains_box(dst_pixmap,
+                                           RegionExtents(&region));
+
+                mode = MOVE_WRITE;
+                if (alu != GXcopy)
+                        mode |= MOVE_READ;
+                if (!sna_drawable_move_region_to_cpu(&dst_pixmap->drawable,
+                                                     &region, mode))
+                        goto out;
         }
 
         dst_stride = dst_pixmap->devKind;
@@ -2404,13 +2490,13 @@ sna_copy_area(DrawablePtr src, DrawablePtr dst, GCPtr gc,
         if (!sna_gc_move_to_cpu(gc, dst))
                 goto out;
 
-        if (!sna_drawable_move_region_to_cpu(dst, &region, true))
+        if (!sna_drawable_move_region_to_cpu(dst, &region, MOVE_READ | MOVE_WRITE))
                 goto out;
 
         RegionTranslate(&region,
                         src_x - dst_x - dst->x + src->x,
                         src_y - dst_y - dst->y + src->y);
-        if (!sna_drawable_move_region_to_cpu(src, &region, false))
+        if (!sna_drawable_move_region_to_cpu(src, &region, MOVE_READ))
                 goto out;
 
         ret = fbCopyArea(src, dst, gc,

@@ -2930,7 +3016,8 @@ fallback:
 
         if (!sna_gc_move_to_cpu(gc, drawable))
                 goto out;
-        if (!sna_drawable_move_region_to_cpu(drawable, &region, true))
+        if (!sna_drawable_move_region_to_cpu(drawable, &region,
+                                             MOVE_READ | MOVE_WRITE))
                 goto out;
 
         fbFillSpans(drawable, gc, n, pt, width, sorted);

@@ -2958,7 +3045,8 @@ sna_set_spans(DrawablePtr drawable, GCPtr gc, char *src,
 
         if (!sna_gc_move_to_cpu(gc, drawable))
                 goto out;
-        if (!sna_drawable_move_region_to_cpu(drawable, &region, true))
+        if (!sna_drawable_move_region_to_cpu(drawable, &region,
+                                             MOVE_READ | MOVE_WRITE))
                 goto out;
 
         fbSetSpans(drawable, gc, src, pt, width, n, sorted);

@@ -3142,7 +3230,7 @@ sna_copy_plane_blt(DrawablePtr source, DrawablePtr drawable, GCPtr gc,
         if (n == 0)
                 return;
 
-        if (!sna_pixmap_move_to_cpu(src_pixmap, false))
+        if (!sna_pixmap_move_to_cpu(src_pixmap, MOVE_READ))
                 return;
         get_drawable_deltas(source, src_pixmap, &dx, &dy);
         sx += dx;

@@ -3381,13 +3469,14 @@ fallback:
         if (!sna_gc_move_to_cpu(gc, dst))
                 goto out;
 
-        if (!sna_drawable_move_region_to_cpu(dst, &region, true))
+        if (!sna_drawable_move_region_to_cpu(dst, &region,
+                                             MOVE_READ | MOVE_WRITE))
                 goto out;
 
         RegionTranslate(&region,
                         src_x - dst_x - dst->x + src->x,
                         src_y - dst_y - dst->y + src->y);
-        if (!sna_drawable_move_region_to_cpu(src, &region, false))
+        if (!sna_drawable_move_region_to_cpu(src, &region, MOVE_READ))
                 goto out;
 
         DBG(("%s: fbCopyPlane(%d, %d, %d, %d, %d,%d) %x\n",

@@ -3576,7 +3665,8 @@ fallback:
 
         if (!sna_gc_move_to_cpu(gc, drawable))
                 goto out;
-        if (!sna_drawable_move_region_to_cpu(drawable, &region, true))
+        if (!sna_drawable_move_region_to_cpu(drawable, &region,
+                                             MOVE_READ | MOVE_WRITE))
                 goto out;
 
         DBG(("%s: fbPolyPoint\n", __FUNCTION__));

@@ -4432,7 +4522,8 @@ fallback:
 
         if (!sna_gc_move_to_cpu(gc, drawable))
                 goto out;
-        if (!sna_drawable_move_region_to_cpu(drawable, &region, true))
+        if (!sna_drawable_move_region_to_cpu(drawable, &region,
+                                             MOVE_READ | MOVE_WRITE))
                 goto out;
 
         DBG(("%s: fbPolyLine\n", __FUNCTION__));

@@ -5104,6 +5195,7 @@ sna_poly_segment(DrawablePtr drawable, GCPtr gc, int n, xSegment *seg)
 {
         PixmapPtr pixmap = get_drawable_pixmap(drawable);
         struct sna *sna = to_sna_from_pixmap(pixmap);
+        struct sna_damage **damage;
         RegionRec region;
         unsigned flags;

@@ -5141,7 +5233,6 @@ sna_poly_segment(DrawablePtr drawable, GCPtr gc, int n, xSegment *seg)
                         goto spans_fallback;
                 if (gc->fillStyle == FillSolid) {
                         struct sna_pixmap *priv = sna_pixmap(pixmap);
-                        struct sna_damage **damage;
 
                         DBG(("%s: trying blt solid fill [%08lx] paths\n",
                              __FUNCTION__, gc->fgPixel));

@@ -5176,7 +5267,6 @@ sna_poly_segment(DrawablePtr drawable, GCPtr gc, int n, xSegment *seg)
                         }
                 } else if (flags & 4) {
                         struct sna_pixmap *priv = sna_pixmap(pixmap);
-                        struct sna_damage **damage;
 
                         /* Try converting these to a set of rectangles instead */
                         if (sna_drawable_use_gpu_bo(drawable, &region.extents, &damage)) {

@@ -5241,7 +5331,7 @@ sna_poly_segment(DrawablePtr drawable, GCPtr gc, int n, xSegment *seg)
         /* XXX Do we really want to base this decision on the amalgam ? */
 spans_fallback:
         if (USE_SPANS &&
-            sna_drawable_use_gpu_bo(drawable, &region.extents, NULL)) {
+            sna_drawable_use_gpu_bo(drawable, &region.extents, &damage)) {
                 void (*line)(DrawablePtr, GCPtr, int, int, DDXPointPtr);
                 int i;

@@ -5281,7 +5371,8 @@ fallback:
 
         if (!sna_gc_move_to_cpu(gc, drawable))
                 goto out;
-        if (!sna_drawable_move_region_to_cpu(drawable, &region, true))
+        if (!sna_drawable_move_region_to_cpu(drawable, &region,
+                                             MOVE_READ | MOVE_WRITE))
                 goto out;
 
         DBG(("%s: fbPolySegment\n", __FUNCTION__));

@@ -5830,7 +5921,8 @@ fallback:
 
         if (!sna_gc_move_to_cpu(gc, drawable))
                 goto out;
-        if (!sna_drawable_move_region_to_cpu(drawable, &region, true))
+        if (!sna_drawable_move_region_to_cpu(drawable, &region,
+                                             MOVE_READ | MOVE_WRITE))
                 goto out;
 
         DBG(("%s: fbPolyRectangle\n", __FUNCTION__));

@@ -5954,7 +6046,8 @@ fallback:
 
         if (!sna_gc_move_to_cpu(gc, drawable))
                 goto out;
-        if (!sna_drawable_move_region_to_cpu(drawable, &region, true))
+        if (!sna_drawable_move_region_to_cpu(drawable, &region,
+                                             MOVE_READ | MOVE_WRITE))
                 goto out;
 
         /* XXX may still fallthrough to miZeroPolyArc */

@@ -6141,7 +6234,7 @@ static uint32_t
 get_pixel(PixmapPtr pixmap)
 {
         DBG(("%s\n", __FUNCTION__));
-        if (!sna_pixmap_move_to_cpu(pixmap, false))
+        if (!sna_pixmap_move_to_cpu(pixmap, MOVE_READ))
                 return 0;
 
         switch (pixmap->drawable.bitsPerPixel) {
@@ -7015,7 +7108,7 @@ sna_poly_fill_rect_stippled_blt(DrawablePtr drawable,
                 bo = sna_pixmap(pixmap)->gpu_bo;
         }
 
-        if (!sna_drawable_move_to_cpu(&stipple->drawable, false))
+        if (!sna_drawable_move_to_cpu(&stipple->drawable, MOVE_READ))
                 return false;
 
         DBG(("%s: origin (%d, %d), extents (stipple): (%d, %d), stipple size %dx%d\n",

@@ -7184,7 +7277,12 @@ fallback:
 
         if (!sna_gc_move_to_cpu(gc, draw))
                 goto out;
-        if (!sna_drawable_move_region_to_cpu(draw, &region, true))
+
+        flags = MOVE_WRITE;
+        if (gc->fillStyle == FillStippled ||
+            !(gc->alu == GXcopy || gc->alu == GXclear || gc->alu == GXset))
+                flags |= MOVE_READ;
+        if (!sna_drawable_move_region_to_cpu(draw, &region, flags))
                 goto out;
 
         DBG(("%s: fallback - fbPolyFillRect\n", __FUNCTION__));

@@ -7519,7 +7617,7 @@ static bool sna_set_glyph(CharInfoPtr in, CharInfoPtr out)
         w = (w + 7) >> 3;
 
         out->metrics = in->metrics;
-        out->bits = malloc(w*h);
+        out->bits = malloc((w*h + 7) & ~7);
         if (out->bits == NULL)
                 return false;

@@ -7630,7 +7728,8 @@ sna_poly_text8(DrawablePtr drawable, GCPtr gc,
         if (!sna_gc_move_to_cpu(gc, drawable))
                 goto out;
 
-        if (!sna_drawable_move_region_to_cpu(drawable, &region, true))
+        if (!sna_drawable_move_region_to_cpu(drawable, &region,
+                                             MOVE_READ | MOVE_WRITE))
                 goto out;
 
         DBG(("%s: fallback -- fbPolyGlyphBlt\n", __FUNCTION__));

@@ -7703,7 +7802,8 @@ sna_poly_text16(DrawablePtr drawable, GCPtr gc,
 
         if (!sna_gc_move_to_cpu(gc, drawable))
                 goto out;
-        if (!sna_drawable_move_region_to_cpu(drawable, &region, true))
+        if (!sna_drawable_move_region_to_cpu(drawable, &region,
+                                             MOVE_READ | MOVE_WRITE))
                 goto out;
 
         DBG(("%s: fallback -- fbPolyGlyphBlt\n", __FUNCTION__));

@@ -7776,7 +7876,8 @@ sna_image_text8(DrawablePtr drawable, GCPtr gc,
 
         if (!sna_gc_move_to_cpu(gc, drawable))
                 goto out;
-        if (!sna_drawable_move_region_to_cpu(drawable, &region, true))
+        if (!sna_drawable_move_region_to_cpu(drawable, &region,
+                                             MOVE_READ | MOVE_WRITE))
                 goto out;
 
         DBG(("%s: fallback -- fbImageGlyphBlt\n", __FUNCTION__));

@@ -7842,7 +7943,8 @@ sna_image_text16(DrawablePtr drawable, GCPtr gc,
 
         if (!sna_gc_move_to_cpu(gc, drawable))
                 goto out;
-        if(!sna_drawable_move_region_to_cpu(drawable, &region, true))
+        if(!sna_drawable_move_region_to_cpu(drawable, &region,
+                                            MOVE_READ | MOVE_WRITE))
                 goto out;
 
         DBG(("%s: fallback -- fbImageGlyphBlt\n", __FUNCTION__));

@@ -8151,7 +8253,8 @@ fallback:
         DBG(("%s: fallback\n", __FUNCTION__));
         if (!sna_gc_move_to_cpu(gc, drawable))
                 goto out;
-        if (!sna_drawable_move_region_to_cpu(drawable, &region, true))
+        if (!sna_drawable_move_region_to_cpu(drawable, &region,
+                                             MOVE_READ | MOVE_WRITE))
                 goto out;
         DBG(("%s: fallback -- fbPolyGlyphBlt\n", __FUNCTION__));
         fbPolyGlyphBlt(drawable, gc, x, y, n, info, base);

@@ -8324,9 +8427,10 @@ sna_push_pixels(GCPtr gc, PixmapPtr bitmap, DrawablePtr drawable,
         DBG(("%s: fallback\n", __FUNCTION__));
         if (!sna_gc_move_to_cpu(gc, drawable))
                 goto out;
-        if (!sna_pixmap_move_to_cpu(bitmap, false))
+        if (!sna_pixmap_move_to_cpu(bitmap, MOVE_READ))
                 goto out;
-        if (!sna_drawable_move_region_to_cpu(drawable, &region, true))
+        if (!sna_drawable_move_region_to_cpu(drawable, &region,
+                                             MOVE_READ | MOVE_WRITE))
                 goto out;
 
         DBG(("%s: fallback, fbPushPixels(%d, %d, %d %d)\n",

@@ -8408,7 +8512,7 @@ sna_get_image(DrawablePtr drawable,
         region.extents.y2 = region.extents.y1 + h;
         region.data = NULL;
 
-        if (!sna_drawable_move_region_to_cpu(drawable, &region, false))
+        if (!sna_drawable_move_region_to_cpu(drawable, &region, MOVE_READ))
                 return;
 
         fbGetImage(drawable, x, y, w, h, format, mask, dst);

@@ -8424,7 +8528,7 @@ sna_get_spans(DrawablePtr drawable, int wMax,
                 return;
 
         region.data = NULL;
-        if (!sna_drawable_move_region_to_cpu(drawable, &region, false))
+        if (!sna_drawable_move_region_to_cpu(drawable, &region, MOVE_READ))
                 return;
 
         fbGetSpans(drawable, wMax, pt, width, n, start);

@@ -8442,7 +8546,7 @@ sna_copy_window(WindowPtr win, DDXPointRec origin, RegionPtr src)
 
         if (wedged(sna)) {
                 DBG(("%s: fallback -- wedged\n", __FUNCTION__));
-                if (sna_pixmap_move_to_cpu(pixmap, true))
+                if (sna_pixmap_move_to_cpu(pixmap, MOVE_READ | MOVE_WRITE))
                         fbCopyWindow(win, origin, src);
                 return;
         }

@@ -8734,6 +8838,56 @@ static void sna_accel_expire(struct sna *sna)
                 _sna_accel_disarm_timer(sna, EXPIRE_TIMER);
 }
 
+static bool
+sna_pixmap_free_gpu(struct sna *sna, struct sna_pixmap *priv)
+{
+        PixmapPtr pixmap = priv->pixmap;
+
+        assert (!priv->flush);
+
+        if (pixmap->devPrivate.ptr == NULL &&
+            !sna_pixmap_alloc_cpu(sna, pixmap, priv))
+                return false;
+
+        if (priv->gpu_damage) {
+                BoxPtr box;
+                int n;
+
+                DBG(("%s: flushing GPU damage\n", __FUNCTION__));
+
+                n = sna_damage_get_boxes(priv->gpu_damage, &box);
+                if (n) {
+                        struct kgem_bo *dst_bo;
+                        Bool ok = FALSE;
+
+                        dst_bo = NULL;
+                        if (sna->kgem.gen >= 30)
+                                dst_bo = pixmap_vmap(&sna->kgem, pixmap);
+                        if (dst_bo)
+                                ok = sna->render.copy_boxes(sna, GXcopy,
+                                                            pixmap, priv->gpu_bo, 0, 0,
+                                                            pixmap, dst_bo, 0, 0,
+                                                            box, n);
+                        if (!ok)
+                                sna_read_boxes(sna,
+                                               priv->gpu_bo, 0, 0,
+                                               pixmap, 0, 0,
+                                               box, n);
+                }
+
+                __sna_damage_destroy(priv->gpu_damage);
+                priv->gpu_damage = NULL;
+        }
+
+        sna_damage_all(&priv->cpu_damage,
+                       pixmap->drawable.width,
+                       pixmap->drawable.height);
+        sna_pixmap_destroy_gpu_bo(sna, priv);
+
+        priv->gpu = false;
+        return true;
+}
+
 static void sna_accel_inactive(struct sna *sna)
 {
         struct sna_pixmap *priv, *next;

@@ -8792,7 +8946,7 @@ static void sna_accel_inactive(struct sna *sna)
                 if (!priv->pinned) {
                         DBG(("%s: discarding inactive GPU bo handle=%d\n",
                              __FUNCTION__, priv->gpu_bo->handle));
-                        if (!sna_pixmap_move_to_cpu(priv->pixmap, true))
+                        if (!sna_pixmap_free_gpu(sna, priv))
                                 list_add(&priv->inactive, &preserve);
                 }
         }
src/sna/sna_blt.c:

@@ -559,7 +559,7 @@ get_pixel(PicturePtr picture)
 
         DBG(("%s: %p\n", __FUNCTION__, pixmap));
 
-        if (!sna_pixmap_move_to_cpu(pixmap, false))
+        if (!sna_pixmap_move_to_cpu(pixmap, MOVE_READ))
                 return 0;
 
         switch (pixmap->drawable.bitsPerPixel) {

@@ -981,11 +981,8 @@ static void blt_vmap_done(struct sna *sna, const struct sna_composite_op *op)
         struct kgem_bo *bo = (struct kgem_bo *)op->u.blt.src_pixmap;
 
         blt_done(sna, op);
-        if (bo) {
-                struct kgem *kgem = &sna->kgem;
-                kgem_bo_sync(kgem, bo, true);
-                kgem_bo_destroy(kgem, bo);
-        }
+        if (bo)
+                kgem_bo_destroy(&sna->kgem, bo);
 }
 
 fastcall static void

@@ -1113,11 +1110,9 @@ prepare_blt_put(struct sna *sna,
         DBG(("%s\n", __FUNCTION__));
 
         if (priv) {
-                if (!priv->gpu_only) {
-                        src_bo = priv->cpu_bo;
-                        if (!src_bo)
-                                src_bo = pixmap_vmap(&sna->kgem, src);
-                }
+                src_bo = priv->cpu_bo;
+                if (!src_bo)
+                        src_bo = pixmap_vmap(&sna->kgem, src);
         } else {
                 src_bo = kgem_create_map(&sna->kgem,
                                          src->devPrivate.ptr,

@@ -1140,7 +1135,7 @@ prepare_blt_put(struct sna *sna,
                                  GXcopy))
                         return FALSE;
         } else {
-                if (!sna_pixmap_move_to_cpu(src, false))
+                if (!sna_pixmap_move_to_cpu(src, MOVE_READ))
                         return FALSE;
 
                 op->blt = blt_put_composite;

@@ -1185,8 +1180,6 @@ has_cpu_area(PixmapPtr pixmap, int x, int y, int w, int h)
                 return TRUE;
         if (!priv->gpu_bo)
                 return TRUE;
-        if (priv->gpu_only)
-                return FALSE;
 
         if (priv->gpu_damage == NULL)
                 return TRUE;
@ -401,6 +401,7 @@ sna_composite(CARD8 op,
|
|||
{
|
||||
struct sna *sna = to_sna_from_drawable(dst->pDrawable);
|
||||
struct sna_composite_op tmp;
|
||||
unsigned flags;
|
||||
RegionRec region;
|
||||
int dx, dy;
|
||||
|
||||
|
|
@@ -486,24 +487,34 @@ fallback:
 	       dst_x, dst_y,
 	       dst->pDrawable->x, dst->pDrawable->y,
 	       width, height));
 
-	if (!sna_drawable_move_region_to_cpu(dst->pDrawable, &region, true))
+	if (op == PictOpSrc || op == PictOpClear)
+		flags = MOVE_WRITE;
+	else
+		flags = MOVE_WRITE | MOVE_READ;
+	if (!sna_drawable_move_region_to_cpu(dst->pDrawable, &region, flags))
 		goto out;
 	if (dst->alphaMap &&
-	    !sna_drawable_move_to_cpu(dst->alphaMap->pDrawable, true))
+	    !sna_drawable_move_to_cpu(dst->alphaMap->pDrawable,
+				      MOVE_WRITE | MOVE_READ))
 		goto out;
 	if (src->pDrawable) {
-		if (!sna_drawable_move_to_cpu(src->pDrawable, false))
+		if (!sna_drawable_move_to_cpu(src->pDrawable,
+					      MOVE_READ))
 			goto out;
 
 		if (src->alphaMap &&
-		    !sna_drawable_move_to_cpu(src->alphaMap->pDrawable, false))
+		    !sna_drawable_move_to_cpu(src->alphaMap->pDrawable,
+					      MOVE_READ))
 			goto out;
 	}
 	if (mask && mask->pDrawable) {
-		if (!sna_drawable_move_to_cpu(mask->pDrawable, false))
+		if (!sna_drawable_move_to_cpu(mask->pDrawable,
+					      MOVE_READ))
 			goto out;
 
 		if (mask->alphaMap &&
-		    !sna_drawable_move_to_cpu(mask->alphaMap->pDrawable, false))
+		    !sna_drawable_move_to_cpu(mask->alphaMap->pDrawable,
+					      MOVE_READ))
 			goto out;
 	}
 
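The flag computation above recurs in every fallback path of this series. A hypothetical helper capturing the rule (PictOpSrc and PictOpClear replace every destination pixel, so the destination's current contents never need to be read back before the software fallback runs):

static unsigned composite_fallback_flags(CARD8 op)
{
	if (op == PictOpSrc || op == PictOpClear)
		return MOVE_WRITE;		/* destination fully overwritten */
	return MOVE_READ | MOVE_WRITE;		/* read-modify-write blend */
}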
@@ -708,7 +719,7 @@ sna_composite_rectangles(CARD8 op,
 	 */
 	if (op == PictOpSrc || op == PictOpClear) {
 		priv = sna_pixmap_attach(pixmap);
-		if (priv && !priv->gpu_only)
+		if (priv)
 			sna_damage_subtract(&priv->cpu_damage, &region);
 	}
 
@@ -730,19 +741,22 @@ sna_composite_rectangles(CARD8 op,
 		goto fallback;
 	}
 
-	if (!priv->gpu_only) {
-		assert_pixmap_contains_box(pixmap, RegionExtents(&region));
-		sna_damage_add(&priv->gpu_damage, &region);
-	}
+	assert_pixmap_contains_box(pixmap, RegionExtents(&region));
+	sna_damage_add(&priv->gpu_damage, &region);
 
 	goto done;
 
 fallback:
 	DBG(("%s: fallback\n", __FUNCTION__));
-	if (!sna_drawable_move_region_to_cpu(&pixmap->drawable, &region, true))
+	if (op <= PictOpSrc)
+		error = MOVE_WRITE;
+	else
+		error = MOVE_WRITE | MOVE_READ;
+	if (!sna_drawable_move_region_to_cpu(&pixmap->drawable, &region, error))
 		goto done;
 
 	if (dst->alphaMap &&
-	    !sna_drawable_move_to_cpu(dst->alphaMap->pDrawable, true))
+	    !sna_drawable_move_to_cpu(dst->alphaMap->pDrawable, error))
 		goto done;
 
 	if (op == PictOpSrc || op == PictOpClear) {
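The "op <= PictOpSrc" shorthand used here leans on the Render protocol numbering: PictOpClear is 0 and PictOpSrc is 1, and every higher operator blends with the destination. Spelled out as an equivalent (hypothetical) predicate:

static inline Bool op_reads_dst(CARD8 op)
{
	/* PictOpClear == 0, PictOpSrc == 1: both discard the old
	 * destination contents; everything above them reads it. */
	return op != PictOpClear && op != PictOpSrc;
}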
@@ -1704,6 +1704,7 @@ sna_crtc_resize(ScrnInfoPtr scrn, int width, int height)
 
 	if (old_fb_id)
 		drmModeRmFB(sna->kgem.fd, old_fb_id);
+	sna_pixmap_get_bo(old_front)->needs_flush = true;
 	scrn->pScreen->DestroyPixmap(old_front);
 
 	return TRUE;
@@ -1834,7 +1835,6 @@ sna_page_flip(struct sna *sna,
 	count = do_page_flip(sna, data, ref_crtc_hw_id);
 	DBG(("%s: page flipped %d crtcs\n", __FUNCTION__, count));
 	if (count) {
-		bo->cpu_read = bo->cpu_write = false;
 		bo->gpu = true;
 
 		/* Although the kernel performs an implicit flush upon
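The removed cpu_read/cpu_write flags belong to the old serialisation scheme; with a snoopable LLC bo, syncing for CPU access reduces to a set-domain call that the kernel can satisfy without a clflush. A hedged sketch of that pattern (not this series' exact kgem_bo_sync(); error handling elided):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <i915_drm.h>

/* Wait for the GPU to release the bo before the CPU touches it. */
static void bo_cpu_sync(int fd, uint32_t handle, bool write)
{
	struct drm_i915_gem_set_domain arg;

	memset(&arg, 0, sizeof(arg));
	arg.handle = handle;
	arg.read_domains = I915_GEM_DOMAIN_CPU;
	arg.write_domain = write ? I915_GEM_DOMAIN_CPU : 0;
	(void)drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &arg);
}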
@@ -319,9 +319,6 @@ static void damage(PixmapPtr pixmap, RegionPtr region)
 	struct sna_pixmap *priv;
 
 	priv = sna_pixmap(pixmap);
-	if (priv->gpu_only)
-		return;
-
 	if (region == NULL) {
 damage_all:
 		sna_damage_all(&priv->gpu_damage,
@@ -346,12 +343,11 @@ static void set_bo(PixmapPtr pixmap, struct kgem_bo *bo)
 	struct sna_pixmap *priv;
 
 	priv = sna_pixmap(pixmap);
-	if (!priv->gpu_only) {
-		sna_damage_all(&priv->gpu_damage,
-			       pixmap->drawable.width,
-			       pixmap->drawable.height);
-		sna_damage_destroy(&priv->cpu_damage);
-	}
+	sna_damage_all(&priv->gpu_damage,
+		       pixmap->drawable.width,
+		       pixmap->drawable.height);
+	sna_damage_destroy(&priv->cpu_damage);
 
 	assert(priv->gpu_bo->refcnt > 1);
 	priv->gpu_bo->refcnt--;
 	priv->gpu_bo = ref(bo);
@@ -79,6 +79,7 @@
 #define NO_GLYPH_CACHE 0
 #define NO_GLYPHS_TO_DST 0
 #define NO_GLYPHS_VIA_MASK 0
+#define NO_SMALL_MASK 0
 #define NO_GLYPHS_SLOW 0
 
 #define CACHE_PICTURE_SIZE 1024
@@ -642,7 +643,6 @@ static bool
 clear_pixmap(struct sna *sna, PixmapPtr pixmap)
 {
 	struct sna_pixmap *priv = sna_pixmap(pixmap);
-	assert(priv->gpu_only);
 	return sna->render.fill_one(sna, pixmap, priv->gpu_bo, 0,
 				    0, 0,
 				    pixmap->drawable.width,
@@ -711,7 +711,8 @@ glyphs_via_mask(struct sna *sna,
 	}
 
 	component_alpha = NeedsComponent(format->format);
-	if ((uint32_t)width * height * format->depth < 8 * 4096) {
+	if (!NO_SMALL_MASK &&
+	    (uint32_t)width * height * format->depth < 8 * 4096) {
 		pixman_image_t *mask_image;
 		int s;
 
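The size test counts bits: 8 * 4096 bits is one 4 KiB page. Restated as a hypothetical helper, with the arithmetic worked through:

/* width * height * depth is the mask payload in bits; masks under one
 * page (8 * 4096 bits == 4096 bytes) take the inline pixman path.
 * e.g. a 32x32 A8 mask: 32*32*8 = 8192 bits  -> small;
 *      a 64x64 A8 mask: 64*64*8 = 32768 bits -> not small. */
static inline Bool mask_is_small(int width, int height, int depth)
{
	return (uint32_t)width * height * depth < 8 * 4096;
}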
@@ -1023,16 +1024,22 @@ glyphs_fallback(CARD8 op,
 	if (!RegionNotEmpty(&region))
 		return;
 
-	if (!sna_drawable_move_region_to_cpu(dst->pDrawable, &region, true))
+	if (!sna_drawable_move_region_to_cpu(dst->pDrawable, &region,
+					     MOVE_READ | MOVE_WRITE))
 		return;
 	if (dst->alphaMap &&
-	    !sna_drawable_move_to_cpu(dst->alphaMap->pDrawable, true))
+	    !sna_drawable_move_to_cpu(dst->alphaMap->pDrawable,
+				      MOVE_READ | MOVE_WRITE))
 		return;
 
 	if (src->pDrawable) {
-		if (!sna_drawable_move_to_cpu(src->pDrawable, false))
+		if (!sna_drawable_move_to_cpu(src->pDrawable,
+					      MOVE_READ))
 			return;
 
 		if (src->alphaMap &&
-		    !sna_drawable_move_to_cpu(src->alphaMap->pDrawable, false))
+		    !sna_drawable_move_to_cpu(src->alphaMap->pDrawable,
+					      MOVE_READ))
 			return;
 	}
 	RegionTranslate(&region, -dst->pDrawable->x, -dst->pDrawable->y);
@@ -347,9 +347,6 @@ _texture_is_cpu(PixmapPtr pixmap, const BoxRec *box)
 	if (priv == NULL)
 		return TRUE;
 
-	if (priv->gpu_only)
-		return FALSE;
-
 	if (priv->gpu_bo == NULL)
 		return TRUE;
 
@@ -955,7 +952,7 @@ sna_render_picture_fixup(struct sna *sna,
 	}
 
 	if (picture->pDrawable &&
-	    !sna_drawable_move_to_cpu(picture->pDrawable, false))
+	    !sna_drawable_move_to_cpu(picture->pDrawable, MOVE_READ))
 		return 0;
 
 	channel->bo = kgem_create_buffer(&sna->kgem,
@@ -1099,7 +1096,7 @@ sna_render_picture_convert(struct sna *sna,
 		return 0;
 	}
 
-	if (!sna_pixmap_move_to_cpu(pixmap, false))
+	if (!sna_pixmap_move_to_cpu(pixmap, MOVE_READ))
 		return 0;
 
 	src = pixman_image_create_bits(picture->format,
@@ -73,6 +73,13 @@ is_gpu(DrawablePtr drawable)
 	return priv && priv->gpu_damage;
 }
 
+static inline Bool
+is_busy_cpu(DrawablePtr drawable)
+{
+	struct sna_pixmap *priv = sna_pixmap_from_drawable(drawable);
+	return priv && priv->cpu_bo && priv->cpu_bo->gpu;
+}
+
 static inline Bool
 is_cpu(DrawablePtr drawable)
 {
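is_busy_cpu() is the key new predicate for LLC buffers: a snooped CPU bo still referenced by an unretired batch is, for scheduling purposes, GPU work, and routing it through the GPU paths avoids a synchronisation stall. The two call sites below fold it into the existing heuristics; a hypothetical combined form:

static inline Bool prefer_gpu(DrawablePtr drawable)
{
	/* Anything with GPU damage, or whose CPU bo the GPU is still
	 * using, is cheaper to keep on the GPU than to stall for. */
	return is_gpu(drawable) || is_busy_cpu(drawable);
}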
@@ -91,7 +98,7 @@ static inline Bool
 too_small(DrawablePtr drawable)
 {
 	return ((uint32_t)drawable->width * drawable->height * drawable->bitsPerPixel <= 8*4096) &&
-		!is_dirty_gpu(drawable);
+		!(is_dirty_gpu(drawable) || is_busy_cpu(drawable));
 }
 
 static inline Bool
@@ -99,7 +106,7 @@ picture_is_gpu(PicturePtr picture)
 {
 	if (!picture || !picture->pDrawable)
 		return FALSE;
-	return is_gpu(picture->pDrawable);
+	return is_gpu(picture->pDrawable) || is_busy_cpu(picture->pDrawable);
 }
 
 static inline Bool sna_blt_compare_depth(DrawablePtr src, DrawablePtr dst)
@@ -199,17 +199,38 @@ sna_tiling_composite_done(struct sna *sna,
 		}
 		tmp.done(sna, &tmp);
 	} else {
+		unsigned int flags;
 		DBG(("%s -- falback\n", __FUNCTION__));
 
-		if (!sna_drawable_move_to_cpu(tile->dst->pDrawable, true))
+		if (tile->op <= PictOpSrc)
+			flags = MOVE_WRITE;
+		else
+			flags = MOVE_WRITE | MOVE_READ;
+		if (!sna_drawable_move_to_cpu(tile->dst->pDrawable,
+					      flags))
+			goto done;
+		if (tile->dst->alphaMap &&
+		    !sna_drawable_move_to_cpu(tile->dst->alphaMap->pDrawable,
+					      flags))
 			goto done;
 
 		if (tile->src->pDrawable &&
-		    !sna_drawable_move_to_cpu(tile->src->pDrawable, false))
+		    !sna_drawable_move_to_cpu(tile->src->pDrawable,
+					      MOVE_READ))
 			goto done;
+		if (tile->src->alphaMap &&
+		    !sna_drawable_move_to_cpu(tile->src->alphaMap->pDrawable,
+					      MOVE_READ))
+			goto done;
 
 		if (tile->mask && tile->mask->pDrawable &&
-		    !sna_drawable_move_to_cpu(tile->mask->pDrawable, false))
+		    !sna_drawable_move_to_cpu(tile->mask->pDrawable,
+					      MOVE_READ))
 			goto done;
+
+		if (tile->mask && tile->mask->alphaMap &&
+		    !sna_drawable_move_to_cpu(tile->mask->alphaMap->pDrawable,
+					      MOVE_READ))
+			goto done;
 
 		fbComposite(tile->op,
@@ -3176,7 +3176,8 @@ trapezoid_span_inplace(CARD8 op, PicturePtr src, PicturePtr dst,
 	}
 
 	region.data = NULL;
-	if (!sna_drawable_move_region_to_cpu(dst->pDrawable, &region, true))
+	if (!sna_drawable_move_region_to_cpu(dst->pDrawable, &region,
+					     MOVE_READ | MOVE_WRITE))
 		return true;
 
 	pixmap = get_drawable_pixmap(dst->pDrawable);
@@ -3313,16 +3314,20 @@ trapezoid_span_fallback(CARD8 op, PicturePtr src, PicturePtr dst,
 	region.extents.y2 = dst_y + extents.y2;
 	region.data = NULL;
 
-	if (!sna_drawable_move_region_to_cpu(dst->pDrawable, &region, true))
+	if (!sna_drawable_move_region_to_cpu(dst->pDrawable, &region,
+					     MOVE_READ | MOVE_WRITE))
 		goto done;
 	if (dst->alphaMap &&
-	    !sna_drawable_move_to_cpu(dst->alphaMap->pDrawable, true))
+	    !sna_drawable_move_to_cpu(dst->alphaMap->pDrawable,
+				      MOVE_READ | MOVE_WRITE))
 		goto done;
 	if (src->pDrawable) {
-		if (!sna_drawable_move_to_cpu(src->pDrawable, false))
+		if (!sna_drawable_move_to_cpu(src->pDrawable,
+					      MOVE_READ))
 			goto done;
 		if (src->alphaMap &&
-		    !sna_drawable_move_to_cpu(src->alphaMap->pDrawable, false))
+		    !sna_drawable_move_to_cpu(src->alphaMap->pDrawable,
+					      MOVE_READ))
 			goto done;
 	}
 
@@ -3661,20 +3666,18 @@ skip:
 static void mark_damaged(PixmapPtr pixmap, struct sna_pixmap *priv,
 			 BoxPtr box, int16_t x, int16_t y)
 {
-	if (!priv->gpu_only) {
-		box->x1 += x; box->x2 += x;
-		box->y1 += y; box->y2 += y;
-		if (box->x1 <= 0 && box->y1 <= 0 &&
-		    box->x2 >= pixmap->drawable.width &&
-		    box->y2 >= pixmap->drawable.height) {
-			sna_damage_destroy(&priv->cpu_damage);
-			sna_damage_all(&priv->gpu_damage,
-				       pixmap->drawable.width,
-				       pixmap->drawable.height);
-		} else {
-			sna_damage_add_box(&priv->gpu_damage, box);
-			sna_damage_subtract_box(&priv->cpu_damage, box);
-		}
+	box->x1 += x; box->x2 += x;
+	box->y1 += y; box->y2 += y;
+	if (box->x1 <= 0 && box->y1 <= 0 &&
+	    box->x2 >= pixmap->drawable.width &&
+	    box->y2 >= pixmap->drawable.height) {
+		sna_damage_destroy(&priv->cpu_damage);
+		sna_damage_all(&priv->gpu_damage,
+			       pixmap->drawable.width,
+			       pixmap->drawable.height);
+	} else {
+		sna_damage_add_box(&priv->gpu_damage, box);
+		sna_damage_subtract_box(&priv->cpu_damage, box);
+	}
 }
 
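With gpu_only gone, mark_damaged() applies the same bookkeeping to every pixmap. The full-coverage test it uses, restated as a hypothetical helper:

static inline Bool box_covers_pixmap(PixmapPtr pixmap, const BoxRec *box)
{
	/* Once one box spans the whole drawable, per-box tracking is
	 * redundant: drop the CPU damage outright and mark the GPU
	 * as all-damaged instead of accumulating boxes. */
	return box->x1 <= 0 && box->y1 <= 0 &&
	       box->x2 >= pixmap->drawable.width &&
	       box->y2 >= pixmap->drawable.height;
}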
@@ -3887,7 +3890,8 @@ sna_add_traps(PicturePtr picture, INT16 x, INT16 y, int n, xTrap *t)
 	}
 
 	DBG(("%s -- fallback\n", __FUNCTION__));
-	if (sna_drawable_move_to_cpu(picture->pDrawable, true))
+	if (sna_drawable_move_to_cpu(picture->pDrawable,
+				     MOVE_READ | MOVE_WRITE))
 		fbAddTraps(picture, x, y, n, t);
 }
 