sna: Prevent mapping through the GTT for large bo
If the bo is larger than a quarter of the aperture, it is unlikely that we will be able to evict enough contiguous space in the GTT to accommodate that buffer. So don't attempt to map such buffers through the GTT; use indirect access instead. Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
This commit is contained in:
parent
7c81bcd0c4
commit
d35b6955db
|
|
@ -394,13 +394,19 @@ static inline bool kgem_bo_map_will_stall(struct kgem *kgem, struct kgem_bo *bo)
|
|||
__FUNCTION__, bo->handle,
|
||||
bo->domain, bo->presumed_offset, bo->size));
|
||||
|
||||
if (!kgem_bo_is_mappable(kgem, bo))
|
||||
return true;
|
||||
|
||||
if (kgem->wedged)
|
||||
return false;
|
||||
|
||||
if (kgem_bo_is_busy(bo))
|
||||
return true;
|
||||
|
||||
if (bo->presumed_offset == 0)
|
||||
return !list_is_empty(&kgem->requests);
|
||||
|
||||
return !kgem_bo_is_mappable(kgem, bo);
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline bool kgem_bo_is_dirty(struct kgem_bo *bo)
|
||||
|
|
|
|||
|
|
@ -797,7 +797,7 @@ sna_pixmap_create_mappable_gpu(PixmapPtr pixmap)
|
|||
sna_pixmap_choose_tiling(pixmap),
|
||||
CREATE_GTT_MAP | CREATE_INACTIVE);
|
||||
|
||||
return priv->gpu_bo != NULL;
|
||||
return priv->gpu_bo && kgem_bo_is_mappable(&sna->kgem, priv->gpu_bo);
|
||||
}
|
||||
|
||||
bool
|
||||
|
|
@ -835,7 +835,8 @@ _sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned int flags)
|
|||
priv->gpu_bo->exec == NULL)
|
||||
kgem_retire(&sna->kgem);
|
||||
|
||||
if (kgem_bo_is_busy(priv->gpu_bo)) {
|
||||
if (kgem_bo_map_will_stall(&sna->kgem,
|
||||
priv->gpu_bo)) {
|
||||
if (priv->pinned)
|
||||
goto skip_inplace_map;
|
||||
|
||||
|
|
@ -897,7 +898,7 @@ skip_inplace_map:
|
|||
|
||||
if (flags & MOVE_INPLACE_HINT &&
|
||||
priv->stride && priv->gpu_bo &&
|
||||
!kgem_bo_is_busy(priv->gpu_bo) &&
|
||||
!kgem_bo_map_will_stall(&sna->kgem, priv->gpu_bo) &&
|
||||
pixmap_inplace(sna, pixmap, priv) &&
|
||||
sna_pixmap_move_to_gpu(pixmap, flags)) {
|
||||
assert(flags & MOVE_WRITE);
|
||||
|
|
@ -1250,7 +1251,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
|
|||
|
||||
if (flags & MOVE_INPLACE_HINT &&
|
||||
priv->stride && priv->gpu_bo &&
|
||||
!kgem_bo_is_busy(priv->gpu_bo) &&
|
||||
!kgem_bo_map_will_stall(&sna->kgem, priv->gpu_bo) &&
|
||||
region_inplace(sna, pixmap, region, priv) &&
|
||||
sna_pixmap_move_area_to_gpu(pixmap, ®ion->extents, flags)) {
|
||||
assert(flags & MOVE_WRITE);
|
||||
|
|
|
|||
|
|
@ -122,8 +122,7 @@ void sna_read_boxes(struct sna *sna,
|
|||
* this path.
|
||||
*/
|
||||
|
||||
if (DEBUG_NO_IO || kgem->wedged ||
|
||||
!kgem_bo_map_will_stall(kgem, src_bo) ||
|
||||
if (!kgem_bo_map_will_stall(kgem, src_bo) ||
|
||||
src_bo->tiling == I915_TILING_NONE) {
|
||||
fallback:
|
||||
read_boxes_inplace(kgem,
|
||||
|
|
@ -386,10 +385,7 @@ static bool upload_inplace(struct kgem *kgem,
|
|||
int n, int bpp)
|
||||
{
|
||||
if (DEBUG_NO_IO)
|
||||
return true;
|
||||
|
||||
if (unlikely(kgem->wedged))
|
||||
return true;
|
||||
return kgem_bo_is_mappable(kgem, bo);
|
||||
|
||||
/* If we are writing through the GTT, check first if we might be
|
||||
* able to almagamate a series of small writes into a single
|
||||
|
|
@ -993,14 +989,27 @@ struct kgem_bo *sna_replace(struct sna *sna,
|
|||
kgem_bo_write(kgem, bo, src,
|
||||
(pixmap->drawable.height-1)*stride + pixmap->drawable.width*pixmap->drawable.bitsPerPixel/8);
|
||||
} else {
|
||||
dst = kgem_bo_map(kgem, bo);
|
||||
if (dst) {
|
||||
memcpy_blt(src, dst, pixmap->drawable.bitsPerPixel,
|
||||
stride, bo->pitch,
|
||||
0, 0,
|
||||
0, 0,
|
||||
pixmap->drawable.width,
|
||||
pixmap->drawable.height);
|
||||
if (kgem_bo_is_mappable(kgem, bo)) {
|
||||
dst = kgem_bo_map(kgem, bo);
|
||||
if (dst) {
|
||||
memcpy_blt(src, dst, pixmap->drawable.bitsPerPixel,
|
||||
stride, bo->pitch,
|
||||
0, 0,
|
||||
0, 0,
|
||||
pixmap->drawable.width,
|
||||
pixmap->drawable.height);
|
||||
}
|
||||
} else {
|
||||
BoxRec box;
|
||||
|
||||
box.x1 = box.y1 = 0;
|
||||
box.x2 = pixmap->drawable.width;
|
||||
box.y2 = pixmap->drawable.height;
|
||||
|
||||
sna_write_boxes(sna, pixmap,
|
||||
bo, 0, 0,
|
||||
src, stride, 0, 0,
|
||||
&box, 1);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -1038,15 +1047,29 @@ struct kgem_bo *sna_replace__xor(struct sna *sna,
|
|||
}
|
||||
}
|
||||
|
||||
dst = kgem_bo_map(kgem, bo);
|
||||
if (dst) {
|
||||
memcpy_xor(src, dst, pixmap->drawable.bitsPerPixel,
|
||||
stride, bo->pitch,
|
||||
0, 0,
|
||||
0, 0,
|
||||
pixmap->drawable.width,
|
||||
pixmap->drawable.height,
|
||||
and, or);
|
||||
if (kgem_bo_is_mappable(kgem, bo)) {
|
||||
dst = kgem_bo_map(kgem, bo);
|
||||
if (dst) {
|
||||
memcpy_xor(src, dst, pixmap->drawable.bitsPerPixel,
|
||||
stride, bo->pitch,
|
||||
0, 0,
|
||||
0, 0,
|
||||
pixmap->drawable.width,
|
||||
pixmap->drawable.height,
|
||||
and, or);
|
||||
}
|
||||
} else {
|
||||
BoxRec box;
|
||||
|
||||
box.x1 = box.y1 = 0;
|
||||
box.x2 = pixmap->drawable.width;
|
||||
box.y2 = pixmap->drawable.height;
|
||||
|
||||
sna_write_boxes__xor(sna, pixmap,
|
||||
bo, 0, 0,
|
||||
src, stride, 0, 0,
|
||||
&box, 1,
|
||||
and, or);
|
||||
}
|
||||
|
||||
return bo;
|
||||
|
|
|
|||
Loading…
Reference in New Issue