diff --git a/src/intel_uxa.c b/src/intel_uxa.c
index 9bb0e0a8..452db6e5 100644
--- a/src/intel_uxa.c
+++ b/src/intel_uxa.c
@@ -228,11 +228,14 @@ intel_uxa_pixmap_compute_size(PixmapPtr pixmap,
 	}
 
 	if (*tiling == I915_TILING_NONE) {
+		/* We only require a 64 byte alignment for scanouts, but
+		 * a 256 byte alignment for sharing with PRIME.
+		 */
+		*stride = ALIGN(pitch, 256);
 		/* Round the height up so that the GPU's access to a 2x2 aligned
 		 * subspan doesn't address an invalid page offset beyond the
 		 * end of the GTT.
 		 */
-		*stride = ALIGN(pitch, 64);
 		size = *stride * ALIGN(h, 2);
 	}
 
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 1a02db37..c00b5b83 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -944,6 +944,22 @@ sna_share_pixmap_backing(PixmapPtr pixmap, ScreenPtr slave, void **fd_handle)
 	if (priv->gpu_bo->tiling &&
 	    !sna_pixmap_change_tiling(pixmap, I915_TILING_NONE))
 		return FALSE;
+
+	/* nvidia requires a minimum pitch alignment of 256 */
+	if (priv->gpu_bo->pitch & 255) {
+		struct kgem_bo *bo;
+
+		bo = kgem_replace_bo(&sna->kgem, priv->gpu_bo,
+				     pixmap->drawable.width,
+				     pixmap->drawable.height,
+				     ALIGN(priv->gpu_bo->pitch, 256),
+				     pixmap->drawable.bitsPerPixel);
+		if (bo == NULL)
+			return FALSE;
+
+		kgem_bo_destroy(&sna->kgem, priv->gpu_bo);
+		priv->gpu_bo = bo;
+	}
 	assert(priv->gpu_bo->tiling == I915_TILING_NONE);
 
 	/* And export the bo->pitch via pixmap->devKind */