batch: Track pixmap domains.

In order to detect when we require cache flushes, we need to track which
domains the pixmap currently belongs to. To do so, we create a device
private structure to hold the extra information and hook it up.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>

commit 285f286597
parent 2c3aee2b57

src/i830.h | 83

@@ -77,6 +77,87 @@ void i830_uxa_block_handler(ScreenPtr pScreen);
 Bool i830_get_aperture_space(ScrnInfoPtr scrn, drm_intel_bo ** bo_table,
 			     int num_bos);
 
+/* classic doubly-link circular list */
+struct list {
+	struct list *next, *prev;
+};
+
+static void
+list_init(struct list *list)
+{
+	list->next = list->prev = list;
+}
+
+static inline void
+__list_add(struct list *entry,
+	   struct list *prev,
+	   struct list *next)
+{
+	next->prev = entry;
+	entry->next = next;
+	entry->prev = prev;
+	prev->next = entry;
+}
+
+static inline void
+list_add(struct list *entry, struct list *head)
+{
+	__list_add(entry, head, head->next);
+}
+
+static inline void
+__list_del(struct list *prev, struct list *next)
+{
+	next->prev = prev;
+	prev->next = next;
+}
+
+static inline void
+list_del(struct list *entry)
+{
+	__list_del(entry->prev, entry->next);
+	list_init(entry);
+}
+
+static inline Bool
+list_is_empty(struct list *head)
+{
+	return head->next == head;
+}
+
+#ifndef container_of
+#define container_of(ptr, type, member) \
+    (type *)((char *)(ptr) - (char *) &((type *)0)->member)
+#endif
+
+#define list_entry(ptr, type, member) \
+	container_of(ptr, type, member)
+
+#define list_first_entry(ptr, type, member) \
+	list_entry((ptr)->next, type, member)
+
+struct intel_pixmap {
+	dri_bo *bo;
+	uint32_t tiling;
+	uint32_t flush_write_domain;
+	uint32_t flush_read_domains;
+	uint32_t batch_write_domain;
+	uint32_t batch_read_domains;
+	struct list flush, batch;
+};
+
+struct intel_pixmap *i830_get_pixmap_intel(PixmapPtr pixmap);
+
+static inline Bool i830_uxa_pixmap_is_dirty(PixmapPtr pixmap)
+{
+	return i830_get_pixmap_intel(pixmap)->flush_write_domain != 0;
+}
+
+static inline Bool i830_pixmap_tiled(PixmapPtr pixmap)
+{
+	return i830_get_pixmap_intel(pixmap)->tiling != I915_TILING_NONE;
+}
+
+dri_bo *i830_get_pixmap_bo(PixmapPtr pixmap);
+void i830_set_pixmap_bo(PixmapPtr pixmap, dri_bo * bo);
+
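(Aside, not part of the patch: a minimal standalone sketch of how this intrusive list is meant to be used. The list helpers are copied from the hunk above, with Bool swapped for int so the sketch compiles outside the X server; struct toy_pixmap and main() are invented for illustration. container_of/list_first_entry recover the containing structure from a node embedded inside it, so no separate link allocations are needed.)

#include <stdio.h>

/* Copied from the hunk above. */
struct list {
	struct list *next, *prev;
};

static void
list_init(struct list *list)
{
	list->next = list->prev = list;
}

static inline void
__list_add(struct list *entry, struct list *prev, struct list *next)
{
	next->prev = entry;
	entry->next = next;
	entry->prev = prev;
	prev->next = entry;
}

static inline void
list_add(struct list *entry, struct list *head)
{
	__list_add(entry, head, head->next);
}

static inline int		/* Bool in the header; int here to stay standalone */
list_is_empty(struct list *head)
{
	return head->next == head;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - (char *) &((type *)0)->member))

#define list_first_entry(ptr, type, member) \
	container_of((ptr)->next, type, member)

/* Toy stand-in for struct intel_pixmap: the node lives inside the object. */
struct toy_pixmap {
	int id;
	struct list flush;	/* embedded node, like intel_pixmap.flush */
};

int main(void)
{
	struct list flush_pixmaps;	/* like intel_screen_private.flush_pixmaps */
	struct toy_pixmap a = { .id = 1 };

	list_init(&flush_pixmaps);
	list_init(&a.flush);

	/* Queue the pixmap, then recover it from the list head. */
	list_add(&a.flush, &flush_pixmaps);
	if (!list_is_empty(&flush_pixmaps)) {
		struct toy_pixmap *p =
			list_first_entry(&flush_pixmaps, struct toy_pixmap, flush);
		printf("first queued pixmap: %d\n", p->id);	/* prints 1 */
	}
	return 0;
}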
@@ -194,6 +275,8 @@ typedef struct intel_screen_private {
 	Bool in_batch_atomic;
 	/** Ending batch_used that was verified by i830_start_batch_atomic() */
 	int batch_atomic_limit;
+	struct list batch_pixmaps;
+	struct list flush_pixmaps;
 
 	/* For Xvideo */
 	Bool use_drmmode_overlay;

@@ -63,35 +63,16 @@ void I830Sync(ScrnInfoPtr scrn)
 	if (!scrn->vtSema || !intel->batch_bo)
 		return;
 
-	I830EmitFlush(scrn);
-
 	intel_batch_flush(scrn, TRUE);
 	intel_batch_wait_last(scrn);
 }
 
-void I830EmitFlush(ScrnInfoPtr scrn)
-{
-	intel_screen_private *intel = intel_get_screen_private(scrn);
-	int flags = MI_WRITE_DIRTY_STATE | MI_INVALIDATE_MAP_CACHE;
-
-	if (IS_I965G(intel))
-		flags = 0;
-
-	{
-		BEGIN_BATCH(1);
-		OUT_BATCH(MI_FLUSH | flags);
-		ADVANCE_BATCH();
-	}
-}
-
 void i830_debug_flush(ScrnInfoPtr scrn)
 {
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 
-	if (intel->debug_flush & DEBUG_FLUSH_BATCHES)
+	if (intel->debug_flush & (DEBUG_FLUSH_BATCHES | DEBUG_FLUSH_CACHES))
 		intel_batch_flush(scrn, FALSE);
-	else if (intel->debug_flush & DEBUG_FLUSH_CACHES)
-		I830EmitFlush(scrn);
 }
 
 /* The following function sets up the supported acceleration. Call it

@@ -90,17 +90,45 @@ void intel_batch_teardown(ScrnInfoPtr scrn)
 	}
 }
 
-void intel_batch_flush(ScrnInfoPtr scrn, Bool flushed)
+void intel_batch_pipelined_flush(ScrnInfoPtr scrn)
 {
 	intel_screen_private *intel = intel_get_screen_private(scrn);
-	int ret;
+	int flags;
 
 	assert (!intel->in_batch_atomic);
 
 	if (intel->batch_used == 0)
 		return;
 
-	if (intel->debug_flush & DEBUG_FLUSH_CACHES) {
+	/* Big hammer, look to the pipelined flushes in future. */
+	flags = MI_WRITE_DIRTY_STATE | MI_INVALIDATE_MAP_CACHE;
+	if (IS_I965G(intel))
+		flags = 0;
+
+	BEGIN_BATCH(1);
+	OUT_BATCH(MI_FLUSH | flags);
+	ADVANCE_BATCH();
+
+	while (!list_is_empty(&intel->flush_pixmaps)) {
+		struct intel_pixmap *entry;
+
+		entry = list_first_entry(&intel->flush_pixmaps,
+					 struct intel_pixmap,
+					 flush);
+
+		entry->flush_read_domains = entry->flush_write_domain = 0;
+		list_del(&entry->flush);
+	}
+}
+
+void intel_batch_flush(ScrnInfoPtr scrn, Bool flush)
+{
+	intel_screen_private *intel = intel_get_screen_private(scrn);
+	int ret;
+
+	assert (!intel->in_batch_atomic);
+
+	if (flush || intel->debug_flush & DEBUG_FLUSH_CACHES) {
 		int flags = MI_WRITE_DIRTY_STATE | MI_INVALIDATE_MAP_CACHE;
 
 		if (IS_I965G(intel))

@@ -111,6 +139,9 @@ void intel_batch_flush(ScrnInfoPtr scrn, Bool flushed)
 		intel->batch_used += 4;
 	}
 
+	if (intel->batch_used == 0)
+		return;
+
 	/* Emit a padding dword if we aren't going to be quad-word aligned. */
 	if ((intel->batch_used & 4) == 0) {
 		*(uint32_t *) (intel->batch_ptr + intel->batch_used) = MI_NOOP;

@@ -132,6 +163,27 @@ void intel_batch_flush(ScrnInfoPtr scrn, Bool flushed)
 		FatalError("Failed to submit batchbuffer: %s\n",
 			   strerror(-ret));
 
+	while (!list_is_empty(&intel->batch_pixmaps)) {
+		struct intel_pixmap *entry;
+
+		entry = list_first_entry(&intel->batch_pixmaps,
+					 struct intel_pixmap,
+					 batch);
+
+		entry->batch_read_domains = entry->batch_write_domain = 0;
+		list_del(&entry->batch);
+	}
+
+	while (!list_is_empty(&intel->flush_pixmaps)) {
+		struct intel_pixmap *entry;
+
+		entry = list_first_entry(&intel->flush_pixmaps,
+					 struct intel_pixmap,
+					 flush);
+
+		entry->flush_read_domains = entry->flush_write_domain = 0;
+		list_del(&entry->flush);
+	}
+
 	/* Save a ref to the last batch emitted, which we use for syncing
 	 * in debug code.
 	 */

@@ -32,9 +32,11 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 
 #define BATCH_RESERVED 16
 
+
 void intel_batch_init(ScrnInfoPtr scrn);
 void intel_batch_teardown(ScrnInfoPtr scrn);
-void intel_batch_flush(ScrnInfoPtr scrn, Bool flushed);
+void intel_batch_pipelined_flush(ScrnInfoPtr scrn);
+void intel_batch_flush(ScrnInfoPtr scrn, Bool flush);
 void intel_batch_wait_last(ScrnInfoPtr scrn);
 
 static inline int intel_batch_space(intel_screen_private *intel)

@@ -92,15 +94,42 @@ intel_batch_emit_reloc(intel_screen_private *intel,
 	intel->batch_used += 4;
 }
 
+static inline void
+intel_batch_mark_pixmap_domains(intel_screen_private *intel,
+				struct intel_pixmap *priv,
+				uint32_t read_domains, uint32_t write_domain)
+{
+	assert (read_domains);
+	assert (write_domain == 0 || write_domain == read_domains);
+	assert (write_domain == 0 ||
+		priv->flush_write_domain == 0 ||
+		priv->flush_write_domain == write_domain);
+
+	priv->flush_read_domains |= read_domains;
+	priv->batch_read_domains |= read_domains;
+	priv->flush_write_domain |= write_domain;
+	priv->batch_write_domain |= write_domain;
+	if (list_is_empty(&priv->batch))
+		list_add(&priv->batch, &intel->batch_pixmaps);
+	if (list_is_empty(&priv->flush))
+		list_add(&priv->flush, &intel->flush_pixmaps);
+}
+
 static inline void
 intel_batch_emit_reloc_pixmap(intel_screen_private *intel, PixmapPtr pixmap,
 			      uint32_t read_domains, uint32_t write_domain,
 			      uint32_t delta)
 {
-	dri_bo *bo = i830_get_pixmap_bo(pixmap);
+	struct intel_pixmap *priv = i830_get_pixmap_intel(pixmap);
+
 	assert(intel->batch_ptr != NULL);
 	assert(intel_batch_space(intel) >= 4);
-	intel_batch_emit_reloc(intel, bo, read_domains, write_domain, delta);
+
+	intel_batch_mark_pixmap_domains(intel, priv, read_domains, write_domain);
+
+	intel_batch_emit_reloc(intel, priv->bo,
+			       read_domains, write_domain,
+			       delta);
 }
 
 #define OUT_BATCH(dword) intel_batch_emit_dword(intel, dword)

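(Aside, not part of the patch: a standalone toy model of the bookkeeping above. The four domain fields match struct intel_pixmap; the TOY_DOMAIN_* values, toy_mark_domains and main() are invented for illustration, and the list handling is omitted. It shows the two rules the assertions enforce: read domains accumulate with |=, and a pixmap may carry at most one pending write domain until the tracking is cleared.)

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative domain bits; the real values come from i915_drm.h. */
#define TOY_DOMAIN_RENDER  0x2
#define TOY_DOMAIN_SAMPLER 0x4

struct toy_pixmap {
	uint32_t flush_write_domain;
	uint32_t flush_read_domains;
	uint32_t batch_write_domain;
	uint32_t batch_read_domains;
};

/* Mirrors the domain-accumulation rules of intel_batch_mark_pixmap_domains. */
static void
toy_mark_domains(struct toy_pixmap *priv,
		 uint32_t read_domains, uint32_t write_domain)
{
	assert(read_domains);
	assert(write_domain == 0 || write_domain == read_domains);
	assert(write_domain == 0 ||
	       priv->flush_write_domain == 0 ||
	       priv->flush_write_domain == write_domain);

	priv->flush_read_domains |= read_domains;
	priv->batch_read_domains |= read_domains;
	priv->flush_write_domain |= write_domain;
	priv->batch_write_domain |= write_domain;
}

int main(void)
{
	struct toy_pixmap pix = { 0, 0, 0, 0 };

	/* Sampled from in one operation, rendered to in another. */
	toy_mark_domains(&pix, TOY_DOMAIN_SAMPLER, 0);
	toy_mark_domains(&pix, TOY_DOMAIN_RENDER, TOY_DOMAIN_RENDER);

	/* A pending write domain is what i830_uxa_pixmap_is_dirty tests. */
	printf("dirty: %s\n", pix.flush_write_domain ? "yes" : "no");

	/* The flush paths reset the tracking once the GPU caches are known
	 * to be flushed (cf. the list walks in intel_batch_flush above). */
	pix.batch_read_domains = pix.batch_write_domain = 0;
	pix.flush_read_domains = pix.flush_write_domain = 0;
	printf("dirty after flush: %s\n", pix.flush_write_domain ? "yes" : "no");
	return 0;
}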
@@ -349,12 +349,10 @@ I830DRI2CopyRegion(DrawablePtr drawable, RegionPtr pRegion,
 	/* Emit a flush of the rendering cache, or on the 965 and beyond
 	 * rendering results may not hit the framebuffer until significantly
 	 * later.
-	 */
-	I830EmitFlush(scrn);
-	intel->need_mi_flush = FALSE;
-
-	/* We can't rely on getting into the block handler before the DRI
+	 *
+	 * We can't rely on getting into the block handler before the DRI
 	 * client gets to run again so flush now. */
+	intel->need_mi_flush = FALSE;
 	intel_batch_flush(scrn, TRUE);
 #if ALWAYS_SYNC
 	I830Sync(scrn);

@@ -985,20 +985,19 @@ I830BlockHandler(int i, pointer blockData, pointer pTimeout, pointer pReadmask)
 	screen->BlockHandler = I830BlockHandler;
 
 	if (scrn->vtSema) {
-		Bool flushed = FALSE;
+		Bool flush = FALSE;
 
 		/* Emit a flush of the rendering cache, or on the 965 and beyond
 		 * rendering results may not hit the framebuffer until significantly
 		 * later.
 		 */
-		if (intel->need_mi_flush || intel->batch_used) {
-			flushed = TRUE;
-			I830EmitFlush(scrn);
-		}
+		if (intel->need_mi_flush || intel->batch_used)
+			flush = TRUE;
 
 		/* Flush the batch, so that any rendering is executed in a timely
 		 * fashion.
 		 */
-		intel_batch_flush(scrn, flushed);
+		intel_batch_flush(scrn, flush);
 		drmCommandNone(intel->drmSubFD, DRM_I915_GEM_THROTTLE);
 
 		intel->need_mi_flush = FALSE;

@@ -1138,6 +1137,9 @@ void i830_init_bufmgr(ScrnInfoPtr scrn)
 
 	intel->bufmgr = intel_bufmgr_gem_init(intel->drmSubFD, batch_size);
 	intel_bufmgr_gem_enable_reuse(intel->bufmgr);
+
+	list_init(&intel->batch_pixmaps);
+	list_init(&intel->flush_pixmaps);
 }
 
 Bool i830_crtc_on(xf86CrtcPtr crtc)

src/i830_uxa.c | 235

@@ -79,31 +79,6 @@ const int I830PatternROP[16] = {
 
 static int uxa_pixmap_index;
 
-/**
- * Returns whether a given pixmap is tiled or not.
- *
- * Currently, we only have one pixmap that might be tiled, which is the front
- * buffer. At the point where we are tiling some pixmaps managed by the
- * general allocator, we should move this to using pixmap privates.
- */
-Bool i830_pixmap_tiled(PixmapPtr pixmap)
-{
-	dri_bo *bo;
-	uint32_t tiling_mode, swizzle_mode;
-	int ret;
-
-	bo = i830_get_pixmap_bo(pixmap);
-	assert(bo != NULL);
-
-	ret = drm_intel_bo_get_tiling(bo, &tiling_mode, &swizzle_mode);
-	if (ret != 0) {
-		FatalError("Couldn't get tiling on bo %p: %s\n",
-			   bo, strerror(-ret));
-	}
-
-	return tiling_mode != I915_TILING_NONE;
-}
-
 Bool
 i830_get_aperture_space(ScrnInfoPtr scrn, drm_intel_bo ** bo_table,
 			int num_bos)

@@ -143,6 +118,62 @@ static int i830_pixmap_pitch_is_aligned(PixmapPtr pixmap)
 	    intel->accel_pixmap_pitch_alignment == 0;
 }
 
+static unsigned int
+i830_uxa_pixmap_compute_size(PixmapPtr pixmap,
+			     int w, int h,
+			     uint32_t *tiling,
+			     int *stride)
+{
+	ScrnInfoPtr scrn = xf86Screens[pixmap->drawable.pScreen->myNum];
+	intel_screen_private *intel = intel_get_screen_private(scrn);
+	int pitch_align;
+	int size;
+
+	if (*tiling != I915_TILING_NONE) {
+		/* First check whether tiling is necessary. */
+		pitch_align = intel->accel_pixmap_pitch_alignment;
+		size = ROUND_TO((w * pixmap->drawable.bitsPerPixel + 7) / 8,
+				pitch_align) * ALIGN (h, 2);
+		if (size < 4096)
+			*tiling = I915_TILING_NONE;
+	}
+
+	if (*tiling == I915_TILING_NONE) {
+		pitch_align = intel->accel_pixmap_pitch_alignment;
+	} else {
+		pitch_align = 512;
+	}
+
+	*stride = ROUND_TO((w * pixmap->drawable.bitsPerPixel + 7) / 8,
+			   pitch_align);
+
+	if (*tiling == I915_TILING_NONE) {
+		/* Round the height up so that the GPU's access to a 2x2 aligned
+		 * subspan doesn't address an invalid page offset beyond the
+		 * end of the GTT.
+		 */
+		size = *stride * ALIGN(h, 2);
+	} else {
+		int aligned_h = h;
+		if (*tiling == I915_TILING_X)
+			aligned_h = ALIGN(h, 8);
+		else
+			aligned_h = ALIGN(h, 32);
+
+		*stride = i830_get_fence_pitch(intel, *stride, *tiling);
+		/* Round the object up to the size of the fence it will live in
+		 * if necessary. We could potentially make the kernel allocate
+		 * a larger aperture space and just bind the subset of pages in,
+		 * but this is easier and also keeps us out of trouble (as much)
+		 * with drm_intel_bufmgr_check_aperture().
+		 */
+		size = i830_get_fence_size(intel, *stride * aligned_h);
+		assert(size >= *stride * aligned_h);
+	}
+
+	return size;
+}
+
 /**
  * Sets up hardware state for a series of solid fills.
  */

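(Aside, not part of the patch: a worked example of the untiled branch above. ROUND_TO and ALIGN are assumed to round a value up to a multiple of the alignment, with ALIGN taking a power of two, as their use elsewhere in the driver suggests; the 64-byte pitch_align and the 100x100 at 32 bpp inputs are mine. The byte width is (100*32+7)/8 = 400, the stride rounds to 448, and the size is 448 * ALIGN(100, 2) = 44800.)

#include <stdio.h>

/* Assumed equivalents of the driver's macros. */
#define ALIGN(v, a)    (((v) + (a) - 1) & ~((a) - 1))
#define ROUND_TO(v, a) (((v) + (a) - 1) / (a) * (a))

int main(void)
{
	int w = 100, h = 100, bpp = 32;
	int pitch_align = 64;	/* hypothetical accel_pixmap_pitch_alignment */

	/* Untiled branch of i830_uxa_pixmap_compute_size. */
	int stride = ROUND_TO((w * bpp + 7) / 8, pitch_align);
	unsigned int size = stride * ALIGN(h, 2);

	printf("stride = %d bytes, size = %u bytes\n", stride, size);
	/* prints: stride = 448 bytes, size = 44800 bytes */
	return 0;
}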
@@ -452,25 +483,71 @@ Bool i830_transform_is_affine(PictTransformPtr t)
 	return t->matrix[2][0] == 0 && t->matrix[2][1] == 0;
 }
 
-dri_bo *i830_get_pixmap_bo(PixmapPtr pixmap)
+struct intel_pixmap *i830_get_pixmap_intel(PixmapPtr pixmap)
 {
 	return dixLookupPrivate(&pixmap->devPrivates, &uxa_pixmap_index);
 }
 
-void i830_set_pixmap_bo(PixmapPtr pixmap, dri_bo * bo)
+static void i830_uxa_set_pixmap_intel(PixmapPtr pixmap, struct intel_pixmap *intel)
 {
-	dri_bo *old_bo = i830_get_pixmap_bo(pixmap);
-
-	if (old_bo)
-		dri_bo_unreference(old_bo);
-	if (bo != NULL)
-		dri_bo_reference(bo);
-	dixSetPrivate(&pixmap->devPrivates, &uxa_pixmap_index, bo);
+	dixSetPrivate(&pixmap->devPrivates, &uxa_pixmap_index, intel);
 }
 
-static void i830_uxa_set_pixmap_bo(PixmapPtr pixmap, dri_bo * bo)
+dri_bo *i830_get_pixmap_bo(PixmapPtr pixmap)
 {
-	dixSetPrivate(&pixmap->devPrivates, &uxa_pixmap_index, bo);
+	struct intel_pixmap *intel;
+
+	intel = i830_get_pixmap_intel(pixmap);
+	if (intel == NULL)
+		return NULL;
+
+	return intel->bo;
+}
+
+void i830_set_pixmap_bo(PixmapPtr pixmap, dri_bo * bo)
+{
+	struct intel_pixmap *priv;
+
+	priv = i830_get_pixmap_intel(pixmap);
+
+	if (priv != NULL) {
+		dri_bo_unreference(priv->bo);
+
+		priv->flush_read_domains = priv->flush_write_domain = 0;
+		priv->batch_read_domains = priv->batch_write_domain = 0;
+		list_del(&priv->batch);
+		list_del(&priv->flush);
+	}
+
+	if (bo != NULL) {
+		uint32_t swizzle_mode;
+		int ret;
+
+		if (priv == NULL) {
+			priv = xcalloc(1, sizeof (struct intel_pixmap));
+			if (priv == NULL)
+				goto BAIL;
+		}
+
+		dri_bo_reference(bo);
+		priv->bo = bo;
+
+		ret = drm_intel_bo_get_tiling(bo,
+					      &priv->tiling,
+					      &swizzle_mode);
+		if (ret != 0) {
+			FatalError("Couldn't get tiling on bo %p: %s\n",
+				   bo, strerror(-ret));
+		}
+	} else {
+		if (priv != NULL) {
+			xfree(priv);
+			priv = NULL;
+		}
+	}
+
+BAIL:
+	i830_uxa_set_pixmap_intel(pixmap, priv);
+}
+
 static Bool i830_uxa_prepare_access(PixmapPtr pixmap, uxa_access_t access)

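(Aside, not part of the patch: one behavioural consequence worth noting is the ownership contract of the new i830_set_pixmap_bo. It takes its own reference on the incoming bo via dri_bo_reference and drops the reference held through any previous private, so callers keep the reference they already own; this is also why the i830_uxa_create_screen_resources hunk further down no longer calls dri_bo_reference explicitly. A standalone toy refcount model of that contract follows; every toy_* name is invented.)

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_bo { int refcnt; };

static void toy_ref(struct toy_bo *bo)   { bo->refcnt++; }
static void toy_unref(struct toy_bo *bo) { assert(--bo->refcnt >= 0); }

struct toy_priv { struct toy_bo *bo; };

/* Mirrors the ownership rules of the new i830_set_pixmap_bo: drop the old
 * private's reference, take a fresh one on the incoming bo, free the
 * private when the bo is detached. */
static struct toy_priv *
toy_set_pixmap_bo(struct toy_priv *priv, struct toy_bo *bo)
{
	if (priv != NULL)
		toy_unref(priv->bo);

	if (bo != NULL) {
		if (priv == NULL)
			priv = calloc(1, sizeof(*priv));
		toy_ref(bo);
		priv->bo = bo;
	} else {
		free(priv);
		priv = NULL;
	}
	return priv;
}

int main(void)
{
	struct toy_bo bo = { .refcnt = 1 };	/* caller's own reference */
	struct toy_priv *priv = NULL;

	priv = toy_set_pixmap_bo(priv, &bo);	/* refcnt -> 2 */
	printf("after attach: %d\n", bo.refcnt);

	priv = toy_set_pixmap_bo(priv, NULL);	/* refcnt -> 1, private freed */
	printf("after detach: %d\n", bo.refcnt);
	return 0;
}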
@@ -562,8 +639,6 @@ i830_uxa_create_pixmap(ScreenPtr screen, int w, int h, int depth,
 {
 	ScrnInfoPtr scrn = xf86Screens[screen->myNum];
 	intel_screen_private *intel = intel_get_screen_private(scrn);
-	dri_bo *bo;
-	int stride;
 	PixmapPtr pixmap;
 
 	if (w > 32767 || h > 32767)

@@ -575,46 +650,25 @@ i830_uxa_create_pixmap(ScreenPtr screen, int w, int h, int depth,
 	pixmap = fbCreatePixmap(screen, 0, 0, depth, usage);
 
 	if (w && h) {
+		struct intel_pixmap *priv;
 		unsigned int size;
-		uint32_t tiling = I915_TILING_NONE;
-		int pitch_align;
+		int stride;
 
-		if (usage == INTEL_CREATE_PIXMAP_TILING_X) {
-			tiling = I915_TILING_X;
-			pitch_align = 512;
-		} else if (usage == INTEL_CREATE_PIXMAP_TILING_Y) {
-			tiling = I915_TILING_Y;
-			pitch_align = 512;
-		} else {
-			pitch_align = intel->accel_pixmap_pitch_alignment;
+		priv = xcalloc(1, sizeof (struct intel_pixmap));
+		if (priv == NULL) {
+			fbDestroyPixmap(pixmap);
+			return NullPixmap;
 		}
 
-		stride = ROUND_TO((w * pixmap->drawable.bitsPerPixel + 7) / 8,
-				  pitch_align);
+		if (usage == INTEL_CREATE_PIXMAP_TILING_X)
+			priv->tiling = I915_TILING_X;
+		else if (usage == INTEL_CREATE_PIXMAP_TILING_Y)
+			priv->tiling = I915_TILING_Y;
+		else
+			priv->tiling = I915_TILING_NONE;
 
-		if (tiling == I915_TILING_NONE) {
-			/* Round the height up so that the GPU's access to a 2x2 aligned
-			 * subspan doesn't address an invalid page offset beyond the
-			 * end of the GTT.
-			 */
-			size = stride * ALIGN(h, 2);
-		} else {
-			int aligned_h = h;
-			if (tiling == I915_TILING_X)
-				aligned_h = ALIGN(h, 8);
-			else
-				aligned_h = ALIGN(h, 32);
-
-			stride = i830_get_fence_pitch(intel, stride, tiling);
-			/* Round the object up to the size of the fence it will live in
-			 * if necessary. We could potentially make the kernel allocate
-			 * a larger aperture space and just bind the subset of pages in,
-			 * but this is easier and also keeps us out of trouble (as much)
-			 * with drm_intel_bufmgr_check_aperture().
-			 */
-			size = i830_get_fence_size(intel, stride * aligned_h);
-			assert(size >= stride * aligned_h);
-		}
+		size = i830_uxa_pixmap_compute_size(pixmap, w, h,
+						    &priv->tiling, &stride);
 
 		/* Fail very large allocations on 32-bit systems. Large BOs will
 		 * tend to hit SW fallbacks frequently, and also will tend to fail

@@ -626,27 +680,34 @@ i830_uxa_create_pixmap(ScreenPtr screen, int w, int h, int depth,
 		 */
 		if (sizeof(unsigned long) == 4 &&
 		    size > (unsigned int)(1024 * 1024 * 1024)) {
+			xfree(priv);
 			fbDestroyPixmap(pixmap);
 			return NullPixmap;
 		}
 
 		if (usage == UXA_CREATE_PIXMAP_FOR_MAP)
-			bo = drm_intel_bo_alloc(intel->bufmgr, "pixmap", size,
-						0);
+			priv->bo = drm_intel_bo_alloc(intel->bufmgr,
+						      "pixmap", size, 0);
 		else
-			bo = drm_intel_bo_alloc_for_render(intel->bufmgr,
-							   "pixmap", size, 0);
-		if (!bo) {
+			priv->bo = drm_intel_bo_alloc_for_render(intel->bufmgr,
+								 "pixmap",
+								 size, 0);
+		if (!priv->bo) {
+			xfree(priv);
 			fbDestroyPixmap(pixmap);
 			return NullPixmap;
 		}
 
-		if (tiling != I915_TILING_NONE)
-			drm_intel_bo_set_tiling(bo, &tiling, stride);
+		if (priv->tiling != I915_TILING_NONE)
+			drm_intel_bo_set_tiling(priv->bo,
+						&priv->tiling,
+						stride);
 
 		screen->ModifyPixmapHeader(pixmap, w, h, 0, 0, stride, NULL);
 
-		i830_uxa_set_pixmap_bo(pixmap, bo);
+		list_init(&priv->batch);
+		list_init(&priv->flush);
+		i830_uxa_set_pixmap_intel(pixmap, priv);
 	}
 
 	return pixmap;

@@ -654,16 +715,13 @@ i830_uxa_create_pixmap(ScreenPtr screen, int w, int h, int depth,
 
 static Bool i830_uxa_destroy_pixmap(PixmapPtr pixmap)
 {
-	if (pixmap->refcnt == 1) {
-		dri_bo *bo = i830_get_pixmap_bo(pixmap);
-
-		if (bo)
-			dri_bo_unreference(bo);
-	}
+	if (pixmap->refcnt == 1)
+		i830_set_pixmap_bo(pixmap, NULL);
 	fbDestroyPixmap(pixmap);
 	return TRUE;
 }
 
 
 void i830_uxa_create_screen_resources(ScreenPtr screen)
 {
 	ScrnInfoPtr scrn = xf86Screens[screen->myNum];

@@ -672,8 +730,7 @@ void i830_uxa_create_screen_resources(ScreenPtr screen)
 
 	if (bo != NULL) {
 		PixmapPtr pixmap = screen->GetScreenPixmap(screen);
-		i830_uxa_set_pixmap_bo(pixmap, bo);
-		dri_bo_reference(bo);
+		i830_set_pixmap_bo(pixmap, bo);
 	}
 }
 

@@ -1046,13 +1046,14 @@ static sampler_state_extend_t sampler_state_extend_from_picture(int repeat_type)
  * picture in the given surface state buffer.
  */
 static void
-i965_set_picture_surface_state(dri_bo * ss_bo, int ss_index,
+i965_set_picture_surface_state(intel_screen_private *intel,
+			       dri_bo * ss_bo, int ss_index,
 			       PicturePtr picture, PixmapPtr pixmap,
 			       Bool is_dst)
 {
 	struct brw_surface_state_padded *ss;
 	struct brw_surface_state local_ss;
-	dri_bo *pixmap_bo = i830_get_pixmap_bo(pixmap);
+	struct intel_pixmap *priv = i830_get_pixmap_intel(pixmap);
 
 	ss = (struct brw_surface_state_padded *)ss_bo->virtual + ss_index;
 

@@ -1082,7 +1083,7 @@ i965_set_picture_surface_state(dri_bo * ss_bo, int ss_index,
 	local_ss.ss0.vert_line_stride_ofs = 0;
 	local_ss.ss0.mipmap_layout_mode = 0;
 	local_ss.ss0.render_cache_read_mode = 0;
-	local_ss.ss1.base_addr = pixmap_bo->offset;
+	local_ss.ss1.base_addr = priv->bo->offset;
 
 	local_ss.ss2.mip_count = 0;
 	local_ss.ss2.render_target_rotation = 0;

@@ -1094,7 +1095,7 @@ i965_set_picture_surface_state(dri_bo * ss_bo, int ss_index,
 
 	memcpy(ss, &local_ss, sizeof(local_ss));
 
-	if (pixmap_bo != NULL) {
+	if (priv->bo != NULL) {
 		uint32_t write_domain, read_domains;
 
 		if (is_dst) {

@@ -1104,11 +1105,13 @@ i965_set_picture_surface_state(dri_bo * ss_bo, int ss_index,
 			write_domain = 0;
 			read_domains = I915_GEM_DOMAIN_SAMPLER;
 		}
 
+		intel_batch_mark_pixmap_domains(intel, priv, read_domains, write_domain);
+
 		dri_bo_emit_reloc(ss_bo, read_domains, write_domain,
 				  0,
 				  ss_index * sizeof(*ss) +
 				  offsetof(struct brw_surface_state, ss1),
-				  pixmap_bo);
+				  priv->bo);
 	}
 }
 

@@ -1508,14 +1511,14 @@ i965_prepare_composite(int op, PicturePtr source_picture,
 		return FALSE;
 	}
 	/* Set up the state buffer for the destination surface */
-	i965_set_picture_surface_state(surface_state_bo, 0,
+	i965_set_picture_surface_state(intel, surface_state_bo, 0,
 				       dest_picture, dest, TRUE);
 	/* Set up the source surface state buffer */
-	i965_set_picture_surface_state(surface_state_bo, 1,
+	i965_set_picture_surface_state(intel, surface_state_bo, 1,
 				       source_picture, source, FALSE);
 	if (mask) {
 		/* Set up the mask surface state buffer */
-		i965_set_picture_surface_state(surface_state_bo, 2,
+		i965_set_picture_surface_state(intel, surface_state_bo, 2,
 					       mask_picture, mask, FALSE);
 	}
 	dri_bo_unmap(surface_state_bo);