sna: And keep unity happy

Rewrite the DRI layer to avoid the various bugs and shortcomings of the
Xserver and of interfacing with mesa.

Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=38732
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=39044
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date:   2011-11-15 11:06:04 +00:00
Commit: 78d4e99fc9 (parent aac022cbb3)

7 changed files with 569 additions and 465 deletions


@@ -263,7 +263,6 @@ static struct kgem_bo *__kgem_bo_init(struct kgem_bo *bo,
 	bo->handle = handle;
 	bo->size = size;
 	bo->reusable = true;
-	bo->purgeable = true;
 	bo->cpu_read = true;
 	bo->cpu_write = true;
 	list_init(&bo->request);
@@ -577,6 +576,8 @@ static void kgem_bo_free(struct kgem *kgem, struct kgem_bo *bo)
 {
 	struct kgem_bo_binding *b;
 
+	DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle));
+
 	b = bo->binding.next;
 	while (b) {
 		struct kgem_bo_binding *next = b->next;
@@ -592,6 +593,8 @@ static void kgem_bo_free(struct kgem *kgem, struct kgem_bo *bo)
 static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
 {
+	DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle));
+
 	assert(list_is_empty(&bo->list));
 	assert(bo->refcnt == 0);
@@ -603,9 +606,11 @@ static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
 	if(!bo->reusable)
 		goto destroy;
 
-	if (bo->purgeable && !bo->rq && !bo->needs_flush) {
+	if (!bo->rq && !bo->needs_flush) {
+		assert(!bo->purged);
+		DBG(("%s: handle=%d, purged\n", __FUNCTION__, bo->handle));
 		if (!gem_madvise(kgem->fd, bo->handle, I915_MADV_DONTNEED)) {
 			kgem->need_purge |= bo->gpu;
 			goto destroy;
@@ -616,10 +621,13 @@ static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
 	kgem->need_expire = true;
 
 	if (bo->rq) {
+		DBG(("%s: handle=%d -> active\n", __FUNCTION__, bo->handle));
 		list_move(&bo->list, active(kgem, bo->size));
 	} else if (bo->purged) {
+		DBG(("%s: handle=%d -> inactive\n", __FUNCTION__, bo->handle));
 		list_move(&bo->list, inactive(kgem, bo->size));
 	} else {
+		DBG(("%s: handle=%d -> flushing\n", __FUNCTION__, bo->handle));
 		assert(list_is_empty(&bo->request));
 		list_add(&bo->request, &kgem->flushing);
 		list_move(&bo->list, active(kgem, bo->size));
@ -651,8 +659,7 @@ bool kgem_retire(struct kgem *kgem)
DBG(("%s: moving %d from flush to inactive\n",
__FUNCTION__, bo->handle));
if (bo->purgeable &&
gem_madvise(kgem->fd, bo->handle, I915_MADV_DONTNEED)) {
if (gem_madvise(kgem->fd, bo->handle, I915_MADV_DONTNEED)) {
bo->purged = true;
bo->needs_flush = false;
bo->gpu = false;
@@ -692,7 +699,7 @@ bool kgem_retire(struct kgem *kgem)
 			if (bo->refcnt == 0) {
 				if (bo->reusable) {
-					if (bo->needs_flush || !bo->purgeable) {
+					if (bo->needs_flush) {
 						DBG(("%s: moving %d to flushing\n",
 						     __FUNCTION__, bo->handle));
 						list_add(&bo->request, &kgem->flushing);
@@ -813,7 +820,6 @@ static void kgem_finish_partials(struct kgem *kgem)
 		if (base) {
 			memcpy(base, &bo->base, sizeof (*base));
 			base->reusable = true;
-			base->purgeable = true;
 			list_init(&base->list);
 			list_replace(&bo->base.request, &base->request);
 			free(bo);
@@ -1269,7 +1275,6 @@ search_linear_cache(struct kgem *kgem, unsigned int size, bool use_active)
 		     use_active ? "active" : "inactive"));
 		assert(bo->refcnt == 0);
 		assert(bo->reusable);
-		assert(use_active || bo->purgeable);
 		assert(use_active || bo->gpu == 0);
 		//assert(use_active || !kgem_busy(kgem, bo->handle));
 		return bo;
@@ -1298,7 +1303,6 @@ struct kgem_bo *kgem_create_for_name(struct kgem *kgem, uint32_t name)
 	}
 
 	bo->reusable = false;
-	bo->purgeable = false;
 	return bo;
 }
@@ -1613,7 +1617,6 @@ skip_active_search:
 		     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
 		assert(bo->refcnt == 0);
 		assert(bo->reusable);
-		assert(bo->purgeable);
 		assert((flags & CREATE_INACTIVE) == 0 || bo->gpu == 0);
 		assert((flags & CREATE_INACTIVE) == 0 ||
 		       !kgem_busy(kgem, bo->handle));
@@ -1846,7 +1849,16 @@ uint32_t kgem_bo_flink(struct kgem *kgem, struct kgem_bo *bo)
 	 * on the buffer, and *presuming* they do not pass it on to a third
 	 * party, we track the lifetime accurately.
 	 */
-	bo->purgeable = false;
 	bo->reusable = false;
+
+	/* The bo is outside of our control, so presume it is written to */
+	bo->needs_flush = true;
+	bo->gpu = true;
+	bo->cpu_read = bo->cpu_write = false;
+
+	bo->flush = 1;
+	if (bo->exec)
+		kgem->flush = 1;
+
 	return flink.name;
 }
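
Note: kgem_bo_flink() exports the GEM object as a global name that any other
process can open, which is why the hunk above marks the bo as non-reusable
and presumes it is written to by the GPU. A minimal sketch of the underlying
DRM_IOCTL_GEM_FLINK usage (illustrative only; the wrapper name and the
0-on-failure convention are assumptions, not code from this commit):

#include <stdint.h>
#include <xf86drm.h>
#include <drm.h>

/* Export a GEM handle as a global "flink" name visible to other clients. */
static uint32_t gem_flink(int fd, uint32_t handle)
{
	struct drm_gem_flink flink;

	flink.handle = handle;
	if (drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &flink))
		return 0;

	return flink.name;
}
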
@@ -1888,7 +1900,7 @@ struct kgem_bo *kgem_create_map(struct kgem *kgem,
 		return NULL;
 	}
 
-	bo->purgeable = bo->reusable = false;
+	bo->reusable = false;
 	bo->sync = true;
 	DBG(("%s(ptr=%p, size=%d, read_only=%d) => handle=%d\n",
 	     __FUNCTION__, ptr, size, read_only, handle));
@@ -2017,7 +2029,7 @@ struct kgem_bo *kgem_create_proxy(struct kgem_bo *target,
 	if (bo == NULL)
 		return NULL;
 
-	bo->purgeable = bo->reusable = false;
+	bo->reusable = false;
 	bo->proxy = kgem_bo_reference(target);
 	bo->delta = offset;
 	return bo;
@@ -2146,7 +2158,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 		bo->base.vmap = true;
 		bo->need_io = 0;
 	}
 
-	bo->base.purgeable = bo->base.reusable = false;
+	bo->base.reusable = false;
 	bo->alloc = alloc;
 	bo->used = size;
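
Note: the purge paths in the hunks above rely on gem_madvise(), which is not
part of this diff. For reference, it is essentially a thin wrapper around
libdrm's I915_GEM_MADVISE ioctl; the sketch below is illustrative only (the
helper name, includes and error handling are assumptions, not code from this
commit):

#include <stdint.h>
#include <xf86drm.h>
#include <i915_drm.h>

/* Tell the kernel the buffer's backing pages may be discarded
 * (I915_MADV_DONTNEED) or are wanted again (I915_MADV_WILLNEED);
 * the ioctl reports whether the pages are still resident, i.e.
 * whether the buffer survived being purged.
 */
static int gem_madvise(int fd, uint32_t handle, uint32_t state)
{
	struct drm_i915_gem_madvise madv;

	madv.handle = handle;
	madv.madv = state;
	madv.retained = 1;
	(void)drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);

	return madv.retained;
}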


@@ -75,7 +75,6 @@ struct kgem_bo {
 	uint32_t vmap : 1;
 	uint32_t flush : 1;
 	uint32_t sync : 1;
-	uint32_t purgeable : 1;
 	uint32_t purged : 1;
 };


@@ -305,15 +305,15 @@ extern void sna_mode_fini(struct sna *sna);
 extern int sna_crtc_id(xf86CrtcPtr crtc);
 extern int sna_output_dpms_status(xf86OutputPtr output);
 
-extern int sna_do_pageflip(struct sna *sna,
-			   PixmapPtr pixmap,
-			   void *data,
-			   int ref_crtc_hw_id,
-			   uint32_t *old_fb);
+extern int sna_page_flip(struct sna *sna,
+			 struct kgem_bo *bo,
+			 void *data,
+			 int ref_crtc_hw_id,
+			 uint32_t *old_fb);
 
 extern PixmapPtr sna_set_screen_pixmap(struct sna *sna, PixmapPtr pixmap);
 
-void sna_mode_delete_fb(struct sna *sna, PixmapPtr pixmap, uint32_t fb);
+void sna_mode_delete_fb(struct sna *sna, uint32_t fb);
 
 static inline struct sna *
 to_sna(ScrnInfoPtr scrn)


@@ -1773,21 +1773,16 @@ PixmapPtr sna_set_screen_pixmap(struct sna *sna, PixmapPtr pixmap)
 }
 
 int
-sna_do_pageflip(struct sna *sna,
-		PixmapPtr pixmap,
-		void *data,
-		int ref_crtc_hw_id,
-		uint32_t *old_fb)
+sna_page_flip(struct sna *sna,
+	      struct kgem_bo *bo,
+	      void *data,
+	      int ref_crtc_hw_id,
+	      uint32_t *old_fb)
 {
 	ScrnInfoPtr scrn = sna->scrn;
 	struct sna_mode *mode = &sna->mode;
-	struct kgem_bo *bo;
 	int count;
 
-	bo = sna_pixmap_pin(pixmap);
-	if (!bo)
-		return 0;
-
 	*old_fb = mode->fb_id;
 
 	/*
@@ -1822,7 +1817,6 @@ sna_do_pageflip(struct sna *sna,
 
 	count = do_page_flip(sna, data, ref_crtc_hw_id);
 	DBG(("%s: page flipped %d crtcs\n", __FUNCTION__, count));
 	if (count) {
-		sna->mode.fb_pixmap = pixmap->drawable.serialNumber;
 		bo->cpu_read = bo->cpu_write = false;
 		bo->gpu = true;
@@ -1832,6 +1826,7 @@ sna_do_pageflip(struct sna *sna,
 		 * upon release.
 		 */
 		bo->needs_flush = true;
+		bo->reusable = true;
 	} else {
 		drmModeRmFB(sna->kgem.fd, mode->fb_id);
 		mode->fb_id = *old_fb;
@@ -1840,13 +1835,10 @@ sna_do_pageflip(struct sna *sna,
 	return count;
 }
 
-void sna_mode_delete_fb(struct sna *sna, PixmapPtr pixmap, uint32_t fb)
+void sna_mode_delete_fb(struct sna *sna, uint32_t fb)
 {
 	if (fb)
 		drmModeRmFB(sna->kgem.fd, fb);
-
-	if (pixmap)
-		pixmap->drawable.pScreen->DestroyPixmap(pixmap);
 }
 
 static const xf86CrtcConfigFuncsRec sna_crtc_config_funcs = {
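
Note: with this change sna_page_flip() no longer pins the pixmap itself, so
pinning moves to the caller. A hypothetical call site (illustrative only; the
function name and the flip_data/ref_crtc_hw_id parameters are assumptions,
not code from this commit) would now look like:

/* Hypothetical caller: schedule a flip to the given front pixmap. */
static Bool schedule_flip(struct sna *sna, PixmapPtr pixmap,
			  void *flip_data, int ref_crtc_hw_id)
{
	struct kgem_bo *bo;
	uint32_t old_fb;

	/* The caller pins the pixmap and hands the resulting bo to
	 * sna_page_flip() instead of passing the PixmapPtr.
	 */
	bo = sna_pixmap_pin(pixmap);
	if (!bo)
		return FALSE;

	return sna_page_flip(sna, bo, flip_data, ref_crtc_hw_id, &old_fb) != 0;
}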

(One file's diff suppressed because it is too large.)


@@ -613,12 +613,12 @@ sna_wakeup_handler(int i, pointer data, unsigned long result, pointer read_mask)
 	if ((int)result < 0)
 		return;
 
-	if (FD_ISSET(sna->kgem.fd, (fd_set*)read_mask))
-		sna_dri_wakeup(sna);
-
 	sna->WakeupHandler(i, sna->WakeupData, result, read_mask);
 
 	sna_accel_wakeup_handler(sna);
+
+	if (FD_ISSET(sna->kgem.fd, (fd_set*)read_mask))
+		sna_dri_wakeup(sna);
 }
#if HAVE_UDEV


@@ -432,7 +432,6 @@ struct kgem_bo *sna_replace(struct sna *sna,
 	DBG(("%s(handle=%d, %dx%d, bpp=%d, tiling=%d)\n",
 	     __FUNCTION__, bo->handle, width, height, bpp, bo->tiling));
 
-	assert(bo->reusable);
 	if (kgem_bo_is_busy(bo)) {
 		struct kgem_bo *new_bo;