xvmc: Handle allocation failure around batch submission

If we fail to allocate a new batch, just stall and reuse the old one
rather than crashing.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
This commit is contained in:
Chris Wilson 2013-11-11 14:56:22 +00:00
parent 3e93449b54
commit c489934ed7
6 changed files with 50 additions and 38 deletions

View File

@@ -1207,7 +1207,7 @@ static int i915_xvmc_mc_render_surface(Display * display, XvMCContext * context,
}
}
intelFlushBatch(TRUE);
intelFlushBatch();
i915_xvmc_free_render_state_buffers(pI915XvMC);

View File

@@ -841,7 +841,7 @@ static Status render_surface(Display * display,
}
}
}
intelFlushBatch(TRUE);
intelFlushBatch();
UNLOCK_HARDWARE(intel_ctx->hw_context);
}
return Success;

View File

@@ -62,6 +62,15 @@ static void i965_end_batch(void)
xvmc_driver->batch.ptr += 4;
}
/* Rewind the batch write cursor to the start of the currently-mapped
 * batch buffer and refresh the size/space accounting from the bo itself.
 * Assumes batch.buf is non-NULL and already GTT-mapped (bo->virtual valid).
 * NOTE(review): the "- 8" appears to reserve room for the end-of-batch
 * dwords emitted by i965_end_batch() — confirm against that function. */
static void reset_batch(void)
{
dri_bo *bo = xvmc_driver->batch.buf;
xvmc_driver->batch.ptr = xvmc_driver->batch.init_ptr = bo->virtual;
xvmc_driver->batch.size = bo->size;
xvmc_driver->batch.space = bo->size - 8;
}
Bool intelInitBatchBuffer(void)
{
if ((xvmc_driver->batch.buf =
@@ -71,59 +80,50 @@ Bool intelInitBatchBuffer(void)
return False;
}
drm_intel_gem_bo_map_gtt(xvmc_driver->batch.buf);
if (drm_intel_gem_bo_map_gtt(xvmc_driver->batch.buf)) {
drm_intel_bo_unreference(xvmc_driver->batch.buf);
return False;
}
xvmc_driver->batch.init_ptr = xvmc_driver->batch.buf->virtual;
xvmc_driver->batch.size = BATCH_SIZE;
xvmc_driver->batch.space = BATCH_SIZE;
xvmc_driver->batch.ptr = xvmc_driver->batch.init_ptr;
reset_batch();
return True;
}
void intelFiniBatchBuffer(void)
{
drm_intel_gem_bo_unmap_gtt(xvmc_driver->batch.buf);
if (xvmc_driver->batch.buf == NULL)
return;
drm_intel_bo_unreference(xvmc_driver->batch.buf);
}
void intelFlushBatch(Bool refill)
void intelFlushBatch(void)
{
i965_end_batch();
dri_bo *bo;
drm_intel_gem_bo_unmap_gtt(xvmc_driver->batch.buf);
i965_end_batch();
drm_intel_bo_exec(xvmc_driver->batch.buf,
xvmc_driver->batch.ptr - xvmc_driver->batch.init_ptr,
0, 0, 0);
bo = drm_intel_bo_alloc(xvmc_driver->bufmgr,
"batch buffer", BATCH_SIZE, 0x1000);
if (bo != NULL && drm_intel_gem_bo_map_gtt(bo) == 0) {
drm_intel_bo_unreference(xvmc_driver->batch.buf);
if ((xvmc_driver->batch.buf =
drm_intel_bo_alloc(xvmc_driver->bufmgr,
"batch buffer", BATCH_SIZE, 0x1000)) == NULL) {
fprintf(stderr, "unable to alloc batch buffer\n");
xvmc_driver->batch.buf = bo;
} else {
if (bo != NULL)
drm_intel_bo_unreference(bo);
drm_intel_gem_bo_map_gtt(xvmc_driver->batch.buf);
}
drm_intel_gem_bo_map_gtt(xvmc_driver->batch.buf);
xvmc_driver->batch.init_ptr = xvmc_driver->batch.buf->virtual;
xvmc_driver->batch.size = BATCH_SIZE;
xvmc_driver->batch.space = BATCH_SIZE;
xvmc_driver->batch.ptr = xvmc_driver->batch.init_ptr;
}
void intelBatchbufferRequireSpace(int size)
{
assert(xvmc_driver->batch.ptr - xvmc_driver->batch.init_ptr + size <
xvmc_driver->batch.size - 8);
if (xvmc_driver->batch.ptr - xvmc_driver->batch.init_ptr + size
>= xvmc_driver->batch.size - 8)
intelFlushBatch(1);
reset_batch();
}
void intelBatchbufferData(const void *data, unsigned bytes, unsigned flags)
{
intelBatchbufferRequireSpace(bytes);
assert(bytes <= xvmc_driver->batch.space);
memcpy(xvmc_driver->batch.ptr, data, bytes);
xvmc_driver->batch.ptr += bytes;
xvmc_driver->batch.space -= bytes;

View File

@@ -11,8 +11,6 @@ extern int VERBOSE;
#define BEGIN_BATCH(n) \
do { \
assert(xvmc_driver->batch.space >= (n) *4); \
if (xvmc_driver->batch.space < (n)*4) \
intelFlushBatch(TRUE); \
batch_ptr = xvmc_driver->batch.ptr; \
} while (0)
@@ -46,7 +44,7 @@ extern int VERBOSE;
xvmc_driver->batch.ptr = batch_ptr; \
} while(0)
extern void intelFlushBatch(Bool);
extern void intelFlushBatch(void);
extern void intelBatchbufferData(const void *, unsigned, unsigned);
extern Bool intelInitBatchBuffer(void);
extern void intelFiniBatchBuffer(void);

View File

@@ -313,6 +313,16 @@ _X_EXPORT Status XvMCCreateContext(Display * display, XvPortID port,
}
drm_intel_bufmgr_gem_enable_reuse(xvmc_driver->bufmgr);
if (!intelInitBatchBuffer()) {
XFree(priv_data);
context->privData = NULL;
dri_bufmgr_destroy(xvmc_driver->bufmgr);
xvmc_driver = NULL;
return BadAlloc;
}
/* call driver hook.
* driver hook should free priv_data after return if success.*/
ret =
@@ -320,14 +330,18 @@ _X_EXPORT Status XvMCCreateContext(Display * display, XvPortID port,
priv_data);
if (ret) {
XVMC_ERR("driver create context failed\n");
intelFiniBatchBuffer();
XFree(priv_data);
context->privData = NULL;
dri_bufmgr_destroy(xvmc_driver->bufmgr);
xvmc_driver = NULL;
return ret;
}
pthread_mutex_init(&xvmc_driver->ctxmutex, NULL);
intelInitBatchBuffer();
intel_xvmc_dump_open();
return Success;

View File

@@ -1010,7 +1010,7 @@ static Status put_slice2(Display * display, XvMCContext * context,
cs_buffer();
vld_send_media_object(media_state.slice_data.bo,
nbytes, 0, mb_row, 6, 127, q_scale_code);
intelFlushBatch(TRUE);
intelFlushBatch();
UNLOCK_HARDWARE(intel_ctx->hw_context);
return Success;
@@ -1207,7 +1207,7 @@ static Status render_surface(Display * display,
}
}
}
intelFlushBatch(TRUE);
intelFlushBatch();
UNLOCK_HARDWARE(intel_ctx->hw_context);
return Success;
}