sna: Disable rendering with the DRM device whilst away from VT

As root, X gets away with many things, including submitting commands to
the DRM device whilst it is no longer authorised (i.e. when it has
relinquished master to another client across a VT switch). In the
non-root future, if we attempt to use the device whilst unauthorised, the
rendering will be lost and we will mark the device as unusable. So flush
our render queue to the device around a VT switch.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
This commit is contained in:
Chris Wilson 2014-07-23 21:43:02 +01:00
parent 8d95e90b7b
commit bc50dff844
7 changed files with 66 additions and 10 deletions

View File

@ -528,6 +528,17 @@ int __intel_peek_fd(ScrnInfoPtr scrn)
return dev->fd;
}
/* Report whether the DRM fd opened for this screen refers to a
 * render node (per sna_accel_leave(), such an fd can keep rendering
 * without holding DRM master).
 */
int intel_has_render_node(ScrnInfoPtr scrn)
{
	struct stat st;
	struct intel_device *dev = intel_device(scrn);

	assert(dev && dev->fd != -1);

	return is_render_node(dev->fd, &st);
}
int intel_get_device(ScrnInfoPtr scrn)
{
struct intel_device *dev;

View File

@ -126,6 +126,7 @@ int intel_open_device(int entity_num,
const struct pci_device *pci,
struct xf86_platform_device *dev);
int __intel_peek_fd(ScrnInfoPtr scrn);
int intel_has_render_node(ScrnInfoPtr scrn);
int intel_get_device(ScrnInfoPtr scrn);
const char *intel_get_client_name(ScrnInfoPtr scrn);
int intel_get_client_fd(ScrnInfoPtr scrn);

View File

@ -3378,7 +3378,7 @@ void _kgem_submit(struct kgem *kgem)
assert(kgem->next_request != NULL);
}
/* Locate the kernel's GPU error-state dump for our device, probing
 * each known sysfs/debugfs location in turn.
 *
 * On success, the readable path is left in path[] and true is
 * returned. On failure, false is returned and path[] holds the last
 * candidate tried (callers should ignore it).
 *
 * Fix: the scraped diff had interleaved the old (void) and new (bool)
 * versions of this function — duplicate signatures, a dead bare
 * `return;` before each `return true;`, and a stray `path[0]='\0';`
 * after `return false;`. This is the coherent post-change version.
 */
static bool find_hang_state(struct kgem *kgem, char *path, int maxlen)
{
	int minor = kgem_get_minor(kgem);

	snprintf(path, maxlen, "/sys/class/drm/card%d/error", minor);
	if (access(path, R_OK) == 0)
		return true;

	snprintf(path, maxlen, "/sys/kernel/debug/dri/%d/i915_error_state", minor);
	if (access(path, R_OK) == 0)
		return true;

	snprintf(path, maxlen, "/debug/dri/%d/i915_error_state", minor);
	if (access(path, R_OK) == 0)
		return true;

	return false;
}
@ -3409,21 +3410,27 @@ void kgem_throttle(struct kgem *kgem)
kgem->wedged = __kgem_throttle(kgem, true);
if (kgem->wedged) {
static int once;
char path[128];
find_hang_state(kgem, path, sizeof(path));
xf86DrvMsg(kgem_get_screen_index(kgem), X_ERROR,
"Detected a hung GPU, disabling acceleration.\n");
if (*path != '\0')
if (!once && find_hang_state(kgem, path, sizeof(path))) {
xf86DrvMsg(kgem_get_screen_index(kgem), X_ERROR,
"When reporting this, please include %s and the full dmesg.\n",
path);
once = 1;
}
kgem->need_throttle = false;
}
}
/* Return the current wedged state as reported by __kgem_throttle(). */
int kgem_is_wedged(struct kgem *kgem)
{
	const int wedged = __kgem_throttle(kgem, true);

	return wedged;
}
static void kgem_purge_cache(struct kgem *kgem)
{
struct kgem_bo *bo, *next;

View File

@ -121,7 +121,7 @@ enum {
struct kgem {
int fd;
int wedged;
unsigned wedged;
unsigned gen;
uint32_t unique_id;
@ -761,6 +761,7 @@ struct kgem_bo *kgem_create_buffer_2d(struct kgem *kgem,
bool kgem_buffer_is_inplace(struct kgem_bo *bo);
void kgem_buffer_read_sync(struct kgem *kgem, struct kgem_bo *bo);
int kgem_is_wedged(struct kgem *kgem);
void kgem_throttle(struct kgem *kgem);
#define MAX_INACTIVE_TIME 10
bool kgem_expire_cache(struct kgem *kgem);

View File

@ -995,6 +995,8 @@ void sna_accel_block_handler(struct sna *sna, struct timeval **tv);
void sna_accel_wakeup_handler(struct sna *sna);
void sna_accel_watch_flush(struct sna *sna, int enable);
void sna_accel_flush(struct sna *sna);
void sna_accel_enter(struct sna *sna);
void sna_accel_leave(struct sna *sna);
void sna_accel_close(struct sna *sna);
void sna_accel_free(struct sna *sna);

View File

@ -17790,6 +17790,32 @@ void sna_accel_watch_flush(struct sna *sna, int enable)
sna->watch_flush += enable;
}
/* Called when relinquishing the VT: flush queued rendering and, if our
 * fd will lose its DRM authorisation, disable further rendering by
 * setting bit 2 of kgem.wedged (sna_accel_enter() clears it again).
 */
void sna_accel_leave(struct sna *sna)
{
	DBG(("%s\n", __FUNCTION__));

	/* Root keeps permission to render across the switch, as does a
	 * render-node fd; only an unprivileged master fd loses access.
	 */
	if (geteuid() == 0 || intel_has_render_node(sna->scrn))
		return;

	DBG(("%s: dropping render privileges\n", __FUNCTION__));

	/* Submit anything outstanding before the fd becomes unusable */
	kgem_submit(&sna->kgem);
	sna->kgem.wedged |= 2;
}
/* Called when regaining the VT: re-enable rendering that
 * sna_accel_leave() disabled, then re-probe the GPU.
 *
 * NOTE(review): the mask unconditionally clears bit 2 (the
 * "unauthorised" flag set by sna_accel_leave()); presumably
 * kgem_is_wedged() returns 0/1, so only a genuine GPU hang (bit 0)
 * can survive this AND — confirm against __kgem_throttle() in kgem.c.
 */
void sna_accel_enter(struct sna *sna)
{
DBG(("%s\n", __FUNCTION__));
/* drop the self-imposed wedge; keep only a real hang, if any */
sna->kgem.wedged &= kgem_is_wedged(&sna->kgem);
/* re-check the device so a hung GPU stays disabled */
kgem_throttle(&sna->kgem);
}
void sna_accel_close(struct sna *sna)
{
DBG(("%s\n", __FUNCTION__));

View File

@ -892,10 +892,12 @@ static void sna_uevent_fini(struct sna *sna) { }
static void sna_leave_vt(VT_FUNC_ARGS_DECL)
{
SCRN_INFO_PTR(arg);
struct sna *sna = to_sna(scrn);
DBG(("%s\n", __FUNCTION__));
sna_mode_reset(to_sna(scrn));
sna_accel_leave(sna);
sna_mode_reset(sna);
if (intel_put_master(scrn))
xf86DrvMsg(scrn->scrnIndex, X_WARNING,
@ -1208,7 +1210,13 @@ static Bool sna_enter_vt(VT_FUNC_ARGS_DECL)
sna->flags &= ~SNA_REPROBE;
}
return sna_set_desired_mode(sna);
if (!sna_set_desired_mode(sna)) {
intel_put_master(scrn);
return FALSE;
}
sna_accel_enter(sna);
return TRUE;
}
static Bool sna_switch_mode(SWITCH_MODE_ARGS_DECL)