When removing the BKL the locking around lastclose() was rearranged,
with the result that the open_count spinlock was held over the call
into drm_lastclose(). The drivers were not ready for this path to be
atomic - it may indeed involve long waits to release old objects and
clean up the GPU - and so we ended up scheduling whilst atomic (a
minimal sketch of the offending pattern follows the trace below).
[ 54.625598] BUG: scheduling while atomic: X/3546/0x00000002
[ 54.625600] Modules linked in: sco bridge stp llc input_polldev rfcomm bnep l2cap crc16 sch_sfq ipv6 md_mod acpi_cpufreq mperf cryptd aes_x86_64 aes_generic xts gf128mul dm_crypt dm_mod btusb bluetooth usbhid hid zaurus cdc_ether usbnet mii cdc_wdm cdc_acm uvcvideo videodev v4l1_compat v4l2_compat_ioctl32 snd_hda_codec_conexant arc4 pcmcia ecb snd_hda_intel joydev sdhci_pci sdhci snd_hda_codec tpm_tis firewire_ohci mmc_core e1000e uhci_hcd thinkpad_acpi nvram yenta_socket pcmcia_rsrc pcmcia_core tpm wmi sr_mod firewire_core iwlagn ehci_hcd snd_hwdep snd_pcm usbcore tpm_bios thermal led_class snd_timer iwlcore snd soundcore ac snd_page_alloc pcspkr psmouse serio_raw battery sg mac80211 evdev cfg80211 i2c_i801 iTCO_wdt iTCO_vendor_support cdrom processor crc_itu_t rfkill xfs exportfs sd_mod crc_t10dif ahci libahci libata scsi_mod [last unloaded: scsi_wait_scan]
[ 54.625663] Pid: 3546, comm: X Not tainted 2.6.35-04771-g1787985 #301
[ 54.625665] Call Trace:
[ 54.625671] [<ffffffff8102d599>] __schedule_bug+0x57/0x5c
[ 54.625675] [<ffffffff81384141>] schedule+0xe5/0x832
[ 54.625679] [<ffffffff81163e77>] ? put_dec+0x20/0x3c
[ 54.625682] [<ffffffff81384dd4>] schedule_timeout+0x275/0x29f
[ 54.625686] [<ffffffff810455e1>] ? process_timeout+0x0/0xb
[ 54.625688] [<ffffffff81384e17>] schedule_timeout_uninterruptible+0x19/0x1b
[ 54.625691] [<ffffffff81045893>] msleep+0x16/0x1d
[ 54.625695] [<ffffffff812a2e53>] i9xx_crtc_dpms+0x273/0x2ae
[ 54.625698] [<ffffffff812a18be>] intel_crtc_dpms+0x28/0xe7
[ 54.625702] [<ffffffff811ec0fa>] drm_helper_disable_unused_functions+0xf0/0x118
[ 54.625705] [<ffffffff811ecde3>] drm_crtc_helper_set_config+0x644/0x7c8
[ 54.625708] [<ffffffff811f12dd>] ? drm_copy_field+0x40/0x50
[ 54.625711] [<ffffffff811ebca2>] drm_fb_helper_force_kernel_mode+0x3e/0x85
[ 54.625713] [<ffffffff811ebcf2>] drm_fb_helper_restore+0x9/0x24
[ 54.625717] [<ffffffff81290a41>] i915_driver_lastclose+0x2b/0x5c
[ 54.625720] [<ffffffff811f14a7>] drm_lastclose+0x44/0x2ad
[ 54.625722] [<ffffffff811f1ed2>] drm_release+0x5c6/0x609
[ 54.625726] [<ffffffff810d1275>] fput+0x109/0x1c7
[ 54.625728] [<ffffffff810ce5e4>] filp_close+0x61/0x6b
[ 54.625731] [<ffffffff810ce680>] sys_close+0x92/0xd4
[ 54.625734] [<ffffffff81002a2b>] system_call_fastpath+0x16/0x1b
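The offending pattern boils down to sleeping while a spinlock is held.
A minimal, hypothetical sketch (the lock and function names are made
up, this is not the actual drm code): spin_lock() disables preemption,
and msleep() - reached above via the crtc dpms hook during
drm_lastclose() - then tries to schedule.

/*
 * Hypothetical illustration of "scheduling while atomic": a spinlock
 * puts us in atomic context, and a sleeping call is made before it is
 * released - as happened with open_count held over drm_lastclose().
 */
#include <linux/spinlock.h>
#include <linux/delay.h>

static DEFINE_SPINLOCK(example_lock);

static void buggy_lastclose_path(void)
{
	spin_lock(&example_lock);	/* preemption disabled: atomic context */
	msleep(20);			/* may schedule -> BUG: scheduling while atomic */
	spin_unlock(&example_lock);
}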
v2: The spinlock is actually superfluous as access to open_count is
entirely serialised by drm_global_mutex and so can be dropped. The
count_lock spinlock instead appears to be used to protect access to
dev->buf_alloc and dev->buf_use.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Dave Airlie <airlied@redhat.com>
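The diff below drops count_lock from the open/release paths. As a
rough guide to the resulting flow, here is a simplified sketch of
drm_release() after the change (abridged: per-file teardown and the
ioctl_count busy check are elided) - open_count is serialised by
drm_global_mutex alone, so drm_lastclose() may sleep safely:

int drm_release(struct inode *inode, struct file *filp)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	int retcode = 0;

	mutex_lock(&drm_global_mutex);
	/* ... per-file cleanup elided ... */
	atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
	if (!--dev->open_count)			/* serialised by drm_global_mutex */
		retcode = drm_lastclose(dev);	/* no spinlock held: free to sleep */
	mutex_unlock(&drm_global_mutex);

	return retcode;
}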
 	retcode = drm_open_helper(inode, filp, dev);
 	if (!retcode) {
 		atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
-		spin_lock(&dev->count_lock);
-		if (!dev->open_count++) {
-			spin_unlock(&dev->count_lock);
+		if (!dev->open_count++)
 			retcode = drm_setup(dev);
-			goto out;
-		}
-		spin_unlock(&dev->count_lock);
 	}
-out:
 	if (!retcode) {
 		mutex_lock(&dev->struct_mutex);
 		if (minor->type == DRM_MINOR_LEGACY) {
 	 */
 	atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
-	spin_lock(&dev->count_lock);
 	if (!--dev->open_count) {
 		if (atomic_read(&dev->ioctl_count)) {
 			DRM_ERROR("Device busy: %d\n",
 				  atomic_read(&dev->ioctl_count));
 			retcode = -EBUSY;
-			goto out;
-		}
-		retcode = drm_lastclose(dev);
+		} else
+			retcode = drm_lastclose(dev);
 	}
-out:
-	spin_unlock(&dev->count_lock);
 	mutex_unlock(&drm_global_mutex);
 	return retcode;