isolate - enable device isolation (each device, as far
as possible, will get its own protection
domain)
+++ fullflush - enable flushing of IO/TLB entries when
+++ they are unmapped. Otherwise they are
+++ flushed before they are reused, which
+++ is a lot faster
+++
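The policy these two modes select shows up later in this series as dma_dom->need_flush and amd_iommu_unmap_flush; a rough stand-alone model of it (simplified names, not the driver code) looks like this:

	#include <stdbool.h>
	#include <stdio.h>

	/* Simplified model of the two flushing policies, not the driver code. */
	struct domain {
		bool need_flush;		/* set when a flush was deferred */
	};

	static bool fullflush;			/* "amd_iommu=fullflush" selects this */

	static void unmap_pages(struct domain *dom)
	{
		if (fullflush)
			printf("IO/TLB flushed immediately on unmap\n");
		else
			dom->need_flush = true;	/* flush postponed until reuse */
	}

	static void alloc_pages_in(struct domain *dom)
	{
		if (dom->need_flush) {		/* addresses about to be reused */
			printf("IO/TLB flushed before reuse\n");
			dom->need_flush = false;
		}
	}

	int main(void)
	{
		struct domain dom = { .need_flush = false };

		unmap_pages(&dom);		/* fullflush=0: only marks the domain */
		alloc_pages_in(&dom);		/* the deferred flush happens here */
		return 0;
	}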
amd_iommu_size= [HW,X86-64]
Define the size of the aperture for the AMD IOMMU
driver. Possible values are:
no delay (0).
Format: integer
+ bootmem_debug [KNL] Enable bootmem allocator debug messages.
+
bttv.card= [HW,V4L] bttv (bt848 + bt878 based grabber cards)
bttv.radio= Most important insmod options are available as
kernel args too.
Range: 0 - 8192
Default: 64
--- disable_8254_timer
--- enable_8254_timer
--- [IA32/X86_64] Disable/Enable interrupt 0 timer routing
--- over the 8254 in addition to over the IO-APIC. The
--- kernel tries to set a sensible default.
---
hpet= [X86-32,HPET] option to control HPET usage
Format: { enable (default) | disable | force }
disable: disable HPET and use PIT instead
* [no]ncq: Turn on or off NCQ.
+ * nohrst, nosrst, norst: suppress hard, soft
+ and both resets.
+
If there are multiple matching configurations changing
the same attribute, the last one is used.
shapers= [NET]
Maximal number of shapers.
+++ show_msr= [x86] show boot-time MSR settings
+++ Format: { <integer> }
+++ Show boot-time (BIOS-initialized) MSR settings.
+++ The parameter means the number of CPUs to show,
+++ for example 1 means boot CPU only.
+++
sim710= [SCSI,HW]
See header of drivers/scsi/sim710.c.
L: linux-aio@kvack.org
S: Supported
- ABIT UGURU HARDWARE MONITOR DRIVER
+ ABIT UGURU 1,2 HARDWARE MONITOR DRIVER
P: Hans de Goede
M: j.w.r.degoede@hhs.nl
L: lm-sensors@lm-sensors.org
S: Maintained
+ ABIT UGURU 3 HARDWARE MONITOR DRIVER
+ P: Alistair John Strachan
+ M: alistair@devzero.co.uk
+ L: lm-sensors@lm-sensors.org
+ S: Maintained
+
ACENIC DRIVER
P: Jes Sorensen
M: jes@trained-monkey.org
S: Maintained
ACPI
- P: Andi Kleen
- M: ak@linux.intel.com
+ P: Len Brown
M: lenb@kernel.org
L: linux-acpi@vger.kernel.org
W: http://www.lesswatts.org/projects/acpi/
S: Supported
ACPI WMI DRIVER
--P: Carlos Corbacho
--M: carlos@strangeworlds.co.uk
--L: linux-acpi@vger.kernel.org
--W: http://www.lesswatts.org/projects/acpi/
--S: Maintained
++P: Carlos Corbacho
++M: carlos@strangeworlds.co.uk
++L: linux-acpi@vger.kernel.org
++W: http://www.lesswatts.org/projects/acpi/
++S: Maintained
AD1889 ALSA SOUND DRIVER
--P: Kyle McMartin
--M: kyle@mcmartin.ca
--P: Thibaut Varene
--M: T-Bone@parisc-linux.org
--W: http://wiki.parisc-linux.org/AD1889
--L: linux-parisc@vger.kernel.org
--S: Maintained
++P: Kyle McMartin
++M: kyle@mcmartin.ca
++P: Thibaut Varene
++M: T-Bone@parisc-linux.org
++W: http://wiki.parisc-linux.org/AD1889
++L: linux-parisc@vger.kernel.org
++S: Maintained
ADM1025 HARDWARE MONITOR DRIVER
P: Jean Delvare
P: Joerg Roedel
M: joerg.roedel@amd.com
L: iommu@lists.linux-foundation.org
+++ T: git://git.kernel.org/pub/scm/linux/kernel/git/joro/linux-2.6-iommu.git
S: Supported
AMS (Apple Motion Sensor) DRIVER
W: http://www.canb.auug.org.au/~sfr/
S: Supported
+ APPLE BCM5974 MULTITOUCH DRIVER
+ P: Henrik Rydberg
+ M: rydberg@euromail.se
+ L: linux-input@vger.kernel.org
+ S: Maintained
+
APPLE SMC DRIVER
P: Nicolas Boichat
M: nicolas@boichat.ch
S: Maintained
ARM/ATMEL AT91RM9200 ARM ARCHITECTURE
--P: Andrew Victor
--M: linux@maxim.org.za
--L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
--W: http://maxim.org.za/at91_26.html
--S: Maintained
++P: Andrew Victor
++M: linux@maxim.org.za
++L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
++W: http://maxim.org.za/at91_26.html
++S: Maintained
ARM/CIRRUS LOGIC EP93XX ARM ARCHITECTURE
P: Lennert Buytenhek
S: Maintained
ARM/HP JORNADA 7XX MACHINE SUPPORT
--P: Kristoffer Ericson
--M: kristoffer.ericson@gmail.com
--W: www.jlime.com
--S: Maintained
++P: Kristoffer Ericson
++M: kristoffer.ericson@gmail.com
++W: www.jlime.com
++S: Maintained
ARM/INTEL IOP32X ARM ARCHITECTURE
P: Lennert Buytenhek
M: syrjala@sci.fi
S: Maintained
- ATL1 ETHERNET DRIVER
+ ATLX ETHERNET DRIVERS
P: Jay Cliburn
M: jcliburn@gmail.com
P: Chris Snook
M: csnook@redhat.com
+ P: Jie Yang
+ M: jie.yang@atheros.com
L: atl1-devel@lists.sourceforge.net
W: http://sourceforge.net/projects/atl1
W: http://atl1.sourceforge.net
L: linux-mtd@lists.infradead.org
S: Maintained
- BLUETOOTH SUBSYSTEM
+ BLUETOOTH DRIVERS
P: Marcel Holtmann
M: marcel@holtmann.org
- P: Maxim Krasnyansky
- M: maxk@qualcomm.com
L: linux-bluetooth@vger.kernel.org
- W: http://bluez.sf.net
- W: http://www.bluez.org
- W: http://www.holtmann.org/linux/bluetooth/
- T: git kernel.org:/pub/scm/linux/kernel/git/holtmann/bluetooth-2.6.git
- S: Maintained
-
- BLUETOOTH RFCOMM LAYER
- P: Marcel Holtmann
- M: marcel@holtmann.org
- P: Maxim Krasnyansky
- M: maxk@qualcomm.com
- S: Maintained
-
- BLUETOOTH BNEP LAYER
- P: Marcel Holtmann
- M: marcel@holtmann.org
- P: Maxim Krasnyansky
- M: maxk@qualcomm.com
- S: Maintained
-
- BLUETOOTH CMTP LAYER
- P: Marcel Holtmann
- M: marcel@holtmann.org
- S: Maintained
-
- BLUETOOTH HIDP LAYER
- P: Marcel Holtmann
- M: marcel@holtmann.org
- S: Maintained
-
- BLUETOOTH HCI UART DRIVER
- P: Marcel Holtmann
- M: marcel@holtmann.org
- P: Maxim Krasnyansky
- M: maxk@qualcomm.com
- S: Maintained
-
- BLUETOOTH HCI USB DRIVER
- P: Marcel Holtmann
- M: marcel@holtmann.org
- P: Maxim Krasnyansky
- M: maxk@qualcomm.com
- S: Maintained
-
- BLUETOOTH HCI BCM203X DRIVER
- P: Marcel Holtmann
- M: marcel@holtmann.org
- S: Maintained
-
- BLUETOOTH HCI BPA10X DRIVER
- P: Marcel Holtmann
- M: marcel@holtmann.org
- S: Maintained
-
- BLUETOOTH HCI BFUSB DRIVER
- P: Marcel Holtmann
- M: marcel@holtmann.org
- S: Maintained
-
- BLUETOOTH HCI DTL1 DRIVER
- P: Marcel Holtmann
- M: marcel@holtmann.org
- S: Maintained
-
- BLUETOOTH HCI BLUECARD DRIVER
- P: Marcel Holtmann
- M: marcel@holtmann.org
- S: Maintained
-
- BLUETOOTH HCI BT3C DRIVER
- P: Marcel Holtmann
- M: marcel@holtmann.org
+ W: http://www.bluez.org/
S: Maintained
- BLUETOOTH HCI BTUART DRIVER
+ BLUETOOTH SUBSYSTEM
P: Marcel Holtmann
M: marcel@holtmann.org
- S: Maintained
-
- BLUETOOTH HCI VHCI DRIVER
- P: Maxim Krasnyansky
- M: maxk@qualcomm.com
+ L: linux-bluetooth@vger.kernel.org
+ W: http://www.bluez.org/
+ T: git kernel.org:/pub/scm/linux/kernel/git/holtmann/bluetooth-2.6.git
S: Maintained
BONDING DRIVER
S: Maintained
CAFE CMOS INTEGRATED CAMERA CONTROLLER DRIVER
--P: Jonathan Corbet
++P: Jonathan Corbet
M: corbet@lwn.net
L: video4linux-list@redhat.com
S: Maintained
S: Maintained
CPUSETS
---P: Paul Jackson
P: Paul Menage
---M: pj@sgi.com
M: menage@google.com
L: linux-kernel@vger.kernel.org
W: http://www.bullopensource.org/cpuset/
M: Eng.Linux@digi.com
L: Eng.Linux@digi.com
W: http://www.digi.com
--S: Orphaned
++S: Orphan
DIRECTORY NOTIFICATION
P: Stephen Rothwell
S: Supported
DOCUMENTATION (/Documentation directory)
--P: Michael Kerrisk
--M: mtk.manpages@gmail.com
--P: Randy Dunlap
--M: rdunlap@xenotime.net
--L: linux-doc@vger.kernel.org
--S: Maintained
++P: Michael Kerrisk
++M: mtk.manpages@gmail.com
++P: Randy Dunlap
++M: rdunlap@xenotime.net
++L: linux-doc@vger.kernel.org
++S: Maintained
DOUBLETALK DRIVER
P: James R. Van Zandt
DVB SUBSYSTEM AND DRIVERS
P: LinuxTV.org Project
M: v4l-dvb-maintainer@linuxtv.org
--L: linux-dvb@linuxtv.org (subscription required)
++L: linux-dvb@linuxtv.org (subscription required)
W: http://linuxtv.org/
T: git kernel.org:/pub/scm/linux/kernel/git/mchehab/v4l-dvb.git
S: Maintained
EMBEDDED LINUX
P: Paul Gortmaker
M: paul.gortmaker@windriver.com
- P David Woodhouse
+ P: David Woodhouse
M: dwmw2@infradead.org
L: linux-embedded@vger.kernel.org
S: Maintained
P: Rik Faith
M: faith@cs.unc.edu
L: linux-scsi@vger.kernel.org
--S: Odd fixes (e.g., new signatures)
++S: Odd Fixes (e.g., new signatures)
GDT SCSI DISK ARRAY CONTROLLER DRIVER
P: Achim Leubner
HARDWARE MONITORING
L: lm-sensors@lm-sensors.org
W: http://www.lm-sensors.org/
--S: Orphaned
++S: Orphan
HARDWARE RANDOM NUMBER GENERATOR CORE
--S: Orphaned
++S: Orphan
HARD DRIVE ACTIVE PROTECTION SYSTEM (HDAPS) DRIVER
P: Robert Love
I2C/SMBUS STUB DRIVER
P: Mark M. Hoffman
M: mhoffman@lightlink.com
---L: lm-sensors@lm-sensors.org
+++L: i2c@lm-sensors.org
S: Maintained
I2C SUBSYSTEM
P: Sean Hefty
M: sean.hefty@intel.com
P: Hal Rosenstock
--M: hal.rosenstock@gmail.com
++M: hal.rosenstock@gmail.com
L: general@lists.openfabrics.org
W: http://www.openib.org/
T: git kernel.org:/pub/scm/linux/kernel/git/roland/infiniband.git
MARVELL YUKON / SYSKONNECT DRIVER
P: Mirko Lindner
--M: mlindner@syskonnect.de
++M: mlindner@syskonnect.de
P: Ralph Roesler
--M: rroesler@syskonnect.de
--W: http://www.syskonnect.com
--S: Supported
++M: rroesler@syskonnect.de
++W: http://www.syskonnect.com
++S: Supported
MAN-PAGES: MANUAL PAGES FOR LINUX -- Sections 2, 3, 4, 5, and 7
P: Michael Kerrisk
M: mtk.manpages@gmail.com
--W: http://www.kernel.org/doc/man-pages
--S: Supported
++W: http://www.kernel.org/doc/man-pages
+++L: linux-man@vger.kernel.org
++S: Supported
MARVELL LIBERTAS WIRELESS DRIVER
P: Dan Williams
MEGARAID SCSI DRIVERS
P: Neela Syam Kolli
M: megaraidlinux@lsi.com
--S: linux-scsi@vger.kernel.org
++L: linux-scsi@vger.kernel.org
W: http://megaraid.lsilogic.com
S: Maintained
P: David Brownell
M: dbrownell@users.sourceforge.net
L: linux-kernel@vger.kernel.org
--S: Odd fixes
++S: Odd Fixes
MULTISOUND SOUND DRIVER
P: Andrew Veliath
S: Maintained
MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER
--P: Felipe Balbi
--M: felipe.balbi@nokia.com
--L: linux-usb@vger.kernel.org
--S: Maintained
++P: Felipe Balbi
++M: felipe.balbi@nokia.com
++L: linux-usb@vger.kernel.org
++S: Maintained
MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE)
P: Andrew Gallatin
S: Supported
NATSEMI ETHERNET DRIVER (DP8381x)
--P: Tim Hockin
++P: Tim Hockin
M: thockin@hockin.org
S: Maintained
M: aia21@cantab.net
L: linux-ntfs-dev@lists.sourceforge.net
L: linux-kernel@vger.kernel.org
- W: http://linux-ntfs.sf.net/
+ W: http://www.linux-ntfs.org/
T: git kernel.org:/pub/scm/linux/kernel/git/aia21/ntfs-2.6.git
S: Maintained
S: Maintained
OMNIVISION OV7670 SENSOR DRIVER
--P: Jonathan Corbet
++P: Jonathan Corbet
M: corbet@lwn.net
L: video4linux-list@redhat.com
S: Maintained
S: Supported
PCI HOTPLUG CORE
--P: Kristen Carlson Accardi
++P: Kristen Carlson Accardi
M: kristen.c.accardi@intel.com
S: Supported
P: Eric Paris
M: eparis@parisplace.org
L: linux-kernel@vger.kernel.org (kernel issues)
--L: selinux@tycho.nsa.gov (subscribers-only, general discussion)
++L: selinux@tycho.nsa.gov (subscribers-only, general discussion)
W: http://www.nsa.gov/selinux
S: Supported
W: http://linux-visws.sf.net
S: Maintained for 2.6.
+ SGI GRU DRIVER
+ P: Jack Steiner
+ M: steiner@sgi.com
+ S: Maintained
+
+ SGI XP/XPC/XPNET DRIVER
+ P: Dean Nelson
+ M: dcn@sgi.com
+ S: Maintained
+
SIMTEC EB110ATX (Chalice CATS)
P: Ben Dooks
P: Vincent Sanders
SIS 96X I2C/SMBUS DRIVER
P: Mark M. Hoffman
M: mhoffman@lightlink.com
---L: lm-sensors@lm-sensors.org
+++L: i2c@lm-sensors.org
S: Maintained
SIS FRAMEBUFFER DRIVER
S: Maintained
SOC-CAMERA V4L2 SUBSYSTEM
--P: Guennadi Liakhovetski
--M: g.liakhovetski@gmx.de
--L: video4linux-list@redhat.com
--S: Maintained
++P: Guennadi Liakhovetski
++M: g.liakhovetski@gmx.de
++L: video4linux-list@redhat.com
++S: Maintained
SOFTWARE RAID (Multiple Disks) SUPPORT
P: Ingo Molnar
SOUND - SOC LAYER / DYNAMIC AUDIO POWER MANAGEMENT
P: Liam Girdwood
---M: liam.girdwood@wolfsonmicro.com
+++M: lrg@slimlogic.co.uk
P: Mark Brown
M: broonie@opensource.wolfsonmicro.com
T: git opensource.wolfsonmicro.com/linux-2.6-asoc
L: alsa-devel@alsa-project.org (subscribers-only)
+++W: http://alsa-project.org/main/index.php/ASoC
S: Supported
SPI SUBSYSTEM
STARMODE RADIO IP (STRIP) PROTOCOL DRIVER
W: http://mosquitonet.Stanford.EDU/strip.html
--S: Unsupported ?
++S: Orphan
STRADIS MPEG-2 DECODER DRIVER
P: Nathan Laredo
S: Maintained
TI FLASH MEDIA INTERFACE DRIVER
--P: Alex Dubov
--M: oakad@yahoo.com
--S: Maintained
++P: Alex Dubov
++M: oakad@yahoo.com
++S: Maintained
TI OMAP MMC INTERFACE DRIVER
P: Carlos Aguiar, Anderson Briglia and Syed Khasim
- M: linux-omap-open-source@linux.omap.com (subscribers only)
+ M: linux-omap@vger.kernel.org
W: http://linux.omap.com
W: http://www.muru.com/linux/omap/
S: Maintained
P: Pete Zaitcev
M: zaitcev@redhat.com
L: linux-kernel@vger.kernel.org
--L: linux-usb@vger.kernel.org
++L: linux-usb@vger.kernel.org
S: Supported
USB CDC ETHERNET DRIVER
P: Greg Kroah-Hartman
M: greg@kroah.com
--L: linux-usb@vger.kernel.org
++L: linux-usb@vger.kernel.org
S: Maintained
W: http://www.kroah.com/linux-usb/
USB EHCI DRIVER
P: David Brownell
M: dbrownell@users.sourceforge.net
--L: linux-usb@vger.kernel.org
++L: linux-usb@vger.kernel.org
S: Odd Fixes
USB ET61X[12]51 DRIVER
P: Luca Risolia
M: luca.risolia@studio.unibo.it
--L: linux-usb@vger.kernel.org
++L: linux-usb@vger.kernel.org
L: video4linux-list@redhat.com
W: http://www.linux-projects.org
S: Maintained
USB GADGET/PERIPHERAL SUBSYSTEM
P: David Brownell
M: dbrownell@users.sourceforge.net
--L: linux-usb@vger.kernel.org
++L: linux-usb@vger.kernel.org
W: http://www.linux-usb.org/gadget
S: Maintained
USB HID/HIDBP DRIVERS (USB KEYBOARDS, MICE, REMOTE CONTROLS, ...)
P: Jiri Kosina
M: jkosina@suse.cz
--L: linux-usb@vger.kernel.org
++L: linux-usb@vger.kernel.org
T: git kernel.org:/pub/scm/linux/kernel/git/jikos/hid.git
S: Maintained
USB ISP116X DRIVER
P: Olav Kongas
M: ok@artecdesign.ee
--L: linux-usb@vger.kernel.org
++L: linux-usb@vger.kernel.org
S: Maintained
USB KAWASAKI LSI DRIVER
P: Oliver Neukum
M: oliver@neukum.name
--L: linux-usb@vger.kernel.org
++L: linux-usb@vger.kernel.org
S: Maintained
USB MASS STORAGE DRIVER
P: Matthew Dharm
M: mdharm-usb@one-eyed-alien.net
--L: linux-usb@vger.kernel.org
++L: linux-usb@vger.kernel.org
L: usb-storage@lists.one-eyed-alien.net
S: Maintained
W: http://www.one-eyed-alien.net/~mdharm/linux-usb/
USB OHCI DRIVER
P: David Brownell
M: dbrownell@users.sourceforge.net
--L: linux-usb@vger.kernel.org
++L: linux-usb@vger.kernel.org
S: Odd Fixes
USB OPTION-CARD DRIVER
P: Matthias Urlichs
M: smurf@smurf.noris.de
--L: linux-usb@vger.kernel.org
++L: linux-usb@vger.kernel.org
S: Maintained
USB OV511 DRIVER
P: Mark McClelland
M: mmcclell@bigfoot.com
--L: linux-usb@vger.kernel.org
++L: linux-usb@vger.kernel.org
W: http://alpha.dyndns.org/ov511/
S: Maintained
USB PEGASUS DRIVER
P: Petko Manolov
M: petkan@users.sourceforge.net
--L: linux-usb@vger.kernel.org
++L: linux-usb@vger.kernel.org
L: netdev@vger.kernel.org
W: http://pegasus2.sourceforge.net/
S: Maintained
USB PRINTER DRIVER (usblp)
P: Pete Zaitcev
M: zaitcev@redhat.com
--L: linux-usb@vger.kernel.org
++L: linux-usb@vger.kernel.org
S: Supported
USB RTL8150 DRIVER
P: Petko Manolov
M: petkan@users.sourceforge.net
--L: linux-usb@vger.kernel.org
++L: linux-usb@vger.kernel.org
L: netdev@vger.kernel.org
W: http://pegasus2.sourceforge.net/
S: Maintained
USB SE401 DRIVER
P: Jeroen Vreeken
M: pe1rxq@amsat.org
--L: linux-usb@vger.kernel.org
++L: linux-usb@vger.kernel.org
W: http://www.chello.nl/~j.vreeken/se401/
S: Maintained
USB SERIAL BELKIN F5U103 DRIVER
P: William Greathouse
M: wgreathouse@smva.com
--L: linux-usb@vger.kernel.org
++L: linux-usb@vger.kernel.org
S: Maintained
USB SERIAL CYPRESS M8 DRIVER
P: Lonnie Mendez
M: dignome@gmail.com
--L: linux-usb@vger.kernel.org
++L: linux-usb@vger.kernel.org
S: Maintained
W: http://geocities.com/i0xox0i
W: http://firstlight.net/cvs
P: Peter Berger and Al Borchers
M: pberger@brimson.com
M: alborchers@steinerpoint.com
--L: linux-usb@vger.kernel.org
++L: linux-usb@vger.kernel.org
S: Maintained
USB SERIAL DRIVER
P: Greg Kroah-Hartman
M: gregkh@suse.de
--L: linux-usb@vger.kernel.org
++L: linux-usb@vger.kernel.org
S: Supported
USB SERIAL EMPEG EMPEG-CAR MARK I/II DRIVER
P: Gary Brubaker
M: xavyer@ix.netcom.com
--L: linux-usb@vger.kernel.org
++L: linux-usb@vger.kernel.org
S: Maintained
USB SERIAL KEYSPAN DRIVER
P: Greg Kroah-Hartman
M: greg@kroah.com
--L: linux-usb@vger.kernel.org
++L: linux-usb@vger.kernel.org
W: http://www.kroah.com/linux/
S: Maintained
USB SERIAL WHITEHEAT DRIVER
P: Support Department
M: support@connecttech.com
--L: linux-usb@vger.kernel.org
++L: linux-usb@vger.kernel.org
W: http://www.connecttech.com
S: Supported
USB SN9C1xx DRIVER
P: Luca Risolia
M: luca.risolia@studio.unibo.it
--L: linux-usb@vger.kernel.org
++L: linux-usb@vger.kernel.org
L: video4linux-list@redhat.com
W: http://www.linux-projects.org
S: Maintained
USB SUBSYSTEM
P: Greg Kroah-Hartman
M: gregkh@suse.de
--L: linux-usb@vger.kernel.org
++L: linux-usb@vger.kernel.org
W: http://www.linux-usb.org
T: quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/
S: Supported
USB UHCI DRIVER
P: Alan Stern
M: stern@rowland.harvard.edu
--L: linux-usb@vger.kernel.org
++L: linux-usb@vger.kernel.org
S: Maintained
USB "USBNET" DRIVER FRAMEWORK
USB VIDEO CLASS
P: Laurent Pinchart
M: laurent.pinchart@skynet.be
- L: linx-uvc-devel@berlios.de
+ L: linux-uvc-devel@lists.berlios.de
L: video4linux-list@redhat.com
W: http://linux-uvc.berlios.de
S: Maintained
USB W996[87]CF DRIVER
P: Luca Risolia
M: luca.risolia@studio.unibo.it
--L: linux-usb@vger.kernel.org
++L: linux-usb@vger.kernel.org
L: video4linux-list@redhat.com
W: http://www.linux-projects.org
S: Maintained
USB ZC0301 DRIVER
P: Luca Risolia
M: luca.risolia@studio.unibo.it
--L: linux-usb@vger.kernel.org
++L: linux-usb@vger.kernel.org
L: video4linux-list@redhat.com
W: http://www.linux-projects.org
S: Maintained
USB ZD1201 DRIVER
P: Jeroen Vreeken
M: pe1rxq@amsat.org
--L: linux-usb@vger.kernel.org
++L: linux-usb@vger.kernel.org
W: http://linux-lc100020.sourceforge.net
S: Maintained
USB ZR364XX DRIVER
P: Antoine Jacquet
M: royale@zerezo.com
--L: linux-usb@vger.kernel.org
++L: linux-usb@vger.kernel.org
L: video4linux-list@redhat.com
W: http://royale.zerezo.com/zr364xx/
S: Maintained
select HAVE_FTRACE
select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64)
select HAVE_ARCH_KGDB if !X86_VOYAGER
+++ select HAVE_ARCH_TRACEHOOK
select HAVE_GENERIC_DMA_COHERENT if X86_32
select HAVE_EFFICIENT_UNALIGNED_ACCESS
config AMD_IOMMU
bool "AMD IOMMU support"
select SWIOTLB
+++ select PCI_MSI
depends on X86_64 && PCI && ACPI
help
With this option you can enable support for AMD IOMMU hardware in
config IOMMU_HELPER
def_bool (CALGARY_IOMMU || GART_IOMMU || SWIOTLB || AMD_IOMMU)
+
config MAXSMP
bool "Configure Maximum number of SMP Processors and NUMA Nodes"
- depends on X86_64 && SMP
+ depends on X86_64 && SMP && BROKEN
default n
help
Configure maximum number of CPUs and NUMA Nodes for this architecture.
If unsure, say N.
- if MAXSMP
config NR_CPUS
- int
- default "4096"
- endif
-
- if !MAXSMP
- config NR_CPUS
- int "Maximum number of CPUs (2-4096)"
- range 2 4096
+ int "Maximum number of CPUs (2-512)" if !MAXSMP
+ range 2 512
depends on SMP
+ default "4096" if MAXSMP
default "32" if X86_NUMAQ || X86_SUMMIT || X86_BIGSMP || X86_ES7000
default "8"
help
This allows you to specify the maximum number of CPUs which this
- kernel will support. The maximum supported value is 4096 and the
+ kernel will support. The maximum supported value is 512 and the
minimum value which makes sense is 2.
This is purely to save memory - each supported CPU adds
approximately eight kilobytes to the kernel image.
- endif
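For scale, the help text's figure means that going from the default of 8 CPUs to the new 512-CPU maximum costs roughly (512 - 8) * 8 KB, i.e. about 4 MB of additional kernel image size.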
config SCHED_SMT
bool "SMT (Hyperthreading) scheduler support"
local memory controller of the CPU and add some more
NUMA awareness to the kernel.
- For i386 this is currently highly experimental and should be only
+ For 32-bit this is currently highly experimental and should be only
used for kernel development. It might also cause boot failures.
- For x86_64 this is recommended on all multiprocessor Opteron systems.
+ For 64-bit this is recommended on all multiprocessor Opteron systems.
If the system is EM64T, you should say N unless your system is
EM64T NUMA.
into virtual nodes when booted with "numa=fake=N", where N is the
number of nodes. This is only useful for debugging.
- if MAXSMP
-
config NODES_SHIFT
- int
- default "9"
- endif
-
- if !MAXSMP
- config NODES_SHIFT
- int "Maximum NUMA Nodes (as a power of 2)"
+ int "Maximum NUMA Nodes (as a power of 2)" if !MAXSMP
range 1 9 if X86_64
+ default "9" if MAXSMP
default "6" if X86_64
default "4" if X86_NUMAQ
default "3"
help
Specify the maximum number of NUMA Nodes available on the target
system. Increases memory reserved to accommodate various tables.
- endif
config HAVE_ARCH_BOOTMEM_NODE
def_bool y
config ARCH_FLATMEM_ENABLE
def_bool y
--- depends on X86_32 && ARCH_SELECT_MEMORY_MODEL && X86_PC && !NUMA
+++ depends on X86_32 && ARCH_SELECT_MEMORY_MODEL && !NUMA
config ARCH_DISCONTIGMEM_ENABLE
def_bool y
config ARCH_SPARSEMEM_ENABLE
def_bool y
--- depends on X86_64 || NUMA || (EXPERIMENTAL && X86_PC)
+++ depends on X86_64 || NUMA || (EXPERIMENTAL && X86_PC) || X86_GENERICARCH
select SPARSEMEM_STATIC if X86_32
select SPARSEMEM_VMEMMAP_ENABLE if X86_64
You can safely say Y even if your machine doesn't have MTRRs, you'll
just add about 9 KB to your kernel.
--- See <file:Documentation/mtrr.txt> for more information.
+++ See <file:Documentation/x86/mtrr.txt> for more information.
config MTRR_SANITIZER
--- bool
+++ def_bool y
prompt "MTRR cleanup support"
depends on MTRR
help
The largest mtrr entry size for a continuous block can be set with
mtrr_chunk_size.
--- If unsure, say N.
+++ If unsure, say Y.
config MTRR_SANITIZER_ENABLE_DEFAULT
int "MTRR cleanup enable value (0-1)"
config SECCOMP
def_bool y
prompt "Enable seccomp to safely compute untrusted bytecode"
--- depends on PROC_FS
help
This kernel feature is useful for number crunching applications
that may need to compute untrusted bytecode during their
the process as file descriptors supporting the read/write
syscalls, it's possible to isolate those applications in
their own address space using seccomp. Once seccomp is
--- enabled via /proc/<pid>/seccomp, it cannot be disabled
+++ enabled via prctl(PR_SET_SECCOMP), it cannot be disabled
and the task is only allowed to execute a few safe syscalls
defined by each seccomp mode.
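A minimal user-space sketch of the prctl() interface referred to above; in strict mode only read(), write(), exit() and sigreturn() remain callable afterwards:

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/prctl.h>
	#include <sys/syscall.h>
	#include <linux/seccomp.h>

	int main(void)
	{
		if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, 0, 0, 0) != 0) {
			perror("prctl(PR_SET_SECCOMP)");
			return 1;
		}
		/* Any syscall outside the whitelist now kills the task with SIGKILL. */
		write(1, "sandboxed\n", 10);
		syscall(SYS_exit, 0);	/* glibc's _exit() uses exit_group, which is not whitelisted */
		return 0;		/* never reached */
	}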
strongly in flux, so no good recommendation can be made.
config CRASH_DUMP
- bool "kernel crash dumps (EXPERIMENTAL)"
+ bool "kernel crash dumps"
depends on X86_64 || (X86_32 && HIGHMEM)
help
Generate crash dump after being started by kexec.
Don't change this unless you know what you are doing.
config HOTPLUG_CPU
--- bool "Support for suspend on SMP and hot-pluggable CPUs (EXPERIMENTAL)"
--- depends on SMP && HOTPLUG && EXPERIMENTAL && !X86_VOYAGER
+++ bool "Support for hot-pluggable CPUs"
+++ depends on SMP && HOTPLUG && !X86_VOYAGER
---help---
--- Say Y here to experiment with turning CPUs off and on, and to
--- enable suspend on SMP systems. CPUs can be controlled through
--- /sys/devices/system/cpu.
--- Say N if you want to disable CPU hotplug and don't need to
--- suspend.
+++ Say Y here to allow turning CPUs off and on. CPUs can be
+++ controlled through /sys/devices/system/cpu.
+++ ( Note: power management support will enable this option
+++ automatically on SMP systems. )
+++ Say N if you want to disable CPU hotplug.
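As a usage note, an individual CPU is taken offline or brought back by writing '0' or '1' to its sysfs online file; a minimal sketch, assuming root privileges and that cpu1 exists:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* Writing "0" offlines the CPU, "1" brings it back online. */
		int fd = open("/sys/devices/system/cpu/cpu1/online", O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (write(fd, "0", 1) != 1)
			perror("write");
		close(fd);
		return 0;
	}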
config COMPAT_VDSO
def_bool y
If unsure, say Y.
+++config CMDLINE_BOOL
+++ bool "Built-in kernel command line"
+++ default n
+++ help
+++ Allow for specifying boot arguments to the kernel at
+++ build time. On some systems (e.g. embedded ones), it is
+++ necessary or convenient to provide some or all of the
+++ kernel boot arguments with the kernel itself (that is,
+++ to not rely on the boot loader to provide them).
+++
+++ To compile command line arguments into the kernel,
+++ set this option to 'Y', then fill in the
+++ boot arguments in CONFIG_CMDLINE.
+++
+++ Systems with fully functional boot loaders (i.e. non-embedded)
+++ should leave this option set to 'N'.
+++
+++config CMDLINE
+++ string "Built-in kernel command string"
+++ depends on CMDLINE_BOOL
+++ default ""
+++ help
+++ Enter arguments here that should be compiled into the kernel
+++ image and used at boot time. If the boot loader provides a
+++ command line at boot time, it is appended to this string to
+++ form the full kernel command line when the system boots.
+++
+++ However, you can use the CONFIG_CMDLINE_OVERRIDE option to
+++ change this behavior.
+++
+++ In most cases, the command line (whether built-in or provided
+++ by the boot loader) should specify the device for the root
+++ file system.
+++
+++config CMDLINE_OVERRIDE
+++ bool "Built-in command line overrides boot loader arguments"
+++ default n
+++ depends on CMDLINE_BOOL
+++ help
+++ Set this option to 'Y' to have the kernel ignore the boot loader
+++ command line, and use ONLY the built-in command line.
+++
+++ This is used to work around broken boot loaders. This should
+++ be set to 'N' under normal conditions.
+++
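A simplified model of the combining rule described above (illustrative names only, not the actual setup code): the built-in string comes first, and the boot-loader string is appended unless CMDLINE_OVERRIDE is set.

	#include <stdio.h>
	#include <string.h>

	#define CMDLINE_OVERRIDE 0	/* stands in for CONFIG_CMDLINE_OVERRIDE */

	/* Illustrative only: combine the built-in and boot-loader command lines. */
	static void build_cmdline(char *out, size_t len,
				  const char *builtin, const char *bootloader)
	{
		snprintf(out, len, "%s", builtin);
		if (!CMDLINE_OVERRIDE && bootloader[0]) {
			strncat(out, " ", len - strlen(out) - 1);
			strncat(out, bootloader, len - strlen(out) - 1);
		}
	}

	int main(void)
	{
		char cmdline[256];

		build_cmdline(cmdline, sizeof(cmdline),
			      "console=ttyS0,115200 root=/dev/sda1",	/* CONFIG_CMDLINE */
			      "quiet");					/* from the boot loader */
		printf("%s\n", cmdline);	/* built-in first, boot-loader appended */
		return 0;
	}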
endmenu
config ARCH_ENABLE_MEMORY_HOTPLUG
config SYSVIPC_COMPAT
def_bool y
--- depends on X86_64 && COMPAT && SYSVIPC
+++ depends on COMPAT && SYSVIPC
endmenu
static DEFINE_RWLOCK(amd_iommu_devtable_lock);
+++ /* A list of preallocated protection domains */
+++ static LIST_HEAD(iommu_pd_list);
+++ static DEFINE_SPINLOCK(iommu_pd_list_lock);
+++
/*
* general struct to manage commands send to an IOMMU
*/
return iommu->cap & IOMMU_CAP_NPCACHE;
}
+++ /****************************************************************************
+++ *
+++ * Interrupt handling functions
+++ *
+++ ****************************************************************************/
+++
+++ static void iommu_print_event(void *__evt)
+++ {
+++ u32 *event = __evt;
+++ int type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
+++ int devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
+++ int domid = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
+++ int flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
+++ u64 address = (u64)(((u64)event[3]) << 32) | event[2];
+++
+++ printk(KERN_ERR "AMD IOMMU: Event logged [");
+++
+++ switch (type) {
+++ case EVENT_TYPE_ILL_DEV:
+++ printk("ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x "
+++ "address=0x%016llx flags=0x%04x]\n",
+++ PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+++ address, flags);
+++ break;
+++ case EVENT_TYPE_IO_FAULT:
+++ printk("IO_PAGE_FAULT device=%02x:%02x.%x "
+++ "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
+++ PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+++ domid, address, flags);
+++ break;
+++ case EVENT_TYPE_DEV_TAB_ERR:
+++ printk("DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
+++ "address=0x%016llx flags=0x%04x]\n",
+++ PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+++ address, flags);
+++ break;
+++ case EVENT_TYPE_PAGE_TAB_ERR:
+++ printk("PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
+++ "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
+++ PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+++ domid, address, flags);
+++ break;
+++ case EVENT_TYPE_ILL_CMD:
+++ printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address);
+++ break;
+++ case EVENT_TYPE_CMD_HARD_ERR:
+++ printk("COMMAND_HARDWARE_ERROR address=0x%016llx "
+++ "flags=0x%04x]\n", address, flags);
+++ break;
+++ case EVENT_TYPE_IOTLB_INV_TO:
+++ printk("IOTLB_INV_TIMEOUT device=%02x:%02x.%x "
+++ "address=0x%016llx]\n",
+++ PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+++ address);
+++ break;
+++ case EVENT_TYPE_INV_DEV_REQ:
+++ printk("INVALID_DEVICE_REQUEST device=%02x:%02x.%x "
+++ "address=0x%016llx flags=0x%04x]\n",
+++ PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+++ address, flags);
+++ break;
+++ default:
+++ printk(KERN_ERR "UNKNOWN type=0x%02x]\n", type);
+++ }
+++ }
+++
+++ static void iommu_poll_events(struct amd_iommu *iommu)
+++ {
+++ u32 head, tail;
+++ unsigned long flags;
+++
+++ spin_lock_irqsave(&iommu->lock, flags);
+++
+++ head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
+++ tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
+++
+++ while (head != tail) {
+++ iommu_print_event(iommu->evt_buf + head);
+++ head = (head + EVENT_ENTRY_SIZE) % iommu->evt_buf_size;
+++ }
+++
+++ writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
+++
+++ spin_unlock_irqrestore(&iommu->lock, flags);
+++ }
+++
+++ irqreturn_t amd_iommu_int_handler(int irq, void *data)
+++ {
+++ struct amd_iommu *iommu;
+++
+++ list_for_each_entry(iommu, &amd_iommu_list, list)
+++ iommu_poll_events(iommu);
+++
+++ return IRQ_HANDLED;
+++ }
+++
/****************************************************************************
*
* IOMMU command queuing functions
u8 *target;
tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
- target = (iommu->cmd_buf + tail);
+ target = iommu->cmd_buf + tail;
memcpy_toio(target, cmd, sizeof(*cmd));
tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;
head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
*/
static int iommu_completion_wait(struct amd_iommu *iommu)
{
--- int ret, ready = 0;
+++ int ret = 0, ready = 0;
unsigned status = 0;
struct iommu_cmd cmd;
--- unsigned long i = 0;
+++ unsigned long flags, i = 0;
memset(&cmd, 0, sizeof(cmd));
cmd.data[0] = CMD_COMPL_WAIT_INT_MASK;
iommu->need_sync = 0;
--- ret = iommu_queue_command(iommu, &cmd);
+++ spin_lock_irqsave(&iommu->lock, flags);
+++
+++ ret = __iommu_queue_command(iommu, &cmd);
if (ret)
--- return ret;
+++ goto out;
while (!ready && (i < EXIT_LOOP_COUNT)) {
++i;
if (unlikely((i == EXIT_LOOP_COUNT) && printk_ratelimit()))
printk(KERN_WARNING "AMD IOMMU: Completion wait loop failed\n");
+++out:
+++ spin_unlock_irqrestore(&iommu->lock, flags);
return 0;
}
static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid)
{
struct iommu_cmd cmd;
+++ int ret;
BUG_ON(iommu == NULL);
CMD_SET_TYPE(&cmd, CMD_INV_DEV_ENTRY);
cmd.data[0] = devid;
+++ ret = iommu_queue_command(iommu, &cmd);
+++
iommu->need_sync = 1;
--- return iommu_queue_command(iommu, &cmd);
+++ return ret;
}
/*
u64 address, u16 domid, int pde, int s)
{
struct iommu_cmd cmd;
+++ int ret;
memset(&cmd, 0, sizeof(cmd));
address &= PAGE_MASK;
if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
cmd.data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
+++ ret = iommu_queue_command(iommu, &cmd);
+++
iommu->need_sync = 1;
--- return iommu_queue_command(iommu, &cmd);
+++ return ret;
}
/*
return 0;
}
+++ /* Flush the whole IO/TLB for a given protection domain */
+++ static void iommu_flush_tlb(struct amd_iommu *iommu, u16 domid)
+++ {
+++ u64 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
+++
+++ iommu_queue_inv_iommu_pages(iommu, address, domid, 0, 1);
+++ }
+++
/****************************************************************************
*
* The functions below are used to create the page table mappings for
* efficient allocator.
*
****************************************************************************/
--- static unsigned long dma_mask_to_pages(unsigned long mask)
--- {
--- return (mask >> PAGE_SHIFT) +
--- (PAGE_ALIGN(mask & ~PAGE_MASK) >> PAGE_SHIFT);
--- }
/*
* The address allocator core function.
*/
static unsigned long dma_ops_alloc_addresses(struct device *dev,
struct dma_ops_domain *dom,
--- unsigned int pages)
+++ unsigned int pages,
+++ unsigned long align_mask,
+++ u64 dma_mask)
{
--- unsigned long limit = dma_mask_to_pages(*dev->dma_mask);
+++ unsigned long limit;
unsigned long address;
--- unsigned long size = dom->aperture_size >> PAGE_SHIFT;
unsigned long boundary_size;
boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
PAGE_SIZE) >> PAGE_SHIFT;
--- limit = limit < size ? limit : size;
+++ limit = iommu_device_max_index(dom->aperture_size >> PAGE_SHIFT, 0,
+++ dma_mask >> PAGE_SHIFT);
--- if (dom->next_bit >= limit)
+++ if (dom->next_bit >= limit) {
dom->next_bit = 0;
+++ dom->need_flush = true;
+++ }
address = iommu_area_alloc(dom->bitmap, limit, dom->next_bit, pages,
--- 0 , boundary_size, 0);
--- if (address == -1)
+++ 0 , boundary_size, align_mask);
+++ if (address == -1) {
address = iommu_area_alloc(dom->bitmap, limit, 0, pages,
--- 0, boundary_size, 0);
+++ 0, boundary_size, align_mask);
+++ dom->need_flush = true;
+++ }
if (likely(address != -1)) {
dom->next_bit = address + pages;
if (start_page + pages > last_page)
pages = last_page - start_page;
--- set_bit_string(dom->bitmap, start_page, pages);
+++ iommu_area_reserve(dom->bitmap, start_page, pages);
}
static void dma_ops_free_pagetable(struct dma_ops_domain *dma_dom)
dma_dom->bitmap[0] = 1;
dma_dom->next_bit = 0;
+++ dma_dom->need_flush = false;
+++ dma_dom->target_dev = 0xffff;
+++
/* Initialize the exclusion range if necessary */
if (iommu->exclusion_start &&
iommu->exclusion_start < dma_dom->aperture_size) {
u64 pte_root = virt_to_phys(domain->pt_root);
--- pte_root |= (domain->mode & 0x07) << 9;
--- pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | 2;
+++ pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
+++ << DEV_ENTRY_MODE_SHIFT;
+++ pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;
write_lock_irqsave(&amd_iommu_devtable_lock, flags);
--- amd_iommu_dev_table[devid].data[0] = pte_root;
--- amd_iommu_dev_table[devid].data[1] = pte_root >> 32;
+++ amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root);
+++ amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root);
amd_iommu_dev_table[devid].data[2] = domain->id;
amd_iommu_pd_table[devid] = domain;
*
*****************************************************************************/
+++ /*
+++ * This function checks if the driver got a valid device from the caller to
+++ * avoid dereferencing invalid pointers.
+++ */
+++ static bool check_device(struct device *dev)
+++ {
+++ if (!dev || !dev->dma_mask)
+++ return false;
+++
+++ return true;
+++ }
+++
+++ /*
+++ * In this function the list of preallocated protection domains is traversed to
+++ * find the domain for a specific device
+++ */
+++ static struct dma_ops_domain *find_protection_domain(u16 devid)
+++ {
+++ struct dma_ops_domain *entry, *ret = NULL;
+++ unsigned long flags;
+++
+++ if (list_empty(&iommu_pd_list))
+++ return NULL;
+++
+++ spin_lock_irqsave(&iommu_pd_list_lock, flags);
+++
+++ list_for_each_entry(entry, &iommu_pd_list, list) {
+++ if (entry->target_dev == devid) {
+++ ret = entry;
+++ list_del(&ret->list);
+++ break;
+++ }
+++ }
+++
+++ spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
+++
+++ return ret;
+++ }
+++
/*
* In the dma_ops path we only have the struct device. This function
* finds the corresponding IOMMU, the protection domain and the
struct pci_dev *pcidev;
u16 _bdf;
--- BUG_ON(!dev || dev->bus != &pci_bus_type || !dev->dma_mask);
+++ *iommu = NULL;
+++ *domain = NULL;
+++ *bdf = 0xffff;
+++
+++ if (dev->bus != &pci_bus_type)
+++ return 0;
pcidev = to_pci_dev(dev);
_bdf = calc_devid(pcidev->bus->number, pcidev->devfn);
/* device not translated by any IOMMU in the system? */
--- if (_bdf > amd_iommu_last_bdf) {
--- *iommu = NULL;
--- *domain = NULL;
--- *bdf = 0xffff;
+++ if (_bdf > amd_iommu_last_bdf)
return 0;
--- }
*bdf = amd_iommu_alias_table[_bdf];
*iommu = amd_iommu_rlookup_table[*bdf];
if (*iommu == NULL)
return 0;
--- dma_dom = (*iommu)->default_dom;
*domain = domain_for_device(*bdf);
if (*domain == NULL) {
+++ dma_dom = find_protection_domain(*bdf);
+++ if (!dma_dom)
+++ dma_dom = (*iommu)->default_dom;
*domain = &dma_dom->domain;
set_device_domain(*iommu, *domain, *bdf);
printk(KERN_INFO "AMD IOMMU: Using protection domain %d for "
struct dma_ops_domain *dma_dom,
phys_addr_t paddr,
size_t size,
--- int dir)
+++ int dir,
+++ bool align,
+++ u64 dma_mask)
{
dma_addr_t offset = paddr & ~PAGE_MASK;
dma_addr_t address, start;
unsigned int pages;
+++ unsigned long align_mask = 0;
int i;
pages = iommu_num_pages(paddr, size);
paddr &= PAGE_MASK;
--- address = dma_ops_alloc_addresses(dev, dma_dom, pages);
+++ if (align)
+++ align_mask = (1UL << get_order(size)) - 1;
+++
+++ address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask,
+++ dma_mask);
if (unlikely(address == bad_dma_address))
goto out;
}
address += offset;
+++ if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) {
+++ iommu_flush_tlb(iommu, dma_dom->domain.id);
+++ dma_dom->need_flush = false;
+++ } else if (unlikely(iommu_has_npcache(iommu)))
+++ iommu_flush_pages(iommu, dma_dom->domain.id, address, size);
+++
out:
return address;
}
}
dma_ops_free_addresses(dma_dom, dma_addr, pages);
+++
+++ if (amd_iommu_unmap_flush)
+++ iommu_flush_pages(iommu, dma_dom->domain.id, dma_addr, size);
}
/*
struct protection_domain *domain;
u16 devid;
dma_addr_t addr;
+++ u64 dma_mask;
+++
+++ if (!check_device(dev))
+++ return bad_dma_address;
+++
+++ dma_mask = *dev->dma_mask;
get_device_resources(dev, &iommu, &domain, &devid);
return (dma_addr_t)paddr;
spin_lock_irqsave(&domain->lock, flags);
--- addr = __map_single(dev, iommu, domain->priv, paddr, size, dir);
+++ addr = __map_single(dev, iommu, domain->priv, paddr, size, dir, false,
+++ dma_mask);
if (addr == bad_dma_address)
goto out;
--- if (iommu_has_npcache(iommu))
--- iommu_flush_pages(iommu, domain->id, addr, size);
---
--- if (iommu->need_sync)
+++ if (unlikely(iommu->need_sync))
iommu_completion_wait(iommu);
out:
struct protection_domain *domain;
u16 devid;
--- if (!get_device_resources(dev, &iommu, &domain, &devid))
+++ if (!check_device(dev) ||
+++ !get_device_resources(dev, &iommu, &domain, &devid))
/* device not handled by any AMD IOMMU */
return;
__unmap_single(iommu, domain->priv, dma_addr, size, dir);
--- iommu_flush_pages(iommu, domain->id, dma_addr, size);
---
--- if (iommu->need_sync)
+++ if (unlikely(iommu->need_sync))
iommu_completion_wait(iommu);
spin_unlock_irqrestore(&domain->lock, flags);
struct scatterlist *s;
phys_addr_t paddr;
int mapped_elems = 0;
+++ u64 dma_mask;
+++
+++ if (!check_device(dev))
+++ return 0;
+++
+++ dma_mask = *dev->dma_mask;
get_device_resources(dev, &iommu, &domain, &devid);
paddr = sg_phys(s);
s->dma_address = __map_single(dev, iommu, domain->priv,
--- paddr, s->length, dir);
+++ paddr, s->length, dir, false,
+++ dma_mask);
if (s->dma_address) {
s->dma_length = s->length;
mapped_elems++;
} else
goto unmap;
--- if (iommu_has_npcache(iommu))
--- iommu_flush_pages(iommu, domain->id, s->dma_address,
--- s->dma_length);
}
--- if (iommu->need_sync)
+++ if (unlikely(iommu->need_sync))
iommu_completion_wait(iommu);
out:
u16 devid;
int i;
--- if (!get_device_resources(dev, &iommu, &domain, &devid))
+++ if (!check_device(dev) ||
+++ !get_device_resources(dev, &iommu, &domain, &devid))
return;
spin_lock_irqsave(&domain->lock, flags);
for_each_sg(sglist, s, nelems, i) {
__unmap_single(iommu, domain->priv, s->dma_address,
s->dma_length, dir);
--- iommu_flush_pages(iommu, domain->id, s->dma_address,
--- s->dma_length);
s->dma_address = s->dma_length = 0;
}
--- if (iommu->need_sync)
+++ if (unlikely(iommu->need_sync))
iommu_completion_wait(iommu);
spin_unlock_irqrestore(&domain->lock, flags);
struct protection_domain *domain;
u16 devid;
phys_addr_t paddr;
+++ u64 dma_mask = dev->coherent_dma_mask;
+++
+++ if (!check_device(dev))
+++ return NULL;
+
+++ if (!get_device_resources(dev, &iommu, &domain, &devid))
+++ flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
++
+++ flag |= __GFP_ZERO;
virt_addr = (void *)__get_free_pages(flag, get_order(size));
if (!virt_addr)
return 0;
--- memset(virt_addr, 0, size);
paddr = virt_to_phys(virt_addr);
--- get_device_resources(dev, &iommu, &domain, &devid);
---
if (!iommu || !domain) {
*dma_addr = (dma_addr_t)paddr;
return virt_addr;
}
+++ if (!dma_mask)
+++ dma_mask = *dev->dma_mask;
+++
spin_lock_irqsave(&domain->lock, flags);
*dma_addr = __map_single(dev, iommu, domain->priv, paddr,
--- size, DMA_BIDIRECTIONAL);
+++ size, DMA_BIDIRECTIONAL, true, dma_mask);
if (*dma_addr == bad_dma_address) {
free_pages((unsigned long)virt_addr, get_order(size));
goto out;
}
--- if (iommu_has_npcache(iommu))
--- iommu_flush_pages(iommu, domain->id, *dma_addr, size);
---
--- if (iommu->need_sync)
+++ if (unlikely(iommu->need_sync))
iommu_completion_wait(iommu);
out:
/*
* The exported free_coherent function for dma_ops.
--- * FIXME: fix the generic x86 DMA layer so that it actually calls that
--- * function.
*/
static void free_coherent(struct device *dev, size_t size,
void *virt_addr, dma_addr_t dma_addr)
struct protection_domain *domain;
u16 devid;
+++ if (!check_device(dev))
+++ return;
+++
get_device_resources(dev, &iommu, &domain, &devid);
if (!iommu || !domain)
spin_lock_irqsave(&domain->lock, flags);
__unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
--- iommu_flush_pages(iommu, domain->id, dma_addr, size);
--- if (iommu->need_sync)
+++ if (unlikely(iommu->need_sync))
iommu_completion_wait(iommu);
spin_unlock_irqrestore(&domain->lock, flags);
free_pages((unsigned long)virt_addr, get_order(size));
}
+++ /*
+++ * This function is called by the DMA layer to find out if we can handle a
+++ * particular device. It is part of the dma_ops.
+++ */
+++ static int amd_iommu_dma_supported(struct device *dev, u64 mask)
+++ {
+++ u16 bdf;
+++ struct pci_dev *pcidev;
+++
+++ /* No device or no PCI device */
+++ if (!dev || dev->bus != &pci_bus_type)
+++ return 0;
+++
+++ pcidev = to_pci_dev(dev);
+++
+++ bdf = calc_devid(pcidev->bus->number, pcidev->devfn);
+++
+++ /* Out of our scope? */
+++ if (bdf > amd_iommu_last_bdf)
+++ return 0;
+++
+++ return 1;
+++ }
+++
/*
* The function for pre-allocating protection domains.
*
if (!dma_dom)
continue;
init_unity_mappings_for_device(dma_dom, devid);
--- set_device_domain(iommu, &dma_dom->domain, devid);
--- printk(KERN_INFO "AMD IOMMU: Allocated domain %d for device ",
--- dma_dom->domain.id);
--- print_devid(devid, 1);
+++ dma_dom->target_dev = devid;
+++
+++ list_add_tail(&dma_dom->list, &iommu_pd_list);
}
}
.unmap_single = unmap_single,
.map_sg = map_sg,
.unmap_sg = unmap_sg,
+++ .dma_supported = amd_iommu_dma_supported,
};
/*
/* Dummy device used for NULL arguments (normally ISA). Better would
probably be a smaller DMA mask, but this is bug-to-bug compatible
with older i386. */
--- struct device fallback_dev = {
+++ struct device x86_dma_fallback_dev = {
.bus_id = "fallback device",
.coherent_dma_mask = DMA_32BIT_MASK,
--- .dma_mask = &fallback_dev.coherent_dma_mask,
+++ .dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
};
+++ EXPORT_SYMBOL(x86_dma_fallback_dev);
int dma_set_mask(struct device *dev, u64 mask)
{
* using 512M as goal
*/
align = 64ULL<<20;
--- size = round_up(dma32_bootmem_size, align);
+++ size = roundup(dma32_bootmem_size, align);
dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
512ULL<<20);
if (dma32_bootmem_ptr)
EXPORT_SYMBOL(iommu_num_pages);
#endif
+++ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
+++ dma_addr_t *dma_addr, gfp_t flag)
+++ {
+++ unsigned long dma_mask;
+++ struct page *page;
+++ dma_addr_t addr;
+++
+++ dma_mask = dma_alloc_coherent_mask(dev, flag);
+++
+++ flag |= __GFP_ZERO;
+++ again:
+++ page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
+++ if (!page)
+++ return NULL;
+++
+++ addr = page_to_phys(page);
+++ if (!is_buffer_dma_capable(dma_mask, addr, size)) {
+++ __free_pages(page, get_order(size));
+++
+++ if (dma_mask < DMA_32BIT_MASK && !(flag & GFP_DMA)) {
+++ flag = (flag & ~GFP_DMA32) | GFP_DMA;
+++ goto again;
+++ }
+++
+++ return NULL;
+++ }
+++
+++ *dma_addr = addr;
+++ return page_address(page);
+++ }
+++
/*
* See <Documentation/x86_64/boot-options.txt> for the iommu kernel parameter
* documentation.
}
EXPORT_SYMBOL(dma_supported);
--- /* Allocate DMA memory on node near device */
--- static noinline struct page *
--- dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
--- {
--- int node;
---
--- node = dev_to_node(dev);
---
--- return alloc_pages_node(node, gfp, order);
--- }
---
--- /*
--- * Allocate memory for a coherent mapping.
--- */
--- void *
--- dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
--- gfp_t gfp)
--- {
--- struct dma_mapping_ops *ops = get_dma_ops(dev);
--- void *memory = NULL;
--- struct page *page;
--- unsigned long dma_mask = 0;
--- dma_addr_t bus;
--- int noretry = 0;
---
--- /* ignore region specifiers */
--- gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
---
--- if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
--- return memory;
---
--- if (!dev) {
--- dev = &fallback_dev;
--- gfp |= GFP_DMA;
--- }
--- dma_mask = dev->coherent_dma_mask;
--- if (dma_mask == 0)
--- dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;
---
--- /* Device not DMA able */
--- if (dev->dma_mask == NULL)
--- return NULL;
---
--- /* Don't invoke OOM killer or retry in lower 16MB DMA zone */
--- if (gfp & __GFP_DMA)
--- noretry = 1;
---
--- #ifdef CONFIG_X86_64
--- /* Why <=? Even when the mask is smaller than 4GB it is often
--- larger than 16MB and in this case we have a chance of
--- finding fitting memory in the next higher zone first. If
--- not retry with true GFP_DMA. -AK */
--- if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
--- gfp |= GFP_DMA32;
--- if (dma_mask < DMA_32BIT_MASK)
--- noretry = 1;
--- }
--- #endif
---
--- again:
--- page = dma_alloc_pages(dev,
--- noretry ? gfp | __GFP_NORETRY : gfp, get_order(size));
--- if (page == NULL)
--- return NULL;
---
--- {
--- int high, mmu;
--- bus = page_to_phys(page);
--- memory = page_address(page);
--- high = (bus + size) >= dma_mask;
--- mmu = high;
--- if (force_iommu && !(gfp & GFP_DMA))
--- mmu = 1;
--- else if (high) {
--- free_pages((unsigned long)memory,
--- get_order(size));
---
--- /* Don't use the 16MB ZONE_DMA unless absolutely
--- needed. It's better to use remapping first. */
--- if (dma_mask < DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
--- gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
--- goto again;
--- }
---
--- /* Let low level make its own zone decisions */
--- gfp &= ~(GFP_DMA32|GFP_DMA);
---
--- if (ops->alloc_coherent)
--- return ops->alloc_coherent(dev, size,
--- dma_handle, gfp);
--- return NULL;
--- }
---
--- memset(memory, 0, size);
--- if (!mmu) {
--- *dma_handle = bus;
--- return memory;
--- }
--- }
---
--- if (ops->alloc_coherent) {
--- free_pages((unsigned long)memory, get_order(size));
--- gfp &= ~(GFP_DMA|GFP_DMA32);
--- return ops->alloc_coherent(dev, size, dma_handle, gfp);
--- }
---
--- if (ops->map_simple) {
--- *dma_handle = ops->map_simple(dev, virt_to_phys(memory),
--- size,
--- PCI_DMA_BIDIRECTIONAL);
--- if (*dma_handle != bad_dma_address)
--- return memory;
--- }
---
--- if (panic_on_overflow)
--- panic("dma_alloc_coherent: IOMMU overflow by %lu bytes\n",
--- (unsigned long)size);
--- free_pages((unsigned long)memory, get_order(size));
--- return NULL;
--- }
--- EXPORT_SYMBOL(dma_alloc_coherent);
---
--- /*
--- * Unmap coherent memory.
--- * The caller must ensure that the device has finished accessing the mapping.
--- */
--- void dma_free_coherent(struct device *dev, size_t size,
--- void *vaddr, dma_addr_t bus)
--- {
--- struct dma_mapping_ops *ops = get_dma_ops(dev);
---
--- int order = get_order(size);
--- WARN_ON(irqs_disabled()); /* for portability */
--- if (dma_release_from_coherent(dev, order, vaddr))
--- return;
--- if (ops->unmap_single)
--- ops->unmap_single(dev, bus, size, 0);
--- free_pages((unsigned long)vaddr, order);
--- }
--- EXPORT_SYMBOL(dma_free_coherent);
---
static int __init pci_iommu_init(void)
{
calgary_iommu_init();
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <linux/sysdev.h>
+++ #include <linux/io.h>
#include <asm/atomic.h>
--- #include <asm/io.h>
#include <asm/mtrr.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
AGPEXTERN __u32 *agp_gatt_table;
static unsigned long next_bit; /* protected by iommu_bitmap_lock */
--- static int need_flush; /* global flush state. set for each gart wrap */
+++ static bool need_flush; /* global flush state. set for each gart wrap */
-- static unsigned long alloc_iommu(struct device *dev, int size)
++ static unsigned long alloc_iommu(struct device *dev, int size,
++ unsigned long align_mask)
{
unsigned long offset, flags;
unsigned long boundary_size;
base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
PAGE_SIZE) >> PAGE_SHIFT;
-- boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
++ boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1,
PAGE_SIZE) >> PAGE_SHIFT;
spin_lock_irqsave(&iommu_bitmap_lock, flags);
offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
-- size, base_index, boundary_size, 0);
++ size, base_index, boundary_size, align_mask);
if (offset == -1) {
--- need_flush = 1;
+++ need_flush = true;
offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
-- size, base_index, boundary_size, 0);
++ size, base_index, boundary_size,
++ align_mask);
}
if (offset != -1) {
next_bit = offset+size;
if (next_bit >= iommu_pages) {
next_bit = 0;
--- need_flush = 1;
+++ need_flush = true;
}
}
if (iommu_fullflush)
--- need_flush = 1;
+++ need_flush = true;
spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
return offset;
spin_lock_irqsave(&iommu_bitmap_lock, flags);
if (need_flush) {
k8_flush_garts();
--- need_flush = 0;
+++ need_flush = false;
}
spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}
iommu_leak_pages);
for (i = 0; i < iommu_leak_pages; i += 2) {
printk(KERN_DEBUG "%lu: ", iommu_pages-i);
--- printk_address((unsigned long) iommu_leak_tab[iommu_pages-i], 0);
+++ printk_address((unsigned long) iommu_leak_tab[iommu_pages-i],
+++ 0);
printk(KERN_CONT "%c", (i+1)%2 == 0 ? '\n' : ' ');
}
printk(KERN_DEBUG "\n");
static inline int
need_iommu(struct device *dev, unsigned long addr, size_t size)
{
--- u64 mask = *dev->dma_mask;
--- int high = addr + size > mask;
--- int mmu = high;
---
--- if (force_iommu)
--- mmu = 1;
---
--- return mmu;
+++ return force_iommu ||
+++ !is_buffer_dma_capable(*dev->dma_mask, addr, size);
}
static inline int
nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
--- u64 mask = *dev->dma_mask;
--- int high = addr + size > mask;
--- int mmu = high;
---
--- return mmu;
+++ return !is_buffer_dma_capable(*dev->dma_mask, addr, size);
}
/* Map a single continuous physical area into the IOMMU.
* Caller needs to check if the iommu is needed and flush.
*/
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
-- size_t size, int dir)
++ size_t size, int dir, unsigned long align_mask)
{
unsigned long npages = iommu_num_pages(phys_mem, size);
-- unsigned long iommu_page = alloc_iommu(dev, npages);
++ unsigned long iommu_page = alloc_iommu(dev, npages, align_mask);
int i;
if (iommu_page == -1) {
return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}
--- static dma_addr_t
--- gart_map_simple(struct device *dev, phys_addr_t paddr, size_t size, int dir)
--- {
- dma_addr_t map;
- unsigned long align_mask;
-
- align_mask = (1UL << get_order(size)) - 1;
- map = dma_map_area(dev, paddr, size, dir, align_mask);
-- dma_addr_t map = dma_map_area(dev, paddr, size, dir);
---
--- flush_gart();
---
--- return map;
--- }
---
/* Map a single area into the IOMMU */
static dma_addr_t
gart_map_single(struct device *dev, phys_addr_t paddr, size_t size, int dir)
unsigned long bus;
if (!dev)
--- dev = &fallback_dev;
+++ dev = &x86_dma_fallback_dev;
if (!need_iommu(dev, paddr, size))
return paddr;
-- bus = gart_map_simple(dev, paddr, size, dir);
++ bus = dma_map_area(dev, paddr, size, dir, 0);
++ flush_gart();
return bus;
}
unsigned long addr = sg_phys(s);
if (nonforced_iommu(dev, addr, s->length)) {
-- addr = dma_map_area(dev, addr, s->length, dir);
++ addr = dma_map_area(dev, addr, s->length, dir, 0);
if (addr == bad_dma_address) {
if (i > 0)
gart_unmap_sg(dev, sg, i, dir);
int nelems, struct scatterlist *sout,
unsigned long pages)
{
-- unsigned long iommu_start = alloc_iommu(dev, pages);
++ unsigned long iommu_start = alloc_iommu(dev, pages, 0);
unsigned long iommu_page = iommu_start;
struct scatterlist *s;
int i;
return 0;
if (!dev)
--- dev = &fallback_dev;
+++ dev = &x86_dma_fallback_dev;
out = 0;
start = 0;
return 0;
}
+++ /* allocate and map a coherent mapping */
+++ static void *
+++ gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
+++ gfp_t flag)
+++ {
+++ dma_addr_t paddr;
+++ unsigned long align_mask;
+++ struct page *page;
+++
+++ if (force_iommu && !(flag & GFP_DMA)) {
+++ flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
+++ page = alloc_pages(flag | __GFP_ZERO, get_order(size));
+++ if (!page)
+++ return NULL;
+++
+++ align_mask = (1UL << get_order(size)) - 1;
+++ paddr = dma_map_area(dev, page_to_phys(page), size,
+++ DMA_BIDIRECTIONAL, align_mask);
+++
+++ flush_gart();
+++ if (paddr != bad_dma_address) {
+++ *dma_addr = paddr;
+++ return page_address(page);
+++ }
+++ __free_pages(page, get_order(size));
+++ } else
+++ return dma_generic_alloc_coherent(dev, size, dma_addr, flag);
+++
+++ return NULL;
+++ }
+++
+++ /* free a coherent mapping */
+++ static void
+++ gart_free_coherent(struct device *dev, size_t size, void *vaddr,
+++ dma_addr_t dma_addr)
+++ {
+++ gart_unmap_single(dev, dma_addr, size, DMA_BIDIRECTIONAL);
+++ free_pages((unsigned long)vaddr, get_order(size));
+++ }
+++
static int no_agp;
static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
struct pci_dev *dev;
void *gatt;
int i, error;
--- unsigned long start_pfn, end_pfn;
printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
aper_size = aper_base = info->aper_size = 0;
info->aper_size = aper_size >> 20;
gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
--- gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size));
+++ gatt = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+++ get_order(gatt_size));
if (!gatt)
panic("Cannot allocate GATT table");
if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
panic("Could not set GART PTEs to uncacheable pages");
--- memset(gatt, 0, gatt_size);
agp_gatt_table = gatt;
enable_gart_translations();
if (!error)
error = sysdev_register(&device_gart);
if (error)
--- panic("Could not register gart_sysdev -- would corrupt data on next suspend");
+++ panic("Could not register gart_sysdev -- "
+++ "would corrupt data on next suspend");
flush_gart();
printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n",
aper_base, aper_size>>10);
--- /* need to map that range */
--- end_pfn = (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);
--- if (end_pfn > max_low_pfn_mapped) {
--- start_pfn = (aper_base>>PAGE_SHIFT);
--- init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
--- }
return 0;
nommu:
return -1;
}
--- extern int agp_amd64_init(void);
---
static struct dma_mapping_ops gart_dma_ops = {
.map_single = gart_map_single,
--- .map_simple = gart_map_simple,
.unmap_single = gart_unmap_single,
--- .sync_single_for_cpu = NULL,
--- .sync_single_for_device = NULL,
--- .sync_single_range_for_cpu = NULL,
--- .sync_single_range_for_device = NULL,
--- .sync_sg_for_cpu = NULL,
--- .sync_sg_for_device = NULL,
.map_sg = gart_map_sg,
.unmap_sg = gart_unmap_sg,
+++ .alloc_coherent = gart_alloc_coherent,
+++ .free_coherent = gart_free_coherent,
};
void gart_iommu_shutdown(void)
{
struct agp_kern_info info;
unsigned long iommu_start;
--- unsigned long aper_size;
+++ unsigned long aper_base, aper_size;
+++ unsigned long start_pfn, end_pfn;
unsigned long scratch;
long i;
(no_agp && init_k8_gatt(&info) < 0)) {
if (max_pfn > MAX_DMA32_PFN) {
printk(KERN_WARNING "More than 4GB of memory "
--- "but GART IOMMU not available.\n"
--- KERN_WARNING "falling back to iommu=soft.\n");
+++ "but GART IOMMU not available.\n");
+++ printk(KERN_WARNING "falling back to iommu=soft.\n");
}
return;
}
+++ /* need to map that range */
+++ aper_size = info.aper_size << 20;
+++ aper_base = info.aper_base;
+++ end_pfn = (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);
+++ if (end_pfn > max_low_pfn_mapped) {
+++ start_pfn = (aper_base>>PAGE_SHIFT);
+++ init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
+++ }
+++
printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
--- aper_size = info.aper_size * 1024 * 1024;
iommu_size = check_iommu_size(info.aper_base, aper_size);
iommu_pages = iommu_size >> PAGE_SHIFT;
--- iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL,
+++ iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
get_order(iommu_pages/8));
if (!iommu_gart_bitmap)
panic("Cannot allocate iommu bitmap\n");
--- memset(iommu_gart_bitmap, 0, iommu_pages/8);
#ifdef CONFIG_IOMMU_LEAK
if (leak_trace) {
--- iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL,
+++ iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
get_order(iommu_pages*sizeof(void *)));
--- if (iommu_leak_tab)
--- memset(iommu_leak_tab, 0, iommu_pages * 8);
--- else
+++ if (!iommu_leak_tab)
printk(KERN_DEBUG
"PCI-DMA: Cannot allocate leak trace area\n");
}
* Out of IOMMU space handling.
* Reserve some invalid pages at the beginning of the GART.
*/
--- set_bit_string(iommu_gart_bitmap, 0, EMERGENCY_PAGES);
+++ iommu_area_reserve(iommu_gart_bitmap, 0, EMERGENCY_PAGES);
agp_memory_reserved = iommu_size;
printk(KERN_INFO
if (!strncmp(p, "leak", 4)) {
leak_trace = 1;
p += 4;
--- if (*p == '=') ++p;
+++ if (*p == '=')
+++ ++p;
if (isdigit(*p) && get_option(&p, &arg))
iommu_leak_pages = arg;
}
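The fragment above accepts both a bare "leak" flag and an optional page count; the option prefix itself is outside this hunk, so the input string below is illustrative only. A minimal sketch of how the suffix is consumed with get_option():

	/* sketch: values are illustrative */
	char example[] = "leak=32";
	char *p = example + 4;		/* past "leak" */
	int pages;

	if (*p == '=')
		++p;			/* p now points at "32" */
	if (isdigit(*p) && get_option(&p, &pages))
		;			/* pages == 32; a bare "leak" keeps the default */
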
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
---#ifndef _ASM_X86_AMD_IOMMU_H
---#define _ASM_X86_AMD_IOMMU_H
+++#ifndef ASM_X86__AMD_IOMMU_H
+++#define ASM_X86__AMD_IOMMU_H
+++
+++ #include <linux/irqreturn.h>
+++
#ifdef CONFIG_AMD_IOMMU
extern int amd_iommu_init(void);
extern int amd_iommu_init_dma_ops(void);
extern void amd_iommu_detect(void);
+++ extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
#else
static inline int amd_iommu_init(void) { return -ENODEV; }
static inline void amd_iommu_detect(void) { }
#endif
---#endif
+++#endif /* ASM_X86__AMD_IOMMU_H */
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
---#ifndef __AMD_IOMMU_TYPES_H__
---#define __AMD_IOMMU_TYPES_H__
+++#ifndef ASM_X86__AMD_IOMMU_TYPES_H
+++#define ASM_X86__AMD_IOMMU_TYPES_H
#include <linux/types.h>
#include <linux/list.h>
/* Capability offsets used by the driver */
#define MMIO_CAP_HDR_OFFSET 0x00
#define MMIO_RANGE_OFFSET 0x0c
+++ #define MMIO_MISC_OFFSET 0x10
/* Masks, shifts and macros to parse the device range capability */
#define MMIO_RANGE_LD_MASK 0xff000000
#define MMIO_GET_LD(x) (((x) & MMIO_RANGE_LD_MASK) >> MMIO_RANGE_LD_SHIFT)
#define MMIO_GET_FD(x) (((x) & MMIO_RANGE_FD_MASK) >> MMIO_RANGE_FD_SHIFT)
#define MMIO_GET_BUS(x) (((x) & MMIO_RANGE_BUS_MASK) >> MMIO_RANGE_BUS_SHIFT)
+++ #define MMIO_MSI_NUM(x) ((x) & 0x1f)
/* Flag masks for the AMD IOMMU exclusion range */
#define MMIO_EXCL_ENABLE_MASK 0x01ULL
/* MMIO status bits */
#define MMIO_STATUS_COM_WAIT_INT_MASK 0x04
+++ /* event logging constants */
+++ #define EVENT_ENTRY_SIZE 0x10
+++ #define EVENT_TYPE_SHIFT 28
+++ #define EVENT_TYPE_MASK 0xf
+++ #define EVENT_TYPE_ILL_DEV 0x1
+++ #define EVENT_TYPE_IO_FAULT 0x2
+++ #define EVENT_TYPE_DEV_TAB_ERR 0x3
+++ #define EVENT_TYPE_PAGE_TAB_ERR 0x4
+++ #define EVENT_TYPE_ILL_CMD 0x5
+++ #define EVENT_TYPE_CMD_HARD_ERR 0x6
+++ #define EVENT_TYPE_IOTLB_INV_TO 0x7
+++ #define EVENT_TYPE_INV_DEV_REQ 0x8
+++ #define EVENT_DEVID_MASK 0xffff
+++ #define EVENT_DEVID_SHIFT 0
+++ #define EVENT_DOMID_MASK 0xffff
+++ #define EVENT_DOMID_SHIFT 0
+++ #define EVENT_FLAGS_MASK 0xfff
+++ #define EVENT_FLAGS_SHIFT 0x10
+++
/* feature control bits */
#define CONTROL_IOMMU_EN 0x00ULL
#define CONTROL_HT_TUN_EN 0x01ULL
#define DEV_ENTRY_NMI_PASS 0xba
#define DEV_ENTRY_LINT0_PASS 0xbe
#define DEV_ENTRY_LINT1_PASS 0xbf
+++ #define DEV_ENTRY_MODE_MASK 0x07
+++ #define DEV_ENTRY_MODE_SHIFT 0x09
/* constants to configure the command buffer */
#define CMD_BUFFER_SIZE 8192
#define MMIO_CMD_SIZE_SHIFT 56
#define MMIO_CMD_SIZE_512 (0x9ULL << MMIO_CMD_SIZE_SHIFT)
+++ /* constants for event buffer handling */
+++ #define EVT_BUFFER_SIZE 8192 /* 512 entries */
+++ #define EVT_LEN_MASK (0x9ULL << 56)
+++
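The two event-buffer constants above are tied together: an 8192-byte buffer of 16-byte entries holds 512 = 1 << 9 entries, which is the 0x9 length code encoded in EVT_LEN_MASK. A small illustrative check (the function name is hypothetical; BUILD_BUG_ON must sit inside a function):

	/* sketch: relationship between the event-buffer constants */
	static inline void example_check_evt_buf_layout(void)
	{
		/* 8192 / 16 == 512 entries == 1 << 9 */
		BUILD_BUG_ON(EVT_BUFFER_SIZE / EVENT_ENTRY_SIZE != 512);
	}
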
#define PAGE_MODE_1_LEVEL 0x01
#define PAGE_MODE_2_LEVEL 0x02
#define PAGE_MODE_3_LEVEL 0x03
#define IOMMU_MAP_SIZE_L3 (1ULL << 39)
#define IOMMU_PTE_P (1ULL << 0)
+++ #define IOMMU_PTE_TV (1ULL << 1)
#define IOMMU_PTE_U (1ULL << 59)
#define IOMMU_PTE_FC (1ULL << 60)
#define IOMMU_PTE_IR (1ULL << 61)
#define MAX_DOMAIN_ID 65536
+++ /* FIXME: move this macro to <linux/pci.h> */
+++ #define PCI_BUS(x) (((x) >> 8) & 0xff)
+++
/*
* This structure contains generic data for IOMMU protection domains
* independent of their use.
* just calculate its address in constant time.
*/
u64 **pte_pages;
+++
+++ /* This will be set to true when TLB needs to be flushed */
+++ bool need_flush;
+++
+++ /*
+++ * if this is a preallocated domain, keep the device for which it was
+++ * preallocated in this variable
+++ */
+++ u16 target_dev;
};
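A rough sketch of how the need_flush flag added above might be consumed: when amd_iommu_unmap_flush is false, the IOTLB flush is deferred until addresses are about to be reused. Apart from the two flags shown in this file, everything below is assumed (the dom->domain.id access path and the iommu_flush_tlb() helper are illustrative, not quoted from the driver):

	/* sketch only: lazy IOTLB flush before an address range is reused */
	static void example_flush_if_needed(struct amd_iommu *iommu,
					    struct dma_ops_domain *dom)
	{
		if (dom->need_flush && !amd_iommu_unmap_flush) {
			iommu_flush_tlb(iommu, dom->domain.id);	/* assumed helper */
			dom->need_flush = false;
		}
	}
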
/*
/* locks the accesses to the hardware */
spinlock_t lock;
--- /* device id of this IOMMU */
--- u16 devid;
+++ /* Pointer to PCI device of this IOMMU */
+++ struct pci_dev *dev;
+++
/*
* Capability pointer. There could be more than one IOMMU per PCI
* device function if there are more than one AMD IOMMU capability
/* capabilities of that IOMMU read from ACPI */
u32 cap;
+++ /* pci domain of this IOMMU */
+++ u16 pci_seg;
+++
/* first device this IOMMU handles. read from PCI */
u16 first_device;
/* last device this IOMMU handles. read from PCI */
/* size of command buffer */
u32 cmd_buf_size;
+++ /* event buffer virtual address */
+++ u8 *evt_buf;
+++ /* size of event buffer */
+++ u32 evt_buf_size;
+++ /* MSI number for event interrupt */
+++ u16 evt_msi_num;
+++
/* if one, we need to send a completion wait command */
int need_sync;
+++ /* true if interrupts for this IOMMU are already enabled */
+++ bool int_enabled;
+++
/* default dma_ops domain for that IOMMU */
struct dma_ops_domain *default_dom;
};
/* will be 1 if device isolation is enabled */
extern int amd_iommu_isolate;
+++ /*
+++ * If true, the addresses will be flushed on unmap time, not when
+++ * they are reused
+++ */
+++ extern bool amd_iommu_unmap_flush;
+++
/* takes a PCI device id and prints it out in a readable form */
static inline void print_devid(u16 devid, int nl)
{
return (((u16)bus) << 8) | devfn;
}
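The 16-bit device id packs the PCI bus number in the upper byte and devfn in the lower byte, so PCI_BUS() from the hunk above recovers the bus while the standard PCI_SLOT()/PCI_FUNC() macros from <linux/pci.h> split the devfn. A short worked example (the values are made up):

	u16 devid = (((u16)0x08) << 8) | PCI_DEVFN(3, 1);	/* bus 8, slot 3, fn 1 -> 0x0819 */

	u8 bus  = PCI_BUS(devid);		/* 0x08 */
	u8 slot = PCI_SLOT(devid & 0xff);	/* 3 */
	u8 fn   = PCI_FUNC(devid & 0xff);	/* 1 */
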
---#endif
+++#endif /* ASM_X86__AMD_IOMMU_TYPES_H */
---#ifndef _ASM_X86_BITOPS_H
---#define _ASM_X86_BITOPS_H
+++#ifndef ASM_X86__BITOPS_H
+++#define ASM_X86__BITOPS_H
/*
* Copyright 1992, Linus Torvalds.
#undef ADDR
--- static inline void set_bit_string(unsigned long *bitmap,
--- unsigned long i, int len)
--- {
--- unsigned long end = i + len;
--- while (i < end) {
--- __set_bit(i, bitmap);
--- i++;
--- }
--- }
---
#ifdef __KERNEL__
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/minix.h>
#endif /* __KERNEL__ */
---#endif /* _ASM_X86_BITOPS_H */
+++#endif /* ASM_X86__BITOPS_H */
---#ifndef _ASM_DMA_MAPPING_H_
---#define _ASM_DMA_MAPPING_H_
+++#ifndef ASM_X86__DMA_MAPPING_H
+++#define ASM_X86__DMA_MAPPING_H
/*
* IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
#include <linux/scatterlist.h>
#include <asm/io.h>
#include <asm/swiotlb.h>
+++ #include <asm-generic/dma-coherent.h>
extern dma_addr_t bad_dma_address;
extern int iommu_merge;
--- extern struct device fallback_dev;
+++ extern struct device x86_dma_fallback_dev;
extern int panic_on_overflow;
--- extern int force_iommu;
struct dma_mapping_ops {
int (*mapping_error)(struct device *dev,
void *vaddr, dma_addr_t dma_handle);
dma_addr_t (*map_single)(struct device *hwdev, phys_addr_t ptr,
size_t size, int direction);
--- /* like map_single, but doesn't check the device mask */
--- dma_addr_t (*map_simple)(struct device *hwdev, phys_addr_t ptr,
--- size_t size, int direction);
void (*unmap_single)(struct device *dev, dma_addr_t addr,
size_t size, int direction);
void (*sync_single_for_cpu)(struct device *hwdev,
return dma_ops;
else
return dev->archdata.dma_ops;
---#endif
+++#endif /* ASM_X86__DMA_MAPPING_H */
}
/* Make sure we keep the same behaviour */
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
---
--- void *dma_alloc_coherent(struct device *dev, size_t size,
--- dma_addr_t *dma_handle, gfp_t flag);
---
--- void dma_free_coherent(struct device *dev, size_t size,
--- void *vaddr, dma_addr_t dma_handle);
---
+++ #define dma_is_consistent(d, h) (1)
extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);
+++ extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
+++ dma_addr_t *dma_addr, gfp_t flag);
+++
static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
int direction)
return boot_cpu_data.x86_clflush_size;
}
--- #define dma_is_consistent(d, h) (1)
+++ static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
+++ gfp_t gfp)
+++ {
+++ unsigned long dma_mask = 0;
+++
- #include <asm-generic/dma-coherent.h>
- #endif /* ASM_X86__DMA_MAPPING_H */
+++ dma_mask = dev->coherent_dma_mask;
+++ if (!dma_mask)
+++ dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;
+++
+++ return dma_mask;
+++ }
+++
+++ static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
+++ {
+++ #ifdef CONFIG_X86_64
+++ unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);
+++
+++ if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
+++ gfp |= GFP_DMA32;
+++ #endif
+++ return gfp;
+++ }
+++
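A quick illustration of the helper above; the dev pointer and its coherent_dma_mask are assumed, not taken from this hunk:

	/* sketch: effect for a device limited to 32-bit coherent DMA */
	gfp_t flags = dma_alloc_coherent_gfp_flags(dev, GFP_KERNEL);
	/* on x86_64 this adds GFP_DMA32 when the device's coherent_dma_mask
	 * fits in 32 bits, so the allocation comes from memory below 4 GB */
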
+++ static inline void *
+++ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
+++ gfp_t gfp)
+++ {
+++ struct dma_mapping_ops *ops = get_dma_ops(dev);
+++ void *memory;
+++
+++ gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
+++
+++ if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
+++ return memory;
+++
+++ if (!dev) {
+++ dev = &x86_dma_fallback_dev;
+++ gfp |= GFP_DMA;
+++ }
+++
+++ if (!is_device_dma_capable(dev))
+++ return NULL;
+++
+++ if (!ops->alloc_coherent)
+++ return NULL;
+++
+++ return ops->alloc_coherent(dev, size, dma_handle,
+++ dma_alloc_coherent_gfp_flags(dev, gfp));
+++ }
+++
+++ static inline void dma_free_coherent(struct device *dev, size_t size,
+++ void *vaddr, dma_addr_t bus)
+++ {
+++ struct dma_mapping_ops *ops = get_dma_ops(dev);
+++
+++ WARN_ON(irqs_disabled()); /* for portability */
+++
+++ if (dma_release_from_coherent(dev, get_order(size), vaddr))
+++ return;
+++
+++ if (ops->free_coherent)
+++ ops->free_coherent(dev, size, vaddr, bus);
+++ }
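For reference, a driver-side view of the two inlines just added; the function names, the pci_dev pointer and the 4096-byte size are illustrative, not part of this patch:

	/* sketch: a hypothetical driver setting up a coherent ring buffer */
	static int example_setup_ring(struct pci_dev *pdev, void **ring,
				      dma_addr_t *ring_dma)
	{
		*ring = dma_alloc_coherent(&pdev->dev, 4096, ring_dma, GFP_KERNEL);
		if (!*ring)
			return -ENOMEM;
		/* device is programmed with *ring_dma, CPU writes through *ring */
		return 0;
	}

	static void example_free_ring(struct pci_dev *pdev, void *ring,
				      dma_addr_t ring_dma)
	{
		dma_free_coherent(&pdev->dev, 4096, ring, ring_dma);
	}
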
+
-- #include <asm-generic/dma-coherent.h>
+ #endif
---#ifndef _ASM_X8664_GART_H
---#define _ASM_X8664_GART_H 1
+++#ifndef ASM_X86__GART_H
+++#define ASM_X86__GART_H
#include <asm/e820.h>
#define AMD64_GARTCACHECTL 0x9c
#define AMD64_GARTEN (1<<0)
+++ extern int agp_amd64_init(void);
+++
static inline void enable_gart_translation(struct pci_dev *dev, u64 addr)
{
u32 tmp, ctl;
return 0;
if (aper_base + aper_size > 0x100000000ULL) {
--- printk(KERN_ERR "Aperture beyond 4GB. Ignoring.\n");
+++ printk(KERN_INFO "Aperture beyond 4GB. Ignoring.\n");
return 0;
}
if (e820_any_mapped(aper_base, aper_base + aper_size, E820_RAM)) {
--- printk(KERN_ERR "Aperture pointing to e820 RAM. Ignoring.\n");
+++ printk(KERN_INFO "Aperture pointing to e820 RAM. Ignoring.\n");
return 0;
}
if (aper_size < min_size) {
--- printk(KERN_ERR "Aperture too small (%d MB) than (%d MB)\n",
+++ printk(KERN_INFO "Aperture too small (%d MB) than (%d MB)\n",
aper_size>>20, min_size>>20);
return 0;
}
return 1;
}
---#endif
+++#endif /* ASM_X86__GART_H */
---#ifndef _ASM_X8664_IOMMU_H
---#define _ASM_X8664_IOMMU_H 1
+++#ifndef ASM_X86__IOMMU_H
+++#define ASM_X86__IOMMU_H
extern void pci_iommu_shutdown(void);
extern void no_iommu_init(void);
extern struct dma_mapping_ops nommu_dma_ops;
extern int force_iommu, no_iommu;
extern int iommu_detected;
+++ extern int dmar_disabled;
extern unsigned long iommu_num_pages(unsigned long addr, unsigned long len);
}
#endif
---#endif
+++#endif /* ASM_X86__IOMMU_H */