-What: /sys/bus/usb/drivers/usbtmc/devices/*/interface_capabilities
-What: /sys/bus/usb/drivers/usbtmc/devices/*/device_capabilities
+What: /sys/bus/usb/drivers/usbtmc/*/interface_capabilities
+What: /sys/bus/usb/drivers/usbtmc/*/device_capabilities
Date: August 2008
Contact: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Description:
The files are read only.
-What: /sys/bus/usb/drivers/usbtmc/devices/*/usb488_interface_capabilities
-What: /sys/bus/usb/drivers/usbtmc/devices/*/usb488_device_capabilities
+What: /sys/bus/usb/drivers/usbtmc/*/usb488_interface_capabilities
+What: /sys/bus/usb/drivers/usbtmc/*/usb488_device_capabilities
Date: August 2008
Contact: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Description:
The files are read only.
-What: /sys/bus/usb/drivers/usbtmc/devices/*/TermChar
+What: /sys/bus/usb/drivers/usbtmc/*/TermChar
Date: August 2008
Contact: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Description:
sent to the device or not.
-What: /sys/bus/usb/drivers/usbtmc/devices/*/TermCharEnabled
+What: /sys/bus/usb/drivers/usbtmc/*/TermCharEnabled
Date: August 2008
Contact: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Description:
published by the USB-IF.
-What: /sys/bus/usb/drivers/usbtmc/devices/*/auto_abort
+What: /sys/bus/usb/drivers/usbtmc/*/auto_abort
Date: August 2008
Contact: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Description:
--- /dev/null
+What: /sys/block/rssd*/registers
+Date: March 2012
+KernelVersion: 3.3
+Contact: Asai Thambi S P <asamymuthupa@micron.com>
+Description:	This is a read-only file. It dumps the following driver
+		information and hardware registers.
+ - S ACTive
+ - Command Issue
+ - Allocated
+ - Completed
+ - PORT IRQ STAT
+ - HOST IRQ STAT
+
+What: /sys/block/rssd*/status
+Date: April 2012
+KernelVersion: 3.4
+Contact: Asai Thambi S P <asamymuthupa@micron.com>
+Description: This is a read-only file. Indicates the status of the device.
--- /dev/null
+What: /sys/block/<device>/iosched/target_latency
+Date: March 2012
+Contact:	Tao Ma <boyu.mt@taobao.com>
+Description:
+	The /sys/block/<device>/iosched/target_latency file only exists
+	when the user selects cfq in /sys/block/<device>/queue/scheduler.
+	It holds the target latency that cfq uses to calculate the time
+	slice allotted to each task.
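
	For illustration only, a small user-space sketch that reads and then
	sets this tunable (the device name "sda" and the value 300 are
	assumptions, not part of this ABI entry):

	#include <stdio.h>

	int main(void)
	{
		char buf[32];
		FILE *f = fopen("/sys/block/sda/iosched/target_latency", "r");

		if (!f)
			return 1;	/* cfq is not the active scheduler */
		if (fgets(buf, sizeof(buf), f))
			printf("target_latency: %s", buf);
		fclose(f);

		f = fopen("/sys/block/sda/iosched/target_latency", "w");
		if (f) {
			fprintf(f, "300\n");	/* assumed example value */
			fclose(f);
		}
		return 0;
	}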
Features:
- accounting anonymous pages, file caches, swap caches usage and limiting them.
- - private LRU and reclaim routine. (system's global LRU and private LRU
- work independently from each other)
+ - pages are linked to per-memcg LRU exclusively, and there is no global LRU.
- optionally, memory+swap usage can be accounted and limited.
- hierarchical accounting
- soft limit
2.2.1 Accounting details
All mapped anon pages (RSS) and cache pages (Page Cache) are accounted.
-Some pages which are never reclaimable and will not be on the global LRU
+Some pages which are never reclaimable and will not be on the LRU
are not accounted. We just account pages under usual VM management.
RSS pages are accounted at page_fault unless they've already been accounted
of ASLR. It was only ever intended for debugging, so it should be
removed.
Who: Kees Cook <keescook@chromium.org>
+
+----------------------------
+
+What: setitimer accepts user NULL pointer (value)
+When: 3.6
+Why:	setitimer does not return -EFAULT if the user pointer is NULL. This
+	violates the spec.
+Who: Sasikantha Babu <sasikanth.v19@gmail.com>
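
	A minimal user-space sketch of the behaviour described above (whether
	the call currently fails with EFAULT depends on the kernel version):

	#include <sys/time.h>
	#include <errno.h>
	#include <stdio.h>

	int main(void)
	{
		/* Per the spec, a NULL value pointer should yield EFAULT. */
		if (setitimer(ITIMER_REAL, NULL, NULL) == -1 && errno == EFAULT)
			printf("setitimer rejected NULL value pointer\n");
		else
			printf("setitimer accepted NULL value pointer\n");
		return 0;
	}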
struct file_system_type {
const char *name;
int fs_flags;
- struct dentry (*mount) (struct file_system_type *, int,
+ struct dentry *(*mount) (struct file_system_type *, int,
const char *, void *);
void (*kill_sb) (struct super_block *);
struct module *owner;
ALC882/883/885/888/889
======================
- N/A
+ acer-aspire-4930g Acer Aspire 4930G/5930G/6530G/6930G/7730G
+ acer-aspire-8930g Acer Aspire 8330G/6935G
+ acer-aspire Acer Aspire others
ALC861/660
==========
they will get a -EPERM error. Thus you can be sure that when
usb_kill_urb() returns, the URB is totally idle.
+There is a lifetime issue to consider. An URB may complete at any
+time, and the completion handler may free the URB. If this happens
+while usb_unlink_urb or usb_kill_urb is running, it will cause a
+memory-access violation. The driver is responsible for avoiding this,
+which often means some sort of lock will be needed to prevent the URB
+from being deallocated while it is still in use.
+
+On the other hand, since usb_unlink_urb may end up calling the
+completion handler, the handler must not take any lock that is held
+when usb_unlink_urb is invoked. The general solution to this problem
+is to increment the URB's reference count while holding the lock, then
+drop the lock and call usb_unlink_urb or usb_kill_urb, and then
+decrement the URB's reference count. You increment the reference
+count by calling
+
+ struct urb *usb_get_urb(struct urb *urb)
+
+(ignore the return value; it is the same as the argument) and
+decrement the reference count by calling usb_free_urb. Of course,
+none of this is necessary if there's no danger of the URB being freed
+by the completion handler.
+
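As a rough illustration of this pattern (a sketch only; "priv", its lock,
and its urb field are hypothetical driver members, not part of the USB API):

	struct urb *urb;

	spin_lock_irq(&priv->lock);
	urb = priv->urb;		/* URB we may need to cancel */
	usb_get_urb(urb);		/* hold a reference across the unlink */
	spin_unlock_irq(&priv->lock);

	usb_kill_urb(urb);		/* completion handler may run here */
	usb_free_urb(urb);		/* drop the reference we took */
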
1.7. What about the completion handler?
d5ea89a0 3575914555 S Ci:1:001:0 s a3 00 0000 0003 0004 4 <
d5ea89a0 3575914560 C Ci:1:001:0 0 4 = 01050000
-An output bulk transfer to send a SCSI command 0x5E in a 31-byte Bulk wrapper
-to a storage device at address 5:
+An output bulk transfer to send a SCSI command 0x28 (READ_10) in a 31-byte
+Bulk wrapper to a storage device at address 5:
-dd65f0e8 4128379752 S Bo:1:005:2 -115 31 = 55534243 5e000000 00000000 00000600 00000000 00000000 00000000 000000
+dd65f0e8 4128379752 S Bo:1:005:2 -115 31 = 55534243 ad000000 00800000 80010a28 20000000 20000040 00000000 000000
dd65f0e8 4128379808 C Bo:1:005:2 0 31 >
* Raw binary format and API
M: Johan Hedberg <johan.hedberg@gmail.com>
L: linux-bluetooth@vger.kernel.org
W: http://www.bluez.org/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/padovan/bluetooth.git
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/jh/bluetooth.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next.git
S: Maintained
F: drivers/bluetooth/
M: Johan Hedberg <johan.hedberg@gmail.com>
L: linux-bluetooth@vger.kernel.org
W: http://www.bluez.org/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/padovan/bluetooth.git
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/jh/bluetooth.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next.git
S: Maintained
F: net/bluetooth/
F: include/net/bluetooth/
F: drivers/acpi/dock.c
DOCUMENTATION
-M: Randy Dunlap <rdunlap@xenotime.net>
+M: Rob Landley <rob@landley.net>
L: linux-doc@vger.kernel.org
-T: quilt http://xenotime.net/kernel-doc-patches/current/
+T: TBD
S: Maintained
F: Documentation/
F: drivers/net/ethernet/myricom/myri10ge/
NATSEMI ETHERNET DRIVER (DP8381x)
-M: Tim Hockin <thockin@hockin.org>
-S: Maintained
+S: Orphan
F: drivers/net/ethernet/natsemi/natsemi.c
NATIVE INSTRUMENTS USB SOUND INTERFACE DRIVER
F: include/linux/i2c-algo-pca.h
F: include/linux/i2c-pca-platform.h
+PCDP - PRIMARY CONSOLE AND DEBUG PORT
+M: Khalid Aziz <khalid.aziz@hp.com>
+S: Maintained
+F: drivers/firmware/pcdp.*
+
PCI ERROR RECOVERY
M: Linas Vepstas <linasvepstas@gmail.com>
L: linux-pci@vger.kernel.org
F: drivers/staging/olpc_dcon/
STAGING - OZMO DEVICES USB OVER WIFI DRIVER
+M: Rupesh Gujare <rgujare@ozmodevices.com>
M: Chris Kelly <ckelly@ozmodevices.com>
S: Maintained
F: drivers/staging/ozwpan/
VERSION = 3
PATCHLEVEL = 4
SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
NAME = Saber-toothed Squirrel
# *DOCUMENTATION*
#include <linux/types.h>
#include <asm/barrier.h>
+#include <asm/cmpxchg.h>
/*
* Atomic operations that C can't guarantee us. Useful for
return result;
}
-/*
- * Atomic exchange routines.
- */
-
-#define __ASM__MB
-#define ____xchg(type, args...) __xchg ## type ## _local(args)
-#define ____cmpxchg(type, args...) __cmpxchg ## type ## _local(args)
-#include <asm/xchg.h>
-
-#define xchg_local(ptr,x) \
- ({ \
- __typeof__(*(ptr)) _x_ = (x); \
- (__typeof__(*(ptr))) __xchg_local((ptr), (unsigned long)_x_, \
- sizeof(*(ptr))); \
- })
-
-#define cmpxchg_local(ptr, o, n) \
- ({ \
- __typeof__(*(ptr)) _o_ = (o); \
- __typeof__(*(ptr)) _n_ = (n); \
- (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \
- (unsigned long)_n_, \
- sizeof(*(ptr))); \
- })
-
-#define cmpxchg64_local(ptr, o, n) \
- ({ \
- BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg_local((ptr), (o), (n)); \
- })
-
-#ifdef CONFIG_SMP
-#undef __ASM__MB
-#define __ASM__MB "\tmb\n"
-#endif
-#undef ____xchg
-#undef ____cmpxchg
-#define ____xchg(type, args...) __xchg ##type(args)
-#define ____cmpxchg(type, args...) __cmpxchg ##type(args)
-#include <asm/xchg.h>
-
-#define xchg(ptr,x) \
- ({ \
- __typeof__(*(ptr)) _x_ = (x); \
- (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, \
- sizeof(*(ptr))); \
- })
-
-#define cmpxchg(ptr, o, n) \
- ({ \
- __typeof__(*(ptr)) _o_ = (o); \
- __typeof__(*(ptr)) _n_ = (n); \
- (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
- (unsigned long)_n_, sizeof(*(ptr)));\
- })
-
-#define cmpxchg64(ptr, o, n) \
- ({ \
- BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg((ptr), (o), (n)); \
- })
-
-#undef __ASM__MB
-#undef ____cmpxchg
-
-#define __HAVE_ARCH_CMPXCHG 1
-
#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
--- /dev/null
+#ifndef _ALPHA_CMPXCHG_H
+#define _ALPHA_CMPXCHG_H
+
+/*
+ * Atomic exchange routines.
+ */
+
+#define __ASM__MB
+#define ____xchg(type, args...) __xchg ## type ## _local(args)
+#define ____cmpxchg(type, args...) __cmpxchg ## type ## _local(args)
+#include <asm/xchg.h>
+
+#define xchg_local(ptr, x) \
+({ \
+ __typeof__(*(ptr)) _x_ = (x); \
+ (__typeof__(*(ptr))) __xchg_local((ptr), (unsigned long)_x_, \
+ sizeof(*(ptr))); \
+})
+
+#define cmpxchg_local(ptr, o, n) \
+({ \
+ __typeof__(*(ptr)) _o_ = (o); \
+ __typeof__(*(ptr)) _n_ = (n); \
+ (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \
+ (unsigned long)_n_, \
+ sizeof(*(ptr))); \
+})
+
+#define cmpxchg64_local(ptr, o, n) \
+({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ cmpxchg_local((ptr), (o), (n)); \
+})
+
+#ifdef CONFIG_SMP
+#undef __ASM__MB
+#define __ASM__MB "\tmb\n"
+#endif
+#undef ____xchg
+#undef ____cmpxchg
+#define ____xchg(type, args...) __xchg ##type(args)
+#define ____cmpxchg(type, args...) __cmpxchg ##type(args)
+#include <asm/xchg.h>
+
+#define xchg(ptr, x) \
+({ \
+ __typeof__(*(ptr)) _x_ = (x); \
+ (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, \
+ sizeof(*(ptr))); \
+})
+
+#define cmpxchg(ptr, o, n) \
+({ \
+ __typeof__(*(ptr)) _o_ = (o); \
+ __typeof__(*(ptr)) _n_ = (n); \
+ (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
+ (unsigned long)_n_, sizeof(*(ptr)));\
+})
+
+#define cmpxchg64(ptr, o, n) \
+({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ cmpxchg((ptr), (o), (n)); \
+})
+
+#undef __ASM__MB
+#undef ____cmpxchg
+
+#define __HAVE_ARCH_CMPXCHG 1
+
+#endif /* _ALPHA_CMPXCHG_H */
-#ifndef _ALPHA_ATOMIC_H
+#ifndef _ALPHA_CMPXCHG_H
#error Do not include xchg.h directly!
#else
/*
* xchg/xchg_local and cmpxchg/cmpxchg_local share the same code
* except that local version do not have the expensive memory barrier.
- * So this file is included twice from asm/system.h.
+ * So this file is included twice from asm/cmpxchg.h.
*/
/*
} else if (atag->hdr.tag == ATAG_MEM) {
if (memcount >= sizeof(mem_reg_property)/4)
continue;
+ if (!atag->u.mem.size)
+ continue;
mem_reg_property[memcount++] = cpu_to_fdt32(atag->u.mem.start);
mem_reg_property[memcount++] = cpu_to_fdt32(atag->u.mem.size);
} else if (atag->hdr.tag == ATAG_INITRD2) {
add r0, r0, #0x100
mov r1, r6
sub r2, sp, r6
- blne atags_to_fdt
+ bleq atags_to_fdt
ldmfd sp!, {r0-r3, ip, lr}
sub sp, sp, #0x10000
#interrupt-cells = <2>;
compatible = "atmel,at91rm9200-aic";
interrupt-controller;
- interrupt-parent;
reg = <0xfffff000 0x200>;
};
#interrupt-cells = <2>;
compatible = "atmel,at91rm9200-aic";
interrupt-controller;
- interrupt-parent;
reg = <0xfffff000 0x200>;
};
#interrupt-cells = <2>;
compatible = "atmel,at91rm9200-aic";
interrupt-controller;
- interrupt-parent;
reg = <0xfffff000 0x200>;
};
#interrupt-cells = <3>;
#address-cells = <1>;
interrupt-controller;
- interrupt-parent;
reg = <0xa0411000 0x1000>,
<0xa0410100 0x100>;
};
#size-cells = <0>;
#address-cells = <1>;
interrupt-controller;
- interrupt-parent;
reg = <0xfff11000 0x1000>,
<0xfff10100 0x100>;
};
/*
* Handle each interrupt in a single VIC. Returns non-zero if we've
- * handled at least one interrupt. This does a single read of the
- * status register and handles all interrupts in order from LSB first.
+ * handled at least one interrupt. This reads the status register
+ * before handling each interrupt, which is necessary given that
+ * handle_IRQ may briefly re-enable interrupts for soft IRQ handling.
*/
static int handle_one_vic(struct vic_device *vic, struct pt_regs *regs)
{
u32 stat, irq;
int handled = 0;
- stat = readl_relaxed(vic->base + VIC_IRQ_STATUS);
- while (stat) {
+ while ((stat = readl_relaxed(vic->base + VIC_IRQ_STATUS))) {
irq = ffs(stat) - 1;
handle_IRQ(irq_find_mapping(vic->domain, irq), regs);
- stat &= ~(1 << irq);
handled = 1;
}
#define JUMP_LABEL_NOP "nop"
#endif
-static __always_inline bool arch_static_branch(struct jump_label_key *key)
+static __always_inline bool arch_static_branch(struct static_key *key)
{
asm goto("1:\n\t"
JUMP_LABEL_NOP "\n\t"
*/
size -= start & ~PAGE_MASK;
bank->start = PAGE_ALIGN(start);
- bank->size = size & PAGE_MASK;
+
+#ifndef CONFIG_LPAE
+ if (bank->start + size < bank->start) {
+ printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
+ "32-bit physical address space\n", (long long)start);
+ /*
+ * To ensure bank->start + bank->size is representable in
+ * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
+ * This means we lose a page after masking.
+ */
+ size = ULONG_MAX - bank->start;
+ }
+#endif
+
+ bank->size = size & PAGE_MASK;
/*
* Check whether this memory region has non-zero size or
* The twd clock events must be reprogrammed to account for the new
* frequency. The timer is local to a cpu, so cross-call to the
* changing cpu.
+ *
+ * Only wait for it to finish if the cpu is active, to avoid a
+ * deadlock when cpu1 is spinning on while(!cpu_active(cpu1)) during
+ * booting of that cpu.
*/
if (state == CPUFREQ_POSTCHANGE || state == CPUFREQ_RESUMECHANGE)
smp_call_function_single(freqs->cpu, twd_update_frequency,
- NULL, 1);
+ NULL, cpu_active(freqs->cpu));
return NOTIFY_OK;
}
config MACH_EXYNOS4_DT
bool "Samsung Exynos4 Machine using device tree"
+ depends on ARCH_EXYNOS4
select CPU_EXYNOS4210
select USE_OF
select ARM_AMBA
config MACH_EXYNOS5_DT
bool "SAMSUNG EXYNOS5 Machine using device tree"
+ depends on ARCH_EXYNOS5
select SOC_EXYNOS5250
select USE_OF
select ARM_AMBA
#define IRQ_MFC EXYNOS4_IRQ_MFC
#define IRQ_SDO EXYNOS4_IRQ_SDO
+#define IRQ_I2S0 EXYNOS4_IRQ_I2S0
+
#define IRQ_ADC EXYNOS4_IRQ_ADC0
#define IRQ_TC EXYNOS4_IRQ_PEN0
#define EXYNOS4_PA_MDMA1 0x12840000
#define EXYNOS4_PA_PDMA0 0x12680000
#define EXYNOS4_PA_PDMA1 0x12690000
+#define EXYNOS5_PA_MDMA0 0x10800000
+#define EXYNOS5_PA_MDMA1 0x11C10000
+#define EXYNOS5_PA_PDMA0 0x121A0000
+#define EXYNOS5_PA_PDMA1 0x121B0000
#define EXYNOS4_PA_SYSMMU_MDMA 0x10A40000
#define EXYNOS4_PA_SYSMMU_SSS 0x10A50000
/* For EXYNOS5250 */
+#define EXYNOS5_APLL_LOCK EXYNOS_CLKREG(0x00000)
#define EXYNOS5_APLL_CON0 EXYNOS_CLKREG(0x00100)
#define EXYNOS5_CLKSRC_CPU EXYNOS_CLKREG(0x00200)
+#define EXYNOS5_CLKMUX_STATCPU EXYNOS_CLKREG(0x00400)
#define EXYNOS5_CLKDIV_CPU0 EXYNOS_CLKREG(0x00500)
+#define EXYNOS5_CLKDIV_CPU1 EXYNOS_CLKREG(0x00504)
+#define EXYNOS5_CLKDIV_STATCPU0 EXYNOS_CLKREG(0x00600)
+#define EXYNOS5_CLKDIV_STATCPU1 EXYNOS_CLKREG(0x00604)
+
#define EXYNOS5_MPLL_CON0 EXYNOS_CLKREG(0x04100)
#define EXYNOS5_CLKSRC_CORE1 EXYNOS_CLKREG(0x04204)
"exynos4210-uart.3", NULL),
OF_DEV_AUXDATA("arm,pl330", EXYNOS5_PA_PDMA0, "dma-pl330.0", NULL),
OF_DEV_AUXDATA("arm,pl330", EXYNOS5_PA_PDMA1, "dma-pl330.1", NULL),
- OF_DEV_AUXDATA("arm,pl330", EXYNOS5_PA_PDMA1, "dma-pl330.2", NULL),
+ OF_DEV_AUXDATA("arm,pl330", EXYNOS5_PA_MDMA1, "dma-pl330.2", NULL),
{},
};
};
/* TSP */
-static u8 mxt_init_vals[] = {
- /* MXT_GEN_COMMAND(6) */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- /* MXT_GEN_POWER(7) */
- 0x20, 0xff, 0x32,
- /* MXT_GEN_ACQUIRE(8) */
- 0x0a, 0x00, 0x05, 0x00, 0x00, 0x00, 0x09, 0x23,
- /* MXT_TOUCH_MULTI(9) */
- 0x00, 0x00, 0x00, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x02, 0x00,
- 0x00, 0x01, 0x01, 0x0e, 0x0a, 0x0a, 0x0a, 0x0a, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00,
- /* MXT_TOUCH_KEYARRAY(15) */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00,
- 0x00,
- /* MXT_SPT_GPIOPWM(19) */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- /* MXT_PROCI_GRIPFACE(20) */
- 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x28, 0x04,
- 0x0f, 0x0a,
- /* MXT_PROCG_NOISE(22) */
- 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x23, 0x00,
- 0x00, 0x05, 0x0f, 0x19, 0x23, 0x2d, 0x03,
- /* MXT_TOUCH_PROXIMITY(23) */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00,
- /* MXT_PROCI_ONETOUCH(24) */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- /* MXT_SPT_SELFTEST(25) */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- /* MXT_PROCI_TWOTOUCH(27) */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- /* MXT_SPT_CTECONFIG(28) */
- 0x00, 0x00, 0x02, 0x08, 0x10, 0x00,
-};
-
static struct mxt_platform_data mxt_platform_data = {
- .config = mxt_init_vals,
- .config_length = ARRAY_SIZE(mxt_init_vals),
-
.x_line = 18,
.y_line = 11,
.x_size = 1024,
static struct regulator_init_data __initdata max8997_ldo8_data = {
.constraints = {
- .name = "VUSB/VDAC_3.3V_C210",
+ .name = "VUSB+VDAC_3.3V_C210",
.min_uV = 3300000,
.max_uV = 3300000,
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
static void __init nuri_map_io(void)
{
+ clk_xusbxti.rate = 24000000;
exynos_init_io(NULL, 0);
s3c24xx_init_clocks(24000000);
s3c24xx_init_uarts(nuri_uartcfgs, ARRAY_SIZE(nuri_uartcfgs));
nuri_camera_init();
nuri_ehci_init();
- clk_xusbxti.rate = 24000000;
/* Last */
platform_add_devices(nuri_devices, ARRAY_SIZE(nuri_devices));
#include <asm/mach-types.h>
#include <plat/regs-serial.h>
+#include <plat/clock.h>
#include <plat/cpu.h>
#include <plat/devs.h>
#include <plat/iic.h>
static void __init universal_map_io(void)
{
+ clk_xusbxti.rate = 24000000;
exynos_init_io(NULL, 0);
s3c24xx_init_clocks(24000000);
s3c24xx_init_uarts(universal_uartcfgs, ARRAY_SIZE(universal_uartcfgs));
static void __init halibut_fixup(struct tag *tags, char **cmdline,
struct meminfo *mi)
{
- mi->nr_banks=1;
- mi->bank[0].start = PHYS_OFFSET;
- mi->bank[0].size = (101*1024*1024);
}
static void __init halibut_map_io(void)
#include <asm/io.h>
#include <asm/mach-types.h>
+#include <asm/system_info.h>
#include <mach/msm_fb.h>
#include <mach/vreg.h>
#include <linux/platform_device.h>
#include <linux/clkdev.h>
+#include <asm/system_info.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
* and unknown state. This function should be called early to
* wait on the ARM9.
*/
-void __init proc_comm_boot_wait(void)
+void __devinit proc_comm_boot_wait(void)
{
void __iomem *base = MSM_SHARED_RAM_BASE;
return 0;
}
-
-#ifdef CONFIG_CPU_FREQ
-/*
- * Walk PRCM rate table and fillout cpufreq freq_table
- * XXX This should be replaced by an OPP layer in the near future
- */
-static struct cpufreq_frequency_table *freq_table;
-
-void omap2_clk_init_cpufreq_table(struct cpufreq_frequency_table **table)
-{
- const struct prcm_config *prcm;
- int i = 0;
- int tbl_sz = 0;
-
- if (!cpu_is_omap24xx())
- return;
-
- for (prcm = rate_table; prcm->mpu_speed; prcm++) {
- if (!(prcm->flags & cpu_mask))
- continue;
- if (prcm->xtal_speed != sclk->rate)
- continue;
-
- /* don't put bypass rates in table */
- if (prcm->dpll_speed == prcm->xtal_speed)
- continue;
-
- tbl_sz++;
- }
-
- /*
- * XXX Ensure that we're doing what CPUFreq expects for this error
- * case and the following one
- */
- if (tbl_sz == 0) {
- pr_warning("%s: no matching entries in rate_table\n",
- __func__);
- return;
- }
-
- /* Include the CPUFREQ_TABLE_END terminator entry */
- tbl_sz++;
-
- freq_table = kzalloc(sizeof(struct cpufreq_frequency_table) * tbl_sz,
- GFP_ATOMIC);
- if (!freq_table) {
- pr_err("%s: could not kzalloc frequency table\n", __func__);
- return;
- }
-
- for (prcm = rate_table; prcm->mpu_speed; prcm++) {
- if (!(prcm->flags & cpu_mask))
- continue;
- if (prcm->xtal_speed != sclk->rate)
- continue;
-
- /* don't put bypass rates in table */
- if (prcm->dpll_speed == prcm->xtal_speed)
- continue;
-
- freq_table[i].index = i;
- freq_table[i].frequency = prcm->mpu_speed / 1000;
- i++;
- }
-
- freq_table[i].index = i;
- freq_table[i].frequency = CPUFREQ_TABLE_END;
-
- *table = &freq_table[0];
-}
-
-void omap2_clk_exit_cpufreq_table(struct cpufreq_frequency_table **table)
-{
- if (!cpu_is_omap24xx())
- return;
-
- kfree(freq_table);
-}
-
-#endif
.clk_set_rate = omap2_clk_set_rate,
.clk_set_parent = omap2_clk_set_parent,
.clk_disable_unused = omap2_clk_disable_unused,
-#ifdef CONFIG_CPU_FREQ
- /* These will be removed when the OPP code is integrated */
- .clk_init_cpufreq_table = omap2_clk_init_cpufreq_table,
- .clk_exit_cpufreq_table = omap2_clk_exit_cpufreq_table,
-#endif
};
extern const struct clksel_rate gfx_l3_rates[];
extern const struct clksel_rate dsp_ick_rates[];
-#if defined(CONFIG_ARCH_OMAP2) && defined(CONFIG_CPU_FREQ)
-extern void omap2_clk_init_cpufreq_table(struct cpufreq_frequency_table **table);
-extern void omap2_clk_exit_cpufreq_table(struct cpufreq_frequency_table **table);
-#else
-#define omap2_clk_init_cpufreq_table 0
-#define omap2_clk_exit_cpufreq_table 0
-#endif
-
extern const struct clkops clkops_omap2_iclk_dflt_wait;
extern const struct clkops clkops_omap2_iclk_dflt;
extern const struct clkops clkops_omap2_iclk_idle_only;
#include <mach/irqs.h>
#include <mach/dma.h>
-static u64 dma_dmamask = DMA_BIT_MASK(32);
-
static u8 pdma0_peri[] = {
DMACH_UART0_RX,
DMACH_UART0_TX,
.gpio_defaults[8] = 0x0100,
.gpio_defaults[9] = 0x0100,
.gpio_defaults[10] = 0x0100,
- .ldo[0] = { S5PV210_MP03(6), NULL, &wm8994_ldo1_data }, /* XM0FRNB_2 */
- .ldo[1] = { 0, NULL, &wm8994_ldo2_data },
+ .ldo[0] = { S5PV210_MP03(6), &wm8994_ldo1_data }, /* XM0FRNB_2 */
+ .ldo[1] = { 0, &wm8994_ldo2_data },
};
/* GPIO I2C PMIC */
.gpio_defaults[8] = 0x0100,
.gpio_defaults[9] = 0x0100,
.gpio_defaults[10] = 0x0100,
- .ldo[0] = { S5PV210_MP03(6), NULL, &wm8994_ldo1_data }, /* XM0FRNB_2 */
- .ldo[1] = { 0, NULL, &wm8994_ldo2_data },
+ .ldo[0] = { S5PV210_MP03(6), &wm8994_ldo1_data }, /* XM0FRNB_2 */
+ .ldo[1] = { 0, &wm8994_ldo2_data },
};
/* GPIO I2C PMIC */
bool "Select the High exception vector"
help
Say Y here to select high exception vector(0xFFFF0000~).
- The exception vector can be vary depending on the platform
+ The exception vector can vary depending on the platform
design in nommu mode. If your platform needs to select
high exception vector, say Y.
Otherwise or if you are unsure, say N, and the low exception
*/
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
- if (flags & FAULT_FLAG_ALLOW_RETRY) {
+ if (!(fault & VM_FAULT_ERROR) && flags & FAULT_FLAG_ALLOW_RETRY) {
if (fault & VM_FAULT_MAJOR) {
tsk->maj_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
#include <asm/sections.h>
#include <asm/page.h>
#include <asm/setup.h>
+#include <asm/traps.h>
#include <asm/mach/arch.h>
#include "mm.h"
*/
void __init paging_init(struct machine_desc *mdesc)
{
+ early_trap_init((void *)CONFIG_VECTORS_BASE);
bootmem_init();
}
mcr p15, 0, r5, c10, c2, 0 @ write PRRR
mcr p15, 0, r6, c10, c2, 1 @ write NMRR
#endif
+#ifndef CONFIG_ARM_THUMBEE
+ mrc p15, 0, r0, c0, c1, 0 @ read ID_PFR0 for ThumbEE
+ and r0, r0, #(0xf << 12) @ ThumbEE enabled field
+ teq r0, #(1 << 12) @ check if ThumbEE is present
+ bne 1f
+ mov r5, #0
+ mcr p14, 6, r5, c1, c0, 0 @ Initialize TEEHBR to 0
+ mrc p14, 6, r0, c0, c0, 0 @ load TEECR
+ orr r0, r0, #1 @ set the 1st bit in order to
+ mcr p14, 6, r0, c0, c0, 0 @ stop userspace TEEHBR access
+1:
+#endif
adr r5, v7_crval
ldmia r5, {r5, r6}
#ifdef CONFIG_CPU_ENDIAN_BE8
.ops = &clkops_null,
};
-#ifdef CONFIG_CPU_FREQ
-void clk_init_cpufreq_table(struct cpufreq_frequency_table **table)
-{
- unsigned long flags;
-
- if (!arch_clock || !arch_clock->clk_init_cpufreq_table)
- return;
-
- spin_lock_irqsave(&clockfw_lock, flags);
- arch_clock->clk_init_cpufreq_table(table);
- spin_unlock_irqrestore(&clockfw_lock, flags);
-}
-
-void clk_exit_cpufreq_table(struct cpufreq_frequency_table **table)
-{
- unsigned long flags;
-
- if (!arch_clock || !arch_clock->clk_exit_cpufreq_table)
- return;
-
- spin_lock_irqsave(&clockfw_lock, flags);
- arch_clock->clk_exit_cpufreq_table(table);
- spin_unlock_irqrestore(&clockfw_lock, flags);
-}
-#endif
-
/*
*
*/
#endif
};
-struct cpufreq_frequency_table;
-
struct clk_functions {
int (*clk_enable)(struct clk *clk);
void (*clk_disable)(struct clk *clk);
void (*clk_allow_idle)(struct clk *clk);
void (*clk_deny_idle)(struct clk *clk);
void (*clk_disable_unused)(struct clk *clk);
-#ifdef CONFIG_CPU_FREQ
- void (*clk_init_cpufreq_table)(struct cpufreq_frequency_table **);
- void (*clk_exit_cpufreq_table)(struct cpufreq_frequency_table **);
-#endif
};
extern int mpurate;
extern unsigned long followparent_recalc(struct clk *clk);
extern void clk_enable_init_clocks(void);
unsigned long omap_fixed_divisor_recalc(struct clk *clk);
-#ifdef CONFIG_CPU_FREQ
-extern void clk_init_cpufreq_table(struct cpufreq_frequency_table **table);
-extern void clk_exit_cpufreq_table(struct cpufreq_frequency_table **table);
-#endif
extern struct clk *omap_clk_get_by_name(const char *name);
extern int omap_clk_enable_autoidle_all(void);
extern int omap_clk_disable_autoidle_all(void);
config SAMSUNG_PM_DEBUG
bool "S3C2410 PM Suspend debug"
depends on PM
+ select DEBUG_LL
help
Say Y here if you want verbose debugging from the PM Suspend and
Resume code. See <file:Documentation/arm/Samsung-S3C24XX/Suspend.txt>
/* This number is used when no interrupt has been assigned */
#define NO_IRQ 0
-struct irq_data;
-extern irq_hw_number_t irqd_to_hwirq(struct irq_data *d);
-extern irq_hw_number_t virq_to_hw(unsigned int virq);
-
extern void __init init_pic_c64xplus(void);
extern void init_IRQ(void);
seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
return 0;
}
-
-irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
-{
- return d->hwirq;
-}
-EXPORT_SYMBOL_GPL(irqd_to_hwirq);
-
-irq_hw_number_t virq_to_hw(unsigned int virq)
-{
- struct irq_data *irq_data = irq_get_irq_data(virq);
- return WARN_ON(!irq_data) ? 0 : irq_data->hwirq;
-}
-EXPORT_SYMBOL_GPL(virq_to_hw);
-#include <asm/intrinsics.h>
+#ifndef _ASM_IA64_CMPXCHG_H
+#define _ASM_IA64_CMPXCHG_H
+
+/*
+ * Compare/Exchange, forked from asm/intrinsics.h
+ * which was:
+ *
+ * Copyright (C) 2002-2003 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+/* include compiler specific intrinsics */
+#include <asm/ia64regs.h>
+#ifdef __INTEL_COMPILER
+# include <asm/intel_intrin.h>
+#else
+# include <asm/gcc_intrin.h>
+#endif
+
+/*
+ * This function doesn't exist, so you'll get a linker error if
+ * something tries to do an invalid xchg().
+ */
+extern void ia64_xchg_called_with_bad_pointer(void);
+
+#define __xchg(x, ptr, size) \
+({ \
+ unsigned long __xchg_result; \
+ \
+ switch (size) { \
+ case 1: \
+ __xchg_result = ia64_xchg1((__u8 *)ptr, x); \
+ break; \
+ \
+ case 2: \
+ __xchg_result = ia64_xchg2((__u16 *)ptr, x); \
+ break; \
+ \
+ case 4: \
+ __xchg_result = ia64_xchg4((__u32 *)ptr, x); \
+ break; \
+ \
+ case 8: \
+ __xchg_result = ia64_xchg8((__u64 *)ptr, x); \
+ break; \
+ default: \
+ ia64_xchg_called_with_bad_pointer(); \
+ } \
+ __xchg_result; \
+})
+
+#define xchg(ptr, x) \
+((__typeof__(*(ptr))) __xchg((unsigned long) (x), (ptr), sizeof(*(ptr))))
+
+/*
+ * Atomic compare and exchange. Compare OLD with MEM, if identical,
+ * store NEW in MEM. Return the initial value in MEM. Success is
+ * indicated by comparing RETURN with OLD.
+ */
+
+#define __HAVE_ARCH_CMPXCHG 1
+
+/*
+ * This function doesn't exist, so you'll get a linker error
+ * if something tries to do an invalid cmpxchg().
+ */
+extern long ia64_cmpxchg_called_with_bad_pointer(void);
+
+#define ia64_cmpxchg(sem, ptr, old, new, size) \
+({ \
+ __u64 _o_, _r_; \
+ \
+ switch (size) { \
+ case 1: \
+ _o_ = (__u8) (long) (old); \
+ break; \
+ case 2: \
+ _o_ = (__u16) (long) (old); \
+ break; \
+ case 4: \
+ _o_ = (__u32) (long) (old); \
+ break; \
+ case 8: \
+ _o_ = (__u64) (long) (old); \
+ break; \
+ default: \
+ break; \
+ } \
+ switch (size) { \
+ case 1: \
+ _r_ = ia64_cmpxchg1_##sem((__u8 *) ptr, new, _o_); \
+ break; \
+ \
+ case 2: \
+ _r_ = ia64_cmpxchg2_##sem((__u16 *) ptr, new, _o_); \
+ break; \
+ \
+ case 4: \
+ _r_ = ia64_cmpxchg4_##sem((__u32 *) ptr, new, _o_); \
+ break; \
+ \
+ case 8: \
+ _r_ = ia64_cmpxchg8_##sem((__u64 *) ptr, new, _o_); \
+ break; \
+ \
+ default: \
+ _r_ = ia64_cmpxchg_called_with_bad_pointer(); \
+ break; \
+ } \
+ (__typeof__(old)) _r_; \
+})
+
+#define cmpxchg_acq(ptr, o, n) \
+ ia64_cmpxchg(acq, (ptr), (o), (n), sizeof(*(ptr)))
+#define cmpxchg_rel(ptr, o, n) \
+ ia64_cmpxchg(rel, (ptr), (o), (n), sizeof(*(ptr)))
+
+/* for compatibility with other platforms: */
+#define cmpxchg(ptr, o, n) cmpxchg_acq((ptr), (o), (n))
+#define cmpxchg64(ptr, o, n) cmpxchg_acq((ptr), (o), (n))
+
+#define cmpxchg_local cmpxchg
+#define cmpxchg64_local cmpxchg64
+
+#ifdef CONFIG_IA64_DEBUG_CMPXCHG
+# define CMPXCHG_BUGCHECK_DECL int _cmpxchg_bugcheck_count = 128;
+# define CMPXCHG_BUGCHECK(v) \
+do { \
+ if (_cmpxchg_bugcheck_count-- <= 0) { \
+ void *ip; \
+ extern int printk(const char *fmt, ...); \
+ ip = (void *) ia64_getreg(_IA64_REG_IP); \
+ printk("CMPXCHG_BUGCHECK: stuck at %p on word %p\n", ip, (v));\
+ break; \
+ } \
+} while (0)
+#else /* !CONFIG_IA64_DEBUG_CMPXCHG */
+# define CMPXCHG_BUGCHECK_DECL
+# define CMPXCHG_BUGCHECK(v)
+#endif /* !CONFIG_IA64_DEBUG_CMPXCHG */
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_IA64_CMPXCHG_H */
return -EFAULT;
{
- register unsigned long r8 __asm ("r8") = 0;
+ register unsigned long r8 __asm ("r8");
unsigned long prev;
__asm__ __volatile__(
" mf;; \n"
- " mov ar.ccv=%3;; \n"
- "[1:] cmpxchg4.acq %0=[%1],%2,ar.ccv \n"
+ " mov %0=r0 \n"
+ " mov ar.ccv=%4;; \n"
+ "[1:] cmpxchg4.acq %1=[%2],%3,ar.ccv \n"
" .xdata4 \"__ex_table\", 1b-., 2f-. \n"
"[2:]"
- : "=r" (prev)
+ : "=r" (r8), "=r" (prev)
: "r" (uaddr), "r" (newval),
"rO" ((long) (unsigned) oldval)
: "memory");
#else
# include <asm/gcc_intrin.h>
#endif
+#include <asm/cmpxchg.h>
#define ia64_native_get_psr_i() (ia64_native_getreg(_IA64_REG_PSR) & IA64_PSR_I)
#define ia64_fetch_and_add(i,v) (ia64_fetchadd(i, v, rel) + (i)) /* return new value */
-/*
- * This function doesn't exist, so you'll get a linker error if
- * something tries to do an invalid xchg().
- */
-extern void ia64_xchg_called_with_bad_pointer (void);
-
-#define __xchg(x,ptr,size) \
-({ \
- unsigned long __xchg_result; \
- \
- switch (size) { \
- case 1: \
- __xchg_result = ia64_xchg1((__u8 *)ptr, x); \
- break; \
- \
- case 2: \
- __xchg_result = ia64_xchg2((__u16 *)ptr, x); \
- break; \
- \
- case 4: \
- __xchg_result = ia64_xchg4((__u32 *)ptr, x); \
- break; \
- \
- case 8: \
- __xchg_result = ia64_xchg8((__u64 *)ptr, x); \
- break; \
- default: \
- ia64_xchg_called_with_bad_pointer(); \
- } \
- __xchg_result; \
-})
-
-#define xchg(ptr,x) \
- ((__typeof__(*(ptr))) __xchg ((unsigned long) (x), (ptr), sizeof(*(ptr))))
-
-/*
- * Atomic compare and exchange. Compare OLD with MEM, if identical,
- * store NEW in MEM. Return the initial value in MEM. Success is
- * indicated by comparing RETURN with OLD.
- */
-
-#define __HAVE_ARCH_CMPXCHG 1
-
-/*
- * This function doesn't exist, so you'll get a linker error
- * if something tries to do an invalid cmpxchg().
- */
-extern long ia64_cmpxchg_called_with_bad_pointer (void);
-
-#define ia64_cmpxchg(sem,ptr,old,new,size) \
-({ \
- __u64 _o_, _r_; \
- \
- switch (size) { \
- case 1: _o_ = (__u8 ) (long) (old); break; \
- case 2: _o_ = (__u16) (long) (old); break; \
- case 4: _o_ = (__u32) (long) (old); break; \
- case 8: _o_ = (__u64) (long) (old); break; \
- default: break; \
- } \
- switch (size) { \
- case 1: \
- _r_ = ia64_cmpxchg1_##sem((__u8 *) ptr, new, _o_); \
- break; \
- \
- case 2: \
- _r_ = ia64_cmpxchg2_##sem((__u16 *) ptr, new, _o_); \
- break; \
- \
- case 4: \
- _r_ = ia64_cmpxchg4_##sem((__u32 *) ptr, new, _o_); \
- break; \
- \
- case 8: \
- _r_ = ia64_cmpxchg8_##sem((__u64 *) ptr, new, _o_); \
- break; \
- \
- default: \
- _r_ = ia64_cmpxchg_called_with_bad_pointer(); \
- break; \
- } \
- (__typeof__(old)) _r_; \
-})
-
-#define cmpxchg_acq(ptr, o, n) \
- ia64_cmpxchg(acq, (ptr), (o), (n), sizeof(*(ptr)))
-#define cmpxchg_rel(ptr, o, n) \
- ia64_cmpxchg(rel, (ptr), (o), (n), sizeof(*(ptr)))
-
-/* for compatibility with other platforms: */
-#define cmpxchg(ptr, o, n) cmpxchg_acq((ptr), (o), (n))
-#define cmpxchg64(ptr, o, n) cmpxchg_acq((ptr), (o), (n))
-
-#define cmpxchg_local cmpxchg
-#define cmpxchg64_local cmpxchg64
-
-#ifdef CONFIG_IA64_DEBUG_CMPXCHG
-# define CMPXCHG_BUGCHECK_DECL int _cmpxchg_bugcheck_count = 128;
-# define CMPXCHG_BUGCHECK(v) \
- do { \
- if (_cmpxchg_bugcheck_count-- <= 0) { \
- void *ip; \
- extern int printk(const char *fmt, ...); \
- ip = (void *) ia64_getreg(_IA64_REG_IP); \
- printk("CMPXCHG_BUGCHECK: stuck at %p on word %p\n", ip, (v)); \
- break; \
- } \
- } while (0)
-#else /* !CONFIG_IA64_DEBUG_CMPXCHG */
-# define CMPXCHG_BUGCHECK_DECL
-# define CMPXCHG_BUGCHECK(v)
-#endif /* !CONFIG_IA64_DEBUG_CMPXCHG */
-
#endif
#ifdef __KERNEL__
/* Same thing, used by the generic IRQ code */
#define NR_IRQS_LEGACY NUM_ISA_INTERRUPTS
-struct irq_data;
-extern irq_hw_number_t irqd_to_hwirq(struct irq_data *d);
extern irq_hw_number_t virq_to_hw(unsigned int virq);
/**
andi. r10,r10,MSR_EE /* Did EE change? */
beq 1f
- /* Save handler and return address into the 2 unused words
- * of the STACK_FRAME_OVERHEAD (sneak sneak sneak). Everything
- * else can be recovered from the pt_regs except r3 which for
- * normal interrupts has been set to pt_regs and for syscalls
- * is an argument, so we temporarily use ORIG_GPR3 to save it
- */
- stw r9,8(r1)
- stw r11,12(r1)
- stw r3,ORIG_GPR3(r1)
/*
* The trace_hardirqs_off will use CALLER_ADDR0 and CALLER_ADDR1.
* If from user mode there is only one stack frame on the stack, and
 * accessing CALLER_ADDR1 will cause an oops. So we need to create a dummy
* stack frame to make trace_hardirqs_off happy.
+ *
+ * This is handy because we also need to save a bunch of GPRs,
+ * r3 can be different from GPR3(r1) at this point, r9 and r11
+ * contains the old MSR and handler address respectively,
+ * r4 & r5 can contain page fault arguments that need to be passed
+ * along as well. r12, CCR, CTR, XER etc... are left clobbered as
+ * they aren't useful past this point (aren't syscall arguments),
+ * the rest is restored from the exception frame.
*/
+ stwu r1,-32(r1)
+ stw r9,8(r1)
+ stw r11,12(r1)
+ stw r3,16(r1)
+ stw r4,20(r1)
+ stw r5,24(r1)
andi. r12,r12,MSR_PR
- beq 11f
- stwu r1,-16(r1)
+ b 11f
bl trace_hardirqs_off
- addi r1,r1,16
b 12f
-
11:
bl trace_hardirqs_off
12:
+ lwz r5,24(r1)
+ lwz r4,20(r1)
+ lwz r3,16(r1)
+ lwz r11,12(r1)
+ lwz r9,8(r1)
+ addi r1,r1,32
lwz r0,GPR0(r1)
- lwz r3,ORIG_GPR3(r1)
- lwz r4,GPR4(r1)
- lwz r5,GPR5(r1)
lwz r6,GPR6(r1)
lwz r7,GPR7(r1)
lwz r8,GPR8(r1)
- lwz r9,8(r1)
- lwz r11,12(r1)
1: mtctr r11
mtlr r9
bctr /* jump to handler */
local_irq_restore(flags);
}
-irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
-{
- return d->hwirq;
-}
-EXPORT_SYMBOL_GPL(irqd_to_hwirq);
-
irq_hw_number_t virq_to_hw(unsigned int virq)
{
struct irq_data *irq_data = irq_get_irq_data(virq);
ctrl |= CTRL_RUNLATCH;
mtspr(SPRN_CTRLT, ctrl);
- ti->local_flags |= TLF_RUNLATCH;
+ ti->local_flags |= _TLF_RUNLATCH;
}
/* Called with hard IRQs off */
struct thread_info *ti = current_thread_info();
unsigned long ctrl;
- ti->local_flags &= ~TLF_RUNLATCH;
+ ti->local_flags &= ~_TLF_RUNLATCH;
ctrl = mfspr(SPRN_CTRLF);
ctrl &= ~CTRL_RUNLATCH;
}
memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES);
- msic->irq_domain = irq_domain_add_nomap(dn, &msic_host_ops, msic);
+ msic->irq_domain = irq_domain_add_nomap(dn, 0, &msic_host_ops, msic);
if (!msic->irq_domain) {
printk(KERN_ERR "axon_msi: couldn't allocate irq_domain for %s\n",
dn->full_name);
ppc_md.get_irq = beatic_get_irq;
/* Allocate an irq host */
- beatic_host = irq_domain_add_nomap(NULL, &beatic_pic_host_ops, NULL);
+ beatic_host = irq_domain_add_nomap(NULL, 0, &beatic_pic_host_ops, NULL);
BUG_ON(beatic_host == NULL);
irq_set_default_host(beatic_host);
}
{
int rc = -ENOMEM;
- psurge_host = irq_domain_add_nomap(NULL, &psurge_host_ops, NULL);
+ psurge_host = irq_domain_add_nomap(NULL, 0, &psurge_host_ops, NULL);
if (psurge_host)
psurge_secondary_virq = irq_create_direct_mapping(psurge_host);
unsigned cpu;
struct irq_domain *host;
- host = irq_domain_add_nomap(NULL, &ps3_host_ops, NULL);
+ host = irq_domain_add_nomap(NULL, PS3_PLUG_MAX + 1, &ps3_host_ops, NULL);
irq_set_default_host(host);
- irq_set_virq_count(PS3_PLUG_MAX + 1);
for_each_possible_cpu(cpu) {
struct ps3_private *pd = &per_cpu(ps3_private, cpu);
return vio_register_driver(&ds_driver);
}
-subsys_initcall(ds_init);
+fs_initcall(ds_init);
.text
.align 32
-__handle_softirq:
- call do_softirq
- nop
- ba,a,pt %xcc, __handle_softirq_continue
- nop
__handle_preemption:
call schedule
wrpr %g0, RTRAP_PSTATE, %pstate
cmp %l1, 0
/* mm/ultra.S:xcall_report_regs KNOWS about this load. */
- bne,pn %icc, __handle_softirq
ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
-__handle_softirq_continue:
rtrap_xcall:
sethi %hi(0xf << 20), %l4
and %l1, %l4, %l4
switch (sizeof(*(ptr))) { \
case __X86_CASE_B: \
asm volatile (lock #op "b %b0, %1\n" \
- : "+r" (__ret), "+m" (*(ptr)) \
+ : "+q" (__ret), "+m" (*(ptr)) \
: : "memory", "cc"); \
break; \
case __X86_CASE_W: \
switch (sizeof(*(ptr))) { \
case __X86_CASE_B: \
asm volatile (lock "addb %b1, %0\n" \
- : "+m" (*(ptr)) : "ri" (inc) \
+ : "+m" (*(ptr)) : "qi" (inc) \
: "memory", "cc"); \
break; \
case __X86_CASE_W: \
current_thread_info()->sig_on_uaccess_error = 1;
/*
- * 0 is a valid user pointer (in the access_ok sense) on 32-bit and
+ * NULL is a valid user pointer (in the access_ok sense) on 32-bit and
* 64-bit, so we don't need to special-case it here. For all the
- * vsyscalls, 0 means "don't write anything" not "write it at
+ * vsyscalls, NULL means "don't write anything" not "write it at
* address 0".
*/
ret = -EFAULT;
ret = sys_getcpu((unsigned __user *)regs->di,
(unsigned __user *)regs->si,
- 0);
+ NULL);
break;
}
return;
}
-/* Decode moffset16/32/64 */
-static void __get_moffset(struct insn *insn)
+/* Decode moffset16/32/64. Return 0 if failed */
+static int __get_moffset(struct insn *insn)
{
switch (insn->addr_bytes) {
case 2:
insn->moffset2.value = get_next(int, insn);
insn->moffset2.nbytes = 4;
break;
+ default: /* opnd_bytes must be modified manually */
+ goto err_out;
}
insn->moffset1.got = insn->moffset2.got = 1;
+ return 1;
+
err_out:
- return;
+ return 0;
}
-/* Decode imm v32(Iz) */
-static void __get_immv32(struct insn *insn)
+/* Decode imm v32(Iz). Return 0 if failed */
+static int __get_immv32(struct insn *insn)
{
switch (insn->opnd_bytes) {
case 2:
insn->immediate.value = get_next(int, insn);
insn->immediate.nbytes = 4;
break;
+ default: /* opnd_bytes must be modified manually */
+ goto err_out;
}
+ return 1;
+
err_out:
- return;
+ return 0;
}
-/* Decode imm v64(Iv/Ov) */
-static void __get_immv(struct insn *insn)
+/* Decode imm v64(Iv/Ov), Return 0 if failed */
+static int __get_immv(struct insn *insn)
{
switch (insn->opnd_bytes) {
case 2:
insn->immediate2.value = get_next(int, insn);
insn->immediate2.nbytes = 4;
break;
+ default: /* opnd_bytes must be modified manually */
+ goto err_out;
}
insn->immediate1.got = insn->immediate2.got = 1;
+ return 1;
err_out:
- return;
+ return 0;
}
/* Decode ptr16:16/32(Ap) */
-static void __get_immptr(struct insn *insn)
+static int __get_immptr(struct insn *insn)
{
switch (insn->opnd_bytes) {
case 2:
break;
case 8:
		/* ptr16:64 does not exist (no segment) */
- return;
+ return 0;
+ default: /* opnd_bytes must be modified manually */
+ goto err_out;
}
insn->immediate2.value = get_next(unsigned short, insn);
insn->immediate2.nbytes = 2;
insn->immediate1.got = insn->immediate2.got = 1;
+ return 1;
err_out:
- return;
+ return 0;
}
/**
insn_get_displacement(insn);
if (inat_has_moffset(insn->attr)) {
- __get_moffset(insn);
+ if (!__get_moffset(insn))
+ goto err_out;
goto done;
}
insn->immediate2.nbytes = 4;
break;
case INAT_IMM_PTR:
- __get_immptr(insn);
+ if (!__get_immptr(insn))
+ goto err_out;
break;
case INAT_IMM_VWORD32:
- __get_immv32(insn);
+ if (!__get_immv32(insn))
+ goto err_out;
break;
case INAT_IMM_VWORD:
- __get_immv(insn);
+ if (!__get_immv(insn))
+ goto err_out;
break;
default:
- break;
+ /* Here, insn must have an immediate, but failed */
+ goto err_out;
}
if (inat_has_second_immediate(insn->attr)) {
insn->immediate2.value = get_next(char, insn);
* hit it), 'max' is the address space maximum (and we return
* -EFAULT if we hit it).
*/
-static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, long max)
+static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
{
long res = 0;
* too? If so, that's ok - we got as much as the user asked for.
*/
if (res >= count)
- return count;
+ return res;
/*
* Nope: we hit the address space limit, and we still had more
if (!q)
return NULL;
- q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL);
+ q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
if (q->id < 0)
goto fail_q;
list_for_each_entry_reverse(rq, &plug->list, queuelist) {
int el_ret;
- (*request_count)++;
+ if (rq->q == q)
+ (*request_count)++;
if (rq->q != q || !blk_rq_merge_ok(rq, bio))
continue;
struct bio_list bl;
struct bio *bio;
- WARN_ON_ONCE(!queue_is_locked(q));
+ queue_lockdep_assert_held(q);
bio_list_init(&bl);
unsigned int cfq_slice_idle;
unsigned int cfq_group_idle;
unsigned int cfq_latency;
+ unsigned int cfq_target_latency;
/*
* Fallback dummy cfqq for extreme OOM conditions
{
struct cfq_rb_root *st = &cfqd->grp_service_tree;
- return cfq_target_latency * cfqg->weight / st->total_weight;
+ return cfqd->cfq_target_latency * cfqg->weight / st->total_weight;
}
static inline unsigned
* to have higher weight. A more accurate thing would be to
* calculate system wide asnc/sync ratio.
*/
- tmp = cfq_target_latency * cfqg_busy_async_queues(cfqd, cfqg);
+ tmp = cfqd->cfq_target_latency *
+ cfqg_busy_async_queues(cfqd, cfqg);
tmp = tmp/cfqd->busy_queues;
slice = min_t(unsigned, slice, tmp);
cfqd->cfq_back_penalty = cfq_back_penalty;
cfqd->cfq_slice[0] = cfq_slice_async;
cfqd->cfq_slice[1] = cfq_slice_sync;
+ cfqd->cfq_target_latency = cfq_target_latency;
cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
cfqd->cfq_slice_idle = cfq_slice_idle;
cfqd->cfq_group_idle = cfq_group_idle;
SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
+SHOW_FUNCTION(cfq_target_latency_show, cfqd->cfq_target_latency, 1);
#undef SHOW_FUNCTION
#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
UINT_MAX, 0);
STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
+STORE_FUNCTION(cfq_target_latency_store, &cfqd->cfq_target_latency, 1, UINT_MAX, 1);
#undef STORE_FUNCTION
#define CFQ_ATTR(name) \
CFQ_ATTR(slice_idle),
CFQ_ATTR(group_idle),
CFQ_ATTR(low_latency),
+ CFQ_ATTR(target_latency),
__ATTR_NULL
};
acpi_irq_handler = handler;
acpi_irq_context = context;
- if (request_threaded_irq(irq, NULL, acpi_irq, IRQF_SHARED, "acpi",
- acpi_irq)) {
+ if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
acpi_irq_handler = NULL;
return AE_NOT_ACQUIRED;
/*
* Hooks to provide runtime PM of the pclk (bus clock). It is safe to
* enable/disable the bus clock at runtime PM suspend/resume as this
- * does not result in loss of context. However, disabling vcore power
- * would do, so we leave that to the driver.
+ * does not result in loss of context.
*/
static int amba_pm_runtime_suspend(struct device *dev)
{
clk_put(pclk);
}
-static int amba_get_enable_vcore(struct amba_device *pcdev)
-{
- struct regulator *vcore = regulator_get(&pcdev->dev, "vcore");
- int ret;
-
- pcdev->vcore = vcore;
-
- if (IS_ERR(vcore)) {
- /* It is OK not to supply a vcore regulator */
- if (PTR_ERR(vcore) == -ENODEV)
- return 0;
- return PTR_ERR(vcore);
- }
-
- ret = regulator_enable(vcore);
- if (ret) {
- regulator_put(vcore);
- pcdev->vcore = ERR_PTR(-ENODEV);
- }
-
- return ret;
-}
-
-static void amba_put_disable_vcore(struct amba_device *pcdev)
-{
- struct regulator *vcore = pcdev->vcore;
-
- if (!IS_ERR(vcore)) {
- regulator_disable(vcore);
- regulator_put(vcore);
- }
-}
-
/*
* These are the device model conversion veneers; they convert the
* device model structures to our more specific structures.
int ret;
do {
- ret = amba_get_enable_vcore(pcdev);
- if (ret)
- break;
-
ret = amba_get_enable_pclk(pcdev);
if (ret)
break;
pm_runtime_put_noidle(dev);
amba_put_disable_pclk(pcdev);
- amba_put_disable_vcore(pcdev);
} while (0);
return ret;
pm_runtime_put_noidle(dev);
amba_put_disable_pclk(pcdev);
- amba_put_disable_vcore(pcdev);
return ret;
}
#include <linux/sys_soc.h>
#include <linux/err.h>
-static DEFINE_IDR(soc_ida);
+static DEFINE_IDA(soc_ida);
static DEFINE_SPINLOCK(soc_lock);
static ssize_t soc_info_get(struct device *dev,
static int __init soc_bus_register(void)
{
- spin_lock_init(&soc_lock);
-
return bus_register(&soc_bus_type);
}
core_initcall(soc_bus_register);
config BCMA_DRIVER_PCI_HOSTMODE
bool "Driver for PCI core working in hostmode"
- depends on BCMA && MIPS
+ depends on BCMA && MIPS && BCMA_HOST_PCI
help
PCI core hostmode operation (external PCI bus).
*/
#include "bcma_private.h"
+#include <linux/pci.h>
#include <linux/export.h>
#include <linux/bcma/bcma.h>
#include <asm/paccess.h>
sh->can_queue = cciss_tape_cmds;
sh->sg_tablesize = h->maxsgentries;
sh->max_cmd_len = MAX_COMMAND_SIZE;
+ sh->max_sectors = h->cciss_max_sectors;
((struct cciss_scsi_adapter_data_t *)
h->scsi_ctlr)->scsi_host = sh;
/* track how many SG entries we are using */
if (request_nsgs > h->maxSG)
h->maxSG = request_nsgs;
- c->Header.SGTotal = (__u8) request_nsgs + chained;
+ c->Header.SGTotal = (u16) request_nsgs + chained;
if (request_nsgs > h->max_cmd_sgentries)
c->Header.SGList = h->max_cmd_sgentries;
else
config BLK_DEV_PCIESSD_MTIP32XX
tristate "Block Device Driver for Micron PCIe SSDs"
- depends on HOTPLUG_PCI_PCIE
+ depends on PCI
help
This enables the block driver for Micron PCIe SSDs.
#include <linux/idr.h>
#include <linux/kthread.h>
#include <../drivers/ata/ahci.h>
+#include <linux/export.h>
#include "mtip32xx.h"
#define HW_CMD_SLOT_SZ (MTIP_MAX_COMMAND_SLOTS * 32)
#define HW_PORT_PRIV_DMA_SZ \
(HW_CMD_SLOT_SZ + HW_CMD_TBL_AR_SZ + AHCI_RX_FIS_SZ)
+#define HOST_CAP_NZDMA (1 << 19)
#define HOST_HSORG 0xFC
#define HSORG_DISABLE_SLOTGRP_INTR (1<<24)
#define HSORG_DISABLE_SLOTGRP_PXIS (1<<16)
int group = 0, commandslot = 0, commandindex = 0;
struct mtip_cmd *command;
struct mtip_port *port = dd->port;
+ static int in_progress;
+
+ if (in_progress)
+ return;
+
+ in_progress = 1;
for (group = 0; group < 4; group++) {
for (commandslot = 0; commandslot < 32; commandslot++) {
up(&port->cmd_slot);
- atomic_set(&dd->drv_cleanup_done, true);
+ set_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag);
+ in_progress = 0;
}
/*
&& time_before(jiffies, timeout))
mdelay(1);
+ if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))
+ return -1;
+
if (readl(dd->mmio + HOST_CTL) & HOST_RESET)
return -1;
port->cmd_issue[MTIP_TAG_INDEX(tag)]);
spin_unlock_irqrestore(&port->cmd_issue_lock, flags);
+
+ /* Set the command's timeout value.*/
+ port->commands[tag].comp_time = jiffies + msecs_to_jiffies(
+ MTIP_NCQ_COMMAND_TIMEOUT_MS);
}
/*
writel(0xFFFFFFFF, port->completed[i]);
/* Clear any pending interrupts for this port */
- writel(readl(port->mmio + PORT_IRQ_STAT), port->mmio + PORT_IRQ_STAT);
+ writel(readl(port->dd->mmio + PORT_IRQ_STAT),
+ port->dd->mmio + PORT_IRQ_STAT);
+
+ /* Clear any pending interrupts on the HBA. */
+ writel(readl(port->dd->mmio + HOST_IRQ_STAT),
+ port->dd->mmio + HOST_IRQ_STAT);
/* Enable port interrupts */
writel(DEF_PORT_IRQ, port->mmio + PORT_IRQ_MASK);
&& time_before(jiffies, timeout))
;
+ if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
+ return;
+
/*
* Chip quirk: escalate to hba reset if
* PxCMD.CR not clear after 500 ms
while (time_before(jiffies, timeout))
;
+ if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
+ return;
+
/* Clear PxSCTL.DET */
writel(readl(port->mmio + PORT_SCR_CTL) & ~1,
port->mmio + PORT_SCR_CTL);
&& time_before(jiffies, timeout))
;
+ if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
+ return;
+
if ((readl(port->mmio + PORT_SCR_STAT) & 0x01) == 0)
dev_warn(&port->dd->pdev->dev,
"COM reset failed\n");
- /* Clear SError, the PxSERR.DIAG.x should be set so clear it */
- writel(readl(port->mmio + PORT_SCR_ERR), port->mmio + PORT_SCR_ERR);
+ mtip_init_port(port);
+ mtip_start_port(port);
- /* Enable the DMA engine */
- mtip_enable_engine(port, 1);
+}
+
+/*
+ * Helper function for tag logging
+ */
+static void print_tags(struct driver_data *dd,
+ char *msg,
+ unsigned long *tagbits,
+ int cnt)
+{
+ unsigned char tagmap[128];
+ int group, tagmap_len = 0;
+
+ memset(tagmap, 0, sizeof(tagmap));
+ for (group = SLOTBITS_IN_LONGS; group > 0; group--)
+ tagmap_len = sprintf(tagmap + tagmap_len, "%016lX ",
+ tagbits[group-1]);
+ dev_warn(&dd->pdev->dev,
+ "%d command(s) %s: tagmap [%s]", cnt, msg, tagmap);
}
/*
int tag, cmdto_cnt = 0;
unsigned int bit, group;
unsigned int num_command_slots = port->dd->slot_groups * 32;
+ unsigned long to, tagaccum[SLOTBITS_IN_LONGS];
if (unlikely(!port))
return;
- if (atomic_read(&port->dd->resumeflag) == true) {
+ if (test_bit(MTIP_DDF_RESUME_BIT, &port->dd->dd_flag)) {
mod_timer(&port->cmd_timer,
jiffies + msecs_to_jiffies(30000));
return;
}
+ /* clear the tag accumulator */
+ memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));
for (tag = 0; tag < num_command_slots; tag++) {
/*
command = &port->commands[tag];
fis = (struct host_to_dev_fis *) command->command;
- dev_warn(&port->dd->pdev->dev,
- "Timeout for command tag %d\n", tag);
-
+ set_bit(tag, tagaccum);
cmdto_cnt++;
if (cmdto_cnt == 1)
- set_bit(MTIP_FLAG_EH_ACTIVE_BIT, &port->flags);
+ set_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
/*
* Clear the completed bit. This should prevent
}
}
- if (cmdto_cnt) {
- dev_warn(&port->dd->pdev->dev,
- "%d commands timed out: restarting port",
- cmdto_cnt);
+ if (cmdto_cnt && !test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
+ print_tags(port->dd, "timed out", tagaccum, cmdto_cnt);
+
mtip_restart_port(port);
- clear_bit(MTIP_FLAG_EH_ACTIVE_BIT, &port->flags);
+ clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
wake_up_interruptible(&port->svc_wait);
}
+ if (port->ic_pause_timer) {
+ to = port->ic_pause_timer + msecs_to_jiffies(1000);
+ if (time_after(jiffies, to)) {
+ if (!test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
+ port->ic_pause_timer = 0;
+ clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
+ clear_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags);
+ clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
+ wake_up_interruptible(&port->svc_wait);
+ }
+
+
+ }
+ }
+
/* Restart the timer */
mod_timer(&port->cmd_timer,
jiffies + msecs_to_jiffies(MTIP_TIMEOUT_CHECK_PERIOD));
complete(waiting);
}
-/*
- * Helper function for tag logging
- */
-static void print_tags(struct driver_data *dd,
- char *msg,
- unsigned long *tagbits)
+static void mtip_null_completion(struct mtip_port *port,
+ int tag,
+ void *data,
+ int status)
{
- unsigned int tag, count = 0;
-
- for (tag = 0; tag < (dd->slot_groups) * 32; tag++) {
- if (test_bit(tag, tagbits))
- count++;
- }
- if (count)
- dev_info(&dd->pdev->dev, "%s [%i tags]\n", msg, count);
+ return;
}
+static int mtip_read_log_page(struct mtip_port *port, u8 page, u16 *buffer,
+ dma_addr_t buffer_dma, unsigned int sectors);
+static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id,
+ struct smart_attr *attrib);
/*
* Handle an error.
*
*/
static void mtip_handle_tfe(struct driver_data *dd)
{
- int group, tag, bit, reissue;
+ int group, tag, bit, reissue, rv;
struct mtip_port *port;
- struct mtip_cmd *command;
+ struct mtip_cmd *cmd;
u32 completed;
struct host_to_dev_fis *fis;
unsigned long tagaccum[SLOTBITS_IN_LONGS];
+ unsigned int cmd_cnt = 0;
+ unsigned char *buf;
+ char *fail_reason = NULL;
+ int fail_all_ncq_write = 0, fail_all_ncq_cmds = 0;
dev_warn(&dd->pdev->dev, "Taskfile error\n");
/* Stop the timer to prevent command timeouts. */
del_timer(&port->cmd_timer);
+ /* clear the tag accumulator */
+ memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));
+
/* Set eh_active */
- set_bit(MTIP_FLAG_EH_ACTIVE_BIT, &port->flags);
+ set_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
/* Loop through all the groups */
for (group = 0; group < dd->slot_groups; group++) {
/* clear completed status register in the hardware.*/
writel(completed, port->completed[group]);
- /* clear the tag accumulator */
- memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));
-
/* Process successfully completed commands */
for (bit = 0; bit < 32 && completed; bit++) {
if (!(completed & (1<<bit)))
if (tag == MTIP_TAG_INTERNAL)
continue;
- command = &port->commands[tag];
- if (likely(command->comp_func)) {
+ cmd = &port->commands[tag];
+ if (likely(cmd->comp_func)) {
set_bit(tag, tagaccum);
- atomic_set(&port->commands[tag].active, 0);
- command->comp_func(port,
+ cmd_cnt++;
+ atomic_set(&cmd->active, 0);
+ cmd->comp_func(port,
tag,
- command->comp_data,
+ cmd->comp_data,
0);
} else {
dev_err(&port->dd->pdev->dev,
}
}
}
- print_tags(dd, "TFE tags completed:", tagaccum);
+
+ print_tags(dd, "completed (TFE)", tagaccum, cmd_cnt);
/* Restart the port */
mdelay(20);
mtip_restart_port(port);
+ /* Trying to determine the cause of the error */
+ rv = mtip_read_log_page(dd->port, ATA_LOG_SATA_NCQ,
+ dd->port->log_buf,
+ dd->port->log_buf_dma, 1);
+ if (rv) {
+ dev_warn(&dd->pdev->dev,
+ "Error in READ LOG EXT (10h) command\n");
+ /* non-critical error, don't fail the load */
+ } else {
+ buf = (unsigned char *)dd->port->log_buf;
+ if (buf[259] & 0x1) {
+ dev_info(&dd->pdev->dev,
+ "Write protect bit is set.\n");
+ set_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag);
+ fail_all_ncq_write = 1;
+ fail_reason = "write protect";
+ }
+ if (buf[288] == 0xF7) {
+ dev_info(&dd->pdev->dev,
+ "Exceeded Tmax, drive in thermal shutdown.\n");
+ set_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag);
+ fail_all_ncq_cmds = 1;
+ fail_reason = "thermal shutdown";
+ }
+ if (buf[288] == 0xBF) {
+ dev_info(&dd->pdev->dev,
+ "Drive indicates rebuild has failed.\n");
+ fail_all_ncq_cmds = 1;
+ fail_reason = "rebuild failed";
+ }
+ }
+
/* clear the tag accumulator */
memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));
for (bit = 0; bit < 32; bit++) {
reissue = 1;
tag = (group << 5) + bit;
+ cmd = &port->commands[tag];
/* If the active bit is set re-issue the command */
- if (atomic_read(&port->commands[tag].active) == 0)
+ if (atomic_read(&cmd->active) == 0)
continue;
- fis = (struct host_to_dev_fis *)
- port->commands[tag].command;
+ fis = (struct host_to_dev_fis *)cmd->command;
/* Should re-issue? */
if (tag == MTIP_TAG_INTERNAL ||
fis->command == ATA_CMD_SET_FEATURES)
reissue = 0;
+ else {
+ if (fail_all_ncq_cmds ||
+ (fail_all_ncq_write &&
+ fis->command == ATA_CMD_FPDMA_WRITE)) {
+ dev_warn(&dd->pdev->dev,
+ " Fail: %s w/tag %d [%s].\n",
+ fis->command == ATA_CMD_FPDMA_WRITE ?
+ "write" : "read",
+ tag,
+ fail_reason != NULL ?
+ fail_reason : "unknown");
+ atomic_set(&cmd->active, 0);
+ if (cmd->comp_func) {
+ cmd->comp_func(port, tag,
+ cmd->comp_data,
+ -ENODATA);
+ }
+ continue;
+ }
+ }
/*
* First check if this command has
* exceeded its retries.
*/
- if (reissue &&
- (port->commands[tag].retries-- > 0)) {
+ if (reissue && (cmd->retries-- > 0)) {
set_bit(tag, tagaccum);
- /* Update the timeout value. */
- port->commands[tag].comp_time =
- jiffies + msecs_to_jiffies(
- MTIP_NCQ_COMMAND_TIMEOUT_MS);
/* Re-issue the command. */
mtip_issue_ncq_command(port, tag);
/* Retire a command that will not be reissued */
dev_warn(&port->dd->pdev->dev,
"retiring tag %d\n", tag);
- atomic_set(&port->commands[tag].active, 0);
+ atomic_set(&cmd->active, 0);
- if (port->commands[tag].comp_func)
- port->commands[tag].comp_func(
+ if (cmd->comp_func)
+ cmd->comp_func(
port,
tag,
- port->commands[tag].comp_data,
+ cmd->comp_data,
PORT_IRQ_TF_ERR);
else
dev_warn(&port->dd->pdev->dev,
tag);
}
}
- print_tags(dd, "TFE tags reissued:", tagaccum);
+ print_tags(dd, "reissued (TFE)", tagaccum, cmd_cnt);
/* clear eh_active */
- clear_bit(MTIP_FLAG_EH_ACTIVE_BIT, &port->flags);
+ clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
wake_up_interruptible(&port->svc_wait);
mod_timer(&port->cmd_timer,
struct mtip_port *port = dd->port;
struct mtip_cmd *cmd = &port->commands[MTIP_TAG_INTERNAL];
- if (test_bit(MTIP_FLAG_IC_ACTIVE_BIT, &port->flags) &&
+ if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags) &&
(cmd != NULL) && !(readl(port->cmd_issue[MTIP_TAG_INTERNAL])
& (1 << MTIP_TAG_INTERNAL))) {
if (cmd->comp_func) {
}
}
- dev_warn(&dd->pdev->dev, "IRQ status 0x%x ignored.\n", port_stat);
-
return;
}
/* don't proceed further */
return IRQ_HANDLED;
}
+ if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
+ &dd->dd_flag))
+ return rv;
mtip_process_errors(dd, port_stat & PORT_IRQ_ERR);
}
port->cmd_issue[MTIP_TAG_INDEX(tag)]);
}
+static bool mtip_pause_ncq(struct mtip_port *port,
+ struct host_to_dev_fis *fis)
+{
+ struct host_to_dev_fis *reply;
+ unsigned long task_file_data;
+
+ reply = port->rxfis + RX_FIS_D2H_REG;
+ task_file_data = readl(port->mmio+PORT_TFDATA);
+
+ if ((task_file_data & 1) || (fis->command == ATA_CMD_SEC_ERASE_UNIT))
+ return false;
+
+ if (fis->command == ATA_CMD_SEC_ERASE_PREP) {
+ set_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
+ port->ic_pause_timer = jiffies;
+ return true;
+ } else if ((fis->command == ATA_CMD_DOWNLOAD_MICRO) &&
+ (fis->features == 0x03)) {
+ set_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags);
+ port->ic_pause_timer = jiffies;
+ return true;
+ } else if ((fis->command == ATA_CMD_SEC_ERASE_UNIT) ||
+ ((fis->command == 0xFC) &&
+ (fis->features == 0x27 || fis->features == 0x72 ||
+ fis->features == 0x62 || fis->features == 0x26))) {
+ /* Com reset after secure erase or lowlevel format */
+ mtip_restart_port(port);
+ return false;
+ }
+
+ return false;
+}
+
/*
* Wait for port to quiesce
*
to = jiffies + msecs_to_jiffies(timeout);
do {
- if (test_bit(MTIP_FLAG_SVC_THD_ACTIVE_BIT, &port->flags) &&
- test_bit(MTIP_FLAG_ISSUE_CMDS_BIT, &port->flags)) {
+ if (test_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags) &&
+ test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) {
msleep(20);
continue; /* svc thd is actively issuing commands */
}
+ if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
+ return -EFAULT;
/*
* Ignore s_active bit 0 of array element 0.
* This bit will always be set
* -EAGAIN Time out waiting for command to complete.
*/
static int mtip_exec_internal_command(struct mtip_port *port,
- void *fis,
+ struct host_to_dev_fis *fis,
int fis_len,
dma_addr_t buffer,
int buf_len,
{
struct mtip_cmd_sg *command_sg;
DECLARE_COMPLETION_ONSTACK(wait);
- int rv = 0;
+ int rv = 0, ready2go = 1;
struct mtip_cmd *int_cmd = &port->commands[MTIP_TAG_INTERNAL];
+ unsigned long to;
/* Make sure the buffer is 8 byte aligned. This is asic specific. */
if (buffer & 0x00000007) {
return -EFAULT;
}
- /* Only one internal command should be running at a time */
- if (test_and_set_bit(MTIP_TAG_INTERNAL, port->allocated)) {
+ to = jiffies + msecs_to_jiffies(timeout);
+ do {
+ ready2go = !test_and_set_bit(MTIP_TAG_INTERNAL,
+ port->allocated);
+ if (ready2go)
+ break;
+ mdelay(100);
+ } while (time_before(jiffies, to));
+ if (!ready2go) {
dev_warn(&port->dd->pdev->dev,
- "Internal command already active\n");
+ "Internal cmd active. new cmd [%02X]\n", fis->command);
return -EBUSY;
}
- set_bit(MTIP_FLAG_IC_ACTIVE_BIT, &port->flags);
+ set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
+ port->ic_pause_timer = 0;
+
+ if (fis->command == ATA_CMD_SEC_ERASE_UNIT)
+ clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
+ else if (fis->command == ATA_CMD_DOWNLOAD_MICRO)
+ clear_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags);
if (atomic == GFP_KERNEL) {
- /* wait for io to complete if non atomic */
- if (mtip_quiesce_io(port, 5000) < 0) {
- dev_warn(&port->dd->pdev->dev,
- "Failed to quiesce IO\n");
- release_slot(port, MTIP_TAG_INTERNAL);
- clear_bit(MTIP_FLAG_IC_ACTIVE_BIT, &port->flags);
- wake_up_interruptible(&port->svc_wait);
- return -EBUSY;
+ if (fis->command != ATA_CMD_STANDBYNOW1) {
+ /* wait for io to complete if non atomic */
+ if (mtip_quiesce_io(port, 5000) < 0) {
+ dev_warn(&port->dd->pdev->dev,
+ "Failed to quiesce IO\n");
+ release_slot(port, MTIP_TAG_INTERNAL);
+ clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
+ wake_up_interruptible(&port->svc_wait);
+ return -EBUSY;
+ }
}
/* Set the completion function and data for the command. */
} else {
/* Clear completion - we're going to poll */
int_cmd->comp_data = NULL;
- int_cmd->comp_func = NULL;
+ int_cmd->comp_func = mtip_null_completion;
}
/* Copy the command to the command table */
"Internal command did not complete [%d] "
"within timeout of %lu ms\n",
atomic, timeout);
+ if (mtip_check_surprise_removal(port->dd->pdev) ||
+ test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
+ &port->dd->dd_flag)) {
+ rv = -ENXIO;
+ goto exec_ic_exit;
+ }
rv = -EAGAIN;
}
& (1 << MTIP_TAG_INTERNAL)) {
dev_warn(&port->dd->pdev->dev,
"Retiring internal command but CI is 1.\n");
+ if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
+ &port->dd->dd_flag)) {
+ hba_reset_nosleep(port->dd);
+ rv = -ENXIO;
+ } else {
+ mtip_restart_port(port);
+ rv = -EAGAIN;
+ }
+ goto exec_ic_exit;
}
} else {
/* Spin for <timeout> checking if command still outstanding */
timeout = jiffies + msecs_to_jiffies(timeout);
-
- while ((readl(
- port->cmd_issue[MTIP_TAG_INTERNAL])
- & (1 << MTIP_TAG_INTERNAL))
- && time_before(jiffies, timeout))
- ;
+ while ((readl(port->cmd_issue[MTIP_TAG_INTERNAL])
+ & (1 << MTIP_TAG_INTERNAL))
+ && time_before(jiffies, timeout)) {
+ if (mtip_check_surprise_removal(port->dd->pdev)) {
+ rv = -ENXIO;
+ goto exec_ic_exit;
+ }
+ if ((fis->command != ATA_CMD_STANDBYNOW1) &&
+ test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
+ &port->dd->dd_flag)) {
+ rv = -ENXIO;
+ goto exec_ic_exit;
+ }
+ }
if (readl(port->cmd_issue[MTIP_TAG_INTERNAL])
& (1 << MTIP_TAG_INTERNAL)) {
dev_err(&port->dd->pdev->dev,
- "Internal command did not complete [%d]\n",
- atomic);
+ "Internal command did not complete [atomic]\n");
rv = -EAGAIN;
+ if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
+ &port->dd->dd_flag)) {
+ hba_reset_nosleep(port->dd);
+ rv = -ENXIO;
+ } else {
+ mtip_restart_port(port);
+ rv = -EAGAIN;
+ }
}
}
-
+exec_ic_exit:
/* Clear the allocated and active bits for the internal command. */
atomic_set(&int_cmd->active, 0);
release_slot(port, MTIP_TAG_INTERNAL);
- clear_bit(MTIP_FLAG_IC_ACTIVE_BIT, &port->flags);
+ if (rv >= 0 && mtip_pause_ncq(port, fis)) {
+ /* NCQ paused */
+ return rv;
+ }
+ clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
wake_up_interruptible(&port->svc_wait);
return rv;
int rv = 0;
struct host_to_dev_fis fis;
+ if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
+ return -EFAULT;
+
/* Build the FIS. */
memset(&fis, 0, sizeof(struct host_to_dev_fis));
fis.type = 0x27;
{
int rv;
struct host_to_dev_fis fis;
+ unsigned long start;
/* Build the FIS. */
memset(&fis, 0, sizeof(struct host_to_dev_fis));
fis.opts = 1 << 7;
fis.command = ATA_CMD_STANDBYNOW1;
- /* Execute the command. Use a 15-second timeout for large drives. */
+ start = jiffies;
rv = mtip_exec_internal_command(port,
&fis,
5,
0,
0,
0,
- GFP_KERNEL,
+ GFP_ATOMIC,
15000);
+ dbg_printk(MTIP_DRV_NAME "Time taken to complete standby cmd: %d ms\n",
+ jiffies_to_msecs(jiffies - start));
+ if (rv)
+ dev_warn(&port->dd->pdev->dev,
+ "STANDBY IMMEDIATE command failed.\n");
+
+ return rv;
+}
+
+/*
+ * Issue a READ LOG EXT command to the device.
+ *
+ * @port pointer to the port structure.
+ * @page page number to fetch
+ * @buffer pointer to buffer
+ * @buffer_dma dma address corresponding to @buffer
+ * @sectors page length to fetch, in sectors
+ *
+ * return value
+ * @rv return value from mtip_exec_internal_command()
+ */
+static int mtip_read_log_page(struct mtip_port *port, u8 page, u16 *buffer,
+ dma_addr_t buffer_dma, unsigned int sectors)
+{
+ struct host_to_dev_fis fis;
+
+ memset(&fis, 0, sizeof(struct host_to_dev_fis));
+ fis.type = 0x27;
+ fis.opts = 1 << 7;
+ fis.command = ATA_CMD_READ_LOG_EXT;
+ fis.sect_count = sectors & 0xFF;
+ fis.sect_cnt_ex = (sectors >> 8) & 0xFF;
+ fis.lba_low = page;
+ fis.lba_mid = 0;
+ fis.device = ATA_DEVICE_OBS;
+
+ memset(buffer, 0, sectors * ATA_SECT_SIZE);
+
+ return mtip_exec_internal_command(port,
+ &fis,
+ 5,
+ buffer_dma,
+ sectors * ATA_SECT_SIZE,
+ 0,
+ GFP_ATOMIC,
+ MTIP_INTERNAL_COMMAND_TIMEOUT_MS);
+}
+
+/*
+ * Issue a SMART READ DATA command to the device.
+ *
+ * @port pointer to the port structure.
+ * @buffer pointer to buffer
+ * @buffer_dma dma address corresponding to @buffer
+ *
+ * return value
+ * @rv return value from mtip_exec_internal_command()
+ */
+static int mtip_get_smart_data(struct mtip_port *port, u8 *buffer,
+ dma_addr_t buffer_dma)
+{
+ struct host_to_dev_fis fis;
+
+ memset(&fis, 0, sizeof(struct host_to_dev_fis));
+ fis.type = 0x27;
+ fis.opts = 1 << 7;
+ fis.command = ATA_CMD_SMART;
+ fis.features = 0xD0;
+ fis.sect_count = 1;
+ fis.lba_mid = 0x4F;
+ fis.lba_hi = 0xC2;
+ fis.device = ATA_DEVICE_OBS;
+
+ return mtip_exec_internal_command(port,
+ &fis,
+ 5,
+ buffer_dma,
+ ATA_SECT_SIZE,
+ 0,
+ GFP_ATOMIC,
+ 15000);
+}
+
+/*
+ * Get the value of a smart attribute
+ *
+ * @port pointer to the port structure
+ * @id attribute number
+ * @attrib pointer to return attrib information corresponding to @id
+ *
+ * return value
+ * -EINVAL NULL buffer passed or unsupported attribute @id.
+ * -EPERM Identify data not valid, SMART not supported or not enabled
+ */
+static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id,
+ struct smart_attr *attrib)
+{
+ int rv, i;
+ struct smart_attr *pattr;
+
+ if (!attrib)
+ return -EINVAL;
+
+ if (!port->identify_valid) {
+ dev_warn(&port->dd->pdev->dev, "IDENTIFY DATA not valid\n");
+ return -EPERM;
+ }
+ if (!(port->identify[82] & 0x1)) {
+ dev_warn(&port->dd->pdev->dev, "SMART not supported\n");
+ return -EPERM;
+ }
+ if (!(port->identify[85] & 0x1)) {
+ dev_warn(&port->dd->pdev->dev, "SMART not enabled\n");
+ return -EPERM;
+ }
+
+ memset(port->smart_buf, 0, ATA_SECT_SIZE);
+ rv = mtip_get_smart_data(port, port->smart_buf, port->smart_buf_dma);
+ if (rv) {
+ dev_warn(&port->dd->pdev->dev, "Failed to ge SMART data\n");
+ return rv;
+ }
+
+ pattr = (struct smart_attr *)(port->smart_buf + 2);
+ for (i = 0; i < 29; i++, pattr++)
+ if (pattr->attr_id == id) {
+ memcpy(attrib, pattr, sizeof(struct smart_attr));
+ break;
+ }
+
+ if (i == 29) {
+ dev_warn(&port->dd->pdev->dev,
+ "Query for invalid SMART attribute ID\n");
+ rv = -EINVAL;
+ }
return rv;
}
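
For reference, the scan above assumes the layout described by struct smart_attr later in this series: the SMART READ DATA page is one 512-byte sector, the vendor attribute table starts at byte 2, and each entry is a packed 12-byte record, of which the driver examines 29. A minimal user-space sketch of the same table walk under those assumptions (the struct and function names below are illustrative, not part of the patch):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Mirrors the packed 12-byte smart_attr layout used by the driver. */
    struct smart_entry {
        uint8_t  attr_id;
        uint16_t flags;
        uint8_t  cur;
        uint8_t  worst;
        uint32_t data;
        uint8_t  res[3];
    } __attribute__((packed));

    /*
     * Same table walk as mtip_get_smart_attr(): the attribute table starts
     * at byte 2 of the 512-byte SMART READ DATA page and holds 29 entries.
     */
    static int find_smart_attr(const uint8_t *page, unsigned int id,
                               struct smart_entry *out)
    {
        const struct smart_entry *e = (const struct smart_entry *)(page + 2);
        int i;

        for (i = 0; i < 29; i++, e++) {
            if (e->attr_id == id) {
                memcpy(out, e, sizeof(*out));
                return 0;
            }
        }
        return -1;
    }

    int main(void)
    {
        uint8_t page[512] = { 0 };
        struct smart_entry fake = { .attr_id = 242, .cur = 87, .data = 1234 };
        struct smart_entry found;

        /* Plant one fake entry in slot 0 purely for demonstration. */
        memcpy(page + 2, &fake, sizeof(fake));

        if (!find_smart_attr(page, 242, &found))
            printf("attr 242: cur=%d data=%u\n", found.cur, (unsigned)found.data);
        return 0;
    }
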
fis.cyl_hi = command[5];
fis.device = command[6] & ~0x10; /* Clear the dev bit*/
-
- dbg_printk(MTIP_DRV_NAME "%s: User Command: cmd %x, feat %x, "
- "nsect %x, sect %x, lcyl %x, "
- "hcyl %x, sel %x\n",
+ dbg_printk(MTIP_DRV_NAME " %s: User Command: cmd %x, feat %x, nsect %x, sect %x, lcyl %x, hcyl %x, sel %x\n",
__func__,
command[0],
command[1],
command[4] = reply->cyl_low;
command[5] = reply->cyl_hi;
- dbg_printk(MTIP_DRV_NAME "%s: Completion Status: stat %x, "
- "err %x , cyl_lo %x cyl_hi %x\n",
+ dbg_printk(MTIP_DRV_NAME " %s: Completion Status: stat %x, err %x , cyl_lo %x cyl_hi %x\n",
__func__,
command[0],
command[1],
}
dbg_printk(MTIP_DRV_NAME
- "%s: User Command: cmd %x, sect %x, "
+ " %s: User Command: cmd %x, sect %x, "
"feat %x, sectcnt %x\n",
__func__,
command[0],
command[2] = command[3];
dbg_printk(MTIP_DRV_NAME
- "%s: Completion Status: stat %x, "
+ " %s: Completion Status: stat %x, "
"err %x, cmd %x\n",
__func__,
command[0],
}
dbg_printk(MTIP_DRV_NAME
- "taskfile: cmd %x, feat %x, nsect %x,"
+ " %s: cmd %x, feat %x, nsect %x,"
" sect/lbal %x, lcyl/lbam %x, hcyl/lbah %x,"
" head/dev %x\n",
+ __func__,
fis.command,
fis.features,
fis.sect_count,
switch (fis.command) {
case ATA_CMD_DOWNLOAD_MICRO:
- /* Change timeout for Download Microcode to 60 seconds.*/
- timeout = 60000;
+ /* Change timeout for Download Microcode to 2 minutes */
+ timeout = 120000;
break;
case ATA_CMD_SEC_ERASE_UNIT:
/* Change timeout for Security Erase Unit to 4 minutes.*/
timeout = 10000;
break;
case ATA_CMD_SMART:
- /* Change timeout for vendor unique command to 10 secs */
- timeout = 10000;
+ /* Change timeout for vendor unique command to 15 secs */
+ timeout = 15000;
break;
default:
timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS;
req_task->hob_ports[1] = reply->features_ex;
req_task->hob_ports[2] = reply->sect_cnt_ex;
}
-
- /* Com rest after secure erase or lowlevel format */
- if (((fis.command == ATA_CMD_SEC_ERASE_UNIT) ||
- ((fis.command == 0xFC) &&
- (fis.features == 0x27 || fis.features == 0x72 ||
- fis.features == 0x62 || fis.features == 0x26))) &&
- !(reply->command & 1)) {
- mtip_restart_port(dd->port);
- }
-
dbg_printk(MTIP_DRV_NAME
- "%s: Completion: stat %x,"
+ " %s: Completion: stat %x,"
"err %x, sect_cnt %x, lbalo %x,"
"lbamid %x, lbahi %x, dev %x\n",
__func__,
struct host_to_dev_fis *fis;
struct mtip_port *port = dd->port;
struct mtip_cmd *command = &port->commands[tag];
+ int dma_dir = (dir == READ) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
/* Map the scatter list for DMA access */
- if (dir == READ)
- nents = dma_map_sg(&dd->pdev->dev, command->sg,
- nents, DMA_FROM_DEVICE);
- else
- nents = dma_map_sg(&dd->pdev->dev, command->sg,
- nents, DMA_TO_DEVICE);
+ nents = dma_map_sg(&dd->pdev->dev, command->sg, nents, dma_dir);
command->scatter_ents = nents;
*/
command->comp_data = dd;
command->comp_func = mtip_async_complete;
- command->direction = (dir == READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ command->direction = dma_dir;
/*
* Set the completion function and data for the command passed
* To prevent this command from being issued
* if an internal command is in progress or error handling is active.
*/
- if (unlikely(test_bit(MTIP_FLAG_IC_ACTIVE_BIT, &port->flags) ||
- test_bit(MTIP_FLAG_EH_ACTIVE_BIT, &port->flags))) {
+ if (port->flags & MTIP_PF_PAUSE_IO) {
set_bit(tag, port->cmds_to_issue);
- set_bit(MTIP_FLAG_ISSUE_CMDS_BIT, &port->flags);
+ set_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags);
return;
}
/* Issue the command to the hardware */
mtip_issue_ncq_command(port, tag);
- /* Set the command's timeout value.*/
- port->commands[tag].comp_time = jiffies + msecs_to_jiffies(
- MTIP_NCQ_COMMAND_TIMEOUT_MS);
+ return;
}
/*
down(&dd->port->cmd_slot);
*tag = get_slot(dd->port);
+ if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))) {
+ up(&dd->port->cmd_slot);
+ return NULL;
+ }
if (unlikely(*tag < 0))
return NULL;
* return value
* The size, in bytes, of the data copied into buf.
*/
-static ssize_t hw_show_registers(struct device *dev,
+static ssize_t mtip_hw_show_registers(struct device *dev,
struct device_attribute *attr,
char *buf)
{
int size = 0;
int n;
- size += sprintf(&buf[size], "%s:\ns_active:\n", __func__);
+ size += sprintf(&buf[size], "S ACTive:\n");
for (n = 0; n < dd->slot_groups; n++)
size += sprintf(&buf[size], "0x%08x\n",
group_allocated);
}
- size += sprintf(&buf[size], "completed:\n");
+ size += sprintf(&buf[size], "Completed:\n");
for (n = 0; n < dd->slot_groups; n++)
size += sprintf(&buf[size], "0x%08x\n",
readl(dd->port->completed[n]));
- size += sprintf(&buf[size], "PORT_IRQ_STAT 0x%08x\n",
+ size += sprintf(&buf[size], "PORT IRQ STAT : 0x%08x\n",
readl(dd->port->mmio + PORT_IRQ_STAT));
- size += sprintf(&buf[size], "HOST_IRQ_STAT 0x%08x\n",
+ size += sprintf(&buf[size], "HOST IRQ STAT : 0x%08x\n",
readl(dd->mmio + HOST_IRQ_STAT));
return size;
}
-static DEVICE_ATTR(registers, S_IRUGO, hw_show_registers, NULL);
+
+static ssize_t mtip_hw_show_status(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct driver_data *dd = dev_to_disk(dev)->private_data;
+ int size = 0;
+
+ if (test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))
+ size += sprintf(buf, "%s", "thermal_shutdown\n");
+ else if (test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag))
+ size += sprintf(buf, "%s", "write_protect\n");
+ else
+ size += sprintf(buf, "%s", "online\n");
+
+ return size;
+}
+
+static DEVICE_ATTR(registers, S_IRUGO, mtip_hw_show_registers, NULL);
+static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL);
/*
* Create the sysfs related attributes.
if (sysfs_create_file(kobj, &dev_attr_registers.attr))
dev_warn(&dd->pdev->dev,
- "Error creating registers sysfs entry\n");
+ "Error creating 'registers' sysfs entry\n");
+ if (sysfs_create_file(kobj, &dev_attr_status.attr))
+ dev_warn(&dd->pdev->dev,
+ "Error creating 'status' sysfs entry\n");
return 0;
}
return -EINVAL;
sysfs_remove_file(kobj, &dev_attr_registers.attr);
+ sysfs_remove_file(kobj, &dev_attr_status.attr);
return 0;
}
"FTL rebuild in progress. Polling for completion.\n");
start = jiffies;
- dd->ftlrebuildflag = 1;
timeout = jiffies + msecs_to_jiffies(MTIP_FTL_REBUILD_TIMEOUT_MS);
do {
+ if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
+ &dd->dd_flag)))
+ return -EFAULT;
if (mtip_check_surprise_removal(dd->pdev))
return -EFAULT;
dev_warn(&dd->pdev->dev,
"FTL rebuild complete (%d secs).\n",
jiffies_to_msecs(jiffies - start) / 1000);
- dd->ftlrebuildflag = 0;
mtip_block_initialize(dd);
- break;
+ return 0;
}
ssleep(10);
} while (time_before(jiffies, timeout));
/* Check for timeout */
- if (dd->ftlrebuildflag) {
- dev_err(&dd->pdev->dev,
+ dev_err(&dd->pdev->dev,
"Timed out waiting for FTL rebuild to complete (%d secs).\n",
jiffies_to_msecs(jiffies - start) / 1000);
- return -EFAULT;
- }
-
- return 0;
+ return -EFAULT;
}
/*
* is in progress nor error handling is active
*/
wait_event_interruptible(port->svc_wait, (port->flags) &&
- !test_bit(MTIP_FLAG_IC_ACTIVE_BIT, &port->flags) &&
- !test_bit(MTIP_FLAG_EH_ACTIVE_BIT, &port->flags));
+ !(port->flags & MTIP_PF_PAUSE_IO));
if (kthread_should_stop())
break;
- set_bit(MTIP_FLAG_SVC_THD_ACTIVE_BIT, &port->flags);
- if (test_bit(MTIP_FLAG_ISSUE_CMDS_BIT, &port->flags)) {
+ if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
+ &dd->dd_flag)))
+ break;
+
+ set_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
+ if (test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) {
slot = 1;
/* used to restrict the loop to one iteration */
slot_start = num_cmd_slots;
/* Issue the command to the hardware */
mtip_issue_ncq_command(port, slot);
- /* Set the command's timeout value.*/
- port->commands[slot].comp_time = jiffies +
- msecs_to_jiffies(MTIP_NCQ_COMMAND_TIMEOUT_MS);
-
clear_bit(slot, port->cmds_to_issue);
}
- clear_bit(MTIP_FLAG_ISSUE_CMDS_BIT, &port->flags);
- } else if (test_bit(MTIP_FLAG_REBUILD_BIT, &port->flags)) {
- mtip_ftl_rebuild_poll(dd);
- clear_bit(MTIP_FLAG_REBUILD_BIT, &port->flags);
+ clear_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags);
+ } else if (test_bit(MTIP_PF_REBUILD_BIT, &port->flags)) {
+ if (mtip_ftl_rebuild_poll(dd) < 0)
+ set_bit(MTIP_DDF_REBUILD_FAILED_BIT,
+ &dd->dd_flag);
+ clear_bit(MTIP_PF_REBUILD_BIT, &port->flags);
}
- clear_bit(MTIP_FLAG_SVC_THD_ACTIVE_BIT, &port->flags);
+ clear_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
- if (test_bit(MTIP_FLAG_SVC_THD_SHOULD_STOP_BIT, &port->flags))
+ if (test_bit(MTIP_PF_SVC_THD_STOP_BIT, &port->flags))
break;
}
return 0;
int i;
int rv;
unsigned int num_command_slots;
+ unsigned long timeout, timetaken;
+ unsigned char *buf;
+ struct smart_attr attr242;
dd->mmio = pcim_iomap_table(dd->pdev)[MTIP_ABAR];
/* Allocate memory for the command list. */
dd->port->command_list =
dmam_alloc_coherent(&dd->pdev->dev,
- HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 2),
+ HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 4),
&dd->port->command_list_dma,
GFP_KERNEL);
if (!dd->port->command_list) {
/* Clear the memory we have allocated. */
memset(dd->port->command_list,
0,
- HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 2));
+ HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 4));
/* Setup the addresse of the RX FIS. */
dd->port->rxfis = dd->port->command_list + HW_CMD_SLOT_SZ;
dd->port->identify_dma = dd->port->command_tbl_dma +
HW_CMD_TBL_AR_SZ;
- /* Setup the address of the sector buffer. */
+ /* Setup the address of the sector buffer - for some non-ncq cmds */
dd->port->sector_buffer = (void *) dd->port->identify + ATA_SECT_SIZE;
dd->port->sector_buffer_dma = dd->port->identify_dma + ATA_SECT_SIZE;
+ /* Setup the address of the log buf - for read log command */
+ dd->port->log_buf = (void *)dd->port->sector_buffer + ATA_SECT_SIZE;
+ dd->port->log_buf_dma = dd->port->sector_buffer_dma + ATA_SECT_SIZE;
+
+ /* Setup the address of the smart buf - for smart read data command */
+ dd->port->smart_buf = (void *)dd->port->log_buf + ATA_SECT_SIZE;
+ dd->port->smart_buf_dma = dd->port->log_buf_dma + ATA_SECT_SIZE;
+
+
/* Point the command headers at the command tables. */
for (i = 0; i < num_command_slots; i++) {
dd->port->commands[i].command_header =
dd->port->mmio + i*0x80 + PORT_SDBV;
}
- /* Reset the HBA. */
- if (mtip_hba_reset(dd) < 0) {
- dev_err(&dd->pdev->dev,
- "Card did not reset within timeout\n");
- rv = -EIO;
+ timetaken = jiffies;
+ timeout = jiffies + msecs_to_jiffies(30000);
+ while (((readl(dd->port->mmio + PORT_SCR_STAT) & 0x0F) != 0x03) &&
+ time_before(jiffies, timeout)) {
+ mdelay(100);
+ }
+ if (unlikely(mtip_check_surprise_removal(dd->pdev))) {
+ timetaken = jiffies - timetaken;
+ dev_warn(&dd->pdev->dev,
+ "Surprise removal detected at %u ms\n",
+ jiffies_to_msecs(timetaken));
+ rv = -ENODEV;
+ goto out2;
+ }
+ if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))) {
+ timetaken = jiffies - timetaken;
+ dev_warn(&dd->pdev->dev,
+ "Removal detected at %u ms\n",
+ jiffies_to_msecs(timetaken));
+ rv = -EFAULT;
goto out2;
}
+ /* Conditionally reset the HBA. */
+ if (!(readl(dd->mmio + HOST_CAP) & HOST_CAP_NZDMA)) {
+ if (mtip_hba_reset(dd) < 0) {
+ dev_err(&dd->pdev->dev,
+ "Card did not reset within timeout\n");
+ rv = -EIO;
+ goto out2;
+ }
+ } else {
+ /* Clear any pending interrupts on the HBA */
+ writel(readl(dd->mmio + HOST_IRQ_STAT),
+ dd->mmio + HOST_IRQ_STAT);
+ }
+
mtip_init_port(dd->port);
mtip_start_port(dd->port);
mod_timer(&dd->port->cmd_timer,
jiffies + msecs_to_jiffies(MTIP_TIMEOUT_CHECK_PERIOD));
+
+ if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) {
+ rv = -EFAULT;
+ goto out3;
+ }
+
if (mtip_get_identify(dd->port, NULL) < 0) {
rv = -EFAULT;
goto out3;
if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) ==
MTIP_FTL_REBUILD_MAGIC) {
- set_bit(MTIP_FLAG_REBUILD_BIT, &dd->port->flags);
+ set_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags);
return MTIP_FTL_REBUILD_MAGIC;
}
mtip_dump_identify(dd->port);
+
+ /* check write protect, over temp and rebuild statuses */
+ rv = mtip_read_log_page(dd->port, ATA_LOG_SATA_NCQ,
+ dd->port->log_buf,
+ dd->port->log_buf_dma, 1);
+ if (rv) {
+ dev_warn(&dd->pdev->dev,
+ "Error in READ LOG EXT (10h) command\n");
+ /* non-critical error, don't fail the load */
+ } else {
+ buf = (unsigned char *)dd->port->log_buf;
+ if (buf[259] & 0x1) {
+ dev_info(&dd->pdev->dev,
+ "Write protect bit is set.\n");
+ set_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag);
+ }
+ if (buf[288] == 0xF7) {
+ dev_info(&dd->pdev->dev,
+ "Exceeded Tmax, drive in thermal shutdown.\n");
+ set_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag);
+ }
+ if (buf[288] == 0xBF) {
+ dev_info(&dd->pdev->dev,
+ "Drive indicates rebuild has failed.\n");
+ /* TODO */
+ }
+ }
+
+ /* get write protect progress */
+ memset(&attr242, 0, sizeof(struct smart_attr));
+ if (mtip_get_smart_attr(dd->port, 242, &attr242))
+ dev_warn(&dd->pdev->dev,
+ "Unable to check write protect progress\n");
+ else
+ dev_info(&dd->pdev->dev,
+ "Write protect progress: %d%% (%d blocks)\n",
+ attr242.cur, attr242.data);
return rv;
out3:
/* Free the command/command header memory. */
dmam_free_coherent(&dd->pdev->dev,
- HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 2),
+ HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 4),
dd->port->command_list,
dd->port->command_list_dma);
out1:
* Send standby immediate (E0h) to the drive so that it
* saves its state.
*/
- if (atomic_read(&dd->drv_cleanup_done) != true) {
+ if (!test_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag)) {
- mtip_standby_immediate(dd->port);
+ if (!test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags))
+ if (mtip_standby_immediate(dd->port))
+ dev_warn(&dd->pdev->dev,
+ "STANDBY IMMEDIATE failed\n");
/* de-initialize the port. */
mtip_deinit_port(dd->port);
/* Free the command/command header memory. */
dmam_free_coherent(&dd->pdev->dev,
- HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 2),
+ HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 4),
dd->port->command_list,
dd->port->command_list_dma);
/* Free the memory allocated for the for structure. */
if (!dd)
return -ENOTTY;
+ if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)))
+ return -ENOTTY;
+
switch (cmd) {
case BLKFLSBUF:
return -ENOTTY;
if (!dd)
return -ENOTTY;
+ if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)))
+ return -ENOTTY;
+
switch (cmd) {
case BLKFLSBUF:
return -ENOTTY;
int nents = 0;
int tag = 0;
+ if (unlikely(dd->dd_flag & MTIP_DDF_STOP_IO)) {
+ if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
+ &dd->dd_flag))) {
+ bio_endio(bio, -ENXIO);
+ return;
+ }
+ if (unlikely(test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))) {
+ bio_endio(bio, -ENODATA);
+ return;
+ }
+ if (unlikely(test_bit(MTIP_DDF_WRITE_PROTECT_BIT,
+ &dd->dd_flag) &&
+ bio_data_dir(bio))) {
+ bio_endio(bio, -ENODATA);
+ return;
+ }
+ }
+
if (unlikely(!bio_has_data(bio))) {
blk_queue_flush(queue, 0);
bio_endio(bio, 0);
if (unlikely((bio)->bi_vcnt > MTIP_MAX_SG)) {
dev_warn(&dd->pdev->dev,
- "Maximum number of SGL entries exceeded");
+ "Maximum number of SGL entries exceeded\n");
bio_io_error(bio);
mtip_hw_release_scatterlist(dd, tag);
return;
kobject_put(kobj);
}
- if (dd->mtip_svc_handler)
+ if (dd->mtip_svc_handler) {
+ set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);
return rv; /* service thread created for handling rebuild */
+ }
start_service_thread:
sprintf(thd_name, "mtip_svc_thd_%02d", index);
dd, thd_name);
if (IS_ERR(dd->mtip_svc_handler)) {
- printk(KERN_ERR "mtip32xx: service thread failed to start\n");
+ dev_err(&dd->pdev->dev, "service thread failed to start\n");
dd->mtip_svc_handler = NULL;
rv = -EFAULT;
goto kthread_run_error;
}
+ if (wait_for_rebuild == MTIP_FTL_REBUILD_MAGIC)
+ rv = wait_for_rebuild;
+
return rv;
kthread_run_error:
struct kobject *kobj;
if (dd->mtip_svc_handler) {
- set_bit(MTIP_FLAG_SVC_THD_SHOULD_STOP_BIT, &dd->port->flags);
+ set_bit(MTIP_PF_SVC_THD_STOP_BIT, &dd->port->flags);
wake_up_interruptible(&dd->port->svc_wait);
kthread_stop(dd->mtip_svc_handler);
}
- /* Clean up the sysfs attributes managed by the protocol layer. */
- kobj = kobject_get(&disk_to_dev(dd->disk)->kobj);
- if (kobj) {
- mtip_hw_sysfs_exit(dd, kobj);
- kobject_put(kobj);
+ /* Clean up the sysfs attributes, if created */
+ if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag)) {
+ kobj = kobject_get(&disk_to_dev(dd->disk)->kobj);
+ if (kobj) {
+ mtip_hw_sysfs_exit(dd, kobj);
+ kobject_put(kobj);
+ }
}
/*
* from /dev
*/
del_gendisk(dd->disk);
+
+ spin_lock(&rssd_index_lock);
+ ida_remove(&rssd_index_ida, dd->index);
+ spin_unlock(&rssd_index_lock);
+
blk_cleanup_queue(dd->queue);
dd->disk = NULL;
dd->queue = NULL;
/* Delete our gendisk structure, and cleanup the blk queue. */
del_gendisk(dd->disk);
+
+ spin_lock(&rssd_index_lock);
+ ida_remove(&rssd_index_ida, dd->index);
+ spin_unlock(&rssd_index_lock);
+
blk_cleanup_queue(dd->queue);
dd->disk = NULL;
dd->queue = NULL;
return -ENOMEM;
}
- /* Set the atomic variable as 1 in case of SRSI */
- atomic_set(&dd->drv_cleanup_done, true);
-
- atomic_set(&dd->resumeflag, false);
-
/* Attach the private data to this PCI device. */
pci_set_drvdata(pdev, dd);
* instance number.
*/
instance++;
-
+ if (rv != MTIP_FTL_REBUILD_MAGIC)
+ set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);
goto done;
block_initialize_err:
pci_set_drvdata(pdev, NULL);
return rv;
done:
- /* Set the atomic variable as 0 in case of SRSI */
- atomic_set(&dd->drv_cleanup_done, true);
-
return rv;
}
struct driver_data *dd = pci_get_drvdata(pdev);
int counter = 0;
+ set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);
+
if (mtip_check_surprise_removal(pdev)) {
- while (atomic_read(&dd->drv_cleanup_done) == false) {
+ while (!test_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag)) {
counter++;
msleep(20);
if (counter == 10) {
}
}
}
- /* Set the atomic variable as 1 in case of SRSI */
- atomic_set(&dd->drv_cleanup_done, true);
/* Clean up the block layer. */
mtip_block_remove(dd);
return -EFAULT;
}
- atomic_set(&dd->resumeflag, true);
+ set_bit(MTIP_DDF_RESUME_BIT, &dd->dd_flag);
/* Disable ports & interrupts then send standby immediate */
rv = mtip_block_suspend(dd);
dev_err(&pdev->dev, "Unable to resume\n");
err:
- atomic_set(&dd->resumeflag, false);
+ clear_bit(MTIP_DDF_RESUME_BIT, &dd->dd_flag);
return rv;
}
*/
static int __init mtip_init(void)
{
+ int error;
+
printk(KERN_INFO MTIP_DRV_NAME " Version " MTIP_DRV_VERSION "\n");
/* Allocate a major block device number to use with this driver. */
- mtip_major = register_blkdev(0, MTIP_DRV_NAME);
- if (mtip_major < 0) {
+ error = register_blkdev(0, MTIP_DRV_NAME);
+ if (error <= 0) {
printk(KERN_ERR "Unable to register block device (%d)\n",
- mtip_major);
+ error);
return -EBUSY;
}
+ mtip_major = error;
/* Register our PCI operations. */
- return pci_register_driver(&mtip_pci_driver);
+ error = pci_register_driver(&mtip_pci_driver);
+ if (error)
+ unregister_blkdev(mtip_major, MTIP_DRV_NAME);
+
+ return error;
}
/*
/* offset of Device Control register in PCIe extended capabilites space */
#define PCIE_CONFIG_EXT_DEVICE_CONTROL_OFFSET 0x48
-/* # of times to retry timed out IOs */
-#define MTIP_MAX_RETRIES 5
+/* # of times to retry timed out/failed IOs */
+#define MTIP_MAX_RETRIES 2
/* Various timeout values in ms */
#define MTIP_NCQ_COMMAND_TIMEOUT_MS 5000
#define __force_bit2int (unsigned int __force)
/* below are bit numbers in 'flags' defined in mtip_port */
-#define MTIP_FLAG_IC_ACTIVE_BIT 0
-#define MTIP_FLAG_EH_ACTIVE_BIT 1
-#define MTIP_FLAG_SVC_THD_ACTIVE_BIT 2
-#define MTIP_FLAG_ISSUE_CMDS_BIT 4
-#define MTIP_FLAG_REBUILD_BIT 5
-#define MTIP_FLAG_SVC_THD_SHOULD_STOP_BIT 8
+#define MTIP_PF_IC_ACTIVE_BIT 0 /* pio/ioctl */
+#define MTIP_PF_EH_ACTIVE_BIT 1 /* error handling */
+#define MTIP_PF_SE_ACTIVE_BIT 2 /* secure erase */
+#define MTIP_PF_DM_ACTIVE_BIT 3 /* download microcode */
+#define MTIP_PF_PAUSE_IO ((1 << MTIP_PF_IC_ACTIVE_BIT) | \
+ (1 << MTIP_PF_EH_ACTIVE_BIT) | \
+ (1 << MTIP_PF_SE_ACTIVE_BIT) | \
+ (1 << MTIP_PF_DM_ACTIVE_BIT))
+
+#define MTIP_PF_SVC_THD_ACTIVE_BIT 4
+#define MTIP_PF_ISSUE_CMDS_BIT 5
+#define MTIP_PF_REBUILD_BIT 6
+#define MTIP_PF_SVC_THD_STOP_BIT 8
+
+/* below are bit numbers in 'dd_flag' defined in driver_data */
+#define MTIP_DDF_REMOVE_PENDING_BIT 1
+#define MTIP_DDF_OVER_TEMP_BIT 2
+#define MTIP_DDF_WRITE_PROTECT_BIT 3
+#define MTIP_DDF_STOP_IO ((1 << MTIP_DDF_REMOVE_PENDING_BIT) | \
+ (1 << MTIP_DDF_OVER_TEMP_BIT) | \
+ (1 << MTIP_DDF_WRITE_PROTECT_BIT))
+
+#define MTIP_DDF_CLEANUP_BIT 5
+#define MTIP_DDF_RESUME_BIT 6
+#define MTIP_DDF_INIT_DONE_BIT 7
+#define MTIP_DDF_REBUILD_FAILED_BIT 8
+
+__packed struct smart_attr{
+ u8 attr_id;
+ u16 flags;
+ u8 cur;
+ u8 worst;
+ u32 data;
+ u8 res[3];
+};
/* Register Frame Information Structure (FIS), host to device. */
struct host_to_dev_fis {
* when the command slot and all associated data structures
* are no longer needed.
*/
+ u16 *log_buf;
+ dma_addr_t log_buf_dma;
+
+ u8 *smart_buf;
+ dma_addr_t smart_buf_dma;
+
unsigned long allocated[SLOTBITS_IN_LONGS];
/*
* used to queue commands when an internal command is in progress
* Timer used to complete commands that have been active for too long.
*/
struct timer_list cmd_timer;
+ unsigned long ic_pause_timer;
/*
* Semaphore used to block threads if there are no
* command slots available.
unsigned slot_groups; /* number of slot groups the product supports */
- atomic_t drv_cleanup_done; /* Atomic variable for SRSI */
-
unsigned long index; /* Index to determine the disk name */
- unsigned int ftlrebuildflag; /* FTL rebuild flag */
-
- atomic_t resumeflag; /* Atomic variable to track suspend/resume */
+ unsigned long dd_flag; /* NOTE: use atomic bit operations on this */
struct task_struct *mtip_svc_handler; /* task_struct of svc thd */
};
cap_str_10, cap_str_2);
set_capacity(vblk->disk, capacity);
+ revalidate_disk(vblk->disk);
done:
mutex_unlock(&vblk->config_lock);
}
return err;
}
+/*
+ * Legacy naming scheme used for virtio devices. We are stuck with it for
+ * virtio blk but don't ever use it for any new driver.
+ */
+static int virtblk_name_format(char *prefix, int index, char *buf, int buflen)
+{
+ const int base = 'z' - 'a' + 1;
+ char *begin = buf + strlen(prefix);
+ char *end = buf + buflen;
+ char *p;
+ int unit;
+
+ p = end - 1;
+ *p = '\0';
+ unit = base;
+ do {
+ if (p == begin)
+ return -EINVAL;
+ *--p = 'a' + (index % unit);
+ index = (index / unit) - 1;
+ } while (index >= 0);
+
+ memmove(begin, p, end - p);
+ memcpy(buf, prefix, strlen(prefix));
+
+ return 0;
+}
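
For reference, the legacy scheme implemented above is the familiar bijective base-26 disk naming: index 0 maps to "vda", 25 to "vdz", 26 to "vdaa", and 701 to "vdzz". A small stand-alone sketch of the same arithmetic (the user-space harness is illustrative only, not part of the patch):

    #include <stdio.h>
    #include <string.h>

    /* Same arithmetic as virtblk_name_format(), shown outside the kernel. */
    static int name_format(const char *prefix, int index, char *buf, int buflen)
    {
        const int base = 'z' - 'a' + 1;
        char *begin = buf + strlen(prefix);
        char *end = buf + buflen;
        char *p = end - 1;

        *p = '\0';
        do {
            if (p == begin)
                return -1;          /* name does not fit in buf */
            *--p = 'a' + (index % base);
            index = (index / base) - 1;
        } while (index >= 0);

        memmove(begin, p, end - p);
        memcpy(buf, prefix, strlen(prefix));
        return 0;
    }

    int main(void)
    {
        const int samples[] = { 0, 25, 26, 701, 702 };
        char name[32];
        unsigned int i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
            name_format("vd", samples[i], name, sizeof(name));
            printf("%3d -> %s\n", samples[i], name); /* vda, vdz, vdaa, vdzz, vdaaa */
        }
        return 0;
    }
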
+
static int __devinit virtblk_probe(struct virtio_device *vdev)
{
struct virtio_blk *vblk;
q->queuedata = vblk;
- if (index < 26) {
- sprintf(vblk->disk->disk_name, "vd%c", 'a' + index % 26);
- } else if (index < (26 + 1) * 26) {
- sprintf(vblk->disk->disk_name, "vd%c%c",
- 'a' + index / 26 - 1, 'a' + index % 26);
- } else {
- const unsigned int m1 = (index / 26 - 1) / 26 - 1;
- const unsigned int m2 = (index / 26 - 1) % 26;
- const unsigned int m3 = index % 26;
- sprintf(vblk->disk->disk_name, "vd%c%c%c",
- 'a' + m1, 'a' + m2, 'a' + m3);
- }
+ virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);
vblk->disk->major = major;
vblk->disk->first_minor = index_to_minor(index);
static void xen_blkbk_unmap(struct pending_req *req)
{
struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+ struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
unsigned int i, invcount = 0;
grant_handle_t handle;
int ret;
gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
GNTMAP_host_map, handle);
pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
+ pages[invcount] = virt_to_page(vaddr(req, i));
invcount++;
}
- ret = HYPERVISOR_grant_table_op(
- GNTTABOP_unmap_grant_ref, unmap, invcount);
+ ret = gnttab_unmap_refs(unmap, pages, invcount, false);
BUG_ON(ret);
- /*
- * Note, we use invcount, so nr->pages, so we can't index
- * using vaddr(req, i).
- */
- for (i = 0; i < invcount; i++) {
- ret = m2p_remove_override(
- virt_to_page(unmap[i].host_addr), false);
- if (ret) {
- pr_alert(DRV_PFX "Failed to remove M2P override for %lx\n",
- (unsigned long)unmap[i].host_addr);
- continue;
- }
- }
}
static int xen_blkbk_map(struct blkif_request *req,
pending_req->blkif->domid);
}
- ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg);
+ ret = gnttab_map_refs(map, NULL, &blkbk->pending_page(pending_req, 0), nseg);
BUG_ON(ret);
/*
if (ret)
continue;
- ret = m2p_add_override(PFN_DOWN(map[i].dev_bus_addr),
- blkbk->pending_page(pending_req, i), NULL);
- if (ret) {
- pr_alert(DRV_PFX "Failed to install M2P override for %lx (ret: %d)\n",
- (unsigned long)map[i].dev_bus_addr, ret);
- /* We could switch over to GNTTABOP_copy */
- continue;
- }
-
seg[i].buf = map[i].dev_bus_addr |
(req->u.rw.seg[i].first_sect << 9);
}
int err = 0;
int status = BLKIF_RSP_OKAY;
struct block_device *bdev = blkif->vbd.bdev;
+ unsigned long secure;
blkif->st_ds_req++;
xen_blkif_get(blkif);
- if (blkif->blk_backend_type == BLKIF_BACKEND_PHY ||
- blkif->blk_backend_type == BLKIF_BACKEND_FILE) {
- unsigned long secure = (blkif->vbd.discard_secure &&
- (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
- BLKDEV_DISCARD_SECURE : 0;
- err = blkdev_issue_discard(bdev,
- req->u.discard.sector_number,
- req->u.discard.nr_sectors,
- GFP_KERNEL, secure);
- } else
- err = -EOPNOTSUPP;
+ secure = (blkif->vbd.discard_secure &&
+ (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
+ BLKDEV_DISCARD_SECURE : 0;
+
+ err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
+ req->u.discard.nr_sectors,
+ GFP_KERNEL, secure);
if (err == -EOPNOTSUPP) {
pr_debug(DRV_PFX "discard op failed, not supported\n");
int i, mmap_pages;
int rc = 0;
- if (!xen_pv_domain())
+ if (!xen_domain())
return -ENODEV;
blkbk = kzalloc(sizeof(struct xen_blkbk), GFP_KERNEL);
BLKIF_PROTOCOL_X86_64 = 3,
};
-enum blkif_backend_type {
- BLKIF_BACKEND_PHY = 1,
- BLKIF_BACKEND_FILE = 2,
-};
-
struct xen_vbd {
/* What the domain refers to this vbd as. */
blkif_vdev_t handle;
unsigned int irq;
/* Comms information. */
enum blkif_protocol blk_protocol;
- enum blkif_backend_type blk_backend_type;
union blkif_back_rings blk_rings;
void *blk_ring;
/* The VBD attached to this interface. */
err = xenbus_printf(xbt, dev->nodename, "feature-flush-cache",
"%d", state);
if (err)
- xenbus_dev_fatal(dev, err, "writing feature-flush-cache");
+ dev_warn(&dev->dev, "writing feature-flush-cache (%d)", err);
return err;
}
-int xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info *be)
+static void xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info *be)
{
struct xenbus_device *dev = be->dev;
struct xen_blkif *blkif = be->blkif;
- char *type;
int err;
int state = 0;
+ struct block_device *bdev = be->blkif->vbd.bdev;
+ struct request_queue *q = bdev_get_queue(bdev);
- type = xenbus_read(XBT_NIL, dev->nodename, "type", NULL);
- if (!IS_ERR(type)) {
- if (strncmp(type, "file", 4) == 0) {
- state = 1;
- blkif->blk_backend_type = BLKIF_BACKEND_FILE;
+ if (blk_queue_discard(q)) {
+ err = xenbus_printf(xbt, dev->nodename,
+ "discard-granularity", "%u",
+ q->limits.discard_granularity);
+ if (err) {
+ dev_warn(&dev->dev, "writing discard-granularity (%d)", err);
+ return;
}
- if (strncmp(type, "phy", 3) == 0) {
- struct block_device *bdev = be->blkif->vbd.bdev;
- struct request_queue *q = bdev_get_queue(bdev);
- if (blk_queue_discard(q)) {
- err = xenbus_printf(xbt, dev->nodename,
- "discard-granularity", "%u",
- q->limits.discard_granularity);
- if (err) {
- xenbus_dev_fatal(dev, err,
- "writing discard-granularity");
- goto kfree;
- }
- err = xenbus_printf(xbt, dev->nodename,
- "discard-alignment", "%u",
- q->limits.discard_alignment);
- if (err) {
- xenbus_dev_fatal(dev, err,
- "writing discard-alignment");
- goto kfree;
- }
- state = 1;
- blkif->blk_backend_type = BLKIF_BACKEND_PHY;
- }
- /* Optional. */
- err = xenbus_printf(xbt, dev->nodename,
- "discard-secure", "%d",
- blkif->vbd.discard_secure);
- if (err) {
- xenbus_dev_fatal(dev, err,
- "writting discard-secure");
- goto kfree;
- }
+ err = xenbus_printf(xbt, dev->nodename,
+ "discard-alignment", "%u",
+ q->limits.discard_alignment);
+ if (err) {
+ dev_warn(&dev->dev, "writing discard-alignment (%d)", err);
+ return;
+ }
+ state = 1;
+ /* Optional. */
+ err = xenbus_printf(xbt, dev->nodename,
+ "discard-secure", "%d",
+ blkif->vbd.discard_secure);
+ if (err) {
+ dev_warn(&dev->dev, "writing discard-secure (%d)", err);
+ return;
}
- } else {
- err = PTR_ERR(type);
- xenbus_dev_fatal(dev, err, "reading type");
- goto out;
}
-
err = xenbus_printf(xbt, dev->nodename, "feature-discard",
"%d", state);
if (err)
- xenbus_dev_fatal(dev, err, "writing feature-discard");
-kfree:
- kfree(type);
-out:
- return err;
+ dev_warn(&dev->dev, "writing feature-discard (%d)", err);
}
int xen_blkbk_barrier(struct xenbus_transaction xbt,
struct backend_info *be, int state)
err = xenbus_printf(xbt, dev->nodename, "feature-barrier",
"%d", state);
if (err)
- xenbus_dev_fatal(dev, err, "writing feature-barrier");
+ dev_warn(&dev->dev, "writing feature-barrier (%d)", err);
return err;
}
return;
}
- err = xen_blkbk_flush_diskcache(xbt, be, be->blkif->vbd.flush_support);
- if (err)
- goto abort;
+ /* If we can't advertise it is OK. */
+ xen_blkbk_flush_diskcache(xbt, be, be->blkif->vbd.flush_support);
- err = xen_blkbk_discard(xbt, be);
+ xen_blkbk_discard(xbt, be);
- /* If we can't advertise it is OK. */
- err = xen_blkbk_barrier(xbt, be, be->blkif->vbd.flush_support);
+ xen_blkbk_barrier(xbt, be, be->blkif->vbd.flush_support);
err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
(unsigned long long)vbd_sz(&be->blkif->vbd));
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
+#include <linux/bitmap.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
*/
struct blkfront_info
{
+ spinlock_t io_lock;
struct mutex mutex;
struct xenbus_device *xbdev;
struct gendisk *gd;
int is_ready;
};
-static DEFINE_SPINLOCK(blkif_io_lock);
-
static unsigned int nr_minors;
static unsigned long *minors;
static DEFINE_SPINLOCK(minor_lock);
spin_lock(&minor_lock);
if (find_next_bit(minors, end, minor) >= end) {
- for (; minor < end; ++minor)
- __set_bit(minor, minors);
+ bitmap_set(minors, minor, nr);
rc = 0;
} else
rc = -EBUSY;
BUG_ON(end > nr_minors);
spin_lock(&minor_lock);
- for (; minor < end; ++minor)
- __clear_bit(minor, minors);
+ bitmap_clear(minors, minor, nr);
spin_unlock(&minor_lock);
}
struct request_queue *rq;
struct blkfront_info *info = gd->private_data;
- rq = blk_init_queue(do_blkif_request, &blkif_io_lock);
+ rq = blk_init_queue(do_blkif_request, &info->io_lock);
if (rq == NULL)
return -1;
if (info->rq == NULL)
return;
- spin_lock_irqsave(&blkif_io_lock, flags);
+ spin_lock_irqsave(&info->io_lock, flags);
/* No more blkif_request(). */
blk_stop_queue(info->rq);
/* No more gnttab callback work. */
gnttab_cancel_free_callback(&info->callback);
- spin_unlock_irqrestore(&blkif_io_lock, flags);
+ spin_unlock_irqrestore(&info->io_lock, flags);
/* Flush gnttab callback work. Must be done with no locks held. */
flush_work_sync(&info->work);
{
struct blkfront_info *info = container_of(work, struct blkfront_info, work);
- spin_lock_irq(&blkif_io_lock);
+ spin_lock_irq(&info->io_lock);
if (info->connected == BLKIF_STATE_CONNECTED)
kick_pending_request_queues(info);
- spin_unlock_irq(&blkif_io_lock);
+ spin_unlock_irq(&info->io_lock);
}
static void blkif_free(struct blkfront_info *info, int suspend)
{
/* Prevent new requests being issued until we fix things up. */
- spin_lock_irq(&blkif_io_lock);
+ spin_lock_irq(&info->io_lock);
info->connected = suspend ?
BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
/* No more blkif_request(). */
blk_stop_queue(info->rq);
/* No more gnttab callback work. */
gnttab_cancel_free_callback(&info->callback);
- spin_unlock_irq(&blkif_io_lock);
+ spin_unlock_irq(&info->io_lock);
/* Flush gnttab callback work. Must be done with no locks held. */
flush_work_sync(&info->work);
struct blkfront_info *info = (struct blkfront_info *)dev_id;
int error;
- spin_lock_irqsave(&blkif_io_lock, flags);
+ spin_lock_irqsave(&info->io_lock, flags);
if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
- spin_unlock_irqrestore(&blkif_io_lock, flags);
+ spin_unlock_irqrestore(&info->io_lock, flags);
return IRQ_HANDLED;
}
kick_pending_request_queues(info);
- spin_unlock_irqrestore(&blkif_io_lock, flags);
+ spin_unlock_irqrestore(&info->io_lock, flags);
return IRQ_HANDLED;
}
}
mutex_init(&info->mutex);
+ spin_lock_init(&info->io_lock);
info->xbdev = dev;
info->vdevice = vdevice;
info->connected = BLKIF_STATE_DISCONNECTED;
xenbus_switch_state(info->xbdev, XenbusStateConnected);
- spin_lock_irq(&blkif_io_lock);
+ spin_lock_irq(&info->io_lock);
/* Now safe for us to use the shared ring */
info->connected = BLKIF_STATE_CONNECTED;
/* Kick any other new requests queued since we resumed */
kick_pending_request_queues(info);
- spin_unlock_irq(&blkif_io_lock);
+ spin_unlock_irq(&info->io_lock);
return 0;
}
xenbus_switch_state(info->xbdev, XenbusStateConnected);
/* Kick pending requests. */
- spin_lock_irq(&blkif_io_lock);
+ spin_lock_irq(&info->io_lock);
info->connected = BLKIF_STATE_CONNECTED;
kick_pending_request_queues(info);
- spin_unlock_irq(&blkif_io_lock);
+ spin_unlock_irq(&info->io_lock);
add_disk(info->gd);
mutex_lock(&blkfront_mutex);
bdev = bdget_disk(disk, 0);
- bdput(bdev);
if (bdev->bd_openers)
goto out;
}
out:
+ bdput(bdev);
mutex_unlock(&blkfront_mutex);
return 0;
}
/* Atheros AR3012 with sflash firmware*/
{ USB_DEVICE(0x0CF3, 0x3004) },
+ { USB_DEVICE(0x0CF3, 0x311D) },
{ USB_DEVICE(0x13d3, 0x3375) },
+ { USB_DEVICE(0x04CA, 0x3005) },
/* Atheros AR5BBU12 with sflash firmware */
{ USB_DEVICE(0x0489, 0xE02C) },
/* Atheros AR3012 with sflash firmware*/
{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
{ } /* Terminating entry */
};
{ USB_DEVICE_INFO(0xe0, 0x01, 0x01) },
/* Broadcom SoftSailing reporting vendor specific */
- { USB_DEVICE(0x05ac, 0x21e1) },
+ { USB_DEVICE(0x0a5c, 0x21e1) },
/* Apple MacBookPro 7,1 */
{ USB_DEVICE(0x05ac, 0x8213) },
/* Broadcom BCM20702A0 */
{ USB_DEVICE(0x0a5c, 0x21e3) },
{ USB_DEVICE(0x0a5c, 0x21e6) },
+ { USB_DEVICE(0x0a5c, 0x21e8) },
{ USB_DEVICE(0x0a5c, 0x21f3) },
{ USB_DEVICE(0x413c, 0x8197) },
/* Atheros 3012 with sflash firmware */
{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU12 with sflash firmware */
{ USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
hci_uart_close(hdev);
if (test_and_clear_bit(HCI_UART_PROTO_SET, &hu->flags)) {
- hu->proto->close(hu);
if (hdev) {
hci_unregister_dev(hdev);
hci_free_dev(hdev);
}
+ hu->proto->close(hu);
}
kfree(hu);
hpetp->hp_which, hdp->hd_phys_address,
hpetp->hp_ntimer > 1 ? "s" : "");
for (i = 0; i < hpetp->hp_ntimer; i++)
- printk("%s %d", i > 0 ? "," : "", hdp->hd_irq[i]);
- printk("\n");
+ printk(KERN_CONT "%s %d", i > 0 ? "," : "", hdp->hd_irq[i]);
+ printk(KERN_CONT "\n");
temp = hpetp->hp_tick_freq;
remainder = do_div(temp, 1000000);
uuid = table->data;
if (!uuid) {
uuid = tmp_uuid;
- uuid[8] = 0;
- }
- if (uuid[8] == 0)
generate_random_uuid(uuid);
+ } else {
+ static DEFINE_SPINLOCK(bootid_spinlock);
+
+ spin_lock(&bootid_spinlock);
+ if (!uuid[8])
+ generate_random_uuid(uuid);
+ spin_unlock(&bootid_spinlock);
+ }
sprintf(buf, "%pU", uuid);
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
-#include <linux/async.h>
#include <asm/io.h>
/*
/* Number of reads we try to get two different values */
#define ACPI_PM_READ_CHECKS 10000
-static void __init acpi_pm_clocksource_async(void *unused, async_cookie_t cookie)
+static int __init init_acpi_pm_clocksource(void)
{
cycle_t value1, value2;
unsigned int i, j = 0;
+ if (!pmtmr_ioport)
+ return -ENODEV;
/* "verify" this timing source: */
for (j = 0; j < ACPI_PM_MONOTONICITY_CHECKS; j++) {
- usleep_range(100 * j, 100 * j + 100);
+ udelay(100 * j);
value1 = clocksource_acpi_pm.read(&clocksource_acpi_pm);
for (i = 0; i < ACPI_PM_READ_CHECKS; i++) {
value2 = clocksource_acpi_pm.read(&clocksource_acpi_pm);
" 0x%#llx, 0x%#llx - aborting.\n",
value1, value2);
pmtmr_ioport = 0;
- return;
+ return -EINVAL;
}
if (i == ACPI_PM_READ_CHECKS) {
printk(KERN_INFO "PM-Timer failed consistency check "
" (0x%#llx) - aborting.\n", value1);
pmtmr_ioport = 0;
- return;
+ return -ENODEV;
}
}
if (verify_pmtmr_rate() != 0){
pmtmr_ioport = 0;
- return;
+ return -ENODEV;
}
- clocksource_register_hz(&clocksource_acpi_pm,
+ return clocksource_register_hz(&clocksource_acpi_pm,
PMTMR_TICKS_PER_SEC);
}
-static int __init init_acpi_pm_clocksource(void)
-{
- if (!pmtmr_ioport)
- return -ENODEV;
-
- async_schedule(acpi_pm_clocksource_async, NULL);
- return 0;
-}
-
/* We use fs_initcall because we want the PCI fixups to have run
* but we still need to load before device_initcall
*/
config ARM_OMAP2PLUS_CPUFREQ
bool "TI OMAP2+"
+ depends on ARCH_OMAP2PLUS
default ARCH_OMAP2PLUS
select CPU_FREQ_TABLE
config GPIO_SODAVILLE
bool "Intel Sodaville GPIO support"
- depends on X86 && PCI && OF && BROKEN
+ depends on X86 && PCI && OF
select GPIO_GENERIC
select GENERIC_IRQ_CHIP
help
if (ret < 0)
memset(dev->irq_stat, 0, ARRAY_SIZE(dev->irq_stat));
- for (bank = 0; bank <= ADP5588_BANK(ADP5588_MAXGPIO);
+ for (bank = 0, bit = 0; bank <= ADP5588_BANK(ADP5588_MAXGPIO);
bank++, bit = 0) {
pending = dev->irq_stat[bank] & dev->irq_mask[bank];
#endif
};
-static struct samsung_gpio_chip exynos5_gpios_1[] = {
#ifdef CONFIG_ARCH_EXYNOS5
+static struct samsung_gpio_chip exynos5_gpios_1[] = {
{
.chip = {
.base = EXYNOS5_GPA0(0),
.to_irq = samsung_gpiolib_to_irq,
},
},
-#endif
};
+#endif
-static struct samsung_gpio_chip exynos5_gpios_2[] = {
#ifdef CONFIG_ARCH_EXYNOS5
+static struct samsung_gpio_chip exynos5_gpios_2[] = {
{
.chip = {
.base = EXYNOS5_GPE0(0),
},
},
-#endif
};
+#endif
-static struct samsung_gpio_chip exynos5_gpios_3[] = {
#ifdef CONFIG_ARCH_EXYNOS5
+static struct samsung_gpio_chip exynos5_gpios_3[] = {
{
.chip = {
.base = EXYNOS5_GPV0(0),
.label = "GPV4",
},
},
-#endif
};
+#endif
-static struct samsung_gpio_chip exynos5_gpios_4[] = {
#ifdef CONFIG_ARCH_EXYNOS5
+static struct samsung_gpio_chip exynos5_gpios_4[] = {
{
.chip = {
.base = EXYNOS5_GPZ(0),
.label = "GPZ",
},
},
-#endif
};
+#endif
#if defined(CONFIG_ARCH_EXYNOS) && defined(CONFIG_OF)
struct sdv_gpio_chip_data {
int irq_base;
void __iomem *gpio_pub_base;
- struct irq_domain id;
+ struct irq_domain *id;
struct irq_chip_generic *gc;
struct bgpio_chip bgpio;
};
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct sdv_gpio_chip_data *sd = gc->private;
void __iomem *type_reg;
- u32 irq_offs = d->irq - sd->irq_base;
u32 reg;
- if (irq_offs < 8)
+ if (d->hwirq < 8)
type_reg = sd->gpio_pub_base + GPIT1R0;
else
type_reg = sd->gpio_pub_base + GPIT1R1;
switch (type) {
case IRQ_TYPE_LEVEL_HIGH:
- reg &= ~BIT(4 * (irq_offs % 8));
+ reg &= ~BIT(4 * (d->hwirq % 8));
break;
case IRQ_TYPE_LEVEL_LOW:
- reg |= BIT(4 * (irq_offs % 8));
+ reg |= BIT(4 * (d->hwirq % 8));
break;
default:
u32 irq_bit = __fls(irq_stat);
irq_stat &= ~BIT(irq_bit);
- generic_handle_irq(sd->irq_base + irq_bit);
+ generic_handle_irq(irq_find_mapping(sd->id, irq_bit));
}
return IRQ_HANDLED;
}
static struct irq_domain_ops irq_domain_sdv_ops = {
- .dt_translate = sdv_xlate,
+ .xlate = sdv_xlate,
};
static __devinit int sdv_register_irqsupport(struct sdv_gpio_chip_data *sd,
if (ret)
goto out_free_desc;
- sd->id.irq_base = sd->irq_base;
- sd->id.of_node = of_node_get(pdev->dev.of_node);
- sd->id.ops = &irq_domain_sdv_ops;
-
/*
* This gpio irq controller latches level irqs. Testing shows that if
* we unmask & ACK the IRQ before the source of the interrupt is gone
IRQ_GC_INIT_MASK_CACHE, IRQ_NOREQUEST,
IRQ_LEVEL | IRQ_NOPROBE);
- irq_domain_add(&sd->id);
+ sd->id = irq_domain_add_legacy(pdev->dev.of_node, SDV_NUM_PUB_GPIOS,
+ sd->irq_base, 0, &irq_domain_sdv_ops, sd);
+ if (!sd->id)
+ goto out_free_irq;
return 0;
out_free_irq:
free_irq(pdev->irq, sd);
{
struct sdv_gpio_chip_data *sd = pci_get_drvdata(pdev);
- irq_domain_del(&sd->id);
free_irq(pdev->irq, sd);
irq_free_descs(sd->irq_base, SDV_NUM_PUB_GPIOS);
static int lowlevel_buffer_allocate(struct drm_device *dev,
unsigned int flags, struct exynos_drm_gem_buf *buf)
{
- dma_addr_t start_addr, end_addr;
+ dma_addr_t start_addr;
unsigned int npages, page_size, i = 0;
struct scatterlist *sgl;
int ret = 0;
DRM_DEBUG_KMS("%s\n", __FILE__);
- if (flags & EXYNOS_BO_NONCONTIG) {
+ if (IS_NONCONTIG_BUFFER(flags)) {
DRM_DEBUG_KMS("not support allocation type.\n");
return -EINVAL;
}
}
if (buf->size >= SZ_1M) {
- npages = (buf->size >> SECTION_SHIFT) + 1;
+ npages = buf->size >> SECTION_SHIFT;
page_size = SECTION_SIZE;
} else if (buf->size >= SZ_64K) {
- npages = (buf->size >> 16) + 1;
+ npages = buf->size >> 16;
page_size = SZ_64K;
} else {
- npages = (buf->size >> PAGE_SHIFT) + 1;
+ npages = buf->size >> PAGE_SHIFT;
page_size = PAGE_SIZE;
}
return -ENOMEM;
}
- buf->kvaddr = dma_alloc_writecombine(dev->dev, buf->size,
- &buf->dma_addr, GFP_KERNEL);
- if (!buf->kvaddr) {
- DRM_ERROR("failed to allocate buffer.\n");
- ret = -ENOMEM;
- goto err1;
- }
-
- start_addr = buf->dma_addr;
- end_addr = buf->dma_addr + buf->size;
-
- buf->pages = kzalloc(sizeof(struct page) * npages, GFP_KERNEL);
- if (!buf->pages) {
- DRM_ERROR("failed to allocate pages.\n");
- ret = -ENOMEM;
- goto err2;
- }
-
- start_addr = buf->dma_addr;
- end_addr = buf->dma_addr + buf->size;
+ buf->kvaddr = dma_alloc_writecombine(dev->dev, buf->size,
+ &buf->dma_addr, GFP_KERNEL);
+ if (!buf->kvaddr) {
+ DRM_ERROR("failed to allocate buffer.\n");
+ ret = -ENOMEM;
+ goto err1;
+ }
buf->pages = kzalloc(sizeof(struct page) * npages, GFP_KERNEL);
if (!buf->pages) {
}
sgl = buf->sgt->sgl;
+ start_addr = buf->dma_addr;
while (i < npages) {
buf->pages[i] = phys_to_page(start_addr);
sg_set_page(sgl, buf->pages[i], page_size, 0);
sg_dma_address(sgl) = start_addr;
start_addr += page_size;
- if (end_addr - start_addr < page_size)
- break;
sgl = sg_next(sgl);
i++;
}
- buf->pages[i] = phys_to_page(start_addr);
-
- sgl = sg_next(sgl);
- sg_set_page(sgl, buf->pages[i+1], end_addr - start_addr, 0);
-
DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
(unsigned long)buf->kvaddr,
(unsigned long)buf->dma_addr,
* non-continuous memory would be released by exynos
* gem framework.
*/
- if (flags & EXYNOS_BO_NONCONTIG) {
+ if (IS_NONCONTIG_BUFFER(flags)) {
DRM_DEBUG_KMS("not support allocation type.\n");
return;
}
*
* P.S. note that this driver is considered for modularization.
*/
- ret = subdrv->probe(dev, subdrv->manager.dev);
+ ret = subdrv->probe(dev, subdrv->dev);
if (ret)
return ret;
}
- if (subdrv->is_local)
+ if (!subdrv->manager)
return 0;
+ subdrv->manager->dev = subdrv->dev;
+
/* create and initialize a encoder for this sub driver. */
- encoder = exynos_drm_encoder_create(dev, &subdrv->manager,
+ encoder = exynos_drm_encoder_create(dev, subdrv->manager,
(1 << MAX_CRTC) - 1);
if (!encoder) {
DRM_ERROR("failed to create encoder\n");
list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) {
if (subdrv->open) {
- ret = subdrv->open(dev, subdrv->manager.dev, file);
+ ret = subdrv->open(dev, subdrv->dev, file);
if (ret)
goto err;
}
err:
list_for_each_entry_reverse(subdrv, &subdrv->list, list) {
if (subdrv->close)
- subdrv->close(dev, subdrv->manager.dev, file);
+ subdrv->close(dev, subdrv->dev, file);
}
return ret;
}
list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) {
if (subdrv->close)
- subdrv->close(dev, subdrv->manager.dev, file);
+ subdrv->close(dev, subdrv->dev, file);
}
}
EXPORT_SYMBOL_GPL(exynos_drm_subdrv_close);
* Exynos drm sub driver structure.
*
* @list: sub driver has its own list object to register to exynos drm driver.
+ * @dev: pointer to device object for subdrv device driver.
* @drm_dev: pointer to drm_device and this pointer would be set
* when sub driver calls exynos_drm_subdrv_register().
- * @is_local: appear encoder and connector disrelated device.
+ * @manager: subdrv has its own manager to control a hardware appropriately
+ * and we can access a hardware drawing on this manager.
* @probe: this callback would be called by exynos drm driver after
* subdrv is registered to it.
* @remove: this callback is used to release resources created
* by probe callback.
* @open: this would be called with drm device file open.
* @close: this would be called with drm device file close.
- * @manager: subdrv has its own manager to control a hardware appropriately
- * and we can access a hardware drawing on this manager.
* @encoder: encoder object owned by this sub driver.
* @connector: connector object owned by this sub driver.
*/
struct exynos_drm_subdrv {
struct list_head list;
+ struct device *dev;
struct drm_device *drm_dev;
- bool is_local;
+ struct exynos_drm_manager *manager;
int (*probe)(struct drm_device *drm_dev, struct device *dev);
void (*remove)(struct drm_device *dev);
void (*close)(struct drm_device *drm_dev, struct device *dev,
struct drm_file *file);
- struct exynos_drm_manager manager;
struct drm_encoder *encoder;
struct drm_connector *connector;
};
static void fimd_apply(struct device *subdrv_dev)
{
struct fimd_context *ctx = get_fimd_context(subdrv_dev);
- struct exynos_drm_manager *mgr = &ctx->subdrv.manager;
+ struct exynos_drm_manager *mgr = ctx->subdrv.manager;
struct exynos_drm_manager_ops *mgr_ops = mgr->ops;
struct exynos_drm_overlay_ops *ovl_ops = mgr->overlay_ops;
struct fimd_win_data *win_data;
.disable = fimd_win_disable,
};
+static struct exynos_drm_manager fimd_manager = {
+ .pipe = -1,
+ .ops = &fimd_manager_ops,
+ .overlay_ops = &fimd_overlay_ops,
+ .display_ops = &fimd_display_ops,
+};
+
static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)
{
struct exynos_drm_private *dev_priv = drm_dev->dev_private;
struct fimd_context *ctx = (struct fimd_context *)dev_id;
struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
struct drm_device *drm_dev = subdrv->drm_dev;
- struct exynos_drm_manager *manager = &subdrv->manager;
+ struct exynos_drm_manager *manager = subdrv->manager;
u32 val;
val = readl(ctx->regs + VIDINTCON1);
static int fimd_power_on(struct fimd_context *ctx, bool enable)
{
struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
- struct device *dev = subdrv->manager.dev;
+ struct device *dev = subdrv->dev;
DRM_DEBUG_KMS("%s\n", __FILE__);
subdrv = &ctx->subdrv;
+ subdrv->dev = dev;
+ subdrv->manager = &fimd_manager;
subdrv->probe = fimd_subdrv_probe;
subdrv->remove = fimd_subdrv_remove;
- subdrv->manager.pipe = -1;
- subdrv->manager.ops = &fimd_manager_ops;
- subdrv->manager.overlay_ops = &fimd_overlay_ops;
- subdrv->manager.display_ops = &fimd_display_ops;
- subdrv->manager.dev = dev;
mutex_init(&ctx->lock);
return out_msg;
}
-static unsigned int mask_gem_flags(unsigned int flags)
+static int check_gem_flags(unsigned int flags)
{
- return flags &= EXYNOS_BO_NONCONTIG;
+ if (flags & ~(EXYNOS_BO_MASK)) {
+ DRM_ERROR("invalid flags.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
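+/*
+ * Contiguous buffers of 1MiB or more are aligned to SECTION_SIZE and those of
+ * 64KiB or more to 64KiB; everything else, including all non-contiguous
+ * buffers, is rounded up to PAGE_SIZE.
+ */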
+static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
+{
+ if (!IS_NONCONTIG_BUFFER(flags)) {
+ if (size >= SZ_1M)
+ return roundup(size, SECTION_SIZE);
+ else if (size >= SZ_64K)
+ return roundup(size, SZ_64K);
+ else
+ goto out;
+ }
+out:
+ return roundup(size, PAGE_SIZE);
}
static struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
struct exynos_drm_gem_buf *buf;
int ret;
- size = roundup(size, PAGE_SIZE);
- DRM_DEBUG_KMS("%s: size = 0x%lx\n", __FILE__, size);
+ if (!size) {
+ DRM_ERROR("invalid size.\n");
+ return ERR_PTR(-EINVAL);
+ }
- flags = mask_gem_flags(flags);
+ size = roundup_gem_size(size, flags);
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ ret = check_gem_flags(flags);
+ if (ret)
+ return ERR_PTR(ret);
buf = exynos_drm_init_buf(dev, size);
if (!buf)
exynos_gem_obj = exynos_drm_gem_init(dev, size);
if (!exynos_gem_obj) {
ret = -ENOMEM;
- goto err;
+ goto err_fini_buf;
}
exynos_gem_obj->buffer = buf;
ret = exynos_drm_gem_get_pages(&exynos_gem_obj->base);
if (ret < 0) {
drm_gem_object_release(&exynos_gem_obj->base);
- goto err;
+ goto err_fini_buf;
}
} else {
ret = exynos_drm_alloc_buf(dev, buf, flags);
if (ret < 0) {
drm_gem_object_release(&exynos_gem_obj->base);
- goto err;
+ goto err_fini_buf;
}
}
return exynos_gem_obj;
-err:
+
+err_fini_buf:
exynos_drm_fini_buf(dev, buf);
return ERR_PTR(ret);
}
#define to_exynos_gem_obj(x) container_of(x,\
struct exynos_drm_gem_obj, base)
+#define IS_NONCONTIG_BUFFER(f) (f & EXYNOS_BO_NONCONTIG)
+
/*
* exynos drm gem buffer structure.
*
struct drm_hdmi_context, subdrv);
/* these callback points should be set by specific drivers. */
-static struct exynos_hdmi_display_ops *hdmi_display_ops;
-static struct exynos_hdmi_manager_ops *hdmi_manager_ops;
-static struct exynos_hdmi_overlay_ops *hdmi_overlay_ops;
+static struct exynos_hdmi_ops *hdmi_ops;
+static struct exynos_mixer_ops *mixer_ops;
struct drm_hdmi_context {
struct exynos_drm_subdrv subdrv;
struct exynos_drm_hdmi_context *mixer_ctx;
};
-void exynos_drm_display_ops_register(struct exynos_hdmi_display_ops
- *display_ops)
+void exynos_hdmi_ops_register(struct exynos_hdmi_ops *ops)
{
DRM_DEBUG_KMS("%s\n", __FILE__);
- if (display_ops)
- hdmi_display_ops = display_ops;
+ if (ops)
+ hdmi_ops = ops;
}
-void exynos_drm_manager_ops_register(struct exynos_hdmi_manager_ops
- *manager_ops)
+void exynos_mixer_ops_register(struct exynos_mixer_ops *ops)
{
DRM_DEBUG_KMS("%s\n", __FILE__);
- if (manager_ops)
- hdmi_manager_ops = manager_ops;
-}
-
-void exynos_drm_overlay_ops_register(struct exynos_hdmi_overlay_ops
- *overlay_ops)
-{
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
- if (overlay_ops)
- hdmi_overlay_ops = overlay_ops;
+ if (ops)
+ mixer_ops = ops;
}
static bool drm_hdmi_is_connected(struct device *dev)
DRM_DEBUG_KMS("%s\n", __FILE__);
- if (hdmi_display_ops && hdmi_display_ops->is_connected)
- return hdmi_display_ops->is_connected(ctx->hdmi_ctx->ctx);
+ if (hdmi_ops && hdmi_ops->is_connected)
+ return hdmi_ops->is_connected(ctx->hdmi_ctx->ctx);
return false;
}
DRM_DEBUG_KMS("%s\n", __FILE__);
- if (hdmi_display_ops && hdmi_display_ops->get_edid)
- return hdmi_display_ops->get_edid(ctx->hdmi_ctx->ctx,
- connector, edid, len);
+ if (hdmi_ops && hdmi_ops->get_edid)
+ return hdmi_ops->get_edid(ctx->hdmi_ctx->ctx, connector, edid,
+ len);
return 0;
}
DRM_DEBUG_KMS("%s\n", __FILE__);
- if (hdmi_display_ops && hdmi_display_ops->check_timing)
- return hdmi_display_ops->check_timing(ctx->hdmi_ctx->ctx,
- timing);
+ if (hdmi_ops && hdmi_ops->check_timing)
+ return hdmi_ops->check_timing(ctx->hdmi_ctx->ctx, timing);
return 0;
}
DRM_DEBUG_KMS("%s\n", __FILE__);
- if (hdmi_display_ops && hdmi_display_ops->power_on)
- return hdmi_display_ops->power_on(ctx->hdmi_ctx->ctx, mode);
+ if (hdmi_ops && hdmi_ops->power_on)
+ return hdmi_ops->power_on(ctx->hdmi_ctx->ctx, mode);
return 0;
}
{
struct drm_hdmi_context *ctx = to_context(subdrv_dev);
struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
- struct exynos_drm_manager *manager = &subdrv->manager;
+ struct exynos_drm_manager *manager = subdrv->manager;
DRM_DEBUG_KMS("%s\n", __FILE__);
- if (hdmi_overlay_ops && hdmi_overlay_ops->enable_vblank)
- return hdmi_overlay_ops->enable_vblank(ctx->mixer_ctx->ctx,
- manager->pipe);
+ if (mixer_ops && mixer_ops->enable_vblank)
+ return mixer_ops->enable_vblank(ctx->mixer_ctx->ctx,
+ manager->pipe);
return 0;
}
DRM_DEBUG_KMS("%s\n", __FILE__);
- if (hdmi_overlay_ops && hdmi_overlay_ops->disable_vblank)
- return hdmi_overlay_ops->disable_vblank(ctx->mixer_ctx->ctx);
+ if (mixer_ops && mixer_ops->disable_vblank)
+ return mixer_ops->disable_vblank(ctx->mixer_ctx->ctx);
}
static void drm_hdmi_mode_fixup(struct device *subdrv_dev,
DRM_DEBUG_KMS("%s\n", __FILE__);
- if (hdmi_manager_ops && hdmi_manager_ops->mode_fixup)
- hdmi_manager_ops->mode_fixup(ctx->hdmi_ctx->ctx, connector,
- mode, adjusted_mode);
+ if (hdmi_ops && hdmi_ops->mode_fixup)
+ hdmi_ops->mode_fixup(ctx->hdmi_ctx->ctx, connector, mode,
+ adjusted_mode);
}
static void drm_hdmi_mode_set(struct device *subdrv_dev, void *mode)
DRM_DEBUG_KMS("%s\n", __FILE__);
- if (hdmi_manager_ops && hdmi_manager_ops->mode_set)
- hdmi_manager_ops->mode_set(ctx->hdmi_ctx->ctx, mode);
+ if (hdmi_ops && hdmi_ops->mode_set)
+ hdmi_ops->mode_set(ctx->hdmi_ctx->ctx, mode);
}
static void drm_hdmi_get_max_resol(struct device *subdrv_dev,
DRM_DEBUG_KMS("%s\n", __FILE__);
- if (hdmi_manager_ops && hdmi_manager_ops->get_max_resol)
- hdmi_manager_ops->get_max_resol(ctx->hdmi_ctx->ctx, width,
- height);
+ if (hdmi_ops && hdmi_ops->get_max_resol)
+ hdmi_ops->get_max_resol(ctx->hdmi_ctx->ctx, width, height);
}
static void drm_hdmi_commit(struct device *subdrv_dev)
DRM_DEBUG_KMS("%s\n", __FILE__);
- if (hdmi_manager_ops && hdmi_manager_ops->commit)
- hdmi_manager_ops->commit(ctx->hdmi_ctx->ctx);
+ if (hdmi_ops && hdmi_ops->commit)
+ hdmi_ops->commit(ctx->hdmi_ctx->ctx);
}
static void drm_hdmi_dpms(struct device *subdrv_dev, int mode)
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- if (hdmi_manager_ops && hdmi_manager_ops->disable)
- hdmi_manager_ops->disable(ctx->hdmi_ctx->ctx);
+ if (hdmi_ops && hdmi_ops->disable)
+ hdmi_ops->disable(ctx->hdmi_ctx->ctx);
break;
default:
DRM_DEBUG_KMS("unkown dps mode: %d\n", mode);
DRM_DEBUG_KMS("%s\n", __FILE__);
- if (hdmi_overlay_ops && hdmi_overlay_ops->win_mode_set)
- hdmi_overlay_ops->win_mode_set(ctx->mixer_ctx->ctx, overlay);
+ if (mixer_ops && mixer_ops->win_mode_set)
+ mixer_ops->win_mode_set(ctx->mixer_ctx->ctx, overlay);
}
static void drm_mixer_commit(struct device *subdrv_dev, int zpos)
DRM_DEBUG_KMS("%s\n", __FILE__);
- if (hdmi_overlay_ops && hdmi_overlay_ops->win_commit)
- hdmi_overlay_ops->win_commit(ctx->mixer_ctx->ctx, zpos);
+ if (mixer_ops && mixer_ops->win_commit)
+ mixer_ops->win_commit(ctx->mixer_ctx->ctx, zpos);
}
static void drm_mixer_disable(struct device *subdrv_dev, int zpos)
DRM_DEBUG_KMS("%s\n", __FILE__);
- if (hdmi_overlay_ops && hdmi_overlay_ops->win_disable)
- hdmi_overlay_ops->win_disable(ctx->mixer_ctx->ctx, zpos);
+ if (mixer_ops && mixer_ops->win_disable)
+ mixer_ops->win_disable(ctx->mixer_ctx->ctx, zpos);
}
static struct exynos_drm_overlay_ops drm_hdmi_overlay_ops = {
.disable = drm_mixer_disable,
};
+static struct exynos_drm_manager hdmi_manager = {
+ .pipe = -1,
+ .ops = &drm_hdmi_manager_ops,
+ .overlay_ops = &drm_hdmi_overlay_ops,
+ .display_ops = &drm_hdmi_display_ops,
+};
static int hdmi_subdrv_probe(struct drm_device *drm_dev,
struct device *dev)
subdrv = &ctx->subdrv;
+ subdrv->dev = dev;
+ subdrv->manager = &hdmi_manager;
subdrv->probe = hdmi_subdrv_probe;
- subdrv->manager.pipe = -1;
- subdrv->manager.ops = &drm_hdmi_manager_ops;
- subdrv->manager.overlay_ops = &drm_hdmi_overlay_ops;
- subdrv->manager.display_ops = &drm_hdmi_display_ops;
- subdrv->manager.dev = dev;
platform_set_drvdata(pdev, subdrv);
void *ctx;
};
-struct exynos_hdmi_display_ops {
+struct exynos_hdmi_ops {
+ /* display */
bool (*is_connected)(void *ctx);
int (*get_edid)(void *ctx, struct drm_connector *connector,
u8 *edid, int len);
int (*check_timing)(void *ctx, void *timing);
int (*power_on)(void *ctx, int mode);
-};
-struct exynos_hdmi_manager_ops {
+ /* manager */
void (*mode_fixup)(void *ctx, struct drm_connector *connector,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
void (*disable)(void *ctx);
};
-struct exynos_hdmi_overlay_ops {
+struct exynos_mixer_ops {
+ /* manager */
int (*enable_vblank)(void *ctx, int pipe);
void (*disable_vblank)(void *ctx);
+
+ /* overlay */
void (*win_mode_set)(void *ctx, struct exynos_drm_overlay *overlay);
void (*win_commit)(void *ctx, int zpos);
void (*win_disable)(void *ctx, int zpos);
};
-extern struct platform_driver hdmi_driver;
-extern struct platform_driver mixer_driver;
-
-void exynos_drm_display_ops_register(struct exynos_hdmi_display_ops
- *display_ops);
-void exynos_drm_manager_ops_register(struct exynos_hdmi_manager_ops
- *manager_ops);
-void exynos_drm_overlay_ops_register(struct exynos_hdmi_overlay_ops
- *overlay_ops);
-
+void exynos_hdmi_ops_register(struct exynos_hdmi_ops *ops);
+void exynos_mixer_ops_register(struct exynos_mixer_ops *ops);
#endif
static const uint32_t formats[] = {
DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_NV12M,
+ DRM_FORMAT_NV12MT,
};
static int
static void vidi_apply(struct device *subdrv_dev)
{
struct vidi_context *ctx = get_vidi_context(subdrv_dev);
- struct exynos_drm_manager *mgr = &ctx->subdrv.manager;
+ struct exynos_drm_manager *mgr = ctx->subdrv.manager;
struct exynos_drm_manager_ops *mgr_ops = mgr->ops;
struct exynos_drm_overlay_ops *ovl_ops = mgr->overlay_ops;
struct vidi_win_data *win_data;
.disable = vidi_win_disable,
};
+static struct exynos_drm_manager vidi_manager = {
+ .pipe = -1,
+ .ops = &vidi_manager_ops,
+ .overlay_ops = &vidi_overlay_ops,
+ .display_ops = &vidi_display_ops,
+};
+
static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc)
{
struct exynos_drm_private *dev_priv = drm_dev->dev_private;
struct vidi_context *ctx = container_of(work, struct vidi_context,
work);
struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
- struct exynos_drm_manager *manager = &subdrv->manager;
+ struct exynos_drm_manager *manager = subdrv->manager;
if (manager->pipe < 0)
return;
static int vidi_power_on(struct vidi_context *ctx, bool enable)
{
struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
- struct device *dev = subdrv->manager.dev;
+ struct device *dev = subdrv->dev;
DRM_DEBUG_KMS("%s\n", __FILE__);
ctx->raw_edid = (struct edid *)fake_edid_info;
subdrv = &ctx->subdrv;
+ subdrv->dev = dev;
+ subdrv->manager = &vidi_manager;
subdrv->probe = vidi_subdrv_probe;
subdrv->remove = vidi_subdrv_remove;
- subdrv->manager.pipe = -1;
- subdrv->manager.ops = &vidi_manager_ops;
- subdrv->manager.overlay_ops = &vidi_overlay_ops;
- subdrv->manager.display_ops = &vidi_display_ops;
- subdrv->manager.dev = dev;
mutex_init(&ctx->lock);
#include "exynos_hdmi.h"
-#define HDMI_OVERLAY_NUMBER 3
#define MAX_WIDTH 1920
#define MAX_HEIGHT 1080
#define get_hdmi_context(dev) platform_get_drvdata(to_platform_device(dev))
static bool hdmi_is_connected(void *ctx)
{
- struct hdmi_context *hdata = (struct hdmi_context *)ctx;
+ struct hdmi_context *hdata = ctx;
u32 val = hdmi_reg_read(hdata, HDMI_HPD_STATUS);
if (val)
u8 *edid, int len)
{
struct edid *raw_edid;
- struct hdmi_context *hdata = (struct hdmi_context *)ctx;
+ struct hdmi_context *hdata = ctx;
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
static int hdmi_check_timing(void *ctx, void *timing)
{
- struct hdmi_context *hdata = (struct hdmi_context *)ctx;
+ struct hdmi_context *hdata = ctx;
struct fb_videomode *check_timing = timing;
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
return 0;
}
-static struct exynos_hdmi_display_ops display_ops = {
- .is_connected = hdmi_is_connected,
- .get_edid = hdmi_get_edid,
- .check_timing = hdmi_check_timing,
- .power_on = hdmi_display_power_on,
-};
-
static void hdmi_set_acr(u32 freq, u8 *acr)
{
u32 n, cts;
struct drm_display_mode *adjusted_mode)
{
struct drm_display_mode *m;
- struct hdmi_context *hdata = (struct hdmi_context *)ctx;
+ struct hdmi_context *hdata = ctx;
int index;
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
static void hdmi_mode_set(void *ctx, void *mode)
{
- struct hdmi_context *hdata = (struct hdmi_context *)ctx;
+ struct hdmi_context *hdata = ctx;
int conf_idx;
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
static void hdmi_commit(void *ctx)
{
- struct hdmi_context *hdata = (struct hdmi_context *)ctx;
+ struct hdmi_context *hdata = ctx;
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
static void hdmi_disable(void *ctx)
{
- struct hdmi_context *hdata = (struct hdmi_context *)ctx;
+ struct hdmi_context *hdata = ctx;
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
}
}
-static struct exynos_hdmi_manager_ops manager_ops = {
+static struct exynos_hdmi_ops hdmi_ops = {
+ /* display */
+ .is_connected = hdmi_is_connected,
+ .get_edid = hdmi_get_edid,
+ .check_timing = hdmi_check_timing,
+ .power_on = hdmi_display_power_on,
+
+ /* manager */
.mode_fixup = hdmi_mode_fixup,
.mode_set = hdmi_mode_set,
.get_max_resol = hdmi_get_max_resol,
static irqreturn_t hdmi_irq_handler(int irq, void *arg)
{
struct exynos_drm_hdmi_context *ctx = arg;
- struct hdmi_context *hdata = (struct hdmi_context *)ctx->ctx;
+ struct hdmi_context *hdata = ctx->ctx;
u32 intc_flag;
intc_flag = hdmi_reg_read(hdata, HDMI_INTC_FLAG);
DRM_DEBUG_KMS("%s\n", __func__);
- hdmi_resource_poweroff((struct hdmi_context *)ctx->ctx);
+ hdmi_resource_poweroff(ctx->ctx);
return 0;
}
DRM_DEBUG_KMS("%s\n", __func__);
- hdmi_resource_poweron((struct hdmi_context *)ctx->ctx);
+ hdmi_resource_poweron(ctx->ctx);
return 0;
}
hdata->irq = res->start;
/* register specific callbacks to common hdmi. */
- exynos_drm_display_ops_register(&display_ops);
- exynos_drm_manager_ops_register(&manager_ops);
+ exynos_hdmi_ops_register(&hdmi_ops);
hdmi_resource_poweron(hdata);
static int __devexit hdmi_remove(struct platform_device *pdev)
{
struct exynos_drm_hdmi_context *ctx = platform_get_drvdata(pdev);
- struct hdmi_context *hdata = (struct hdmi_context *)ctx->ctx;
+ struct hdmi_context *hdata = ctx->ctx;
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
#include "exynos_drm_drv.h"
#include "exynos_drm_hdmi.h"
-#define HDMI_OVERLAY_NUMBER 3
+#define MIXER_WIN_NR 3
+#define MIXER_DEFAULT_WIN 0
#define get_mixer_context(dev) platform_get_drvdata(to_platform_device(dev))
};
struct mixer_context {
- struct fb_videomode *default_timing;
- unsigned int default_win;
- unsigned int default_bpp;
unsigned int irq;
int pipe;
bool interlace;
- bool vp_enabled;
struct mixer_resources mixer_res;
- struct hdmi_win_data win_data[HDMI_OVERLAY_NUMBER];
+ struct hdmi_win_data win_data[MIXER_WIN_NR];
};
static const u8 filter_y_horiz_tap8[] = {
win = overlay->zpos;
if (win == DEFAULT_ZPOS)
- win = mixer_ctx->default_win;
+ win = MIXER_DEFAULT_WIN;
- if (win < 0 || win > HDMI_OVERLAY_NUMBER) {
+ if (win < 0 || win > MIXER_WIN_NR) {
DRM_ERROR("overlay plane[%d] is wrong\n", win);
return;
}
DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win);
if (win == DEFAULT_ZPOS)
- win = mixer_ctx->default_win;
+ win = MIXER_DEFAULT_WIN;
- if (win < 0 || win > HDMI_OVERLAY_NUMBER) {
+ if (win < 0 || win > MIXER_WIN_NR) {
DRM_ERROR("overlay plane[%d] is wrong\n", win);
return;
}
DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win);
if (win == DEFAULT_ZPOS)
- win = mixer_ctx->default_win;
+ win = MIXER_DEFAULT_WIN;
- if (win < 0 || win > HDMI_OVERLAY_NUMBER) {
+ if (win < 0 || win > MIXER_WIN_NR) {
DRM_ERROR("overlay plane[%d] is wrong\n", win);
return;
}
spin_unlock_irqrestore(&res->reg_slock, flags);
}
-static struct exynos_hdmi_overlay_ops overlay_ops = {
+static struct exynos_mixer_ops mixer_ops = {
+ /* manager */
.enable_vblank = mixer_enable_vblank,
.disable_vblank = mixer_disable_vblank,
+
+ /* overlay */
.win_mode_set = mixer_win_mode_set,
.win_commit = mixer_win_commit,
.win_disable = mixer_win_disable,
static irqreturn_t mixer_irq_handler(int irq, void *arg)
{
struct exynos_drm_hdmi_context *drm_hdmi_ctx = arg;
- struct mixer_context *ctx =
- (struct mixer_context *)drm_hdmi_ctx->ctx;
+ struct mixer_context *ctx = drm_hdmi_ctx->ctx;
struct mixer_resources *res = &ctx->mixer_res;
u32 val, val_base;
DRM_DEBUG_KMS("resume - start\n");
- mixer_resource_poweron((struct mixer_context *)ctx->ctx);
+ mixer_resource_poweron(ctx->ctx);
return 0;
}
DRM_DEBUG_KMS("suspend - start\n");
- mixer_resource_poweroff((struct mixer_context *)ctx->ctx);
+ mixer_resource_poweroff(ctx->ctx);
return 0;
}
static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
struct platform_device *pdev)
{
- struct mixer_context *mixer_ctx =
- (struct mixer_context *)ctx->ctx;
+ struct mixer_context *mixer_ctx = ctx->ctx;
struct device *dev = &pdev->dev;
struct mixer_resources *mixer_res = &mixer_ctx->mixer_res;
struct resource *res;
goto fail;
/* register specific callback point to common hdmi. */
- exynos_drm_overlay_ops_register(&overlay_ops);
+ exynos_mixer_ops_register(&mixer_ops);
mixer_resource_poweron(ctx);
struct device *dev = &pdev->dev;
struct exynos_drm_hdmi_context *drm_hdmi_ctx =
platform_get_drvdata(pdev);
- struct mixer_context *ctx = (struct mixer_context *)drm_hdmi_ctx->ctx;
+ struct mixer_context *ctx = drm_hdmi_ctx->ctx;
dev_info(dev, "remove successful\n");
"Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))");
int i915_enable_rc6 __read_mostly = -1;
-module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
+module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0400);
MODULE_PARM_DESC(i915_enable_rc6,
"Enable power-saving render C-state 6. "
"Different stages can be selected via bitmask values "
{
list_del_init(&obj->ring_list);
obj->last_rendering_seqno = 0;
+ obj->last_fenced_seqno = 0;
}
static void
BUG_ON(!list_empty(&obj->gpu_write_list));
BUG_ON(!obj->active);
obj->ring = NULL;
+ obj->last_fenced_ring = NULL;
i915_gem_object_move_off_active(obj);
obj->fenced_gpu_access = false;
#define GT_FIFO_FREE_ENTRIES 0x120008
#define GT_FIFO_NUM_RESERVED_ENTRIES 20
+#define GEN6_UCGCTL1 0x9400
+# define GEN6_BLBUNIT_CLOCK_GATE_DISABLE (1 << 5)
+
#define GEN6_UCGCTL2 0x9404
# define GEN6_RCZUNIT_CLOCK_GATE_DISABLE (1 << 13)
# define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE (1 << 12)
}
static int
+intel_finish_fb(struct drm_framebuffer *old_fb)
+{
+ struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ bool was_interruptible = dev_priv->mm.interruptible;
+ int ret;
+
+ wait_event(dev_priv->pending_flip_queue,
+ atomic_read(&dev_priv->mm.wedged) ||
+ atomic_read(&obj->pending_flip) == 0);
+
+ /* Big Hammer, we also need to ensure that any pending
+ * MI_WAIT_FOR_EVENT inside a user batch buffer on the
+ * current scanout is retired before unpinning the old
+ * framebuffer.
+ *
+ * This should only fail upon a hung GPU, in which case we
+ * can safely continue.
+ */
+ dev_priv->mm.interruptible = false;
+ ret = i915_gem_object_finish_gpu(obj);
+ dev_priv->mm.interruptible = was_interruptible;
+
+ return ret;
+}
+
+static int
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
return ret;
}
- if (old_fb) {
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
-
- wait_event(dev_priv->pending_flip_queue,
- atomic_read(&dev_priv->mm.wedged) ||
- atomic_read(&obj->pending_flip) == 0);
-
- /* Big Hammer, we also need to ensure that any pending
- * MI_WAIT_FOR_EVENT inside a user batch buffer on the
- * current scanout is retired before unpinning the old
- * framebuffer.
- *
- * This should only fail upon a hung GPU, in which case we
- * can safely continue.
- */
- ret = i915_gem_object_finish_gpu(obj);
- (void) ret;
- }
+ if (old_fb)
+ intel_finish_fb(old_fb);
ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
LEAVE_ATOMIC_MODE_SET);
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
struct drm_device *dev = crtc->dev;
+ /* Flush any pending WAITs before we disable the pipe. Note that
+ * we need to drop the struct_mutex in order to acquire it again
+ * during the lowlevel dpms routines around a couple of the
+	 * operations. It looks neither trivial nor desirable to move
+ * that locking higher. So instead we leave a window for the
+ * submission of further commands on the fb before we can actually
+ * disable it. This race with userspace exists anyway, and we can
+ * only rely on the pipe being disabled by userspace after it
+ * receives the hotplug notification and has flushed any pending
+ * batches.
+ */
+ if (crtc->fb) {
+ mutex_lock(&dev->struct_mutex);
+ intel_finish_fb(crtc->fb);
+ mutex_unlock(&dev->struct_mutex);
+ }
+
crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
+ I915_WRITE(GEN6_UCGCTL1,
+ I915_READ(GEN6_UCGCTL1) |
+ GEN6_BLBUNIT_CLOCK_GATE_DISABLE);
+
/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
* gating disable must be set. Failure to set it results in
* flickering pixels due to Z write ordering failures after
return (max_link_clock * max_lanes * 8) / 10;
}
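+/*
+ * Check whether the mode fits the link bandwidth at 24bpp; if not, retry at
+ * 18bpp and, when an adjusted_mode is supplied, flag it for 6bpc dithering.
+ */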
+static bool
+intel_dp_adjust_dithering(struct intel_dp *intel_dp,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
+ int max_lanes = intel_dp_max_lane_count(intel_dp);
+ int max_rate, mode_rate;
+
+ mode_rate = intel_dp_link_required(mode->clock, 24);
+ max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
+
+ if (mode_rate > max_rate) {
+ mode_rate = intel_dp_link_required(mode->clock, 18);
+ if (mode_rate > max_rate)
+ return false;
+
+ if (adjusted_mode)
+ adjusted_mode->private_flags
+ |= INTEL_MODE_DP_FORCE_6BPC;
+
+ return true;
+ }
+
+ return true;
+}
+
static int
intel_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
- int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
- int max_lanes = intel_dp_max_lane_count(intel_dp);
- int max_rate, mode_rate;
if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay)
return MODE_PANEL;
}
- mode_rate = intel_dp_link_required(mode->clock, 24);
- max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
-
- if (mode_rate > max_rate) {
- mode_rate = intel_dp_link_required(mode->clock, 18);
- if (mode_rate > max_rate)
- return MODE_CLOCK_HIGH;
- else
- mode->private_flags |= INTEL_MODE_DP_FORCE_6BPC;
- }
+ if (!intel_dp_adjust_dithering(intel_dp, mode, NULL))
+ return MODE_CLOCK_HIGH;
if (mode->clock < 10000)
return MODE_CLOCK_LOW;
int lane_count, clock;
int max_lane_count = intel_dp_max_lane_count(intel_dp);
int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
- int bpp = mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
+ int bpp;
static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
mode->clock = intel_dp->panel_fixed_mode->clock;
}
+ if (!intel_dp_adjust_dithering(intel_dp, mode, adjusted_mode))
+ return false;
+
+ bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
+
for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
for (clock = 0; clock <= max_clock; clock++) {
int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
bus->has_gpio = intel_gpio_setup(bus, i);
/* XXX force bit banging until GMBUS is fully debugged */
- if (bus->has_gpio && IS_GEN2(dev))
+ if (bus->has_gpio)
bus->force_bit = true;
}
* of the buffer.
*/
ring->effective_size = ring->size;
- if (IS_I830(ring->dev))
+ if (IS_I830(ring->dev) || IS_845G(ring->dev))
ring->effective_size -= 128;
return 0;
/* must disable */
sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
sprctl |= SPRITE_ENABLE;
- sprctl |= SPRITE_DEST_KEY;
/* Sizes are 0 based */
src_w--;
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
return;
+ /* some R4xx chips have the wrong frev */
+ if (rdev->family <= CHIP_RV410)
+ frev = 1;
+
switch (frev) {
case 1:
switch (crev) {
* or the chip could hang on a subsequent access
*/
if (rdev->pll_errata & CHIP_ERRATA_PLL_DELAY) {
- udelay(5000);
+ mdelay(5);
}
/* This function is required to workaround a hardware bug in some (all?)
/* r7xx asics need to soft reset RLC before halting */
WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
RREG32(SRBM_SOFT_RESET);
- udelay(15000);
+ mdelay(15);
WREG32(SRBM_SOFT_RESET, 0);
RREG32(SRBM_SOFT_RESET);
}
RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP);
RADEON_READ(R600_GRBM_SOFT_RESET);
- DRM_UDELAY(15000);
+ mdelay(15);
RADEON_WRITE(R600_GRBM_SOFT_RESET, 0);
fw_data = (const __be32 *)dev_priv->me_fw->data;
RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP);
RADEON_READ(R600_GRBM_SOFT_RESET);
- DRM_UDELAY(15000);
+ mdelay(15);
RADEON_WRITE(R600_GRBM_SOFT_RESET, 0);
fw_data = (const __be32 *)dev_priv->pfp_fw->data;
RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP);
RADEON_READ(R600_GRBM_SOFT_RESET);
- DRM_UDELAY(15000);
+ mdelay(15);
RADEON_WRITE(R600_GRBM_SOFT_RESET, 0);
tmp &= ~(R300_SCLK_FORCE_VAP);
tmp |= RADEON_SCLK_FORCE_CP;
WREG32_PLL(RADEON_SCLK_CNTL, tmp);
- udelay(15000);
+ mdelay(15);
tmp = RREG32_PLL(R300_SCLK_CNTL2);
tmp &= ~(R300_SCLK_FORCE_TCL |
tmp |= (RADEON_ENGIN_DYNCLK_MODE |
(0x01 << RADEON_ACTIVE_HILO_LAT_SHIFT));
WREG32_PLL(RADEON_CLK_PWRMGT_CNTL, tmp);
- udelay(15000);
+ mdelay(15);
tmp = RREG32_PLL(RADEON_CLK_PIN_CNTL);
tmp |= RADEON_SCLK_DYN_START_CNTL;
WREG32_PLL(RADEON_CLK_PIN_CNTL, tmp);
- udelay(15000);
+ mdelay(15);
/* When DRI is enabled, setting DYN_STOP_LAT to zero can cause some R200
to lockup randomly, leave them as set by BIOS.
tmp |= RADEON_SCLK_MORE_FORCEON;
}
WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
- udelay(15000);
+ mdelay(15);
}
/* RV200::A11 A12, RV250::A11 A12 */
tmp |= RADEON_TCL_BYPASS_DISABLE;
WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp);
}
- udelay(15000);
+ mdelay(15);
/*enable dynamic mode for display clocks (PIXCLK and PIX2CLK) */
tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
RADEON_PIXCLK_TMDS_ALWAYS_ONb);
WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
- udelay(15000);
+ mdelay(15);
tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
tmp |= (RADEON_PIXCLK_ALWAYS_ONb |
RADEON_PIXCLK_DAC_ALWAYS_ONb);
WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
- udelay(15000);
+ mdelay(15);
}
} else {
/* Turn everything OFF (ForceON to everything) */
}
WREG32_PLL(RADEON_SCLK_CNTL, tmp);
- udelay(16000);
+ mdelay(16);
if ((rdev->family == CHIP_R300) ||
(rdev->family == CHIP_R350)) {
R300_SCLK_FORCE_GA |
R300_SCLK_FORCE_CBA);
WREG32_PLL(R300_SCLK_CNTL2, tmp);
- udelay(16000);
+ mdelay(16);
}
if (rdev->flags & RADEON_IS_IGP) {
tmp &= ~(RADEON_FORCEON_MCLKA |
RADEON_FORCEON_YCLKA);
WREG32_PLL(RADEON_MCLK_CNTL, tmp);
- udelay(16000);
+ mdelay(16);
}
if ((rdev->family == CHIP_RV200) ||
tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
tmp |= RADEON_SCLK_MORE_FORCEON;
WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
- udelay(16000);
+ mdelay(16);
}
tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
RADEON_PIXCLK_TMDS_ALWAYS_ONb);
WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
- udelay(16000);
+ mdelay(16);
tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
tmp &= ~(RADEON_PIXCLK_ALWAYS_ONb |
case 4:
val = RBIOS16(index);
index += 2;
- udelay(val * 1000);
+ mdelay(val);
break;
case 6:
slave_addr = id & 0xff;
udelay(150);
break;
case 2:
- udelay(1000);
+ mdelay(1);
break;
case 3:
while (tmp--) {
/*mclk_cntl |= 0x00001111;*//* ??? */
WREG32_PLL(RADEON_MCLK_CNTL,
mclk_cntl);
- udelay(10000);
+ mdelay(10);
#endif
WREG32_PLL
(RADEON_CLK_PWRMGT_CNTL,
tmp &
~RADEON_CG_NO1_DEBUG_0);
- udelay(10000);
+ mdelay(10);
}
break;
default:
struct radeon_i2c_chan *i2c;
int ret;
+ /* don't add the mm_i2c bus unless hw_i2c is enabled */
+ if (rec->mm_i2c && (radeon_hw_i2c == 0))
+ return NULL;
+
i2c = kzalloc(sizeof(struct radeon_i2c_chan), GFP_KERNEL);
if (i2c == NULL)
return NULL;
lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL);
lvds_pll_cntl |= RADEON_LVDS_PLL_EN;
WREG32(RADEON_LVDS_PLL_CNTL, lvds_pll_cntl);
- udelay(1000);
+ mdelay(1);
lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL);
lvds_pll_cntl &= ~RADEON_LVDS_PLL_RESET;
(backlight_level << RADEON_LVDS_BL_MOD_LEVEL_SHIFT));
if (is_mac)
lvds_gen_cntl |= RADEON_LVDS_BL_MOD_EN;
- udelay(panel_pwr_delay * 1000);
+ mdelay(panel_pwr_delay);
WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
break;
case DRM_MODE_DPMS_STANDBY:
WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
lvds_gen_cntl &= ~(RADEON_LVDS_ON | RADEON_LVDS_BLON | RADEON_LVDS_EN | RADEON_LVDS_DIGON);
}
- udelay(panel_pwr_delay * 1000);
+ mdelay(panel_pwr_delay);
WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
- udelay(panel_pwr_delay * 1000);
+ mdelay(panel_pwr_delay);
break;
}
WREG32(RADEON_DAC_MACRO_CNTL, tmp);
- udelay(2000);
+ mdelay(2);
if (RREG32(RADEON_DAC_CNTL) & RADEON_DAC_CMP_OUTPUT)
found = connector_status_connected;
tmp = dac_cntl2 | RADEON_DAC2_DAC2_CLK_SEL | RADEON_DAC2_CMP_EN;
WREG32(RADEON_DAC_CNTL2, tmp);
- udelay(10000);
+ mdelay(10);
if (ASIC_IS_R300(rdev)) {
if (RREG32(RADEON_DAC_CNTL2) & RADEON_DAC2_CMP_OUT_B)
* for locking on FreeBSD.
*/
if (cmdbuf->size) {
- kcmd_addr = kmalloc(cmdbuf->size * 8, GFP_KERNEL);
+ kcmd_addr = kmalloc_array(cmdbuf->size, 8, GFP_KERNEL);
if (kcmd_addr == NULL)
return -ENOMEM;
cmdbuf->vb_addr = kvb_addr;
}
if (cmdbuf->nbox) {
- kbox_addr = kmalloc(cmdbuf->nbox * sizeof(struct drm_clip_rect),
- GFP_KERNEL);
+ kbox_addr = kmalloc_array(cmdbuf->nbox, sizeof(struct drm_clip_rect),
+ GFP_KERNEL);
if (kbox_addr == NULL) {
ret = -ENOMEM;
goto done;
break;
default:
BUG();
+ val = "";
}
return sprintf(buf, "%s\n", val);
* If a negative value is stored in any of the referenced registers, this value
* reflects an error code which will be returned.
*/
-static int pmbus_get_boolean(struct pmbus_data *data, int index, int *val)
+static int pmbus_get_boolean(struct pmbus_data *data, int index)
{
u8 s1 = (index >> 24) & 0xff;
u8 s2 = (index >> 16) & 0xff;
u8 reg = (index >> 8) & 0xff;
u8 mask = index & 0xff;
- int status;
+ int ret, status;
u8 regval;
status = data->status[reg];
regval = status & mask;
if (!s1 && !s2)
- *val = !!regval;
+ ret = !!regval;
else {
long v1, v2;
struct pmbus_sensor *sensor1, *sensor2;
v1 = pmbus_reg2data(data, sensor1);
v2 = pmbus_reg2data(data, sensor2);
- *val = !!(regval && v1 >= v2);
+ ret = !!(regval && v1 >= v2);
}
- return 0;
+ return ret;
}
static ssize_t pmbus_show_boolean(struct device *dev,
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
struct pmbus_data *data = pmbus_update_device(dev);
int val;
- int err;
- err = pmbus_get_boolean(data, attr->index, &val);
- if (err)
- return err;
+ val = pmbus_get_boolean(data, attr->index);
+ if (val < 0)
+ return val;
return snprintf(buf, PAGE_SIZE, "%d\n", val);
}
return err;
}
-static int __init smsc47b397_find(unsigned short *addr)
+static int __init smsc47b397_find(void)
{
u8 id, rev;
char *name;
+ unsigned short addr;
superio_enter();
id = force_id ? force_id : superio_inb(SUPERIO_REG_DEVID);
rev = superio_inb(SUPERIO_REG_DEVREV);
superio_select(SUPERIO_REG_LD8);
- *addr = (superio_inb(SUPERIO_REG_BASE_MSB) << 8)
+ addr = (superio_inb(SUPERIO_REG_BASE_MSB) << 8)
| superio_inb(SUPERIO_REG_BASE_LSB);
pr_info("found SMSC %s (base address 0x%04x, revision %u)\n",
- name, *addr, rev);
+ name, addr, rev);
superio_exit();
- return 0;
+ return addr;
}
static int __init smsc47b397_init(void)
unsigned short address;
int ret;
- ret = smsc47b397_find(&address);
- if (ret)
+ ret = smsc47b397_find();
+ if (ret < 0)
return ret;
+ address = ret;
ret = platform_driver_register(&smsc47b397_driver);
if (ret)
.attrs = smsc47m1_attributes,
};
-static int __init smsc47m1_find(unsigned short *addr,
- struct smsc47m1_sio_data *sio_data)
+static int __init smsc47m1_find(struct smsc47m1_sio_data *sio_data)
{
u8 val;
+ unsigned short addr;
superio_enter();
val = force_id ? force_id : superio_inb(SUPERIO_REG_DEVID);
}
superio_select();
- *addr = (superio_inb(SUPERIO_REG_BASE) << 8)
+ addr = (superio_inb(SUPERIO_REG_BASE) << 8)
| superio_inb(SUPERIO_REG_BASE + 1);
- if (*addr == 0) {
+ if (addr == 0) {
pr_info("Device address not set, will not use\n");
superio_exit();
return -ENODEV;
}
superio_exit();
- return 0;
+ return addr;
}
/* Restore device to its initial state */
unsigned short address;
struct smsc47m1_sio_data sio_data;
- if (smsc47m1_find(&address, &sio_data))
- return -ENODEV;
+ err = smsc47m1_find(&sio_data);
+ if (err < 0)
+ return err;
+ address = err;
/* Sets global pdev as a side effect */
err = smsc47m1_device_add(address, &sio_data);
if (err)
- goto exit;
+ return err;
err = platform_driver_probe(&smsc47m1_driver, smsc47m1_probe);
if (err)
exit_device:
platform_device_unregister(pdev);
smsc47m1_restore(&sio_data);
-exit:
return err;
}
srq_attr.attr.max_wr = sdev->srq_size;
srq_attr.attr.max_sge = 1;
srq_attr.attr.srq_limit = 0;
+ srq_attr.srq_type = IB_SRQT_BASIC;
sdev->srq = ib_create_srq(sdev->pd, &srq_attr);
if (IS_ERR(sdev->srq))
input_dev = input_allocate_device();
if (!onkey || !input_dev) {
dev_err(&pdev->dev, "Failed to allocate memory\n");
- return -ENOMEM;
+ error = -ENOMEM;
+ goto err_free_mem;
}
onkey->input = input_dev;
unsigned char *packet = psmouse->packet;
input_report_key(dev, BTN_LEFT, packet[0] & 0x01);
- input_report_key(dev, BTN_RIGHT, packet[0] & 0x02);
input_mt_report_pointer_emulation(dev, true);
input_sync(dev);
}
if (elantech_set_range(psmouse, &x_min, &y_min, &x_max, &y_max, &width))
return -1;
+ __set_bit(INPUT_PROP_POINTER, dev->propbit);
__set_bit(EV_KEY, dev->evbit);
__set_bit(EV_ABS, dev->evbit);
__clear_bit(EV_REL, dev->evbit);
*/
psmouse_warn(psmouse, "couldn't query resolution data.\n");
}
-
+ /* v4 is clickpad, with only one button. */
+ __set_bit(INPUT_PROP_BUTTONPAD, dev->propbit);
+ __clear_bit(BTN_RIGHT, dev->keybit);
__set_bit(BTN_TOOL_QUADTAP, dev->keybit);
/* For X to recognize me as touchpad. */
input_set_abs_params(dev, ABS_X, x_min, x_max, 0, 0);
*/
static int elantech_reconnect(struct psmouse *psmouse)
{
+ psmouse_reset(psmouse);
+
if (elantech_detect(psmouse, 0))
return -1;
if (!etd)
return -ENOMEM;
+ psmouse_reset(psmouse);
+
etd->parity[0] = 1;
for (i = 1; i < 256; i++)
etd->parity[i] = etd->parity[i & (i - 1)] ^ 1;
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/input-polldev.h>
+#include <linux/gpio.h>
#include <linux/gpio_mouse.h>
-#include <asm/gpio.h>
/*
* Timer function which is run every scan_ms ms when the device is opened.
}
} else {
/* SFAC packet */
+ if ((packet[0] & (FSP_PB0_LBTN|FSP_PB0_PHY_BTN)) ==
+ FSP_PB0_LBTN) {
+ /* On-pad click in SFAC mode should be handled
+ * by userspace. On-pad clicks in MFMC mode
+ * are real clickpad clicks, and not ignored.
+ */
+ packet[0] &= ~FSP_PB0_LBTN;
+ }
/* no multi-finger information */
ad->last_mt_fgr = 0;
return 0;
if (trackpoint_read(&psmouse->ps2dev, TP_EXT_BTN, &button_info)) {
- printk(KERN_WARNING "trackpoint.c: failed to get extended button data\n");
+ psmouse_warn(psmouse, "failed to get extended button data\n");
button_info = 0;
}
error = sysfs_create_group(&ps2dev->serio->dev.kobj, &trackpoint_attr_group);
if (error) {
- printk(KERN_ERR
- "trackpoint.c: failed to create sysfs attributes, error: %d\n",
- error);
+ psmouse_err(psmouse,
+ "failed to create sysfs attributes, error: %d\n",
+ error);
kfree(psmouse->private);
psmouse->private = NULL;
return -1;
}
- printk(KERN_INFO "IBM TrackPoint firmware: 0x%02x, buttons: %d/%d\n",
- firmware_id, (button_info & 0xf0) >> 4, button_info & 0x0f);
+ psmouse_info(psmouse,
+ "IBM TrackPoint firmware: 0x%02x, buttons: %d/%d\n",
+ firmware_id,
+ (button_info & 0xf0) >> 4, button_info & 0x0f);
return 0;
}
/*
- * drivers/input/touchscreen/tps6507x_ts.c
- *
* Touchscreen driver for the tps6507x chip.
*
* Copyright (c) 2009 RidgeRun (todd.fischer@ridgerun.com)
MODULE_AUTHOR("Todd Fischer <todd.fischer@ridgerun.com>");
MODULE_DESCRIPTION("TPS6507x - TouchScreen driver");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:tps6507x-tsc");
+MODULE_ALIAS("platform:tps6507x-ts");
struct cardstate *cs = tty->driver_data;
if (!cs) { /* happens if we didn't find cs in open */
- printk(KERN_DEBUG "%s: no cardstate\n", __func__);
+ gig_dbg(DEBUG_IF, "%s: no cardstate", __func__);
return;
}
bitmap->events_cleared = bitmap->mddev->events;
sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
- bitmap->flags |= BITMAP_HOSTENDIAN;
- sb->version = cpu_to_le32(BITMAP_MAJOR_HOSTENDIAN);
-
kunmap_atomic(sb);
return 0;
* re-add of a missing device */
start = mddev->recovery_cp;
+ mutex_lock(&mddev->bitmap_info.mutex);
err = bitmap_init_from_disk(bitmap, start);
+ mutex_unlock(&mddev->bitmap_info.mutex);
if (err)
goto out;
struct r1conf *conf = mddev->private;
int primary;
int i;
+ int vcnt;
for (primary = 0; primary < conf->raid_disks * 2; primary++)
if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
break;
}
r1_bio->read_disk = primary;
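+	/* number of pages needed to cover r1_bio->sectors, rounded up */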
+ vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
for (i = 0; i < conf->raid_disks * 2; i++) {
int j;
- int vcnt = r1_bio->sectors >> (PAGE_SHIFT- 9);
struct bio *pbio = r1_bio->bios[primary];
struct bio *sbio = r1_bio->bios[i];
int size;
struct r10conf *conf = mddev->private;
int i, first;
struct bio *tbio, *fbio;
+ int vcnt;
atomic_set(&r10_bio->remaining, 1);
first = i;
fbio = r10_bio->devs[i].bio;
+ vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9);
/* now find blocks with errors */
for (i=0 ; i < conf->copies ; i++) {
int j, d;
- int vcnt = r10_bio->sectors >> (PAGE_SHIFT-9);
tbio = r10_bio->devs[i].bio;
*/
for (i = 0; i < conf->copies; i++) {
int j, d;
- int vcnt = r10_bio->sectors >> (PAGE_SHIFT-9);
tbio = r10_bio->devs[i].repl_bio;
if (!tbio || !tbio->bi_end_io)
.constraints = {
.name = "db8500-vape",
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ .always_on = true,
},
.consumer_supplies = db8500_vape_consumers,
.num_consumer_supplies = ARRAY_SIZE(db8500_vape_consumers),
}
if (mtd->type == MTD_ABSENT) {
- put_mtd_device(mtd);
ret = -ENODEV;
- goto out;
+ goto out1;
}
mtd_ino = iget_locked(mnt->mnt_sb, devnum);
if (!mtd_ino) {
- put_mtd_device(mtd);
ret = -ENOMEM;
- goto out;
+ goto out1;
}
if (mtd_ino->i_state & I_NEW) {
mtd_ino->i_private = mtd;
/* You can't open it RW if it's not a writeable device */
if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {
- iput(mtd_ino);
- put_mtd_device(mtd);
ret = -EACCES;
- goto out;
+ goto out2;
}
mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
if (!mfi) {
- iput(mtd_ino);
- put_mtd_device(mtd);
ret = -ENOMEM;
- goto out;
+ goto out2;
}
mfi->ino = mtd_ino;
mfi->mtd = mtd;
file->private_data = mfi;
+ mutex_unlock(&mtd_mutex);
+ return 0;
+out2:
+ iput(mtd_ino);
+out1:
+ put_mtd_device(mtd);
out:
mutex_unlock(&mtd_mutex);
simple_release_fs(&mnt, &count);
if (--sc->ps_usecount != 0)
goto unlock;
- if (sc->ps_flags & PS_WAIT_FOR_TX_ACK)
- goto unlock;
-
- if (sc->ps_idle)
+ if (sc->ps_idle && (sc->ps_flags & PS_WAIT_FOR_TX_ACK))
mode = ATH9K_PM_FULL_SLEEP;
else if (sc->ps_enabled &&
!(sc->ps_flags & (PS_WAIT_FOR_BEACON |
PS_WAIT_FOR_CAB |
- PS_WAIT_FOR_PSPOLL_DATA)))
+ PS_WAIT_FOR_PSPOLL_DATA |
+ PS_WAIT_FOR_TX_ACK)))
mode = ATH9K_PM_NETWORK_SLEEP;
else
goto unlock;
set_bit(DEVICE_STATE_INITIALIZED, &rt2x00dev->flags);
- /*
- * Register the extra components.
- */
- rt2x00rfkill_register(rt2x00dev);
-
return 0;
}
rt2x00link_register(rt2x00dev);
rt2x00leds_register(rt2x00dev);
rt2x00debug_register(rt2x00dev);
+ rt2x00rfkill_register(rt2x00dev);
return 0;
__le16 fc = hdr->frame_control;
txrate = ieee80211_get_tx_rate(hw, info);
- tcb_desc->hw_rate = txrate->hw_value;
+ if (txrate)
+ tcb_desc->hw_rate = txrate->hw_value;
+ else
+ tcb_desc->hw_rate = 0;
if (ieee80211_is_data(fc)) {
/*
memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
ring = &rtlpci->tx_ring[BEACON_QUEUE];
pskb = __skb_dequeue(&ring->queue);
- if (pskb)
+ if (pskb) {
+ struct rtl_tx_desc *entry = &ring->desc[ring->idx];
+ pci_unmap_single(rtlpci->pdev, rtlpriv->cfg->ops->get_desc(
+ (u8 *) entry, true, HW_DESC_TXBUFF_ADDR),
+ pskb->len, PCI_DMA_TODEVICE);
kfree_skb(pskb);
+ }
/*NB: the beacon data buffer must be 32-bit aligned. */
pskb = ieee80211_beacon_get(hw, mac->vif);
u8 tid;
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- static int header_print;
rtlpriv->dm.dm_initialgain_enable = true;
rtlpriv->dm.dm_flag = 0;
for (tid = 0; tid < 8; tid++)
skb_queue_head_init(&rtlpriv->mac80211.skb_waitq[tid]);
- /* Only load firmware for first MAC */
- if (header_print)
- return 0;
-
/* for firmware buf */
rtlpriv->rtlhal.pfirmware = vzalloc(0x8000);
if (!rtlpriv->rtlhal.pfirmware) {
rtlpriv->max_fw_size = 0x8000;
pr_info("Driver for Realtek RTL8192DE WLAN interface\n");
pr_info("Loading firmware file %s\n", rtlpriv->cfg->fw_name);
- header_print++;
/* request fw */
err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
return status;
}
-static u32 _usb_read_sync(struct usb_device *udev, u32 addr, u16 len)
+static u32 _usb_read_sync(struct rtl_priv *rtlpriv, u32 addr, u16 len)
{
+ struct device *dev = rtlpriv->io.dev;
+ struct usb_device *udev = to_usb_device(dev);
u8 request;
u16 wvalue;
u16 index;
- u32 *data;
- u32 ret;
+ __le32 *data = &rtlpriv->usb_data[rtlpriv->usb_data_index];
- data = kmalloc(sizeof(u32), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
request = REALTEK_USB_VENQT_CMD_REQ;
index = REALTEK_USB_VENQT_CMD_IDX; /* n/a */
wvalue = (u16)addr;
_usbctrl_vendorreq_sync_read(udev, request, wvalue, index, data, len);
- ret = le32_to_cpu(*data);
- kfree(data);
- return ret;
+ if (++rtlpriv->usb_data_index >= RTL_USB_MAX_RX_COUNT)
+ rtlpriv->usb_data_index = 0;
+ return le32_to_cpu(*data);
}
static u8 _usb_read8_sync(struct rtl_priv *rtlpriv, u32 addr)
{
- struct device *dev = rtlpriv->io.dev;
-
- return (u8)_usb_read_sync(to_usb_device(dev), addr, 1);
+ return (u8)_usb_read_sync(rtlpriv, addr, 1);
}
static u16 _usb_read16_sync(struct rtl_priv *rtlpriv, u32 addr)
{
- struct device *dev = rtlpriv->io.dev;
-
- return (u16)_usb_read_sync(to_usb_device(dev), addr, 2);
+ return (u16)_usb_read_sync(rtlpriv, addr, 2);
}
static u32 _usb_read32_sync(struct rtl_priv *rtlpriv, u32 addr)
{
- struct device *dev = rtlpriv->io.dev;
-
- return _usb_read_sync(to_usb_device(dev), addr, 4);
+ return _usb_read_sync(rtlpriv, addr, 4);
}
static void _usb_write_async(struct usb_device *udev, u32 addr, u32 val,
return -ENOMEM;
}
rtlpriv = hw->priv;
+ rtlpriv->usb_data = kzalloc(RTL_USB_MAX_RX_COUNT * sizeof(u32),
+ GFP_KERNEL);
+ if (!rtlpriv->usb_data)
+ return -ENOMEM;
+ rtlpriv->usb_data_index = 0;
init_completion(&rtlpriv->firmware_loading_complete);
SET_IEEE80211_DEV(hw, &intf->dev);
udev = interface_to_usbdev(intf);
/* rtl_deinit_rfkill(hw); */
rtl_usb_deinit(hw);
rtl_deinit_core(hw);
+ kfree(rtlpriv->usb_data);
rtlpriv->cfg->ops->deinit_sw_leds(hw);
rtlpriv->cfg->ops->deinit_sw_vars(hw);
_rtl_usb_io_handler_release(hw);
#define QOS_QUEUE_NUM 4
#define RTL_MAC80211_NUM_QUEUE 5
#define REALTEK_USB_VENQT_MAX_BUF_SIZE 254
-
+#define RTL_USB_MAX_RX_COUNT 100
#define QBSS_LOAD_SIZE 5
#define MAX_WMMELE_LENGTH 64
interface or hardware */
unsigned long status;
+ /* data buffer pointer for USB reads */
+ __le32 *usb_data;
+ int usb_data_index;
+
/*This must be the last item so
that it points to the data allocated
beyond this structure like:
if (WARN_ON(gpiospec->args_count < gc->of_gpio_n_cells))
return -EINVAL;
- if (gpiospec->args[0] > gc->ngpio)
+ if (gpiospec->args[0] >= gc->ngpio)
return -EINVAL;
if (flags)
return 0;
}
+static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
+ u32 saved_val, int retry)
+{
+ u32 val;
+
+ pci_read_config_dword(pdev, offset, &val);
+ if (val == saved_val)
+ return;
+
+ for (;;) {
+ dev_dbg(&pdev->dev, "restoring config space at offset "
+ "%#x (was %#x, writing %#x)\n", offset, val, saved_val);
+ pci_write_config_dword(pdev, offset, saved_val);
+ if (retry-- <= 0)
+ return;
+
+ pci_read_config_dword(pdev, offset, &val);
+ if (val == saved_val)
+ return;
+
+ mdelay(1);
+ }
+}
+
+static void pci_restore_config_space_range(struct pci_dev *pdev,
+ int start, int end, int retry)
+{
+ int index;
+
+ for (index = end; index >= start; index--)
+ pci_restore_config_dword(pdev, 4 * index,
+ pdev->saved_config_space[index],
+ retry);
+}
+
+static void pci_restore_config_space(struct pci_dev *pdev)
+{
+ if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
+ pci_restore_config_space_range(pdev, 10, 15, 0);
+ /* Restore BARs before the command register. */
+ pci_restore_config_space_range(pdev, 4, 9, 10);
+ pci_restore_config_space_range(pdev, 0, 3, 0);
+ } else {
+ pci_restore_config_space_range(pdev, 0, 15, 0);
+ }
+}
+
/**
* pci_restore_state - Restore the saved state of a PCI device
* @dev: - PCI device that we're dealing with
*/
void pci_restore_state(struct pci_dev *dev)
{
- int i;
- u32 val;
- int tries;
-
if (!dev->state_saved)
return;
pci_restore_pcie_state(dev);
pci_restore_ats_state(dev);
- /*
- * The Base Address register should be programmed before the command
- * register(s)
- */
- for (i = 15; i >= 0; i--) {
- pci_read_config_dword(dev, i * 4, &val);
- tries = 10;
- while (tries && val != dev->saved_config_space[i]) {
- dev_dbg(&dev->dev, "restoring config "
- "space at offset %#x (was %#x, writing %#x)\n",
- i, val, (int)dev->saved_config_space[i]);
- pci_write_config_dword(dev,i * 4,
- dev->saved_config_space[i]);
- pci_read_config_dword(dev, i * 4, &val);
- mdelay(10);
- tries--;
- }
- }
+ pci_restore_config_space(dev);
+
pci_restore_pcix_state(dev);
pci_restore_msi_state(dev);
pci_restore_iov_state(dev);
.name = "rtc-efi",
.owner = THIS_MODULE,
},
- .probe = efi_rtc_probe,
.remove = __exit_p(efi_rtc_remove),
};
dev_dbg(&adev->dev, "revision = 0x%01x\n", ldata->hw_revision);
/* Enable the clockwatch on ST Variants */
- if ((ldata->hw_designer == AMBA_VENDOR_ST) &&
- (ldata->hw_revision > 1))
+ if (ldata->hw_designer == AMBA_VENDOR_ST)
writel(readl(ldata->base + RTC_CR) | RTC_CR_CWEN,
ldata->base + RTC_CR);
static int __devinit r9701_probe(struct spi_device *spi)
{
struct rtc_device *rtc;
+ struct rtc_time dt;
unsigned char tmp;
int res;
return -ENODEV;
}
+ /*
+ * The device seems to be present. Now check if the registers
+ * contain invalid values. If so, try to write a default date:
+ * 2000/1/1 00:00:00
+ */
+ r9701_get_datetime(&spi->dev, &dt);
+ if (rtc_valid_tm(&dt)) {
+ dev_info(&spi->dev, "trying to repair invalid date/time\n");
+ dt.tm_sec = 0;
+ dt.tm_min = 0;
+ dt.tm_hour = 0;
+ dt.tm_mday = 1;
+ dt.tm_mon = 0;
+ dt.tm_year = 100;
+
+ if (r9701_set_datetime(&spi->dev, &dt)) {
+ dev_err(&spi->dev, "cannot repair RTC register\n");
+ return -ENODEV;
+ }
+ }
+
rtc = rtc_device_register("r9701",
&spi->dev, &r9701_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc))
TYPE_S3C64XX,
};
+struct s3c_rtc_drv_data {
+ int cpu_type;
+};
+
/* I have yet to find an S3C implementation with more than one
* of these rtc blocks in */
static inline int s3c_rtc_get_driver_data(struct platform_device *pdev)
{
#ifdef CONFIG_OF
+ struct s3c_rtc_drv_data *data;
if (pdev->dev.of_node) {
const struct of_device_id *match;
match = of_match_node(s3c_rtc_dt_match, pdev->dev.of_node);
- return match->data;
+ data = (struct s3c_rtc_drv_data *) match->data;
+ return data->cpu_type;
}
#endif
return platform_get_device_id(pdev)->driver_data;
#define s3c_rtc_resume NULL
#endif
+static struct s3c_rtc_drv_data s3c_rtc_drv_data_array[] = {
+ [TYPE_S3C2410] = { TYPE_S3C2410 },
+ [TYPE_S3C2416] = { TYPE_S3C2416 },
+ [TYPE_S3C2443] = { TYPE_S3C2443 },
+ [TYPE_S3C64XX] = { TYPE_S3C64XX },
+};
+
#ifdef CONFIG_OF
static const struct of_device_id s3c_rtc_dt_match[] = {
{
- .compatible = "samsung,s3c2410-rtc"
- .data = TYPE_S3C2410,
+ .compatible = "samsung,s3c2410-rtc",
+ .data = &s3c_rtc_drv_data_array[TYPE_S3C2410],
}, {
- .compatible = "samsung,s3c2416-rtc"
- .data = TYPE_S3C2416,
+ .compatible = "samsung,s3c2416-rtc",
+ .data = &s3c_rtc_drv_data_array[TYPE_S3C2416],
}, {
- .compatible = "samsung,s3c2443-rtc"
- .data = TYPE_S3C2443,
+ .compatible = "samsung,s3c2443-rtc",
+ .data = &s3c_rtc_drv_data_array[TYPE_S3C2443],
}, {
- .compatible = "samsung,s3c6410-rtc"
- .data = TYPE_S3C64XX,
+ .compatible = "samsung,s3c6410-rtc",
+ .data = &s3c_rtc_drv_data_array[TYPE_S3C64XX],
},
{},
};
#define BIT_RTC_CTRL_REG_TEST_MODE_M 0x10
#define BIT_RTC_CTRL_REG_SET_32_COUNTER_M 0x20
#define BIT_RTC_CTRL_REG_GET_TIME_M 0x40
+#define BIT_RTC_CTRL_REG_RTC_V_OPT 0x80
/* RTC_STATUS_REG bitfields */
#define BIT_RTC_STATUS_REG_RUN_M 0x02
unsigned char rtc_data[ALL_TIME_REGS + 1];
int ret;
u8 save_control;
+ u8 rtc_control;
ret = twl_rtc_read_u8(&save_control, REG_RTC_CTRL_REG);
- if (ret < 0)
+ if (ret < 0) {
+ dev_err(dev, "%s: reading CTRL_REG, error %d\n", __func__, ret);
return ret;
+ }
+ /* for twl6030/32 make sure BIT_RTC_CTRL_REG_GET_TIME_M is clear */
+ if (twl_class_is_6030()) {
+ if (save_control & BIT_RTC_CTRL_REG_GET_TIME_M) {
+ save_control &= ~BIT_RTC_CTRL_REG_GET_TIME_M;
+ ret = twl_rtc_write_u8(save_control, REG_RTC_CTRL_REG);
+ if (ret < 0) {
+ dev_err(dev, "%s clr GET_TIME, error %d\n",
+ __func__, ret);
+ return ret;
+ }
+ }
+ }
- save_control |= BIT_RTC_CTRL_REG_GET_TIME_M;
+ /* Copy RTC counting registers to static registers or latches */
+ rtc_control = save_control | BIT_RTC_CTRL_REG_GET_TIME_M;
- ret = twl_rtc_write_u8(save_control, REG_RTC_CTRL_REG);
- if (ret < 0)
+ /* for twl6030/32 enable read access to static shadowed registers */
+ if (twl_class_is_6030())
+ rtc_control |= BIT_RTC_CTRL_REG_RTC_V_OPT;
+
+ ret = twl_rtc_write_u8(rtc_control, REG_RTC_CTRL_REG);
+ if (ret < 0) {
+ dev_err(dev, "%s: writing CTRL_REG, error %d\n", __func__, ret);
return ret;
+ }
ret = twl_i2c_read(TWL_MODULE_RTC, rtc_data,
(rtc_reg_map[REG_SECONDS_REG]), ALL_TIME_REGS);
if (ret < 0) {
- dev_err(dev, "rtc_read_time error %d\n", ret);
+ dev_err(dev, "%s: reading data, error %d\n", __func__, ret);
return ret;
}
+ /* for twl6030 restore original state of rtc control register */
+ if (twl_class_is_6030()) {
+ ret = twl_rtc_write_u8(save_control, REG_RTC_CTRL_REG);
+ if (ret < 0) {
+ dev_err(dev, "%s: restore CTRL_REG, error %d\n",
+ __func__, ret);
+ return ret;
+ }
+ }
+
tm->tm_sec = bcd2bin(rtc_data[0]);
tm->tm_min = bcd2bin(rtc_data[1]);
tm->tm_hour = bcd2bin(rtc_data[2]);
scsi_eh_restore_cmnd(scmd, &ses);
- if (sdrv->eh_action)
+ if (sdrv && sdrv->eh_action)
rtn = sdrv->eh_action(scmd, cmnd, cmnd_size, rtn);
return rtn;
dev_dbg(sdev, "Couldn't DMA map a %d bytes RX buffer\n",
rx_buf_count);
if (t->tx_buf)
- dma_unmap_single(NULL, t->tx_dma, t->len,
+ dma_unmap_single(&spi->dev, t->tx_dma, t->len,
DMA_TO_DEVICE);
return -ENOMEM;
}
if (spicfg->io_type == SPI_IO_TYPE_DMA) {
if (t->tx_buf)
- dma_unmap_single(NULL, t->tx_dma, t->len,
+ dma_unmap_single(&spi->dev, t->tx_dma, t->len,
DMA_TO_DEVICE);
- dma_unmap_single(NULL, t->rx_dma, rx_buf_count,
+ dma_unmap_single(&spi->dev, t->rx_dma, rx_buf_count,
DMA_FROM_DEVICE);
clear_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN);
static void fsl_spi_chipselect(struct spi_device *spi, int value)
{
struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
- struct fsl_spi_platform_data *pdata = spi->dev.parent->platform_data;
+ struct fsl_spi_platform_data *pdata;
bool pol = spi->mode & SPI_CS_HIGH;
struct spi_mpc8xxx_cs *cs = spi->controller_state;
+ pdata = spi->dev.parent->parent->platform_data;
+
if (value == BITBANG_CS_INACTIVE) {
if (pdata->cs_control)
pdata->cs_control(spi, !pol);
struct spi_bitbang bitbang;
struct completion xfer_done;
- void *base;
+ void __iomem *base;
int irq;
struct clk *clk;
unsigned long spi_clk;
}
ret = of_property_read_u32(np, "fsl,spi-num-chipselects", &num_cs);
- if (ret < 0)
- num_cs = mxc_platform_info->num_chipselect;
+ if (ret < 0) {
+ if (mxc_platform_info)
+ num_cs = mxc_platform_info->num_chipselect;
+ else
+ return ret;
+ }
master = spi_alloc_master(&pdev->dev,
sizeof(struct spi_imx_data) + sizeof(int) * num_cs);
for (i = 0; i < master->num_chipselect; i++) {
int cs_gpio = of_get_named_gpio(np, "cs-gpios", i);
- if (cs_gpio < 0)
+ if (cs_gpio < 0 && mxc_platform_info)
cs_gpio = mxc_platform_info->chipselect[i];
spi_imx->chipselect[i] = cs_gpio;
struct pl022 *pl022 = dev_get_drvdata(dev);
clk_disable(pl022->clk);
- amba_vcore_disable(pl022->adev);
return 0;
}
{
struct pl022 *pl022 = dev_get_drvdata(dev);
- amba_vcore_enable(pl022->adev);
clk_enable(pl022->clk);
return 0;
config ANDROID_PERSISTENT_RAM
bool
+ depends on HAVE_MEMBLOCK
select REED_SOLOMON
select REED_SOLOMON_ENC8
select REED_SOLOMON_DEC8
config ANDROID_RAM_CONSOLE
bool "Android RAM buffer console"
- depends on !S390 && !UML
+ depends on !S390 && !UML && HAVE_MEMBLOCK
select ANDROID_PERSISTENT_RAM
default n
struct persistent_ram_zone *__persistent_ram_init(struct device *dev, bool ecc)
{
struct persistent_ram_zone *prz;
- int ret;
+ int ret = -ENOMEM;
prz = kzalloc(sizeof(struct persistent_ram_zone), GFP_KERNEL);
if (!prz) {
pr_err("persistent_ram: failed to allocate persistent ram zone\n");
- return ERR_PTR(-ENOMEM);
+ goto err;
}
INIT_LIST_HEAD(&prz->node);
ret = persistent_ram_buffer_init(dev_name(dev), prz);
if (ret) {
pr_err("persistent_ram: failed to initialize buffer\n");
- return ERR_PTR(ret);
+ goto err;
}
prz->ecc = ecc;
ret = persistent_ram_init_ecc(prz, prz->buffer_size);
if (ret)
- return ERR_PTR(ret);
+ goto err;
if (prz->buffer->sig == PERSISTENT_RAM_SIG) {
if (buffer_size(prz) > prz->buffer_size ||
atomic_set(&prz->buffer->size, 0);
return prz;
+err:
+ kfree(prz);
+ return ERR_PTR(ret);
}
struct persistent_ram_zone * __init
struct timed_gpio_platform_data *pdata = pdev->dev.platform_data;
struct timed_gpio *cur_gpio;
struct timed_gpio_data *gpio_data, *gpio_dat;
- int i, j, ret = 0;
+ int i, ret;
if (!pdata)
return -EBUSY;
gpio_dat->dev.get_time = gpio_get_time;
gpio_dat->dev.enable = gpio_enable;
ret = gpio_request(cur_gpio->gpio, cur_gpio->name);
- if (ret >= 0) {
- ret = timed_output_dev_register(&gpio_dat->dev);
- if (ret < 0)
- gpio_free(cur_gpio->gpio);
- }
+ if (ret < 0)
+ goto err_out;
+ ret = timed_output_dev_register(&gpio_dat->dev);
if (ret < 0) {
- for (j = 0; j < i; j++) {
- timed_output_dev_unregister(&gpio_data[i].dev);
- gpio_free(gpio_data[i].gpio);
- }
- kfree(gpio_data);
- return ret;
+ gpio_free(cur_gpio->gpio);
+ goto err_out;
}
gpio_dat->gpio = cur_gpio->gpio;
platform_set_drvdata(pdev, gpio_data);
return 0;
+
+err_out:
+ while (--i >= 0) {
+ timed_output_dev_unregister(&gpio_data[i].dev);
+ gpio_free(gpio_data[i].gpio);
+ }
+ kfree(gpio_data);
+
+ return ret;
}
static int timed_gpio_remove(struct platform_device *pdev)
ret = -ENODEV;
goto error_ret;
}
+ i++;
}
error_ret:
mutex_unlock(&iio_map_list_lock);
static int ak8975_write_data(struct i2c_client *client,
u8 reg, u8 val, u8 mask, u8 shift)
{
- struct ak8975_data *data = i2c_get_clientdata(client);
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct ak8975_data *data = iio_priv(indio_dev);
u8 regval;
int ret;
*/
static int ak8975_setup(struct i2c_client *client)
{
- struct ak8975_data *data = i2c_get_clientdata(client);
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct ak8975_data *data = iio_priv(indio_dev);
u8 device_id;
int ret;
goto exit_gpio;
}
data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
/* Perform some basic start-of-day setup of the device. */
err = ak8975_setup(client);
if (err < 0) {
goto exit_free_iio;
}
- i2c_set_clientdata(client, indio_dev);
data->client = client;
mutex_init(&data->lock);
data->eoc_irq = client->irq;
/* Called when we have found a new HMC5843. */
static void hmc5843_init_client(struct i2c_client *client)
{
- struct hmc5843_data *data = i2c_get_clientdata(client);
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct hmc5843_data *data = iio_priv(indio_dev);
+
hmc5843_set_meas_conf(client, data->meas_conf);
hmc5843_set_rate(client, data->rate);
hmc5843_configure(client, data->operating_mode);
int as102_fw_upload(struct as10x_bus_adapter_t *bus_adap)
{
int errno = -EFAULT;
- const struct firmware *firmware;
+ const struct firmware *firmware = NULL;
unsigned char *cmd_buf = NULL;
char *fw1, *fw2;
struct usb_device *dev = bus_adap->usb_dev;
static int pdev_probe(struct platform_device *device)
{
DBG("%s", device->name);
- if (platform_driver_register(&omap_dmm_driver))
- dev_err(&device->dev, "DMM registration failed\n");
-
return drm_platform_init(&omap_drm_driver, device);
}
static int __init omap_drm_init(void)
{
DBG("init");
+ if (platform_driver_register(&omap_dmm_driver)) {
+ /* we can continue on without DMM.. so not fatal */
+ dev_err(NULL, "DMM registration failed\n");
+ }
return platform_driver_register(&pdev);
}
- code review by USB developer community.
- testing with as many devices as possible.
-Please send any patches for this driver to Chris Kelly <ckelly@ozmodevices.com>
+Please send any patches for this driver to
+Rupesh Gujare <rgujare@ozmodevices.com>
+Chris Kelly <ckelly@ozmodevices.com>
and Greg Kroah-Hartman <gregkh@linuxfoundation.org>.
-# Dependency on CONFIG_BROKEN is because there is a commit dependency
-# on a cleancache naming change to be submitted by Konrad Wilk
-# a39c00ded70339603ffe1b0ffdf3ade85bcf009a "Merge branch 'stable/cleancache.v13'
-# into linux-next. Once this commit is present, BROKEN can be removed
config RAMSTER
bool "Cross-machine RAM capacity sharing, aka peer-to-peer tmem"
- depends on (CLEANCACHE || FRONTSWAP) && CONFIGFS_FS=y && !ZCACHE && !XVMALLOC && !HIGHMEM && BROKEN
+ depends on (CLEANCACHE || FRONTSWAP) && CONFIGFS_FS=y && !ZCACHE && !XVMALLOC && !HIGHMEM
select LZO_COMPRESS
select LZO_DECOMPRESS
default n
log_blk++;
- for (seg_no = 0; seg_no < sizeof(ms_start_idx)/2; seg_no++) {
+ for (seg_no = 0; seg_no < ARRAY_SIZE(ms_start_idx) - 1;
+ seg_no++) {
if (log_blk < ms_start_idx[seg_no+1])
break;
}
rtsx_init_chip(dev->chip);
+ /* set the supported max_lun and max_id for the scsi host
+	 * NOTE: the minimum value of max_id is 1 */
+ host->max_id = 1;
+ host->max_lun = dev->chip->max_lun;
+
/* Start up our control thread */
th = kthread_run(rtsx_control_thread, dev, CR_DRIVER_NAME);
if (IS_ERR(th)) {
int sg_cnt, i, resid;
int err = 0;
long timeleft;
+ struct scatterlist *sg_ptr;
u32 val = TRIG_DMA;
if ((sg == NULL) || (num_sg <= 0) || !offset || !index)
sg_cnt = dma_map_sg(&(rtsx->pci->dev), sg, num_sg, dma_dir);
resid = size;
-
+ sg_ptr = sg;
chip->sgi = 0;
/* Usually the next entry will be @sg@ + 1, but if this sg element
* is part of a chained scatterlist, it could jump to the start of
* the proper sg
*/
for (i = 0; i < *index; i++)
- sg = sg_next(sg);
+ sg_ptr = sg_next(sg_ptr);
for (i = *index; i < sg_cnt; i++) {
dma_addr_t addr;
unsigned int len;
u8 option;
- addr = sg_dma_address(sg);
- len = sg_dma_len(sg);
+ addr = sg_dma_address(sg_ptr);
+ len = sg_dma_len(sg_ptr);
RTSX_DEBUGP("DMA addr: 0x%x, Len: 0x%x\n",
(unsigned int)addr, len);
if (!resid)
break;
- sg = sg_next(sg);
+ sg_ptr = sg_next(sg_ptr);
}
RTSX_DEBUGP("SG table count = %d\n", chip->sgi);
current->pid);
if (1 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
&call_status->status)) {
- dev_warn(&sep->pdev->dev,
+ dev_dbg(&sep->pdev->dev,
"[PID%d] dcb prep needed before send msg\n",
current->pid);
error = -EPROTO;
}
if (!arg) {
- dev_warn(&sep->pdev->dev,
+ dev_dbg(&sep->pdev->dev,
"[PID%d] dcb null arg\n", current->pid);
- error = EINVAL;
+ error = -EINVAL;
goto end_function;
}
static int vector_num;
static int level[PIO2_CARDS_MAX];
static int level_num;
-static const char *variant[PIO2_CARDS_MAX];
+static char *variant[PIO2_CARDS_MAX];
static int variant_num;
-static int loopback;
+static bool loopback;
static int pio2_match(struct vme_dev *);
static int __devinit pio2_probe(struct vme_dev *);
return (false);
}
+ if (uKeyLength > MAX_KEY_LEN)
+ return false;
+
pTable->KeyTable[MAX_KEY_TABLE-1].bInUse = true;
for(ii=0;ii<ETH_ALEN;ii++)
pTable->KeyTable[MAX_KEY_TABLE-1].abyBSSID[ii] = 0xFF;
result = -ENOMEM;
break;
}
- pNodeList = (PSNodeList)kmalloc(sizeof(SNodeList) + (sNodeList.uItem * sizeof(SNodeItem)), (int)GFP_ATOMIC);
+ pNodeList = kmalloc(sizeof(SNodeList) + (sNodeList.uItem * sizeof(SNodeItem)), (int)GFP_ATOMIC);
if (pNodeList == NULL) {
result = -ENOMEM;
break;
}
}
if (copy_to_user(pReq->data, pNodeList, sizeof(SNodeList) + (sNodeList.uItem * sizeof(SNodeItem)))) {
+ kfree(pNodeList);
result = -EFAULT;
break;
}
return (FALSE);
}
+ if (uKeyLength > MAX_KEY_LEN)
+ return false;
+
pTable->KeyTable[MAX_KEY_TABLE-1].bInUse = TRUE;
for (ii = 0; ii < ETH_ALEN; ii++)
pTable->KeyTable[MAX_KEY_TABLE-1].abyBSSID[ii] = 0xFF;
}
temp = xgifb_reg_get(pVBInfo->P3c4, 0x3B);
/* SR3B[7][3]MAA15 MAA11 (Power on Trapping) */
- if ((temp & 0x88) == 0x80)
+ if (((temp & 0x88) == 0x80) || ((temp & 0x88) == 0x08))
data = 0; /* DDR */
else
data = 1; /* DDRII */
pVBInfo->pXGINew_CR97 = &XG20_CR97;
if (ChipType == XG27) {
+ unsigned char temp;
pVBInfo->MCLKData
= (struct SiS_MCLKData *) XGI27New_MCLKData;
pVBInfo->CR40 = XGI27_cr41;
pVBInfo->pCRDE = XG27_CRDE;
pVBInfo->pSR40 = &XG27_SR40;
pVBInfo->pSR41 = &XG27_SR41;
+ pVBInfo->SR15 = XG27_SR13;
+ /*Z11m DDR*/
+ temp = xgifb_reg_get(pVBInfo->P3c4, 0x3B);
+ /* SR3B[7][3]MAA15 MAA11 (Power on Trapping) */
+ if (((temp & 0x88) == 0x80) || ((temp & 0x88) == 0x08))
+ pVBInfo->pXGINew_CR97 = &Z11m_CR97;
}
if (ChipType >= XG20) {
{0x5c, 0x23, 0x01, 166}
};
+static unsigned char XG27_SR13[4][8] = {
+ {0x35, 0x45, 0xb1, 0x00, 0x00, 0x00, 0x00, 0x00}, /* SR13 */
+ {0x41, 0x51, 0x5c, 0x00, 0x00, 0x00, 0x00, 0x00}, /* SR14 */
+ {0x32, 0x32, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00}, /* SR18 */
+ {0x03, 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00} /* SR1B */
+};
+
static unsigned char XGI340_SR13[4][8] = {
{0x35, 0x45, 0xb1, 0x00, 0x00, 0x00, 0x00, 0x00}, /* SR13 */
{0x41, 0x51, 0x5c, 0x00, 0x00, 0x00, 0x00, 0x00}, /* SR14 */
{0x20, 0x40, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 0 CR41 */
{0xC4, 0x40, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 1 CR8A */
{0xC4, 0x40, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 2 CR8B */
- {0xB5, 0x13, 0xa4, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 3 CR40[7],
+ {0xB3, 0x13, 0xa4, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 3 CR40[7],
CR99[2:0],
CR45[3:0]*/
{0xf0, 0xf5, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 4 CR59 */
static unsigned char XG27_SR40 = 0x04 ;
static unsigned char XG27_SR41 = 0x00 ;
+static unsigned char Z11m_CR97 = 0x80 ;
+
static struct XGI330_VCLKDataStruct XGI_VCLKData[] = {
/* SR2B,SR2C,SR2D */
{0x1B, 0xE1, 25}, /* 00 (25.175MHz) */
return off + obj_idx * class_size;
}
+static void reset_page(struct page *page)
+{
+ clear_bit(PG_private, &page->flags);
+ clear_bit(PG_private_2, &page->flags);
+ set_page_private(page, 0);
+ page->mapping = NULL;
+ page->freelist = NULL;
+ reset_page_mapcount(page);
+}
+
static void free_zspage(struct page *first_page)
{
- struct page *nextp, *tmp;
+ struct page *nextp, *tmp, *head_extra;
BUG_ON(!is_first_page(first_page));
BUG_ON(first_page->inuse);
- nextp = (struct page *)page_private(first_page);
+ head_extra = (struct page *)page_private(first_page);
- clear_bit(PG_private, &first_page->flags);
- clear_bit(PG_private_2, &first_page->flags);
- set_page_private(first_page, 0);
- first_page->mapping = NULL;
- first_page->freelist = NULL;
- reset_page_mapcount(first_page);
+ reset_page(first_page);
__free_page(first_page);
/* zspage with only 1 system page */
- if (!nextp)
+ if (!head_extra)
return;
- list_for_each_entry_safe(nextp, tmp, &nextp->lru, lru) {
+ list_for_each_entry_safe(nextp, tmp, &head_extra->lru, lru) {
list_del(&nextp->lru);
- clear_bit(PG_private_2, &nextp->flags);
- nextp->index = 0;
+ reset_page(nextp);
__free_page(nextp);
}
+ reset_page(head_extra);
+ __free_page(head_extra);
}
/* Initialize a newly allocated zspage */
do {
struct uart_8250_port *up;
struct uart_port *port;
- bool skip;
up = list_entry(l, struct uart_8250_port, list);
port = &up->port;
- skip = pass_counter && up->port.flags & UPF_IIR_ONCE;
- if (!skip && port->handle_irq(port)) {
+ if (port->handle_irq(port)) {
handled = 1;
end = NULL;
} else if (end == NULL)
spin_unlock_irqrestore(&port->lock, flags);
/*
- * If the interrupt is not reasserted, setup a timer to
- * kick the UART on a regular basis.
+ * If the interrupt is not reasserted, or we otherwise
+ * don't trust the iir, setup a timer to kick the UART
+ * on a regular basis.
*/
- if (!(iir1 & UART_IIR_NO_INT) && (iir & UART_IIR_NO_INT)) {
+ if ((!(iir1 & UART_IIR_NO_INT) && (iir & UART_IIR_NO_INT)) ||
+ up->port.flags & UPF_BUG_THRE) {
up->bugs |= UART_BUG_THRE;
pr_debug("ttyS%d - using backup timer\n",
serial_index(port));
const struct pciserial_board *board,
struct uart_port *port, int idx)
{
- port->flags |= UPF_IIR_ONCE;
+ port->flags |= UPF_BUG_THRE;
return skip_tx_en_setup(priv, board, port, idx);
}
return pci_default_setup(priv, board, port, idx);
}
-static int try_enable_msi(struct pci_dev *dev)
-{
- /* use msi if available, but fallback to legacy otherwise */
- pci_enable_msi(dev);
- return 0;
-}
-
-static void disable_msi(struct pci_dev *dev)
-{
- pci_disable_msi(dev);
-}
-
#define PCI_VENDOR_ID_SBSMODULARIO 0x124B
#define PCI_SUBVENDOR_ID_SBSMODULARIO 0x124B
#define PCI_DEVICE_ID_OCTPRO 0x0001
.device = PCI_DEVICE_ID_INTEL_PATSBURG_KT,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
- .init = try_enable_msi,
.setup = kt_serial_setup,
- .exit = disable_msi,
},
/*
* ITE
config SERIAL_OMAP_CONSOLE
bool "Console on OMAP serial port"
- depends on SERIAL_OMAP
+ depends on SERIAL_OMAP=y
select SERIAL_CORE_CONSOLE
help
Select this option if you would like to use omap serial port as
res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res_mem)
port->mapbase = res_mem->start;
- else if (platp->mapbase)
+ else if (platp)
port->mapbase = platp->mapbase;
else
return -EINVAL;
res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (res_irq)
port->irq = res_irq->start;
- else if (platp->irq)
+ else if (platp)
port->irq = platp->irq;
/* Check platform data first so we can override device node data */
goto unmap;
}
- /* Ensure interrupts from this UART are masked and cleared */
- writew(0, uap->port.membase + UART011_IMSC);
- writew(0xffff, uap->port.membase + UART011_ICR);
-
uap->vendor = vendor;
uap->lcrh_rx = vendor->lcrh_rx;
uap->lcrh_tx = vendor->lcrh_tx;
uap->port.line = i;
pl011_dma_probe(uap);
+ /* Ensure interrupts from this UART are masked and cleared */
+ writew(0, uap->port.membase + UART011_IMSC);
+ writew(0xffff, uap->port.membase + UART011_ICR);
+
snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev));
amba_ports[i] = uap;
{
UART_PUT_CR(port, ATMEL_US_RSTSTA); /* reset status and receiver */
+ UART_PUT_CR(port, ATMEL_US_RXEN);
+
if (atmel_use_dma_rx(port)) {
/* enable PDC controller */
UART_PUT_IER(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
*/
static void atmel_stop_rx(struct uart_port *port)
{
+ UART_PUT_CR(port, ATMEL_US_RXDIS);
+
if (atmel_use_dma_rx(port)) {
/* disable PDC receive */
UART_PUT_PTCR(port, ATMEL_PDC_RXTDIS);
return -ENODEV;
}
- if (!request_mem_region(mem->start, resource_size(mem),
+ if (!devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem),
pdev->dev.driver->name)) {
dev_err(&pdev->dev, "memory region already claimed\n");
return -EBUSY;
}
dma_rx = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
- if (!dma_rx) {
- ret = -EINVAL;
- goto err;
- }
+ if (!dma_rx)
+ return -ENXIO;
dma_tx = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx");
- if (!dma_tx) {
- ret = -EINVAL;
- goto err;
- }
+ if (!dma_tx)
+ return -ENXIO;
+
+ up = devm_kzalloc(&pdev->dev, sizeof(*up), GFP_KERNEL);
+ if (!up)
+ return -ENOMEM;
- up = kzalloc(sizeof(*up), GFP_KERNEL);
- if (up == NULL) {
- ret = -ENOMEM;
- goto do_release_region;
- }
up->pdev = pdev;
up->port.dev = &pdev->dev;
up->port.type = PORT_OMAP;
dev_err(&pdev->dev, "failed to get alias/pdev id, errno %d\n",
up->port.line);
ret = -ENODEV;
- goto err;
+ goto err_port_line;
}
sprintf(up->name, "OMAP UART%d", up->port.line);
up->port.mapbase = mem->start;
- up->port.membase = ioremap(mem->start, resource_size(mem));
+ up->port.membase = devm_ioremap(&pdev->dev, mem->start,
+ resource_size(mem));
if (!up->port.membase) {
dev_err(&pdev->dev, "can't ioremap UART\n");
ret = -ENOMEM;
- goto err;
+ goto err_ioremap;
}
up->port.flags = omap_up_info->flags;
ret = uart_add_one_port(&serial_omap_reg, &up->port);
if (ret != 0)
- goto do_release_region;
+ goto err_add_port;
pm_runtime_put(&pdev->dev);
platform_set_drvdata(pdev, up);
return 0;
-err:
+
+err_add_port:
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+err_ioremap:
+err_port_line:
dev_err(&pdev->dev, "[UART%d]: failure [%s]: %d\n",
pdev->id, __func__, ret);
-do_release_region:
- release_mem_region(mem->start, resource_size(mem));
return ret;
}
pm_runtime_disable(&up->pdev->dev);
uart_remove_one_port(&serial_omap_reg, &up->port);
pm_qos_remove_request(&up->pm_qos_request);
-
- kfree(up);
}
platform_set_drvdata(dev, NULL);
#define CMITC_UARTCLK 192000000 /* 192.0000 MHz */
#define FRI2_64_UARTCLK 64000000 /* 64.0000 MHz */
#define FRI2_48_UARTCLK 48000000 /* 48.0000 MHz */
+#define NTC1_UARTCLK 64000000 /* 64.0000 MHz */
struct pch_uart_buffer {
unsigned char *buf;
if (cmp && strstr(cmp, "Fish River Island II"))
return FRI2_48_UARTCLK;
+ /* Kontron COMe-mTT10 (nanoETXexpress-TT) */
+ cmp = dmi_get_system_info(DMI_BOARD_NAME);
+ if (cmp && (strstr(cmp, "COMe-mTT") ||
+ strstr(cmp, "nanoETXexpress-TT")))
+ return NTC1_UARTCLK;
+
return DEFAULT_UARTCLK;
}
}
pci_enable_msi(pdev);
+ pci_set_master(pdev);
iobase = pci_resource_start(pdev, 0);
mapbase = pci_resource_start(pdev, 1);
ucon &= ucon_mask;
wr_regl(port, S3C2410_UCON, ucon | cfg->ucon);
+ wr_regl(port, S3C2410_ULCON, cfg->ulcon);
/* reset both fifos */
wr_regl(port, S3C2410_UFCON, cfg->ufcon | S3C2410_UFCON_RESETBOTH);
gotoxy(vc, vc->vc_x, vc->vc_y);
csi_J(vc, 0);
update_screen(vc);
- pr_info("Console: %s %s %dx%d",
+ pr_info("Console: %s %s %dx%d\n",
vc->vc_can_do_color ? "colour" : "mono",
display_desc, vc->vc_cols, vc->vc_rows);
printable = 1;
- printk("\n");
console_unlock();
# USB device configuration
#
-menuconfig USB_SUPPORT
- bool "USB support"
- depends on HAS_IOMEM
- default y
- ---help---
- This option adds core support for Universal Serial Bus (USB).
- You will also need drivers from the following menu to make use of it.
-
# many non-PCI SOC chips embed OHCI
config USB_ARCH_HAS_OHCI
boolean
boolean
default PCI
+menuconfig USB_SUPPORT
+ bool "USB support"
+ depends on HAS_IOMEM
+ default y
+ ---help---
+ This option adds core support for Universal Serial Bus (USB).
+ You will also need drivers from the following menu to make use of it.
+
if USB_SUPPORT
config USB_COMMON
if (status == 0) {
status = usb_suspend_device(udev, msg);
- /* Again, ignore errors during system sleep transitions */
- if (!PMSG_IS_AUTO(msg))
+ /*
+ * Ignore errors from non-root-hub devices during
+ * system sleep transitions. For the most part,
+ * these devices should go to low power anyway when
+ * the entire bus is suspended.
+ */
+ if (udev->parent && !PMSG_IS_AUTO(msg))
status = 0;
}
if (status == 0) {
usb_set_device_state(rhdev, USB_STATE_SUSPENDED);
hcd->state = HC_STATE_SUSPENDED;
+
+ /* Did we race with a root-hub wakeup event? */
+ if (rhdev->do_remote_wakeup) {
+ char buffer[6];
+
+ status = hcd->driver->hub_status_data(hcd, buffer);
+ if (status != 0) {
+ dev_dbg(&rhdev->dev, "suspend raced with wakeup event\n");
+ hcd_bus_resume(rhdev, PMSG_AUTO_RESUME);
+ status = -EBUSY;
+ }
+ }
} else {
spin_lock_irq(&hcd_root_hub_lock);
if (!HCD_DEAD(hcd)) {
if (retval)
goto fail;
+ /*
+ * Some superspeed devices have finished the link training process
+	 * and attached to a superspeed hub port, but the device descriptor
+	 * retrieved from those devices shows they aren't superspeed devices.
+	 * Warm-resetting the port they are attached to can fix them.
+ */
+ if ((udev->speed == USB_SPEED_SUPER) &&
+ (le16_to_cpu(udev->descriptor.bcdUSB) < 0x0300)) {
+ dev_err(&udev->dev, "got a wrong device descriptor, "
+ "warm reset device\n");
+ hub_port_reset(hub, port1, udev,
+ HUB_BH_RESET_TIME, true);
+ retval = -EINVAL;
+ goto fail;
+ }
+
if (udev->descriptor.bMaxPacketSize0 == 0xff ||
udev->speed == USB_SPEED_SUPER)
i = 512;
retval = usb_unlink_urb(io->urbs [i]);
if (retval != -EINPROGRESS &&
retval != -ENODEV &&
- retval != -EBUSY)
+ retval != -EBUSY &&
+ retval != -EIDRM)
dev_err(&io->dev->dev,
"%s, unlink --> %d\n",
__func__, retval);
}
spin_lock(&io->lock);
}
- urb->dev = NULL;
/* on the last completion, signal usb_sg_wait() */
io->bytes += urb->actual_length;
case -ENXIO: /* hc didn't queue this one */
case -EAGAIN:
case -ENOMEM:
- io->urbs[i]->dev = NULL;
retval = 0;
yield();
break;
/* fail any uncompleted urbs */
default:
- io->urbs[i]->dev = NULL;
io->urbs[i]->status = retval;
dev_dbg(&io->dev->dev, "%s, submit --> %d\n",
__func__, retval);
if (!io->urbs [i]->dev)
continue;
retval = usb_unlink_urb(io->urbs [i]);
- if (retval != -EINPROGRESS && retval != -EBUSY)
+ if (retval != -EINPROGRESS
+ && retval != -ENODEV
+ && retval != -EBUSY
+ && retval != -EIDRM)
dev_warn(&io->dev->dev, "%s, unlink --> %d\n",
__func__, retval);
}
* never submitted, or it was unlinked before, or the hardware is already
* finished with it), even if the completion handler has not yet run.
*
+ * The URB must not be deallocated while this routine is running. In
+ * particular, when a driver calls this routine, it must ensure that the
+ * completion handler cannot deallocate the URB.
+ *
* Unlinking and Endpoint Queues:
*
* [The behaviors and guarantees described below do not apply to virtual
* with error -EPERM. Thus even if the URB's completion handler always
* tries to resubmit, it will not succeed and the URB will become idle.
*
+ * The URB must not be deallocated while this routine is running. In
+ * particular, when a driver calls this routine, it must ensure that the
+ * completion handler cannot deallocate the URB.
+ *
* This routine may not be used in an interrupt context (such as a bottom
* half or a completion handler), or when holding a spinlock, or in other
* situations where the caller can't schedule().
* with error -EPERM. Thus even if the URB's completion handler always
* tries to resubmit, it will not succeed and the URB will become idle.
*
+ * The URB must not be deallocated while this routine is running. In
+ * particular, when a driver calls this routine, it must ensure that the
+ * completion handler cannot deallocate the URB.
+ *
* This routine may not be used in an interrupt context (such as a bottom
* half or a completion handler), or when holding a spinlock, or in other
* situations where the caller can't schedule().
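The deallocation constraint above is easiest to meet with reference counting. The sketch below is illustrative only and is not part of any patch in this series; the helper name is hypothetical, but usb_get_urb(), usb_unlink_urb() and usb_free_urb() are the standard core helpers.

#include <linux/usb.h>

/* Hypothetical helper: hold our own reference so neither the completion
 * handler nor anyone else can free the URB while the unlink is running. */
static void example_cancel_urb(struct urb *urb)
{
	usb_get_urb(urb);	/* take an extra reference on the URB */
	usb_unlink_urb(urb);	/* asynchronous cancel; commonly returns
				 * -EINPROGRESS while the unlink is pending */
	usb_free_urb(urb);	/* drop the extra reference taken above */
}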
DBG (dev, "%s %d\n", __func__, dev->state);
/* dev->state must prevent interference */
-restart:
spin_lock_irq (&dev->lock);
while (!list_empty(&dev->epfiles)) {
struct ep_data *ep;
if (ehci->debug)
dbgp_external_startup();
+ ehci->port_c_suspend = ehci->suspended_ports =
+ ehci->resuming_ports = 0;
return retval;
}
* like usb_port_resume() does.
*/
ehci->reset_done[i] = jiffies + msecs_to_jiffies(25);
+ set_bit(i, &ehci->resuming_ports);
ehci_dbg (ehci, "port %d remote wakeup\n", i + 1);
mod_timer(&hcd->rh_timer, ehci->reset_done[i]);
}
* remote wakeup, we must fail the suspend.
*/
if (hcd->self.root_hub->do_remote_wakeup) {
- port = HCS_N_PORTS(ehci->hcs_params);
- while (port--) {
- if (ehci->reset_done[port] != 0) {
- spin_unlock_irq(&ehci->lock);
- ehci_dbg(ehci, "suspend failed because "
- "port %d is resuming\n",
- port + 1);
- return -EBUSY;
- }
+ if (ehci->resuming_ports) {
+ spin_unlock_irq(&ehci->lock);
+ ehci_dbg(ehci, "suspend failed because a port is resuming\n");
+ return -EBUSY;
}
}
ehci_hub_status_data (struct usb_hcd *hcd, char *buf)
{
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
- u32 temp, status = 0;
+ u32 temp, status;
u32 mask;
int ports, i, retval = 1;
unsigned long flags;
u32 ppcd = 0;
- /* if !USB_SUSPEND, root hub timers won't get shut down ... */
- if (ehci->rh_state != EHCI_RH_RUNNING)
- return 0;
-
/* init status to no-changes */
buf [0] = 0;
ports = HCS_N_PORTS (ehci->hcs_params);
retval++;
}
+ /* Inform the core about resumes-in-progress by returning
+ * a non-zero value even if there are no status changes.
+ */
+ status = ehci->resuming_ports;
+
/* Some boards (mostly VIA?) report bogus overcurrent indications,
* causing massive log spam unless we completely ignore them. It
* may be relevant that VIA VT8235 controllers, where PORT_POWER is
ehci_writel(ehci,
temp & ~(PORT_RWC_BITS | PORT_RESUME),
status_reg);
+ clear_bit(wIndex, &ehci->resuming_ports);
retval = handshake(ehci, status_reg,
PORT_RESUME, 0, 2000 /* 2msec */);
if (retval != 0) {
ehci->reset_done[wIndex])) {
status |= USB_PORT_STAT_C_RESET << 16;
ehci->reset_done [wIndex] = 0;
+ clear_bit(wIndex, &ehci->resuming_ports);
/* force reset to complete */
ehci_writel(ehci, temp & ~(PORT_RWC_BITS | PORT_RESET),
ehci_readl(ehci, status_reg));
}
- if (!(temp & (PORT_RESUME|PORT_RESET)))
+ if (!(temp & (PORT_RESUME|PORT_RESET))) {
ehci->reset_done[wIndex] = 0;
+ clear_bit(wIndex, &ehci->resuming_ports);
+ }
/* transfer dedicated ports to the companion hc */
if ((temp & PORT_CONNECT) &&
status |= USB_PORT_STAT_SUSPEND;
} else if (test_bit(wIndex, &ehci->suspended_ports)) {
clear_bit(wIndex, &ehci->suspended_ports);
+ clear_bit(wIndex, &ehci->resuming_ports);
ehci->reset_done[wIndex] = 0;
if (temp & PORT_PE)
set_bit(wIndex, &ehci->port_c_suspend);
temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
/* start resume signalling */
ehci_writel(ehci, temp | PORT_RESUME, status_reg);
+ set_bit(wIndex-1, &ehci->resuming_ports);
spin_unlock_irqrestore(&ehci->lock, flags);
msleep(20);
pr_err("%s: timeout waiting for SUSPEND\n", __func__);
ehci->reset_done[wIndex-1] = 0;
+ clear_bit(wIndex-1, &ehci->resuming_ports);
tegra->port_resuming = 1;
goto done;
the change-suspend feature turned on */
unsigned long suspended_ports; /* which ports are
suspended */
+ unsigned long resuming_ports; /* which ports have
+ started to resume */
/* per-HC memory pools (could be per-bus, but ...) */
struct dma_pool *qh_pool; /* qh per active urb */
}
}
- /* Disable any BIOS SMIs */
- writel(XHCI_LEGACY_DISABLE_SMI,
- base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
+ val = readl(base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
+ /* Mask off (turn off) any enabled SMIs */
+ val &= XHCI_LEGACY_DISABLE_SMI;
+ /* Mask all SMI events bits, RW1C */
+ val |= XHCI_LEGACY_SMI_EVENTS;
+ /* Disable any BIOS SMIs and clear all SMI events*/
+ writel(val, base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
if (usb_is_intel_switchable_xhci(pdev))
usb_enable_xhci_ports(pdev);
status = get_hub_status_data(uhci, buf);
switch (uhci->rh_state) {
- case UHCI_RH_SUSPENDING:
case UHCI_RH_SUSPENDED:
/* if port change, ask to be resumed */
- if (status || uhci->resuming_ports)
+ if (status || uhci->resuming_ports) {
+ status = 1;
usb_hcd_resume_root_hub(hcd);
+ }
break;
case UHCI_RH_AUTO_STOPPED:
xhci_dbg(xhci, " Event Interrupts %s\n",
(temp & CMD_EIE) ? "enabled " : "disabled");
xhci_dbg(xhci, " Host System Error Interrupts %s\n",
- (temp & CMD_EIE) ? "enabled " : "disabled");
+ (temp & CMD_HSEIE) ? "enabled " : "disabled");
xhci_dbg(xhci, " HC has %sfinished light reset\n",
(temp & CMD_LRESET) ? "not " : "");
}
/* USB Legacy Support Control and Status Register - section 7.1.2 */
/* Add this offset, plus the value of xECP in HCCPARAMS to the base address */
#define XHCI_LEGACY_CONTROL_OFFSET (0x04)
-/* bits 1:2, 5:12, and 17:19 need to be preserved; bits 21:28 should be zero */
-#define XHCI_LEGACY_DISABLE_SMI ((0x3 << 1) + (0xff << 5) + (0x7 << 17))
+/* bits 1:3, 5:12, and 17:19 need to be preserved; bits 21:28 should be zero */
+#define XHCI_LEGACY_DISABLE_SMI ((0x7 << 1) + (0xff << 5) + (0x7 << 17))
+#define XHCI_LEGACY_SMI_EVENTS (0x7 << 29)
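As a cross-check of the arithmetic (illustration only, not part of the patch), the two masks expand to:

/*
 * XHCI_LEGACY_DISABLE_SMI = (0x7 << 1) + (0xff << 5) + (0x7 << 17)
 *                         = 0x0000000E + 0x00001FE0 + 0x000E0000
 *                         = 0x000E1FEE  (keeps bits 1:3, 5:12 and 17:19)
 * XHCI_LEGACY_SMI_EVENTS  = (0x7 << 29) = 0xE0000000  (RW1C event bits 29:31)
 */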
/* USB 2.0 xHCI 0.96 L1C capability - section 7.2.2.1.3.2 */
#define XHCI_L1C (1 << 16)
int i;
/* Free the Event Ring Segment Table and the actual Event Ring */
- if (xhci->ir_set) {
- xhci_writel(xhci, 0, &xhci->ir_set->erst_size);
- xhci_write_64(xhci, 0, &xhci->ir_set->erst_base);
- xhci_write_64(xhci, 0, &xhci->ir_set->erst_dequeue);
- }
size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
if (xhci->erst.entries)
dma_free_coherent(&pdev->dev, size,
xhci->event_ring = NULL;
xhci_dbg(xhci, "Freed event ring\n");
- xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);
if (xhci->cmd_ring)
xhci_ring_free(xhci, xhci->cmd_ring);
xhci->cmd_ring = NULL;
xhci->medium_streams_pool = NULL;
xhci_dbg(xhci, "Freed medium stream array pool\n");
- xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
if (xhci->dcbaa)
dma_free_coherent(&pdev->dev, sizeof(*xhci->dcbaa),
xhci->dcbaa, xhci->dcbaa->dma);
fail:
xhci_warn(xhci, "Couldn't initialize memory\n");
+ xhci_halt(xhci);
+ xhci_reset(xhci);
xhci_mem_cleanup(xhci);
return -ENOMEM;
}
xhci->quirks |= XHCI_RESET_ON_RESUME;
xhci_dbg(xhci, "QUIRK: Resetting on resume\n");
}
+ if (pdev->vendor == PCI_VENDOR_ID_VIA)
+ xhci->quirks |= XHCI_RESET_ON_RESUME;
}
/* called during probe() after chip reset completes */
return pci_register_driver(&xhci_pci_driver);
}
-void __exit xhci_unregister_pci(void)
+void xhci_unregister_pci(void)
{
pci_unregister_driver(&xhci_pci_driver);
}
u32 irq_pending;
/* Acknowledge the PCI interrupt */
irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
- irq_pending |= 0x3;
+ irq_pending |= IMAN_IP;
xhci_writel(xhci, irq_pending, &xhci->ir_set->irq_pending);
}
urb->dev->speed == USB_SPEED_FULL)
urb->interval /= 8;
}
- return xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index);
+ return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index);
}
/*
}
ep_ring->num_trbs_free_temp = ep_ring->num_trbs_free;
- return xhci_queue_isoc_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index);
+ return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index);
}
/**** Command Ring Operations ****/
STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
if (!ret)
xhci->xhc_state |= XHCI_STATE_HALTED;
+ else
+ xhci_warn(xhci, "Host not halted after %u microseconds.\n",
+ XHCI_MAX_HALT_USEC);
return ret;
}
xhci->s3.dev_nt = xhci_readl(xhci, &xhci->op_regs->dev_notification);
xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
xhci->s3.config_reg = xhci_readl(xhci, &xhci->op_regs->config_reg);
- xhci->s3.irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
- xhci->s3.irq_control = xhci_readl(xhci, &xhci->ir_set->irq_control);
xhci->s3.erst_size = xhci_readl(xhci, &xhci->ir_set->erst_size);
xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+ xhci->s3.irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
+ xhci->s3.irq_control = xhci_readl(xhci, &xhci->ir_set->irq_control);
}
static void xhci_restore_registers(struct xhci_hcd *xhci)
xhci_writel(xhci, xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
xhci_writel(xhci, xhci->s3.config_reg, &xhci->op_regs->config_reg);
- xhci_writel(xhci, xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
- xhci_writel(xhci, xhci->s3.irq_control, &xhci->ir_set->irq_control);
xhci_writel(xhci, xhci->s3.erst_size, &xhci->ir_set->erst_size);
xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
+ xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
+ xhci_writel(xhci, xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
+ xhci_writel(xhci, xhci->s3.irq_control, &xhci->ir_set->irq_control);
}
static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
#define CMD_PM_INDEX (1 << 11)
/* bits 12:31 are reserved (and should be preserved on writes). */
+/* IMAN - Interrupt Management Register */
+#define IMAN_IP (1 << 1)
+#define IMAN_IE (1 << 0)
+
/* USBSTS - USB status - status bitmasks */
/* HC not running - set to 1 when run/stop bit is cleared. */
#define STS_HALT XHCI_STS_HALT
retval = -ENODEV;
goto exit;
}
- if (port->dev_state != PORT_REGISTERING)
- goto exit;
driver = port->serial->type;
if (driver->port_probe) {
if (!port)
return -ENODEV;
- if (port->dev_state != PORT_UNREGISTERING)
- return retval;
-
device_remove_file(&port->dev, &dev_attr_port_number);
driver = port->serial->type;
unsigned long last_dtr_rts; /* saved modem control outputs */
struct async_icount icount;
wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */
- char prev_status, diff_status; /* Used for TIOCMIWAIT */
+ char prev_status; /* Used for TIOCMIWAIT */
+ bool dev_gone; /* Used to abort TIOCMIWAIT */
char transmit_empty; /* If transmitter is empty or not */
struct usb_serial_port *port;
__u16 interface; /* FT2232C, FT2232H or FT4232H port interface
init_waitqueue_head(&priv->delta_msr_wait);
priv->flags = ASYNC_LOW_LATENCY;
+ priv->dev_gone = false;
if (quirk && quirk->port_probe)
quirk->port_probe(priv);
dbg("%s", __func__);
+ priv->dev_gone = true;
+ wake_up_interruptible_all(&priv->delta_msr_wait);
+
remove_sysfs_attrs(port);
kref_put(&priv->kref, ftdi_sio_priv_release);
N.B. packet may be processed more than once, but differences
are only processed once. */
status = packet[0] & FTDI_STATUS_B0_MASK;
- if (status & FTDI_RS0_CTS)
- priv->icount.cts++;
- if (status & FTDI_RS0_DSR)
- priv->icount.dsr++;
- if (status & FTDI_RS0_RI)
- priv->icount.rng++;
- if (status & FTDI_RS0_RLSD)
- priv->icount.dcd++;
if (status != priv->prev_status) {
- priv->diff_status |= status ^ priv->prev_status;
- wake_up_interruptible(&priv->delta_msr_wait);
+ char diff_status = status ^ priv->prev_status;
+
+ if (diff_status & FTDI_RS0_CTS)
+ priv->icount.cts++;
+ if (diff_status & FTDI_RS0_DSR)
+ priv->icount.dsr++;
+ if (diff_status & FTDI_RS0_RI)
+ priv->icount.rng++;
+ if (diff_status & FTDI_RS0_RLSD)
+ priv->icount.dcd++;
+
+ wake_up_interruptible_all(&priv->delta_msr_wait);
priv->prev_status = status;
}
*/
case TIOCMIWAIT:
cprev = priv->icount;
- while (1) {
+ while (!priv->dev_gone) {
interruptible_sleep_on(&priv->delta_msr_wait);
/* see if a signal did it */
if (signal_pending(current))
return -ERESTARTSYS;
cnow = priv->icount;
- if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
- cnow.dcd == cprev.dcd && cnow.cts == cprev.cts)
- return -EIO; /* no change => error */
if (((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) ||
((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) ||
((arg & TIOCM_CD) && (cnow.dcd != cprev.dcd)) ||
}
cprev = cnow;
}
- /* not reached */
+ return -EIO;
break;
case TIOCSERGETLSR:
return get_lsr_info(port, (struct serial_struct __user *)arg);
/* Product information. */
#define FOCUS_VENDOR_ID 0x0C2E
-#define FOCUS_PRODUCT_ID 0x0720
-#define FOCUS_PRODUCT_ID_UNI 0x0710
+#define FOCUS_PRODUCT_ID_BI 0x0720
+#define FOCUS_PRODUCT_ID_UNI 0x0700
#define METROUSB_SET_REQUEST_TYPE 0x40
#define METROUSB_SET_MODEM_CTRL_REQUEST 10
/* Device table list. */
static struct usb_device_id id_table[] = {
- { USB_DEVICE(FOCUS_VENDOR_ID, FOCUS_PRODUCT_ID) },
+ { USB_DEVICE(FOCUS_VENDOR_ID, FOCUS_PRODUCT_ID_BI) },
{ USB_DEVICE(FOCUS_VENDOR_ID, FOCUS_PRODUCT_ID_UNI) },
{ }, /* Terminating entry. */
};
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED3) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED4) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED5) },
control = priv->line_control;
if ((cflag & CBAUD) == B0)
priv->line_control &= ~(CONTROL_DTR | CONTROL_RTS);
- else
+ else if ((old_termios->c_cflag & CBAUD) == B0)
priv->line_control |= (CONTROL_DTR | CONTROL_RTS);
if (control != priv->line_control) {
control = priv->line_control;
{ USB_DEVICE(0x1199, 0x6856) }, /* Sierra Wireless AirCard 881 U */
{ USB_DEVICE(0x1199, 0x6859) }, /* Sierra Wireless AirCard 885 E */
{ USB_DEVICE(0x1199, 0x685A) }, /* Sierra Wireless AirCard 885 E */
+ { USB_DEVICE(0x1199, 0x68A2) }, /* Sierra Wireless MC7710 */
/* Sierra Wireless C885 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6880, 0xFF, 0xFF, 0xFF)},
/* Sierra Wireless C888, Air Card 501, USB 303, USB 304 */
serial->attached = 1;
}
+ /* Avoid race with tty_open and serial_install by setting the
+ * disconnected flag and not clearing it until all ports have been
+ * registered.
+ */
+ serial->disconnected = 1;
+
if (get_free_serial(serial, num_ports, &minor) == NULL) {
dev_err(&interface->dev, "No more free serial devices\n");
goto probe_error;
port = serial->port[i];
dev_set_name(&port->dev, "ttyUSB%d", port->number);
dbg ("%s - registering %s", __func__, dev_name(&port->dev));
- port->dev_state = PORT_REGISTERING;
device_enable_async_suspend(&port->dev);
retval = device_add(&port->dev);
- if (retval) {
+ if (retval)
dev_err(&port->dev, "Error registering port device, "
"continuing\n");
- port->dev_state = PORT_UNREGISTERED;
- } else {
- port->dev_state = PORT_REGISTERED;
- }
}
+ serial->disconnected = 0;
+
usb_serial_console_init(debug, minor);
exit:
}
kill_traffic(port);
cancel_work_sync(&port->work);
- if (port->dev_state == PORT_REGISTERED) {
-
- /* Make sure the port is bound so that the
- * driver's port_remove method is called.
- */
- if (!port->dev.driver) {
- int rc;
-
- port->dev.driver =
- &serial->type->driver;
- rc = device_bind_driver(&port->dev);
- }
- port->dev_state = PORT_UNREGISTERING;
+ if (device_is_registered(&port->dev))
device_del(&port->dev);
- port->dev_state = PORT_UNREGISTERED;
- }
}
}
serial->type->disconnect(serial);
#undef COMPLIANT_DEV
#undef USUAL_DEV
+#ifdef CONFIG_LOCKDEP
+
+static struct lock_class_key us_interface_key[USB_MAXINTERFACES];
+
+static void us_set_lock_class(struct mutex *mutex,
+ struct usb_interface *intf)
+{
+ struct usb_device *udev = interface_to_usbdev(intf);
+ struct usb_host_config *config = udev->actconfig;
+ int i;
+
+ for (i = 0; i < config->desc.bNumInterfaces; i++) {
+ if (config->interface[i] == intf)
+ break;
+ }
+
+ BUG_ON(i == config->desc.bNumInterfaces);
+
+ lockdep_set_class(mutex, &us_interface_key[i]);
+}
+
+#else
+
+static void us_set_lock_class(struct mutex *mutex,
+ struct usb_interface *intf)
+{
+}
+
+#endif
#ifdef CONFIG_PM /* Minimal support for suspend and resume */
*pus = us = host_to_us(host);
memset(us, 0, sizeof(struct us_data));
mutex_init(&(us->dev_mutex));
+ us_set_lock_class(&us->dev_mutex, intf);
init_completion(&us->cmnd_ready);
init_completion(&(us->notify));
init_waitqueue_head(&us->delay_wait);
vhost_test_stop(n, &private);
vhost_test_flush(n);
- vhost_dev_cleanup(&n->dev);
+ vhost_dev_cleanup(&n->dev, false);
/* We do an extra flush before freeing memory,
* since jobs can re-queue themselves. */
vhost_test_flush(n);
au1100fb_fix.mmio_start = regs_res->start;
au1100fb_fix.mmio_len = resource_size(regs_res);
- if (!devm_request_mem_region(au1100fb_fix.mmio_start,
+ if (!devm_request_mem_region(&dev->dev,
+ au1100fb_fix.mmio_start,
au1100fb_fix.mmio_len,
DRIVER_NAME)) {
print_err("fail to lock memory region at 0x%08lx",
fbdev->fb_len = fbdev->panel->xres * fbdev->panel->yres *
(fbdev->panel->bpp >> 3) * AU1100FB_NBR_VIDEO_BUFFERS;
- fbdev->fb_mem = dmam_alloc_coherent(&dev->dev, &dev->dev,
+ fbdev->fb_mem = dmam_alloc_coherent(&dev->dev,
PAGE_ALIGN(fbdev->fb_len),
&fbdev->fb_phys, GFP_KERNEL);
if (!fbdev->fb_mem) {
/* Allocate the framebuffer to the maximum screen size */
fbdev->fb_len = (win->w[plane].xres * win->w[plane].yres * bpp) / 8;
- fbdev->fb_mem = dmam_alloc_noncoherent(&dev->dev, &dev->dev,
+ fbdev->fb_mem = dmam_alloc_noncoherent(&dev->dev,
PAGE_ALIGN(fbdev->fb_len),
&fbdev->fb_phys, GFP_KERNEL);
if (!fbdev->fb_mem) {
/* Register Table */
typedef struct {
/* 0h */
- volatile unsigned long Thread0Enable; /* 0x0000 */
- volatile unsigned long Thread1Enable; /* 0x0004 */
- volatile unsigned long Thread0Recover; /* 0x0008 */
- volatile unsigned long Thread1Recover; /* 0x000C */
- volatile unsigned long Thread0Step; /* 0x0010 */
- volatile unsigned long Thread1Step; /* 0x0014 */
- volatile unsigned long VideoInStatus; /* 0x0018 */
- volatile unsigned long Core2InSignStart; /* 0x001C */
- volatile unsigned long Core1ResetVector; /* 0x0020 */
- volatile unsigned long Core1ROMOffset; /* 0x0024 */
- volatile unsigned long Core1ArbiterPriority; /* 0x0028 */
- volatile unsigned long VideoInControl; /* 0x002C */
- volatile unsigned long VideoInReg0CtrlA; /* 0x0030 */
- volatile unsigned long VideoInReg0CtrlB; /* 0x0034 */
- volatile unsigned long VideoInReg1CtrlA; /* 0x0038 */
- volatile unsigned long VideoInReg1CtrlB; /* 0x003C */
- volatile unsigned long Thread0Kicker; /* 0x0040 */
- volatile unsigned long Core2InputSign; /* 0x0044 */
- volatile unsigned long Thread0ProgCtr; /* 0x0048 */
- volatile unsigned long Thread1ProgCtr; /* 0x004C */
- volatile unsigned long Thread1Kicker; /* 0x0050 */
- volatile unsigned long GPRegister1; /* 0x0054 */
- volatile unsigned long GPRegister2; /* 0x0058 */
- volatile unsigned long GPRegister3; /* 0x005C */
- volatile unsigned long GPRegister4; /* 0x0060 */
- volatile unsigned long SerialIntA; /* 0x0064 */
-
- volatile unsigned long Fill0[6]; /* GAP 0x0068 - 0x007C */
-
- volatile unsigned long SoftwareReset; /* 0x0080 */
- volatile unsigned long SerialIntB; /* 0x0084 */
-
- volatile unsigned long Fill1[37]; /* GAP 0x0088 - 0x011C */
-
- volatile unsigned long ROMELQV; /* 0x011C */
- volatile unsigned long WLWH; /* 0x0120 */
- volatile unsigned long ROMELWL; /* 0x0124 */
-
- volatile unsigned long dwFill_1; /* GAP 0x0128 */
-
- volatile unsigned long IntStatus; /* 0x012C */
- volatile unsigned long IntMask; /* 0x0130 */
- volatile unsigned long IntClear; /* 0x0134 */
-
- volatile unsigned long Fill2[6]; /* GAP 0x0138 - 0x014C */
-
- volatile unsigned long ROMGPIOA; /* 0x0150 */
- volatile unsigned long ROMGPIOB; /* 0x0154 */
- volatile unsigned long ROMGPIOC; /* 0x0158 */
- volatile unsigned long ROMGPIOD; /* 0x015C */
-
- volatile unsigned long Fill3[2]; /* GAP 0x0160 - 0x0168 */
-
- volatile unsigned long AGPIntID; /* 0x0168 */
- volatile unsigned long AGPIntClassCode; /* 0x016C */
- volatile unsigned long AGPIntBIST; /* 0x0170 */
- volatile unsigned long AGPIntSSID; /* 0x0174 */
- volatile unsigned long AGPIntPMCSR; /* 0x0178 */
- volatile unsigned long VGAFrameBufBase; /* 0x017C */
- volatile unsigned long VGANotify; /* 0x0180 */
- volatile unsigned long DACPLLMode; /* 0x0184 */
- volatile unsigned long Core1VideoClockDiv; /* 0x0188 */
- volatile unsigned long AGPIntStat; /* 0x018C */
+ volatile u32 Thread0Enable; /* 0x0000 */
+ volatile u32 Thread1Enable; /* 0x0004 */
+ volatile u32 Thread0Recover; /* 0x0008 */
+ volatile u32 Thread1Recover; /* 0x000C */
+ volatile u32 Thread0Step; /* 0x0010 */
+ volatile u32 Thread1Step; /* 0x0014 */
+ volatile u32 VideoInStatus; /* 0x0018 */
+ volatile u32 Core2InSignStart; /* 0x001C */
+ volatile u32 Core1ResetVector; /* 0x0020 */
+ volatile u32 Core1ROMOffset; /* 0x0024 */
+ volatile u32 Core1ArbiterPriority; /* 0x0028 */
+ volatile u32 VideoInControl; /* 0x002C */
+ volatile u32 VideoInReg0CtrlA; /* 0x0030 */
+ volatile u32 VideoInReg0CtrlB; /* 0x0034 */
+ volatile u32 VideoInReg1CtrlA; /* 0x0038 */
+ volatile u32 VideoInReg1CtrlB; /* 0x003C */
+ volatile u32 Thread0Kicker; /* 0x0040 */
+ volatile u32 Core2InputSign; /* 0x0044 */
+ volatile u32 Thread0ProgCtr; /* 0x0048 */
+ volatile u32 Thread1ProgCtr; /* 0x004C */
+ volatile u32 Thread1Kicker; /* 0x0050 */
+ volatile u32 GPRegister1; /* 0x0054 */
+ volatile u32 GPRegister2; /* 0x0058 */
+ volatile u32 GPRegister3; /* 0x005C */
+ volatile u32 GPRegister4; /* 0x0060 */
+ volatile u32 SerialIntA; /* 0x0064 */
+
+ volatile u32 Fill0[6]; /* GAP 0x0068 - 0x007C */
+
+ volatile u32 SoftwareReset; /* 0x0080 */
+ volatile u32 SerialIntB; /* 0x0084 */
+
+ volatile u32 Fill1[37]; /* GAP 0x0088 - 0x011C */
+
+ volatile u32 ROMELQV; /* 0x011C */
+ volatile u32 WLWH; /* 0x0120 */
+ volatile u32 ROMELWL; /* 0x0124 */
+
+ volatile u32 dwFill_1; /* GAP 0x0128 */
+
+ volatile u32 IntStatus; /* 0x012C */
+ volatile u32 IntMask; /* 0x0130 */
+ volatile u32 IntClear; /* 0x0134 */
+
+ volatile u32 Fill2[6]; /* GAP 0x0138 - 0x014C */
+
+ volatile u32 ROMGPIOA; /* 0x0150 */
+ volatile u32 ROMGPIOB; /* 0x0154 */
+ volatile u32 ROMGPIOC; /* 0x0158 */
+ volatile u32 ROMGPIOD; /* 0x015C */
+
+ volatile u32 Fill3[2]; /* GAP 0x0160 - 0x0168 */
+
+ volatile u32 AGPIntID; /* 0x0168 */
+ volatile u32 AGPIntClassCode; /* 0x016C */
+ volatile u32 AGPIntBIST; /* 0x0170 */
+ volatile u32 AGPIntSSID; /* 0x0174 */
+ volatile u32 AGPIntPMCSR; /* 0x0178 */
+ volatile u32 VGAFrameBufBase; /* 0x017C */
+ volatile u32 VGANotify; /* 0x0180 */
+ volatile u32 DACPLLMode; /* 0x0184 */
+ volatile u32 Core1VideoClockDiv; /* 0x0188 */
+ volatile u32 AGPIntStat; /* 0x018C */
/*
- volatile unsigned long Fill4[0x0400/4 - 0x0190/4]; //GAP 0x0190 - 0x0400
- volatile unsigned long Fill5[0x05FC/4 - 0x0400/4]; //GAP 0x0400 - 0x05FC Fog Table
- volatile unsigned long Fill6[0x0604/4 - 0x0600/4]; //GAP 0x0600 - 0x0604
- volatile unsigned long Fill7[0x0680/4 - 0x0608/4]; //GAP 0x0608 - 0x0680
- volatile unsigned long Fill8[0x07FC/4 - 0x0684/4]; //GAP 0x0684 - 0x07FC
+ volatile u32 Fill4[0x0400/4 - 0x0190/4]; //GAP 0x0190 - 0x0400
+ volatile u32 Fill5[0x05FC/4 - 0x0400/4]; //GAP 0x0400 - 0x05FC Fog Table
+ volatile u32 Fill6[0x0604/4 - 0x0600/4]; //GAP 0x0600 - 0x0604
+ volatile u32 Fill7[0x0680/4 - 0x0608/4]; //GAP 0x0608 - 0x0680
+ volatile u32 Fill8[0x07FC/4 - 0x0684/4]; //GAP 0x0684 - 0x07FC
*/
- volatile unsigned long Fill4[412]; /* 0x0190 - 0x07FC */
-
- volatile unsigned long TACtrlStreamBase; /* 0x0800 */
- volatile unsigned long TAObjDataBase; /* 0x0804 */
- volatile unsigned long TAPtrDataBase; /* 0x0808 */
- volatile unsigned long TARegionDataBase; /* 0x080C */
- volatile unsigned long TATailPtrBase; /* 0x0810 */
- volatile unsigned long TAPtrRegionSize; /* 0x0814 */
- volatile unsigned long TAConfiguration; /* 0x0818 */
- volatile unsigned long TAObjDataStartAddr; /* 0x081C */
- volatile unsigned long TAObjDataEndAddr; /* 0x0820 */
- volatile unsigned long TAXScreenClip; /* 0x0824 */
- volatile unsigned long TAYScreenClip; /* 0x0828 */
- volatile unsigned long TARHWClamp; /* 0x082C */
- volatile unsigned long TARHWCompare; /* 0x0830 */
- volatile unsigned long TAStart; /* 0x0834 */
- volatile unsigned long TAObjReStart; /* 0x0838 */
- volatile unsigned long TAPtrReStart; /* 0x083C */
- volatile unsigned long TAStatus1; /* 0x0840 */
- volatile unsigned long TAStatus2; /* 0x0844 */
- volatile unsigned long TAIntStatus; /* 0x0848 */
- volatile unsigned long TAIntMask; /* 0x084C */
-
- volatile unsigned long Fill5[235]; /* GAP 0x0850 - 0x0BF8 */
-
- volatile unsigned long TextureAddrThresh; /* 0x0BFC */
- volatile unsigned long Core1Translation; /* 0x0C00 */
- volatile unsigned long TextureAddrReMap; /* 0x0C04 */
- volatile unsigned long RenderOutAGPRemap; /* 0x0C08 */
- volatile unsigned long _3DRegionReadTrans; /* 0x0C0C */
- volatile unsigned long _3DPtrReadTrans; /* 0x0C10 */
- volatile unsigned long _3DParamReadTrans; /* 0x0C14 */
- volatile unsigned long _3DRegionReadThresh; /* 0x0C18 */
- volatile unsigned long _3DPtrReadThresh; /* 0x0C1C */
- volatile unsigned long _3DParamReadThresh; /* 0x0C20 */
- volatile unsigned long _3DRegionReadAGPRemap; /* 0x0C24 */
- volatile unsigned long _3DPtrReadAGPRemap; /* 0x0C28 */
- volatile unsigned long _3DParamReadAGPRemap; /* 0x0C2C */
- volatile unsigned long ZBufferAGPRemap; /* 0x0C30 */
- volatile unsigned long TAIndexAGPRemap; /* 0x0C34 */
- volatile unsigned long TAVertexAGPRemap; /* 0x0C38 */
- volatile unsigned long TAUVAddrTrans; /* 0x0C3C */
- volatile unsigned long TATailPtrCacheTrans; /* 0x0C40 */
- volatile unsigned long TAParamWriteTrans; /* 0x0C44 */
- volatile unsigned long TAPtrWriteTrans; /* 0x0C48 */
- volatile unsigned long TAParamWriteThresh; /* 0x0C4C */
- volatile unsigned long TAPtrWriteThresh; /* 0x0C50 */
- volatile unsigned long TATailPtrCacheAGPRe; /* 0x0C54 */
- volatile unsigned long TAParamWriteAGPRe; /* 0x0C58 */
- volatile unsigned long TAPtrWriteAGPRe; /* 0x0C5C */
- volatile unsigned long SDRAMArbiterConf; /* 0x0C60 */
- volatile unsigned long SDRAMConf0; /* 0x0C64 */
- volatile unsigned long SDRAMConf1; /* 0x0C68 */
- volatile unsigned long SDRAMConf2; /* 0x0C6C */
- volatile unsigned long SDRAMRefresh; /* 0x0C70 */
- volatile unsigned long SDRAMPowerStat; /* 0x0C74 */
-
- volatile unsigned long Fill6[2]; /* GAP 0x0C78 - 0x0C7C */
-
- volatile unsigned long RAMBistData; /* 0x0C80 */
- volatile unsigned long RAMBistCtrl; /* 0x0C84 */
- volatile unsigned long FIFOBistKey; /* 0x0C88 */
- volatile unsigned long RAMBistResult; /* 0x0C8C */
- volatile unsigned long FIFOBistResult; /* 0x0C90 */
+ volatile u32 Fill4[412]; /* 0x0190 - 0x07FC */
+
+ volatile u32 TACtrlStreamBase; /* 0x0800 */
+ volatile u32 TAObjDataBase; /* 0x0804 */
+ volatile u32 TAPtrDataBase; /* 0x0808 */
+ volatile u32 TARegionDataBase; /* 0x080C */
+ volatile u32 TATailPtrBase; /* 0x0810 */
+ volatile u32 TAPtrRegionSize; /* 0x0814 */
+ volatile u32 TAConfiguration; /* 0x0818 */
+ volatile u32 TAObjDataStartAddr; /* 0x081C */
+ volatile u32 TAObjDataEndAddr; /* 0x0820 */
+ volatile u32 TAXScreenClip; /* 0x0824 */
+ volatile u32 TAYScreenClip; /* 0x0828 */
+ volatile u32 TARHWClamp; /* 0x082C */
+ volatile u32 TARHWCompare; /* 0x0830 */
+ volatile u32 TAStart; /* 0x0834 */
+ volatile u32 TAObjReStart; /* 0x0838 */
+ volatile u32 TAPtrReStart; /* 0x083C */
+ volatile u32 TAStatus1; /* 0x0840 */
+ volatile u32 TAStatus2; /* 0x0844 */
+ volatile u32 TAIntStatus; /* 0x0848 */
+ volatile u32 TAIntMask; /* 0x084C */
+
+ volatile u32 Fill5[235]; /* GAP 0x0850 - 0x0BF8 */
+
+ volatile u32 TextureAddrThresh; /* 0x0BFC */
+ volatile u32 Core1Translation; /* 0x0C00 */
+ volatile u32 TextureAddrReMap; /* 0x0C04 */
+ volatile u32 RenderOutAGPRemap; /* 0x0C08 */
+ volatile u32 _3DRegionReadTrans; /* 0x0C0C */
+ volatile u32 _3DPtrReadTrans; /* 0x0C10 */
+ volatile u32 _3DParamReadTrans; /* 0x0C14 */
+ volatile u32 _3DRegionReadThresh; /* 0x0C18 */
+ volatile u32 _3DPtrReadThresh; /* 0x0C1C */
+ volatile u32 _3DParamReadThresh; /* 0x0C20 */
+ volatile u32 _3DRegionReadAGPRemap; /* 0x0C24 */
+ volatile u32 _3DPtrReadAGPRemap; /* 0x0C28 */
+ volatile u32 _3DParamReadAGPRemap; /* 0x0C2C */
+ volatile u32 ZBufferAGPRemap; /* 0x0C30 */
+ volatile u32 TAIndexAGPRemap; /* 0x0C34 */
+ volatile u32 TAVertexAGPRemap; /* 0x0C38 */
+ volatile u32 TAUVAddrTrans; /* 0x0C3C */
+ volatile u32 TATailPtrCacheTrans; /* 0x0C40 */
+ volatile u32 TAParamWriteTrans; /* 0x0C44 */
+ volatile u32 TAPtrWriteTrans; /* 0x0C48 */
+ volatile u32 TAParamWriteThresh; /* 0x0C4C */
+ volatile u32 TAPtrWriteThresh; /* 0x0C50 */
+ volatile u32 TATailPtrCacheAGPRe; /* 0x0C54 */
+ volatile u32 TAParamWriteAGPRe; /* 0x0C58 */
+ volatile u32 TAPtrWriteAGPRe; /* 0x0C5C */
+ volatile u32 SDRAMArbiterConf; /* 0x0C60 */
+ volatile u32 SDRAMConf0; /* 0x0C64 */
+ volatile u32 SDRAMConf1; /* 0x0C68 */
+ volatile u32 SDRAMConf2; /* 0x0C6C */
+ volatile u32 SDRAMRefresh; /* 0x0C70 */
+ volatile u32 SDRAMPowerStat; /* 0x0C74 */
+
+ volatile u32 Fill6[2]; /* GAP 0x0C78 - 0x0C7C */
+
+ volatile u32 RAMBistData; /* 0x0C80 */
+ volatile u32 RAMBistCtrl; /* 0x0C84 */
+ volatile u32 FIFOBistKey; /* 0x0C88 */
+ volatile u32 RAMBistResult; /* 0x0C8C */
+ volatile u32 FIFOBistResult; /* 0x0C90 */
/*
- volatile unsigned long Fill11[0x0CBC/4 - 0x0C94/4]; //GAP 0x0C94 - 0x0CBC
- volatile unsigned long Fill12[0x0CD0/4 - 0x0CC0/4]; //GAP 0x0CC0 - 0x0CD0 3DRegisters
+ volatile u32 Fill11[0x0CBC/4 - 0x0C94/4]; //GAP 0x0C94 - 0x0CBC
+ volatile u32 Fill12[0x0CD0/4 - 0x0CC0/4]; //GAP 0x0CC0 - 0x0CD0 3DRegisters
*/
- volatile unsigned long Fill7[16]; /* 0x0c94 - 0x0cd0 */
+ volatile u32 Fill7[16]; /* 0x0c94 - 0x0cd0 */
- volatile unsigned long SDRAMAddrSign; /* 0x0CD4 */
- volatile unsigned long SDRAMDataSign; /* 0x0CD8 */
- volatile unsigned long SDRAMSignConf; /* 0x0CDC */
+ volatile u32 SDRAMAddrSign; /* 0x0CD4 */
+ volatile u32 SDRAMDataSign; /* 0x0CD8 */
+ volatile u32 SDRAMSignConf; /* 0x0CDC */
/* DWFILL; //GAP 0x0CE0 */
- volatile unsigned long dwFill_2;
-
- volatile unsigned long ISPSignature; /* 0x0CE4 */
-
- volatile unsigned long Fill8[454]; /*GAP 0x0CE8 - 0x13FC */
-
- volatile unsigned long DACPrimAddress; /* 0x1400 */
- volatile unsigned long DACPrimSize; /* 0x1404 */
- volatile unsigned long DACCursorAddr; /* 0x1408 */
- volatile unsigned long DACCursorCtrl; /* 0x140C */
- volatile unsigned long DACOverlayAddr; /* 0x1410 */
- volatile unsigned long DACOverlayUAddr; /* 0x1414 */
- volatile unsigned long DACOverlayVAddr; /* 0x1418 */
- volatile unsigned long DACOverlaySize; /* 0x141C */
- volatile unsigned long DACOverlayVtDec; /* 0x1420 */
-
- volatile unsigned long Fill9[9]; /* GAP 0x1424 - 0x1444 */
-
- volatile unsigned long DACVerticalScal; /* 0x1448 */
- volatile unsigned long DACPixelFormat; /* 0x144C */
- volatile unsigned long DACHorizontalScal; /* 0x1450 */
- volatile unsigned long DACVidWinStart; /* 0x1454 */
- volatile unsigned long DACVidWinEnd; /* 0x1458 */
- volatile unsigned long DACBlendCtrl; /* 0x145C */
- volatile unsigned long DACHorTim1; /* 0x1460 */
- volatile unsigned long DACHorTim2; /* 0x1464 */
- volatile unsigned long DACHorTim3; /* 0x1468 */
- volatile unsigned long DACVerTim1; /* 0x146C */
- volatile unsigned long DACVerTim2; /* 0x1470 */
- volatile unsigned long DACVerTim3; /* 0x1474 */
- volatile unsigned long DACBorderColor; /* 0x1478 */
- volatile unsigned long DACSyncCtrl; /* 0x147C */
- volatile unsigned long DACStreamCtrl; /* 0x1480 */
- volatile unsigned long DACLUTAddress; /* 0x1484 */
- volatile unsigned long DACLUTData; /* 0x1488 */
- volatile unsigned long DACBurstCtrl; /* 0x148C */
- volatile unsigned long DACCrcTrigger; /* 0x1490 */
- volatile unsigned long DACCrcDone; /* 0x1494 */
- volatile unsigned long DACCrcResult1; /* 0x1498 */
- volatile unsigned long DACCrcResult2; /* 0x149C */
- volatile unsigned long DACLinecount; /* 0x14A0 */
-
- volatile unsigned long Fill10[151]; /*GAP 0x14A4 - 0x16FC */
-
- volatile unsigned long DigVidPortCtrl; /* 0x1700 */
- volatile unsigned long DigVidPortStat; /* 0x1704 */
+ volatile u32 dwFill_2;
+
+ volatile u32 ISPSignature; /* 0x0CE4 */
+
+ volatile u32 Fill8[454]; /*GAP 0x0CE8 - 0x13FC */
+
+ volatile u32 DACPrimAddress; /* 0x1400 */
+ volatile u32 DACPrimSize; /* 0x1404 */
+ volatile u32 DACCursorAddr; /* 0x1408 */
+ volatile u32 DACCursorCtrl; /* 0x140C */
+ volatile u32 DACOverlayAddr; /* 0x1410 */
+ volatile u32 DACOverlayUAddr; /* 0x1414 */
+ volatile u32 DACOverlayVAddr; /* 0x1418 */
+ volatile u32 DACOverlaySize; /* 0x141C */
+ volatile u32 DACOverlayVtDec; /* 0x1420 */
+
+ volatile u32 Fill9[9]; /* GAP 0x1424 - 0x1444 */
+
+ volatile u32 DACVerticalScal; /* 0x1448 */
+ volatile u32 DACPixelFormat; /* 0x144C */
+ volatile u32 DACHorizontalScal; /* 0x1450 */
+ volatile u32 DACVidWinStart; /* 0x1454 */
+ volatile u32 DACVidWinEnd; /* 0x1458 */
+ volatile u32 DACBlendCtrl; /* 0x145C */
+ volatile u32 DACHorTim1; /* 0x1460 */
+ volatile u32 DACHorTim2; /* 0x1464 */
+ volatile u32 DACHorTim3; /* 0x1468 */
+ volatile u32 DACVerTim1; /* 0x146C */
+ volatile u32 DACVerTim2; /* 0x1470 */
+ volatile u32 DACVerTim3; /* 0x1474 */
+ volatile u32 DACBorderColor; /* 0x1478 */
+ volatile u32 DACSyncCtrl; /* 0x147C */
+ volatile u32 DACStreamCtrl; /* 0x1480 */
+ volatile u32 DACLUTAddress; /* 0x1484 */
+ volatile u32 DACLUTData; /* 0x1488 */
+ volatile u32 DACBurstCtrl; /* 0x148C */
+ volatile u32 DACCrcTrigger; /* 0x1490 */
+ volatile u32 DACCrcDone; /* 0x1494 */
+ volatile u32 DACCrcResult1; /* 0x1498 */
+ volatile u32 DACCrcResult2; /* 0x149C */
+ volatile u32 DACLinecount; /* 0x14A0 */
+
+ volatile u32 Fill10[151]; /*GAP 0x14A4 - 0x16FC */
+
+ volatile u32 DigVidPortCtrl; /* 0x1700 */
+ volatile u32 DigVidPortStat; /* 0x1704 */
/*
- volatile unsigned long Fill11[0x1FFC/4 - 0x1708/4]; //GAP 0x1708 - 0x1FFC
- volatile unsigned long Fill17[0x3000/4 - 0x2FFC/4]; //GAP 0x2000 - 0x2FFC ALUT
+ volatile u32 Fill11[0x1FFC/4 - 0x1708/4]; //GAP 0x1708 - 0x1FFC
+ volatile u32 Fill17[0x3000/4 - 0x2FFC/4]; //GAP 0x2000 - 0x2FFC ALUT
*/
- volatile unsigned long Fill11[1598];
+ volatile u32 Fill11[1598];
/* DWFILL; //GAP 0x3000 ALUT 256MB offset */
- volatile unsigned long Fill_3;
+ volatile u32 Fill_3;
} STG4000REG;
mddi_set_auto_hibernate(&mddi->client_data, 1);
}
-static int __init mddi_get_client_caps(struct mddi_info *mddi)
+static int __devinit mddi_get_client_caps(struct mddi_info *mddi)
{
int i, j;
static struct mddi_info mddi_info[2];
-static int __init mddi_clk_setup(struct platform_device *pdev,
- struct mddi_info *mddi,
- unsigned long clk_rate)
+static int __devinit mddi_clk_setup(struct platform_device *pdev,
+ struct mddi_info *mddi,
+ unsigned long clk_rate)
{
int ret;
par->pmi_setpal = pmi_setpal;
par->ypan = ypan;
- if (par->pmi_setpal || par->ypan)
- uvesafb_vbe_getpmi(task, par);
+ if (par->pmi_setpal || par->ypan) {
+ if (__supported_pte_mask & _PAGE_NX) {
+ par->pmi_setpal = par->ypan = 0;
+ printk(KERN_WARNING "uvesafb: NX protection is actively."
+ "We have better not to use the PMI.\n");
+ } else {
+ uvesafb_vbe_getpmi(task, par);
+ }
+ }
#else
/* The protected mode interface is not available on non-x86. */
par->pmi_setpal = par->ypan = 0;
#include <linux/slab.h>
#include <linux/module.h>
+/*
+ * The balloon device works in 4K page units, so each kernel page may be
+ * covered by multiple balloon pages. All memory counters in this driver
+ * are in balloon page units.
+ */
+#define VIRTIO_BALLOON_PAGES_PER_PAGE (PAGE_SIZE >> VIRTIO_BALLOON_PFN_SHIFT)
+
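To make the unit conversion concrete, here is a small worked example (illustrative values only; it assumes the usual 12-bit balloon page shift, i.e. 4 KiB balloon pages, and a hypothetical 64 KiB kernel page size):

/*
 * Example: PAGE_SHIFT = 16, VIRTIO_BALLOON_PFN_SHIFT = 12
 *   VIRTIO_BALLOON_PAGES_PER_PAGE = (1 << 16) >> 12 = 16
 * so a kernel page with pfn N is described by balloon pfns
 * N * 16 .. N * 16 + 15, which is what set_page_pfns() below writes
 * into the pfns[] array.
 */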
struct virtio_balloon
{
struct virtio_device *vdev;
/* Waiting for host to ack the pages we released. */
struct completion acked;
- /* The pages we've told the Host we're not using. */
+ /* Number of balloon pages we've told the Host we're not using. */
unsigned int num_pages;
+ /*
+ * The pages we've told the Host we're not using.
+ * Each page on this list adds VIRTIO_BALLOON_PAGES_PER_PAGE
+ * to num_pages above.
+ */
struct list_head pages;
/* The array of pfns we tell the Host about. */
BUILD_BUG_ON(PAGE_SHIFT < VIRTIO_BALLOON_PFN_SHIFT);
/* Convert pfn from Linux page size to balloon page size. */
- return pfn >> (PAGE_SHIFT - VIRTIO_BALLOON_PFN_SHIFT);
+ return pfn * VIRTIO_BALLOON_PAGES_PER_PAGE;
+}
+
+static struct page *balloon_pfn_to_page(u32 pfn)
+{
+ BUG_ON(pfn % VIRTIO_BALLOON_PAGES_PER_PAGE);
+ return pfn_to_page(pfn / VIRTIO_BALLOON_PAGES_PER_PAGE);
}
static void balloon_ack(struct virtqueue *vq)
wait_for_completion(&vb->acked);
}
+static void set_page_pfns(u32 pfns[], struct page *page)
+{
+ unsigned int i;
+
+ /* Set balloon pfns pointing at this page.
+ * Note that the first pfn points at start of the page. */
+ for (i = 0; i < VIRTIO_BALLOON_PAGES_PER_PAGE; i++)
+ pfns[i] = page_to_balloon_pfn(page) + i;
+}
+
static void fill_balloon(struct virtio_balloon *vb, size_t num)
{
/* We can only do one array worth at a time. */
num = min(num, ARRAY_SIZE(vb->pfns));
- for (vb->num_pfns = 0; vb->num_pfns < num; vb->num_pfns++) {
+ for (vb->num_pfns = 0; vb->num_pfns < num;
+ vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) {
struct page *page = alloc_page(GFP_HIGHUSER | __GFP_NORETRY |
__GFP_NOMEMALLOC | __GFP_NOWARN);
if (!page) {
msleep(200);
break;
}
- vb->pfns[vb->num_pfns] = page_to_balloon_pfn(page);
+ set_page_pfns(vb->pfns + vb->num_pfns, page);
+ vb->num_pages += VIRTIO_BALLOON_PAGES_PER_PAGE;
totalram_pages--;
- vb->num_pages++;
list_add(&page->lru, &vb->pages);
}
{
unsigned int i;
- for (i = 0; i < num; i++) {
- __free_page(pfn_to_page(pfns[i]));
+ /* Find pfns pointing at start of each page, get pages and free them. */
+ for (i = 0; i < num; i += VIRTIO_BALLOON_PAGES_PER_PAGE) {
+ __free_page(balloon_pfn_to_page(pfns[i]));
totalram_pages++;
}
}
/* We can only do one array worth at a time. */
num = min(num, ARRAY_SIZE(vb->pfns));
- for (vb->num_pfns = 0; vb->num_pfns < num; vb->num_pfns++) {
+ for (vb->num_pfns = 0; vb->num_pfns < num;
+ vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) {
page = list_first_entry(&vb->pages, struct page, lru);
list_del(&page->lru);
- vb->pfns[vb->num_pfns] = page_to_balloon_pfn(page);
- vb->num_pages--;
+ set_page_pfns(vb->pfns + vb->num_pfns, page);
+ vb->num_pages -= VIRTIO_BALLOON_PAGES_PER_PAGE;
}
/*
static inline s64 towards_target(struct virtio_balloon *vb)
{
- u32 v;
+ __le32 v;
+ s64 target;
+
vb->vdev->config->get(vb->vdev,
offsetof(struct virtio_balloon_config, num_pages),
&v, sizeof(v));
- return (s64)v - vb->num_pages;
+ target = le32_to_cpu(v);
+ return target - vb->num_pages;
}
static void update_balloon_size(struct virtio_balloon *vb)
bio_put(bio);
bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
+ BUG_ON(!bio);
bio->bi_private = cb;
bio->bi_end_io = end_compressed_bio_write;
bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
comp_bio = compressed_bio_alloc(bdev, cur_disk_byte,
GFP_NOFS);
+ BUG_ON(!comp_bio);
comp_bio->bi_private = cb;
comp_bio->bi_end_io = end_compressed_bio_read;
static inline bool btrfs_root_readonly(struct btrfs_root *root)
{
- return root->root_item.flags & BTRFS_ROOT_SUBVOL_RDONLY;
+ return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_RDONLY)) != 0;
}
/* struct btrfs_root_backup */
* allocate blocks for the tree root we can't do the fast caching since
* we likely hold important locks.
*/
- if (trans && (!trans->transaction->in_commit) &&
- (root && root != root->fs_info->tree_root) &&
- btrfs_test_opt(root, SPACE_CACHE)) {
+ if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
ret = load_free_space_cache(fs_info, cache);
spin_lock(&cache->lock);
/*
* returns target flags in extended format or 0 if restripe for this
* chunk_type is not in progress
+ *
+ * should be called with either volume_mutex or balance_lock held
*/
static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
{
struct btrfs_balance_control *bctl = fs_info->balance_ctl;
u64 target = 0;
- BUG_ON(!mutex_is_locked(&fs_info->volume_mutex) &&
- !spin_is_locked(&fs_info->balance_lock));
-
if (!bctl)
return 0;
num_bytes += div64_u64(data_used + meta_used, 50);
if (num_bytes * 3 > meta_used)
- num_bytes = div64_u64(meta_used, 3) * 2;
+ num_bytes = div64_u64(meta_used, 3);
return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
}
struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
u64 start = eb->start;
unsigned long i, num_pages = num_extent_pages(eb->start, eb->len);
- int ret;
+ int ret = 0;
for (i = 0; i < num_pages; i++) {
struct page *p = extent_buffer_page(eb, i);
}
bio = bio_alloc(GFP_NOFS, 1);
+ if (!bio) {
+ free_io_failure(inode, failrec, 0);
+ return -EIO;
+ }
bio->bi_private = state;
bio->bi_end_io = failed_bio->bi_end_io;
bio->bi_sector = failrec->logical >> 9;
u64 used = btrfs_block_group_used(&block_group->item);
/*
- * If we're unmounting then just return, since this does a search on the
- * normal root and not the commit root and we could deadlock.
- */
- if (btrfs_fs_closing(fs_info))
- return 0;
-
- /*
* If this block group has been marked to be cleared for one reason or
* another then we can't trust the on disk cache, so just return.
*/
path = btrfs_alloc_path();
if (!path)
return 0;
+ path->search_commit_root = 1;
+ path->skip_locking = 1;
inode = lookup_free_space_inode(root, block_group, path);
if (IS_ERR(inode)) {
BUG_ON(!page->page);
bio = bio_alloc(GFP_NOFS, 1);
+ if (!bio)
+ return -EIO;
bio->bi_bdev = page->bdev;
bio->bi_sector = page->physical >> 9;
bio->bi_end_io = scrub_complete_bio_end_io;
DECLARE_COMPLETION_ONSTACK(complete);
bio = bio_alloc(GFP_NOFS, 1);
+ if (!bio)
+ return -EIO;
bio->bi_bdev = page_bad->bdev;
bio->bi_sector = page_bad->physical >> 9;
bio->bi_end_io = scrub_complete_bio_end_io;
struct btrfs_transaction *cur_trans = trans->transaction;
struct btrfs_fs_info *info = root->fs_info;
int count = 0;
+ int err = 0;
if (--trans->use_count) {
trans->block_rsv = trans->orig_rsv;
if (current->journal_info == trans)
current->journal_info = NULL;
- memset(trans, 0, sizeof(*trans));
- kmem_cache_free(btrfs_trans_handle_cachep, trans);
if (throttle)
btrfs_run_delayed_iputs(root);
if (trans->aborted ||
root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
- return -EIO;
+ err = -EIO;
}
- return 0;
+ memset(trans, 0, sizeof(*trans));
+ kmem_cache_free(btrfs_trans_handle_cachep, trans);
+ return err;
}
int btrfs_end_transaction(struct btrfs_trans_handle *trans,
int sub_stripes = 0;
u64 stripes_per_dev = 0;
u32 remaining_stripes = 0;
+ u32 last_stripe = 0;
if (map->type &
(BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
stripe_nr_orig,
factor,
&remaining_stripes);
+ div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
+ last_stripe *= sub_stripes;
}
for (i = 0; i < num_stripes; i++) {
BTRFS_BLOCK_GROUP_RAID10)) {
bbio->stripes[i].length = stripes_per_dev *
map->stripe_len;
+
if (i / sub_stripes < remaining_stripes)
bbio->stripes[i].length +=
map->stripe_len;
+
+ /*
+ * Special for the first stripe and
+ * the last stripe:
+ *
+ * |-------|...|-------|
+ * |----------|
+ * off end_off
+ */
if (i < sub_stripes)
bbio->stripes[i].length -=
stripe_offset;
- if ((i / sub_stripes + 1) %
- sub_stripes == remaining_stripes)
+
+ if (stripe_index >= last_stripe &&
+ stripe_index <= (last_stripe +
+ sub_stripes - 1))
bbio->stripes[i].length -=
stripe_end_offset;
+
if (i == sub_stripes - 1)
stripe_offset = 0;
} else
/* Options which could be blank */
Opt_blank_pass,
+ Opt_blank_user,
+ Opt_blank_ip,
Opt_err
};
{ Opt_wsize, "wsize=%s" },
{ Opt_actimeo, "actimeo=%s" },
+ { Opt_blank_user, "user=" },
+ { Opt_blank_user, "username=" },
{ Opt_user, "user=%s" },
{ Opt_user, "username=%s" },
{ Opt_blank_pass, "pass=" },
{ Opt_pass, "pass=%s" },
{ Opt_pass, "password=%s" },
+ { Opt_blank_ip, "ip=" },
+ { Opt_blank_ip, "addr=" },
{ Opt_ip, "ip=%s" },
{ Opt_ip, "addr=%s" },
{ Opt_unc, "unc=%s" },
string = match_strdup(args);
if (string == NULL)
return -ENOMEM;
- rc = kstrtoul(string, 10, option);
+ rc = kstrtoul(string, 0, option);
kfree(string);
return rc;
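(Passing base 0 lets kstrtoul() auto-detect the radix, so, assuming this helper backs the numeric options in the table above, a value written as wsize=0x10000 would parse the same as wsize=65536, while plain decimal values behave exactly as before.)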
/* String Arguments */
+ case Opt_blank_user:
+ /* null user, i.e. anonymous authentication */
+ vol->nullauth = 1;
+ vol->username = NULL;
+ break;
case Opt_user:
string = match_strdup(args);
if (string == NULL)
goto out_nomem;
- if (!*string) {
- /* null user, ie. anonymous authentication */
- vol->nullauth = 1;
- } else if (strnlen(string, MAX_USERNAME_SIZE) >
+ if (strnlen(string, MAX_USERNAME_SIZE) >
MAX_USERNAME_SIZE) {
printk(KERN_WARNING "CIFS: username too long\n");
goto cifs_parse_mount_err;
}
vol->password[j] = '\0';
break;
+ case Opt_blank_ip:
+ vol->UNCip = NULL;
+ break;
case Opt_ip:
string = match_strdup(args);
if (string == NULL)
goto out_nomem;
- if (!*string) {
- vol->UNCip = NULL;
- } else if (strnlen(string, INET6_ADDRSTRLEN) >
+ if (strnlen(string, INET6_ADDRSTRLEN) >
INET6_ADDRSTRLEN) {
printk(KERN_WARNING "CIFS: ip address "
"too long\n");
if (string == NULL)
goto out_nomem;
- if (!*string) {
- printk(KERN_WARNING "CIFS: invalid path to "
- "network resource\n");
- goto cifs_parse_mount_err;
- }
-
temp_len = strnlen(string, 300);
if (temp_len == 300) {
printk(KERN_WARNING "CIFS: UNC name too long\n");
if (string == NULL)
goto out_nomem;
- if (!*string) {
- printk(KERN_WARNING "CIFS: invalid domain"
- " name\n");
- goto cifs_parse_mount_err;
- } else if (strnlen(string, 256) == 256) {
+ if (strnlen(string, 256) == 256) {
printk(KERN_WARNING "CIFS: domain name too"
" long\n");
goto cifs_parse_mount_err;
if (string == NULL)
goto out_nomem;
- if (!*string) {
- printk(KERN_WARNING "CIFS: srcaddr value not"
- " specified\n");
- goto cifs_parse_mount_err;
- } else if (!cifs_convert_address(
+ if (!cifs_convert_address(
(struct sockaddr *)&vol->srcaddr,
string, strlen(string))) {
printk(KERN_WARNING "CIFS: Could not parse"
if (string == NULL)
goto out_nomem;
- if (!*string) {
- printk(KERN_WARNING "CIFS: Invalid path"
- " prefix\n");
- goto cifs_parse_mount_err;
- }
temp_len = strnlen(string, 1024);
if (string[0] != '/')
temp_len++; /* missing leading slash */
if (string == NULL)
goto out_nomem;
- if (!*string) {
- printk(KERN_WARNING "CIFS: Invalid iocharset"
- " specified\n");
- goto cifs_parse_mount_err;
- } else if (strnlen(string, 1024) >= 65) {
+ if (strnlen(string, 1024) >= 65) {
printk(KERN_WARNING "CIFS: iocharset name "
"too long.\n");
goto cifs_parse_mount_err;
if (string == NULL)
goto out_nomem;
- if (!*string) {
- printk(KERN_WARNING "CIFS: No socket option"
- " specified\n");
- goto cifs_parse_mount_err;
- }
if (strnicmp(string, "TCP_NODELAY", 11) == 0)
vol->sockopt_tcp_nodelay = 1;
break;
if (string == NULL)
goto out_nomem;
- if (!*string) {
- printk(KERN_WARNING "CIFS: Invalid (empty)"
- " netbiosname\n");
- break;
- }
-
memset(vol->source_rfc1001_name, 0x20,
RFC1001_NAME_LEN);
/*
if (string == NULL)
goto out_nomem;
- if (!*string) {
- printk(KERN_WARNING "CIFS: Empty server"
- " netbiosname specified\n");
- break;
- }
/* last byte, type, is 0x20 for server type */
memset(vol->target_rfc1001_name, 0x20,
RFC1001_NAME_LEN_WITH_NULL);
if (string == NULL)
goto out_nomem;
- if (!*string) {
- cERROR(1, "no protocol version specified"
- " after vers= mount option");
- goto cifs_parse_mount_err;
- }
-
if (strnicmp(string, "cifs", 4) == 0 ||
strnicmp(string, "1", 1) == 0) {
/* This is the default */
if (string == NULL)
goto out_nomem;
- if (!*string) {
- printk(KERN_WARNING "CIFS: no security flavor"
- " specified\n");
- break;
- }
-
if (cifs_parse_security_flavors(string, vol) != 0)
goto cifs_parse_mount_err;
break;
unsigned long s_ext_blocks;
unsigned long s_ext_extents;
#endif
- /* ext4 extent cache stats */
- unsigned long extent_cache_hits;
- unsigned long extent_cache_misses;
/* for buddy allocator */
struct ext4_group_info ***s_group_info;
ret = 1;
}
errout:
- if (!ret)
- sbi->extent_cache_misses++;
- else
- sbi->extent_cache_hits++;
trace_ext4_ext_in_cache(inode, block, ret);
spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
return ret;
if (err)
goto fix_extent_len;
/* update the extent length and mark as initialized */
- ex->ee_len = cpu_to_le32(ee_len);
+ ex->ee_len = cpu_to_le16(ee_len);
ext4_ext_try_to_merge(inode, path, ex);
err = ext4_ext_dirty(handle, inode, path + depth);
goto out;
ext4_msg(sb, KERN_ERR,
"Cannot change journaled "
"quota options when quota turned on");
- return 0;
+ return -1;
}
qname = match_strdup(args);
if (!qname) {
ext4_msg(sb, KERN_ERR,
"Not enough memory for storing quotafile name");
- return 0;
+ return -1;
}
if (sbi->s_qf_names[qtype] &&
strcmp(sbi->s_qf_names[qtype], qname)) {
ext4_msg(sb, KERN_ERR,
"%s quota file already specified", QTYPE2NAME(qtype));
kfree(qname);
- return 0;
+ return -1;
}
sbi->s_qf_names[qtype] = qname;
if (strchr(sbi->s_qf_names[qtype], '/')) {
"quotafile must be on filesystem root");
kfree(sbi->s_qf_names[qtype]);
sbi->s_qf_names[qtype] = NULL;
- return 0;
+ return -1;
}
set_opt(sb, QUOTA);
return 1;
sbi->s_qf_names[qtype]) {
ext4_msg(sb, KERN_ERR, "Cannot change journaled quota options"
" when quota turned on");
- return 0;
+ return -1;
}
/*
* The space will be released later when all options are confirmed
const struct mount_opts *m;
int arg = 0;
+#ifdef CONFIG_QUOTA
+ if (token == Opt_usrjquota)
+ return set_qf_name(sb, USRQUOTA, &args[0]);
+ else if (token == Opt_grpjquota)
+ return set_qf_name(sb, GRPQUOTA, &args[0]);
+ else if (token == Opt_offusrjquota)
+ return clear_qf_name(sb, USRQUOTA);
+ else if (token == Opt_offgrpjquota)
+ return clear_qf_name(sb, GRPQUOTA);
+#endif
if (args->from && match_int(args, &arg))
return -1;
switch (token) {
sbi->s_mount_opt |= m->mount_opt;
}
#ifdef CONFIG_QUOTA
- } else if (token == Opt_usrjquota) {
- if (!set_qf_name(sb, USRQUOTA, &args[0]))
- return -1;
- } else if (token == Opt_grpjquota) {
- if (!set_qf_name(sb, GRPQUOTA, &args[0]))
- return -1;
- } else if (token == Opt_offusrjquota) {
- if (!clear_qf_name(sb, USRQUOTA))
- return -1;
- } else if (token == Opt_offgrpjquota) {
- if (!clear_qf_name(sb, GRPQUOTA))
- return -1;
} else if (m->flags & MOPT_QFMT) {
if (sb_any_quota_loaded(sb) &&
sbi->s_jquota_fmt != m->mount_opt) {
EXT4_SB(sb)->s_sectors_written_start) >> 1)));
}
-static ssize_t extent_cache_hits_show(struct ext4_attr *a,
- struct ext4_sb_info *sbi, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%lu\n", sbi->extent_cache_hits);
-}
-
-static ssize_t extent_cache_misses_show(struct ext4_attr *a,
- struct ext4_sb_info *sbi, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%lu\n", sbi->extent_cache_misses);
-}
-
static ssize_t inode_readahead_blks_store(struct ext4_attr *a,
struct ext4_sb_info *sbi,
const char *buf, size_t count)
EXT4_RO_ATTR(delayed_allocation_blocks);
EXT4_RO_ATTR(session_write_kbytes);
EXT4_RO_ATTR(lifetime_write_kbytes);
-EXT4_RO_ATTR(extent_cache_hits);
-EXT4_RO_ATTR(extent_cache_misses);
EXT4_ATTR_OFFSET(inode_readahead_blks, 0644, sbi_ui_show,
inode_readahead_blks_store, s_inode_readahead_blks);
EXT4_RW_ATTR_SBI_UI(inode_goal, s_inode_goal);
ATTR_LIST(delayed_allocation_blocks),
ATTR_LIST(session_write_kbytes),
ATTR_LIST(lifetime_write_kbytes),
- ATTR_LIST(extent_cache_hits),
- ATTR_LIST(extent_cache_misses),
ATTR_LIST(inode_readahead_blks),
ATTR_LIST(inode_goal),
ATTR_LIST(mb_stats),
return 0;
out:
d_genocide(root);
+ shrink_dcache_parent(root);
dput(root);
return -ENOMEM;
}
p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
goto out_overflow;
- if (unlikely(*p > nlm4_failed))
+ if (unlikely(ntohl(*p) > ntohl(nlm4_failed)))
goto out_bad_xdr;
*stat = *p;
return 0;
p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
goto out_overflow;
- if (unlikely(*p > nlm_lck_denied_grace_period))
+ if (unlikely(ntohl(*p) > ntohl(nlm_lck_denied_grace_period)))
goto out_enum;
*stat = *p;
return 0;
return p;
}
-static int
+static __be32
compose_entry_fh(struct nfsd3_readdirres *cd, struct svc_fh *fhp,
const char *name, int namlen)
{
struct svc_export *exp;
struct dentry *dparent, *dchild;
- int rv = 0;
+ __be32 rv = nfserr_noent;
dparent = cd->fh.fh_dentry;
exp = cd->fh.fh_export;
if (isdotent(name, namlen)) {
if (namlen == 2) {
dchild = dget_parent(dparent);
- if (dchild == dparent) {
- /* filesystem root - cannot return filehandle for ".." */
- dput(dchild);
- return -ENOENT;
- }
+ /* filesystem root - cannot return filehandle for ".." */
+ if (dchild == dparent)
+ goto out;
} else
dchild = dget(dparent);
} else
dchild = lookup_one_len(name, dparent, namlen);
if (IS_ERR(dchild))
- return -ENOENT;
- rv = -ENOENT;
+ return rv;
if (d_mountpoint(dchild))
goto out;
- rv = fh_compose(fhp, exp, dchild, &cd->fh);
- if (rv)
- goto out;
if (!dchild->d_inode)
goto out;
- rv = 0;
+ rv = fh_compose(fhp, exp, dchild, &cd->fh);
out:
dput(dchild);
return rv;
static __be32 *encode_entryplus_baggage(struct nfsd3_readdirres *cd, __be32 *p, const char *name, int namlen)
{
struct svc_fh fh;
- int err;
+ __be32 err;
fh_init(&fh, NFS3_FHSIZE);
err = compose_entry_fh(cd, &fh, name, namlen);
struct nfsd4_setattr *setattr)
{
__be32 status = nfs_ok;
+ int err;
if (setattr->sa_iattr.ia_valid & ATTR_SIZE) {
nfs4_lock_state();
return status;
}
}
- status = fh_want_write(&cstate->current_fh);
- if (status)
- return status;
+ err = fh_want_write(&cstate->current_fh);
+ if (err)
+ return nfserrno(err);
status = nfs_ok;
status = check_attr_support(rqstp, cstate, setattr->sa_bmval,
* vfs_test_lock. (Arguably perhaps test_lock should be done with an
* inode operation.)
*/
-static int nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
+static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
{
struct file *file;
- int err;
-
- err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file);
- if (err)
- return err;
- err = vfs_test_lock(file, lock);
- nfsd_close(file);
+ __be32 err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file);
+ if (!err) {
+ err = nfserrno(vfs_test_lock(file, lock));
+ nfsd_close(file);
+ }
return err;
}
struct inode *inode;
struct file_lock file_lock;
struct nfs4_lockowner *lo;
- int error;
__be32 status;
if (locks_in_grace())
nfs4_transform_lock_offset(&file_lock);
- status = nfs_ok;
- error = nfsd_test_lock(rqstp, &cstate->current_fh, &file_lock);
- if (error) {
- status = nfserrno(error);
+ status = nfsd_test_lock(rqstp, &cstate->current_fh, &file_lock);
+ if (status)
goto out;
- }
+
if (file_lock.fl_type != F_UNLCK) {
status = nfserr_denied;
nfs4_set_lock_denied(&file_lock, &lockt->lt_denied);
for (i = 0; i < test_stateid->ts_num_ids; i++) {
stateid = kmalloc(sizeof(struct nfsd4_test_stateid_id), GFP_KERNEL);
if (!stateid) {
- status = PTR_ERR(stateid);
+ status = nfserrno(-ENOMEM);
goto out;
}
*p++ = htonl(test_stateid->ts_num_ids);
list_for_each_entry_safe(stateid, next, &test_stateid->ts_stateid_list, ts_id_list) {
- *p++ = htonl(stateid->ts_id_status);
+ *p++ = stateid->ts_id_status;
}
ADJUST_ARGS();
}
el = path_leaf_el(path);
- rec = &el->l_recs[le32_to_cpu(el->l_next_free_rec) - 1];
+ rec = &el->l_recs[le16_to_cpu(el->l_next_free_rec) - 1];
ocfs2_adjust_rightmost_records(handle, et, path, rec);
tmp_el = left_path->p_node[subtree_root].el;
blkno = left_path->p_node[subtree_root+1].bh->b_blocknr;
- for (i = 0; i < le32_to_cpu(tmp_el->l_next_free_rec); i++) {
+ for (i = 0; i < le16_to_cpu(tmp_el->l_next_free_rec); i++) {
if (le64_to_cpu(tmp_el->l_recs[i].e_blkno) == blkno) {
*cpos_end = le32_to_cpu(tmp_el->l_recs[i+1].e_cpos);
break;
}
}
- BUG_ON(i == le32_to_cpu(tmp_el->l_next_free_rec));
+ BUG_ON(i == le16_to_cpu(tmp_el->l_next_free_rec));
out:
ocfs2_free_path(left_path);
trace_ocfs2_divide_leaf_refcount_block(
(unsigned long long)ref_leaf_bh->b_blocknr,
- le32_to_cpu(rl->rl_count), le32_to_cpu(rl->rl_used));
+ le16_to_cpu(rl->rl_count), le16_to_cpu(rl->rl_used));
/*
* XXX: Improvement later.
rb = (struct ocfs2_refcount_block *)
prev_bh->b_data;
- if (le64_to_cpu(rb->rf_records.rl_used) +
+ if (le16_to_cpu(rb->rf_records.rl_used) +
recs_add >
le16_to_cpu(rb->rf_records.rl_count))
ref_blocks++;
if (prev_bh) {
rb = (struct ocfs2_refcount_block *)prev_bh->b_data;
- if (le64_to_cpu(rb->rf_records.rl_used) + recs_add >
+ if (le16_to_cpu(rb->rf_records.rl_used) + recs_add >
le16_to_cpu(rb->rf_records.rl_count))
ref_blocks++;
* one will split a refcount rec, so totally we need
* clusters * 2 new refcount rec.
*/
- if (le64_to_cpu(rb->rf_records.rl_used) + clusters * 2 >
+ if (le16_to_cpu(rb->rf_records.rl_used) + clusters * 2 >
le16_to_cpu(rb->rf_records.rl_count))
ref_blocks++;
ret = ocfs2_free_clusters(handle, cluster_ac->ac_inode,
cluster_ac->ac_bh,
le64_to_cpu(rec->e_blkno),
- le32_to_cpu(rec->e_leaf_clusters));
+ le16_to_cpu(rec->e_leaf_clusters));
if (ret)
mlog_errno(ret);
/* Try all the clusters to free */
{
unsigned int bpc = le16_to_cpu(cl->cl_bpc);
unsigned int bitoff = le32_to_cpu(rec->e_cpos) * bpc;
- unsigned int bitcount = le32_to_cpu(rec->e_leaf_clusters) * bpc;
+ unsigned int bitcount = le16_to_cpu(rec->e_leaf_clusters) * bpc;
if (res->sr_bit_offset < bitoff)
return 0;
#ifndef arch_irq_stat
#define arch_irq_stat() 0
#endif
-#ifndef arch_idle_time
-#define arch_idle_time(cpu) 0
-#endif
+
+#ifdef arch_idle_time
+
+static cputime64_t get_idle_time(int cpu)
+{
+ cputime64_t idle;
+
+ idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE];
+ if (cpu_online(cpu) && !nr_iowait_cpu(cpu))
+ idle += arch_idle_time(cpu);
+ return idle;
+}
+
+static cputime64_t get_iowait_time(int cpu)
+{
+ cputime64_t iowait;
+
+ iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT];
+ if (cpu_online(cpu) && nr_iowait_cpu(cpu))
+ iowait += arch_idle_time(cpu);
+ return iowait;
+}
+
+#else
static u64 get_idle_time(int cpu)
{
u64 idle, idle_time = get_cpu_idle_time_us(cpu, NULL);
- if (idle_time == -1ULL) {
+ if (idle_time == -1ULL)
/* !NO_HZ so we can rely on cpustat.idle */
idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE];
- idle += arch_idle_time(cpu);
- } else
+ else
idle = usecs_to_cputime64(idle_time);
return idle;
return iowait;
}
+#endif
+
static int show_stat(struct seq_file *p, void *v)
{
int i, j;
else
parent_sd = &sysfs_root;
+ if (!parent_sd)
+ return -ENOENT;
+
if (sysfs_ns_type(parent_sd))
ns = kobj->ktype->namespace(kobj);
type = sysfs_read_ns_type(kobj);
dup_name = sd->s_name;
sd->s_name = new_name;
- sd->s_hash = sysfs_name_hash(sd->s_ns, sd->s_name);
}
/* Move to the appropriate place in the appropriate directories rbtree. */
sysfs_get(new_parent_sd);
sysfs_put(sd->s_parent);
sd->s_ns = new_ns;
+ sd->s_hash = sysfs_name_hash(sd->s_ns, sd->s_name);
sd->s_parent = new_parent_sd;
sysfs_link_sibling(sd);
/* Updates may happen before the object has been instantiated */
if (unlikely(update && !kobj->sd))
return -EINVAL;
-
+ if (!grp->attrs) {
+ WARN(1, "sysfs: attrs not set by subsystem for group: %s/%s\n",
+ kobj->name, grp->name ? grp->name : "");
+ return -EINVAL;
+ }
if (grp->name) {
error = sysfs_create_subdir(kobj, grp->name, &sd);
if (error)
struct drm_exynos_vidi_connection {
unsigned int connection;
unsigned int extensions;
- uint64_t *edid;
+ uint64_t edid;
};
struct drm_exynos_plane_set_zpos {
/* memory type definitions. */
enum e_drm_exynos_gem_mem_type {
/* Physically Non-Continuous memory. */
- EXYNOS_BO_NONCONTIG = 1 << 0
+ EXYNOS_BO_NONCONTIG = 1 << 0,
+ EXYNOS_BO_MASK = EXYNOS_BO_NONCONTIG
};
#define DRM_EXYNOS_GEM_CREATE 0x00
struct device dev;
struct resource res;
struct clk *pclk;
- struct regulator *vcore;
u64 dma_mask;
unsigned int periphid;
unsigned int irq[AMBA_NR_IRQS];
#define amba_pclk_disable(d) \
do { if (!IS_ERR((d)->pclk)) clk_disable((d)->pclk); } while (0)
-#define amba_vcore_enable(d) \
- (IS_ERR((d)->vcore) ? 0 : regulator_enable((d)->vcore))
-
-#define amba_vcore_disable(d) \
- do { if (!IS_ERR((d)->vcore)) regulator_disable((d)->vcore); } while (0)
-
/* Some drivers don't use the struct amba_device */
#define AMBA_CONFIG_BITS(a) (((a) >> 24) & 0xff)
#define AMBA_REV_BITS(a) (((a) >> 20) & 0x0f)
#ifndef _SSP_PL022_H
#define _SSP_PL022_H
+#include <linux/types.h>
+
/**
* whether SSP is in loopback mode or not
*/
(1 << QUEUE_FLAG_SAME_COMP) | \
(1 << QUEUE_FLAG_ADD_RANDOM))
-static inline int queue_is_locked(struct request_queue *q)
+static inline void queue_lockdep_assert_held(struct request_queue *q)
{
-#ifdef CONFIG_SMP
- spinlock_t *lock = q->queue_lock;
- return lock && spin_is_locked(lock);
-#else
- return 1;
-#endif
+ if (q->queue_lock)
+ lockdep_assert_held(q->queue_lock);
}
static inline void queue_flag_set_unlocked(unsigned int flag,
static inline int queue_flag_test_and_clear(unsigned int flag,
struct request_queue *q)
{
- WARN_ON_ONCE(!queue_is_locked(q));
+ queue_lockdep_assert_held(q);
if (test_bit(flag, &q->queue_flags)) {
__clear_bit(flag, &q->queue_flags);
static inline int queue_flag_test_and_set(unsigned int flag,
struct request_queue *q)
{
- WARN_ON_ONCE(!queue_is_locked(q));
+ queue_lockdep_assert_held(q);
if (!test_bit(flag, &q->queue_flags)) {
__set_bit(flag, &q->queue_flags);
static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
- WARN_ON_ONCE(!queue_is_locked(q));
+ queue_lockdep_assert_held(q);
__set_bit(flag, &q->queue_flags);
}
static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
- WARN_ON_ONCE(!queue_is_locked(q));
+ queue_lockdep_assert_held(q);
__clear_bit(flag, &q->queue_flags);
}
d->state_use_accessors &= ~IRQD_IRQ_INPROGRESS;
}
+static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
+{
+ return d->hwirq;
+}
+
/**
* struct irq_chip - hardware interrupt chip descriptor
*
/* Number of irqs reserved for a legacy isa controller */
#define NUM_ISA_INTERRUPTS 16
-/* This type is the placeholder for a hardware interrupt number. It has to
- * be big enough to enclose whatever representation is used by a given
- * platform.
- */
-typedef unsigned long irq_hw_number_t;
-
/**
* struct irq_domain_ops - Methods for irq_domain objects
* @match: Match an interrupt controller device node to a host, returns
unsigned int size;
unsigned int *revmap;
} linear;
+ struct {
+ unsigned int max_irq;
+ } nomap;
struct radix_tree_root tree;
} revmap_data;
const struct irq_domain_ops *ops;
const struct irq_domain_ops *ops,
void *host_data);
struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
+ unsigned int max_irq,
const struct irq_domain_ops *ops,
void *host_data);
struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
extern struct irq_domain *irq_find_host(struct device_node *node);
extern void irq_set_default_host(struct irq_domain *host);
-extern void irq_set_virq_count(unsigned int count);
static inline struct irq_domain *irq_domain_add_legacy_isa(
struct device_node *of_node,
}
extern struct irq_domain *irq_find_host(struct device_node *node);
extern void irq_set_default_host(struct irq_domain *host);
-extern void irq_set_virq_count(unsigned int count);
extern unsigned int irq_create_mapping(struct irq_domain *host,
#include <generated/autoconf.h>
/*
- * Helper macros to use CONFIG_ options in C expressions. Note that
+ * Helper macros to use CONFIG_ options in C/CPP expressions. Note that
* these only work with boolean and tristate options.
*/
/*
+ * Getting something that works in C and CPP for an arg that may or may
+ * not be defined is tricky. Here, if we have "#define CONFIG_BOOGER 1"
+ * we match on the placeholder define, insert the "0," for arg1 and generate
+ * the triplet (0, 1, 0). Then the last step cherry picks the 2nd arg (a one).
+ * When CONFIG_BOOGER is not defined, we generate a (... 1, 0) pair, and when
+ * the last step cherry picks the 2nd arg, we get a zero.
+ */
+#define __ARG_PLACEHOLDER_1 0,
+#define config_enabled(cfg) _config_enabled(cfg)
+#define _config_enabled(value) __config_enabled(__ARG_PLACEHOLDER_##value)
+#define __config_enabled(arg1_or_junk) ___config_enabled(arg1_or_junk 1, 0)
+#define ___config_enabled(__ignored, val, ...) val
+
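A step-by-step expansion may help; this is a sketch using the hypothetical CONFIG_BOOGER from the comment above:

/*
 * Defined case, i.e. "#define CONFIG_BOOGER 1":
 *   config_enabled(CONFIG_BOOGER)
 *     -> _config_enabled(1)
 *     -> __config_enabled(__ARG_PLACEHOLDER_1)               placeholder expands to "0,"
 *     -> ___config_enabled(0, 1, 0)                          second argument -> 1
 *
 * Undefined case, i.e. CONFIG_BOOGER never defined:
 *   config_enabled(CONFIG_BOOGER)
 *     -> _config_enabled(CONFIG_BOOGER)
 *     -> __config_enabled(__ARG_PLACEHOLDER_CONFIG_BOOGER)   not a known macro
 *     -> ___config_enabled(__ARG_PLACEHOLDER_CONFIG_BOOGER 1, 0)   second argument -> 0
 */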
+/*
* IS_ENABLED(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y' or 'm',
* 0 otherwise.
*
*/
#define IS_ENABLED(option) \
- (__enabled_ ## option || __enabled_ ## option ## _MODULE)
+ (config_enabled(option) || config_enabled(option##_MODULE))
/*
* IS_BUILTIN(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y', 0
* otherwise. For boolean options, this is equivalent to
* IS_ENABLED(CONFIG_FOO).
*/
-#define IS_BUILTIN(option) __enabled_ ## option
+#define IS_BUILTIN(option) config_enabled(option)
/*
* IS_MODULE(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'm', 0
* otherwise.
*/
-#define IS_MODULE(option) __enabled_ ## option ## _MODULE
+#define IS_MODULE(option) config_enabled(option##_MODULE)
#endif /* __LINUX_KCONFIG_H */
struct xt_table *table);
/* Check for an extension */
-extern int ip6t_ext_hdr(u8 nexthdr);
+static inline int
+ip6t_ext_hdr(u8 nexthdr)
+{ return (nexthdr == IPPROTO_HOPOPTS) ||
+ (nexthdr == IPPROTO_ROUTING) ||
+ (nexthdr == IPPROTO_FRAGMENT) ||
+ (nexthdr == IPPROTO_ESP) ||
+ (nexthdr == IPPROTO_AH) ||
+ (nexthdr == IPPROTO_NONE) ||
+ (nexthdr == IPPROTO_DSTOPTS);
+}
+
/* find specified header and get offset to it */
extern int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
int target, unsigned short *fragoff);
#define UPF_CONS_FLOW ((__force upf_t) (1 << 23))
#define UPF_SHARE_IRQ ((__force upf_t) (1 << 24))
#define UPF_EXAR_EFR ((__force upf_t) (1 << 25))
-#define UPF_IIR_ONCE ((__force upf_t) (1 << 26))
+#define UPF_BUG_THRE ((__force upf_t) (1 << 26))
/* The exact UART type is known and should not be probed. */
#define UPF_FIXED_TYPE ((__force upf_t) (1 << 27))
#define UPF_BOOT_AUTOCONF ((__force upf_t) (1 << 28))
union {
__u32 mark;
__u32 dropcount;
+ __u32 avail_size;
};
sk_buff_data_t transport_header;
}
/**
+ * skb_availroom - bytes at buffer end
+ * @skb: buffer to check
+ *
+ * Return the number of bytes of free space at the tail of an sk_buff
+ * allocated by sk_stream_alloc()
+ */
+static inline int skb_availroom(const struct sk_buff *skb)
+{
+ return skb_is_nonlinear(skb) ? 0 : skb->avail_size - skb->len;
+}
+
+/**
* skb_reserve - adjust headroom
* @skb: buffer to alter
* @len: bytes to move
#include <linux/compiler.h>
+#ifdef __KERNEL__
+
#undef NULL
-#if defined(__cplusplus)
-#define NULL 0
-#else
#define NULL ((void *)0)
-#endif
-
-#ifdef __KERNEL__
enum {
false = 0,
typedef phys_addr_t resource_size_t;
+/*
+ * This type is the placeholder for a hardware interrupt number. It has to be
+ * big enough to enclose whatever representation is used by a given platform.
+ */
+typedef unsigned long irq_hw_number_t;
+
typedef struct {
int counter;
} atomic_t;
/* parity check flag */
#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))
-enum port_dev_state {
- PORT_UNREGISTERED,
- PORT_REGISTERING,
- PORT_REGISTERED,
- PORT_UNREGISTERING,
-};
-
/* USB serial flags */
#define USB_SERIAL_WRITE_BUSY 0
char throttle_req;
unsigned long sysrq; /* sysrq timeout */
struct device dev;
- enum port_dev_state dev_state;
};
#define to_usb_serial_port(d) container_of(d, struct usb_serial_port, dev)
*/
#define VGA_DEFAULT_DEVICE (NULL)
+struct pci_dev;
+
/* For use by clients */
/**
HCI_SERVICE_CACHE,
HCI_LINK_KEYS,
HCI_DEBUG_KEYS,
+ HCI_UNREGISTER,
HCI_LE_SCAN,
HCI_SSP_ENABLED,
#define HCI_DEV_NONE 0xffff
#define HCI_CHANNEL_RAW 0
-#define HCI_CHANNEL_CONTROL 1
#define HCI_CHANNEL_MONITOR 2
+#define HCI_CHANNEL_CONTROL 3
struct hci_filter {
unsigned long type_mask;
static inline bool hci_conn_ssp_enabled(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;
- return (test_bit(HCI_SSP_ENABLED, &hdev->flags) &&
+ return (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
test_bit(HCI_CONN_SSP_ENABLED, &conn->flags));
}
static inline bool eir_has_data_type(u8 *data, size_t data_len, u8 type)
{
- u8 field_len;
- size_t parsed;
+ size_t parsed = 0;
- for (parsed = 0; parsed < data_len - 1; parsed += field_len) {
- field_len = data[0];
+ if (data_len < 2)
+ return false;
+
+ while (parsed < data_len - 1) {
+ u8 field_len = data[0];
if (field_len == 0)
break;
#define MGMT_OP_SET_DISCOVERABLE 0x0006
struct mgmt_cp_set_discoverable {
__u8 val;
- __u16 timeout;
+ __le16 timeout;
} __packed;
#define MGMT_SET_DISCOVERABLE_SIZE 3
ieee80211_get_tx_rate(const struct ieee80211_hw *hw,
const struct ieee80211_tx_info *c)
{
- if (WARN_ON(c->control.rates[0].idx < 0))
+ if (WARN_ON_ONCE(c->control.rates[0].idx < 0))
return NULL;
return &hw->wiphy->bands[c->band]->bitrates[c->control.rates[0].idx];
}
static inline struct scsi_driver *scsi_cmd_to_driver(struct scsi_cmnd *cmd)
{
+ if (!cmd->request->rq_disk)
+ return NULL;
+
return *(struct scsi_driver **)cmd->request->rq_disk->private_data;
}
help
This option will show the mapping relationship between hardware irq
numbers and Linux irq numbers. The mapping is exposed via debugfs
- in the file "virq_mapping".
+ in the file "irq_domain_mapping".
If you don't know what this means you don't need it.
static DEFINE_MUTEX(irq_domain_mutex);
static DEFINE_MUTEX(revmap_trees_mutex);
-static unsigned int irq_virq_count = NR_IRQS;
static struct irq_domain *irq_default_domain;
/**
}
struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
+ unsigned int max_irq,
const struct irq_domain_ops *ops,
void *host_data)
{
struct irq_domain *domain = irq_domain_alloc(of_node,
IRQ_DOMAIN_MAP_NOMAP, ops, host_data);
- if (domain)
+ if (domain) {
+ domain->revmap_data.nomap.max_irq = max_irq ? max_irq : ~0;
irq_domain_add(domain);
+ }
return domain;
}
irq_default_domain = domain;
}
-/**
- * irq_set_virq_count() - Set the maximum number of linux irqs
- * @count: number of linux irqs, capped with NR_IRQS
- *
- * This is mainly for use by platforms like iSeries who want to program
- * the virtual irq number in the controller to avoid the reverse mapping
- */
-void irq_set_virq_count(unsigned int count)
-{
- pr_debug("irq: Trying to set virq count to %d\n", count);
-
- BUG_ON(count < NUM_ISA_INTERRUPTS);
- if (count < NR_IRQS)
- irq_virq_count = count;
-}
-
static int irq_setup_virq(struct irq_domain *domain, unsigned int virq,
irq_hw_number_t hwirq)
{
pr_debug("irq: create_direct virq allocation failed\n");
return 0;
}
- if (virq >= irq_virq_count) {
+ if (virq >= domain->revmap_data.nomap.max_irq) {
pr_err("ERROR: no free irqs available below %i maximum\n",
- irq_virq_count);
+ domain->revmap_data.nomap.max_irq);
irq_free_desc(virq);
return 0;
}
-
pr_debug("irq: create_direct obtained virq %d\n", virq);
if (irq_setup_virq(domain, virq, virq)) {
unsigned int irq_create_mapping(struct irq_domain *domain,
irq_hw_number_t hwirq)
{
- unsigned int virq, hint;
+ unsigned int hint;
+ int virq;
pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);
return irq_domain_legacy_revmap(domain, hwirq);
/* Allocate a virtual interrupt number */
- hint = hwirq % irq_virq_count;
+ hint = hwirq % nr_irqs;
if (hint == 0)
hint++;
virq = irq_alloc_desc_from(hint, 0);
- if (!virq)
+ if (virq <= 0)
virq = irq_alloc_desc_from(1, 0);
- if (!virq) {
+ if (virq <= 0) {
pr_debug("irq: -> virq allocation failed\n");
return 0;
}
irq_hw_number_t hwirq)
{
unsigned int i;
- unsigned int hint = hwirq % irq_virq_count;
+ unsigned int hint = hwirq % nr_irqs;
/* Look for default domain if necessary */
if (domain == NULL)
if (data && (data->domain == domain) && (data->hwirq == hwirq))
return i;
i++;
- if (i >= irq_virq_count)
+ if (i >= nr_irqs)
i = 1;
} while(i != hint);
return 0;
void *data;
int i;
- seq_printf(m, "%-5s %-7s %-15s %-18s %s\n", "virq", "hwirq",
- "chip name", "chip data", "domain name");
+ seq_printf(m, "%-5s %-7s %-15s %-*s %s\n", "irq", "hwirq",
+ "chip name", (int)(2 * sizeof(void *) + 2), "chip data",
+ "domain name");
for (i = 1; i < nr_irqs; i++) {
desc = irq_to_desc(i);
seq_printf(m, "%-15s ", p);
data = irq_desc_get_chip_data(desc);
- seq_printf(m, "0x%16p ", data);
+ seq_printf(m, data ? "0x%p " : " %p ", data);
if (desc->irq_data.domain && desc->irq_data.domain->of_node)
p = desc->irq_data.domain->of_node->full_name;
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
+#include <linux/irqflags.h>
#include <asm/processor.h>
/*
if (value) {
if(copy_from_user(&set_buffer, value, sizeof(set_buffer)))
return -EFAULT;
- } else
- memset((char *) &set_buffer, 0, sizeof(set_buffer));
+ } else {
+ memset(&set_buffer, 0, sizeof(set_buffer));
+ printk_once(KERN_WARNING "%s calls setitimer() with a NULL new_value pointer. "
+ "Support for this misfeature will be removed\n",
+ current->comm);
+ }
error = do_setitimer(which, &set_buffer, ovalue ? &get_buffer : NULL);
if (error || !ovalue)
/*
* Avoid nested stack-dumping if a panic occurs during oops processing
*/
- if (!oops_in_progress)
+ if (!test_taint(TAINT_DIE) && oops_in_progress <= 1)
dump_stack();
#endif
#
# Timer subsystem related configuration options
#
+
+# Core internal switch. Selected by NO_HZ / HIGH_RES_TIMERS. This is
+# only related to the tick functionality. Oneshot clockevent devices
+# are supported independent of this.
config TICK_ONESHOT
bool
unsigned long flags;
raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
+
+ tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
+
if (cpumask_empty(tick_get_broadcast_mask()))
goto end;
- tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
bc = tick_broadcast_device.evtdev;
if (bc)
tick_broadcast_setup_oneshot(bc);
hrtimer_get_expires(&ts->sched_timer), 0))
break;
}
- /* Update jiffies and reread time */
- tick_do_update_jiffies64(now);
+ /* Reread time and update jiffies */
now = ktime_get();
+ tick_do_update_jiffies64(now);
}
}
/* be noisy on error issues */
if (error == -EEXIST)
- printk(KERN_ERR "%s failed for %s with "
- "-EEXIST, don't try to register things with "
- "the same name in the same directory.\n",
- __func__, kobject_name(kobj));
+ WARN(1, "%s failed for %s with "
+ "-EEXIST, don't try to register things with "
+ "the same name in the same directory.\n",
+ __func__, kobject_name(kobj));
else
- printk(KERN_ERR "%s failed for %s (%d)\n",
- __func__, kobject_name(kobj), error);
- dump_stack();
+ WARN(1, "%s failed for %s (error: %d parent: %s)\n",
+ __func__, kobject_name(kobj), error,
+ parent ? kobject_name(parent) : "'none'");
} else
kobj->state_in_sysfs = 1;
* so no worry about deadlock.
*/
page = pte_page(entry);
+ get_page(page);
if (page != pagecache_page)
lock_page(page);
}
if (page != pagecache_page)
unlock_page(page);
+ put_page(page);
out_mutex:
mutex_unlock(&hugetlb_instantiation_mutex);
if (action == CPU_ONLINE)
return NOTIFY_OK;
- if ((action != CPU_DEAD) || action != CPU_DEAD_FROZEN)
+ if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
return NOTIFY_OK;
for_each_mem_cgroup(iter)
goto try_to_free;
cond_resched();
/* "ret" should also be checked to ensure all lists are empty. */
- } while (memcg->res.usage > 0 || ret);
+ } while (res_counter_read_u64(&memcg->res, RES_USAGE) > 0 || ret);
out:
css_put(&memcg->css);
return ret;
lru_add_drain_all();
/* try to free all pages in this cgroup */
shrink = 1;
- while (nr_retries && memcg->res.usage > 0) {
+ while (nr_retries && res_counter_read_u64(&memcg->res, RES_USAGE) > 0) {
int progress;
if (signal_pending(current)) {
* with multiple processes reclaiming pages, the total
* freeing target can get unreasonably large.
*/
- if (nr_reclaimed >= nr_to_reclaim)
- nr_to_reclaim = 0;
- else
- nr_to_reclaim -= nr_reclaimed;
-
- if (!nr_to_reclaim && priority < DEF_PRIORITY)
+ if (nr_reclaimed >= nr_to_reclaim && priority < DEF_PRIORITY)
break;
}
blk_finish_plug(&plug);
hci_req_lock(hdev);
+ if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
+ ret = -ENODEV;
+ goto done;
+ }
+
if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
ret = -ERFKILL;
goto done;
BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
+ set_bit(HCI_UNREGISTER, &hdev->dev_flags);
+
write_lock(&hci_dev_list_lock);
list_del(&hdev->list);
write_unlock(&hci_dev_list_lock);
if (chan->retry_count >= chan->remote_max_tx) {
l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
l2cap_chan_unlock(chan);
+ l2cap_chan_put(chan);
return;
}
l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
l2cap_chan_unlock(chan);
+ l2cap_chan_put(chan);
}
static void l2cap_retrans_timeout(struct work_struct *work)
l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
l2cap_chan_unlock(chan);
+ l2cap_chan_put(chan);
}
static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
}
if (la.l2_cid)
- err = l2cap_add_scid(chan, la.l2_cid);
+ err = l2cap_add_scid(chan, __le16_to_cpu(la.l2_cid));
else
err = l2cap_add_psm(chan, &la.l2_bdaddr, la.l2_psm);
if (la.l2_cid && la.l2_psm)
return -EINVAL;
- err = l2cap_chan_connect(chan, la.l2_psm, la.l2_cid, &la.l2_bdaddr);
+ err = l2cap_chan_connect(chan, la.l2_psm, __le16_to_cpu(la.l2_cid),
+ &la.l2_bdaddr);
if (err)
return err;
if (cp->val) {
type = PAGE_SCAN_TYPE_INTERLACED;
- acp.interval = 0x0024; /* 22.5 msec page scan interval */
+
+ /* 22.5 msec page scan interval */
+ acp.interval = __constant_cpu_to_le16(0x0024);
} else {
type = PAGE_SCAN_TYPE_STANDARD; /* default */
- acp.interval = 0x0800; /* default 1.28 sec page scan */
+
+ /* default 1.28 sec page scan */
+ acp.interval = __constant_cpu_to_le16(0x0800);
}
- acp.window = 0x0012; /* default 11.25 msec page scan window */
+ /* default 11.25 msec page scan window */
+ acp.window = __constant_cpu_to_le16(0x0012);
err = hci_send_cmd(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY, sizeof(acp),
&acp);
name, name_len);
if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
- eir_len = eir_append_data(&ev->eir[eir_len], eir_len,
+ eir_len = eir_append_data(ev->eir, eir_len,
EIR_CLASS_OF_DEV, dev_class, 3);
put_unaligned_le16(eir_len, &ev->eir_len);
hlist_del_rcu(&mp->hlist[mdb->ver]);
mdb->size--;
- del_timer(&mp->query_timer);
call_rcu_bh(&mp->rcu, br_multicast_free_group);
out:
rcu_assign_pointer(*pp, p->next);
hlist_del_init(&p->mglist);
del_timer(&p->timer);
- del_timer(&p->query_timer);
call_rcu_bh(&p->rcu, br_multicast_free_pg);
if (!mp->ports && !mp->mglist &&
return NULL;
}
-static void br_multicast_send_group_query(struct net_bridge_mdb_entry *mp)
-{
- struct net_bridge *br = mp->br;
- struct sk_buff *skb;
-
- skb = br_multicast_alloc_query(br, &mp->addr);
- if (!skb)
- goto timer;
-
- netif_rx(skb);
-
-timer:
- if (++mp->queries_sent < br->multicast_last_member_count)
- mod_timer(&mp->query_timer,
- jiffies + br->multicast_last_member_interval);
-}
-
-static void br_multicast_group_query_expired(unsigned long data)
-{
- struct net_bridge_mdb_entry *mp = (void *)data;
- struct net_bridge *br = mp->br;
-
- spin_lock(&br->multicast_lock);
- if (!netif_running(br->dev) || !mp->mglist ||
- mp->queries_sent >= br->multicast_last_member_count)
- goto out;
-
- br_multicast_send_group_query(mp);
-
-out:
- spin_unlock(&br->multicast_lock);
-}
-
-static void br_multicast_send_port_group_query(struct net_bridge_port_group *pg)
-{
- struct net_bridge_port *port = pg->port;
- struct net_bridge *br = port->br;
- struct sk_buff *skb;
-
- skb = br_multicast_alloc_query(br, &pg->addr);
- if (!skb)
- goto timer;
-
- br_deliver(port, skb);
-
-timer:
- if (++pg->queries_sent < br->multicast_last_member_count)
- mod_timer(&pg->query_timer,
- jiffies + br->multicast_last_member_interval);
-}
-
-static void br_multicast_port_group_query_expired(unsigned long data)
-{
- struct net_bridge_port_group *pg = (void *)data;
- struct net_bridge_port *port = pg->port;
- struct net_bridge *br = port->br;
-
- spin_lock(&br->multicast_lock);
- if (!netif_running(br->dev) || hlist_unhashed(&pg->mglist) ||
- pg->queries_sent >= br->multicast_last_member_count)
- goto out;
-
- br_multicast_send_port_group_query(pg);
-
-out:
- spin_unlock(&br->multicast_lock);
-}
-
static struct net_bridge_mdb_entry *br_multicast_get_group(
struct net_bridge *br, struct net_bridge_port *port,
struct br_ip *group, int hash)
mp->addr = *group;
setup_timer(&mp->timer, br_multicast_group_expired,
(unsigned long)mp);
- setup_timer(&mp->query_timer, br_multicast_group_query_expired,
- (unsigned long)mp);
hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]);
mdb->size++;
hlist_add_head(&p->mglist, &port->mglist);
setup_timer(&p->timer, br_multicast_port_group_expired,
(unsigned long)p);
- setup_timer(&p->query_timer, br_multicast_port_group_query_expired,
- (unsigned long)p);
rcu_assign_pointer(*pp, p);
time_after(mp->timer.expires, time) :
try_to_del_timer_sync(&mp->timer) >= 0)) {
mod_timer(&mp->timer, time);
-
- mp->queries_sent = 0;
- mod_timer(&mp->query_timer, now);
}
goto out;
time_after(p->timer.expires, time) :
try_to_del_timer_sync(&p->timer) >= 0)) {
mod_timer(&p->timer, time);
-
- p->queries_sent = 0;
- mod_timer(&p->query_timer, now);
}
break;
hlist_for_each_entry_safe(mp, p, n, &mdb->mhash[i],
hlist[ver]) {
del_timer(&mp->timer);
- del_timer(&mp->query_timer);
call_rcu_bh(&mp->rcu, br_multicast_free_group);
}
}
struct hlist_node mglist;
struct rcu_head rcu;
struct timer_list timer;
- struct timer_list query_timer;
struct br_ip addr;
- u32 queries_sent;
};
struct net_bridge_mdb_entry
struct net_bridge_port_group __rcu *ports;
struct rcu_head rcu;
struct timer_list timer;
- struct timer_list query_timer;
struct br_ip addr;
bool mglist;
- u32 queries_sent;
};
struct net_bridge_mdb_htable
goto adjust_others;
}
- data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
+ data = kmalloc(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
+ gfp_mask);
if (!data)
goto nodata;
+ size = SKB_WITH_OVERHEAD(ksize(data));
/* Copy only real data... and, alas, header. This should be
* optimized for the cases when header is void.
iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
if (iph == NULL)
- return -NF_DROP;
+ return -NF_ACCEPT;
/* Conntrack defragments packets, we might still see fragments
* inside ICMP packets though. */
if (iph->frag_off & htons(IP_OFFSET))
- return -NF_DROP;
+ return -NF_ACCEPT;
*dataoff = nhoff + (iph->ihl << 2);
*protonum = iph->protocol;
+ /* Check bogus IP headers */
+ if (*dataoff > skb->len) {
+ pr_debug("nf_conntrack_ipv4: bogus IPv4 packet: "
+ "nhoff %u, ihl %u, skblen %u\n",
+ nhoff, iph->ihl << 2, skb->len);
+ return -NF_ACCEPT;
+ }
+
return NF_ACCEPT;
}
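As a quick sanity check of the new test (hypothetical numbers): with nhoff == 0 and an IPv4 header claiming ihl == 15, *dataoff becomes 60; if the skb only carries 28 bytes, 60 > 28 trips the check and the packet is handed on untracked (-NF_ACCEPT) rather than letting later conntrack code read past the end of the buffer.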
skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
if (skb) {
if (sk_wmem_schedule(sk, skb->truesize)) {
+ skb_reserve(skb, sk->sk_prot->max_header);
/*
* Make sure that we have exactly size bytes
* available to the caller, no more, no less.
*/
- skb_reserve(skb, skb_tailroom(skb) - size);
+ skb->avail_size = size;
return skb;
}
__kfree_skb(skb);
copy = seglen;
/* Where to copy to? */
- if (skb_tailroom(skb) > 0) {
+ if (skb_availroom(skb) > 0) {
/* We have some space in skb head. Superb! */
- if (copy > skb_tailroom(skb))
- copy = skb_tailroom(skb);
+ copy = min_t(int, copy, skb_availroom(skb));
err = skb_add_data_nocache(sk, skb, from, copy);
if (err)
goto do_fault;
tcp_init_mem(&init_net);
/* Set per-socket limits to no more than 1/128 the pressure threshold */
- limit = nr_free_buffer_pages() << (PAGE_SHIFT - 10);
- limit = max(limit, 128UL);
+ limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7);
max_share = min(4UL*1024*1024, limit);
sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
if (!win_dep) {
m -= (new_sample >> 3);
new_sample += m;
- } else if (m < new_sample)
- new_sample = m << 3;
+ } else {
+ m <<= 3;
+ if (m < new_sample)
+ new_sample = m;
+ }
} else {
/* No previous measure. */
new_sample = m << 3;
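A small numeric illustration of why the shift order matters in the changed branch (hypothetical values): new_sample holds the estimate left-shifted by 3, so a stored 80 stands for 10. Given a new measurement m = 12, the old code compared 12 < 80 and overwrote the stored value with 96 (i.e. 12) even though 12 is larger than the current 10; shifting first compares 96 < 80, which fails, so the smaller estimate is correctly kept.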
/* Punt if not enough space exists in the first SKB for
* the data in the second
*/
- if (skb->len > skb_tailroom(to))
+ if (skb->len > skb_availroom(to))
break;
if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp)))
Hence the start of any table is given by get_table() below. */
-/* Check for an extension */
-int
-ip6t_ext_hdr(u8 nexthdr)
-{
- return (nexthdr == IPPROTO_HOPOPTS) ||
- (nexthdr == IPPROTO_ROUTING) ||
- (nexthdr == IPPROTO_FRAGMENT) ||
- (nexthdr == IPPROTO_ESP) ||
- (nexthdr == IPPROTO_AH) ||
- (nexthdr == IPPROTO_NONE) ||
- (nexthdr == IPPROTO_DSTOPTS);
-}
-
/* Returns whether matches rule or not. */
/* Performance critical - called for every packet */
static inline bool
EXPORT_SYMBOL(ip6t_register_table);
EXPORT_SYMBOL(ip6t_unregister_table);
EXPORT_SYMBOL(ip6t_do_table);
-EXPORT_SYMBOL(ip6t_ext_hdr);
EXPORT_SYMBOL(ipv6_find_hdr);
module_init(ip6_tables_init);
*/
printk(KERN_DEBUG "%s: waiting for beacon from %pM\n",
sdata->name, ifmgd->bssid);
- assoc_data->timeout = jiffies +
- TU_TO_EXP_TIME(req->bss->beacon_interval);
+ assoc_data->timeout = TU_TO_EXP_TIME(req->bss->beacon_interval);
} else {
assoc_data->have_beacon = true;
assoc_data->sent_assoc = false;
return 0;
err_timeout:
- nf_conntrack_timeout_fini(net);
+ nf_conntrack_ecache_fini(net);
err_ecache:
nf_conntrack_tstamp_fini(net);
err_tstamp:
* Let's try to use the data from the packet.
*/
sender->td_end = end;
- win <<= sender->td_scale;
- sender->td_maxwin = (win == 0 ? 1 : win);
+ swin = win << sender->td_scale;
+ sender->td_maxwin = (swin == 0 ? 1 : swin);
sender->td_maxend = end + sender->td_maxwin;
/*
* We haven't seen traffic in the other direction yet
while (remaining_len > 0) {
- frag_len = min_t(u16, local->remote_miu, remaining_len);
+ frag_len = min_t(size_t, local->remote_miu, remaining_len);
pr_debug("Fragment %zd bytes remaining %zd",
frag_len, remaining_len);
release_sock(sk);
remaining_len -= frag_len;
- msg_ptr += len;
+ msg_ptr += frag_len;
}
kfree(msg_data);
goto bad_res;
}
+ if (!netif_running(netdev)) {
+ result = -ENETDOWN;
+ goto bad_res;
+ }
+
nla_for_each_nested(nl_txq_params,
info->attrs[NL80211_ATTR_WIPHY_TXQ_PARAMS],
rem_txq_params) {
.doit = nl80211_get_key,
.policy = nl80211_policy,
.flags = GENL_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV |
+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
NL80211_FLAG_NEED_RTNL,
},
{
.policy = nl80211_policy,
.flags = GENL_ADMIN_PERM,
.doit = nl80211_set_beacon,
- .internal_flags = NL80211_FLAG_NEED_NETDEV |
+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
NL80211_FLAG_NEED_RTNL,
},
{
.policy = nl80211_policy,
.flags = GENL_ADMIN_PERM,
.doit = nl80211_start_ap,
- .internal_flags = NL80211_FLAG_NEED_NETDEV |
+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
NL80211_FLAG_NEED_RTNL,
},
{
.policy = nl80211_policy,
.flags = GENL_ADMIN_PERM,
.doit = nl80211_stop_ap,
- .internal_flags = NL80211_FLAG_NEED_NETDEV |
+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
NL80211_FLAG_NEED_RTNL,
},
{
.doit = nl80211_set_station,
.policy = nl80211_policy,
.flags = GENL_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV |
+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
NL80211_FLAG_NEED_RTNL,
},
{
.doit = nl80211_del_station,
.policy = nl80211_policy,
.flags = GENL_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV |
+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
NL80211_FLAG_NEED_RTNL,
},
{
.doit = nl80211_del_mpath,
.policy = nl80211_policy,
.flags = GENL_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV |
+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
NL80211_FLAG_NEED_RTNL,
},
{
.doit = nl80211_set_bss,
.policy = nl80211_policy,
.flags = GENL_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV |
+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
NL80211_FLAG_NEED_RTNL,
},
{
.doit = nl80211_get_mesh_config,
.policy = nl80211_policy,
/* can be retrieved by unprivileged users */
- .internal_flags = NL80211_FLAG_NEED_NETDEV |
+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
NL80211_FLAG_NEED_RTNL,
},
{
.doit = nl80211_setdel_pmksa,
.policy = nl80211_policy,
.flags = GENL_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV |
+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
NL80211_FLAG_NEED_RTNL,
},
{
.doit = nl80211_setdel_pmksa,
.policy = nl80211_policy,
.flags = GENL_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV |
+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
NL80211_FLAG_NEED_RTNL,
},
{
.doit = nl80211_flush_pmksa,
.policy = nl80211_policy,
.flags = GENL_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV |
+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
NL80211_FLAG_NEED_RTNL,
},
{
.doit = nl80211_probe_client,
.policy = nl80211_policy,
.flags = GENL_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV |
+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
NL80211_FLAG_NEED_RTNL,
},
{
if (cmd == SIOCSIWENCODEEXT) {
struct iw_encode_ext *ee = (void *) extra;
- if (iwp->length < sizeof(*ee) + ee->key_len)
- return -EFAULT;
+ if (iwp->length < sizeof(*ee) + ee->key_len) {
+ err = -EFAULT;
+ goto out;
+ }
}
}
"No space is necessary after a cast\n" . $hereprev);
}
- if ($rawline =~ /^\+[ \t]*\/\*[ \t]*$/ &&
- $prevrawline =~ /^\+[ \t]*$/) {
- CHK("BLOCK_COMMENT_STYLE",
- "Don't begin block comments with only a /* line, use /* comment...\n" . $hereprev);
- }
-
# check for spaces at the beginning of a line.
# Exceptions:
# 1) within comments
};
/*
- * Generate the __enabled_CONFIG_* and __enabled_CONFIG_*_MODULE macros for
- * use by the IS_{ENABLED,BUILTIN,MODULE} macros. The _MODULE variant is
- * generated even for booleans so that the IS_ENABLED() macro works.
- */
-static void
-header_print__enabled_symbol(FILE *fp, struct symbol *sym, const char *value, void *arg)
-{
-
- switch (sym->type) {
- case S_BOOLEAN:
- case S_TRISTATE: {
- fprintf(fp, "#define __enabled_" CONFIG_ "%s %d\n",
- sym->name, (*value == 'y'));
- fprintf(fp, "#define __enabled_" CONFIG_ "%s_MODULE %d\n",
- sym->name, (*value == 'm'));
- break;
- }
- default:
- break;
- }
-}
-
-static struct conf_printer header__enabled_printer_cb =
-{
- .print_symbol = header_print__enabled_symbol,
- .print_comment = header_print_comment,
-};
-
-/*
* Tristate printer
*
* This printer is used when generating the `include/config/tristate.conf' file.
conf_write_heading(out_h, &header_printer_cb, NULL);
for_all_symbols(i, sym) {
- if (!sym->name)
- continue;
-
sym_calc_value(sym);
-
- conf_write_symbol(out_h, sym, &header__enabled_printer_cb, NULL);
-
- if (!(sym->flags & SYMBOL_WRITE))
+ if (!(sym->flags & SYMBOL_WRITE) || !sym->name)
continue;
+ /* write symbol to auto.conf, tristate and header files */
conf_write_symbol(out, sym, &kconfig_printer_cb, (void *)1);
conf_write_symbol(tristate, sym, &tristate_printer_cb, (void *)1);
for (;;) {
badness = fill_and_eval_dacs(codec, fill_hardwired,
fill_mio_first);
- if (badness < 0)
+ if (badness < 0) {
+ kfree(best_cfg);
return badness;
+ }
debug_badness("==> lo_type=%d, wired=%d, mio=%d, badness=0x%x\n",
cfg->line_out_type, fill_hardwired, fill_mio_first,
badness);
cfg->line_out_type = AUTO_PIN_SPEAKER_OUT;
fill_hardwired = true;
continue;
- }
+ }
if (cfg->hp_outs > 0 &&
cfg->line_out_type == AUTO_PIN_SPEAKER_OUT) {
cfg->speaker_outs = cfg->line_outs;
cfg->line_out_type = AUTO_PIN_HP_OUT;
fill_hardwired = true;
continue;
- }
+ }
break;
}
static int alc880_parse_auto_config(struct hda_codec *codec)
{
static const hda_nid_t alc880_ignore[] = { 0x1d, 0 };
- static const hda_nid_t alc880_ssids[] = { 0x15, 0x1b, 0x14, 0 };
+ static const hda_nid_t alc880_ssids[] = { 0x15, 0x1b, 0x14, 0 };
return alc_parse_auto_config(codec, alc880_ignore, alc880_ssids);
}
{ 0x16, 0x99130111 }, /* CLFE speaker */
{ 0x17, 0x99130112 }, /* surround speaker */
{ }
- }
+ },
+ .chained = true,
+ .chain_id = ALC882_FIXUP_GPIO1,
},
[ALC882_FIXUP_ACER_ASPIRE_8930G] = {
.type = ALC_FIXUP_PINS,
{ 0x20, AC_VERB_SET_COEF_INDEX, 0x07 },
{ 0x20, AC_VERB_SET_PROC_COEF, 0x3050 },
{ }
- }
+ },
+ .chained = true,
+ .chain_id = ALC882_FIXUP_GPIO1,
},
[ALC885_FIXUP_MACPRO_GPIO] = {
.type = ALC_FIXUP_FUNC,
ALC882_FIXUP_ACER_ASPIRE_4930G),
SND_PCI_QUIRK(0x1025, 0x0155, "Packard-Bell M5120", ALC882_FIXUP_PB_M5210),
SND_PCI_QUIRK(0x1025, 0x0259, "Acer Aspire 5935", ALC889_FIXUP_DAC_ROUTE),
+ SND_PCI_QUIRK(0x1025, 0x026b, "Acer Aspire 8940G", ALC882_FIXUP_ACER_ASPIRE_8930G),
SND_PCI_QUIRK(0x1025, 0x0296, "Acer Aspire 7736z", ALC882_FIXUP_ACER_ASPIRE_7736),
SND_PCI_QUIRK(0x1043, 0x13c2, "Asus A7M", ALC882_FIXUP_EAPD),
SND_PCI_QUIRK(0x1043, 0x1873, "ASUS W90V", ALC882_FIXUP_ASUS_W90V),
SND_PCI_QUIRK(0x106b, 0x3f00, "Macbook 5,1", ALC889_FIXUP_IMAC91_VREF),
SND_PCI_QUIRK(0x106b, 0x4000, "MacbookPro 5,1", ALC889_FIXUP_IMAC91_VREF),
SND_PCI_QUIRK(0x106b, 0x4100, "Macmini 3,1", ALC889_FIXUP_IMAC91_VREF),
+ SND_PCI_QUIRK(0x106b, 0x4200, "Mac Pro 5,1", ALC885_FIXUP_MACPRO_GPIO),
SND_PCI_QUIRK(0x106b, 0x4600, "MacbookPro 5,2", ALC889_FIXUP_IMAC91_VREF),
SND_PCI_QUIRK(0x106b, 0x4900, "iMac 9,1 Aluminum", ALC889_FIXUP_IMAC91_VREF),
SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_IMAC91_VREF),
{}
};
+static const struct alc_model_fixup alc882_fixup_models[] = {
+ {.id = ALC882_FIXUP_ACER_ASPIRE_4930G, .name = "acer-aspire-4930g"},
+ {.id = ALC882_FIXUP_ACER_ASPIRE_8930G, .name = "acer-aspire-8930g"},
+ {.id = ALC883_FIXUP_ACER_EAPD, .name = "acer-aspire"},
+ {}
+};
+
/*
* BIOS auto configuration
*/
if (err < 0)
goto error;
- alc_pick_fixup(codec, NULL, alc882_fixup_tbl, alc882_fixups);
+ alc_pick_fixup(codec, alc882_fixup_models, alc882_fixup_tbl,
+ alc882_fixups);
alc_apply_fixup(codec, ALC_FIXUP_ACT_PRE_PROBE);
alc_auto_parse_customize_define(codec);
* Basically the device should work as is without the fixup table.
* If BIOS doesn't give a proper info, enable the corresponding
* fixup entry.
- */
+ */
SND_PCI_QUIRK(0x1043, 0x8330, "ASUS Eeepc P703 P900A",
ALC269_FIXUP_AMIC),
SND_PCI_QUIRK(0x1043, 0x1013, "ASUS N61Da", ALC269_FIXUP_AMIC),
{
if (action == ALC_FIXUP_ACT_PRE_PROBE)
codec->no_jack_detect = 1;
-}
+}
static const struct alc_fixup alc861_fixups[] = {
[ALC861_FIXUP_FSC_AMILO_PI1505] = {
* Basically the device should work as is without the fixup table.
* If BIOS doesn't give a proper info, enable the corresponding
* fixup entry.
- */
+ */
SND_PCI_QUIRK(0x1043, 0x1000, "ASUS N50Vm", ALC662_FIXUP_ASUS_MODE1),
SND_PCI_QUIRK(0x1043, 0x1092, "ASUS NB", ALC662_FIXUP_ASUS_MODE3),
SND_PCI_QUIRK(0x1043, 0x1173, "ASUS K73Jn", ALC662_FIXUP_ASUS_MODE1),
cscope*
config.mak
config.mak.autogen
+*-bison.*
+*-flex.*
FLEX = $(CROSS_COMPILE)flex
BISON= $(CROSS_COMPILE)bison
-event-parser:
- $(QUIET_BISON)$(BISON) -v util/parse-events.y -d -o $(OUTPUT)util/parse-events-bison.c
+$(OUTPUT)util/parse-events-flex.c: util/parse-events.l
$(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/parse-events-flex.h -t util/parse-events.l > $(OUTPUT)util/parse-events-flex.c
-$(OUTPUT)util/parse-events-flex.c: event-parser
-$(OUTPUT)util/parse-events-bison.c: event-parser
+$(OUTPUT)util/parse-events-bison.c: util/parse-events.y
+ $(QUIET_BISON)$(BISON) -v util/parse-events.y -d -o $(OUTPUT)util/parse-events-bison.c
-pmu-parser:
- $(QUIET_BISON)$(BISON) -v util/pmu.y -d -o $(OUTPUT)util/pmu-bison.c
+$(OUTPUT)util/pmu-flex.c: util/pmu.l
$(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/pmu-flex.h -t util/pmu.l > $(OUTPUT)util/pmu-flex.c
-$(OUTPUT)util/pmu-flex.c: pmu-parser
-$(OUTPUT)util/pmu-bison.c: pmu-parser
+$(OUTPUT)util/pmu-bison.c: util/pmu.y
+ $(QUIET_BISON)$(BISON) -v util/pmu.y -d -o $(OUTPUT)util/pmu-bison.c
-$(OUTPUT)util/parse-events.o: event-parser pmu-parser
+$(OUTPUT)util/parse-events.o: $(OUTPUT)util/parse-events-flex.c $(OUTPUT)util/parse-events-bison.c
+$(OUTPUT)util/pmu.o: $(OUTPUT)util/pmu-flex.c $(OUTPUT)util/pmu-bison.c
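+# The generated parser sources and the objects built from them now depend on real
+# files, so the parsers are only regenerated when the .l/.y inputs change.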
LIB_FILE=$(OUTPUT)libperf.a
endif
ifdef NO_GTK2
- BASIC_CFLAGS += -DNO_GTK2
+ BASIC_CFLAGS += -DNO_GTK2_SUPPORT
else
FLAGS_GTK2=$(ALL_CFLAGS) $(ALL_LDFLAGS) $(EXTLIBS) $(shell pkg-config --libs --cflags gtk+-2.0)
ifneq ($(call try-cc,$(SOURCE_GTK2),$(FLAGS_GTK2)),y)
@echo ' html - make html documentation'
@echo ' info - make GNU info documentation (access with info <foo>)'
@echo ' pdf - make pdf documentation'
- @echo ' event-parser - make event parser code'
- @echo ' pmu-parser - make pmu format parser code'
@echo ' TAGS - use etags to make tag information for source browsing'
@echo ' tags - use ctags to make tag information for source browsing'
@echo ' cscope - use cscope to make interactive browsing database'
#include "util/debug.h"
#include <sys/prctl.h>
+#include <sys/resource.h>
#include <semaphore.h>
#include <pthread.h>
#include "util/debug.h"
#include <assert.h>
+#include <elf.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/prctl.h>
#include <sys/wait.h>
#include <sys/uio.h>
+#include <sys/utsname.h>
#include <sys/mman.h>
#include <linux/unistd.h>
symbol__annotate_zero_histograms(sym);
}
+static void ui__warn_map_erange(struct map *map, struct symbol *sym, u64 ip)
+{
+ struct utsname uts;
+ int err = uname(&uts);
+
+ ui__warning("Out of bounds address found:\n\n"
+ "Addr: %" PRIx64 "\n"
+ "DSO: %s %c\n"
+ "Map: %" PRIx64 "-%" PRIx64 "\n"
+ "Symbol: %" PRIx64 "-%" PRIx64 " %c %s\n"
+ "Arch: %s\n"
+ "Kernel: %s\n"
+ "Tools: %s\n\n"
+ "Not all samples will be on the annotation output.\n\n"
+ "Please report to linux-kernel@vger.kernel.org\n",
+ ip, map->dso->long_name, dso__symtab_origin(map->dso),
+ map->start, map->end, sym->start, sym->end,
+ sym->binding == STB_GLOBAL ? 'g' :
+ sym->binding == STB_LOCAL ? 'l' : 'w', sym->name,
+ err ? "[unknown]" : uts.machine,
+ err ? "[unknown]" : uts.release, perf_version_string);
+ if (use_browser <= 0)
+ sleep(5);
+
+ map->erange_warned = true;
+}
+
static void perf_top__record_precise_ip(struct perf_top *top,
struct hist_entry *he,
int counter, u64 ip)
{
struct annotation *notes;
struct symbol *sym;
+ int err;
if (he == NULL || he->ms.sym == NULL ||
((top->sym_filter_entry == NULL ||
}
ip = he->ms.map->map_ip(he->ms.map, ip);
- symbol__inc_addr_samples(sym, he->ms.map, counter, ip);
+ err = symbol__inc_addr_samples(sym, he->ms.map, counter, ip);
pthread_mutex_unlock(&notes->lock);
+
+ if (err == -ERANGE && !he->ms.map->erange_warned)
+ ui__warn_map_erange(he->ms.map, sym, ip);
}
static void perf_top__show_details(struct perf_top *top)
/* Tag samples to be skipped. */
static const char *skip_symbols[] = {
+ "intel_idle",
"default_idle",
"native_safe_halt",
"cpu_idle",
fi
MANIFEST=$(mktemp /tmp/perf-archive-manifest.XXXXXX)
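+# Resolve any symlinks in the build-id directory so the prefix strip below matches the readlink-ed filenames.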
+PERF_BUILDID_LINKDIR=$(readlink -f $PERF_BUILDID_DIR)/
cut -d ' ' -f 1 $BUILDIDS | \
while read build_id ; do
linkname=$PERF_BUILDID_DIR.build-id/${build_id:0:2}/${build_id:2}
filename=$(readlink -f $linkname)
echo ${linkname#$PERF_BUILDID_DIR} >> $MANIFEST
- echo ${filename#$PERF_BUILDID_DIR} >> $MANIFEST
+ echo ${filename#$PERF_BUILDID_LINKDIR} >> $MANIFEST
done
tar cfj $PERF_DATA.tar.bz2 -C $PERF_BUILDID_DIR -T $MANIFEST
pr_debug3("%s: addr=%#" PRIx64 "\n", __func__, map->unmap_ip(map, addr));
- if (addr > sym->end)
- return 0;
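+ /* Report out-of-range addresses so callers can warn instead of silently ignoring the sample. */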
+ if (addr < sym->start || addr > sym->end)
+ return -ERANGE;
offset = addr - sym->start;
h = annotation__histogram(notes, evidx);
{
struct annotation *notes = symbol__annotation(sym);
struct sym_hist *h = annotation__histogram(notes, evidx);
- struct objdump_line *pos;
- int len = sym->end - sym->start;
+ int len = sym->end - sym->start, offset;
h->sum = 0;
-
- list_for_each_entry(pos, &notes->src->source, node) {
- if (pos->offset != -1 && pos->offset < len) {
- h->addr[pos->offset] = h->addr[pos->offset] * 7 / 8;
- h->sum += h->addr[pos->offset];
- }
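+ /* Decay every address in the symbol, not just offsets that had objdump output lines. */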
+ for (offset = 0; offset < len; ++offset) {
+ h->addr[offset] = h->addr[offset] * 7 / 8;
+ h->sum += h->addr[offset];
}
}
if (!cmp) {
he->period += period;
++he->nr_events;
+
+ /* If the map of an existing hist_entry has
+ * become out-of-date due to an exec() or
+ * similar, update it. Otherwise we will
+ * mis-adjust symbol addresses when computing
+ * the history counter to increment.
+ */
+ if (he->ms.map != entry->ms.map) {
+ he->ms.map = entry->ms.map;
+ if (he->ms.map)
+ he->ms.map->referenced = true;
+ }
goto out;
}
RB_CLEAR_NODE(&self->rb_node);
self->groups = NULL;
self->referenced = false;
+ self->erange_warned = false;
}
struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
u64 end;
u8 /* enum map_type */ type;
bool referenced;
+ bool erange_warned;
u32 priv;
u64 pgoff;
{
const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
- if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest)
- return perf_session__find_machine(session, event->ip.pid);
+ if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
+ u32 pid;
+
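+ /* MMAP events carry the pid in event->mmap rather than event->ip. */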
+ if (event->header.type == PERF_RECORD_MMAP)
+ pid = event->mmap.pid;
+ else
+ pid = event->ip.pid;
+
+ return perf_session__find_machine(session, pid);
+ }
return perf_session__find_host_machine(session);
}
dump_sample(session, event, sample);
if (evsel == NULL) {
++session->hists.stats.nr_unknown_id;
- return -1;
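+ /* Skip this sample instead of aborting the whole session. */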
+ return 0;
}
if (machine == NULL) {
++session->hists.stats.nr_unprocessable_samples;
- return -1;
+ return 0;
}
return tool->sample(tool, event, sample, evsel, machine);
case PERF_RECORD_MMAP:
static bool map_symbol__toggle_fold(struct map_symbol *self)
{
+ if (!self)
+ return false;
+
if (!self->has_children)
return false;