S: D-64295
S: Germany
+N: Avi Kivity
+E: avi.kivity@gmail.com
+D: Kernel-based Virtual Machine (KVM)
+S: Ra'annana, Israel
+
N: Andi Kleen
E: andi@firstfloor.org
U: http://www.halobates.de
- semantics and behavior of local atomic operations.
lockdep-design.txt
- documentation on the runtime locking correctness validator.
+lockup-watchdogs.txt
+ - info on soft and hard lockup detectors (aka nmi_watchdog).
logo.gif
- full colour GIF image of Linux logo (penguin - Tux).
logo.txt
	- info on creator of above logo & site to get additional images from.
netlabel/
	- directory with information on the NetLabel subsystem.
networking/
- directory with info on various aspects of networking with Linux.
-nmi_watchdog.txt
- - info on NMI watchdog for SMP systems.
nommu-mmap.txt
- documentation about no-mmu memory mapping support.
numastat.txt
-----------------------------------------------------------------------
0000000000000000 0000007fffffffff 512GB user
-ffffff8000000000 ffffffbbfffcffff ~240GB vmalloc
+ffffff8000000000 ffffffbbfffeffff ~240GB vmalloc
-ffffffbbfffd0000 ffffffbcfffdffff 64KB [guard page]
+ffffffbbffff0000 ffffffbbffffffff 64KB [guard page]
-ffffffbbfffe0000 ffffffbcfffeffff 64KB PCI I/O space
+ffffffbc00000000 ffffffbdffffffff 8GB vmemmap
-ffffffbbffff0000 ffffffbcffffffff 64KB [guard page]
+ffffffbe00000000 ffffffbffbbfffff ~8GB [guard, future vmemmap]
-ffffffbc00000000 ffffffbdffffffff 8GB vmemmap
+ffffffbffbc00000 ffffffbffbdfffff 2MB earlyprintk device
+ffffffbffbe00000 ffffffbffbe0ffff 64KB PCI I/O space
-ffffffbe00000000 ffffffbffbffffff ~8GB [guard, future vmmemap]
+ffffffbffbe10000 ffffffbffbffffff ~2MB [guard]
ffffffbffc000000 ffffffbfffffffff 64MB modules
5.3 swappiness
Similar to /proc/sys/vm/swappiness, but affecting a hierarchy of groups only.
+Please note that, unlike the global swappiness, the memcg knob set to 0
+really prevents any swapping even if swap storage is available. This
+might lead to the memcg OOM killer being invoked if there are no file
+pages to reclaim.
Following cgroups' swappiness can't be changed.
- root cgroup (uses /proc/sys/vm/swappiness).
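For illustration, a minimal sketch of using the per-group knob (assuming the
memory controller is mounted at /sys/fs/cgroup/memory and a group named
"example" already exists):

  # echo 0 > /sys/fs/cgroup/memory/example/memory.swappiness
  # cat /sys/fs/cgroup/memory/example/memory.swappiness
  0

With the knob at 0, reclaim inside "example" will not swap anonymous pages,
so hitting the group's limit with no file pages left to reclaim triggers the
memcg OOM killer described above.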
shared across all System Controller members.
TC/TCLIB Timer required properties:
-- compatible: Should be "atmel,<chip>-pit".
+- compatible: Should be "atmel,<chip>-tcb".
<chip> can be "at91rm9200" or "at91sam9x5"
- reg: Should contain registers location and length
- interrupts: Should contain all interrupts for the TC block
--- /dev/null
+* EETI eGalax Multiple Touch Controller
+
+Required properties:
+- compatible: must be "eeti,egalax_ts"
+- reg: i2c slave address
+- interrupt-parent: the phandle for the interrupt controller
+- interrupts: touch controller interrupt
+- wakeup-gpios: the gpio pin to be used for waking up the controller
+  as well as used as the IRQ pin
+
+Example:
+
+ egalax_ts@04 {
+ compatible = "eeti,egalax_ts";
+ reg = <0x04>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <9 2>;
+ wakeup-gpios = <&gpio1 9 0>;
+ };
MDC, MDIO.
+Note: Each gpio-mdio bus should have an alias correctly numbered in "aliases"
+node.
+
Example:
-mdio {
+aliases {
+ mdio-gpio0 = <&mdio0>;
+};
+
+mdio0: mdio {
compatible = "virtual,mdio-gpio";
#address-cells = <1>;
#size-cells = <0>;
With some exceptions, these support nvidia,high-speed-mode,
nvidia,schmitt, nvidia,low-power-mode, nvidia,pull-down-strength,
- nvidia,pull-up-strength, nvidia,slew_rate-rising, nvidia,slew_rate-falling.
+ nvidia,pull-up-strength, nvidia,slew-rate-rising, nvidia,slew-rate-falling.
drive_ao1, drive_ao2, drive_at1, drive_at2, drive_cdev1, drive_cdev2,
drive_csus, drive_dap1, drive_dap2, drive_dap3, drive_dap4, drive_dbg,
drive groups:
These all support nvidia,pull-down-strength, nvidia,pull-up-strength,
- nvidia,slew_rate-rising, nvidia,slew_rate-falling. Most but not all
+ nvidia,slew-rate-rising, nvidia,slew-rate-falling. Most but not all
support nvidia,high-speed-mode, nvidia,schmitt, nvidia,low-power-mode.
ao1, ao2, at1, at2, at3, at4, at5, cdev1, cdev2, cec, crt, csus, dap1,
2 Modifying System Parameters
3 Per-Process Parameters
- 3.1 /proc/<pid>/oom_score_adj - Adjust the oom-killer
+ 3.1 /proc/<pid>/oom_adj & /proc/<pid>/oom_score_adj - Adjust the oom-killer
score
3.2 /proc/<pid>/oom_score - Display current oom-killer score
3.3 /proc/<pid>/io - Display the IO accounting fields
CHAPTER 3: PER-PROCESS PARAMETERS
------------------------------------------------------------------------------
-3.1 /proc/<pid>/oom_score_adj- Adjust the oom-killer score
+3.1 /proc/<pid>/oom_adj & /proc/<pid>/oom_score_adj - Adjust the oom-killer score
--------------------------------------------------------------------------------
-This file can be used to adjust the badness heuristic used to select which
+These files can be used to adjust the badness heuristic used to select which
process gets killed in out of memory conditions.
The badness heuristic assigns a value to each candidate task ranging from 0
equivalent to discounting 50% of the task's allowed memory from being considered
as scoring against the task.
+For backwards compatibility with previous kernels, /proc/<pid>/oom_adj may also
+be used to tune the badness score. Its acceptable values range from -16
+(OOM_ADJUST_MIN) to +15 (OOM_ADJUST_MAX), with a special value of -17
+(OOM_DISABLE) that disables oom killing entirely for that task. Its value is
+scaled linearly with /proc/<pid>/oom_score_adj.
+
The value of /proc/<pid>/oom_score_adj may be reduced no lower than the last
value set by a CAP_SYS_RESOURCE process. To reduce the value any lower
requires CAP_SYS_RESOURCE.
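As a sketch of how the two files interact (PID 1234 and the values are
illustrative only):

  # echo -500 > /proc/1234/oom_score_adj
  # cat /proc/1234/oom_adj
  -8

-500 scaled by the ratio of the two ranges (17/1000) truncates to -8,
matching the linear scaling described above.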
-------------------------------------------------------------
This file can be used to check the current score used by the oom-killer for
-any given <pid>.
+any given <pid>. Use it together with /proc/<pid>/oom_score_adj to tune which
+process should be killed in an out-of-memory situation.
+
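For example, to inspect the score of a hypothetical PID 1234 (the value
printed depends on the task's current memory usage):

  # cat /proc/1234/oom_score
  42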
3.3 /proc/<pid>/io - Display the IO accounting fields
-------------------------------------------------------
High level behavior (mixed):
============================
- kernel(driver): calls request_firmware(&fw_entry, $FIRMWARE, device)
-
- userspace:
+ 1), kernel(driver):
+ - calls request_firmware(&fw_entry, $FIRMWARE, device)
+	- kernel searches the firmware image named $FIRMWARE directly
+	  in the following search paths of the root filesystem:
+ "/lib/firmware/updates/" UTS_RELEASE,
+ "/lib/firmware/updates",
+ "/lib/firmware/" UTS_RELEASE,
+ "/lib/firmware"
+ - If found, goto 7), else goto 2)
+
+ 2), userspace:
- /sys/class/firmware/xxx/{loading,data} appear.
- hotplug gets called with a firmware identifier in $FIRMWARE
and the usual hotplug environment.
- hotplug: echo 1 > /sys/class/firmware/xxx/loading
- kernel: Discard any previous partial load.
+ 3), kernel: Discard any previous partial load.
- userspace:
+ 4), userspace:
- hotplug: cat appropriate_firmware_image > \
/sys/class/firmware/xxx/data
- kernel: grows a buffer in PAGE_SIZE increments to hold the image as it
+ 5), kernel: grows a buffer in PAGE_SIZE increments to hold the image as it
comes in.
- userspace:
+ 6), userspace:
- hotplug: echo 0 > /sys/class/firmware/xxx/loading
- kernel: request_firmware() returns and the driver has the firmware
+ 7), kernel: request_firmware() returns and the driver has the firmware
image in fw_entry->{data,size}. If something went wrong
request_firmware() returns non-zero and fw_entry is set to
NULL.
- kernel(driver): Driver code calls release_firmware(fw_entry) releasing
+ 8), kernel(driver): Driver code calls release_firmware(fw_entry) releasing
the firmware image and any related resource.
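A minimal sketch of the userspace side of steps 2) to 6), written as a
hotplug helper (assuming the kernel supplies $DEVPATH and $FIRMWARE in the
environment and that images live under /lib/firmware):

  #!/bin/sh
  # start the load; the kernel discards any previous partial load
  echo 1 > /sys/$DEVPATH/loading
  # feed the image into the kernel's grow-on-demand buffer
  cat /lib/firmware/$FIRMWARE > /sys/$DEVPATH/data
  # signal completion so request_firmware() can return
  echo 0 > /sys/$DEVPATH/loading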
High level behavior (driver code):
BIOS and Kernel Developer's Guide (BKDG) For AMD Family 15h Processors
(not yet published)
-Author: Andreas Herrmann <andreas.herrmann3@amd.com>
+Author: Andreas Herrmann <herrmann.der.user@googlemail.com>
Description
-----------
This requests that the NIC receive all possible frames, including errored
frames (such as bad FCS, etc). This can be helpful when sniffing a link with
bad packets on it. Some NICs may receive more packets if also put into normal
-PROMISC mdoe.
+PROMISC mode.
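As a hedged example, this receive-all behaviour is typically toggled through
ethtool's "rx-all" offload flag (the interface name eth0 is illustrative):

  # ethtool -K eth0 rx-all on
  # ethtool -k eth0 | grep rx-all
  rx-all: on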
# ip link delete vxlan0
3. Show vxlan info
- # ip -d show vxlan0
+ # ip -d link show vxlan0
It is possible to create, destroy and display the vxlan
forwarding table using the new bridge command.
# bridge fdb add to 00:17:42:8a:b4:05 dst 192.19.0.2 dev vxlan0
2. Delete forwarding table entry
- # bridge fdb delete 00:17:42:8a:b4:05
+ # bridge fdb delete 00:17:42:8a:b4:05 dev vxlan0
3. Show forwarding table
# bridge fdb show dev vxlan0
--- /dev/null
+Chinese translated version of Documentation/IRQ.txt
+
+If you have any comment or update to the content, please contact the
+original document maintainer directly. However, if you have a problem
+communicating in English you can also ask the Chinese maintainer for
+help. Contact the Chinese maintainer if this translation is outdated
+or if there is a problem with the translation.
+
+Maintainer: Eric W. Biederman <ebiederman@xmission.com>
+Chinese maintainer: Fu Wei <tekkamanninja@gmail.com>
+---------------------------------------------------------------------
+Chinese translation of Documentation/IRQ.txt
+
+If you wish to comment on or update the content of this document, please
+contact the original document maintainer directly. If you have difficulty
+communicating in English, you can also ask the Chinese maintainer for help.
+Please contact the Chinese maintainer if this translation is outdated or if
+there is a problem with the translation.
+English maintainer: Eric W. Biederman <ebiederman@xmission.com>
+Chinese maintainer: Fu Wei <tekkamanninja@gmail.com>
+Chinese translator: Fu Wei <tekkamanninja@gmail.com>
+Chinese proofreader: Fu Wei <tekkamanninja@gmail.com>
+
+
+The translated body of the document follows
+---------------------------------------------------------------------
+What is an IRQ?
+
+An IRQ is an interrupt request from a device. Currently they can come
+in over a pin, or over a packet. Several devices may be connected to
+the same pin, thus sharing an IRQ.
+
+An IRQ number is a kernel identifier used to talk about a hardware
+interrupt source. Typically this is an index into the global irq_desc
+array, but except for what linux/interrupt.h implements, the details
+are architecture specific.
+
+An IRQ number is an enumeration of the possible interrupt sources on a
+device. Typically what is enumerated is the number of that pin among
+all of the input pins on the system's interrupt controllers. In the
+case of ISA, what is enumerated are the 16 input pins on the two i8259
+interrupt controllers.
+
+Architectures can assign additional meaning to IRQ numbers, and this is
+encouraged in cases where any manual configuration of the hardware is
+involved. The ISA IRQs are a classic example of assigning this kind of
+additional meaning.
--- /dev/null
+Chinese translated version of Documentation/arm64/booting.txt
+
+If you have any comment or update to the content, please contact the
+original document maintainer directly. However, if you have a problem
+communicating in English you can also ask the Chinese maintainer for
+help. Contact the Chinese maintainer if this translation is outdated
+or if there is a problem with the translation.
+
+Maintainer: Will Deacon <will.deacon@arm.com>
+Chinese maintainer: Fu Wei <tekkamanninja@gmail.com>
+---------------------------------------------------------------------
+Chinese translation of Documentation/arm64/booting.txt
+
+If you wish to comment on or update the content of this document, please
+contact the original document maintainer directly. If you have difficulty
+communicating in English, you can also ask the Chinese maintainer for help.
+Please contact the Chinese maintainer if this translation is outdated or if
+there is a problem with the translation.
+
+English maintainer: Will Deacon <will.deacon@arm.com>
+Chinese maintainer: Fu Wei <tekkamanninja@gmail.com>
+Chinese translator: Fu Wei <tekkamanninja@gmail.com>
+Chinese proofreader: Fu Wei <tekkamanninja@gmail.com>
+
+The translated body of the document follows
+---------------------------------------------------------------------
+			Booting AArch64 Linux
+			=====================
+
+Author: Will Deacon <will.deacon@arm.com>
+Date  : 07 September 2012
+
+This document is based on the ARM booting document by Russell King and
+is relevant to all public releases of the AArch64 Linux kernel.
+
+The AArch64 exception model is made up of a number of exception levels
+(EL0 - EL3), with EL0 and EL1 having a secure and a non-secure
+counterpart. EL2 is the hypervisor level and exists only in non-secure
+mode. EL3 is the highest privilege level and exists only in secure mode.
+
+For the purposes of this document, we will use the term `boot loader'
+simply to define all software that executes on the CPU(s) before control
+is passed to the Linux kernel. This may include secure monitor and
+hypervisor code, or it may just be a handful of instructions for
+preparing a minimal boot environment.
+
+Essentially, the boot loader should (as a minimum) implement the
+following:
+
+1. Setup and initialise the RAM
+2. Setup the device tree data
+3. Decompress the kernel image
+4. Call the kernel image
+
+
+1. Setup and initialise RAM
+---------------------------
+
+Requirement: MANDATORY
+
+The boot loader is expected to find and initialise all RAM that the
+kernel will use for volatile data storage in the system. It performs
+this in a machine dependent manner. (It may use internal algorithms
+to automatically locate and size all RAM, or it may use knowledge of
+the RAM in the machine, or any other method the boot loader designer
+sees fit.)
+
+
+2. Setup the device tree data
+-----------------------------
+
+Requirement: MANDATORY
+
+The device tree blob (dtb) must be no bigger than 2MB in size and
+placed at a 2MB boundary within the first 512MB from the start of the
+kernel image. This allows the kernel to map the blob using a single
+section descriptor in the initial page tables.
+
+
+3. Decompress the kernel image
+------------------------------
+
+Requirement: OPTIONAL
+
+The AArch64 kernel does not currently provide a decompressor, so if a
+compressed kernel image (e.g. Image.gz) is used, decompression must be
+performed by the boot loader (using gzip etc.). If the boot loader does
+not implement this requirement, the uncompressed kernel image must be
+used instead.
+
+
+4. Call the kernel image
+------------------------
+
+Requirement: MANDATORY
+
+The decompressed kernel image contains a 32-byte header as follows:
+
+  u32 magic	= 0x14000008;	/* branch to stext, little-endian */
+  u32 res0	= 0;		/* reserved */
+  u64 text_offset;		/* image load offset */
+  u64 res1	= 0;		/* reserved */
+  u64 res2	= 0;		/* reserved */
+
+The image must be placed at a specific offset (currently 0x80000) from
+the start of the system RAM. The start of the system RAM must be
+aligned to 2MB.
+
+Before jumping into the kernel, the following conditions must be met:
+
+- Quiesce all DMA capable devices so that memory does not get corrupted
+  by bogus network packets or disk data. This may save you many hours
+  of debugging.
+
+- Primary CPU general-purpose register settings
+  x0 = physical address of device tree blob (dtb) in system RAM.
+  x1 = 0 (reserved for future use)
+  x2 = 0 (reserved for future use)
+  x3 = 0 (reserved for future use)
+
+- CPU mode
+  All forms of interrupts must be masked in PSTATE.DAIF (Debug, SError,
+  IRQ and FIQ).
+  The CPU must be in either EL2 (RECOMMENDED in order to have access to
+  the virtualisation extensions) or non-secure EL1.
+
+- Caches, MMUs
+  The MMU must be off.
+  The instruction cache may be on or off.
+  The data cache must be off and invalidated.
+  External caches (if present) must be configured and disabled.
+
+- Architected timers
+  CNTFRQ must be programmed with the timer frequency.
+  If entering the kernel at EL1, then EL1PCTEN (bit 0) in CNTHCTL_EL2
+  must be set.
+
+- Coherency
+  All CPUs to be booted by the kernel must be part of the same coherency
+  domain on entry to the kernel. This may require IMPLEMENTATION DEFINED
+  initialisation to enable the receiving of maintenance operations on
+  each CPU.
+
+- System registers
+  All writable architected system registers at the exception level where
+  the kernel image will be entered must be initialised by software at a
+  higher exception level to prevent execution in an UNKNOWN state.
+
+The boot loader is expected to enter the kernel on each CPU in the
+following manner:
+
+- The primary CPU must jump directly to the first instruction of the
+  kernel image. The device tree blob passed by this CPU must contain
+  the following for each CPU node:
+
+  1. An 'enable-method' property. Currently, the only supported value
+     for this field is the string "spin-table".
+
+  2. A 'cpu-release-addr' property identifying a 64-bit,
+     zero-initialised memory location.
+
+  The boot loader must generate these device tree properties and insert
+  them into the blob prior to kernel entry.
+
+- Any secondary CPUs must spin outside of the kernel in a reserved area
+  of memory (communicated to the kernel by a /memreserve/ region in the
+  device tree), polling their cpu-release-addr location (which must be
+  contained in the reserved region). A wfe instruction may be inserted
+  to reduce the overhead of the busy-loop, and a sev will be issued by
+  the primary CPU. When a read of the location pointed to by the
+  cpu-release-addr returns a non-zero value, the CPU must jump directly
+  to this value.
+
+- Secondary CPU general-purpose register settings
+  x0 = 0 (reserved for future use)
+  x1 = 0 (reserved for future use)
+  x2 = 0 (reserved for future use)
+  x3 = 0 (reserved for future use)
--- /dev/null
+Chinese translated version of Documentation/arm64/memory.txt
+
+If you have any comment or update to the content, please contact the
+original document maintainer directly. However, if you have a problem
+communicating in English you can also ask the Chinese maintainer for
+help. Contact the Chinese maintainer if this translation is outdated
+or if there is a problem with the translation.
+
+Maintainer: Catalin Marinas <catalin.marinas@arm.com>
+Chinese maintainer: Fu Wei <tekkamanninja@gmail.com>
+---------------------------------------------------------------------
+Chinese translation of Documentation/arm64/memory.txt
+
+If you wish to comment on or update the content of this document, please
+contact the original document maintainer directly. If you have difficulty
+communicating in English, you can also ask the Chinese maintainer for help.
+Please contact the Chinese maintainer if this translation is outdated or if
+there is a problem with the translation.
+
+English maintainer: Catalin Marinas <catalin.marinas@arm.com>
+Chinese maintainer: Fu Wei <tekkamanninja@gmail.com>
+Chinese translator: Fu Wei <tekkamanninja@gmail.com>
+Chinese proofreader: Fu Wei <tekkamanninja@gmail.com>
+
+The translated body of the document follows
+---------------------------------------------------------------------
+		Memory Layout on AArch64 Linux
+		==============================
+
+Author: Catalin Marinas <catalin.marinas@arm.com>
+Date  : 20 February 2012
+
+This document describes the virtual memory layout used by the AArch64
+Linux kernel. The architecture allows 4-level translation tables with a
+4KB page size and 3-level translation tables with a 64KB page size.
+
+AArch64 Linux uses a 3-level translation table configuration with 4KB
+pages, giving 39-bit (512GB) virtual address spaces for both user and
+kernel. With the 64KB page configuration, only 2 levels of translation
+tables are used, but the memory layout is the same.
+
+User addresses have bits 63:39 set to 0, while the kernel addresses
+have the same bits set to 1. TTBRx selection is given by bit 63 of the
+virtual address. swapper_pg_dir contains only kernel (global) mappings,
+while the user pgd contains only user (non-global) mappings. The
+swapper_pg_dir address is written to TTBR1 and never written to TTBR0.
+
+
+AArch64 Linux memory layout:
+
+Start			End			Size		Use
+-----------------------------------------------------------------------
+0000000000000000	0000007fffffffff	 512GB		user
+
+ffffff8000000000	ffffffbbfffcffff	~240GB		vmalloc
+
+ffffffbbfffd0000	ffffffbcfffdffff	  64KB		[guard page]
+
+ffffffbbfffe0000	ffffffbcfffeffff	  64KB		PCI I/O space
+
+ffffffbbffff0000	ffffffbcffffffff	  64KB		[guard page]
+
+ffffffbc00000000	ffffffbdffffffff	   8GB		vmemmap
+
+ffffffbe00000000	ffffffbffbffffff	  ~8GB		[guard, future vmemmap]
+
+ffffffbffc000000	ffffffbfffffffff	  64MB		modules
+
+ffffffc000000000	ffffffffffffffff	 256GB		memory
+
+
+Translation table lookup with 4KB pages:
+
++--------+--------+--------+--------+--------+--------+--------+--------+
+|63    56|55    48|47    40|39    32|31    24|23    16|15     8|7      0|
++--------+--------+--------+--------+--------+--------+--------+--------+
+ |                 |         |         |         |         |
+ |                 |         |         |         |         v
+ |                 |         |         |         |   [11:0]  in-page offset
+ |                 |         |         |         +-> [20:12] L3 index
+ |                 |         |         +-----------> [29:21] L2 index
+ |                 |         +---------------------> [38:30] L1 index
+ |                 +-------------------------------> [47:39] L0 index (not used)
+ +-------------------------------------------------> [63] TTBR0/1
+
+
+Translation table lookup with 64KB pages:
+
++--------+--------+--------+--------+--------+--------+--------+--------+
+|63    56|55    48|47    40|39    32|31    24|23    16|15     8|7      0|
++--------+--------+--------+--------+--------+--------+--------+--------+
+ |                 |    |               |              |
+ |                 |    |               |              v
+ |                 |    |               |            [15:0]  in-page offset
+ |                 |    |               +----------> [28:16] L3 index
+ |                 |    +--------------------------> [41:29] L2 index (only 38:29 used)
+ |                 +-------------------------------> [47:42] L1 index (not used)
+ +-------------------------------------------------> [63] TTBR0/1
F: include/linux/altera_jtaguart.h
AMD FAM15H PROCESSOR POWER MONITORING DRIVER
-M: Andreas Herrmann <andreas.herrmann3@amd.com>
+M: Andreas Herrmann <herrmann.der.user@googlemail.com>
L: lm-sensors@lm-sensors.org
S: Maintained
F: Documentation/hwmon/fam15h_power
F: arch/x86/include/asm/geode.h
AMD IOMMU (AMD-VI)
-M: Joerg Roedel <joerg.roedel@amd.com>
+M: Joerg Roedel <joro@8bytes.org>
L: iommu@lists.linux-foundation.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
-S: Supported
+S: Maintained
F: drivers/iommu/amd_iommu*.[ch]
F: include/linux/amd-iommu.h
AMD MICROCODE UPDATE SUPPORT
-M: Andreas Herrmann <andreas.herrmann3@amd.com>
+M: Andreas Herrmann <herrmann.der.user@googlemail.com>
L: amd64-microcode@amd64.org
-S: Supported
+S: Maintained
F: arch/x86/kernel/microcode_amd.c
AMS (Apple Motion Sensor) DRIVER
S: Maintained
F: arch/arm/
+ARM SUB-ARCHITECTURES
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S:	Maintained
+F: arch/arm/mach-*/
+F: arch/arm/plat-*/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc.git
+
ARM PRIMECELL AACI PL041 DRIVER
M: Russell King <linux@arm.linux.org.uk>
S: Maintained
F: arch/arm/mach-sa1100/jornada720.c
F: arch/arm/mach-sa1100/include/mach/jornada720.h
+ARM/IGEP MACHINE SUPPORT
+M: Enric Balletbo i Serra <eballetbo@gmail.com>
+M: Javier Martinez Canillas <javier@dowhile0.org>
+L: linux-omap@vger.kernel.org
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+F: arch/arm/mach-omap2/board-igep0020.c
+
ARM/INCOME PXA270 SUPPORT
M: Marek Vasut <marek.vasut@gmail.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
M: Seung-Woo Kim <sw0312.kim@samsung.com>
M: Kyungmin Park <kyungmin.park@samsung.com>
L: dri-devel@lists.freedesktop.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/daeinki/drm-exynos.git
S: Supported
F: drivers/gpu/drm/exynos
F: include/drm/exynos*
EDAC-AMD64
M: Doug Thompson <dougthompson@xmission.com>
-M: Borislav Petkov <borislav.petkov@amd.com>
+M: Borislav Petkov <bp@alien8.de>
L: linux-edac@vger.kernel.org
W: bluesmoke.sourceforge.net
-S: Supported
+S: Maintained
F: drivers/edac/amd64_edac*
EDAC-E752X
EXTENSIBLE FIRMWARE INTERFACE (EFI)
M: Matt Fleming <matt.fleming@intel.com>
L: linux-efi@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/mfleming/efi.git
S: Maintained
F: Documentation/x86/efi-stub.txt
F: arch/ia64/kernel/efi.c
F: drivers/net/hyperv/
F: drivers/staging/hv/
+I2C OVER PARALLEL PORT
+M: Jean Delvare <khali@linux-fr.org>
+L: linux-i2c@vger.kernel.org
+S: Maintained
+F: Documentation/i2c/busses/i2c-parport
+F: Documentation/i2c/busses/i2c-parport-light
+F: drivers/i2c/busses/i2c-parport.c
+F: drivers/i2c/busses/i2c-parport-light.c
+
+I2C/SMBUS CONTROLLER DRIVERS FOR PC
+M: Jean Delvare <khali@linux-fr.org>
+L: linux-i2c@vger.kernel.org
+S: Maintained
+F: Documentation/i2c/busses/i2c-ali1535
+F: Documentation/i2c/busses/i2c-ali1563
+F: Documentation/i2c/busses/i2c-ali15x3
+F: Documentation/i2c/busses/i2c-amd756
+F: Documentation/i2c/busses/i2c-amd8111
+F: Documentation/i2c/busses/i2c-i801
+F: Documentation/i2c/busses/i2c-nforce2
+F: Documentation/i2c/busses/i2c-piix4
+F: Documentation/i2c/busses/i2c-sis5595
+F: Documentation/i2c/busses/i2c-sis630
+F: Documentation/i2c/busses/i2c-sis96x
+F: Documentation/i2c/busses/i2c-via
+F: Documentation/i2c/busses/i2c-viapro
+F: drivers/i2c/busses/i2c-ali1535.c
+F: drivers/i2c/busses/i2c-ali1563.c
+F: drivers/i2c/busses/i2c-ali15x3.c
+F: drivers/i2c/busses/i2c-amd756.c
+F: drivers/i2c/busses/i2c-amd756-s4882.c
+F: drivers/i2c/busses/i2c-amd8111.c
+F: drivers/i2c/busses/i2c-i801.c
+F: drivers/i2c/busses/i2c-isch.c
+F: drivers/i2c/busses/i2c-nforce2.c
+F: drivers/i2c/busses/i2c-nforce2-s4985.c
+F: drivers/i2c/busses/i2c-piix4.c
+F: drivers/i2c/busses/i2c-sis5595.c
+F: drivers/i2c/busses/i2c-sis630.c
+F: drivers/i2c/busses/i2c-sis96x.c
+F: drivers/i2c/busses/i2c-via.c
+F: drivers/i2c/busses/i2c-viapro.c
+
I2C/SMBUS STUB DRIVER
M: "Mark M. Hoffman" <mhoffman@lightlink.com>
L: linux-i2c@vger.kernel.org
F: drivers/i2c/busses/i2c-stub.c
I2C SUBSYSTEM
-M: "Jean Delvare (PC drivers, core)" <khali@linux-fr.org>
+M: Wolfram Sang <w.sang@pengutronix.de>
M: "Ben Dooks (embedded platforms)" <ben-linux@fluff.org>
-M: "Wolfram Sang (embedded platforms)" <w.sang@pengutronix.de>
L: linux-i2c@vger.kernel.org
W: http://i2c.wiki.kernel.org/
T: quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-i2c/
F: include/linux/i2c.h
F: include/linux/i2c-*.h
+I2C-TAOS-EVM DRIVER
+M: Jean Delvare <khali@linux-fr.org>
+L: linux-i2c@vger.kernel.org
+S: Maintained
+F: Documentation/i2c/busses/i2c-taos-evm
+F: drivers/i2c/busses/i2c-taos-evm.c
+
I2C-TINY-USB DRIVER
M: Till Harbaum <till@harbaum.org>
L: linux-i2c@vger.kernel.org
F: drivers/platform/x86/ideapad-laptop.c
IDE/ATAPI DRIVERS
-M: Borislav Petkov <petkovbb@gmail.com>
+M: Borislav Petkov <bp@alien8.de>
L: linux-ide@vger.kernel.org
S: Maintained
F: Documentation/cdrom/ide-cd
F: include/linux/sunrpc/
KERNEL VIRTUAL MACHINE (KVM)
-M: Avi Kivity <avi@redhat.com>
M: Marcelo Tosatti <mtosatti@redhat.com>
+M: Gleb Natapov <gleb@redhat.com>
L: kvm@vger.kernel.org
W: http://kvm.qumranet.com
S: Supported
S: Odd Fixes
F: drivers/net/
F: include/linux/if_*
-F: include/linux/*device.h
+F: include/linux/netdevice.h
+F: include/linux/arcdevice.h
+F: include/linux/etherdevice.h
+F: include/linux/fcdevice.h
+F: include/linux/fddidevice.h
+F: include/linux/hippidevice.h
+F: include/linux/inetdevice.h
NETXEN (1/10) GbE SUPPORT
M: Sony Chacko <sony.chacko@qlogic.com>
F: sound/drivers/opl4/
OPROFILE
-M: Robert Richter <robert.richter@amd.com>
+M: Robert Richter <rric@kernel.org>
L: oprofile-list@lists.sf.net
S: Maintained
F: arch/*/include/asm/oprofile*.h
F: drivers/pinctrl/spear/
PKTCDVD DRIVER
-M: Peter Osterlund <petero2@telia.com>
+M: Jiri Kosina <jkosina@suse.cz>
S: Maintained
F: drivers/block/pktcdvd.c
F: include/linux/pktcdvd.h
S: Maintained
F: arch/xtensa/
+THERMAL
+M: Zhang Rui <rui.zhang@intel.com>
+L: linux-pm@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/rzhang/linux.git
+S: Supported
+F: drivers/thermal/
+F: include/linux/thermal.h
+
THINKPAD ACPI EXTRAS DRIVER
M: Henrique de Moraes Holschuh <ibm-acpi@hmh.eng.br>
L: ibm-acpi-devel@lists.sourceforge.net
S: Maintained
F: drivers/net/ethernet/via/via-rhine.c
-VIAPRO SMBUS DRIVER
-M: Jean Delvare <khali@linux-fr.org>
-L: linux-i2c@vger.kernel.org
-S: Maintained
-F: Documentation/i2c/busses/i2c-viapro
-F: drivers/i2c/busses/i2c-viapro.c
-
VIA SD/MMC CARD CONTROLLER DRIVER
M: Bruce Chang <brucechang@via.com.tw>
M: Harald Welte <HaraldWelte@viatech.com>
X86 MCE INFRASTRUCTURE
M: Tony Luck <tony.luck@intel.com>
-M: Borislav Petkov <bp@amd64.org>
+M: Borislav Petkov <bp@alien8.de>
L: linux-edac@vger.kernel.org
S: Maintained
F: arch/x86/kernel/cpu/mcheck/*
VERSION = 3
PATCHLEVEL = 7
SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc7
NAME = Terrified Chipmunk
# *DOCUMENTATION*
#include <asm/processor.h>
#include <asm/types.h>
#include <asm/hwrpb.h>
+#include <asm/sysinfo.h>
#endif
#ifndef __ASSEMBLY__
mm_segment_t addr_limit; /* thread address space */
unsigned cpu; /* current CPU */
int preempt_count; /* 0 => preemptable, <0 => BUG */
+ unsigned int status; /* thread-synchronous flags */
int bpt_nsaved;
unsigned long bpt_addr[2]; /* breakpoint handling */
* - these are process state flags and used from assembly
* - pending work-to-be-done flags come first and must be assigned to be
* within bits 0 to 7 to fit in an immediate operand.
- * - ALPHA_UAC_SHIFT below must be kept consistent with the unaligned
- * control flags.
*
* TIF_SYSCALL_TRACE is known to be 0 via blbs.
*/
#define TIF_NOTIFY_RESUME 1 /* callback before returning to user */
#define TIF_SIGPENDING 2 /* signal pending */
#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
-#define TIF_POLLING_NRFLAG 8 /* poll_idle is polling NEED_RESCHED */
#define TIF_DIE_IF_KERNEL 9 /* dik recursion lock */
-#define TIF_UAC_NOPRINT 10 /* ! Preserve sequence of following */
-#define TIF_UAC_NOFIX 11 /* ! flags as they match */
-#define TIF_UAC_SIGBUS 12 /* ! userspace part of 'osf_sysinfo' */
#define TIF_MEMDIE 13 /* is terminating due to OOM killer */
-#define TIF_RESTORE_SIGMASK 14 /* restore signal mask in do_signal */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
-#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
/* Work to do on interrupt/exception return. */
#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK \
| _TIF_SYSCALL_TRACE)
-#define ALPHA_UAC_SHIFT TIF_UAC_NOPRINT
-#define ALPHA_UAC_MASK (1 << TIF_UAC_NOPRINT | 1 << TIF_UAC_NOFIX | \
- 1 << TIF_UAC_SIGBUS)
+#define TS_UAC_NOPRINT 0x0001 /* ! Preserve the following three */
+#define TS_UAC_NOFIX 0x0002 /* ! flags as they match */
+#define TS_UAC_SIGBUS 0x0004 /* ! userspace part of 'osf_sysinfo' */
+#define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal() */
+#define TS_POLLING 0x0010 /* idle task polling need_resched,
+ skip sending interrupt */
-#define SET_UNALIGN_CTL(task,value) ({ \
- task_thread_info(task)->flags = ((task_thread_info(task)->flags & \
- ~ALPHA_UAC_MASK) \
- | (((value) << ALPHA_UAC_SHIFT) & (1<<TIF_UAC_NOPRINT))\
- | (((value) << (ALPHA_UAC_SHIFT + 1)) & (1<<TIF_UAC_SIGBUS)) \
- | (((value) << (ALPHA_UAC_SHIFT - 1)) & (1<<TIF_UAC_NOFIX)));\
+#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
+
+#ifndef __ASSEMBLY__
+#define HAVE_SET_RESTORE_SIGMASK 1
+static inline void set_restore_sigmask(void)
+{
+ struct thread_info *ti = current_thread_info();
+ ti->status |= TS_RESTORE_SIGMASK;
+ WARN_ON(!test_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags));
+}
+static inline void clear_restore_sigmask(void)
+{
+ current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
+}
+static inline bool test_restore_sigmask(void)
+{
+ return current_thread_info()->status & TS_RESTORE_SIGMASK;
+}
+static inline bool test_and_clear_restore_sigmask(void)
+{
+ struct thread_info *ti = current_thread_info();
+ if (!(ti->status & TS_RESTORE_SIGMASK))
+ return false;
+ ti->status &= ~TS_RESTORE_SIGMASK;
+ return true;
+}
+#endif
+
+#define SET_UNALIGN_CTL(task,value) ({ \
+ __u32 status = task_thread_info(task)->status & ~UAC_BITMASK; \
+ if (value & PR_UNALIGN_NOPRINT) \
+ status |= TS_UAC_NOPRINT; \
+ if (value & PR_UNALIGN_SIGBUS) \
+ status |= TS_UAC_SIGBUS; \
+ if (value & 4) /* alpha-specific */ \
+ status |= TS_UAC_NOFIX; \
+ task_thread_info(task)->status = status; \
0; })
#define GET_UNALIGN_CTL(task,value) ({ \
- put_user((task_thread_info(task)->flags & (1 << TIF_UAC_NOPRINT))\
- >> ALPHA_UAC_SHIFT \
- | (task_thread_info(task)->flags & (1 << TIF_UAC_SIGBUS))\
- >> (ALPHA_UAC_SHIFT + 1) \
- | (task_thread_info(task)->flags & (1 << TIF_UAC_NOFIX))\
- >> (ALPHA_UAC_SHIFT - 1), \
- (int __user *)(value)); \
+ __u32 status = task_thread_info(task)->status & ~UAC_BITMASK; \
+ __u32 res = 0; \
+ if (status & TS_UAC_NOPRINT) \
+ res |= PR_UNALIGN_NOPRINT; \
+ if (status & TS_UAC_SIGBUS) \
+ res |= PR_UNALIGN_SIGBUS; \
+ if (status & TS_UAC_NOFIX) \
+ res |= 4; \
+ put_user(res, (int __user *)(value)); \
})
-#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
-
#endif /* __KERNEL__ */
#endif /* _ALPHA_THREAD_INFO_H */
* unhappy with OSF UFS. [CHECKME]
*/
static int
-osf_ufs_mount(char *dirname, struct ufs_args __user *args, int flags)
+osf_ufs_mount(const char *dirname, struct ufs_args __user *args, int flags)
{
int retval;
struct cdfs_args tmp;
}
static int
-osf_cdfs_mount(char *dirname, struct cdfs_args __user *args, int flags)
+osf_cdfs_mount(const char *dirname, struct cdfs_args __user *args, int flags)
{
int retval;
struct cdfs_args tmp;
}
static int
-osf_procfs_mount(char *dirname, struct procfs_args __user *args, int flags)
+osf_procfs_mount(const char *dirname, struct procfs_args __user *args, int flags)
{
struct procfs_args tmp;
case GSI_UACPROC:
if (nbytes < sizeof(unsigned int))
return -EINVAL;
- w = (current_thread_info()->flags >> ALPHA_UAC_SHIFT) &
- UAC_BITMASK;
+ w = current_thread_info()->status & UAC_BITMASK;
if (put_user(w, (unsigned int __user *)buffer))
return -EFAULT;
return 1;
break;
case SSI_NVPAIRS: {
- unsigned long v, w, i;
- unsigned int old, new;
+ unsigned __user *p = buffer;
+ unsigned i;
- for (i = 0; i < nbytes; ++i) {
+ for (i = 0, p = buffer; i < nbytes; ++i, p += 2) {
+ unsigned v, w, status;
- if (get_user(v, 2*i + (unsigned int __user *)buffer))
- return -EFAULT;
- if (get_user(w, 2*i + 1 + (unsigned int __user *)buffer))
+ if (get_user(v, p) || get_user(w, p + 1))
return -EFAULT;
switch (v) {
case SSIN_UACPROC:
- again:
- old = current_thread_info()->flags;
- new = old & ~(UAC_BITMASK << ALPHA_UAC_SHIFT);
- new = new | (w & UAC_BITMASK) << ALPHA_UAC_SHIFT;
-		if (cmpxchg(&current_thread_info()->flags,
- old, new) != old)
- goto again;
+ w &= UAC_BITMASK;
+ status = current_thread_info()->status;
+ status = (status & ~UAC_BITMASK) | w;
+ current_thread_info()->status = status;
break;
default:
void
cpu_idle(void)
{
- set_thread_flag(TIF_POLLING_NRFLAG);
+ current_thread_info()->status |= TS_POLLING;
while (1) {
/* FIXME -- EV6 and LCA45 know how to power down
/* Check the UAC bits to decide what the user wants us to do
   with the unaligned access. */
- if (!test_thread_flag (TIF_UAC_NOPRINT)) {
+ if (!(current_thread_info()->status & TS_UAC_NOPRINT)) {
if (__ratelimit(&ratelimit)) {
printk("%s(%d): unaligned trap at %016lx: %p %lx %ld\n",
current->comm, task_pid_nr(current),
regs->pc - 4, va, opcode, reg);
}
}
- if (test_thread_flag (TIF_UAC_SIGBUS))
+ if ((current_thread_info()->status & TS_UAC_SIGBUS))
goto give_sigbus;
/* Not sure why you'd want to use this, but... */
- if (test_thread_flag (TIF_UAC_NOFIX))
+ if ((current_thread_info()->status & TS_UAC_NOFIX))
return;
/* Don't bother reading ds in the access check since we already
select CPU_FEROCEON
select GENERIC_CLOCKEVENTS
select PCI
+ select PCI_QUIRKS
select PLAT_ORION_LEGACY
help
Support for the following Marvell Kirkwood series SoCs:
default "4"
config HOTPLUG_CPU
- bool "Support for hot-pluggable CPUs (EXPERIMENTAL)"
- depends on SMP && HOTPLUG && EXPERIMENTAL
+ bool "Support for hot-pluggable CPUs"
+ depends on SMP && HOTPLUG
help
Say Y here to experiment with turning CPUs off and on. CPUs
can be controlled through /sys/devices/system/cpu.
default 100
config THUMB2_KERNEL
- bool "Compile the kernel in Thumb-2 mode (EXPERIMENTAL)"
- depends on CPU_V7 && !CPU_V6 && !CPU_V6K && EXPERIMENTAL
+ bool "Compile the kernel in Thumb-2 mode"
+ depends on CPU_V7 && !CPU_V6 && !CPU_V6K
select AEABI
select ARM_ASM_UNIFIED
select ARM_UNWIND
config XEN
bool "Xen guest support on ARM (EXPERIMENTAL)"
depends on EXPERIMENTAL && ARM && OF
+ depends on CPU_V7 && !CPU_V6
help
Say Y if you want to run Linux in a Virtual Machine on Xen on ARM.
OBJCOPYFLAGS :=-O binary -R .comment -S
GZFLAGS :=-9
#KBUILD_CFLAGS +=-pipe
-# Explicitly specifiy 32-bit ARM ISA since toolchain default can be -mthumb:
-KBUILD_CFLAGS +=$(call cc-option,-marm,)
# Never generate .eh_frame
KBUILD_CFLAGS += $(call cc-option,-fno-dwarf2-cfi-asm)
ifeq ($(CONFIG_THUMB2_KERNEL),y)
AFLAGS_AUTOIT :=$(call as-option,-Wa$(comma)-mimplicit-it=always,-Wa$(comma)-mauto-it)
AFLAGS_NOWARN :=$(call as-option,-Wa$(comma)-mno-warn-deprecated,-Wa$(comma)-W)
-CFLAGS_THUMB2 :=-mthumb $(AFLAGS_AUTOIT) $(AFLAGS_NOWARN)
-AFLAGS_THUMB2 :=$(CFLAGS_THUMB2) -Wa$(comma)-mthumb
+CFLAGS_ISA :=-mthumb $(AFLAGS_AUTOIT) $(AFLAGS_NOWARN)
+AFLAGS_ISA :=$(CFLAGS_ISA) -Wa$(comma)-mthumb
# Work around buggy relocation from gas if requested:
ifeq ($(CONFIG_THUMB2_AVOID_R_ARM_THM_JUMP11),y)
CFLAGS_MODULE +=-fno-optimize-sibling-calls
endif
+else
+CFLAGS_ISA :=$(call cc-option,-marm,)
+AFLAGS_ISA :=$(CFLAGS_ISA)
endif
# Need -Uarm for gcc < 3.x
-KBUILD_CFLAGS +=$(CFLAGS_ABI) $(CFLAGS_THUMB2) $(arch-y) $(tune-y) $(call cc-option,-mshort-load-bytes,$(call cc-option,-malignment-traps,)) -msoft-float -Uarm
-KBUILD_AFLAGS +=$(CFLAGS_ABI) $(AFLAGS_THUMB2) $(arch-y) $(tune-y) -include asm/unified.h -msoft-float
+KBUILD_CFLAGS +=$(CFLAGS_ABI) $(CFLAGS_ISA) $(arch-y) $(tune-y) $(call cc-option,-mshort-load-bytes,$(call cc-option,-malignment-traps,)) -msoft-float -Uarm
+KBUILD_AFLAGS +=$(CFLAGS_ABI) $(AFLAGS_ISA) $(arch-y) $(tune-y) -include asm/unified.h -msoft-float
CHECKFLAGS += -D__arm__
$(obj)/xipImage: vmlinux FORCE
$(call if_changed,objcopy)
- @echo ' Kernel: $@ is ready (physical address: $(CONFIG_XIP_PHYS_ADDR))'
+ @$(kecho) ' Kernel: $@ is ready (physical address: $(CONFIG_XIP_PHYS_ADDR))'
$(obj)/Image $(obj)/zImage: FORCE
@echo 'Kernel configured for XIP (CONFIG_XIP_KERNEL=y)'
$(obj)/Image: vmlinux FORCE
$(call if_changed,objcopy)
- @echo ' Kernel: $@ is ready'
+ @$(kecho) ' Kernel: $@ is ready'
$(obj)/compressed/vmlinux: $(obj)/Image FORCE
$(Q)$(MAKE) $(build)=$(obj)/compressed $@
$(obj)/zImage: $(obj)/compressed/vmlinux FORCE
$(call if_changed,objcopy)
- @echo ' Kernel: $@ is ready'
+ @$(kecho) ' Kernel: $@ is ready'
endif
$(obj)/uImage: $(obj)/zImage FORCE
@$(check_for_multiple_loadaddr)
$(call if_changed,uimage)
- @echo ' Image $@ is ready'
+ @$(kecho) ' Image $@ is ready'
$(obj)/bootp/bootp: $(obj)/zImage initrd FORCE
$(Q)$(MAKE) $(build)=$(obj)/bootp $@
$(obj)/bootpImage: $(obj)/bootp/bootp FORCE
$(call if_changed,objcopy)
- @echo ' Kernel: $@ is ready'
+ @$(kecho) ' Kernel: $@ is ready'
PHONY += initrd FORCE
initrd:
mov pc, lr
ENDPROC(__setup_mmu)
+@ Enable unaligned access on v6, to allow better code generation
+@ for the decompressor C code:
+__armv6_mmu_cache_on:
+ mrc p15, 0, r0, c1, c0, 0 @ read SCTLR
+ bic r0, r0, #2 @ A (no unaligned access fault)
+ orr r0, r0, #1 << 22 @ U (v6 unaligned access model)
+ mcr p15, 0, r0, c1, c0, 0 @ write SCTLR
+ b __armv4_mmu_cache_on
+
__arm926ejs_mmu_cache_on:
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
mov r0, #4 @ put dcache in WT mode
bic r0, r0, #1 << 28 @ clear SCTLR.TRE
orr r0, r0, #0x5000 @ I-cache enable, RR cache replacement
orr r0, r0, #0x003c @ write buffer
+ bic r0, r0, #2 @ A (no unaligned access fault)
+ orr r0, r0, #1 << 22 @ U (v6 unaligned access model)
+ @ (needed for ARM1176)
#ifdef CONFIG_MMU
#ifdef CONFIG_CPU_ENDIAN_BE8
orr r0, r0, #1 << 25 @ big-endian page tables
.word 0x0007b000 @ ARMv6
.word 0x000ff000
- W(b) __armv4_mmu_cache_on
+ W(b) __armv6_mmu_cache_on
W(b) __armv4_mmu_cache_off
W(b) __armv6_mmu_cache_flush
#size-cells = <0>;
btn3 {
- label = "Buttin 3";
+ label = "Button 3";
gpios = <&pioA 30 1>;
linux,code = <0x103>;
gpio-key,wakeup;
};
btn4 {
- label = "Buttin 4";
+ label = "Button 4";
gpios = <&pioA 31 1>;
linux,code = <0x104>;
gpio-key,wakeup;
compatible = "stericsson,db8500-i2c", "st,nomadik-i2c", "arm,primecell";
reg = <0x80004000 0x1000>;
interrupts = <0 21 0x4>;
+ arm,primecell-periphid = <0x180024>;
+
#address-cells = <1>;
#size-cells = <0>;
v-i2c-supply = <&db8500_vape_reg>;
compatible = "stericsson,db8500-i2c", "st,nomadik-i2c", "arm,primecell";
reg = <0x80122000 0x1000>;
interrupts = <0 22 0x4>;
+ arm,primecell-periphid = <0x180024>;
+
#address-cells = <1>;
#size-cells = <0>;
v-i2c-supply = <&db8500_vape_reg>;
compatible = "stericsson,db8500-i2c", "st,nomadik-i2c", "arm,primecell";
reg = <0x80128000 0x1000>;
interrupts = <0 55 0x4>;
+ arm,primecell-periphid = <0x180024>;
+
#address-cells = <1>;
#size-cells = <0>;
v-i2c-supply = <&db8500_vape_reg>;
compatible = "stericsson,db8500-i2c", "st,nomadik-i2c", "arm,primecell";
reg = <0x80110000 0x1000>;
interrupts = <0 12 0x4>;
+ arm,primecell-periphid = <0x180024>;
+
#address-cells = <1>;
#size-cells = <0>;
v-i2c-supply = <&db8500_vape_reg>;
compatible = "stericsson,db8500-i2c", "st,nomadik-i2c", "arm,primecell";
reg = <0x8012a000 0x1000>;
interrupts = <0 51 0x4>;
+ arm,primecell-periphid = <0x180024>;
+
#address-cells = <1>;
#size-cells = <0>;
v-i2c-supply = <&db8500_vape_reg>;
interrupts = <0 60 0x4>;
status = "disabled";
};
+
sdi@80118000 {
compatible = "arm,pl18x", "arm,primecell";
reg = <0x80118000 0x1000>;
interrupts = <0 50 0x4>;
status = "disabled";
};
+
sdi@80005000 {
compatible = "arm,pl18x", "arm,primecell";
reg = <0x80005000 0x1000>;
interrupts = <0 41 0x4>;
status = "disabled";
};
+
sdi@80119000 {
compatible = "arm,pl18x", "arm,primecell";
reg = <0x80119000 0x1000>;
interrupts = <0 59 0x4>;
status = "disabled";
};
+
sdi@80114000 {
compatible = "arm,pl18x", "arm,primecell";
reg = <0x80114000 0x1000>;
interrupts = <0 99 0x4>;
status = "disabled";
};
+
sdi@80008000 {
compatible = "arm,pl18x", "arm,primecell";
- reg = <0x80114000 0x1000>;
+ reg = <0x80008000 0x1000>;
interrupts = <0 100 0x4>;
status = "disabled";
};
compatible = "samsung,trats", "samsung,exynos4210";
memory {
- reg = <0x40000000 0x20000000
- 0x60000000 0x20000000>;
+ reg = <0x40000000 0x10000000
+ 0x50000000 0x10000000
+ 0x60000000 0x10000000
+ 0x70000000 0x10000000>;
};
chosen {
interrupts = <13>, <56>;
interrupt-names = "gpmi-dma", "bch";
clocks = <&clks 34>;
+ clock-names = "gpmi_io";
fsl,gpmi-dma-channel = <4>;
status = "disabled";
};
interrupts = <88>, <41>;
interrupt-names = "gpmi-dma", "bch";
clocks = <&clks 50>;
+ clock-names = "gpmi_io";
fsl,gpmi-dma-channel = <4>;
status = "disabled";
};
interrupt-names = "common", "tx", "rx", "sidetone";
interrupt-parent = <&intc>;
ti,buffer-size = <1280>;
- ti,hwmods = "mcbsp2";
+ ti,hwmods = "mcbsp2", "mcbsp2_sidetone";
};
mcbsp3: mcbsp@49024000 {
interrupt-names = "common", "tx", "rx", "sidetone";
interrupt-parent = <&intc>;
ti,buffer-size = <128>;
- ti,hwmods = "mcbsp3";
+ ti,hwmods = "mcbsp3", "mcbsp3_sidetone";
};
mcbsp4: mcbsp@49026000 {
pinmux: pinmux {
compatible = "nvidia,tegra30-pinmux";
- reg = <0x70000868 0xd0 /* Pad control registers */
- 0x70003000 0x3e0>; /* Mux registers */
+ reg = <0x70000868 0xd4 /* Pad control registers */
+ 0x70003000 0x3e4>; /* Mux registers */
};
serial@70006000 {
.set_mode = sp804_set_mode,
.set_next_event = sp804_set_next_event,
.rating = 300,
- .cpumask = cpu_all_mask,
};
static struct irqaction sp804_timer_irq = {
clkevt_reload = DIV_ROUND_CLOSEST(rate, HZ);
evt->name = name;
evt->irq = irq;
+ evt->cpumask = cpu_possible_mask;
setup_irq(irq, &sp804_timer_irq);
clockevents_config_and_register(evt, rate, 0xf, 0xffffffff);
CONFIG_SPI=y
CONFIG_SPI_IMX=y
CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_MC9S08DZ60=y
# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
CONFIG_IMX2_WDT=y
CONFIG_SOC_CAMERA_OV2640=y
CONFIG_VIDEO_MX3=y
CONFIG_FB=y
+CONFIG_LCD_PLATFORM=y
CONFIG_BACKLIGHT_LCD_SUPPORT=y
CONFIG_LCD_CLASS_DEVICE=y
CONFIG_LCD_L4F00242T03=y
CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
-CONFIG_NO_HZ=y
+CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_ARCH_MVEBU=y
-CONFIG_MACH_ARMADA_370_XP=y
+CONFIG_MACH_ARMADA_370=y
+CONFIG_MACH_ARMADA_XP=y
+# CONFIG_CACHE_L2X0 is not set
CONFIG_AEABI=y
CONFIG_HIGHMEM=y
-CONFIG_USE_OF=y
+# CONFIG_COMPACTION is not set
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_ARM_APPENDED_DTB=y
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_IOMMU_SUPPORT is not set
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_ARCH_VERSATILE=y
CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
#define flat_argvp_envp_on_stack() 1
#define flat_old_ram_flag(flags) (flags)
#define flat_reloc_valid(reloc, size) ((reloc) <= (size))
-#define flat_get_addr_from_rp(rp, relval, flags, persistent) get_unaligned(rp)
+#define flat_get_addr_from_rp(rp, relval, flags, persistent) ((void)persistent,get_unaligned(rp))
#define flat_put_addr_at_rp(rp, val, relval) put_unaligned(val,rp)
#define flat_get_relocate_addr(rel) (rel)
#define flat_set_persistent(relval, p) 0
static inline void __raw_writew(u16 val, volatile void __iomem *addr)
{
asm volatile("strh %1, %0"
- : "+Qo" (*(volatile u16 __force *)addr)
+ : "+Q" (*(volatile u16 __force *)addr)
: "r" (val));
}
{
u16 val;
asm volatile("ldrh %1, %0"
- : "+Qo" (*(volatile u16 __force *)addr),
+ : "+Q" (*(volatile u16 __force *)addr),
"=r" (val));
return val;
}
extern void sched_clock_postinit(void);
extern void setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate);
-extern void setup_sched_clock_needs_suspend(u32 (*read)(void), int bits,
- unsigned long rate);
#endif
#define USER_DS KERNEL_DS
#define segment_eq(a,b) (1)
-#define __addr_ok(addr) (1)
-#define __range_ok(addr,size) (0)
+#define __addr_ok(addr) ((void)(addr),1)
+#define __range_ok(addr,size) ((void)(addr),0)
#define get_fs() (KERNEL_DS)
static inline void set_fs(mm_segment_t fs)
#if __LINUX_ARM_ARCH__ <= 6
ldr \tmp, =elf_hwcap @ may not have MVFR regs
ldr \tmp, [\tmp, #0]
- tst \tmp, #HWCAP_VFPv3D16
- ldceql p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31}
- addne \base, \base, #32*4 @ step over unused register space
+ tst \tmp, #HWCAP_VFPD32
+ ldcnel p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31}
+ addeq \base, \base, #32*4 @ step over unused register space
#else
VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0
and \tmp, \tmp, #MVFR0_A_SIMD_MASK @ A_SIMD field
#if __LINUX_ARM_ARCH__ <= 6
ldr \tmp, =elf_hwcap @ may not have MVFR regs
ldr \tmp, [\tmp, #0]
- tst \tmp, #HWCAP_VFPv3D16
- stceql p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31}
- addne \base, \base, #32*4 @ step over unused register space
+ tst \tmp, #HWCAP_VFPD32
+ stcnel p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31}
+ addeq \base, \base, #32*4 @ step over unused register space
#else
VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0
and \tmp, \tmp, #MVFR0_A_SIMD_MASK @ A_SIMD field
#ifndef __ASSEMBLY__
/* Explicitly size integers that represent pfns in the interface with
- * Xen so that we can have one ABI that works for 32 and 64 bit guests. */
+ * Xen so that we can have one ABI that works for 32 and 64 bit guests.
+ * Note that this means that the xen_pfn_t type may be capable of
+ * representing pfn's which the guest cannot represent in its own pfn
+ * type. However since pfn space is controlled by the guest this is
+ * fine since it simply wouldn't be able to create any such pfns in
+ * the first place.
+ */
typedef uint64_t xen_pfn_t;
+#define PRI_xen_pfn "llx"
typedef uint64_t xen_ulong_t;
+#define PRI_xen_ulong "llx"
/* Guest handles for primitive C types. */
__DEFINE_GUEST_HANDLE(uchar, unsigned char);
__DEFINE_GUEST_HANDLE(uint, unsigned int);
-__DEFINE_GUEST_HANDLE(ulong, unsigned long);
DEFINE_GUEST_HANDLE(char);
DEFINE_GUEST_HANDLE(int);
-DEFINE_GUEST_HANDLE(long);
DEFINE_GUEST_HANDLE(void);
DEFINE_GUEST_HANDLE(uint64_t);
DEFINE_GUEST_HANDLE(uint32_t);
#include <xen/interface/grant_table.h>
#define pfn_to_mfn(pfn) (pfn)
-#define phys_to_machine_mapping_valid (1)
+#define phys_to_machine_mapping_valid(pfn) (1)
#define mfn_to_pfn(mfn) (mfn)
#define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT))
#define XMADDR(x) ((xmaddr_t) { .maddr = (x) })
#define XPADDR(x) ((xpaddr_t) { .paddr = (x) })
+#define INVALID_P2M_ENTRY (~0UL)
+
static inline xmaddr_t phys_to_machine(xpaddr_t phys)
{
unsigned offset = phys.paddr & ~PAGE_MASK;
return 0;
}
+static inline bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
+{
+ BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
+ return true;
+}
+
static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
- BUG();
- return false;
+ return __set_phys_to_machine(pfn, mfn);
}
#endif /* _ASM_ARM_XEN_PAGE_H */
--- /dev/null
+/*
+ * Copyright (c) 2011 Picochip Ltd., Jamie Iles
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Derived from arch/arm/mach-davinci/include/mach/debug-macro.S to use 32-bit
+ * accesses to the 8250.
+ */
+
+#include <linux/serial_reg.h>
+
+ .macro senduart,rd,rx
+ str \rd, [\rx, #UART_TX << UART_SHIFT]
+ .endm
+
+ .macro busyuart,rd,rx
+1002: ldr \rd, [\rx, #UART_LSR << UART_SHIFT]
+ and \rd, \rd, #UART_LSR_TEMT | UART_LSR_THRE
+ teq \rd, #UART_LSR_TEMT | UART_LSR_THRE
+ bne 1002b
+ .endm
+
+	/* The UARTs don't have any flow control IOs wired up. */
+ .macro waituart,rd,rx
+ .endm
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
- * Derived from arch/arm/mach-davinci/include/mach/debug-macro.S to use 32-bit
- * accesses to the 8250.
*/
-#include <linux/serial_reg.h>
#define UART_SHIFT 2
#define PICOXCELL_UART1_BASE 0x80230000
ldr \rp, =PICOXCELL_UART1_BASE
.endm
- .macro senduart,rd,rx
- str \rd, [\rx, #UART_TX << UART_SHIFT]
- .endm
-
- .macro busyuart,rd,rx
-1002: ldr \rd, [\rx, #UART_LSR << UART_SHIFT]
- and \rd, \rd, #UART_LSR_TEMT | UART_LSR_THRE
- teq \rd, #UART_LSR_TEMT | UART_LSR_THRE
- bne 1002b
- .endm
-
- /* The UART's don't have any flow control IO's wired up. */
- .macro waituart,rd,rx
- .endm
+#include "8250_32.S"
* published by the Free Software Foundation.
*/
+#define UART_SHIFT 2
+#define DEBUG_LL_UART_OFFSET 0x00002000
+
.macro addruart, rp, rv, tmp
mov \rp, #DEBUG_LL_UART_OFFSET
orr \rp, \rp, #0x00c00000
orr \rp, \rp, #0xff000000 @ physical base
.endm
+#include "8250_32.S"
+
#define HWCAP_THUMBEE (1 << 11)
#define HWCAP_NEON (1 << 12)
#define HWCAP_VFPv3 (1 << 13)
-#define HWCAP_VFPv3D16 (1 << 14)
+#define HWCAP_VFPv3D16 (1 << 14) /* also set for VFPv4-D16 */
#define HWCAP_TLS (1 << 15)
#define HWCAP_VFPv4 (1 << 16)
#define HWCAP_IDIVA (1 << 17)
#define HWCAP_IDIVT (1 << 18)
+#define HWCAP_VFPD32 (1 << 19) /* set if VFP has 32 regs (not 16) */
#define HWCAP_IDIV (HWCAP_IDIVA | HWCAP_IDIVT)
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/proc_fs.h>
+#include <linux/export.h>
#include <asm/exception.h>
#include <asm/mach/arch.h>
/* Order is clear bits in "clr" then set bits in "set" */
irq_modify_status(irq, clr, set & ~clr);
}
+EXPORT_SYMBOL_GPL(set_irq_flags);
void __init init_IRQ(void)
{
TEST_UNSUPPORTED(".word 0xe04f0392 @ umaal r0, pc, r2, r3")
TEST_UNSUPPORTED(".word 0xe0500090 @ undef")
TEST_UNSUPPORTED(".word 0xe05fff9f @ undef")
+#endif
+#if __LINUX_ARM_ARCH__ >= 7
TEST_RRR( "mls r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"")
TEST_RRR( "mlshi r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
TEST_RR( "mls lr, r",1, VAL2,", r",2, VAL3,", r13")
TEST_UNSUPPORTED(".word 0xe1700090") /* Unallocated space */
#if __LINUX_ARM_ARCH__ >= 6
TEST_UNSUPPORTED("ldrex r2, [sp]")
+#endif
+#if (__LINUX_ARM_ARCH__ >= 7) || defined(CONFIG_CPU_32v6K)
TEST_UNSUPPORTED("strexd r0, r2, r3, [sp]")
TEST_UNSUPPORTED("ldrexd r2, r3, [sp]")
TEST_UNSUPPORTED("strexb r0, r2, [sp]")
for (i = 0; i < image->nr_segments; i++) {
current_segment = &image->segment[i];
- err = memblock_is_region_memory(current_segment->mem,
- current_segment->memsz);
- if (err)
- return - EINVAL;
+ if (!memblock_is_region_memory(current_segment->mem,
+ current_segment->memsz))
+ return -EINVAL;
err = get_user(header, (__be32*)current_segment->buf);
if (err)
s64 period = hwc->sample_period;
int ret = 0;
+ /* The period may have been changed by PERF_EVENT_IOC_PERIOD */
+ if (unlikely(period != hwc->last_period))
+ left = period - (hwc->last_period - left);
+
if (unlikely(left <= -period)) {
left = period;
local64_set(&hwc->period_left, left);
update_sched_clock();
}
-void __init setup_sched_clock_needs_suspend(u32 (*read)(void), int bits,
- unsigned long rate)
-{
- setup_sched_clock(read, bits, rate);
- cd.needs_suspend = true;
-}
-
void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
{
unsigned long r, w;
static int sched_clock_suspend(void)
{
sched_clock_poll(sched_clock_timer.data);
- if (cd.needs_suspend)
- cd.suspended = true;
+ cd.suspended = true;
return 0;
}
static void sched_clock_resume(void)
{
- if (cd.needs_suspend) {
- cd.epoch_cyc = read_sched_clock();
- cd.epoch_cyc_copy = cd.epoch_cyc;
- cd.suspended = false;
- }
+ cd.epoch_cyc = read_sched_clock();
+ cd.epoch_cyc_copy = cd.epoch_cyc;
+ cd.suspended = false;
}
static struct syscore_ops sched_clock_ops = {
asmlinkage void __cpuinit secondary_start_kernel(void)
{
struct mm_struct *mm = &init_mm;
- unsigned int cpu = smp_processor_id();
+ unsigned int cpu;
+
+ /*
+ * The identity mapping is uncached (strongly ordered), so
+ * switch away from it before attempting any exclusive accesses.
+ */
+ cpu_switch_mm(mm->pgd, mm);
+ enter_lazy_tlb(mm, current);
+ local_flush_tlb_all();
/*
* All kernel threads share the same mm context; grab a
* reference and switch to it.
*/
+ cpu = smp_processor_id();
atomic_inc(&mm->mm_count);
current->active_mm = mm;
cpumask_set_cpu(cpu, mm_cpumask(mm));
- cpu_switch_mm(mm->pgd, mm);
- enter_lazy_tlb(mm, current);
- local_flush_tlb_all();
printk("CPU%u: Booted secondary processor\n", cpu);
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
- /* timer load already set up */
ctrl = TWD_TIMER_CONTROL_ENABLE | TWD_TIMER_CONTROL_IT_ENABLE
| TWD_TIMER_CONTROL_PERIODIC;
- __raw_writel(twd_timer_rate / HZ, twd_base + TWD_TIMER_LOAD);
+ __raw_writel(DIV_ROUND_CLOSEST(twd_timer_rate, HZ),
+ twd_base + TWD_TIMER_LOAD);
break;
case CLOCK_EVT_MODE_ONESHOT:
/* period set, and timer enabled in 'next_event' hook */
*timer_val = delay_timer->read_current_timer();
return 0;
}
+EXPORT_SYMBOL_GPL(read_current_timer);
static void __timer_delay(unsigned long cycles)
{
bool
select CPU_ARM926T
select GENERIC_CLOCKEVENTS
+ select MULTI_IRQ_HANDLER
+ select SPARSE_IRQ
menu "Atmel AT91 System-on-Chip"
comment "Atmel AT91 Processor"
-config SOC_AT91SAM9
- bool
- select AT91_SAM9_SMC
- select AT91_SAM9_TIME
- select CPU_ARM926T
- select MULTI_IRQ_HANDLER
- select SPARSE_IRQ
-
config SOC_AT91RM9200
bool "AT91RM9200"
select CPU_ARM920T
CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc0_clk),
CLKDEV_CON_DEV_ID("pclk", "ssc.1", &ssc1_clk),
CLKDEV_CON_DEV_ID("pclk", "ssc.2", &ssc2_clk),
- CLKDEV_CON_DEV_ID(NULL, "i2c-at91rm9200", &twi_clk),
+ CLKDEV_CON_DEV_ID(NULL, "i2c-at91rm9200.0", &twi_clk),
/* fake hclk clock */
CLKDEV_CON_DEV_ID("hclk", "at91_ohci", &ohci_clk),
CLKDEV_CON_ID("pioA", &pioA_clk),
/* Enable overcurrent notification */
for (i = 0; i < data->ports; i++) {
- if (data->overcurrent_pin[i])
+ if (gpio_is_valid(data->overcurrent_pin[i]))
at91_set_gpio_input(data->overcurrent_pin[i], 1);
}
static struct platform_device at91rm9200_twi_device = {
.name = "i2c-gpio",
- .id = -1,
+ .id = 0,
.dev.platform_data = &pdata,
};
static struct platform_device at91rm9200_twi_device = {
.name = "i2c-at91rm9200",
- .id = -1,
+ .id = 0,
.resource = twi_resources,
.num_resources = ARRAY_SIZE(twi_resources),
};
CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.1", &tc4_clk),
CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.1", &tc5_clk),
CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc_clk),
- CLKDEV_CON_DEV_ID(NULL, "i2c-at91sam9260", &twi_clk),
- CLKDEV_CON_DEV_ID(NULL, "i2c-at91sam9g20", &twi_clk),
+ CLKDEV_CON_DEV_ID(NULL, "i2c-at91sam9260.0", &twi_clk),
+ CLKDEV_CON_DEV_ID(NULL, "i2c-at91sam9g20.0", &twi_clk),
/* more usart lookup table for DT entries */
CLKDEV_CON_DEV_ID("usart", "fffff200.serial", &mck),
CLKDEV_CON_DEV_ID("usart", "fffb0000.serial", &usart0_clk),
/* Enable overcurrent notification */
for (i = 0; i < data->ports; i++) {
- if (data->overcurrent_pin[i])
+ if (gpio_is_valid(data->overcurrent_pin[i]))
at91_set_gpio_input(data->overcurrent_pin[i], 1);
}
static struct platform_device at91sam9260_twi_device = {
.name = "i2c-gpio",
- .id = -1,
+ .id = 0,
.dev.platform_data = &pdata,
};
};
static struct platform_device at91sam9260_twi_device = {
- .id = -1,
+ .id = 0,
.resource = twi_resources,
.num_resources = ARRAY_SIZE(twi_resources),
};
CLKDEV_CON_DEV_ID("pclk", "ssc.1", &ssc1_clk),
CLKDEV_CON_DEV_ID("pclk", "ssc.2", &ssc2_clk),
CLKDEV_CON_DEV_ID("hclk", "at91_ohci", &hck0),
- CLKDEV_CON_DEV_ID(NULL, "i2c-at91sam9261", &twi_clk),
- CLKDEV_CON_DEV_ID(NULL, "i2c-at91sam9g10", &twi_clk),
+ CLKDEV_CON_DEV_ID(NULL, "i2c-at91sam9261.0", &twi_clk),
+ CLKDEV_CON_DEV_ID(NULL, "i2c-at91sam9g10.0", &twi_clk),
CLKDEV_CON_ID("pioA", &pioA_clk),
CLKDEV_CON_ID("pioB", &pioB_clk),
CLKDEV_CON_ID("pioC", &pioC_clk),
/* Enable overcurrent notification */
for (i = 0; i < data->ports; i++) {
- if (data->overcurrent_pin[i])
+ if (gpio_is_valid(data->overcurrent_pin[i]))
at91_set_gpio_input(data->overcurrent_pin[i], 1);
}
static struct platform_device at91sam9261_twi_device = {
.name = "i2c-gpio",
- .id = -1,
+ .id = 0,
.dev.platform_data = &pdata,
};
};
static struct platform_device at91sam9261_twi_device = {
- .id = -1,
+ .id = 0,
.resource = twi_resources,
.num_resources = ARRAY_SIZE(twi_resources),
};
CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.0", &spi0_clk),
CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.1", &spi1_clk),
CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tcb_clk),
- CLKDEV_CON_DEV_ID(NULL, "i2c-at91sam9260", &twi_clk),
+ CLKDEV_CON_DEV_ID(NULL, "i2c-at91sam9260.0", &twi_clk),
/* fake hclk clock */
CLKDEV_CON_DEV_ID("hclk", "at91_ohci", &ohci_clk),
CLKDEV_CON_ID("pioA", &pioA_clk),
/* Enable overcurrent notification */
for (i = 0; i < data->ports; i++) {
- if (data->overcurrent_pin[i])
+ if (gpio_is_valid(data->overcurrent_pin[i]))
at91_set_gpio_input(data->overcurrent_pin[i], 1);
}
static struct platform_device at91sam9263_twi_device = {
.name = "i2c-gpio",
- .id = -1,
+ .id = 0,
.dev.platform_data = &pdata,
};
static struct platform_device at91sam9263_twi_device = {
.name = "i2c-at91sam9260",
- .id = -1,
+ .id = 0,
.resource = twi_resources,
.num_resources = ARRAY_SIZE(twi_resources),
};
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9G45_ID_AESTDESSHA,
- .end = AT91SAM9G45_ID_AESTDESSHA,
+ .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA,
+ .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA,
.flags = IORESOURCE_IRQ,
},
};
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9G45_ID_AESTDESSHA,
- .end = AT91SAM9G45_ID_AESTDESSHA,
+ .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA,
+ .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA,
.flags = IORESOURCE_IRQ,
},
};
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9G45_ID_AESTDESSHA,
- .end = AT91SAM9G45_ID_AESTDESSHA,
+ .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA,
+ .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device at91sam9rl_twi_device = {
.name = "i2c-gpio",
- .id = -1,
+ .id = 0,
.dev.platform_data = &pdata,
};
static struct platform_device at91sam9rl_twi_device = {
.name = "i2c-at91sam9g20",
- .id = -1,
+ .id = 0,
.resource = twi_resources,
.num_resources = ARRAY_SIZE(twi_resources),
};
if (!priority)
priority = at91x40_default_irq_priority;
- at91_aic_init(priority);
+ at91_aic_init(priority, at91_extern_irq);
}
.max_speed_hz = 125000 * 16,
.bus_num = 0,
.platform_data = &ads_info,
- .irq = AT91SAM9263_ID_IRQ1,
+ .irq = NR_IRQS_LEGACY + AT91SAM9263_ID_IRQ1,
},
#endif
};
.max_speed_hz = 125000 * 26, /* (max sample rate @ 3V) * (cmd + data + overhead) */
.bus_num = 0,
.platform_data = &ads_info,
- .irq = AT91SAM9261_ID_IRQ0,
+ .irq = NR_IRQS_LEGACY + AT91SAM9261_ID_IRQ0,
.controller_data = (void *) AT91_PIN_PA28, /* CS pin */
},
#endif
.max_speed_hz = 125000 * 26, /* (max sample rate @ 3V) * (cmd + data + overhead) */
.bus_num = 0,
.platform_data = &ads_info,
- .irq = AT91SAM9263_ID_IRQ1,
+ .irq = NR_IRQS_LEGACY + AT91SAM9263_ID_IRQ1,
},
#endif
};
extern void __init at91_init_irq_default(void);
extern void __init at91_init_interrupts(unsigned int priority[]);
extern void __init at91x40_init_interrupts(unsigned int priority[]);
-extern void __init at91_aic_init(unsigned int priority[]);
+extern void __init at91_aic_init(unsigned int priority[],
+ unsigned int ext_irq_mask);
extern int __init at91_aic_of_init(struct device_node *node,
struct device_node *parent);
extern int __init at91_aic5_of_init(struct device_node *node,
/*
* Initialize the AIC interrupt controller.
*/
-void __init at91_aic_init(unsigned int *priority)
+void __init at91_aic_init(unsigned int *priority, unsigned int ext_irq_mask)
{
unsigned int i;
int irq_base;
- if (at91_aic_pm_init())
+ at91_extern_irq = kzalloc(BITS_TO_LONGS(n_irqs)
+ * sizeof(*at91_extern_irq), GFP_KERNEL);
+
+ if (at91_aic_pm_init() || at91_extern_irq == NULL)
panic("Unable to allocate bit maps\n");
+ *at91_extern_irq = ext_irq_mask;
+
at91_aic_base = ioremap(AT91_AIC, 512);
if (!at91_aic_base)
panic("Unable to ioremap AIC registers\n");
void __init at91_init_interrupts(unsigned int *priority)
{
/* Initialize the AIC interrupt controller */
- at91_aic_init(priority);
+ at91_aic_init(priority, at91_extern_irq);
/* Enable GPIO interrupts */
at91_gpio_irq_setup();
}
/* at91sam9g10 */
- if ((cidr & ~AT91_CIDR_EXT) == ARCH_ID_AT91SAM9G10) {
+ if ((socid & ~AT91_CIDR_EXT) == ARCH_ID_AT91SAM9G10) {
at91_soc_initdata.type = AT91_SOC_SAM9G10;
at91_boot_soc = at91sam9261_soc;
}
break;
case VPBE_ENC_CUSTOM_TIMINGS:
if (pclock <= 27000000) {
- v |= DM644X_VPSS_MUXSEL_PLL2_MODE |
- DM644X_VPSS_DACCLKEN;
+ v |= DM644X_VPSS_DACCLKEN;
writel(v, DAVINCI_SYSMOD_VIRT(SYSMOD_VPSS_CLKCTL));
} else {
/*
static inline int irq_to_pmu(int irq)
{
- if (IRQ_DOVE_PMU_START < irq && irq < NR_IRQS)
+ if (IRQ_DOVE_PMU_START <= irq && irq < NR_IRQS)
return irq - IRQ_DOVE_PMU_START;
return -EINVAL;
int pin = irq_to_pmu(d->irq);
u32 u;
+ /*
+ * The PMU mask register is not RW0C: it is RW. This means that
+ * the bits take whatever value is written to them; if you write
+ * a '1', you will set the interrupt.
+ *
+	 * Unfortunately this means there is NO race-free way to clear
+	 * these interrupts.
+ *
+ * So, let's structure the code so that the window is as small as
+ * possible.
+ */
u = ~(1 << (pin & 31));
- writel(u, PMU_INTERRUPT_CAUSE);
+ u &= readl_relaxed(PMU_INTERRUPT_CAUSE);
+ writel_relaxed(u, PMU_INTERRUPT_CAUSE);
}
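For contrast, a sketch of the two acknowledge styles (CAUSE_REG is hypothetical). With write-zero-to-clear (RW0C) semantics a single store is race-free; the plain RW register above forces a read-modify-write with an unavoidable window:

    /* RW0C hardware: bits written as 0 are cleared, bits written as 1
     * are left untouched -- one store, no race */
    writel_relaxed(~(1 << pin), CAUSE_REG);

    /* RW hardware (this PMU): the register latches whatever is written,
     * so the current causes must be read back first; an interrupt
     * arriving between the read and the write is lost */
    writel_relaxed(readl_relaxed(CAUSE_REG) & ~(1 << pin), CAUSE_REG);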
static struct irq_chip pmu_irq_chip = {
#include <plat/fimc-core.h>
#include <plat/iic-core.h>
#include <plat/tv-core.h>
+#include <plat/spi-core.h>
#include <plat/regs-serial.h>
#include "common.h"
s5p_fb_setname(0, "exynos4-fb");
s5p_hdmi_setname("exynos4-hdmi");
+
+ s3c64xx_spi_setname("exynos4210-spi");
}
static void __init exynos5_map_io(void)
s3c_i2c0_setname("s3c2440-i2c");
s3c_i2c1_setname("s3c2440-i2c");
s3c_i2c2_setname("s3c2440-i2c");
+
+ s3c64xx_spi_setname("exynos4210-spi");
}
static void __init exynos4_init_clocks(int xtal)
exynos_pdma1_pdata.nr_valid_peri =
ARRAY_SIZE(exynos4210_pdma1_peri);
exynos_pdma1_pdata.peri_id = exynos4210_pdma1_peri;
+
+ if (samsung_rev() == EXYNOS4210_REV_0)
+ exynos_mdma1_device.res.start = EXYNOS4_PA_S_MDMA1;
} else if (soc_is_exynos4212() || soc_is_exynos4412()) {
exynos_pdma0_pdata.nr_valid_peri =
ARRAY_SIZE(exynos4212_pdma0_peri);
#define EXYNOS4_PA_MDMA0 0x10810000
#define EXYNOS4_PA_MDMA1 0x12850000
+#define EXYNOS4_PA_S_MDMA1 0x12840000
#define EXYNOS4_PA_PDMA0 0x12680000
#define EXYNOS4_PA_PDMA1 0x12690000
#define EXYNOS5_PA_MDMA0 0x10800000
DT_MACHINE_START(EXYNOS4210_DT, "Samsung Exynos4 (Flattened Device Tree)")
/* Maintainer: Thomas Abraham <thomas.abraham@linaro.org> */
+ .smp = smp_ops(exynos_smp_ops),
.init_irq = exynos4_init_irq,
.map_io = exynos4_dt_map_io,
.handle_irq = gic_handle_irq,
hignbank_set_pwr_soft_reset();
scu_power_mode(scu_base_addr, SCU_PM_POWEROFF);
- cpu_do_idle();
+ while (1)
+ cpu_do_idle();
}
busy->div.hw.init = &init;
clk = clk_register(NULL, &busy->div.hw);
- if (!clk)
+ if (IS_ERR(clk))
kfree(busy);
return clk;
clk = clk_register(dev, &gate->hw);
if (IS_ERR(clk))
- kfree(clk);
+ kfree(gate);
return clk;
}
clk[esdhc2_ipg_per] = imx_clk_gate("esdhc2_ipg_per", "per4", ccm(CCM_CGCR0), 4);
clk[gpt_ipg_per] = imx_clk_gate("gpt_ipg_per", "per5", ccm(CCM_CGCR0), 5);
clk[i2c_ipg_per] = imx_clk_gate("i2c_ipg_per", "per6", ccm(CCM_CGCR0), 6);
- clk[lcdc_ipg_per] = imx_clk_gate("lcdc_ipg_per", "per8", ccm(CCM_CGCR0), 7);
- clk[nfc_ipg_per] = imx_clk_gate("nfc_ipg_per", "ipg_per", ccm(CCM_CGCR0), 8);
+ clk[lcdc_ipg_per] = imx_clk_gate("lcdc_ipg_per", "per7", ccm(CCM_CGCR0), 7);
+ clk[nfc_ipg_per] = imx_clk_gate("nfc_ipg_per", "per8", ccm(CCM_CGCR0), 8);
clk[ssi1_ipg_per] = imx_clk_gate("ssi1_ipg_per", "per13", ccm(CCM_CGCR0), 13);
clk[ssi2_ipg_per] = imx_clk_gate("ssi2_ipg_per", "per14", ccm(CCM_CGCR0), 14);
clk[uart_ipg_per] = imx_clk_gate("uart_ipg_per", "per15", ccm(CCM_CGCR0), 15);
clk[per3_div] = imx_clk_divider("per3_div", "mpll_main2", CCM_PCDR1, 16, 6);
clk[per4_div] = imx_clk_divider("per4_div", "mpll_main2", CCM_PCDR1, 24, 6);
clk[vpu_sel] = imx_clk_mux("vpu_sel", CCM_CSCR, 21, 1, vpu_sel_clks, ARRAY_SIZE(vpu_sel_clks));
- clk[vpu_div] = imx_clk_divider("vpu_div", "vpu_sel", CCM_PCDR0, 10, 3);
+ clk[vpu_div] = imx_clk_divider("vpu_div", "vpu_sel", CCM_PCDR0, 10, 6);
clk[usb_div] = imx_clk_divider("usb_div", "spll", CCM_CSCR, 28, 3);
clk[cpu_sel] = imx_clk_mux("cpu_sel", CCM_CSCR, 15, 1, cpu_sel_clks, ARRAY_SIZE(cpu_sel_clks));
clk[clko_sel] = imx_clk_mux("clko_sel", CCM_CCSR, 0, 5, clko_sel_clks, ARRAY_SIZE(clko_sel_clks));
clk[ssi1_sel] = imx_clk_mux("ssi1_sel", CCM_CSCR, 22, 1, ssi_sel_clks, ARRAY_SIZE(ssi_sel_clks));
clk[ssi2_sel] = imx_clk_mux("ssi2_sel", CCM_CSCR, 23, 1, ssi_sel_clks, ARRAY_SIZE(ssi_sel_clks));
clk[ssi1_div] = imx_clk_divider("ssi1_div", "ssi1_sel", CCM_PCDR0, 16, 6);
- clk[ssi2_div] = imx_clk_divider("ssi2_div", "ssi2_sel", CCM_PCDR0, 26, 3);
+ clk[ssi2_div] = imx_clk_divider("ssi2_div", "ssi2_sel", CCM_PCDR0, 26, 6);
clk[clko_en] = imx_clk_gate("clko_en", "clko_div", CCM_PCCR0, 0);
clk[ssi2_ipg_gate] = imx_clk_gate("ssi2_ipg_gate", "ipg", CCM_PCCR0, 0);
clk[ssi1_ipg_gate] = imx_clk_gate("ssi1_ipg_gate", "ipg", CCM_PCCR0, 1);
#define MX25_H1_SIC_SHIFT 21
#define MX25_H1_SIC_MASK (0x3 << MX25_H1_SIC_SHIFT)
#define MX25_H1_PP_BIT (1 << 18)
-#define MX25_H1_PM_BIT (1 << 8)
+#define MX25_H1_PM_BIT (1 << 16)
#define MX25_H1_IPPUE_UP_BIT (1 << 7)
#define MX25_H1_IPPUE_DOWN_BIT (1 << 6)
#define MX25_H1_TLL_BIT (1 << 5)
#define MX35_H1_SIC_SHIFT 21
#define MX35_H1_SIC_MASK (0x3 << MX35_H1_SIC_SHIFT)
#define MX35_H1_PP_BIT (1 << 18)
-#define MX35_H1_PM_BIT (1 << 8)
+#define MX35_H1_PM_BIT (1 << 16)
#define MX35_H1_IPPUE_UP_BIT (1 << 7)
#define MX35_H1_IPPUE_DOWN_BIT (1 << 6)
#define MX35_H1_TLL_BIT (1 << 5)
}
l2x0_base = ioremap(MX3x_L2CC_BASE_ADDR, 4096);
- if (IS_ERR(l2x0_base)) {
- printk(KERN_ERR "remapping L2 cache area failed with %ld\n",
- PTR_ERR(l2x0_base));
+ if (!l2x0_base) {
+ printk(KERN_ERR "remapping L2 cache area failed\n");
return;
}
* Enable the IO window to be way up high, at 0xfffffc00
*/
local_write_config(PCI_BASE_ADDRESS_5, 4, 0xfffffc01);
+ local_write_config(0x40, 4, 0x000080FF); /* No TRDY time limit */
} else {
printk("PCI: IXP4xx is target - No bus scan performed\n");
}
.pfn = __phys_to_pfn(IXP4XX_PCI_CFG_BASE_PHYS),
.length = IXP4XX_PCI_CFG_REGION_SIZE,
.type = MT_DEVICE
- },
-#ifdef CONFIG_DEBUG_LL
- { /* Debug UART mapping */
- .virtual = (unsigned long)IXP4XX_DEBUG_UART_BASE_VIRT,
- .pfn = __phys_to_pfn(IXP4XX_DEBUG_UART_BASE_PHYS),
- .length = IXP4XX_DEBUG_UART_REGION_SIZE,
+ }, { /* Queue Manager */
+ .virtual = (unsigned long)IXP4XX_QMGR_BASE_VIRT,
+ .pfn = __phys_to_pfn(IXP4XX_QMGR_BASE_PHYS),
+ .length = IXP4XX_QMGR_REGION_SIZE,
.type = MT_DEVICE
- }
-#endif
+ },
};
void __init ixp4xx_map_io(void)
#include <asm/mach/arch.h>
#include <asm/mach/flash.h>
#include <asm/mach/pci.h>
+#include <asm/system_info.h>
#define SLOT_ETHA 0x0B /* IDSEL = AD21 */
#define SLOT_ETHB 0x0C /* IDSEL = AD20 */
};
-static struct platform_device *device_tab[6] __initdata = {
+static struct platform_device *device_tab[7] __initdata = {
&device_flash, /* index 0 */
};
#else
mov \rp, #0
#endif
- orr \rv, \rp, #0xff000000 @ virtual
- orr \rv, \rv, #0x00b00000
+ orr \rv, \rp, #0xfe000000 @ virtual
+ orr \rv, \rv, #0x00f00000
orr \rp, \rp, #0xc8000000 @ physical
.endm
*
* 0x50000000 0x10000000 ioremap'd EXP BUS
*
- * 0x6000000 0x00004000 ioremap'd QMgr
+ * 0xC8000000 0x00013000 0xFEF00000 On-Chip Peripherals
*
- * 0xC0000000 0x00001000 0xffbff000 PCI CFG
+ * 0xC0000000 0x00001000 0xFEF13000 PCI CFG
*
- * 0xC4000000 0x00001000 0xffbfe000 EXP CFG
+ * 0xC4000000 0x00001000 0xFEF14000 EXP CFG
*
- * 0xC8000000 0x00013000 0xffbeb000 On-Chip Peripherals
+ * 0x60000000 0x00004000 0xFEF15000 QMgr
*/
/*
* Queue Manager
*/
-#define IXP4XX_QMGR_BASE_PHYS (0x60000000)
-#define IXP4XX_QMGR_REGION_SIZE (0x00004000)
+#define IXP4XX_QMGR_BASE_PHYS 0x60000000
+#define IXP4XX_QMGR_BASE_VIRT IOMEM(0xFEF15000)
+#define IXP4XX_QMGR_REGION_SIZE 0x00004000
/*
- * Expansion BUS Configuration registers
+ * Peripheral space, including debug UART. Must be section-aligned so that
+ * it can be used with the low-level debug code.
*/
-#define IXP4XX_EXP_CFG_BASE_PHYS (0xC4000000)
-#define IXP4XX_EXP_CFG_BASE_VIRT IOMEM(0xFFBFE000)
-#define IXP4XX_EXP_CFG_REGION_SIZE (0x00001000)
+#define IXP4XX_PERIPHERAL_BASE_PHYS 0xC8000000
+#define IXP4XX_PERIPHERAL_BASE_VIRT IOMEM(0xFEF00000)
+#define IXP4XX_PERIPHERAL_REGION_SIZE 0x00013000
/*
* PCI Config registers
*/
-#define IXP4XX_PCI_CFG_BASE_PHYS (0xC0000000)
-#define IXP4XX_PCI_CFG_BASE_VIRT IOMEM(0xFFBFF000)
-#define IXP4XX_PCI_CFG_REGION_SIZE (0x00001000)
-
-/*
- * Peripheral space
- */
-#define IXP4XX_PERIPHERAL_BASE_PHYS (0xC8000000)
-#define IXP4XX_PERIPHERAL_BASE_VIRT IOMEM(0xFFBEB000)
-#define IXP4XX_PERIPHERAL_REGION_SIZE (0x00013000)
+#define IXP4XX_PCI_CFG_BASE_PHYS 0xC0000000
+#define IXP4XX_PCI_CFG_BASE_VIRT IOMEM(0xFEF13000)
+#define IXP4XX_PCI_CFG_REGION_SIZE 0x00001000
/*
- * Debug UART
- *
- * This is basically a remap of UART1 into a region that is section
- * aligned so that it * can be used with the low-level debug code.
+ * Expansion BUS Configuration registers
*/
-#define IXP4XX_DEBUG_UART_BASE_PHYS (0xC8000000)
-#define IXP4XX_DEBUG_UART_BASE_VIRT IOMEM(0xffb00000)
-#define IXP4XX_DEBUG_UART_REGION_SIZE (0x00001000)
+#define IXP4XX_EXP_CFG_BASE_PHYS 0xC4000000
+#define IXP4XX_EXP_CFG_BASE_VIRT 0xFEF14000
+#define IXP4XX_EXP_CFG_REGION_SIZE 0x00001000
#define IXP4XX_EXP_CS0_OFFSET 0x00
#define IXP4XX_EXP_CS1_OFFSET 0x04
static inline void qmgr_put_entry(unsigned int queue, u32 val)
{
- extern struct qmgr_regs __iomem *qmgr_regs;
+ struct qmgr_regs __iomem *qmgr_regs = IXP4XX_QMGR_BASE_VIRT;
#if DEBUG_QMGR
BUG_ON(!qmgr_queue_descs[queue]); /* not yet requested */
static inline u32 qmgr_get_entry(unsigned int queue)
{
u32 val;
- extern struct qmgr_regs __iomem *qmgr_regs;
+ const struct qmgr_regs __iomem *qmgr_regs = IXP4XX_QMGR_BASE_VIRT;
val = __raw_readl(&qmgr_regs->acc[queue][0]);
#if DEBUG_QMGR
BUG_ON(!qmgr_queue_descs[queue]); /* not yet requested */
static inline int __qmgr_get_stat1(unsigned int queue)
{
- extern struct qmgr_regs __iomem *qmgr_regs;
+ const struct qmgr_regs __iomem *qmgr_regs = IXP4XX_QMGR_BASE_VIRT;
return (__raw_readl(&qmgr_regs->stat1[queue >> 3])
>> ((queue & 7) << 2)) & 0xF;
}
static inline int __qmgr_get_stat2(unsigned int queue)
{
- extern struct qmgr_regs __iomem *qmgr_regs;
+ const struct qmgr_regs __iomem *qmgr_regs = IXP4XX_QMGR_BASE_VIRT;
BUG_ON(queue >= HALF_QUEUES);
return (__raw_readl(&qmgr_regs->stat2[queue >> 4])
>> ((queue & 0xF) << 1)) & 0x3;
*/
static inline int qmgr_stat_below_low_watermark(unsigned int queue)
{
- extern struct qmgr_regs __iomem *qmgr_regs;
+ const struct qmgr_regs __iomem *qmgr_regs = IXP4XX_QMGR_BASE_VIRT;
if (queue >= HALF_QUEUES)
return (__raw_readl(&qmgr_regs->statne_h) >>
(queue - HALF_QUEUES)) & 0x01;
*/
static inline int qmgr_stat_full(unsigned int queue)
{
- extern struct qmgr_regs __iomem *qmgr_regs;
+ const struct qmgr_regs __iomem *qmgr_regs = IXP4XX_QMGR_BASE_VIRT;
if (queue >= HALF_QUEUES)
return (__raw_readl(&qmgr_regs->statf_h) >>
(queue - HALF_QUEUES)) & 0x01;
/* NPE mailbox_status value for reset */
#define RESET_MBOX_STAT 0x0000F0F0
-const char *npe_names[] = { "NPE-A", "NPE-B", "NPE-C" };
+#define NPE_A_FIRMWARE "NPE-A"
+#define NPE_B_FIRMWARE "NPE-B"
+#define NPE_C_FIRMWARE "NPE-C"
+
+const char *npe_names[] = { NPE_A_FIRMWARE, NPE_B_FIRMWARE, NPE_C_FIRMWARE };
#define print_npe(pri, npe, fmt, ...) \
printk(pri "%s: " fmt, npe_name(npe), ## __VA_ARGS__)
MODULE_AUTHOR("Krzysztof Halasa");
MODULE_LICENSE("GPL v2");
+MODULE_FIRMWARE(NPE_A_FIRMWARE);
+MODULE_FIRMWARE(NPE_B_FIRMWARE);
+MODULE_FIRMWARE(NPE_C_FIRMWARE);
EXPORT_SYMBOL(npe_names);
EXPORT_SYMBOL(npe_running);
#include <linux/module.h>
#include <mach/qmgr.h>
-struct qmgr_regs __iomem *qmgr_regs;
+static struct qmgr_regs __iomem *qmgr_regs = IXP4XX_QMGR_BASE_VIRT;
static struct resource *mem_res;
static spinlock_t qmgr_lock;
static u32 used_sram_bitmap[4]; /* 128 16-dword pages */
if (mem_res == NULL)
return -EBUSY;
- qmgr_regs = ioremap(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE);
- if (qmgr_regs == NULL) {
- err = -ENOMEM;
- goto error_map;
- }
-
/* reset qmgr registers */
for (i = 0; i < 4; i++) {
__raw_writel(0x33333333, &qmgr_regs->stat1[i]);
error_irq2:
free_irq(IRQ_IXP4XX_QM1, NULL);
error_irq:
- iounmap(qmgr_regs);
-error_map:
release_mem_region(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE);
return err;
}
free_irq(IRQ_IXP4XX_QM2, NULL);
synchronize_irq(IRQ_IXP4XX_QM1);
synchronize_irq(IRQ_IXP4XX_QM2);
- iounmap(qmgr_regs);
release_mem_region(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE);
}
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Krzysztof Halasa");
-EXPORT_SYMBOL(qmgr_regs);
EXPORT_SYMBOL(qmgr_set_irq);
EXPORT_SYMBOL(qmgr_enable_irq);
EXPORT_SYMBOL(qmgr_disable_irq);
return 1;
}
+/*
+ * The root complex has a hardwired class of PCI_CLASS_MEMORY_OTHER; when it
+ * is operating as a root complex, this needs to be switched to
+ * PCI_CLASS_BRIDGE_HOST or Linux will erroneously try to process the BARs
+ * on the device. Decoding setup is handled by the orion code.
+ */
static void __devinit rc_pci_fixup(struct pci_dev *dev)
{
- /*
- * Prevent enumeration of root complex.
- */
if (dev->bus->parent == NULL && dev->devfn == 0) {
int i;
+ dev->class &= 0xff;
+ dev->class |= PCI_CLASS_BRIDGE_HOST << 8;
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
dev->resource[i].start = 0;
dev->resource[i].end = 0;
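For reference, such a fixup is attached to enumeration with one of the PCI fixup macros; a sketch with placeholder IDs (the hunk does not show which fixup phase this platform actually uses):

    DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_MARVELL, PCI_ANY_ID, rc_pci_fixup);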
select I2C_OMAP
select MENELAUS if ARCH_OMAP2
select NEON if ARCH_OMAP3 || ARCH_OMAP4 || SOC_OMAP5
- select PINCTRL
select PM_RUNTIME
select REGULATOR
select SERIAL_OMAP
} else
return;
+ /* Make sure that the GPIO pins are muxed correctly */
+ omap_mux_init_gpio(igep_wlan_bt_gpios[0].gpio, OMAP_PIN_OUTPUT);
+ omap_mux_init_gpio(igep_wlan_bt_gpios[1].gpio, OMAP_PIN_OUTPUT);
+ omap_mux_init_gpio(igep_wlan_bt_gpios[2].gpio, OMAP_PIN_OUTPUT);
+
err = gpio_request_array(igep_wlan_bt_gpios,
ARRAY_SIZE(igep_wlan_bt_gpios));
if (err) {
#include <linux/input.h>
#include <linux/gpio_keys.h>
#include <linux/opp.h>
+#include <linux/cpu.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
};
#endif
-static void __init beagle_opp_init(void)
+static int __init beagle_opp_init(void)
{
int r = 0;
- /* Initialize the omap3 opp table */
- if (omap3_opp_init()) {
+ if (!machine_is_omap3_beagle())
+ return 0;
+
+ /* Initialize the omap3 opp table if not already created. */
+ r = omap3_opp_init();
+ if (IS_ERR_VALUE(r) && (r != -EEXIST)) {
pr_err("%s: opp default init failed\n", __func__);
- return;
+ return r;
}
/* Custom OPP enabled for all xM versions */
if (cpu_is_omap3630()) {
struct device *mpu_dev, *iva_dev;
- mpu_dev = omap_device_get_by_hwmod_name("mpu");
+ mpu_dev = get_cpu_device(0);
iva_dev = omap_device_get_by_hwmod_name("iva");
if (IS_ERR(mpu_dev) || IS_ERR(iva_dev)) {
pr_err("%s: Aiee.. no mpu/dsp devices? %p %p\n",
__func__, mpu_dev, iva_dev);
- return;
+ return -ENODEV;
}
/* Enable MPU 1GHz and lower opps */
r = opp_enable(mpu_dev, 800000000);
opp_disable(iva_dev, 660000000);
}
}
- return;
+ return 0;
}
+device_initcall(beagle_opp_init);
static void __init omap3_beagle_init(void)
{
/* Ensure SDRC pins are mux'd for self-refresh */
omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT);
omap_mux_init_signal("sdrc_cke1", OMAP_PIN_OUTPUT);
-
- beagle_opp_init();
}
MACHINE_START(OMAP3_BEAGLE, "OMAP3 Beagle Board")
CLK(NULL, "gfx_fck_div_ck", &gfx_fck_div_ck, CK_AM33XX),
CLK(NULL, "sysclkout_pre_ck", &sysclkout_pre_ck, CK_AM33XX),
CLK(NULL, "clkout2_ck", &clkout2_ck, CK_AM33XX),
+ CLK(NULL, "timer_32k_ck", &clkdiv32k_ick, CK_AM33XX),
+ CLK(NULL, "timer_sys_ck", &sys_clkin_ck, CK_AM33XX),
};
int __init am33xx_clk_init(void)
.clkdm_offs = OMAP4430_CM2_CAM_CAM_CDOFFS,
.wkdep_srcs = iss_wkup_sleep_deps,
.sleepdep_srcs = iss_wkup_sleep_deps,
- .flags = CLKDM_CAN_HWSUP_SWSUP,
+ .flags = CLKDM_CAN_SWSUP,
};
static struct clockdomain l3_dss_44xx_clkdm = {
struct spi_board_info *spi_bi = &ads7846_spi_board_info;
int err;
- err = gpio_request_one(gpio_pendown, GPIOF_IN, "TSPenDown");
- if (err) {
- pr_err("Couldn't obtain gpio for TSPenDown: %d\n", err);
- return;
- }
+ /*
+	 * If a board defines a get_pendown_state() function, request the
+	 * pendown GPIO here and set its debounce time.
+	 * Otherwise, the ads7846 driver will set up the pendown GPIO
+	 * itself.
+ */
+ if (board_pdata && board_pdata->get_pendown_state) {
+ err = gpio_request_one(gpio_pendown, GPIOF_IN, "TSPenDown");
+ if (err) {
+ pr_err("Couldn't obtain gpio for TSPenDown: %d\n", err);
+ return;
+ }
- if (gpio_debounce)
- gpio_set_debounce(gpio_pendown, gpio_debounce);
+ if (gpio_debounce)
+ gpio_set_debounce(gpio_pendown, gpio_debounce);
+
+ gpio_export(gpio_pendown, 0);
+ }
spi_bi->bus_num = bus_num;
spi_bi->irq = gpio_to_irq(gpio_pendown);
+ ads7846_config.gpio_pendown = gpio_pendown;
+
if (board_pdata) {
board_pdata->gpio_pendown = gpio_pendown;
+ board_pdata->gpio_pendown_debounce = gpio_debounce;
spi_bi->platform_data = board_pdata;
- if (board_pdata->get_pendown_state)
- gpio_export(gpio_pendown, 0);
- } else {
- ads7846_config.gpio_pendown = gpio_pendown;
}
- if (!board_pdata || (board_pdata && !board_pdata->get_pendown_state))
- gpio_free(gpio_pendown);
-
spi_register_board_info(&ads7846_spi_board_info, 1);
}
#else
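The board-side half of that contract is small; a hypothetical sketch (the GPIO number is invented) of a board supplying its own pendown callback through ads7846_platform_data:

    static int board_get_pendown_state(void)
    {
            return !gpio_get_value(95);     /* hypothetical pendown GPIO */
    }

    static struct ads7846_platform_data board_ads7846_pdata = {
            .get_pendown_state = board_get_pendown_state,
    };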
#include <linux/of.h>
#include <linux/pinctrl/machine.h>
#include <linux/platform_data/omap4-keypad.h>
+#include <linux/platform_data/omap_ocp2scp.h>
#include <asm/mach-types.h>
#include <asm/mach/map.h>
static inline void omap_init_vout(void) {}
#endif
+#if defined(CONFIG_OMAP_OCP2SCP) || defined(CONFIG_OMAP_OCP2SCP_MODULE)
+static int count_ocp2scp_devices(struct omap_ocp2scp_dev *ocp2scp_dev)
+{
+ int cnt = 0;
+
+ while (ocp2scp_dev->drv_name != NULL) {
+ cnt++;
+ ocp2scp_dev++;
+ }
+
+ return cnt;
+}
+
+static void omap_init_ocp2scp(void)
+{
+ struct omap_hwmod *oh;
+ struct platform_device *pdev;
+ int bus_id = -1, dev_cnt = 0, i;
+ struct omap_ocp2scp_dev *ocp2scp_dev;
+ const char *oh_name, *name;
+ struct omap_ocp2scp_platform_data *pdata;
+
+ if (!cpu_is_omap44xx())
+ return;
+
+ oh_name = "ocp2scp_usb_phy";
+ name = "omap-ocp2scp";
+
+ oh = omap_hwmod_lookup(oh_name);
+ if (!oh) {
+ pr_err("%s: could not find omap_hwmod for %s\n", __func__,
+ oh_name);
+ return;
+ }
+
+ pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ pr_err("%s: No memory for ocp2scp pdata\n", __func__);
+ return;
+ }
+
+ ocp2scp_dev = oh->dev_attr;
+ dev_cnt = count_ocp2scp_devices(ocp2scp_dev);
+
+ if (!dev_cnt) {
+ pr_err("%s: No devices connected to ocp2scp\n", __func__);
+ kfree(pdata);
+ return;
+ }
+
+ pdata->devices = kzalloc(sizeof(struct omap_ocp2scp_dev *)
+ * dev_cnt, GFP_KERNEL);
+ if (!pdata->devices) {
+ pr_err("%s: No memory for ocp2scp pdata devices\n", __func__);
+ kfree(pdata);
+ return;
+ }
+
+ for (i = 0; i < dev_cnt; i++, ocp2scp_dev++)
+ pdata->devices[i] = ocp2scp_dev;
+
+ pdata->dev_cnt = dev_cnt;
+
+ pdev = omap_device_build(name, bus_id, oh, pdata, sizeof(*pdata), NULL,
+ 0, false);
+ if (IS_ERR(pdev)) {
+ pr_err("Could not build omap_device for %s %s\n",
+ name, oh_name);
+ kfree(pdata->devices);
+ kfree(pdata);
+ return;
+ }
+}
+#else
+static inline void omap_init_ocp2scp(void) { }
+#endif
+
/*-------------------------------------------------------------------------*/
static int __init omap2_init_devices(void)
omap_init_sham();
omap_init_aes();
omap_init_vout();
+ omap_init_ocp2scp();
return 0;
}
"sys_off_mode", NULL, NULL, NULL,
"gpio_9", NULL, NULL, "safe_mode"),
_OMAP3_MUXENTRY(UART1_CTS, 150,
- "uart1_cts", NULL, NULL, NULL,
+ "uart1_cts", "ssi1_rdy_tx", NULL, NULL,
"gpio_150", "hsusb3_tll_clk", NULL, "safe_mode"),
_OMAP3_MUXENTRY(UART1_RTS, 149,
- "uart1_rts", NULL, NULL, NULL,
+ "uart1_rts", "ssi1_flag_tx", NULL, NULL,
"gpio_149", NULL, NULL, "safe_mode"),
_OMAP3_MUXENTRY(UART1_RX, 151,
- "uart1_rx", NULL, "mcbsp1_clkr", "mcspi4_clk",
+ "uart1_rx", "ss1_wake_tx", "mcbsp1_clkr", "mcspi4_clk",
"gpio_151", NULL, NULL, "safe_mode"),
_OMAP3_MUXENTRY(UART1_TX, 148,
- "uart1_tx", NULL, NULL, NULL,
+ "uart1_tx", "ssi1_dat_tx", NULL, NULL,
"gpio_148", NULL, NULL, "safe_mode"),
_OMAP3_MUXENTRY(UART2_CTS, 144,
"uart2_cts", "mcbsp3_dx", "gpt9_pwm_evt", NULL,
}
/**
+ * _wait_softreset_complete - wait for an OCP softreset to complete
+ * @oh: struct omap_hwmod * to wait on
+ *
+ * Wait until the IP block represented by @oh reports that its OCP
+ * softreset is complete. This can be triggered by software (see
+ * _ocp_softreset()) or by hardware upon returning from off-mode (one
+ * example is HSMMC). Waits for up to MAX_MODULE_SOFTRESET_WAIT
+ * microseconds. Returns the number of microseconds waited.
+ */
+static int _wait_softreset_complete(struct omap_hwmod *oh)
+{
+ struct omap_hwmod_class_sysconfig *sysc;
+ u32 softrst_mask;
+ int c = 0;
+
+ sysc = oh->class->sysc;
+
+ if (sysc->sysc_flags & SYSS_HAS_RESET_STATUS)
+ omap_test_timeout((omap_hwmod_read(oh, sysc->syss_offs)
+ & SYSS_RESETDONE_MASK),
+ MAX_MODULE_SOFTRESET_WAIT, c);
+ else if (sysc->sysc_flags & SYSC_HAS_RESET_STATUS) {
+ softrst_mask = (0x1 << sysc->sysc_fields->srst_shift);
+ omap_test_timeout(!(omap_hwmod_read(oh, sysc->sysc_offs)
+ & softrst_mask),
+ MAX_MODULE_SOFTRESET_WAIT, c);
+ }
+
+ return c;
+}
+
+/**
* _set_dmadisable: set OCP_SYSCONFIG.DMADISABLE bit in @v
* @oh: struct omap_hwmod *
*
if (!oh->class->sysc)
return;
+ /*
+	 * Wait until any reset has completed; this is needed because the
+	 * IP block is reset automatically by hardware in some cases
+	 * (off-mode, for example), and drivers require the IP to be
+	 * ready when they access it.
+ */
+ if (oh->flags & HWMOD_CONTROL_OPT_CLKS_IN_RESET)
+ _enable_optional_clocks(oh);
+ _wait_softreset_complete(oh);
+ if (oh->flags & HWMOD_CONTROL_OPT_CLKS_IN_RESET)
+ _disable_optional_clocks(oh);
+
v = oh->_sysc_cache;
sf = oh->class->sysc->sysc_flags;
*/
static int _ocp_softreset(struct omap_hwmod *oh)
{
- u32 v, softrst_mask;
+ u32 v;
int c = 0;
int ret = 0;
if (oh->class->sysc->srst_udelay)
udelay(oh->class->sysc->srst_udelay);
- if (oh->class->sysc->sysc_flags & SYSS_HAS_RESET_STATUS)
- omap_test_timeout((omap_hwmod_read(oh,
- oh->class->sysc->syss_offs)
- & SYSS_RESETDONE_MASK),
- MAX_MODULE_SOFTRESET_WAIT, c);
- else if (oh->class->sysc->sysc_flags & SYSC_HAS_RESET_STATUS) {
- softrst_mask = (0x1 << oh->class->sysc->sysc_fields->srst_shift);
- omap_test_timeout(!(omap_hwmod_read(oh,
- oh->class->sysc->sysc_offs)
- & softrst_mask),
- MAX_MODULE_SOFTRESET_WAIT, c);
- }
-
+ c = _wait_softreset_complete(oh);
if (c == MAX_MODULE_SOFTRESET_WAIT)
pr_warning("omap_hwmod: %s: softreset failed (waited %d usec)\n",
oh->name, MAX_MODULE_SOFTRESET_WAIT);
if (oh->_state != _HWMOD_STATE_INITIALIZED)
return -EINVAL;
+ if (oh->flags & HWMOD_EXT_OPT_MAIN_CLK)
+ return -EPERM;
+
if (oh->rst_lines_cnt == 0) {
r = _enable(oh);
if (r) {
#include <linux/io.h>
#include <linux/platform_data/gpio-omap.h>
#include <linux/power/smartreflex.h>
+#include <linux/platform_data/omap_ocp2scp.h>
#include <plat/omap_hwmod.h>
#include <plat/i2c.h>
.name = "mcpdm",
.class = &omap44xx_mcpdm_hwmod_class,
.clkdm_name = "abe_clkdm",
+ /*
+ * It's suspected that the McPDM requires an off-chip main
+ * functional clock, controlled via I2C. This IP block is
+ * currently reset very early during boot, before I2C is
+ * available, so it doesn't seem that we have any choice in
+ * the kernel other than to avoid resetting it.
+ */
+ .flags = HWMOD_EXT_OPT_MAIN_CLK,
.mpu_irqs = omap44xx_mcpdm_irqs,
.sdma_reqs = omap44xx_mcpdm_sdma_reqs,
.main_clk = "mcpdm_fck",
.sysc = &omap44xx_ocp2scp_sysc,
};
+/* ocp2scp dev_attr */
+static struct resource omap44xx_usb_phy_and_pll_addrs[] = {
+ {
+ .name = "usb_phy",
+ .start = 0x4a0ad080,
+ .end = 0x4a0ae000,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ /* XXX: Remove this once control module driver is in place */
+ .name = "ctrl_dev",
+ .start = 0x4a002300,
+ .end = 0x4a002303,
+ .flags = IORESOURCE_MEM,
+ },
+ { }
+};
+
+static struct omap_ocp2scp_dev ocp2scp_dev_attr[] = {
+ {
+ .drv_name = "omap-usb2",
+ .res = omap44xx_usb_phy_and_pll_addrs,
+ },
+ { }
+};
+
/* ocp2scp_usb_phy */
static struct omap_hwmod omap44xx_ocp2scp_usb_phy_hwmod = {
.name = "ocp2scp_usb_phy",
.modulemode = MODULEMODE_HWCTRL,
},
},
+ .dev_attr = ocp2scp_dev_attr,
};
/*
#define PM_RTA_ERRATUM_i608 (1 << 0)
#define PM_SDRC_WAKEUP_ERRATUM_i583 (1 << 1)
+#define PM_PER_MEMORIES_ERRATUM_i582 (1 << 2)
#if defined(CONFIG_PM) && defined(CONFIG_ARCH_OMAP3)
extern u16 pm34xx_errata;
/* Enable the l2 cache toggling in sleep logic */
enable_omap3630_toggle_l2_on_restore();
if (omap_rev() < OMAP3630_REV_ES1_2)
- pm34xx_errata |= PM_SDRC_WAKEUP_ERRATUM_i583;
+ pm34xx_errata |= (PM_SDRC_WAKEUP_ERRATUM_i583 |
+ PM_PER_MEMORIES_ERRATUM_i582);
+ } else if (cpu_is_omap34xx()) {
+ pm34xx_errata |= PM_PER_MEMORIES_ERRATUM_i582;
}
}
int __init omap3_pm_init(void)
{
struct power_state *pwrst, *tmp;
- struct clockdomain *neon_clkdm, *mpu_clkdm;
+ struct clockdomain *neon_clkdm, *mpu_clkdm, *per_clkdm, *wkup_clkdm;
int ret;
if (!omap3_has_io_chain_ctrl())
neon_clkdm = clkdm_lookup("neon_clkdm");
mpu_clkdm = clkdm_lookup("mpu_clkdm");
+ per_clkdm = clkdm_lookup("per_clkdm");
+ wkup_clkdm = clkdm_lookup("wkup_clkdm");
#ifdef CONFIG_SUSPEND
omap_pm_suspend = omap3_pm_suspend;
if (IS_PM34XX_ERRATUM(PM_RTA_ERRATUM_i608))
omap3630_ctrl_disable_rta();
+ /*
+ * The UART3/4 FIFO and the sidetone memory in McBSP2/3 are
+ * not correctly reset when the PER powerdomain comes back
+ * from OFF or OSWR when the CORE powerdomain is kept active.
+ * See OMAP36xx Erratum i582 "PER Domain reset issue after
+ * Domain-OFF/OSWR Wakeup". This wakeup dependency is not a
+ * complete workaround. The kernel must also prevent the PER
+ * powerdomain from going to OSWR/OFF while the CORE
+	 * powerdomain is not going to OSWR/OFF. And if PER's last
+	 * power state was OFF while CORE's last power state was ON, the
+	 * UART3/4 and McBSP2/3 sidetone devices need to run self-tests
+	 * using their loopback modes; if those fail, the devices are
+	 * unusable until PER/CORE complete a transition from ON to
+	 * OSWR/OFF and then back to ON.
+ *
+ * XXX Technically this workaround is only needed if off-mode
+ * or OSWR is enabled.
+ */
+ if (IS_PM34XX_ERRATUM(PM_PER_MEMORIES_ERRATUM_i582))
+ clkdm_add_wkdep(per_clkdm, wkup_clkdm);
+
clkdm_add_wkdep(neon_clkdm, mpu_clkdm);
if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
omap3_secure_ram_storage =
oh->mux = omap_hwmod_mux_init(bdata->pads, bdata->pads_cnt);
+ if (console_uart_id == bdata->id) {
+ omap_device_enable(pdev);
+ pm_runtime_set_active(&pdev->dev);
+ }
+
oh->dev_attr = uart;
if (((cpu_is_omap34xx() || cpu_is_omap44xx()) && bdata->pads)
{
/* PMIC part*/
omap_mux_init_signal("sys_nirq1", OMAP_PIN_INPUT_PULLUP | OMAP_PIN_OFF_WAKEUPENABLE);
+ omap_mux_init_signal("fref_clk0_out.sys_drm_msecure", OMAP_PIN_OUTPUT);
omap_pmic_init(1, 400, pmic_type, 7 + OMAP44XX_IRQ_GIC_START, pmic_data);
/* Register additional devices on i2c1 bus if needed */
};
static struct regulator_consumer_supply omap4_vdd1_supply[] = {
- REGULATOR_SUPPLY("vcc", "mpu.0"),
+ REGULATOR_SUPPLY("vcc", "cpu0"),
};
static struct regulator_consumer_supply omap4_vdd2_supply[] = {
if (initialized) {
if (voltdm->pmic->i2c_high_speed != i2c_high_speed)
- pr_warn("%s: I2C config for vdd_%s does not match other channels (%u).",
+ pr_warn("%s: I2C config for vdd_%s does not match other channels (%u).\n",
__func__, voltdm->name, i2c_high_speed);
return;
}
#include <linux/mfd/asic3.h>
#include <linux/mtd/physmap.h>
#include <linux/pda_power.h>
+#include <linux/pwm.h>
#include <linux/pwm_backlight.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/gpio-regulator.h>
*/
static struct platform_pwm_backlight_data backlight_data = {
- .pwm_id = 1,
+ .pwm_id = -1, /* Superseded by pwm_lookup */
.max_brightness = 200,
.dft_brightness = 100,
.pwm_period_ns = 30923,
},
};
+static struct pwm_lookup hx4700_pwm_lookup[] = {
+ PWM_LOOKUP("pxa27x-pwm.1", 0, "pwm-backlight", NULL),
+};
+
/*
* USB "Transceiver"
*/
pxa_set_stuart_info(NULL);
platform_add_devices(devices, ARRAY_SIZE(devices));
+ pwm_add_table(hx4700_pwm_lookup, ARRAY_SIZE(hx4700_pwm_lookup));
pxa_set_ficp_info(&ficp_info);
pxa27x_set_i2c_power_info(NULL);
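With the lookup table registered, the pwm-backlight driver resolves its PWM by device match instead of the legacy numeric ID. The consumer side of the PWM API looks roughly like this (a generic sketch, not hx4700 code; the duty/period values echo the platform data above):

    struct pwm_device *pwm = devm_pwm_get(&pdev->dev, NULL);
    if (IS_ERR(pwm))
            return PTR_ERR(pwm);
    pwm_config(pwm, 15461, 30923);          /* duty_ns, period_ns */
    pwm_enable(pwm);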
gpio_set_value(SPITZ_GPIO_LED_GREEN, on);
}
-static unsigned long gpio18_config[] = {
- GPIO18_RDY,
- GPIO18_GPIO,
-};
+static unsigned long gpio18_config = GPIO18_GPIO;
static void spitz_presuspend(void)
{
PGSR3 &= ~SPITZ_GPIO_G3_STROBE_BIT;
PGSR2 |= GPIO_bit(SPITZ_GPIO_KEY_STROBE0);
- pxa2xx_mfp_config(&gpio18_config[0], 1);
+ pxa2xx_mfp_config(&gpio18_config, 1);
gpio_request_one(18, GPIOF_OUT_INIT_HIGH, "Unknown");
gpio_free(18);
static void spitz_postsuspend(void)
{
- pxa2xx_mfp_config(&gpio18_config[1], 1);
}
static int spitz_should_wakeup(unsigned int resume_on_alarm)
#include <plat/nand-core.h>
#include <plat/adc-core.h>
#include <plat/rtc-core.h>
+#include <plat/spi-core.h>
static struct map_desc s3c2416_iodesc[] __initdata = {
IODESC_ENT(WATCHDOG),
/* initialize device information early */
s3c2416_default_sdhci0();
s3c2416_default_sdhci1();
+ s3c64xx_spi_setname("s3c2443-spi");
iotable_init(s3c2416_iodesc, ARRAY_SIZE(s3c2416_iodesc));
}
#include <plat/nand-core.h>
#include <plat/adc-core.h>
#include <plat/rtc-core.h>
+#include <plat/spi-core.h>
static struct map_desc s3c2443_iodesc[] __initdata = {
IODESC_ENT(WATCHDOG),
s3c24xx_gpiocfg_default.set_pull = s3c2443_gpio_setpull;
s3c24xx_gpiocfg_default.get_pull = s3c2443_gpio_getpull;
+ /* initialize device information early */
+ s3c64xx_spi_setname("s3c2443-spi");
+
iotable_init(s3c2443_iodesc, ARRAY_SIZE(s3c2443_iodesc));
}
#include <plat/sdhci.h>
#include <plat/adc-core.h>
#include <plat/fb-core.h>
+#include <plat/spi-core.h>
#include <plat/gpio-cfg.h>
#include <plat/regs-irqtype.h>
#include <plat/regs-serial.h>
/* initialize any device information early */
s3c_adc_setname("s3c64xx-adc");
s3c_fb_setname("s5p64x0-fb");
+ s3c64xx_spi_setname("s5p64x0-spi");
s5p64x0_default_sdhci0();
s5p64x0_default_sdhci1();
/* initialize any device information early */
s3c_adc_setname("s3c64xx-adc");
s3c_fb_setname("s5p64x0-fb");
+ s3c64xx_spi_setname("s5p64x0-spi");
s5p64x0_default_sdhci0();
s5p64x0_default_sdhci1();
#include <plat/fb-core.h>
#include <plat/iic-core.h>
#include <plat/onenand-core.h>
+#include <plat/spi-core.h>
#include <plat/regs-serial.h>
#include <plat/watchdog-reset.h>
s3c_onenand_setname("s5pc100-onenand");
s3c_fb_setname("s5pc100-fb");
s3c_cfcon_setname("s5pc100-pata");
+
+ s3c64xx_spi_setname("s5pc100-spi");
}
void __init s5pc100_init_clocks(int xtal)
#include <plat/iic-core.h>
#include <plat/keypad-core.h>
#include <plat/tv-core.h>
+#include <plat/spi-core.h>
#include <plat/regs-serial.h>
#include "common.h"
/* setup TV devices */
s5p_hdmi_setname("s5pv210-hdmi");
+
+ s3c64xx_spi_setname("s5pv210-spi");
}
void __init s5pv210_init_clocks(int xtal)
{
#ifdef CONFIG_CACHE_L2X0
/* Early BRESP enable, Shared attribute override enable, 64K*16way */
- l2x0_init((void __iomem __force *)(0xf0100000), 0x40470000, 0x82000fff);
+ l2x0_init(IOMEM(0xf0100000), 0x40470000, 0x82000fff);
#endif
r8a7779_pm_init();
#include <linux/stat.h>
#include <linux/of.h>
#include <linux/of_irq.h>
+#include <linux/irq.h>
#include <linux/platform_data/clk-ux500.h>
#include <asm/hardware/gic.h>
static int
do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
- union offset_union offset;
+ union offset_union uninitialized_var(offset);
unsigned long instr = 0, instrptr;
int (*handler)(unsigned long addr, unsigned long instr, struct pt_regs *regs);
unsigned int type;
if (thumb2_32b) {
offset.un = 0;
handler = do_alignment_t32_to_handler(&instr, regs, &offset);
- } else
+ } else {
+ offset.un = 0;
handler = do_alignment_ldmstm;
+ }
break;
default:
gfp_t gfp, pgprot_t prot, bool is_coherent, const void *caller)
{
u64 mask = get_coherent_dma_mask(dev);
- struct page *page;
+ struct page *page = NULL;
void *addr;
#ifdef CONFIG_DMA_API_DEBUG
mov pc, lr
/*
- * cpu_arm926_switch_mm(pgd_phys, tsk)
+ * cpu_v6_switch_mm(pgd_phys, tsk)
*
* Set the translation table base pointer to be pgd_phys
*
struct list_head vm_list;
unsigned long vm_start;
unsigned long vm_end;
- void *priv;
int vm_active;
const void *caller;
};
struct resource res[] = {
{
.start = data->iobase,
- .end = data->iobase + SZ_4K - 1,
+ .end = data->iobase + data->iosize - 1,
.flags = IORESOURCE_MEM,
}, {
.start = data->irq,
select CLKDEV_LOOKUP
select GENERIC_IRQ_CHIP
select OMAP_DM_TIMER
+ select PINCTRL
select PROC_DEVICETREE if PROC_FS
select SPARSE_IRQ
select USE_OF
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
+#include <linux/i2c-omap.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <mach/irqs.h>
#include <plat/i2c.h>
+#include <plat/omap-pm.h>
#include <plat/omap_device.h>
#define OMAP_I2C_SIZE 0x3f
#ifdef CONFIG_ARCH_OMAP2PLUS
+/*
+ * XXX This function is a temporary compatibility wrapper - only
+ * needed until the I2C driver can be converted to call
+ * omap_pm_set_max_dev_wakeup_lat() and handle a return code.
+ */
+static void omap_pm_set_max_mpu_wakeup_lat_compat(struct device *dev, long t)
+{
+ omap_pm_set_max_mpu_wakeup_lat(dev, t);
+}
+
static inline int omap2_i2c_add_bus(int bus_id)
{
int l;
dev_attr = (struct omap_i2c_dev_attr *)oh->dev_attr;
pdata->flags = dev_attr->flags;
+ /*
+	 * When waiting for completion of an I2C transfer, we need to
+	 * set a wakeup latency constraint for the MPU, to ensure a
+	 * quick enough wakeup from idle when the transfer completes.
+	 * Only OMAP3 has support for such constraints.
+ */
+ if (cpu_is_omap34xx())
+ pdata->set_mpu_wkup_lat = omap_pm_set_max_mpu_wakeup_lat_compat;
pdev = omap_device_build(name, bus_id, oh, pdata,
sizeof(struct omap_i2c_bus_platform_data),
NULL, 0, 0);
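On the driver side, the hook is then called around transfers, roughly as follows (a sketch of the pattern, not the exact i2c-omap code; latency_us is hypothetical):

    /* before starting a transfer: bound the MPU wakeup latency */
    if (pdata->set_mpu_wkup_lat)
            pdata->set_mpu_wkup_lat(dev, latency_us);
    /* ... perform the transfer ... */
    /* afterwards: release the constraint */
    if (pdata->set_mpu_wkup_lat)
            pdata->set_mpu_wkup_lat(dev, -1);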
#define OMAP_UART_WER_MOD_WKUP 0X7F
/* Enable XON/XOFF flow control on output */
-#define OMAP_UART_SW_TX 0x8
+#define OMAP_UART_SW_TX 0x04
/* Enable XON/XOFF flow control on input */
-#define OMAP_UART_SW_RX 0x2
+#define OMAP_UART_SW_RX 0x04
#define OMAP_UART_SYSC_RESET 0X07
#define OMAP_UART_TCR_TRIG 0X0F
* in order to complete the reset. Optional clocks will be disabled
* again after the reset.
* HWMOD_16BIT_REG: Module has 16bit registers
+ * HWMOD_EXT_OPT_MAIN_CLK: The only main functional clock for this
+ * IP block is provided by an off-chip source and is not always
+ * enabled. This prevents the hwmod code from being able to
+ * enable and reset the IP block early. XXX Eventually it should
+ * be possible to query the clock framework for this information.
*/
#define HWMOD_SWSUP_SIDLE (1 << 0)
#define HWMOD_SWSUP_MSTANDBY (1 << 1)
#define HWMOD_NO_IDLEST (1 << 6)
#define HWMOD_CONTROL_OPT_CLKS_IN_RESET (1 << 7)
#define HWMOD_16BIT_REG (1 << 8)
+#define HWMOD_EXT_OPT_MAIN_CLK (1 << 9)
/*
* omap_hwmod._int_flags definitions
pr_debug("dma%d: %s: buffer %p queued onto non-empty channel\n",
chan->number, __func__, buf);
- if (chan->end == NULL)
+ if (chan->end == NULL) {
pr_debug("dma%d: %s: %p not empty, and chan->end==NULL?\n",
chan->number, __func__, chan);
-
- chan->end->next = buf;
- chan->end = buf;
+ } else {
+ chan->end->next = buf;
+ chan->end = buf;
+ }
}
/* if necessary, update the next buffer field */
--- /dev/null
+/*
+ * Copyright (C) 2012 Heiko Stuebner <heiko@sntech.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __PLAT_S3C_SPI_CORE_H
+#define __PLAT_S3C_SPI_CORE_H
+
+/* These functions are only for use with the core support code, such as
+ * the CPU-specific initialisation code.
+ */
+
+/* Redefine the device name depending on which SPI ports are configured. */
+static inline void s3c64xx_spi_setname(char *name)
+{
+#ifdef CONFIG_S3C64XX_DEV_SPI0
+ s3c64xx_device_spi0.name = name;
+#endif
+#ifdef CONFIG_S3C64XX_DEV_SPI1
+ s3c64xx_device_spi1.name = name;
+#endif
+#ifdef CONFIG_S3C64XX_DEV_SPI2
+ s3c64xx_device_spi2.name = name;
+#endif
+}
+
+#endif /* __PLAT_S3C_SPI_CORE_H */
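Usage mirrors the other *_setname() helpers in this series: SoC init code calls it early, before the SPI platform devices are registered, e.g.:

    /* in the SoC's map_io()/init path, before device registration */
    s3c64xx_spi_setname("s5pv210-spi");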
#
include/generated/mach-types.h: $(src)/gen-mach-types $(src)/mach-types
- @echo ' Generating $@'
+ @$(kecho) ' Generating $@'
@mkdir -p $(dir $@)
$(Q)$(AWK) -f $^ > $@ || { rm -f $@; /bin/false; }
elf_hwcap |= HWCAP_VFPv3;
/*
- * Check for VFPv3 D16. CPUs in this configuration
- * only have 16 x 64bit registers.
+ * Check for VFPv3 D16 and VFPv4 D16. CPUs in
+ * this configuration only have 16 x 64bit
+ * registers.
*/
if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK)) == 1)
- elf_hwcap |= HWCAP_VFPv3D16;
+ elf_hwcap |= HWCAP_VFPv3D16; /* also v4-D16 */
+ else
+ elf_hwcap |= HWCAP_VFPD32;
}
#endif
/*
*pages = NULL;
}
EXPORT_SYMBOL_GPL(free_xenballooned_pages);
+
+/* In the hypervisor.S file. */
+EXPORT_SYMBOL_GPL(HYPERVISOR_event_channel_op);
+EXPORT_SYMBOL_GPL(HYPERVISOR_grant_table_op);
+EXPORT_SYMBOL_GPL(HYPERVISOR_xen_version);
+EXPORT_SYMBOL_GPL(HYPERVISOR_console_io);
+EXPORT_SYMBOL_GPL(HYPERVISOR_sched_op);
+EXPORT_SYMBOL_GPL(HYPERVISOR_hvm_op);
+EXPORT_SYMBOL_GPL(HYPERVISOR_memory_op);
+EXPORT_SYMBOL_GPL(HYPERVISOR_physdev_op);
+EXPORT_SYMBOL_GPL(privcmd_call);
#include <xen/page.h>
#include <xen/grant_table.h>
-int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes,
+int arch_gnttab_map_shared(xen_pfn_t *frames, unsigned long nr_gframes,
unsigned long max_nr_gframes,
void **__shared)
{
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/opcodes-virt.h>
#include <xen/interface/xen.h>
-/* HVC 0xEA1 */
-#ifdef CONFIG_THUMB2_KERNEL
-#define xen_hvc .word 0xf7e08ea1
-#else
-#define xen_hvc .word 0xe140ea71
-#endif
+#define XEN_IMM 0xEA1
#define HYPERCALL_SIMPLE(hypercall) \
ENTRY(HYPERVISOR_##hypercall) \
mov r12, #__HYPERVISOR_##hypercall; \
- xen_hvc; \
+ __HVC(XEN_IMM); \
mov pc, lr; \
ENDPROC(HYPERVISOR_##hypercall)
stmdb sp!, {r4} \
ldr r4, [sp, #4] \
mov r12, #__HYPERVISOR_##hypercall; \
- xen_hvc \
+ __HVC(XEN_IMM); \
ldm sp!, {r4} \
mov pc, lr \
ENDPROC(HYPERVISOR_##hypercall)
mov r2, r3
ldr r3, [sp, #8]
ldr r4, [sp, #4]
- xen_hvc
+ __HVC(XEN_IMM)
ldm sp!, {r4}
mov pc, lr
ENDPROC(privcmd_call);
config ARM64
def_bool y
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
+ select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
select GENERIC_CLOCKEVENTS
select GENERIC_HARDIRQS_NO_DEPRECATED
select GENERIC_IOMAP
#include <asm/user.h>
typedef unsigned long elf_greg_t;
-typedef unsigned long elf_freg_t[3];
#define ELF_NGREG (sizeof (struct pt_regs) / sizeof(elf_greg_t))
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
-
-typedef struct user_fp elf_fpregset_t;
+typedef struct user_fpsimd_state elf_fpregset_t;
#define EM_AARCH64 183
#define R_AARCH64_MOVW_PREL_G2_NC 292
#define R_AARCH64_MOVW_PREL_G3 293
-
/*
* These are used to set parameters in the core dumps.
*/
* - FPSR and FPCR
* - 32 128-bit data registers
*
- * Note that user_fp forms a prefix of this structure, which is relied
- * upon in the ptrace FP/SIMD accessors. struct user_fpsimd_state must
- * form a prefix of struct fpsimd_state.
+ * Note that user_fpsimd forms a prefix of this structure, which is
+ * relied upon in the ptrace FP/SIMD accessors.
*/
struct fpsimd_state {
union {
* I/O port access primitives.
*/
#define IO_SPACE_LIMIT 0xffff
-#define PCI_IOBASE ((void __iomem *)0xffffffbbfffe0000UL)
+#define PCI_IOBASE ((void __iomem *)(MODULES_VADDR - SZ_2M))
static inline u8 inb(unsigned long addr)
{
extern void __iounmap(volatile void __iomem *addr);
#define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_DIRTY)
-#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_XN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
+#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL_NC))
-#define ioremap(addr, size) __ioremap((addr), (size), PROT_DEVICE_nGnRE)
-#define ioremap_nocache(addr, size) __ioremap((addr), (size), PROT_DEVICE_nGnRE)
-#define ioremap_wc(addr, size) __ioremap((addr), (size), PROT_NORMAL_NC)
+#define ioremap(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
+#define ioremap_nocache(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
+#define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC))
#define iounmap __iounmap
#define ARCH_HAS_IOREMAP_WC
#define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
#define PMD_SECT_AF (_AT(pmdval_t, 1) << 10)
#define PMD_SECT_NG (_AT(pmdval_t, 1) << 11)
-#define PMD_SECT_XN (_AT(pmdval_t, 1) << 54)
+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 53)
+#define PMD_SECT_UXN (_AT(pmdval_t, 1) << 54)
/*
* AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
#define PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
#define PTE_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
#define PTE_NG (_AT(pteval_t, 1) << 11) /* nG */
-#define PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
+#define PTE_PXN (_AT(pteval_t, 1) << 53) /* Privileged XN */
+#define PTE_UXN (_AT(pteval_t, 1) << 54) /* User XN */
/*
* AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
#define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b))
-#define PAGE_NONE _MOD_PROT(pgprot_default, PTE_NG | PTE_XN | PTE_RDONLY)
-#define PAGE_SHARED _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_XN)
-#define PAGE_SHARED_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG)
-#define PAGE_COPY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_XN | PTE_RDONLY)
-#define PAGE_COPY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_RDONLY)
-#define PAGE_READONLY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_XN | PTE_RDONLY)
-#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_RDONLY)
-#define PAGE_KERNEL _MOD_PROT(pgprot_default, PTE_XN | PTE_DIRTY)
-#define PAGE_KERNEL_EXEC _MOD_PROT(pgprot_default, PTE_DIRTY)
-
-#define __PAGE_NONE __pgprot(_PAGE_DEFAULT | PTE_NG | PTE_XN | PTE_RDONLY)
-#define __PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_XN)
-#define __PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG)
-#define __PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_XN | PTE_RDONLY)
-#define __PAGE_COPY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_RDONLY)
-#define __PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_XN | PTE_RDONLY)
-#define __PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_RDONLY)
+#define PAGE_NONE _MOD_PROT(pgprot_default, PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
+#define PAGE_SHARED _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
+#define PAGE_SHARED_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN)
+#define PAGE_COPY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
+#define PAGE_COPY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
+#define PAGE_READONLY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
+#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
+#define PAGE_KERNEL _MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY)
+#define PAGE_KERNEL_EXEC _MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY)
+
+#define __PAGE_NONE __pgprot(_PAGE_DEFAULT | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
+#define __PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
+#define __PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
+#define __PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
+#define __PAGE_COPY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
+#define __PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
+#define __PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
#endif /* __ASSEMBLY__ */
#define pte_young(pte) (pte_val(pte) & PTE_AF)
#define pte_special(pte) (pte_val(pte) & PTE_SPECIAL)
#define pte_write(pte) (!(pte_val(pte) & PTE_RDONLY))
-#define pte_exec(pte) (!(pte_val(pte) & PTE_XN))
+#define pte_exec(pte) (!(pte_val(pte) & PTE_UXN))
#define pte_present_exec_user(pte) \
- ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_XN)) == \
+ ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == \
(PTE_VALID | PTE_USER))
#define PTE_BIT_FUNC(fn,op) \
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
- const pteval_t mask = PTE_USER | PTE_XN | PTE_RDONLY;
+ const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY;
pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
return pte;
}
#else
#define STACK_TOP STACK_TOP_MAX
#endif /* CONFIG_COMPAT */
+
+#define ARCH_LOW_ADDRESS_LIMIT PHYS_MASK
#endif /* __KERNEL__ */
struct debug_info {
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifdef CONFIG_COMPAT
-#define __ARCH_WANT_COMPAT_IPC_PARSE_VERSION
#define __ARCH_WANT_COMPAT_STAT64
#define __ARCH_WANT_SYS_GETHOSTNAME
#define __ARCH_WANT_SYS_PAUSE
ARMV8_PMUV3_PERFCTR_BUS_ACCESS = 0x19,
ARMV8_PMUV3_PERFCTR_MEM_ERROR = 0x1A,
ARMV8_PMUV3_PERFCTR_BUS_CYCLES = 0x1D,
-
- /*
- * This isn't an architected event.
- * We detect this event number and use the cycle counter instead.
- */
- ARMV8_PMUV3_PERFCTR_CPU_CYCLES = 0xFF,
};
/* PMUv3 HW events mapping. */
static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
- [PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CPU_CYCLES,
+ [PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
[PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
[PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
unsigned long evtype = event->config_base & ARMV8_EVTYPE_EVENT;
/* Always place a cycle counter into the cycle counter. */
- if (evtype == ARMV8_PMUV3_PERFCTR_CPU_CYCLES) {
+ if (evtype == ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES) {
if (test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
return -EAGAIN;
}
/*
- * Fill in the task's elfregs structure for a core dump.
- */
-int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs)
-{
- elf_core_copy_regs(elfregs, task_pt_regs(t));
- return 1;
-}
-
-/*
- * fill in the fpe structure for a core dump...
- */
-int dump_fpu (struct pt_regs *regs, struct user_fp *fp)
-{
- return 0;
-}
-EXPORT_SYMBOL(dump_fpu);
-
-/*
* Shuffle the argument into the correct register before calling the
* thread function. x1 is the thread argument, x2 is the pointer to
* the thread function, and x3 points to the exit function.
* before we continue.
*/
set_cpu_online(cpu, true);
- while (!cpu_active(cpu))
- cpu_relax();
+ complete(&cpu_running);
/*
* OK, it's off to the idle thread for us
#ifdef CONFIG_ZONE_DMA32
/* 4GB maximum for 32-bit only capable devices */
max_dma32 = min(max, MAX_DMA32_PFN);
- zone_size[ZONE_DMA32] = max_dma32 - min;
+	/* clamp: all RAM may lie above MAX_DMA32_PFN, making max_dma32 < min */
+	zone_size[ZONE_DMA32] = max(min, max_dma32) - min;
#endif
zone_size[ZONE_NORMAL] = max - max_dma32;
select GENERIC_CPU_DEVICES
select ARCH_WANT_IPC_PARSE_VERSION
select GENERIC_KERNEL_THREAD
+ select GENERIC_KERNEL_EXECVE
config ZONE_DMA
bool
INITRD_PHYS = 0x02180000
INITRD_VIRT = 0x02180000
+OBJCOPYFLAGS :=-O binary -R .note -R .note.gnu.build-id -R .comment
+
#
# If you don't define ZRELADDR above,
# then it defaults to ZTEXTADDR
targets: $(obj)/Image
$(obj)/Image: vmlinux FORCE
- $(OBJCOPY) -O binary -R .note -R .comment -S vmlinux $@
+ $(OBJCOPY) $(OBJCOPYFLAGS) -S vmlinux $@
#$(obj)/Image: $(CONFIGURE) $(SYSTEM)
-# $(OBJCOPY) -O binary -R .note -R .comment -g -S $(SYSTEM) $@
+# $(OBJCOPY) $(OBJCOPYFLAGS) -g -S $(SYSTEM) $@
bzImage: zImage
zImage: $(CONFIGURE) compressed/$(LINUX)
- $(OBJCOPY) -O binary -R .note -R .comment -S compressed/$(LINUX) $@
+ $(OBJCOPY) $(OBJCOPYFLAGS) -S compressed/$(LINUX) $@
bootpImage: bootp/bootp
- $(OBJCOPY) -O binary -R .note -R .comment -S bootp/bootp $@
+ $(OBJCOPY) $(OBJCOPYFLAGS) -S bootp/bootp $@
compressed/$(LINUX): $(LINUX) dep
@$(MAKE) -C compressed $(LINUX)
#define __ARCH_WANT_SYS_RT_SIGACTION
#define __ARCH_WANT_SYS_RT_SIGSUSPEND
#define __ARCH_WANT_SYS_EXECVE
-#define __ARCH_WANT_KERNEL_EXECVE
/*
* "Conditional" syscalls
call schedule_tail
calll.p @(gr21,gr0)
or gr20,gr20,gr8
- bra sys_exit
-
- .globl ret_from_kernel_execve
-ret_from_kernel_execve:
- ori gr28,0,sp
bra __syscall_exit
###################################################################################################
subicc gr5,#0,gr0,icc0
beq icc0,#0,__entry_return_direct
-__entry_preempt_need_resched:
- ldi @(gr15,#TI_FLAGS),gr4
- andicc gr4,#_TIF_NEED_RESCHED,gr0,icc0
- beq icc0,#1,__entry_return_direct
-
- setlos #PREEMPT_ACTIVE,gr5
- sti gr5,@(gr15,#TI_FLAGS)
-
- andi gr23,#~PSR_PIL,gr23
- movgs gr23,psr
-
- call schedule
- sti gr0,@(gr15,#TI_PRE_COUNT)
-
- movsg psr,gr23
- ori gr23,#PSR_PIL_14,gr23
- movgs gr23,psr
- bra __entry_preempt_need_resched
-#else
- bra __entry_return_direct
+ subcc gr0,gr0,gr0,icc2 /* set Z and clear C */
+ call preempt_schedule_irq
#endif
+ bra __entry_return_direct
###############################################################################
childregs = (struct pt_regs *)
(task_stack_page(p) + THREAD_SIZE - FRV_FRAME0_SIZE);
+ /* set up the userspace frame (the only place that the USP is stored) */
+ *childregs = *__kernel_frame0_ptr;
+
p->set_child_tid = p->clear_child_tid = NULL;
p->thread.frame = childregs;
p->thread.frame0 = childregs;
if (unlikely(!regs)) {
- memset(childregs, 0, sizeof(struct pt_regs));
childregs->gr9 = usp; /* function */
childregs->gr8 = arg;
- childregs->psr = PSR_S;
p->thread.pc = (unsigned long) ret_from_kernel_thread;
save_user_regs(p->thread.user);
return 0;
#include <linux/types.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/pci.h>
#define __ARCH_H8300_CACHE_H
/* bytes per L1 cache line */
-#define L1_CACHE_BYTES 4
+#define L1_CACHE_SHIFT 2
+#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
/* m68k-elf-gcc 2.95.2 doesn't like these */
generic-y += clkdev.h
generic-y += exec.h
+generic-y += kvm_para.h
+++ /dev/null
-/*
- * Copyright (C) 2007 Xiantao Zhang <xiantao.zhang@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- */
-#ifndef __IA64_KVM_PARA_H
-#define __IA64_KVM_PARA_H
-
-#include <uapi/asm/kvm_para.h>
-
-
-static inline unsigned int kvm_arch_para_features(void)
-{
- return 0;
-}
-
-static inline bool kvm_check_and_clear_guest_paused(void)
-{
- return false;
-}
-
-#endif
high_memory = __va(max_low_pfn * PAGE_SIZE);
- reset_zone_present_pages();
for_each_online_pgdat(pgdat)
if (pgdat->bdata->node_bootmem_map)
totalram_pages += free_all_bootmem_node(pgdat);
-include include/asm-generic/Kbuild.asm
-header-y += cachectl.h
generic-y += bitsperlong.h
generic-y += clkdev.h
#ifndef _M68K_PTRACE_H
#define _M68K_PTRACE_H
-#define PT_D1 0
-#define PT_D2 1
-#define PT_D3 2
-#define PT_D4 3
-#define PT_D5 4
-#define PT_D6 5
-#define PT_D7 6
-#define PT_A0 7
-#define PT_A1 8
-#define PT_A2 9
-#define PT_A3 10
-#define PT_A4 11
-#define PT_A5 12
-#define PT_A6 13
-#define PT_D0 14
-#define PT_USP 15
-#define PT_ORIG_D0 16
-#define PT_SR 17
-#define PT_PC 18
+#include <uapi/asm/ptrace.h>
#ifndef __ASSEMBLY__
-/* this struct defines the way the registers are stored on the
- stack during a system call. */
-
-struct pt_regs {
- long d1;
- long d2;
- long d3;
- long d4;
- long d5;
- long a0;
- long a1;
- long a2;
- long d0;
- long orig_d0;
- long stkadj;
-#ifdef CONFIG_COLDFIRE
- unsigned format : 4; /* frame format specifier */
- unsigned vector : 12; /* vector offset */
- unsigned short sr;
- unsigned long pc;
-#else
- unsigned short sr;
- unsigned long pc;
- unsigned format : 4; /* frame format specifier */
- unsigned vector : 12; /* vector offset */
-#endif
-};
-
-/*
- * This is the extended stack used by signal handlers and the context
- * switcher: it's pushed after the normal "struct pt_regs".
- */
-struct switch_stack {
- unsigned long d6;
- unsigned long d7;
- unsigned long a3;
- unsigned long a4;
- unsigned long a5;
- unsigned long a6;
- unsigned long retpc;
-};
-
-/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
-#define PTRACE_GETREGS 12
-#define PTRACE_SETREGS 13
-#define PTRACE_GETFPREGS 14
-#define PTRACE_SETFPREGS 15
-
-#define PTRACE_GET_THREAD_AREA 25
-
-#define PTRACE_SINGLEBLOCK 33 /* resume execution until next branch */
-
-#ifdef __KERNEL__
-
#ifndef PS_S
#define PS_S (0x2000)
#define PS_M (0x1000)
#define arch_has_block_step() (1)
#endif
-#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
#endif /* _M68K_PTRACE_H */
** Redesign of the boot information structure; moved boot information
** structure to bootinfo.h
*/
-
#ifndef _M68K_SETUP_H
#define _M68K_SETUP_H
+#include <uapi/asm/setup.h>
- /*
- * Linux/m68k Architectures
- */
-
-#define MACH_AMIGA 1
-#define MACH_ATARI 2
-#define MACH_MAC 3
-#define MACH_APOLLO 4
-#define MACH_SUN3 5
-#define MACH_MVME147 6
-#define MACH_MVME16x 7
-#define MACH_BVME6000 8
-#define MACH_HP300 9
-#define MACH_Q40 10
-#define MACH_SUN3X 11
-#define MACH_M54XX 12
-
-#define COMMAND_LINE_SIZE 256
-
-#ifdef __KERNEL__
-
#define CL_SIZE COMMAND_LINE_SIZE
#ifndef __ASSEMBLY__
# define MACH_TYPE (m68k_machtype)
#endif
-#endif /* __KERNEL__ */
-
-
- /*
- * CPU, FPU and MMU types
- *
- * Note: we may rely on the following equalities:
- *
- * CPU_68020 == MMU_68851
- * CPU_68030 == MMU_68030
- * CPU_68040 == FPU_68040 == MMU_68040
- * CPU_68060 == FPU_68060 == MMU_68060
- */
-
-#define CPUB_68020 0
-#define CPUB_68030 1
-#define CPUB_68040 2
-#define CPUB_68060 3
-#define CPUB_COLDFIRE 4
-
-#define CPU_68020 (1<<CPUB_68020)
-#define CPU_68030 (1<<CPUB_68030)
-#define CPU_68040 (1<<CPUB_68040)
-#define CPU_68060 (1<<CPUB_68060)
-#define CPU_COLDFIRE (1<<CPUB_COLDFIRE)
-
-#define FPUB_68881 0
-#define FPUB_68882 1
-#define FPUB_68040 2 /* Internal FPU */
-#define FPUB_68060 3 /* Internal FPU */
-#define FPUB_SUNFPA 4 /* Sun-3 FPA */
-#define FPUB_COLDFIRE 5 /* ColdFire FPU */
-
-#define FPU_68881 (1<<FPUB_68881)
-#define FPU_68882 (1<<FPUB_68882)
-#define FPU_68040 (1<<FPUB_68040)
-#define FPU_68060 (1<<FPUB_68060)
-#define FPU_SUNFPA (1<<FPUB_SUNFPA)
-#define FPU_COLDFIRE (1<<FPUB_COLDFIRE)
-
-#define MMUB_68851 0
-#define MMUB_68030 1 /* Internal MMU */
-#define MMUB_68040 2 /* Internal MMU */
-#define MMUB_68060 3 /* Internal MMU */
-#define MMUB_APOLLO 4 /* Custom Apollo */
-#define MMUB_SUN3 5 /* Custom Sun-3 */
-#define MMUB_COLDFIRE 6 /* Internal MMU */
-
-#define MMU_68851 (1<<MMUB_68851)
-#define MMU_68030 (1<<MMUB_68030)
-#define MMU_68040 (1<<MMUB_68040)
-#define MMU_68060 (1<<MMUB_68060)
-#define MMU_SUN3 (1<<MMUB_SUN3)
-#define MMU_APOLLO (1<<MMUB_APOLLO)
-#define MMU_COLDFIRE (1<<MMUB_COLDFIRE)
-
-#ifdef __KERNEL__
#ifndef __ASSEMBLY__
extern unsigned long m68k_cputype;
extern struct mem_info m68k_memory[NUM_MEMINFO];/* memory description */
#endif
-#endif /* __KERNEL__ */
-
#endif /* _M68K_SETUP_H */
#ifndef _M68K_SIGNAL_H
#define _M68K_SIGNAL_H
-#include <linux/types.h>
+#include <uapi/asm/signal.h>
-/* Avoid too many header ordering problems. */
-struct siginfo;
-
-#ifdef __KERNEL__
/* Most things should be clean enough to redefine this at will, if care
is taken to make libc match. */
unsigned long sig[_NSIG_WORDS];
} sigset_t;
-#else
-/* Here we must cater to libcs that poke about in kernel headers. */
-
-#define NSIG 32
-typedef unsigned long sigset_t;
-
-#endif /* __KERNEL__ */
-
-#define SIGHUP 1
-#define SIGINT 2
-#define SIGQUIT 3
-#define SIGILL 4
-#define SIGTRAP 5
-#define SIGABRT 6
-#define SIGIOT 6
-#define SIGBUS 7
-#define SIGFPE 8
-#define SIGKILL 9
-#define SIGUSR1 10
-#define SIGSEGV 11
-#define SIGUSR2 12
-#define SIGPIPE 13
-#define SIGALRM 14
-#define SIGTERM 15
-#define SIGSTKFLT 16
-#define SIGCHLD 17
-#define SIGCONT 18
-#define SIGSTOP 19
-#define SIGTSTP 20
-#define SIGTTIN 21
-#define SIGTTOU 22
-#define SIGURG 23
-#define SIGXCPU 24
-#define SIGXFSZ 25
-#define SIGVTALRM 26
-#define SIGPROF 27
-#define SIGWINCH 28
-#define SIGIO 29
-#define SIGPOLL SIGIO
-/*
-#define SIGLOST 29
-*/
-#define SIGPWR 30
-#define SIGSYS 31
-#define SIGUNUSED 31
-
-/* These should not be considered constants from userland. */
-#define SIGRTMIN 32
-#define SIGRTMAX _NSIG
-
-/*
- * SA_FLAGS values:
- *
- * SA_ONSTACK indicates that a registered stack_t will be used.
- * SA_RESTART flag to get restarting signals (which were the default long ago)
- * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
- * SA_RESETHAND clears the handler when the signal is delivered.
- * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
- * SA_NODEFER prevents the current signal from being masked in the handler.
- *
- * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
- * Unix names RESETHAND and NODEFER respectively.
- */
-#define SA_NOCLDSTOP 0x00000001
-#define SA_NOCLDWAIT 0x00000002
-#define SA_SIGINFO 0x00000004
-#define SA_ONSTACK 0x08000000
-#define SA_RESTART 0x10000000
-#define SA_NODEFER 0x40000000
-#define SA_RESETHAND 0x80000000
-
-#define SA_NOMASK SA_NODEFER
-#define SA_ONESHOT SA_RESETHAND
-
-/*
- * sigaltstack controls
- */
-#define SS_ONSTACK 1
-#define SS_DISABLE 2
-
-#define MINSIGSTKSZ 2048
-#define SIGSTKSZ 8192
-
-#include <asm-generic/signal-defs.h>
-
-#ifdef __KERNEL__
struct old_sigaction {
__sighandler_t sa_handler;
old_sigset_t sa_mask;
struct k_sigaction {
struct sigaction sa;
};
-#else
-/* Here we must cater to libcs that poke about in kernel headers. */
-
-struct sigaction {
- union {
- __sighandler_t _sa_handler;
- void (*_sa_sigaction)(int, struct siginfo *, void *);
- } _u;
- sigset_t sa_mask;
- unsigned long sa_flags;
- void (*sa_restorer)(void);
-};
-
-#define sa_handler _u._sa_handler
-#define sa_sigaction _u._sa_sigaction
-
-#endif /* __KERNEL__ */
-
-typedef struct sigaltstack {
- void __user *ss_sp;
- int ss_flags;
- size_t ss_size;
-} stack_t;
-
-#ifdef __KERNEL__
#include <asm/sigcontext.h>
#ifndef CONFIG_CPU_HAS_NO_BITFIELDS
static inline void sigaddset(sigset_t *set, int _sig)
{
asm ("bfset %0{%1,#1}"
- : "+od" (*set)
+ : "+o" (*set)
: "id" ((_sig - 1) ^ 31)
: "cc");
}
static inline void sigdelset(sigset_t *set, int _sig)
{
asm ("bfclr %0{%1,#1}"
- : "+od" (*set)
+ : "+o" (*set)
: "id" ((_sig - 1) ^ 31)
: "cc");
}
int ret;
asm ("bfextu %1{%2,#1},%0"
: "=d" (ret)
- : "od" (*set), "id" ((_sig-1) ^ 31)
+ : "o" (*set), "id" ((_sig-1) ^ 31)
: "cc");
return ret;
}
extern void ptrace_signal_deliver(struct pt_regs *regs, void *cookie);
#endif /* __uClinux__ */
-#endif /* __KERNEL__ */
#endif /* _M68K_SIGNAL_H */
#ifndef _M68K_TERMIOS_H
#define _M68K_TERMIOS_H
-#include <asm/termbits.h>
-#include <asm/ioctls.h>
+#include <uapi/asm/termios.h>
-struct winsize {
- unsigned short ws_row;
- unsigned short ws_col;
- unsigned short ws_xpixel;
- unsigned short ws_ypixel;
-};
-
-#define NCC 8
-struct termio {
- unsigned short c_iflag; /* input mode flags */
- unsigned short c_oflag; /* output mode flags */
- unsigned short c_cflag; /* control mode flags */
- unsigned short c_lflag; /* local mode flags */
- unsigned char c_line; /* line discipline */
- unsigned char c_cc[NCC]; /* control characters */
-};
-
-#ifdef __KERNEL__
/* intr=^C quit=^| erase=del kill=^U
eof=^D vtime=\0 vmin=\1 sxtc=\0
start=^Q stop=^S susp=^Z eol=\0
eol2=\0
*/
#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
-#endif
-
-/* modem lines */
-#define TIOCM_LE 0x001
-#define TIOCM_DTR 0x002
-#define TIOCM_RTS 0x004
-#define TIOCM_ST 0x008
-#define TIOCM_SR 0x010
-#define TIOCM_CTS 0x020
-#define TIOCM_CAR 0x040
-#define TIOCM_RNG 0x080
-#define TIOCM_DSR 0x100
-#define TIOCM_CD TIOCM_CAR
-#define TIOCM_RI TIOCM_RNG
-#define TIOCM_OUT1 0x2000
-#define TIOCM_OUT2 0x4000
-#define TIOCM_LOOP 0x8000
-
-/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
-
-#ifdef __KERNEL__
/*
* Translate a "termio" structure into a "termios". Ugh.
#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios))
#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios))
-#endif /* __KERNEL__ */
-
#endif /* _M68K_TERMIOS_H */
#ifndef _ASM_M68K_UNISTD_H_
#define _ASM_M68K_UNISTD_H_
-/*
- * This file contains the system call numbers.
- */
-
-#define __NR_restart_syscall 0
-#define __NR_exit 1
-#define __NR_fork 2
-#define __NR_read 3
-#define __NR_write 4
-#define __NR_open 5
-#define __NR_close 6
-#define __NR_waitpid 7
-#define __NR_creat 8
-#define __NR_link 9
-#define __NR_unlink 10
-#define __NR_execve 11
-#define __NR_chdir 12
-#define __NR_time 13
-#define __NR_mknod 14
-#define __NR_chmod 15
-#define __NR_chown 16
-/*#define __NR_break 17*/
-#define __NR_oldstat 18
-#define __NR_lseek 19
-#define __NR_getpid 20
-#define __NR_mount 21
-#define __NR_umount 22
-#define __NR_setuid 23
-#define __NR_getuid 24
-#define __NR_stime 25
-#define __NR_ptrace 26
-#define __NR_alarm 27
-#define __NR_oldfstat 28
-#define __NR_pause 29
-#define __NR_utime 30
-/*#define __NR_stty 31*/
-/*#define __NR_gtty 32*/
-#define __NR_access 33
-#define __NR_nice 34
-/*#define __NR_ftime 35*/
-#define __NR_sync 36
-#define __NR_kill 37
-#define __NR_rename 38
-#define __NR_mkdir 39
-#define __NR_rmdir 40
-#define __NR_dup 41
-#define __NR_pipe 42
-#define __NR_times 43
-/*#define __NR_prof 44*/
-#define __NR_brk 45
-#define __NR_setgid 46
-#define __NR_getgid 47
-#define __NR_signal 48
-#define __NR_geteuid 49
-#define __NR_getegid 50
-#define __NR_acct 51
-#define __NR_umount2 52
-/*#define __NR_lock 53*/
-#define __NR_ioctl 54
-#define __NR_fcntl 55
-/*#define __NR_mpx 56*/
-#define __NR_setpgid 57
-/*#define __NR_ulimit 58*/
-/*#define __NR_oldolduname 59*/
-#define __NR_umask 60
-#define __NR_chroot 61
-#define __NR_ustat 62
-#define __NR_dup2 63
-#define __NR_getppid 64
-#define __NR_getpgrp 65
-#define __NR_setsid 66
-#define __NR_sigaction 67
-#define __NR_sgetmask 68
-#define __NR_ssetmask 69
-#define __NR_setreuid 70
-#define __NR_setregid 71
-#define __NR_sigsuspend 72
-#define __NR_sigpending 73
-#define __NR_sethostname 74
-#define __NR_setrlimit 75
-#define __NR_getrlimit 76
-#define __NR_getrusage 77
-#define __NR_gettimeofday 78
-#define __NR_settimeofday 79
-#define __NR_getgroups 80
-#define __NR_setgroups 81
-#define __NR_select 82
-#define __NR_symlink 83
-#define __NR_oldlstat 84
-#define __NR_readlink 85
-#define __NR_uselib 86
-#define __NR_swapon 87
-#define __NR_reboot 88
-#define __NR_readdir 89
-#define __NR_mmap 90
-#define __NR_munmap 91
-#define __NR_truncate 92
-#define __NR_ftruncate 93
-#define __NR_fchmod 94
-#define __NR_fchown 95
-#define __NR_getpriority 96
-#define __NR_setpriority 97
-/*#define __NR_profil 98*/
-#define __NR_statfs 99
-#define __NR_fstatfs 100
-/*#define __NR_ioperm 101*/
-#define __NR_socketcall 102
-#define __NR_syslog 103
-#define __NR_setitimer 104
-#define __NR_getitimer 105
-#define __NR_stat 106
-#define __NR_lstat 107
-#define __NR_fstat 108
-/*#define __NR_olduname 109*/
-/*#define __NR_iopl 110*/ /* not supported */
-#define __NR_vhangup 111
-/*#define __NR_idle 112*/ /* Obsolete */
-/*#define __NR_vm86 113*/ /* not supported */
-#define __NR_wait4 114
-#define __NR_swapoff 115
-#define __NR_sysinfo 116
-#define __NR_ipc 117
-#define __NR_fsync 118
-#define __NR_sigreturn 119
-#define __NR_clone 120
-#define __NR_setdomainname 121
-#define __NR_uname 122
-#define __NR_cacheflush 123
-#define __NR_adjtimex 124
-#define __NR_mprotect 125
-#define __NR_sigprocmask 126
-#define __NR_create_module 127
-#define __NR_init_module 128
-#define __NR_delete_module 129
-#define __NR_get_kernel_syms 130
-#define __NR_quotactl 131
-#define __NR_getpgid 132
-#define __NR_fchdir 133
-#define __NR_bdflush 134
-#define __NR_sysfs 135
-#define __NR_personality 136
-/*#define __NR_afs_syscall 137*/ /* Syscall for Andrew File System */
-#define __NR_setfsuid 138
-#define __NR_setfsgid 139
-#define __NR__llseek 140
-#define __NR_getdents 141
-#define __NR__newselect 142
-#define __NR_flock 143
-#define __NR_msync 144
-#define __NR_readv 145
-#define __NR_writev 146
-#define __NR_getsid 147
-#define __NR_fdatasync 148
-#define __NR__sysctl 149
-#define __NR_mlock 150
-#define __NR_munlock 151
-#define __NR_mlockall 152
-#define __NR_munlockall 153
-#define __NR_sched_setparam 154
-#define __NR_sched_getparam 155
-#define __NR_sched_setscheduler 156
-#define __NR_sched_getscheduler 157
-#define __NR_sched_yield 158
-#define __NR_sched_get_priority_max 159
-#define __NR_sched_get_priority_min 160
-#define __NR_sched_rr_get_interval 161
-#define __NR_nanosleep 162
-#define __NR_mremap 163
-#define __NR_setresuid 164
-#define __NR_getresuid 165
-#define __NR_getpagesize 166
-#define __NR_query_module 167
-#define __NR_poll 168
-#define __NR_nfsservctl 169
-#define __NR_setresgid 170
-#define __NR_getresgid 171
-#define __NR_prctl 172
-#define __NR_rt_sigreturn 173
-#define __NR_rt_sigaction 174
-#define __NR_rt_sigprocmask 175
-#define __NR_rt_sigpending 176
-#define __NR_rt_sigtimedwait 177
-#define __NR_rt_sigqueueinfo 178
-#define __NR_rt_sigsuspend 179
-#define __NR_pread64 180
-#define __NR_pwrite64 181
-#define __NR_lchown 182
-#define __NR_getcwd 183
-#define __NR_capget 184
-#define __NR_capset 185
-#define __NR_sigaltstack 186
-#define __NR_sendfile 187
-#define __NR_getpmsg 188 /* some people actually want streams */
-#define __NR_putpmsg 189 /* some people actually want streams */
-#define __NR_vfork 190
-#define __NR_ugetrlimit 191
-#define __NR_mmap2 192
-#define __NR_truncate64 193
-#define __NR_ftruncate64 194
-#define __NR_stat64 195
-#define __NR_lstat64 196
-#define __NR_fstat64 197
-#define __NR_chown32 198
-#define __NR_getuid32 199
-#define __NR_getgid32 200
-#define __NR_geteuid32 201
-#define __NR_getegid32 202
-#define __NR_setreuid32 203
-#define __NR_setregid32 204
-#define __NR_getgroups32 205
-#define __NR_setgroups32 206
-#define __NR_fchown32 207
-#define __NR_setresuid32 208
-#define __NR_getresuid32 209
-#define __NR_setresgid32 210
-#define __NR_getresgid32 211
-#define __NR_lchown32 212
-#define __NR_setuid32 213
-#define __NR_setgid32 214
-#define __NR_setfsuid32 215
-#define __NR_setfsgid32 216
-#define __NR_pivot_root 217
-/* 218*/
-/* 219*/
-#define __NR_getdents64 220
-#define __NR_gettid 221
-#define __NR_tkill 222
-#define __NR_setxattr 223
-#define __NR_lsetxattr 224
-#define __NR_fsetxattr 225
-#define __NR_getxattr 226
-#define __NR_lgetxattr 227
-#define __NR_fgetxattr 228
-#define __NR_listxattr 229
-#define __NR_llistxattr 230
-#define __NR_flistxattr 231
-#define __NR_removexattr 232
-#define __NR_lremovexattr 233
-#define __NR_fremovexattr 234
-#define __NR_futex 235
-#define __NR_sendfile64 236
-#define __NR_mincore 237
-#define __NR_madvise 238
-#define __NR_fcntl64 239
-#define __NR_readahead 240
-#define __NR_io_setup 241
-#define __NR_io_destroy 242
-#define __NR_io_getevents 243
-#define __NR_io_submit 244
-#define __NR_io_cancel 245
-#define __NR_fadvise64 246
-#define __NR_exit_group 247
-#define __NR_lookup_dcookie 248
-#define __NR_epoll_create 249
-#define __NR_epoll_ctl 250
-#define __NR_epoll_wait 251
-#define __NR_remap_file_pages 252
-#define __NR_set_tid_address 253
-#define __NR_timer_create 254
-#define __NR_timer_settime 255
-#define __NR_timer_gettime 256
-#define __NR_timer_getoverrun 257
-#define __NR_timer_delete 258
-#define __NR_clock_settime 259
-#define __NR_clock_gettime 260
-#define __NR_clock_getres 261
-#define __NR_clock_nanosleep 262
-#define __NR_statfs64 263
-#define __NR_fstatfs64 264
-#define __NR_tgkill 265
-#define __NR_utimes 266
-#define __NR_fadvise64_64 267
-#define __NR_mbind 268
-#define __NR_get_mempolicy 269
-#define __NR_set_mempolicy 270
-#define __NR_mq_open 271
-#define __NR_mq_unlink 272
-#define __NR_mq_timedsend 273
-#define __NR_mq_timedreceive 274
-#define __NR_mq_notify 275
-#define __NR_mq_getsetattr 276
-#define __NR_waitid 277
-/*#define __NR_vserver 278*/
-#define __NR_add_key 279
-#define __NR_request_key 280
-#define __NR_keyctl 281
-#define __NR_ioprio_set 282
-#define __NR_ioprio_get 283
-#define __NR_inotify_init 284
-#define __NR_inotify_add_watch 285
-#define __NR_inotify_rm_watch 286
-#define __NR_migrate_pages 287
-#define __NR_openat 288
-#define __NR_mkdirat 289
-#define __NR_mknodat 290
-#define __NR_fchownat 291
-#define __NR_futimesat 292
-#define __NR_fstatat64 293
-#define __NR_unlinkat 294
-#define __NR_renameat 295
-#define __NR_linkat 296
-#define __NR_symlinkat 297
-#define __NR_readlinkat 298
-#define __NR_fchmodat 299
-#define __NR_faccessat 300
-#define __NR_pselect6 301
-#define __NR_ppoll 302
-#define __NR_unshare 303
-#define __NR_set_robust_list 304
-#define __NR_get_robust_list 305
-#define __NR_splice 306
-#define __NR_sync_file_range 307
-#define __NR_tee 308
-#define __NR_vmsplice 309
-#define __NR_move_pages 310
-#define __NR_sched_setaffinity 311
-#define __NR_sched_getaffinity 312
-#define __NR_kexec_load 313
-#define __NR_getcpu 314
-#define __NR_epoll_pwait 315
-#define __NR_utimensat 316
-#define __NR_signalfd 317
-#define __NR_timerfd_create 318
-#define __NR_eventfd 319
-#define __NR_fallocate 320
-#define __NR_timerfd_settime 321
-#define __NR_timerfd_gettime 322
-#define __NR_signalfd4 323
-#define __NR_eventfd2 324
-#define __NR_epoll_create1 325
-#define __NR_dup3 326
-#define __NR_pipe2 327
-#define __NR_inotify_init1 328
-#define __NR_preadv 329
-#define __NR_pwritev 330
-#define __NR_rt_tgsigqueueinfo 331
-#define __NR_perf_event_open 332
-#define __NR_get_thread_area 333
-#define __NR_set_thread_area 334
-#define __NR_atomic_cmpxchg_32 335
-#define __NR_atomic_barrier 336
-#define __NR_fanotify_init 337
-#define __NR_fanotify_mark 338
-#define __NR_prlimit64 339
-#define __NR_name_to_handle_at 340
-#define __NR_open_by_handle_at 341
-#define __NR_clock_adjtime 342
-#define __NR_syncfs 343
-#define __NR_setns 344
-#define __NR_process_vm_readv 345
-#define __NR_process_vm_writev 346
+#include <uapi/asm/unistd.h>
-#ifdef __KERNEL__
-#define NR_syscalls 347
+#define NR_syscalls 348
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_OLD_STAT
*/
#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
-#endif /* __KERNEL__ */
#endif /* _ASM_M68K_UNISTD_H_ */
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
+header-y += a.out.h
+header-y += auxvec.h
+header-y += byteorder.h
+header-y += cachectl.h
+header-y += fcntl.h
+header-y += ioctls.h
+header-y += msgbuf.h
+header-y += param.h
+header-y += poll.h
+header-y += posix_types.h
+header-y += ptrace.h
+header-y += sembuf.h
+header-y += setup.h
+header-y += shmbuf.h
+header-y += sigcontext.h
+header-y += signal.h
+header-y += socket.h
+header-y += sockios.h
+header-y += stat.h
+header-y += swab.h
+header-y += termbits.h
+header-y += termios.h
+header-y += unistd.h
--- /dev/null
+#ifndef _UAPI_M68K_PTRACE_H
+#define _UAPI_M68K_PTRACE_H
+
+#define PT_D1 0
+#define PT_D2 1
+#define PT_D3 2
+#define PT_D4 3
+#define PT_D5 4
+#define PT_D6 5
+#define PT_D7 6
+#define PT_A0 7
+#define PT_A1 8
+#define PT_A2 9
+#define PT_A3 10
+#define PT_A4 11
+#define PT_A5 12
+#define PT_A6 13
+#define PT_D0 14
+#define PT_USP 15
+#define PT_ORIG_D0 16
+#define PT_SR 17
+#define PT_PC 18
+
+#ifndef __ASSEMBLY__
+
+/* this struct defines the way the registers are stored on the
+ stack during a system call. */
+
+struct pt_regs {
+ long d1;
+ long d2;
+ long d3;
+ long d4;
+ long d5;
+ long a0;
+ long a1;
+ long a2;
+ long d0;
+ long orig_d0;
+ long stkadj;
+#ifdef CONFIG_COLDFIRE
+ unsigned format : 4; /* frame format specifier */
+ unsigned vector : 12; /* vector offset */
+ unsigned short sr;
+ unsigned long pc;
+#else
+ unsigned short sr;
+ unsigned long pc;
+ unsigned format : 4; /* frame format specifier */
+ unsigned vector : 12; /* vector offset */
+#endif
+};
+
+/*
+ * This is the extended stack used by signal handlers and the context
+ * switcher: it's pushed after the normal "struct pt_regs".
+ */
+struct switch_stack {
+ unsigned long d6;
+ unsigned long d7;
+ unsigned long a3;
+ unsigned long a4;
+ unsigned long a5;
+ unsigned long a6;
+ unsigned long retpc;
+};
+
+/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
+#define PTRACE_GETREGS 12
+#define PTRACE_SETREGS 13
+#define PTRACE_GETFPREGS 14
+#define PTRACE_SETFPREGS 15
+
+#define PTRACE_GET_THREAD_AREA 25
+
+#define PTRACE_SINGLEBLOCK 33 /* resume execution until next branch */
+
+#endif /* __ASSEMBLY__ */
+#endif /* _UAPI_M68K_PTRACE_H */
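The PT_* indices above are what debuggers use to address saved registers through ptrace(2). A minimal userspace sketch follows; note that the byte offset passed to PTRACE_PEEKUSER (index * sizeof(long)) is an assumption about the m68k convention, not something stated in this header:

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t child = fork();

	if (child == 0) {
		/* let the parent trace us; we stop at execve() */
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		execl("/bin/true", "true", (char *)NULL);
		_exit(1);
	}
	waitpid(child, NULL, 0);
	/* PT_PC == 18; assumed byte offset is 18 * sizeof(long) */
	long pc = ptrace(PTRACE_PEEKUSER, child,
			 (void *)(18L * sizeof(long)), NULL);
	printf("child PC: %#lx\n", pc);
	ptrace(PTRACE_CONT, child, NULL, NULL);
	return 0;
}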
--- /dev/null
+/*
+** asm/setup.h -- Definition of the Linux/m68k setup information
+**
+** Copyright 1992 by Greg Harp
+**
+** This file is subject to the terms and conditions of the GNU General Public
+** License. See the file COPYING in the main directory of this archive
+** for more details.
+**
+** Created 09/29/92 by Greg Harp
+**
+** 5/2/94 Roman Hodek:
+** Added bi_atari part of the machine dependent union bi_un; for now it
+** contains just a model field to distinguish between TT and Falcon.
+** 26/7/96 Roman Zippel:
+** Renamed to setup.h; added some useful macros to allow gcc some
+** optimizations if possible.
+** 5/10/96 Geert Uytterhoeven:
+** Redesign of the boot information structure; moved boot information
+** structure to bootinfo.h
+*/
+
+#ifndef _UAPI_M68K_SETUP_H
+#define _UAPI_M68K_SETUP_H
+
+
+
+ /*
+ * Linux/m68k Architectures
+ */
+
+#define MACH_AMIGA 1
+#define MACH_ATARI 2
+#define MACH_MAC 3
+#define MACH_APOLLO 4
+#define MACH_SUN3 5
+#define MACH_MVME147 6
+#define MACH_MVME16x 7
+#define MACH_BVME6000 8
+#define MACH_HP300 9
+#define MACH_Q40 10
+#define MACH_SUN3X 11
+#define MACH_M54XX 12
+
+#define COMMAND_LINE_SIZE 256
+
+
+
+ /*
+ * CPU, FPU and MMU types
+ *
+ * Note: we may rely on the following equalities:
+ *
+ * CPU_68020 == MMU_68851
+ * CPU_68030 == MMU_68030
+ * CPU_68040 == FPU_68040 == MMU_68040
+ * CPU_68060 == FPU_68060 == MMU_68060
+ */
+
+#define CPUB_68020 0
+#define CPUB_68030 1
+#define CPUB_68040 2
+#define CPUB_68060 3
+#define CPUB_COLDFIRE 4
+
+#define CPU_68020 (1<<CPUB_68020)
+#define CPU_68030 (1<<CPUB_68030)
+#define CPU_68040 (1<<CPUB_68040)
+#define CPU_68060 (1<<CPUB_68060)
+#define CPU_COLDFIRE (1<<CPUB_COLDFIRE)
+
+#define FPUB_68881 0
+#define FPUB_68882 1
+#define FPUB_68040 2 /* Internal FPU */
+#define FPUB_68060 3 /* Internal FPU */
+#define FPUB_SUNFPA 4 /* Sun-3 FPA */
+#define FPUB_COLDFIRE 5 /* ColdFire FPU */
+
+#define FPU_68881 (1<<FPUB_68881)
+#define FPU_68882 (1<<FPUB_68882)
+#define FPU_68040 (1<<FPUB_68040)
+#define FPU_68060 (1<<FPUB_68060)
+#define FPU_SUNFPA (1<<FPUB_SUNFPA)
+#define FPU_COLDFIRE (1<<FPUB_COLDFIRE)
+
+#define MMUB_68851 0
+#define MMUB_68030 1 /* Internal MMU */
+#define MMUB_68040 2 /* Internal MMU */
+#define MMUB_68060 3 /* Internal MMU */
+#define MMUB_APOLLO 4 /* Custom Apollo */
+#define MMUB_SUN3 5 /* Custom Sun-3 */
+#define MMUB_COLDFIRE 6 /* Internal MMU */
+
+#define MMU_68851 (1<<MMUB_68851)
+#define MMU_68030 (1<<MMUB_68030)
+#define MMU_68040 (1<<MMUB_68040)
+#define MMU_68060 (1<<MMUB_68060)
+#define MMU_SUN3 (1<<MMUB_SUN3)
+#define MMU_APOLLO (1<<MMUB_APOLLO)
+#define MMU_COLDFIRE (1<<MMUB_COLDFIRE)
+
+
+#endif /* _UAPI_M68K_SETUP_H */
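The CPU/FPU/MMU macros come in pairs: CPUB_* is a bit number and CPU_* the corresponding one-hot mask, so a detected-hardware word can be tested with a single AND. A tiny illustration of the pattern (the value assigned to cputype is made up; in the kernel it comes from boot-time detection):

#include <stdio.h>

#define CPUB_68040	2
#define CPU_68040	(1 << CPUB_68040)

int main(void)
{
	unsigned long cputype = CPU_68040;	/* pretend detection result */

	if (cputype & CPU_68040)
		printf("68040-class CPU detected\n");
	return 0;
}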
--- /dev/null
+#ifndef _UAPI_M68K_SIGNAL_H
+#define _UAPI_M68K_SIGNAL_H
+
+#include <linux/types.h>
+
+/* Avoid too many header ordering problems. */
+struct siginfo;
+
+#ifndef __KERNEL__
+/* Here we must cater to libcs that poke about in kernel headers. */
+
+#define NSIG 32
+typedef unsigned long sigset_t;
+
+#endif /* __KERNEL__ */
+
+#define SIGHUP 1
+#define SIGINT 2
+#define SIGQUIT 3
+#define SIGILL 4
+#define SIGTRAP 5
+#define SIGABRT 6
+#define SIGIOT 6
+#define SIGBUS 7
+#define SIGFPE 8
+#define SIGKILL 9
+#define SIGUSR1 10
+#define SIGSEGV 11
+#define SIGUSR2 12
+#define SIGPIPE 13
+#define SIGALRM 14
+#define SIGTERM 15
+#define SIGSTKFLT 16
+#define SIGCHLD 17
+#define SIGCONT 18
+#define SIGSTOP 19
+#define SIGTSTP 20
+#define SIGTTIN 21
+#define SIGTTOU 22
+#define SIGURG 23
+#define SIGXCPU 24
+#define SIGXFSZ 25
+#define SIGVTALRM 26
+#define SIGPROF 27
+#define SIGWINCH 28
+#define SIGIO 29
+#define SIGPOLL SIGIO
+/*
+#define SIGLOST 29
+*/
+#define SIGPWR 30
+#define SIGSYS 31
+#define SIGUNUSED 31
+
+/* These should not be considered constants from userland. */
+#define SIGRTMIN 32
+#define SIGRTMAX _NSIG
+
+/*
+ * SA_FLAGS values:
+ *
+ * SA_ONSTACK indicates that a registered stack_t will be used.
+ * SA_RESTART flag to get restarting signals (which were the default long ago)
+ * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
+ * SA_RESETHAND clears the handler when the signal is delivered.
+ * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
+ * SA_NODEFER prevents the current signal from being masked in the handler.
+ *
+ * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
+ * Unix names RESETHAND and NODEFER respectively.
+ */
+#define SA_NOCLDSTOP 0x00000001
+#define SA_NOCLDWAIT 0x00000002
+#define SA_SIGINFO 0x00000004
+#define SA_ONSTACK 0x08000000
+#define SA_RESTART 0x10000000
+#define SA_NODEFER 0x40000000
+#define SA_RESETHAND 0x80000000
+
+#define SA_NOMASK SA_NODEFER
+#define SA_ONESHOT SA_RESETHAND
+
+/*
+ * sigaltstack controls
+ */
+#define SS_ONSTACK 1
+#define SS_DISABLE 2
+
+#define MINSIGSTKSZ 2048
+#define SIGSTKSZ 8192
+
+#include <asm-generic/signal-defs.h>
+
+#ifndef __KERNEL__
+/* Here we must cater to libcs that poke about in kernel headers. */
+
+struct sigaction {
+ union {
+ __sighandler_t _sa_handler;
+ void (*_sa_sigaction)(int, struct siginfo *, void *);
+ } _u;
+ sigset_t sa_mask;
+ unsigned long sa_flags;
+ void (*sa_restorer)(void);
+};
+
+#define sa_handler _u._sa_handler
+#define sa_sigaction _u._sa_sigaction
+
+#endif /* __KERNEL__ */
+
+typedef struct sigaltstack {
+ void __user *ss_sp;
+ int ss_flags;
+ size_t ss_size;
+} stack_t;
+
+#endif /* _UAPI_M68K_SIGNAL_H */
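The SA_ONSTACK flag and the sigaltstack layout above work together: user code registers an alternate stack, then asks for a handler to run on it. A hedged sketch using only the POSIX wrappers, not kernel-internal interfaces:

#include <signal.h>
#include <stdlib.h>
#include <unistd.h>

static void handler(int sig)
{
	write(1, "handled on the alternate stack\n", 31);
	_exit(0);
}

int main(void)
{
	stack_t ss = { .ss_sp = malloc(SIGSTKSZ), .ss_flags = 0,
		       .ss_size = SIGSTKSZ };
	struct sigaction sa = { .sa_handler = handler,
				.sa_flags = SA_ONSTACK | SA_RESTART };

	sigaltstack(&ss, NULL);
	sigemptyset(&sa.sa_mask);
	sigaction(SIGUSR1, &sa, NULL);
	raise(SIGUSR1);
	return 1;	/* not reached */
}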
--- /dev/null
+#ifndef _UAPI_M68K_TERMIOS_H
+#define _UAPI_M68K_TERMIOS_H
+
+#include <asm/termbits.h>
+#include <asm/ioctls.h>
+
+struct winsize {
+ unsigned short ws_row;
+ unsigned short ws_col;
+ unsigned short ws_xpixel;
+ unsigned short ws_ypixel;
+};
+
+#define NCC 8
+struct termio {
+ unsigned short c_iflag; /* input mode flags */
+ unsigned short c_oflag; /* output mode flags */
+ unsigned short c_cflag; /* control mode flags */
+ unsigned short c_lflag; /* local mode flags */
+ unsigned char c_line; /* line discipline */
+ unsigned char c_cc[NCC]; /* control characters */
+};
+
+
+/* modem lines */
+#define TIOCM_LE 0x001
+#define TIOCM_DTR 0x002
+#define TIOCM_RTS 0x004
+#define TIOCM_ST 0x008
+#define TIOCM_SR 0x010
+#define TIOCM_CTS 0x020
+#define TIOCM_CAR 0x040
+#define TIOCM_RNG 0x080
+#define TIOCM_DSR 0x100
+#define TIOCM_CD TIOCM_CAR
+#define TIOCM_RI TIOCM_RNG
+#define TIOCM_OUT1 0x2000
+#define TIOCM_OUT2 0x4000
+#define TIOCM_LOOP 0x8000
+
+/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
+
+
+#endif /* _UAPI_M68K_TERMIOS_H */
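The TIOCM_* bits above are read back with the standard TIOCMGET ioctl. A small sketch (whether /dev/ttyS0 exists on a given machine is obviously an assumption):

#include <stdio.h>
#include <fcntl.h>
#include <termios.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/ttyS0", O_RDONLY | O_NONBLOCK);
	int bits;

	if (fd < 0 || ioctl(fd, TIOCMGET, &bits) < 0) {
		perror("TIOCMGET");
		return 1;
	}
	printf("DTR=%d RTS=%d CTS=%d CD=%d\n",
	       !!(bits & TIOCM_DTR), !!(bits & TIOCM_RTS),
	       !!(bits & TIOCM_CTS), !!(bits & TIOCM_CD));
	close(fd);
	return 0;
}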
--- /dev/null
+#ifndef _UAPI_ASM_M68K_UNISTD_H_
+#define _UAPI_ASM_M68K_UNISTD_H_
+
+/*
+ * This file contains the system call numbers.
+ */
+
+#define __NR_restart_syscall 0
+#define __NR_exit 1
+#define __NR_fork 2
+#define __NR_read 3
+#define __NR_write 4
+#define __NR_open 5
+#define __NR_close 6
+#define __NR_waitpid 7
+#define __NR_creat 8
+#define __NR_link 9
+#define __NR_unlink 10
+#define __NR_execve 11
+#define __NR_chdir 12
+#define __NR_time 13
+#define __NR_mknod 14
+#define __NR_chmod 15
+#define __NR_chown 16
+/*#define __NR_break 17*/
+#define __NR_oldstat 18
+#define __NR_lseek 19
+#define __NR_getpid 20
+#define __NR_mount 21
+#define __NR_umount 22
+#define __NR_setuid 23
+#define __NR_getuid 24
+#define __NR_stime 25
+#define __NR_ptrace 26
+#define __NR_alarm 27
+#define __NR_oldfstat 28
+#define __NR_pause 29
+#define __NR_utime 30
+/*#define __NR_stty 31*/
+/*#define __NR_gtty 32*/
+#define __NR_access 33
+#define __NR_nice 34
+/*#define __NR_ftime 35*/
+#define __NR_sync 36
+#define __NR_kill 37
+#define __NR_rename 38
+#define __NR_mkdir 39
+#define __NR_rmdir 40
+#define __NR_dup 41
+#define __NR_pipe 42
+#define __NR_times 43
+/*#define __NR_prof 44*/
+#define __NR_brk 45
+#define __NR_setgid 46
+#define __NR_getgid 47
+#define __NR_signal 48
+#define __NR_geteuid 49
+#define __NR_getegid 50
+#define __NR_acct 51
+#define __NR_umount2 52
+/*#define __NR_lock 53*/
+#define __NR_ioctl 54
+#define __NR_fcntl 55
+/*#define __NR_mpx 56*/
+#define __NR_setpgid 57
+/*#define __NR_ulimit 58*/
+/*#define __NR_oldolduname 59*/
+#define __NR_umask 60
+#define __NR_chroot 61
+#define __NR_ustat 62
+#define __NR_dup2 63
+#define __NR_getppid 64
+#define __NR_getpgrp 65
+#define __NR_setsid 66
+#define __NR_sigaction 67
+#define __NR_sgetmask 68
+#define __NR_ssetmask 69
+#define __NR_setreuid 70
+#define __NR_setregid 71
+#define __NR_sigsuspend 72
+#define __NR_sigpending 73
+#define __NR_sethostname 74
+#define __NR_setrlimit 75
+#define __NR_getrlimit 76
+#define __NR_getrusage 77
+#define __NR_gettimeofday 78
+#define __NR_settimeofday 79
+#define __NR_getgroups 80
+#define __NR_setgroups 81
+#define __NR_select 82
+#define __NR_symlink 83
+#define __NR_oldlstat 84
+#define __NR_readlink 85
+#define __NR_uselib 86
+#define __NR_swapon 87
+#define __NR_reboot 88
+#define __NR_readdir 89
+#define __NR_mmap 90
+#define __NR_munmap 91
+#define __NR_truncate 92
+#define __NR_ftruncate 93
+#define __NR_fchmod 94
+#define __NR_fchown 95
+#define __NR_getpriority 96
+#define __NR_setpriority 97
+/*#define __NR_profil 98*/
+#define __NR_statfs 99
+#define __NR_fstatfs 100
+/*#define __NR_ioperm 101*/
+#define __NR_socketcall 102
+#define __NR_syslog 103
+#define __NR_setitimer 104
+#define __NR_getitimer 105
+#define __NR_stat 106
+#define __NR_lstat 107
+#define __NR_fstat 108
+/*#define __NR_olduname 109*/
+/*#define __NR_iopl 110*/ /* not supported */
+#define __NR_vhangup 111
+/*#define __NR_idle 112*/ /* Obsolete */
+/*#define __NR_vm86 113*/ /* not supported */
+#define __NR_wait4 114
+#define __NR_swapoff 115
+#define __NR_sysinfo 116
+#define __NR_ipc 117
+#define __NR_fsync 118
+#define __NR_sigreturn 119
+#define __NR_clone 120
+#define __NR_setdomainname 121
+#define __NR_uname 122
+#define __NR_cacheflush 123
+#define __NR_adjtimex 124
+#define __NR_mprotect 125
+#define __NR_sigprocmask 126
+#define __NR_create_module 127
+#define __NR_init_module 128
+#define __NR_delete_module 129
+#define __NR_get_kernel_syms 130
+#define __NR_quotactl 131
+#define __NR_getpgid 132
+#define __NR_fchdir 133
+#define __NR_bdflush 134
+#define __NR_sysfs 135
+#define __NR_personality 136
+/*#define __NR_afs_syscall 137*/ /* Syscall for Andrew File System */
+#define __NR_setfsuid 138
+#define __NR_setfsgid 139
+#define __NR__llseek 140
+#define __NR_getdents 141
+#define __NR__newselect 142
+#define __NR_flock 143
+#define __NR_msync 144
+#define __NR_readv 145
+#define __NR_writev 146
+#define __NR_getsid 147
+#define __NR_fdatasync 148
+#define __NR__sysctl 149
+#define __NR_mlock 150
+#define __NR_munlock 151
+#define __NR_mlockall 152
+#define __NR_munlockall 153
+#define __NR_sched_setparam 154
+#define __NR_sched_getparam 155
+#define __NR_sched_setscheduler 156
+#define __NR_sched_getscheduler 157
+#define __NR_sched_yield 158
+#define __NR_sched_get_priority_max 159
+#define __NR_sched_get_priority_min 160
+#define __NR_sched_rr_get_interval 161
+#define __NR_nanosleep 162
+#define __NR_mremap 163
+#define __NR_setresuid 164
+#define __NR_getresuid 165
+#define __NR_getpagesize 166
+#define __NR_query_module 167
+#define __NR_poll 168
+#define __NR_nfsservctl 169
+#define __NR_setresgid 170
+#define __NR_getresgid 171
+#define __NR_prctl 172
+#define __NR_rt_sigreturn 173
+#define __NR_rt_sigaction 174
+#define __NR_rt_sigprocmask 175
+#define __NR_rt_sigpending 176
+#define __NR_rt_sigtimedwait 177
+#define __NR_rt_sigqueueinfo 178
+#define __NR_rt_sigsuspend 179
+#define __NR_pread64 180
+#define __NR_pwrite64 181
+#define __NR_lchown 182
+#define __NR_getcwd 183
+#define __NR_capget 184
+#define __NR_capset 185
+#define __NR_sigaltstack 186
+#define __NR_sendfile 187
+#define __NR_getpmsg 188 /* some people actually want streams */
+#define __NR_putpmsg 189 /* some people actually want streams */
+#define __NR_vfork 190
+#define __NR_ugetrlimit 191
+#define __NR_mmap2 192
+#define __NR_truncate64 193
+#define __NR_ftruncate64 194
+#define __NR_stat64 195
+#define __NR_lstat64 196
+#define __NR_fstat64 197
+#define __NR_chown32 198
+#define __NR_getuid32 199
+#define __NR_getgid32 200
+#define __NR_geteuid32 201
+#define __NR_getegid32 202
+#define __NR_setreuid32 203
+#define __NR_setregid32 204
+#define __NR_getgroups32 205
+#define __NR_setgroups32 206
+#define __NR_fchown32 207
+#define __NR_setresuid32 208
+#define __NR_getresuid32 209
+#define __NR_setresgid32 210
+#define __NR_getresgid32 211
+#define __NR_lchown32 212
+#define __NR_setuid32 213
+#define __NR_setgid32 214
+#define __NR_setfsuid32 215
+#define __NR_setfsgid32 216
+#define __NR_pivot_root 217
+/* 218*/
+/* 219*/
+#define __NR_getdents64 220
+#define __NR_gettid 221
+#define __NR_tkill 222
+#define __NR_setxattr 223
+#define __NR_lsetxattr 224
+#define __NR_fsetxattr 225
+#define __NR_getxattr 226
+#define __NR_lgetxattr 227
+#define __NR_fgetxattr 228
+#define __NR_listxattr 229
+#define __NR_llistxattr 230
+#define __NR_flistxattr 231
+#define __NR_removexattr 232
+#define __NR_lremovexattr 233
+#define __NR_fremovexattr 234
+#define __NR_futex 235
+#define __NR_sendfile64 236
+#define __NR_mincore 237
+#define __NR_madvise 238
+#define __NR_fcntl64 239
+#define __NR_readahead 240
+#define __NR_io_setup 241
+#define __NR_io_destroy 242
+#define __NR_io_getevents 243
+#define __NR_io_submit 244
+#define __NR_io_cancel 245
+#define __NR_fadvise64 246
+#define __NR_exit_group 247
+#define __NR_lookup_dcookie 248
+#define __NR_epoll_create 249
+#define __NR_epoll_ctl 250
+#define __NR_epoll_wait 251
+#define __NR_remap_file_pages 252
+#define __NR_set_tid_address 253
+#define __NR_timer_create 254
+#define __NR_timer_settime 255
+#define __NR_timer_gettime 256
+#define __NR_timer_getoverrun 257
+#define __NR_timer_delete 258
+#define __NR_clock_settime 259
+#define __NR_clock_gettime 260
+#define __NR_clock_getres 261
+#define __NR_clock_nanosleep 262
+#define __NR_statfs64 263
+#define __NR_fstatfs64 264
+#define __NR_tgkill 265
+#define __NR_utimes 266
+#define __NR_fadvise64_64 267
+#define __NR_mbind 268
+#define __NR_get_mempolicy 269
+#define __NR_set_mempolicy 270
+#define __NR_mq_open 271
+#define __NR_mq_unlink 272
+#define __NR_mq_timedsend 273
+#define __NR_mq_timedreceive 274
+#define __NR_mq_notify 275
+#define __NR_mq_getsetattr 276
+#define __NR_waitid 277
+/*#define __NR_vserver 278*/
+#define __NR_add_key 279
+#define __NR_request_key 280
+#define __NR_keyctl 281
+#define __NR_ioprio_set 282
+#define __NR_ioprio_get 283
+#define __NR_inotify_init 284
+#define __NR_inotify_add_watch 285
+#define __NR_inotify_rm_watch 286
+#define __NR_migrate_pages 287
+#define __NR_openat 288
+#define __NR_mkdirat 289
+#define __NR_mknodat 290
+#define __NR_fchownat 291
+#define __NR_futimesat 292
+#define __NR_fstatat64 293
+#define __NR_unlinkat 294
+#define __NR_renameat 295
+#define __NR_linkat 296
+#define __NR_symlinkat 297
+#define __NR_readlinkat 298
+#define __NR_fchmodat 299
+#define __NR_faccessat 300
+#define __NR_pselect6 301
+#define __NR_ppoll 302
+#define __NR_unshare 303
+#define __NR_set_robust_list 304
+#define __NR_get_robust_list 305
+#define __NR_splice 306
+#define __NR_sync_file_range 307
+#define __NR_tee 308
+#define __NR_vmsplice 309
+#define __NR_move_pages 310
+#define __NR_sched_setaffinity 311
+#define __NR_sched_getaffinity 312
+#define __NR_kexec_load 313
+#define __NR_getcpu 314
+#define __NR_epoll_pwait 315
+#define __NR_utimensat 316
+#define __NR_signalfd 317
+#define __NR_timerfd_create 318
+#define __NR_eventfd 319
+#define __NR_fallocate 320
+#define __NR_timerfd_settime 321
+#define __NR_timerfd_gettime 322
+#define __NR_signalfd4 323
+#define __NR_eventfd2 324
+#define __NR_epoll_create1 325
+#define __NR_dup3 326
+#define __NR_pipe2 327
+#define __NR_inotify_init1 328
+#define __NR_preadv 329
+#define __NR_pwritev 330
+#define __NR_rt_tgsigqueueinfo 331
+#define __NR_perf_event_open 332
+#define __NR_get_thread_area 333
+#define __NR_set_thread_area 334
+#define __NR_atomic_cmpxchg_32 335
+#define __NR_atomic_barrier 336
+#define __NR_fanotify_init 337
+#define __NR_fanotify_mark 338
+#define __NR_prlimit64 339
+#define __NR_name_to_handle_at 340
+#define __NR_open_by_handle_at 341
+#define __NR_clock_adjtime 342
+#define __NR_syncfs 343
+#define __NR_setns 344
+#define __NR_process_vm_readv 345
+#define __NR_process_vm_writev 346
+#define __NR_kcmp 347
+
+#endif /* _UAPI_ASM_M68K_UNISTD_H_ */
.long sys_setns
.long sys_process_vm_readv /* 345 */
.long sys_process_vm_writev
+ .long sys_kcmp
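__NR_kcmp (347) wires the kcmp(2) system call up for m68k; it lets one process ask whether two tasks share a kernel object. A hedged usage sketch, assuming libc headers new enough to provide SYS_kcmp (KCMP_FILE == 0 is from the kcmp ABI):

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef KCMP_FILE
#define KCMP_FILE 0
#endif

int main(void)
{
	pid_t self = getpid();
	int dupfd = dup(1);

	/* return value 0 means both fds share one open file description */
	long r = syscall(SYS_kcmp, self, self, KCMP_FILE, 1, dupfd);
	printf("stdout vs dup(stdout): %s\n", r == 0 ? "same" : "different");
	return 0;
}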
* measurement, and debugging facilities.
*/
+#include <linux/irqflags.h>
#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-l2c.h>
#include <asm/octeon/cvmx-spinlock.h>
*/
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/irqflags.h>
#include <asm/bcache.h>
#endif
#include <linux/compiler.h>
-#include <linux/irqflags.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/byteorder.h> /* sigh ... */
#define smp_mb__before_clear_bit() smp_mb__before_llsc()
#define smp_mb__after_clear_bit() smp_llsc_mb()
+
+/*
+ * These are the "slower" versions of the functions and are in bitops.c.
+ * These functions call raw_local_irq_{save,restore}().
+ */
+void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
+void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
+void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
+int __mips_test_and_set_bit(unsigned long nr,
+ volatile unsigned long *addr);
+int __mips_test_and_set_bit_lock(unsigned long nr,
+ volatile unsigned long *addr);
+int __mips_test_and_clear_bit(unsigned long nr,
+ volatile unsigned long *addr);
+int __mips_test_and_change_bit(unsigned long nr,
+ volatile unsigned long *addr);
+
+
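All of the out-of-line helpers split the bit number the same way: nr >> SZLONG_LOG selects the word, nr & SZLONG_MASK the bit within it. A standalone illustration with 32-bit constants (SZLONG_LOG/SZLONG_MASK would be 6/63 on 64-bit kernels; the values below are stand-ins, not taken from a kernel header):

#include <stdio.h>

#define SZLONG_LOG	5	/* log2(bits per long), 32-bit case */
#define SZLONG_MASK	31UL

int main(void)
{
	unsigned long nr = 37;
	unsigned long word = nr >> SZLONG_LOG;	/* 37 / 32 -> word 1 */
	unsigned long bit  = nr & SZLONG_MASK;	/* 37 % 32 -> bit 5  */

	printf("bit %lu -> word %lu, mask %#lx\n", nr, word, 1UL << bit);
	return 0;
}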
/*
* set_bit - Atomically set a bit in memory
* @nr: the bit to set
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
- unsigned short bit = nr & SZLONG_MASK;
+ int bit = nr & SZLONG_MASK;
unsigned long temp;
if (kernel_uses_llsc && R10000_LLSC_WAR) {
: "=&r" (temp), "+m" (*m)
: "ir" (1UL << bit));
} while (unlikely(!temp));
- } else {
- volatile unsigned long *a = addr;
- unsigned long mask;
- unsigned long flags;
-
- a += nr >> SZLONG_LOG;
- mask = 1UL << bit;
- raw_local_irq_save(flags);
- *a |= mask;
- raw_local_irq_restore(flags);
- }
+ } else
+ __mips_set_bit(nr, addr);
}
/*
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
- unsigned short bit = nr & SZLONG_MASK;
+ int bit = nr & SZLONG_MASK;
unsigned long temp;
if (kernel_uses_llsc && R10000_LLSC_WAR) {
: "=&r" (temp), "+m" (*m)
: "ir" (~(1UL << bit)));
} while (unlikely(!temp));
- } else {
- volatile unsigned long *a = addr;
- unsigned long mask;
- unsigned long flags;
-
- a += nr >> SZLONG_LOG;
- mask = 1UL << bit;
- raw_local_irq_save(flags);
- *a &= ~mask;
- raw_local_irq_restore(flags);
- }
+ } else
+ __mips_clear_bit(nr, addr);
}
/*
*/
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
- unsigned short bit = nr & SZLONG_MASK;
+ int bit = nr & SZLONG_MASK;
if (kernel_uses_llsc && R10000_LLSC_WAR) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
: "=&r" (temp), "+m" (*m)
: "ir" (1UL << bit));
} while (unlikely(!temp));
- } else {
- volatile unsigned long *a = addr;
- unsigned long mask;
- unsigned long flags;
-
- a += nr >> SZLONG_LOG;
- mask = 1UL << bit;
- raw_local_irq_save(flags);
- *a ^= mask;
- raw_local_irq_restore(flags);
- }
+ } else
+ __mips_change_bit(nr, addr);
}
/*
static inline int test_and_set_bit(unsigned long nr,
volatile unsigned long *addr)
{
- unsigned short bit = nr & SZLONG_MASK;
+ int bit = nr & SZLONG_MASK;
unsigned long res;
smp_mb__before_llsc();
} while (unlikely(!res));
res = temp & (1UL << bit);
- } else {
- volatile unsigned long *a = addr;
- unsigned long mask;
- unsigned long flags;
-
- a += nr >> SZLONG_LOG;
- mask = 1UL << bit;
- raw_local_irq_save(flags);
- res = (mask & *a);
- *a |= mask;
- raw_local_irq_restore(flags);
- }
+ } else
+ res = __mips_test_and_set_bit(nr, addr);
smp_llsc_mb();
static inline int test_and_set_bit_lock(unsigned long nr,
volatile unsigned long *addr)
{
- unsigned short bit = nr & SZLONG_MASK;
+ int bit = nr & SZLONG_MASK;
unsigned long res;
if (kernel_uses_llsc && R10000_LLSC_WAR) {
} while (unlikely(!res));
res = temp & (1UL << bit);
- } else {
- volatile unsigned long *a = addr;
- unsigned long mask;
- unsigned long flags;
-
- a += nr >> SZLONG_LOG;
- mask = 1UL << bit;
- raw_local_irq_save(flags);
- res = (mask & *a);
- *a |= mask;
- raw_local_irq_restore(flags);
- }
+ } else
+ res = __mips_test_and_set_bit_lock(nr, addr);
smp_llsc_mb();
static inline int test_and_clear_bit(unsigned long nr,
volatile unsigned long *addr)
{
- unsigned short bit = nr & SZLONG_MASK;
+ int bit = nr & SZLONG_MASK;
unsigned long res;
smp_mb__before_llsc();
} while (unlikely(!res));
res = temp & (1UL << bit);
- } else {
- volatile unsigned long *a = addr;
- unsigned long mask;
- unsigned long flags;
-
- a += nr >> SZLONG_LOG;
- mask = 1UL << bit;
- raw_local_irq_save(flags);
- res = (mask & *a);
- *a &= ~mask;
- raw_local_irq_restore(flags);
- }
+ } else
+ res = __mips_test_and_clear_bit(nr, addr);
smp_llsc_mb();
static inline int test_and_change_bit(unsigned long nr,
volatile unsigned long *addr)
{
- unsigned short bit = nr & SZLONG_MASK;
+ int bit = nr & SZLONG_MASK;
unsigned long res;
smp_mb__before_llsc();
} while (unlikely(!res));
res = temp & (1UL << bit);
- } else {
- volatile unsigned long *a = addr;
- unsigned long mask;
- unsigned long flags;
-
- a += nr >> SZLONG_LOG;
- mask = 1UL << bit;
- raw_local_irq_save(flags);
- res = (mask & *a);
- *a ^= mask;
- raw_local_irq_restore(flags);
- }
+ } else
+ res = __mips_test_and_change_bit(nr, addr);
smp_llsc_mb();
static inline int is_compat_task(void)
{
- return test_thread_flag(TIF_32BIT);
+ return test_thread_flag(TIF_32BIT_ADDR);
}
#endif /* _ASM_COMPAT_H */
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/types.h>
+#include <linux/irqflags.h>
#include <asm/addrspace.h>
#include <asm/bug.h>
#include <linux/compiler.h>
#include <asm/hazards.h>
-__asm__(
- " .macro arch_local_irq_enable \n"
- " .set push \n"
- " .set reorder \n"
- " .set noat \n"
-#ifdef CONFIG_MIPS_MT_SMTC
- " mfc0 $1, $2, 1 # SMTC - clear TCStatus.IXMT \n"
- " ori $1, 0x400 \n"
- " xori $1, 0x400 \n"
- " mtc0 $1, $2, 1 \n"
-#elif defined(CONFIG_CPU_MIPSR2)
- " ei \n"
-#else
- " mfc0 $1,$12 \n"
- " ori $1,0x1f \n"
- " xori $1,0x1e \n"
- " mtc0 $1,$12 \n"
-#endif
- " irq_enable_hazard \n"
- " .set pop \n"
- " .endm");
+#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC)
-extern void smtc_ipi_replay(void);
-
-static inline void arch_local_irq_enable(void)
-{
-#ifdef CONFIG_MIPS_MT_SMTC
- /*
- * SMTC kernel needs to do a software replay of queued
- * IPIs, at the cost of call overhead on each local_irq_enable()
- */
- smtc_ipi_replay();
-#endif
- __asm__ __volatile__(
- "arch_local_irq_enable"
- : /* no outputs */
- : /* no inputs */
- : "memory");
-}
-
-
-/*
- * For cli() we have to insert nops to make sure that the new value
- * has actually arrived in the status register before the end of this
- * macro.
- * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
- * no nops at all.
- */
-/*
- * For TX49, operating only IE bit is not enough.
- *
- * If mfc0 $12 follows store and the mfc0 is last instruction of a
- * page and fetching the next instruction causes TLB miss, the result
- * of the mfc0 might wrongly contain EXL bit.
- *
- * ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008
- *
- * Workaround: mask EXL bit of the result or place a nop before mfc0.
- */
__asm__(
" .macro arch_local_irq_disable\n"
" .set push \n"
" .set noat \n"
-#ifdef CONFIG_MIPS_MT_SMTC
- " mfc0 $1, $2, 1 \n"
- " ori $1, 0x400 \n"
- " .set noreorder \n"
- " mtc0 $1, $2, 1 \n"
-#elif defined(CONFIG_CPU_MIPSR2)
" di \n"
-#else
- " mfc0 $1,$12 \n"
- " ori $1,0x1f \n"
- " xori $1,0x1f \n"
- " .set noreorder \n"
- " mtc0 $1,$12 \n"
-#endif
" irq_disable_hazard \n"
" .set pop \n"
" .endm \n");
: "memory");
}
-__asm__(
- " .macro arch_local_save_flags flags \n"
- " .set push \n"
- " .set reorder \n"
-#ifdef CONFIG_MIPS_MT_SMTC
- " mfc0 \\flags, $2, 1 \n"
-#else
- " mfc0 \\flags, $12 \n"
-#endif
- " .set pop \n"
- " .endm \n");
-
-static inline unsigned long arch_local_save_flags(void)
-{
- unsigned long flags;
- asm volatile("arch_local_save_flags %0" : "=r" (flags));
- return flags;
-}
__asm__(
" .macro arch_local_irq_save result \n"
" .set push \n"
" .set reorder \n"
" .set noat \n"
-#ifdef CONFIG_MIPS_MT_SMTC
- " mfc0 \\result, $2, 1 \n"
- " ori $1, \\result, 0x400 \n"
- " .set noreorder \n"
- " mtc0 $1, $2, 1 \n"
- " andi \\result, \\result, 0x400 \n"
-#elif defined(CONFIG_CPU_MIPSR2)
" di \\result \n"
" andi \\result, 1 \n"
-#else
- " mfc0 \\result, $12 \n"
- " ori $1, \\result, 0x1f \n"
- " xori $1, 0x1f \n"
- " .set noreorder \n"
- " mtc0 $1, $12 \n"
-#endif
" irq_disable_hazard \n"
" .set pop \n"
" .endm \n");
return flags;
}
+
__asm__(
" .macro arch_local_irq_restore flags \n"
" .set push \n"
" .set noreorder \n"
" .set noat \n"
-#ifdef CONFIG_MIPS_MT_SMTC
- "mfc0 $1, $2, 1 \n"
- "andi \\flags, 0x400 \n"
- "ori $1, 0x400 \n"
- "xori $1, 0x400 \n"
- "or \\flags, $1 \n"
- "mtc0 \\flags, $2, 1 \n"
-#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
+#if defined(CONFIG_IRQ_CPU)
/*
* Slow, but doesn't suffer from a relatively unlikely race
 * condition we're having since day 1.
*/
" beqz \\flags, 1f \n"
- " di \n"
+ " di \n"
" ei \n"
"1: \n"
-#elif defined(CONFIG_CPU_MIPSR2)
+#else
/*
* Fast, dangerous. Life is fun, life is good.
*/
" mfc0 $1, $12 \n"
" ins $1, \\flags, 0, 1 \n"
" mtc0 $1, $12 \n"
-#else
- " mfc0 $1, $12 \n"
- " andi \\flags, 1 \n"
- " ori $1, 0x1f \n"
- " xori $1, 0x1f \n"
- " or \\flags, $1 \n"
- " mtc0 \\flags, $12 \n"
#endif
" irq_disable_hazard \n"
" .set pop \n"
" .endm \n");
-
static inline void arch_local_irq_restore(unsigned long flags)
{
unsigned long __tmp1;
-#ifdef CONFIG_MIPS_MT_SMTC
- /*
- * SMTC kernel needs to do a software replay of queued
- * IPIs, at the cost of branch and call overhead on each
- * local_irq_restore()
- */
- if (unlikely(!(flags & 0x0400)))
- smtc_ipi_replay();
-#endif
-
__asm__ __volatile__(
"arch_local_irq_restore\t%0"
: "=r" (__tmp1)
: "0" (flags)
: "memory");
}
+#else
+/* Functions that require preempt_{dis,en}able() are in mips-atomic.c */
+void arch_local_irq_disable(void);
+unsigned long arch_local_irq_save(void);
+void arch_local_irq_restore(unsigned long flags);
+void __arch_local_irq_restore(unsigned long flags);
+#endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */
+
+
+__asm__(
+ " .macro arch_local_irq_enable \n"
+ " .set push \n"
+ " .set reorder \n"
+ " .set noat \n"
+#ifdef CONFIG_MIPS_MT_SMTC
+ " mfc0 $1, $2, 1 # SMTC - clear TCStatus.IXMT \n"
+ " ori $1, 0x400 \n"
+ " xori $1, 0x400 \n"
+ " mtc0 $1, $2, 1 \n"
+#elif defined(CONFIG_CPU_MIPSR2)
+ " ei \n"
+#else
+ " mfc0 $1,$12 \n"
+ " ori $1,0x1f \n"
+ " xori $1,0x1e \n"
+ " mtc0 $1,$12 \n"
+#endif
+ " irq_enable_hazard \n"
+ " .set pop \n"
+ " .endm");
+
+extern void smtc_ipi_replay(void);
+
+static inline void arch_local_irq_enable(void)
+{
+#ifdef CONFIG_MIPS_MT_SMTC
+ /*
+ * SMTC kernel needs to do a software replay of queued
+ * IPIs, at the cost of call overhead on each local_irq_enable()
+ */
+ smtc_ipi_replay();
+#endif
+ __asm__ __volatile__(
+ "arch_local_irq_enable"
+ : /* no outputs */
+ : /* no inputs */
+ : "memory");
+}
+
+
+__asm__(
+ " .macro arch_local_save_flags flags \n"
+ " .set push \n"
+ " .set reorder \n"
+#ifdef CONFIG_MIPS_MT_SMTC
+ " mfc0 \\flags, $2, 1 \n"
+#else
+ " mfc0 \\flags, $12 \n"
+#endif
+ " .set pop \n"
+ " .endm \n");
+
+static inline unsigned long arch_local_save_flags(void)
+{
+ unsigned long flags;
+ asm volatile("arch_local_save_flags %0" : "=r" (flags));
+ return flags;
+}
+
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
#endif
}
-#endif
+#endif /* #ifndef __ASSEMBLY__ */
/*
* Do the CPU's IRQ-state tracing from assembly code.
#define TIF_LOAD_WATCH 25 /* If set, load watch registers */
#define TIF_SYSCALL_TRACE 31 /* syscall trace active */
-#ifdef CONFIG_MIPS32_O32
-#define TIF_32BIT TIF_32BIT_REGS
-#elif defined(CONFIG_MIPS32_N32)
-#define TIF_32BIT _TIF_32BIT_ADDR
-#endif /* CONFIG_MIPS32_O32 */
-
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
void __init add_memory_region(phys_t start, phys_t size, long type)
{
int x = boot_mem_map.nr_map;
- struct boot_mem_map_entry *prev = boot_mem_map.map + x - 1;
+ int i;
/* Sanity check */
if (start + size < start) {
}
/*
- * Try to merge with previous entry if any. This is far less than
- * perfect but is sufficient for most real world cases.
+ * Try to merge with existing entry, if any.
*/
- if (x && prev->addr + prev->size == start && prev->type == type) {
- prev->size += size;
+ for (i = 0; i < boot_mem_map.nr_map; i++) {
+ struct boot_mem_map_entry *entry = boot_mem_map.map + i;
+ unsigned long top;
+
+ if (entry->type != type)
+ continue;
+
+ if (start + size < entry->addr)
+ continue; /* no overlap */
+
+ if (entry->addr + entry->size < start)
+ continue; /* no overlap */
+
+ top = max(entry->addr + entry->size, start + size);
+ entry->addr = min(entry->addr, start);
+ entry->size = top - entry->addr;
+
return;
}
- if (x == BOOT_MEM_MAP_MAX) {
+ if (boot_mem_map.nr_map == BOOT_MEM_MAP_MAX) {
pr_err("Ooops! Too many entries in the memory map!\n");
return;
}
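The rewritten loop merges any overlapping same-type entry, not just an adjacent predecessor. A self-contained model of the min/max arithmetic (the struct and function names here are illustrative, not the kernel's):

#include <stdio.h>

struct region { unsigned long addr, size; };

static void merge(struct region *e, unsigned long start, unsigned long size)
{
	unsigned long top;

	if (start + size < e->addr || e->addr + e->size < start)
		return;				/* no overlap */
	top = e->addr + e->size > start + size ?
	      e->addr + e->size : start + size;
	e->addr = e->addr < start ? e->addr : start;
	e->size = top - e->addr;
}

int main(void)
{
	struct region r = { 0x1000, 0x2000 };	/* [0x1000, 0x3000) */

	merge(&r, 0x2000, 0x2000);		/* add [0x2000, 0x4000) */
	printf("merged: [%#lx, %#lx)\n", r.addr, r.addr + r.size);
	/* prints: merged: [0x1000, 0x4000) */
	return 0;
}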
# Makefile for MIPS-specific library files..
#
-lib-y += csum_partial.o delay.o memcpy.o memset.o \
- strlen_user.o strncpy_user.o strnlen_user.o uncached.o
+lib-y += bitops.o csum_partial.o delay.o memcpy.o memset.o \
+ mips-atomic.o strlen_user.o strncpy_user.o \
+ strnlen_user.o uncached.o
obj-y += iomap.o
obj-$(CONFIG_PCI) += iomap-pci.o
--- /dev/null
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1994-1997, 99, 2000, 06, 07 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (c) 1999, 2000 Silicon Graphics, Inc.
+ */
+#include <linux/bitops.h>
+#include <linux/irqflags.h>
+#include <linux/export.h>
+
+
+/**
+ * __mips_set_bit - Atomically set a bit in memory. This is called by
+ * set_bit() if it cannot find a faster solution.
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ */
+void __mips_set_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ volatile unsigned long *a = addr;
+ unsigned bit = nr & SZLONG_MASK;
+ unsigned long mask;
+ unsigned long flags;
+
+ a += nr >> SZLONG_LOG;
+ mask = 1UL << bit;
+ raw_local_irq_save(flags);
+ *a |= mask;
+ raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL(__mips_set_bit);
+
+
+/**
+ * __mips_clear_bit - Clears a bit in memory. This is called by clear_bit() if
+ * it cannot find a faster solution.
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ */
+void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ volatile unsigned long *a = addr;
+ unsigned bit = nr & SZLONG_MASK;
+ unsigned long mask;
+ unsigned long flags;
+
+ a += nr >> SZLONG_LOG;
+ mask = 1UL << bit;
+ raw_local_irq_save(flags);
+ *a &= ~mask;
+ raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL(__mips_clear_bit);
+
+
+/**
+ * __mips_change_bit - Toggle a bit in memory. This is called by change_bit()
+ * if it cannot find a faster solution.
+ * @nr: Bit to change
+ * @addr: Address to start counting from
+ */
+void __mips_change_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ volatile unsigned long *a = addr;
+ unsigned bit = nr & SZLONG_MASK;
+ unsigned long mask;
+ unsigned long flags;
+
+ a += nr >> SZLONG_LOG;
+ mask = 1UL << bit;
+ raw_local_irq_save(flags);
+ *a ^= mask;
+ raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL(__mips_change_bit);
+
+
+/**
+ * __mips_test_and_set_bit - Set a bit and return its old value. This is
+ * called by test_and_set_bit() if it cannot find a faster solution.
+ * @nr: Bit to set
+ * @addr: Address to count from
+ */
+int __mips_test_and_set_bit(unsigned long nr,
+ volatile unsigned long *addr)
+{
+ volatile unsigned long *a = addr;
+ unsigned bit = nr & SZLONG_MASK;
+ unsigned long mask;
+ unsigned long flags;
+ unsigned long res;
+
+ a += nr >> SZLONG_LOG;
+ mask = 1UL << bit;
+ raw_local_irq_save(flags);
+ res = (mask & *a);
+ *a |= mask;
+ raw_local_irq_restore(flags);
+ return res;
+}
+EXPORT_SYMBOL(__mips_test_and_set_bit);
+
+
+/**
+ * __mips_test_and_set_bit_lock - Set a bit and return its old value. This is
+ * called by test_and_set_bit_lock() if it cannot find a faster solution.
+ * @nr: Bit to set
+ * @addr: Address to count from
+ */
+int __mips_test_and_set_bit_lock(unsigned long nr,
+ volatile unsigned long *addr)
+{
+ volatile unsigned long *a = addr;
+ unsigned bit = nr & SZLONG_MASK;
+ unsigned long mask;
+ unsigned long flags;
+ unsigned long res;
+
+ a += nr >> SZLONG_LOG;
+ mask = 1UL << bit;
+ raw_local_irq_save(flags);
+ res = (mask & *a);
+ *a |= mask;
+ raw_local_irq_restore(flags);
+ return res;
+}
+EXPORT_SYMBOL(__mips_test_and_set_bit_lock);
+
+
+/**
+ * __mips_test_and_clear_bit - Clear a bit and return its old value. This is
+ * called by test_and_clear_bit() if it cannot find a faster solution.
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ */
+int __mips_test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ volatile unsigned long *a = addr;
+ unsigned bit = nr & SZLONG_MASK;
+ unsigned long mask;
+ unsigned long flags;
+ unsigned long res;
+
+ a += nr >> SZLONG_LOG;
+ mask = 1UL << bit;
+ raw_local_irq_save(flags);
+ res = (mask & *a);
+ *a &= ~mask;
+ raw_local_irq_restore(flags);
+ return res;
+}
+EXPORT_SYMBOL(__mips_test_and_clear_bit);
+
+
+/**
+ * __mips_test_and_change_bit - Change a bit and return its old value. This is
+ * called by test_and_change_bit() if it cannot find a faster solution.
+ * @nr: Bit to change
+ * @addr: Address to count from
+ */
+int __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ volatile unsigned long *a = addr;
+ unsigned bit = nr & SZLONG_MASK;
+ unsigned long mask;
+ unsigned long flags;
+ unsigned long res;
+
+ a += nr >> SZLONG_LOG;
+ mask = 1UL << bit;
+ raw_local_irq_save(flags);
+ res = (mask & *a);
+ *a ^= mask;
+ raw_local_irq_restore(flags);
+ return res;
+}
+EXPORT_SYMBOL(__mips_test_and_change_bit);
--- /dev/null
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
+ * Copyright (C) 1996 by Paul M. Antoine
+ * Copyright (C) 1999 Silicon Graphics
+ * Copyright (C) 2000 MIPS Technologies, Inc.
+ */
+#include <asm/irqflags.h>
+#include <asm/hazards.h>
+#include <linux/compiler.h>
+#include <linux/preempt.h>
+#include <linux/export.h>
+
+#if !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC)
+
+/*
+ * For cli() we have to insert nops to make sure that the new value
+ * has actually arrived in the status register before the end of this
+ * macro.
+ * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
+ * no nops at all.
+ */
+/*
+ * For TX49, operating on only the IE bit is not enough.
+ *
+ * If mfc0 $12 follows a store and the mfc0 is the last instruction of
+ * a page, and fetching the next instruction causes a TLB miss, the
+ * result of the mfc0 might wrongly contain the EXL bit.
+ *
+ * ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008
+ *
+ * Workaround: mask EXL bit of the result or place a nop before mfc0.
+ */
+__asm__(
+ " .macro arch_local_irq_disable\n"
+ " .set push \n"
+ " .set noat \n"
+#ifdef CONFIG_MIPS_MT_SMTC
+ " mfc0 $1, $2, 1 \n"
+ " ori $1, 0x400 \n"
+ " .set noreorder \n"
+ " mtc0 $1, $2, 1 \n"
+#elif defined(CONFIG_CPU_MIPSR2)
+ /* see irqflags.h for inline function */
+#else
+ " mfc0 $1,$12 \n"
+ " ori $1,0x1f \n"
+ " xori $1,0x1f \n"
+ " .set noreorder \n"
+ " mtc0 $1,$12 \n"
+#endif
+ " irq_disable_hazard \n"
+ " .set pop \n"
+ " .endm \n");
+
+notrace void arch_local_irq_disable(void)
+{
+ preempt_disable();
+ __asm__ __volatile__(
+ "arch_local_irq_disable"
+ : /* no outputs */
+ : /* no inputs */
+ : "memory");
+ preempt_enable();
+}
+EXPORT_SYMBOL(arch_local_irq_disable);
+
+
+__asm__(
+ " .macro arch_local_irq_save result \n"
+ " .set push \n"
+ " .set reorder \n"
+ " .set noat \n"
+#ifdef CONFIG_MIPS_MT_SMTC
+ " mfc0 \\result, $2, 1 \n"
+ " ori $1, \\result, 0x400 \n"
+ " .set noreorder \n"
+ " mtc0 $1, $2, 1 \n"
+ " andi \\result, \\result, 0x400 \n"
+#elif defined(CONFIG_CPU_MIPSR2)
+ /* see irqflags.h for inline function */
+#else
+ " mfc0 \\result, $12 \n"
+ " ori $1, \\result, 0x1f \n"
+ " xori $1, 0x1f \n"
+ " .set noreorder \n"
+ " mtc0 $1, $12 \n"
+#endif
+ " irq_disable_hazard \n"
+ " .set pop \n"
+ " .endm \n");
+
+notrace unsigned long arch_local_irq_save(void)
+{
+ unsigned long flags;
+ preempt_disable();
+ asm volatile("arch_local_irq_save\t%0"
+ : "=r" (flags)
+ : /* no inputs */
+ : "memory");
+ preempt_enable();
+ return flags;
+}
+EXPORT_SYMBOL(arch_local_irq_save);
+
+
+__asm__(
+ " .macro arch_local_irq_restore flags \n"
+ " .set push \n"
+ " .set noreorder \n"
+ " .set noat \n"
+#ifdef CONFIG_MIPS_MT_SMTC
+ "mfc0 $1, $2, 1 \n"
+ "andi \\flags, 0x400 \n"
+ "ori $1, 0x400 \n"
+ "xori $1, 0x400 \n"
+ "or \\flags, $1 \n"
+ "mtc0 \\flags, $2, 1 \n"
+#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
+ /* see irqflags.h for inline function */
+#elif defined(CONFIG_CPU_MIPSR2)
+ /* see irqflags.h for inline function */
+#else
+ " mfc0 $1, $12 \n"
+ " andi \\flags, 1 \n"
+ " ori $1, 0x1f \n"
+ " xori $1, 0x1f \n"
+ " or \\flags, $1 \n"
+ " mtc0 \\flags, $12 \n"
+#endif
+ " irq_disable_hazard \n"
+ " .set pop \n"
+ " .endm \n");
+
+notrace void arch_local_irq_restore(unsigned long flags)
+{
+ unsigned long __tmp1;
+
+#ifdef CONFIG_MIPS_MT_SMTC
+ /*
+ * SMTC kernel needs to do a software replay of queued
+ * IPIs, at the cost of branch and call overhead on each
+ * local_irq_restore()
+ */
+ if (unlikely(!(flags & 0x0400)))
+ smtc_ipi_replay();
+#endif
+ preempt_disable();
+ __asm__ __volatile__(
+ "arch_local_irq_restore\t%0"
+ : "=r" (__tmp1)
+ : "0" (flags)
+ : "memory");
+ preempt_enable();
+}
+EXPORT_SYMBOL(arch_local_irq_restore);
+
+
+notrace void __arch_local_irq_restore(unsigned long flags)
+{
+ unsigned long __tmp1;
+
+ preempt_disable();
+ __asm__ __volatile__(
+ "arch_local_irq_restore\t%0"
+ : "=r" (__tmp1)
+ : "0" (flags)
+ : "memory");
+ preempt_enable();
+}
+EXPORT_SYMBOL(__arch_local_irq_restore);
+
+#endif /* !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC) */
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
#include <linux/platform_device.h>
+#include <asm/mips-boards/maltaint.h>
#include <mtd/mtd-abi.h>
#define SMC_PORT(base, int) \
SMC_PORT(0x2F8, 3),
{
.mapbase = 0x1f000900, /* The CBUS UART */
- .irq = MIPS_CPU_IRQ_BASE + 2,
+ .irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_MB2,
.uartclk = 3686400, /* Twice the usual clk! */
.iotype = UPIO_MEM32,
.flags = CBUS_UART_FLAGS,
{
compat_sigset_t s;
- if (sz != sizeof *set) panic("put_sigset32()");
+ if (sz != sizeof *set)
+ return -EINVAL;
sigset_64to32(&s, set);
return copy_to_user(up, &s, sizeof s);
compat_sigset_t s;
int r;
- if (sz != sizeof *set) panic("put_sigset32()");
+ if (sz != sizeof *set)
+ return -EINVAL;
if ((r = copy_from_user(&s, up, sz)) == 0) {
sigset_32to64(set, &s);
struct vm_area_struct *vma;
int offset = mapping ? get_offset(mapping) : 0;
+ offset = (offset + (pgoff << PAGE_SHIFT)) & 0x3FF000;
+
addr = DCACHE_ALIGN(addr - offset) + offset;
for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
interrupts = <2 7 0>;
};
+ sclpc@3c00 {
+ compatible = "fsl,mpc5200-lpbfifo";
+ reg = <0x3c00 0x60>;
+ interrupts = <2 23 0>;
+ };
+
i2c@3d00 {
#address-cells = <1>;
#size-cells = <0>;
reg = <0>;
};
};
-
- sclpc@3c00 {
- compatible = "fsl,mpc5200-lpbfifo";
- reg = <0x3c00 0x60>;
- interrupts = <3 23 0>;
- };
};
localbus {
#gpio-cells = <2>;
};
- psc@2000 { /* PSC1 in ac97 mode */
+ audioplatform: psc@2000 { /* PSC1 in ac97 mode */
compatible = "mpc5200b-psc-ac97","fsl,mpc5200b-psc-ac97";
cell-index = <0>;
};
localbus {
status = "disabled";
};
+
+ sound {
+ compatible = "phytec,pcm030-audio-fabric";
+ asoc-platform = <&audioplatform>;
+ };
};
case MPC52xx_IRQ_L1_MAIN: irqchip = &mpc52xx_main_irqchip; break;
case MPC52xx_IRQ_L1_PERP: irqchip = &mpc52xx_periph_irqchip; break;
case MPC52xx_IRQ_L1_SDMA: irqchip = &mpc52xx_sdma_irqchip; break;
- default:
- pr_err("%s: invalid irq: virq=%i, l1=%i, l2=%i\n",
- __func__, virq, l1irq, l2irq);
- return -EINVAL;
+ case MPC52xx_IRQ_L1_CRIT:
+ pr_warn("%s: Critical IRQ #%d is unsupported! Nopping it.\n",
+ __func__, l2irq);
+ irq_set_chip(virq, &no_irq_chip);
+ return 0;
}
irq_set_chip_and_handler(virq, irqchip, handle_level_irq);
if (list_empty(&pe->edevs)) {
cnt = 0;
list_for_each_entry(child, &pe->child_list, child) {
- if (!(pe->type & EEH_PE_INVALID)) {
+ if (!(child->type & EEH_PE_INVALID)) {
cnt++;
break;
}
/* Get the top level device in the PE */
edev = of_node_to_eeh_dev(dn);
- edev = list_first_entry(&edev->pe->edevs, struct eeh_dev, list);
+ if (edev->pe)
+ edev = list_first_entry(&edev->pe->edevs, struct eeh_dev, list);
dn = eeh_dev_to_of_node(edev);
if (!dn)
return NULL;
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_CMPXCHG_LOCAL
select HAVE_CMPXCHG_DOUBLE
+ select HAVE_ALIGNED_STRUCT_PAGE if SLUB
select HAVE_VIRT_CPU_ACCOUNTING
select VIRT_CPU_ACCOUNTING
select ARCH_DISCARD_MEMBLOCK
select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
select HAVE_UID16 if 32BIT
select ARCH_WANT_IPC_PARSE_VERSION
+ select HAVE_ARCH_TRANSPARENT_HUGEPAGE if 64BIT
select GENERIC_SMP_IDLE_THREAD
select GENERIC_TIME_VSYSCALL_OLD
select GENERIC_CLOCKEVENTS
OUTPUT_ARCH(s390:64-bit)
#else
OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390")
-OUTPUT_ARCH(s390)
+OUTPUT_ARCH(s390:31-bit)
#endif
ENTRY(startup)
#define LPM_ANYPATH 0xff
#define __MAX_CSSID 0
+#define __MAX_SUBCHANNEL 65535
+#define __MAX_SSID 3
#include <asm/scsw.h>
#define PSW32_MASK_CC 0x00003000UL
#define PSW32_MASK_PM 0x00000f00UL
-#define PSW32_MASK_USER 0x00003F00UL
+#define PSW32_MASK_USER 0x0000FF00UL
#define PSW32_ADDR_AMODE 0x80000000UL
#define PSW32_ADDR_INSN 0x7FFFFFFFUL
#include <asm/cpu_mf.h>
/* CPU-measurement counter facility */
-#define PERF_CPUM_CF_MAX_CTR 160
+#define PERF_CPUM_CF_MAX_CTR 256
/* Per-CPU flags for PMU states */
#define PMU_F_RESERVED 0x1000
static inline int pmd_present(pmd_t pmd)
{
- return (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) != 0UL;
+ unsigned long mask = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO;
+ return (pmd_val(pmd) & mask) == _HPAGE_TYPE_NONE ||
+ !(pmd_val(pmd) & _SEGMENT_ENTRY_INV);
}
static inline int pmd_none(pmd_t pmd)
{
- return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) != 0UL;
+ return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) &&
+ !(pmd_val(pmd) & _SEGMENT_ENTRY_RO);
}
static inline int pmd_large(pmd_t pmd)
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+
+#define SEGMENT_NONE __pgprot(_HPAGE_TYPE_NONE)
+#define SEGMENT_RO __pgprot(_HPAGE_TYPE_RO)
+#define SEGMENT_RW __pgprot(_HPAGE_TYPE_RW)
+
#define __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable);
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
- unsigned long pgprot_pmd = 0;
-
- if (pgprot_val(pgprot) & _PAGE_INVALID) {
- if (pgprot_val(pgprot) & _PAGE_SWT)
- pgprot_pmd |= _HPAGE_TYPE_NONE;
- pgprot_pmd |= _SEGMENT_ENTRY_INV;
- }
- if (pgprot_val(pgprot) & _PAGE_RO)
- pgprot_pmd |= _SEGMENT_ENTRY_RO;
- return pgprot_pmd;
+ /*
+ * pgprot is PAGE_NONE, PAGE_RO, or PAGE_RW (see __Pxxx / __Sxxx)
+ * Convert to segment table entry format.
+ */
+ if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
+ return pgprot_val(SEGMENT_NONE);
+ if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
+ return pgprot_val(SEGMENT_RO);
+ return pgprot_val(SEGMENT_RW);
}
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
- pmd_val(pmd) &= ~_SEGMENT_ENTRY_RO;
+ /* Do not clobber _HPAGE_TYPE_NONE pages! */
+ if (!(pmd_val(pmd) & _SEGMENT_ENTRY_INV))
+ pmd_val(pmd) &= ~_SEGMENT_ENTRY_RO;
return pmd;
}
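Taken together, the predicates above partition huge-pmd values into three states keyed on the INV/RO bit pair. A schematic decoder, derived only from the checks shown here (the constants are the kernel's, the function is illustrative):

	enum hpmd_state { HPMD_EMPTY, HPMD_PROT_NONE, HPMD_MAPPED };

	static enum hpmd_state hpmd_decode(unsigned long val)
	{
		if (!(val & _SEGMENT_ENTRY_INV))
			return HPMD_MAPPED;	/* pmd_present(): valid entry */
		if (val & _SEGMENT_ENTRY_RO)
			return HPMD_PROT_NONE;	/* _HPAGE_TYPE_NONE: present, no access */
		return HPMD_EMPTY;		/* pmd_none() */
	}

This is also why pmd_mkwrite() now skips invalid entries: clearing RO on a PROT_NONE entry would silently turn it into an empty one.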
#ifdef CONFIG_SCHED_BOOK
+extern unsigned char cpu_socket_id[NR_CPUS];
+#define topology_physical_package_id(cpu) (cpu_socket_id[cpu])
+
extern unsigned char cpu_core_id[NR_CPUS];
extern cpumask_t cpu_core_map[NR_CPUS];
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-generic-y += kvm_para.h
-
header-y += auxvec.h
header-y += bitsperlong.h
header-y += byteorder.h
/*
- * Copyright IBM Corp. 2007
+ * Copyright IBM Corp. 2007, 2012
* Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
#define __MAX_CHPID 255
struct chp_id {
- u8 reserved1;
- u8 cssid;
- u8 reserved2;
- u8 id;
+ __u8 reserved1;
+ __u8 cssid;
+ __u8 reserved2;
+ __u8 id;
} __attribute__((packed));
--- /dev/null
+/*
+ * User API definitions for paravirtual devices on s390
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
+ */
#define PSW_MASK_EA 0x00000000UL
#define PSW_MASK_BA 0x00000000UL
-#define PSW_MASK_USER 0x00003F00UL
+#define PSW_MASK_USER 0x0000FF00UL
#define PSW_ADDR_AMODE 0x80000000UL
#define PSW_ADDR_INSN 0x7FFFFFFFUL
#define PSW_MASK_EA 0x0000000100000000UL
#define PSW_MASK_BA 0x0000000080000000UL
-#define PSW_MASK_USER 0x00003F8180000000UL
+#define PSW_MASK_USER 0x0000FF8180000000UL
#define PSW_ADDR_AMODE 0x0000000000000000UL
#define PSW_ADDR_INSN 0xFFFFFFFFFFFFFFFFUL
enum {
CACHE_TI_UNIFIED = 0,
- CACHE_TI_INSTRUCTION = 0,
- CACHE_TI_DATA,
+ CACHE_TI_DATA = 0,
+ CACHE_TI_INSTRUCTION,
};
struct cache_info {
cache = kzalloc(sizeof(*cache), GFP_KERNEL);
if (!cache)
return -ENOMEM;
- ti = type == CACHE_TYPE_DATA ? CACHE_TI_DATA : CACHE_TI_UNIFIED;
+ if (type == CACHE_TYPE_INSTRUCTION)
+ ti = CACHE_TI_INSTRUCTION;
+ else
+ ti = CACHE_TI_UNIFIED;
cache->size = ecag(EXTRACT_SIZE, level, ti);
cache->line_size = ecag(EXTRACT_LINE_SIZE, level, ti);
cache->associativity = ecag(EXTRACT_ASSOCIATIVITY, level, ti);
regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
(__u64)(regs32.psw.mask & PSW32_MASK_USER) << 32 |
(__u64)(regs32.psw.addr & PSW32_ADDR_AMODE);
+ /* Check for invalid user address space control. */
+ if ((regs->psw.mask & PSW_MASK_ASC) >= (psw_kernel_bits & PSW_MASK_ASC))
+ regs->psw.mask = (psw_user_bits & PSW_MASK_ASC) |
+ (regs->psw.mask & ~PSW_MASK_ASC);
regs->psw.addr = (__u64)(regs32.psw.addr & PSW32_ADDR_INSN);
for (i = 0; i < NUM_GPRS; i++)
regs->gprs[i] = (__u64) regs32.gprs[i];
/* Set up registers for signal handler */
regs->gprs[15] = (__force __u64) frame;
- regs->psw.mask |= PSW_MASK_BA; /* force amode 31 */
+ /* Force 31 bit amode and default user address space control. */
+ regs->psw.mask = PSW_MASK_BA |
+ (psw_user_bits & PSW_MASK_ASC) |
+ (regs->psw.mask & ~PSW_MASK_ASC);
regs->psw.addr = (__force __u64) ka->sa.sa_handler;
regs->gprs[2] = map_signal(sig);
/* Set up registers for signal handler */
regs->gprs[15] = (__force __u64) frame;
- regs->psw.mask |= PSW_MASK_BA; /* force amode 31 */
+ /* Force 31 bit amode and default user address space control. */
+ regs->psw.mask = PSW_MASK_BA |
+ (psw_user_bits & PSW_MASK_ASC) |
+ (regs->psw.mask & ~PSW_MASK_ASC);
regs->psw.addr = (__u64) ka->sa.sa_handler;
regs->gprs[2] = map_signal(sig);
.align 2
startup_kdump_relocated:
basr %r13,0
-0:
- mvc 0(8,%r0),.Lrestart_psw-0b(%r13) # Setup restart PSW
- sam31 # Switch to 31 bit addr mode
- sr %r1,%r1 # Erase register r1
- sr %r2,%r2 # Erase register r2
- sigp %r1,%r2,SIGP_SET_ARCHITECTURE # Switch to 31 bit arch mode
- lpsw 0 # Start new kernel...
+0: lpswe .Lrestart_psw-0b(%r13) # Start new kernel...
.align 8
.Lrestart_psw:
- .long 0x00080000,0x80000000 + startup
+ .quad 0x0000000080000000,0x0000000000000000 + startup
#else
.align 2
.Lep_startup_kdump:
set = CPUMF_CTR_SET_USER;
else if (event < 128)
set = CPUMF_CTR_SET_CRYPTO;
- else if (event < 160)
+ else if (event < 256)
set = CPUMF_CTR_SET_EXT;
return set;
case CPUMF_CTR_SET_EXT:
if (cpuhw->info.csvn < 1)
err = -EOPNOTSUPP;
+ if ((cpuhw->info.csvn == 1 && hwc->config > 159) ||
+ (cpuhw->info.csvn == 2 && hwc->config > 175) ||
+ (cpuhw->info.csvn > 2 && hwc->config > 255))
+ err = -EOPNOTSUPP;
break;
}
#endif
mvc .LoldpswS1-.LbaseS1(16,%r13),0(%r8)
mvc 0(16,%r8),0(%r9)
+#ifdef CONFIG_64BIT
+ epsw %r6,%r7 # set current addressing mode
+ nill %r6,0x1 # in new psw (31 or 64 bit mode)
+ nilh %r7,0x8000
+ stm %r6,%r7,0(%r8)
+#endif
lhi %r6,0x0200 # cr mask for ext int (cr0.54)
ltr %r2,%r2
jz .LsetctS1
.long 0x00080000, 0x80000000+.LwaitS1 # PSW to handle ext int
#ifdef CONFIG_64BIT
.LextpswS1_64:
- .quad 0x0000000180000000, .LwaitS1 # PSW to handle ext int, 64 bit
+ .quad 0, .LwaitS1 # PSW to handle ext int, 64 bit
#endif
.LwaitpswS1:
.long 0x010a0000, 0x00000000+.LloopS1 # PSW to wait for ext int
/* Use regs->psw.mask instead of psw_user_bits to preserve PER bit. */
regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
(user_sregs.regs.psw.mask & PSW_MASK_USER);
+ /* Check for invalid user address space control. */
+ if ((regs->psw.mask & PSW_MASK_ASC) >= (psw_kernel_bits & PSW_MASK_ASC))
+ regs->psw.mask = (psw_user_bits & PSW_MASK_ASC) |
+ (regs->psw.mask & ~PSW_MASK_ASC);
/* Check for invalid amode */
if (regs->psw.mask & PSW_MASK_EA)
regs->psw.mask |= PSW_MASK_BA;
/* Set up registers for signal handler */
regs->gprs[15] = (unsigned long) frame;
- regs->psw.mask |= PSW_MASK_EA | PSW_MASK_BA; /* 64 bit amode */
+ /* Force default amode and default user address space control. */
+ regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA |
+ (psw_user_bits & PSW_MASK_ASC) |
+ (regs->psw.mask & ~PSW_MASK_ASC);
regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE;
regs->gprs[2] = map_signal(sig);
/* Set up registers for signal handler */
regs->gprs[15] = (unsigned long) frame;
- regs->psw.mask |= PSW_MASK_EA | PSW_MASK_BA; /* 64 bit amode */
+ /* Force default amode and default user address space control. */
+ regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA |
+ (psw_user_bits & PSW_MASK_ASC) |
+ (regs->psw.mask & ~PSW_MASK_ASC);
regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE;
regs->gprs[2] = map_signal(sig);
static struct mask_info core_info;
cpumask_t cpu_core_map[NR_CPUS];
unsigned char cpu_core_id[NR_CPUS];
+unsigned char cpu_socket_id[NR_CPUS];
static struct mask_info book_info;
cpumask_t cpu_book_map[NR_CPUS];
cpumask_set_cpu(lcpu, &book->mask);
cpu_book_id[lcpu] = book->id;
cpumask_set_cpu(lcpu, &core->mask);
+ cpu_core_id[lcpu] = rcpu;
if (one_core_per_cpu) {
- cpu_core_id[lcpu] = rcpu;
+ cpu_socket_id[lcpu] = rcpu;
core = core->next;
} else {
- cpu_core_id[lcpu] = core->id;
+ cpu_socket_id[lcpu] = core->id;
}
smp_cpu_set_polarization(lcpu, tl_cpu->pp);
}
#ifndef CONFIG_64BIT
OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390")
-OUTPUT_ARCH(s390)
+OUTPUT_ARCH(s390:31-bit)
ENTRY(startup)
jiffies = jiffies_64 + 4;
#else
pmd = pmd_offset(pud, addr);
if (pmd_none(*pmd))
return -0x10UL;
- if (pmd_huge(*pmd)) {
+ if (pmd_large(*pmd)) {
if (write && (pmd_val(*pmd) & _SEGMENT_ENTRY_RO))
return -0x04UL;
return (pmd_val(*pmd) & HPAGE_MASK) + (addr & ~HPAGE_MASK);
*/
if (pmd_none(pmd) || pmd_trans_splitting(pmd))
return 0;
- if (unlikely(pmd_huge(pmd))) {
+ if (unlikely(pmd_large(pmd))) {
if (!gup_huge_pmd(pmdp, pmd, addr, next,
write, pages, nr))
return 0;
addr = start;
len = (unsigned long) nr_pages << PAGE_SHIFT;
end = start + len;
- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
- (void __user *)start, len)))
+ if ((end < start) || (end > TASK_SIZE))
return 0;
local_irq_save(flags);
addr = start;
len = (unsigned long) nr_pages << PAGE_SHIFT;
end = start + len;
- if (end < start)
+ if ((end < start) || (end > TASK_SIZE))
goto slow_irqon;
/*
select HAVE_ARCH_TRACEHOOK
select SYSCTL_EXCEPTION_TRACE
select ARCH_WANT_OPTIONAL_GPIOLIB
+ select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
select RTC_CLASS
select RTC_DRV_M48T59
select HAVE_IRQ_WORK
obj-$(CONFIG_CRYPTO_CRC32C_SPARC64) += crc32c-sparc64.o
-sha1-sparc64-y := sha1_asm.o sha1_glue.o crop_devid.o
-sha256-sparc64-y := sha256_asm.o sha256_glue.o crop_devid.o
-sha512-sparc64-y := sha512_asm.o sha512_glue.o crop_devid.o
-md5-sparc64-y := md5_asm.o md5_glue.o crop_devid.o
+sha1-sparc64-y := sha1_asm.o sha1_glue.o
+sha256-sparc64-y := sha256_asm.o sha256_glue.o
+sha512-sparc64-y := sha512_asm.o sha512_glue.o
+md5-sparc64-y := md5_asm.o md5_glue.o
-aes-sparc64-y := aes_asm.o aes_glue.o crop_devid.o
-des-sparc64-y := des_asm.o des_glue.o crop_devid.o
-camellia-sparc64-y := camellia_asm.o camellia_glue.o crop_devid.o
+aes-sparc64-y := aes_asm.o aes_glue.o
+des-sparc64-y := des_asm.o des_glue.o
+camellia-sparc64-y := camellia_asm.o camellia_glue.o
-crc32c-sparc64-y := crc32c_asm.o crc32c_glue.o crop_devid.o
+crc32c-sparc64-y := crc32c_asm.o crc32c_glue.o
MODULE_DESCRIPTION("AES Secure Hash Algorithm, sparc64 aes opcode accelerated");
MODULE_ALIAS("aes");
+
+#include "crop_devid.c"
MODULE_DESCRIPTION("Camellia Cipher Algorithm, sparc64 camellia opcode accelerated");
MODULE_ALIAS("aes");
+
+#include "crop_devid.c"
MODULE_DESCRIPTION("CRC32c (Castagnoli), sparc64 crc32c opcode accelerated");
MODULE_ALIAS("crc32c");
+
+#include "crop_devid.c"
MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms, sparc64 des opcode accelerated");
MODULE_ALIAS("des");
+
+#include "crop_devid.c"
MODULE_DESCRIPTION("MD5 Secure Hash Algorithm, sparc64 md5 opcode accelerated");
MODULE_ALIAS("md5");
+
+#include "crop_devid.c"
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, sparc64 sha1 opcode accelerated");
MODULE_ALIAS("sha1");
+
+#include "crop_devid.c"
MODULE_ALIAS("sha224");
MODULE_ALIAS("sha256");
+
+#include "crop_devid.c"
MODULE_ALIAS("sha384");
MODULE_ALIAS("sha512");
+
+#include "crop_devid.c"
/* atomic.h: Thankfully the V9 is at least reasonable for this
* stuff.
*
- * Copyright (C) 1996, 1997, 2000 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1996, 1997, 2000, 2012 David S. Miller (davem@redhat.com)
*/
#ifndef __ARCH_SPARC64_ATOMIC__
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+extern long atomic64_dec_if_positive(atomic64_t *v);
+
/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
#ifndef _SPARC64_BACKOFF_H
#define _SPARC64_BACKOFF_H
+/* The macros in this file implement an exponential backoff facility
+ * for atomic operations.
+ *
+ * When multiple threads compete on an atomic operation, it is
+ * possible for one thread to be continually denied a successful
+ * completion of the compare-and-swap instruction. Heavily
+ * threaded cpu implementations like Niagara can compound this
+ * problem even further.
+ *
+ * When an atomic operation fails and needs to be retried, we spin a
+ * certain number of times. At each subsequent failure of the same
+ * operation we double the spin count, realizing an exponential
+ * backoff.
+ *
+ * When we spin, we try to use an operation that will cause the
+ * current cpu strand to block, and therefore make the core fully
+ * available to any other runnable strands. There are two
+ * options, based upon cpu capabilities.
+ *
+ * On all cpus prior to SPARC-T4 we do three dummy reads of the
+ * condition code register. Each read blocks the strand for something
+ * between 40 and 50 cpu cycles.
+ *
+ * For SPARC-T4 and later we have a special "pause" instruction
+ * available. This is implemented using writes to register %asr27.
+ * The cpu will block the number of cycles written into the register,
+ * unless a disrupting trap happens first. SPARC-T4 specifically
+ * implements pause with a granularity of 8 cycles. Each strand has
+ * an internal pause counter which decrements every 8 cycles. So the
+ * chip shifts the %asr27 value down by 3 bits, and writes the result
+ * into the pause counter. If a value smaller than 8 is written, the
+ * chip blocks for 1 cycle.
+ *
+ * To achieve the same amount of backoff as the three %ccr reads give
+ * on earlier chips, we shift the backoff value up by 7 bits. (Three
+ * %ccr reads block for about 128 cycles, 1 << 7 == 128.) We write the
+ * whole amount we want to block into the pause register, rather than
+ * loop writing 128 each time.
+ */
+
#define BACKOFF_LIMIT (4 * 1024)
#ifdef CONFIG_SMP
#define BACKOFF_LABEL(spin_label, continue_label) \
spin_label
-#define BACKOFF_SPIN(reg, tmp, label) \
- mov reg, tmp; \
-88: brnz,pt tmp, 88b; \
- sub tmp, 1, tmp; \
- set BACKOFF_LIMIT, tmp; \
- cmp reg, tmp; \
- bg,pn %xcc, label; \
- nop; \
- ba,pt %xcc, label; \
- sllx reg, 1, reg;
+#define BACKOFF_SPIN(reg, tmp, label) \
+ mov reg, tmp; \
+88: rd %ccr, %g0; \
+ rd %ccr, %g0; \
+ rd %ccr, %g0; \
+ .section .pause_3insn_patch,"ax";\
+ .word 88b; \
+ sllx tmp, 7, tmp; \
+ wr tmp, 0, %asr27; \
+ clr tmp; \
+ .previous; \
+ brnz,pt tmp, 88b; \
+ sub tmp, 1, tmp; \
+ set BACKOFF_LIMIT, tmp; \
+ cmp reg, tmp; \
+ bg,pn %xcc, label; \
+ nop; \
+ ba,pt %xcc, label; \
+ sllx reg, 1, reg;
#else
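The same policy in portable C, for readers who prefer it over the macro: spin, double the spin count after every failed compare-and-swap, and cap it at BACKOFF_LIMIT. A sketch using GCC builtins; the kernel's version is the hand-written assembly above, with the spin body live-patched into a %asr27 write on SPARC-T4:

	/* Sketch only: GCC builtins stand in for the casx-based kernel asm. */
	static void atomic_add_backoff(long *p, long inc)
	{
		unsigned int spin, backoff = 1;
		long old = __atomic_load_n(p, __ATOMIC_RELAXED);

		while (!__atomic_compare_exchange_n(p, &old, old + inc, 0,
						    __ATOMIC_SEQ_CST, __ATOMIC_RELAXED)) {
			for (spin = 0; spin < backoff; spin++)
				__asm__ __volatile__("" ::: "memory"); /* placeholder for the %ccr reads / pause */
			if (backoff < 4 * 1024)		/* BACKOFF_LIMIT */
				backoff <<= 1;		/* exponential growth */
		}
	}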
struct pt_regs *regs = current_thread_info()->kregs;
unsigned long usp = regs->u_regs[UREG_I6];
- if (!(test_thread_flag(TIF_32BIT)))
+ if (test_thread_64bit_stack(usp))
usp += STACK_BIAS;
- else
+
+ if (test_thread_flag(TIF_32BIT))
usp &= 0xffffffffUL;
usp -= len;
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->tpc)
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->u_regs[UREG_FP])
-#define cpu_relax() barrier()
+/* Please see the commentary in asm/backoff.h for a description of
+ * what these instructions are doing and how they have been chosen.
+ * To make a long story short, we are trying to yield the current cpu
+ * strand during busy loops.
+ */
+#define cpu_relax() asm volatile("\n99:\n\t" \
+ "rd %%ccr, %%g0\n\t" \
+ "rd %%ccr, %%g0\n\t" \
+ "rd %%ccr, %%g0\n\t" \
+ ".section .pause_3insn_patch,\"ax\"\n\t"\
+ ".word 99b\n\t" \
+ "wr %%g0, 128, %%asr27\n\t" \
+ "nop\n\t" \
+ "nop\n\t" \
+ ".previous" \
+ ::: "memory")
/* Prefetch support. This is tuned for UltraSPARC-III and later.
* UltraSPARC-I will treat these as nops, and UltraSPARC-II has
extern void irq_trans_init(struct device_node *dp);
extern char *build_path_component(struct device_node *dp);
+/* SPARC has local implementations */
+extern int of_address_to_resource(struct device_node *dev, int index,
+ struct resource *r);
+#define of_address_to_resource of_address_to_resource
+
+void __iomem *of_iomap(struct device_node *node, int index);
+#define of_iomap of_iomap
+
#endif /* __KERNEL__ */
#endif /* _SPARC_PROM_H */
#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
+#define thread32_stack_is_64bit(__SP) (((__SP) & 0x1) != 0)
+#define test_thread_64bit_stack(__SP) \
+ ((test_thread_flag(TIF_32BIT) && !thread32_stack_is_64bit(__SP)) ? \
+ false : true)
+
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
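What the new predicate encodes: 64-bit SPARC frames are addressed through a stack bias of 2047, so a biased %sp is always odd, while 32-bit frames are word-aligned and even. A 32-bit task that sets bit 0 of its stack pointer is therefore declaring a full 64-bit frame, and the spill/fill trap vectors below test the same bit before choosing a window format. A schematic restatement in C (values illustrative):

	#include <stdbool.h>

	#define STACK_BIAS 2047UL	/* sparc64 frame bias */

	static bool frame_is_64bit(unsigned long sp)
	{
		return (sp & 1UL) != 0;	/* same test as thread32_stack_is_64bit() */
	}

	/* e.g. a biased 64-bit frame: frame_is_64bit(0x7feff000000UL - STACK_BIAS) -> true */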
/* Normal 32bit spill */
#define SPILL_2_GENERIC(ASI) \
- srl %sp, 0, %sp; \
+ and %sp, 1, %g3; \
+ brnz,pn %g3, (. - (128 + 4)); \
+ srl %sp, 0, %sp; \
stwa %l0, [%sp + %g0] ASI; \
mov 0x04, %g3; \
stwa %l1, [%sp + %g3] ASI; \
stwa %i6, [%g1 + %g0] ASI; \
stwa %i7, [%g1 + %g3] ASI; \
saved; \
- retry; nop; nop; \
+ retry; \
b,a,pt %xcc, spill_fixup_dax; \
b,a,pt %xcc, spill_fixup_mna; \
b,a,pt %xcc, spill_fixup;
#define SPILL_2_GENERIC_ETRAP \
etrap_user_spill_32bit: \
- srl %sp, 0, %sp; \
+ and %sp, 1, %g3; \
+ brnz,pn %g3, etrap_user_spill_64bit; \
+ srl %sp, 0, %sp; \
stwa %l0, [%sp + 0x00] %asi; \
stwa %l1, [%sp + 0x04] %asi; \
stwa %l2, [%sp + 0x08] %asi; \
ba,pt %xcc, etrap_save; \
wrpr %g1, %cwp; \
nop; nop; nop; nop; \
- nop; nop; nop; nop; \
+ nop; nop; \
ba,a,pt %xcc, etrap_spill_fixup_32bit; \
ba,a,pt %xcc, etrap_spill_fixup_32bit; \
ba,a,pt %xcc, etrap_spill_fixup_32bit;
/* Normal 32bit fill */
#define FILL_2_GENERIC(ASI) \
- srl %sp, 0, %sp; \
+ and %sp, 1, %g3; \
+ brnz,pn %g3, (. - (128 + 4)); \
+ srl %sp, 0, %sp; \
lduwa [%sp + %g0] ASI, %l0; \
mov 0x04, %g2; \
mov 0x08, %g3; \
lduwa [%g1 + %g3] ASI, %i6; \
lduwa [%g1 + %g5] ASI, %i7; \
restored; \
- retry; nop; nop; nop; nop; \
+ retry; nop; nop; \
b,a,pt %xcc, fill_fixup_dax; \
b,a,pt %xcc, fill_fixup_mna; \
b,a,pt %xcc, fill_fixup;
#define FILL_2_GENERIC_RTRAP \
user_rtt_fill_32bit: \
- srl %sp, 0, %sp; \
+ and %sp, 1, %g3; \
+ brnz,pn %g3, user_rtt_fill_64bit; \
+ srl %sp, 0, %sp; \
lduwa [%sp + 0x00] %asi, %l0; \
lduwa [%sp + 0x04] %asi, %l1; \
lduwa [%sp + 0x08] %asi, %l2; \
ba,pt %xcc, user_rtt_pre_restore; \
restored; \
nop; nop; nop; nop; nop; \
- nop; nop; nop; nop; nop; \
+ nop; nop; nop; \
ba,a,pt %xcc, user_rtt_fill_fixup; \
ba,a,pt %xcc, user_rtt_fill_fixup; \
ba,a,pt %xcc, user_rtt_fill_fixup;
#define __NR_setns 337
#define __NR_process_vm_readv 338
#define __NR_process_vm_writev 339
+#define __NR_kern_features 340
+#define __NR_kcmp 341
-#define NR_syscalls 340
+#define NR_syscalls 342
+
+/* Bitmask values returned from kern_features system call. */
+#define KERN_FEATURE_MIXED_MODE_STACK 0x00000001
#ifdef __32bit_syscall_numbers__
/* Sparc 32-bit only has the "setresuid32", "getresuid32" variants,
extern struct popc_6insn_patch_entry __popc_6insn_patch,
__popc_6insn_patch_end;
+struct pause_patch_entry {
+ unsigned int addr;
+ unsigned int insns[3];
+};
+extern struct pause_patch_entry __pause_3insn_patch,
+ __pause_3insn_patch_end;
+
extern void __init per_cpu_patch(void);
extern void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *,
struct sun4v_1insn_patch_entry *);
static void leon_handle_ext_irq(unsigned int irq, struct irq_desc *desc)
{
unsigned int eirq;
+ struct irq_bucket *p;
int cpu = sparc_leon3_cpuid();
eirq = leon_eirq_get(cpu);
- if ((eirq & 0x10) && irq_map[eirq]->irq) /* bit4 tells if IRQ happened */
- generic_handle_irq(irq_map[eirq]->irq);
+ p = irq_map[eirq];
+ if ((eirq & 0x10) && p && p->irq) /* bit4 tells if IRQ happened */
+ generic_handle_irq(p->irq);
}
/* The extended IRQ controller has been found, this function registers it */
ufp = regs->u_regs[UREG_I6] & 0xffffffffUL;
do {
- struct sparc_stackf32 *usf, sf;
unsigned long pc;
- usf = (struct sparc_stackf32 *) ufp;
- if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
- break;
+ if (thread32_stack_is_64bit(ufp)) {
+ struct sparc_stackf *usf, sf;
- pc = sf.callers_pc;
- ufp = (unsigned long)sf.fp;
+ ufp += STACK_BIAS;
+ usf = (struct sparc_stackf *) ufp;
+ if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
+ break;
+ pc = sf.callers_pc & 0xffffffff;
+ ufp = ((unsigned long) sf.fp) & 0xffffffff;
+ } else {
+ struct sparc_stackf32 *usf, sf;
+ usf = (struct sparc_stackf32 *) ufp;
+ if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
+ break;
+ pc = sf.callers_pc;
+ ufp = (unsigned long)sf.fp;
+ }
perf_callchain_store(entry, pc);
} while (entry->nr < PERF_MAX_STACK_DEPTH);
}
/* It's a bit more tricky when 64-bit tasks are involved... */
static unsigned long clone_stackframe(unsigned long csp, unsigned long psp)
{
+ bool stack_64bit = test_thread_64bit_stack(psp);
unsigned long fp, distance, rval;
- if (!(test_thread_flag(TIF_32BIT))) {
+ if (stack_64bit) {
csp += STACK_BIAS;
psp += STACK_BIAS;
__get_user(fp, &(((struct reg_window __user *)psp)->ins[6]));
fp += STACK_BIAS;
+ if (test_thread_flag(TIF_32BIT))
+ fp &= 0xffffffff;
} else
__get_user(fp, &(((struct reg_window32 __user *)psp)->ins[6]));
rval = (csp - distance);
if (copy_in_user((void __user *) rval, (void __user *) psp, distance))
rval = 0;
- else if (test_thread_flag(TIF_32BIT)) {
+ else if (!stack_64bit) {
if (put_user(((u32)csp),
&(((struct reg_window32 __user *)rval)->ins[6])))
rval = 0;
flush_user_windows();
if ((window = get_thread_wsaved()) != 0) {
- int winsize = sizeof(struct reg_window);
- int bias = 0;
-
- if (test_thread_flag(TIF_32BIT))
- winsize = sizeof(struct reg_window32);
- else
- bias = STACK_BIAS;
-
window -= 1;
do {
- unsigned long sp = (t->rwbuf_stkptrs[window] + bias);
struct reg_window *rwin = &t->reg_window[window];
+ int winsize = sizeof(struct reg_window);
+ unsigned long sp;
+
+ sp = t->rwbuf_stkptrs[window];
+
+ if (test_thread_64bit_stack(sp))
+ sp += STACK_BIAS;
+ else
+ winsize = sizeof(struct reg_window32);
if (!copy_to_user((char __user *)sp, rwin, winsize)) {
shift_window_buffer(window, get_thread_wsaved() - 1, t);
{
struct thread_info *t = current_thread_info();
unsigned long window;
- int winsize = sizeof(struct reg_window);
- int bias = 0;
-
- if (test_thread_flag(TIF_32BIT))
- winsize = sizeof(struct reg_window32);
- else
- bias = STACK_BIAS;
flush_user_windows();
window = get_thread_wsaved();
if (likely(window != 0)) {
window -= 1;
do {
- unsigned long sp = (t->rwbuf_stkptrs[window] + bias);
struct reg_window *rwin = &t->reg_window[window];
+ int winsize = sizeof(struct reg_window);
+ unsigned long sp;
+
+ sp = t->rwbuf_stkptrs[window];
+
+ if (test_thread_64bit_stack(sp))
+ sp += STACK_BIAS;
+ else
+ winsize = sizeof(struct reg_window32);
if (unlikely(sp & 0x7UL))
stack_unaligned(sp);
{
unsigned long rw_addr = regs->u_regs[UREG_I6];
- if (test_tsk_thread_flag(current, TIF_32BIT)) {
+ if (!test_thread_64bit_stack(rw_addr)) {
struct reg_window32 win32;
int i;
{
unsigned long rw_addr = regs->u_regs[UREG_I6];
- if (test_tsk_thread_flag(current, TIF_32BIT)) {
+ if (!test_thread_64bit_stack(rw_addr)) {
struct reg_window32 win32;
int i;
}
}
+static void __init pause_patch(void)
+{
+ struct pause_patch_entry *p;
+
+ p = &__pause_3insn_patch;
+ while (p < &__pause_3insn_patch_end) {
+ unsigned long i, addr = p->addr;
+
+ for (i = 0; i < 3; i++) {
+ *(unsigned int *) (addr + (i * 4)) = p->insns[i];
+ wmb();
+ __asm__ __volatile__("flush %0"
+ : : "r" (addr + (i * 4)));
+ }
+
+ p++;
+ }
+}
+
#ifdef CONFIG_SMP
void __init boot_cpu_id_too_large(int cpu)
{
if (sparc64_elf_hwcap & AV_SPARC_POPC)
popc_patch();
+ if (sparc64_elf_hwcap & AV_SPARC_PAUSE)
+ pause_patch();
}
void __init setup_arch(char **cmdline_p)
err |= restore_fpu_state(regs, fpu_save);
err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t));
- err |= do_sigaltstack(&sf->stack, NULL, (unsigned long)sf);
-
- if (err)
+ if (err || do_sigaltstack(&sf->stack, NULL, (unsigned long)sf) == -EFAULT)
goto segv;
err |= __get_user(rwin_save, &sf->rwin_save);
: "cc");
return __res;
}
+
+asmlinkage long sys_kern_features(void)
+{
+ return KERN_FEATURE_MIXED_MODE_STACK;
+}
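From userspace the new call is a plain syscall with no arguments; a sketch for sparc64, reusing the constants from the hunks above (error handling trimmed):

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	#define __NR_kern_features		340
	#define KERN_FEATURE_MIXED_MODE_STACK	0x00000001

	int main(void)
	{
		long features = syscall(__NR_kern_features);

		if (features > 0 && (features & KERN_FEATURE_MIXED_MODE_STACK))
			printf("mixed 32/64-bit stack frames supported\n");
		return 0;
	}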
/*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init
/*330*/ .long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
/*335*/ .long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
+/*340*/ .long sys_ni_syscall, sys_kcmp
.word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open, compat_sys_recvmmsg, sys_fanotify_init
/*330*/ .word sys32_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime
.word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev
+/*340*/ .word sys_kern_features, sys_kcmp
#endif /* CONFIG_COMPAT */
.word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init
/*330*/ .word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
.word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
+/*340*/ .word sys_kern_features, sys_kcmp
static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs)
{
- unsigned long value;
+ unsigned long value, fp;
if (reg < 16)
return (!reg ? 0 : regs->u_regs[reg]);
+
+ fp = regs->u_regs[UREG_FP];
+
if (regs->tstate & TSTATE_PRIV) {
struct reg_window *win;
- win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
+ win = (struct reg_window *)(fp + STACK_BIAS);
value = win->locals[reg - 16];
- } else if (test_thread_flag(TIF_32BIT)) {
+ } else if (!test_thread_64bit_stack(fp)) {
struct reg_window32 __user *win32;
- win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
+ win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp));
get_user(value, &win32->locals[reg - 16]);
} else {
struct reg_window __user *win;
- win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS);
+ win = (struct reg_window __user *)(fp + STACK_BIAS);
get_user(value, &win->locals[reg - 16]);
}
return value;
static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs)
{
+ unsigned long fp;
+
if (reg < 16)
return &regs->u_regs[reg];
+
+ fp = regs->u_regs[UREG_FP];
+
if (regs->tstate & TSTATE_PRIV) {
struct reg_window *win;
- win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
+ win = (struct reg_window *)(fp + STACK_BIAS);
return &win->locals[reg - 16];
- } else if (test_thread_flag(TIF_32BIT)) {
+ } else if (!test_thread_64bit_stack(fp)) {
struct reg_window32 *win32;
- win32 = (struct reg_window32 *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
+ win32 = (struct reg_window32 *)((unsigned long)((u32)fp));
return (unsigned long *)&win32->locals[reg - 16];
} else {
struct reg_window *win;
- win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
+ win = (struct reg_window *)(fp + STACK_BIAS);
return &win->locals[reg - 16];
}
}
if (rd)
regs->u_regs[rd] = ret;
} else {
- if (test_thread_flag(TIF_32BIT)) {
+ unsigned long fp = regs->u_regs[UREG_FP];
+
+ if (!test_thread_64bit_stack(fp)) {
struct reg_window32 __user *win32;
- win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
+ win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp));
put_user(ret, &win32->locals[rd - 16]);
} else {
struct reg_window __user *win;
- win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS);
+ win = (struct reg_window __user *)(fp + STACK_BIAS);
put_user(ret, &win->locals[rd - 16]);
}
}
reg[0] = 0;
if ((insn & 0x780000) == 0x180000)
reg[1] = 0;
- } else if (test_thread_flag(TIF_32BIT)) {
+ } else if (!test_thread_64bit_stack(regs->u_regs[UREG_FP])) {
put_user(0, (int __user *) reg);
if ((insn & 0x780000) == 0x180000)
put_user(0, ((int __user *) reg) + 1);
static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs)
{
- unsigned long value;
+ unsigned long value, fp;
if (reg < 16)
return (!reg ? 0 : regs->u_regs[reg]);
+
+ fp = regs->u_regs[UREG_FP];
+
if (regs->tstate & TSTATE_PRIV) {
struct reg_window *win;
- win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
+ win = (struct reg_window *)(fp + STACK_BIAS);
value = win->locals[reg - 16];
- } else if (test_thread_flag(TIF_32BIT)) {
+ } else if (!test_thread_64bit_stack(fp)) {
struct reg_window32 __user *win32;
- win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
+ win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp));
get_user(value, &win32->locals[reg - 16]);
} else {
struct reg_window __user *win;
- win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS);
+ win = (struct reg_window __user *)(fp + STACK_BIAS);
get_user(value, &win->locals[reg - 16]);
}
return value;
static inline unsigned long __user *__fetch_reg_addr_user(unsigned int reg,
struct pt_regs *regs)
{
+ unsigned long fp = regs->u_regs[UREG_FP];
+
BUG_ON(reg < 16);
BUG_ON(regs->tstate & TSTATE_PRIV);
- if (test_thread_flag(TIF_32BIT)) {
+ if (!test_thread_64bit_stack(fp)) {
struct reg_window32 __user *win32;
- win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
+ win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp));
return (unsigned long __user *)&win32->locals[reg - 16];
} else {
struct reg_window __user *win;
- win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS);
+ win = (struct reg_window __user *)(fp + STACK_BIAS);
return &win->locals[reg - 16];
}
}
} else {
unsigned long __user *rd_user = __fetch_reg_addr_user(rd, regs);
- if (test_thread_flag(TIF_32BIT))
+ if (!test_thread_64bit_stack(regs->u_regs[UREG_FP]))
__put_user((u32)val, (u32 __user *)rd_user);
else
__put_user(val, rd_user);
*(.popc_6insn_patch)
__popc_6insn_patch_end = .;
}
+ .pause_3insn_patch : {
+ __pause_3insn_patch = .;
+ *(.pause_3insn_patch)
+ __pause_3insn_patch_end = .;
+ }
PERCPU_SECTION(SMP_CACHE_BYTES)
. = ALIGN(PAGE_SIZE);
spill_fixup_dax:
TRAP_LOAD_THREAD_REG(%g6, %g1)
ldx [%g6 + TI_FLAGS], %g1
+ andcc %sp, 0x1, %g0
+ movne %icc, 0, %g1
andcc %g1, _TIF_32BIT, %g0
ldub [%g6 + TI_WSAVED], %g1
sll %g1, 3, %g3
/* atomic.S: These things are too big to do inline.
*
- * Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 1999, 2007, 2012 David S. Miller (davem@davemloft.net)
*/
#include <linux/linkage.h>
sub %g1, %o0, %o0
2: BACKOFF_SPIN(%o2, %o3, 1b)
ENDPROC(atomic64_sub_ret)
+
+ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+1: ldx [%o0], %g1
+ brlez,pn %g1, 3f
+ sub %g1, 1, %g7
+ casx [%o0], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
+ nop
+3: retl
+ sub %g1, 1, %o0
+2: BACKOFF_SPIN(%o2, %o3, 1b)
+ENDPROC(atomic64_dec_if_positive)
EXPORT_SYMBOL(atomic64_add_ret);
EXPORT_SYMBOL(atomic64_sub);
EXPORT_SYMBOL(atomic64_sub_ret);
+EXPORT_SYMBOL(atomic64_dec_if_positive);
/* Atomic bit operations. */
EXPORT_SYMBOL(test_and_set_bit);
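The contract of the new primitive, restated as C over GCC builtins (a sketch, not the kernel code): decrement only while the value is positive, and return old - 1 whether or not the store happened, so callers can test the sign of the result.

	static long dec_if_positive_sketch(long *v)
	{
		long old = __atomic_load_n(v, __ATOMIC_RELAXED);

		while (old > 0 &&
		       !__atomic_compare_exchange_n(v, &old, old - 1, 0,
						    __ATOMIC_SEQ_CST, __ATOMIC_RELAXED))
			;	/* a failed CAS refreshes 'old'; retry */
		return old - 1;	/* mirrors the asm's unconditional 'sub %g1, 1, %o0' */
	}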
XR = 0;
else if (freg < 16)
XR = regs->u_regs[freg];
- else if (test_thread_flag(TIF_32BIT)) {
+ else if (!test_thread_64bit_stack(regs->u_regs[UREG_FP])) {
struct reg_window32 __user *win32;
flushw_user ();
win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
endif
endif
+# The tile compiler may emit .eh_frame information for backtracing.
+# In kernel modules, this causes load failures due to unsupported relocations.
+KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
+
ifneq ($(CONFIG_DEBUG_EXTRA_FLAGS),"")
KBUILD_CFLAGS += $(CONFIG_DEBUG_EXTRA_FLAGS)
endif
#include <asm/homecache.h>
#include <arch/opcode.h>
-#ifdef __tilegx__
-# define Elf_Rela Elf64_Rela
-# define ELF_R_SYM ELF64_R_SYM
-# define ELF_R_TYPE ELF64_R_TYPE
-#else
-# define Elf_Rela Elf32_Rela
-# define ELF_R_SYM ELF32_R_SYM
-# define ELF_R_TYPE ELF32_R_TYPE
-#endif
-
#ifdef MODULE_DEBUG
#define DEBUGP printk
#else
select ARCH_WANT_FRAME_POINTERS
select GENERIC_IOMAP
select MODULES_USE_ELF_REL
+ select GENERIC_KERNEL_THREAD
+ select GENERIC_KERNEL_EXECVE
help
UniCore-32 is 32-bit Instruction Set Architecture,
including a series of low-power-consumption RISC chip
config ARCH_MAY_HAVE_PC_FDC
bool
+config ZONE_DMA
+ def_bool y
+
config NEED_DMA_MAP_STATE
def_bool y
bool
depends on !ARCH_FPGA
select GENERIC_GPIO
- select GPIO_SYSFS if EXPERIMENTAL
+ select GPIO_SYSFS
default y
if PUV3_NB0916
-include include/asm-generic/Kbuild.asm
generic-y += atomic.h
generic-y += auxvec.h
extern void uc32_notify_die(const char *str, struct pt_regs *regs,
struct siginfo *info, unsigned long err, unsigned long trap);
-extern asmlinkage void __backtrace(void);
-extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
-
-extern void __show_regs(struct pt_regs *);
-
#endif /* __UNICORE_BUG_H__ */
: "memory", "cc");
break;
default:
- ret = __xchg_bad_pointer();
+ __xchg_bad_pointer();
}
return ret;
+++ /dev/null
-#include <asm-generic/kvm_para.h>
#define cpu_relax() barrier()
-/*
- * Create a new kernel thread
- */
-extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
-
#define task_pt_regs(p) \
((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
#ifndef __UNICORE_PTRACE_H__
#define __UNICORE_PTRACE_H__
-#define PTRACE_GET_THREAD_AREA 22
-
-/*
- * PSR bits
- */
-#define USER_MODE 0x00000010
-#define REAL_MODE 0x00000011
-#define INTR_MODE 0x00000012
-#define PRIV_MODE 0x00000013
-#define ABRT_MODE 0x00000017
-#define EXTN_MODE 0x0000001b
-#define SUSR_MODE 0x0000001f
-#define MODE_MASK 0x0000001f
-#define PSR_R_BIT 0x00000040
-#define PSR_I_BIT 0x00000080
-#define PSR_V_BIT 0x10000000
-#define PSR_C_BIT 0x20000000
-#define PSR_Z_BIT 0x40000000
-#define PSR_S_BIT 0x80000000
-
-/*
- * Groups of PSR bits
- */
-#define PSR_f 0xff000000 /* Flags */
-#define PSR_c 0x000000ff /* Control */
+#include <uapi/asm/ptrace.h>
#ifndef __ASSEMBLY__
-/*
- * This struct defines the way the registers are stored on the
- * stack during a system call. Note that sizeof(struct pt_regs)
- * has to be a multiple of 8.
- */
-struct pt_regs {
- unsigned long uregs[34];
-};
-
-#define UCreg_asr uregs[32]
-#define UCreg_pc uregs[31]
-#define UCreg_lr uregs[30]
-#define UCreg_sp uregs[29]
-#define UCreg_ip uregs[28]
-#define UCreg_fp uregs[27]
-#define UCreg_26 uregs[26]
-#define UCreg_25 uregs[25]
-#define UCreg_24 uregs[24]
-#define UCreg_23 uregs[23]
-#define UCreg_22 uregs[22]
-#define UCreg_21 uregs[21]
-#define UCreg_20 uregs[20]
-#define UCreg_19 uregs[19]
-#define UCreg_18 uregs[18]
-#define UCreg_17 uregs[17]
-#define UCreg_16 uregs[16]
-#define UCreg_15 uregs[15]
-#define UCreg_14 uregs[14]
-#define UCreg_13 uregs[13]
-#define UCreg_12 uregs[12]
-#define UCreg_11 uregs[11]
-#define UCreg_10 uregs[10]
-#define UCreg_09 uregs[9]
-#define UCreg_08 uregs[8]
-#define UCreg_07 uregs[7]
-#define UCreg_06 uregs[6]
-#define UCreg_05 uregs[5]
-#define UCreg_04 uregs[4]
-#define UCreg_03 uregs[3]
-#define UCreg_02 uregs[2]
-#define UCreg_01 uregs[1]
-#define UCreg_00 uregs[0]
-#define UCreg_ORIG_00 uregs[33]
-
-#ifdef __KERNEL__
-
#define user_mode(regs) \
(processor_mode(regs) == USER_MODE)
#define instruction_pointer(regs) ((regs)->UCreg_pc)
-#endif /* __KERNEL__ */
-
#endif /* __ASSEMBLY__ */
-
#endif
-
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
+header-y += byteorder.h
+header-y += kvm_para.h
+header-y += ptrace.h
+header-y += sigcontext.h
+header-y += unistd.h
+
+generic-y += kvm_para.h
--- /dev/null
+/*
+ * linux/arch/unicore32/include/asm/ptrace.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _UAPI__UNICORE_PTRACE_H__
+#define _UAPI__UNICORE_PTRACE_H__
+
+#define PTRACE_GET_THREAD_AREA 22
+
+/*
+ * PSR bits
+ */
+#define USER_MODE 0x00000010
+#define REAL_MODE 0x00000011
+#define INTR_MODE 0x00000012
+#define PRIV_MODE 0x00000013
+#define ABRT_MODE 0x00000017
+#define EXTN_MODE 0x0000001b
+#define SUSR_MODE 0x0000001f
+#define MODE_MASK 0x0000001f
+#define PSR_R_BIT 0x00000040
+#define PSR_I_BIT 0x00000080
+#define PSR_V_BIT 0x10000000
+#define PSR_C_BIT 0x20000000
+#define PSR_Z_BIT 0x40000000
+#define PSR_S_BIT 0x80000000
+
+/*
+ * Groups of PSR bits
+ */
+#define PSR_f 0xff000000 /* Flags */
+#define PSR_c 0x000000ff /* Control */
+
+#ifndef __ASSEMBLY__
+
+/*
+ * This struct defines the way the registers are stored on the
+ * stack during a system call. Note that sizeof(struct pt_regs)
+ * has to be a multiple of 8.
+ */
+struct pt_regs {
+ unsigned long uregs[34];
+};
+
+#define UCreg_asr uregs[32]
+#define UCreg_pc uregs[31]
+#define UCreg_lr uregs[30]
+#define UCreg_sp uregs[29]
+#define UCreg_ip uregs[28]
+#define UCreg_fp uregs[27]
+#define UCreg_26 uregs[26]
+#define UCreg_25 uregs[25]
+#define UCreg_24 uregs[24]
+#define UCreg_23 uregs[23]
+#define UCreg_22 uregs[22]
+#define UCreg_21 uregs[21]
+#define UCreg_20 uregs[20]
+#define UCreg_19 uregs[19]
+#define UCreg_18 uregs[18]
+#define UCreg_17 uregs[17]
+#define UCreg_16 uregs[16]
+#define UCreg_15 uregs[15]
+#define UCreg_14 uregs[14]
+#define UCreg_13 uregs[13]
+#define UCreg_12 uregs[12]
+#define UCreg_11 uregs[11]
+#define UCreg_10 uregs[10]
+#define UCreg_09 uregs[9]
+#define UCreg_08 uregs[8]
+#define UCreg_07 uregs[7]
+#define UCreg_06 uregs[6]
+#define UCreg_05 uregs[5]
+#define UCreg_04 uregs[4]
+#define UCreg_03 uregs[3]
+#define UCreg_02 uregs[2]
+#define UCreg_01 uregs[1]
+#define UCreg_00 uregs[0]
+#define UCreg_ORIG_00 uregs[33]
+
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _UAPI__UNICORE_PTRACE_H__ */
/* Use the standard ABI for syscalls. */
#include <asm-generic/unistd.h>
+#define __ARCH_WANT_SYS_EXECVE
*/
ENTRY(ret_from_fork)
b.l schedule_tail
- get_thread_info tsk
- ldw r1, [tsk+], #TI_FLAGS @ check for syscall tracing
- mov why, #1
- cand.a r1, #_TIF_SYSCALL_TRACE @ are we tracing syscalls?
- beq ret_slow_syscall
- mov r1, sp
- mov r0, #1 @ trace exit [IP = 1]
- b.l syscall_trace
b ret_slow_syscall
ENDPROC(ret_from_fork)
+ENTRY(ret_from_kernel_thread)
+ b.l schedule_tail
+ mov r0, r5
+ adr lr, ret_slow_syscall
+ mov pc, r4
+ENDPROC(ret_from_kernel_thread)
+
/*=============================================================================
* SWI handler
*-----------------------------------------------------------------------------
#endif
.ltorg
-ENTRY(sys_execve)
- add r3, sp, #S_OFF
- b __sys_execve
-ENDPROC(sys_execve)
-
ENTRY(sys_clone)
add ip, sp, #S_OFF
stw ip, [sp+], #4
}
asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
+asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
int
copy_thread(unsigned long clone_flags, unsigned long stack_start,
struct thread_info *thread = task_thread_info(p);
struct pt_regs *childregs = task_pt_regs(p);
- *childregs = *regs;
- childregs->UCreg_00 = 0;
- childregs->UCreg_sp = stack_start;
-
memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));
thread->cpu_context.sp = (unsigned long)childregs;
- thread->cpu_context.pc = (unsigned long)ret_from_fork;
-
- if (clone_flags & CLONE_SETTLS)
- childregs->UCreg_16 = regs->UCreg_03;
+ if (unlikely(!regs)) {
+ thread->cpu_context.pc = (unsigned long)ret_from_kernel_thread;
+ thread->cpu_context.r4 = stack_start;
+ thread->cpu_context.r5 = stk_sz;
+ memset(childregs, 0, sizeof(struct pt_regs));
+ } else {
+ thread->cpu_context.pc = (unsigned long)ret_from_fork;
+ *childregs = *regs;
+ childregs->UCreg_00 = 0;
+ childregs->UCreg_sp = stack_start;
+ if (clone_flags & CLONE_SETTLS)
+ childregs->UCreg_16 = regs->UCreg_03;
+ }
return 0;
}
}
EXPORT_SYMBOL(dump_fpu);
-/*
- * Shuffle the argument into the correct register before calling the
- * thread function. r1 is the thread argument, r2 is the pointer to
- * the thread function, and r3 points to the exit function.
- */
-asm(".pushsection .text\n"
-" .align\n"
-" .type kernel_thread_helper, #function\n"
-"kernel_thread_helper:\n"
-" mov.a asr, r7\n"
-" mov r0, r4\n"
-" mov lr, r6\n"
-" mov pc, r5\n"
-" .size kernel_thread_helper, . - kernel_thread_helper\n"
-" .popsection");
-
-/*
- * Create a kernel thread.
- */
-pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
-{
- struct pt_regs regs;
-
- memset(&regs, 0, sizeof(regs));
-
- regs.UCreg_04 = (unsigned long)arg;
- regs.UCreg_05 = (unsigned long)fn;
- regs.UCreg_06 = (unsigned long)do_exit;
- regs.UCreg_07 = PRIV_MODE;
- regs.UCreg_pc = (unsigned long)kernel_thread_helper;
- regs.UCreg_asr = regs.UCreg_07 | PSR_I_BIT;
-
- return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
-}
-EXPORT_SYMBOL(kernel_thread);
-
unsigned long get_wchan(struct task_struct *p)
{
struct stackframe frame;
extern void kernel_thread_helper(void);
extern void __init early_signal_init(void);
+
+extern asmlinkage void __backtrace(void);
+extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
+
+extern void __show_regs(struct pt_regs *);
+
#endif
parent_tid, child_tid);
}
-/* sys_execve() executes a new program.
- * This is called indirectly via a small wrapper
- */
-asmlinkage long __sys_execve(const char __user *filename,
- const char __user *const __user *argv,
- const char __user *const __user *envp,
- struct pt_regs *regs)
-{
- int error;
- struct filename *fn;
-
- fn = getname(filename);
- error = PTR_ERR(fn);
- if (IS_ERR(fn))
- goto out;
- error = do_execve(fn->name, argv, envp, regs);
- putname(fn);
-out:
- return error;
-}
-
-int kernel_execve(const char *filename,
- const char *const argv[],
- const char *const envp[])
-{
- struct pt_regs regs;
- int ret;
-
- memset(&regs, 0, sizeof(struct pt_regs));
- ret = do_execve(filename,
- (const char __user *const __user *)argv,
- (const char __user *const __user *)envp, &regs);
- if (ret < 0)
- goto out;
-
- /*
- * Save argc to the register structure for userspace.
- */
- regs.UCreg_00 = ret;
-
- /*
- * We were successful. We won't be returning to our caller, but
- * instead to user space by manipulating the kernel stack.
- */
- asm("add r0, %0, %1\n\t"
- "mov r1, %2\n\t"
- "mov r2, %3\n\t"
- "mov r22, #0\n\t" /* not a syscall */
- "mov r23, %0\n\t" /* thread structure */
- "b.l memmove\n\t" /* copy regs to top of stack */
- "mov sp, r0\n\t" /* reposition stack pointer */
- "b ret_to_user"
- :
- : "r" (current_thread_info()),
- "Ir" (THREAD_START_SP - sizeof(regs)),
- "r" (®s),
- "Ir" (sizeof(regs))
- : "r0", "r1", "r2", "r3", "ip", "lr", "memory");
-
- out:
- return ret;
-}
-
/* Note: used by the compat code even in 64-bit Linux. */
SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
unsigned long, prot, unsigned long, flags,
}
static int __do_pf(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
- struct task_struct *tsk)
+ unsigned int flags, struct task_struct *tsk)
{
struct vm_area_struct *vma;
int fault;
* If for any reason at all we couldn't handle the fault, make
* sure we exit gracefully rather than endlessly redo the fault.
*/
- fault = handle_mm_fault(mm, vma, addr & PAGE_MASK,
- (!(fsr ^ 0x12)) ? FAULT_FLAG_WRITE : 0);
- if (unlikely(fault & VM_FAULT_ERROR))
- return fault;
- if (fault & VM_FAULT_MAJOR)
- tsk->maj_flt++;
- else
- tsk->min_flt++;
+ fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);
return fault;
check_stack:
struct task_struct *tsk;
struct mm_struct *mm;
int fault, sig, code;
+ unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+ ((!(fsr ^ 0x12)) ? FAULT_FLAG_WRITE : 0);
tsk = current;
mm = tsk->mm;
if (!user_mode(regs)
&& !search_exception_tables(regs->UCreg_pc))
goto no_context;
+retry:
down_read(&mm->mmap_sem);
} else {
/*
#endif
}
- fault = __do_pf(mm, addr, fsr, tsk);
+ fault = __do_pf(mm, addr, fsr, flags, tsk);
+
+ /* If we need to retry but a fatal signal is pending, handle the
+ * signal first. We do not need to release the mmap_sem because
+ * it would already be released in __lock_page_or_retry in
+ * mm/filemap.c. */
+ if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ return 0;
+
+ if (!(fault & VM_FAULT_ERROR) && (flags & FAULT_FLAG_ALLOW_RETRY)) {
+ if (fault & VM_FAULT_MAJOR)
+ tsk->maj_flt++;
+ else
+ tsk->min_flt++;
+ if (fault & VM_FAULT_RETRY) {
+ /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
+ * of starvation. */
+ flags &= ~FAULT_FLAG_ALLOW_RETRY;
+ goto retry;
+ }
+ }
+
up_read(&mm->mmap_sem);
/*
#include <asm/setup.h>
#include <asm/desc.h>
+#undef memcpy /* Use memcpy from misc.c */
+
#include "eboot.h"
static efi_system_table_t *sys_table;
setup_corrupt:
.byte 7
.string "No setup signature found...\n"
-
- .data
-dummy: .long 0
#define efi_call_virt6(f, a1, a2, a3, a4, a5, a6) \
efi_call_virt(f, a1, a2, a3, a4, a5, a6)
-#define efi_ioremap(addr, size, type) ioremap_cache(addr, size)
+#define efi_ioremap(addr, size, type, attr) ioremap_cache(addr, size)
#else /* !CONFIG_X86_32 */
(u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6))
extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
- u32 type);
+ u32 type, u64 attribute);
#endif /* CONFIG_X86_32 */
extern int efi_memblock_x86_reserve_range(void);
extern void efi_call_phys_prelog(void);
extern void efi_call_phys_epilog(void);
+extern void efi_unmap_memmap(void);
+extern void efi_memory_uc(u64 addr, unsigned long size);
#ifndef CONFIG_EFI
/*
}
#endif
-/*
- * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
- * when it traps. The previous stack will be directly underneath the saved
- * registers, and 'sp/ss' won't even have been saved. Thus the '&regs->sp'.
- *
- * This is valid only for kernel mode traps.
- */
-static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
-{
#ifdef CONFIG_X86_32
- return (unsigned long)(&regs->sp);
+extern unsigned long kernel_stack_pointer(struct pt_regs *regs);
#else
+static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
+{
return regs->sp;
-#endif
}
+#endif
#define GET_IP(regs) ((regs)->ip)
#define GET_FP(regs) ((regs)->bp)
return _hypercall4(int, update_va_mapping, va,
new_val.pte, new_val.pte >> 32, flags);
}
+extern int __must_check xen_event_channel_op_compat(int, void *);
static inline int
HYPERVISOR_event_channel_op(int cmd, void *arg)
{
int rc = _hypercall2(int, event_channel_op, cmd, arg);
- if (unlikely(rc == -ENOSYS)) {
- struct evtchn_op op;
- op.cmd = cmd;
- memcpy(&op.u, arg, sizeof(op.u));
- rc = _hypercall1(int, event_channel_op_compat, &op);
- memcpy(arg, &op.u, sizeof(op.u));
- }
+ if (unlikely(rc == -ENOSYS))
+ rc = xen_event_channel_op_compat(cmd, arg);
return rc;
}
return _hypercall3(int, console_io, cmd, count, str);
}
+extern int __must_check HYPERVISOR_physdev_op_compat(int, void *);
+
static inline int
HYPERVISOR_physdev_op(int cmd, void *arg)
{
int rc = _hypercall2(int, physdev_op, cmd, arg);
- if (unlikely(rc == -ENOSYS)) {
- struct physdev_op op;
- op.cmd = cmd;
- memcpy(&op.u, arg, sizeof(op.u));
- rc = _hypercall1(int, physdev_op_compat, &op);
- memcpy(arg, &op.u, sizeof(op.u));
- }
+ if (unlikely(rc == -ENOSYS))
+ rc = HYPERVISOR_physdev_op_compat(cmd, arg);
return rc;
}
#ifndef _ASM_X86_XEN_HYPERVISOR_H
#define _ASM_X86_XEN_HYPERVISOR_H
-/* arch/i386/kernel/setup.c */
extern struct shared_info *HYPERVISOR_shared_info;
extern struct start_info *xen_start_info;
* with Xen so that on ARM we can have one ABI that works for 32 and 64
* bit guests. */
typedef unsigned long xen_pfn_t;
+#define PRI_xen_pfn "lx"
typedef unsigned long xen_ulong_t;
+#define PRI_xen_ulong "lx"
/* Guest handles for primitive C types. */
__DEFINE_GUEST_HANDLE(uchar, unsigned char);
__DEFINE_GUEST_HANDLE(uint, unsigned int);
-__DEFINE_GUEST_HANDLE(ulong, unsigned long);
DEFINE_GUEST_HANDLE(char);
DEFINE_GUEST_HANDLE(int);
-DEFINE_GUEST_HANDLE(long);
DEFINE_GUEST_HANDLE(void);
DEFINE_GUEST_HANDLE(uint64_t);
DEFINE_GUEST_HANDLE(uint32_t);
continue;
cfg = irq_cfg(irq);
+ if (!cfg)
+ continue;
+
raw_spin_lock(&desc->lock);
/*
}
}
+ /*
+ * The way access filter has a performance penalty on some workloads.
+ * Disable it on the affected CPUs.
+ */
+ if ((c->x86 == 0x15) &&
+ (c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
+ u64 val;
+
+ if (!rdmsrl_safe(0xc0011021, &val) && !(val & 0x1E)) {
+ val |= 0x1E;
+ wrmsrl_safe(0xc0011021, val);
+ }
+ }
+
cpu_detect_cache_sizes(c);
/* Multi core CPU? */
*
* Written by Jacob Shin - AMD, Inc.
*
- * Support: borislav.petkov@amd.com
+ * Maintained by: Borislav Petkov <bp@alien8.de>
*
* April 2006
* - added support for AMD Family 0x10 processors
raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
}
+static long cmci_rediscover_work_func(void *arg)
+{
+ int banks;
+
+ /* Recheck banks in case CPUs don't all have the same */
+ if (cmci_supported(&banks))
+ cmci_discover(banks);
+
+ return 0;
+}
+
/*
* After a CPU went down cycle through all the others and rediscover
* Must run in process context.
*/
void cmci_rediscover(int dying)
{
- int banks;
- int cpu;
- cpumask_var_t old;
+ int cpu, banks;
if (!cmci_supported(&banks))
return;
- if (!alloc_cpumask_var(&old, GFP_KERNEL))
- return;
- cpumask_copy(old, &current->cpus_allowed);
for_each_online_cpu(cpu) {
if (cpu == dying)
continue;
- if (set_cpus_allowed_ptr(current, cpumask_of(cpu)))
+
+ if (cpu == smp_processor_id()) {
+ cmci_rediscover_work_func(NULL);
continue;
- /* Recheck banks in case CPUs don't all have the same */
- if (cmci_supported(&banks))
- cmci_discover(banks);
- }
+ }
- set_cpus_allowed_ptr(current, old);
- free_cpumask_var(old);
+ work_on_cpu(cpu, cmci_rediscover_work_func, NULL);
+ }
}
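The rework above replaces the fragile trick of rewriting the calling task's affinity mask with work_on_cpu(), which queues the function on the target CPU's workqueue and waits for it in process context. The shape of that pattern, as a sketch with a hypothetical payload:

	#include <linux/cpu.h>
	#include <linux/workqueue.h>

	static long probe_one_cpu(void *arg)	/* runs on the target CPU */
	{
		/* per-cpu work goes here, e.g. the bank recheck above */
		return 0;
	}

	static void probe_all_cpus(void)
	{
		int cpu;

		get_online_cpus();		/* keep the online mask stable */
		for_each_online_cpu(cpu)
			work_on_cpu(cpu, probe_one_cpu, NULL);
		put_online_cpus();
	}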
/*
}
/*
- * Now write a value and read it back to see if it matches,
- * this is needed to detect certain hardware emulators (qemu/kvm)
- * that don't trap on the MSR access and always return 0s.
+ * Read the current value, change it and read it back to see if it
+ * matches; this is needed to detect certain hardware emulators
+ * (qemu/kvm) that don't trap on the MSR access and always return 0s.
*/
- val = 0xabcdUL;
reg = x86_pmu_event_addr(0);
+ if (rdmsrl_safe(reg, &val))
+ goto msr_fail;
+ val ^= 0xffffUL;
ret = wrmsrl_safe(reg, val);
ret |= rdmsrl_safe(reg, &val_new);
if (ret || val != val_new)
{
struct pci_dev *pdev = box->pci_dev;
int box_ctl = uncore_pci_box_ctl(box);
- u32 config;
+ u32 config = 0;
- pci_read_config_dword(pdev, box_ctl, &config);
- config |= SNBEP_PMON_BOX_CTL_FRZ;
- pci_write_config_dword(pdev, box_ctl, config);
+ if (!pci_read_config_dword(pdev, box_ctl, &config)) {
+ config |= SNBEP_PMON_BOX_CTL_FRZ;
+ pci_write_config_dword(pdev, box_ctl, config);
+ }
}
static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
{
struct pci_dev *pdev = box->pci_dev;
int box_ctl = uncore_pci_box_ctl(box);
- u32 config;
+ u32 config = 0;
- pci_read_config_dword(pdev, box_ctl, &config);
- config &= ~SNBEP_PMON_BOX_CTL_FRZ;
- pci_write_config_dword(pdev, box_ctl, config);
+ if (!pci_read_config_dword(pdev, box_ctl, &config)) {
+ config &= ~SNBEP_PMON_BOX_CTL_FRZ;
+ pci_write_config_dword(pdev, box_ctl, config);
+ }
}
static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
struct pci_dev *pdev = box->pci_dev;
struct hw_perf_event *hwc = &event->hw;
- u64 count;
+ u64 count = 0;
pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);
/*
* build pci bus to socket mapping
*/
-static void snbep_pci2phy_map_init(void)
+static int snbep_pci2phy_map_init(void)
{
struct pci_dev *ubox_dev = NULL;
int i, bus, nodeid;
- u32 config;
+ int err = 0;
+ u32 config = 0;
while (1) {
/* find the UBOX device */
break;
bus = ubox_dev->bus->number;
/* get the Node ID of the local register */
- pci_read_config_dword(ubox_dev, 0x40, &config);
+ err = pci_read_config_dword(ubox_dev, 0x40, &config);
+ if (err)
+ break;
nodeid = config;
/* get the Node ID mapping */
- pci_read_config_dword(ubox_dev, 0x54, &config);
+ err = pci_read_config_dword(ubox_dev, 0x54, &config);
+ if (err)
+ break;
/*
* every three bits in the Node ID mapping register maps
* to a particular node.
}
}
};
- return;
+
+ if (ubox_dev)
+ pci_dev_put(ubox_dev);
+
+ return err ? pcibios_err_to_errno(err) : 0;
}
/* end of Sandy Bridge-EP uncore support */
{
struct hw_perf_event *hwc = &event->hw;
struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
- int port;
/* adjust the main event selector and extra register index */
if (reg1->idx % 2) {
}
/* adjust extra register config */
- port = reg1->idx / 6 + box->pmu->pmu_idx * 4;
switch (reg1->idx % 6) {
case 2:
/* shift the 8~15 bits to the 0~7 bits */
switch (boot_cpu_data.x86_model) {
case 45: /* Sandy Bridge-EP */
+ ret = snbep_pci2phy_map_init();
+ if (ret)
+ return ret;
pci_uncores = snbep_pci_uncores;
uncore_pci_driver = &snbep_uncore_pci_driver;
- snbep_pci2phy_map_init();
break;
default:
return 0;
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
return -ENODEV;
+ if (cpu_has_hypervisor)
+ return -ENODEV;
+
ret = uncore_pci_init();
if (ret)
goto fail;
#include <linux/perf_event.h>
#include <linux/types.h>
+#include <asm/hardirq.h>
+
#include "perf_event.h"
static const u64 knc_perfmon_event_map[] =
static inline void
knc_pmu_disable_event(struct perf_event *event)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
u64 val;
val = hwc->config;
- if (cpuc->enabled)
- val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
+ val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
(void)wrmsrl_safe(hwc->config_base + hwc->idx, val);
}
static void knc_pmu_enable_event(struct perf_event *event)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
u64 val;
val = hwc->config;
- if (cpuc->enabled)
- val |= ARCH_PERFMON_EVENTSEL_ENABLE;
+ val |= ARCH_PERFMON_EVENTSEL_ENABLE;
(void)wrmsrl_safe(hwc->config_base + hwc->idx, val);
}
+static inline u64 knc_pmu_get_status(void)
+{
+ u64 status;
+
+ rdmsrl(MSR_KNC_IA32_PERF_GLOBAL_STATUS, status);
+
+ return status;
+}
+
+static inline void knc_pmu_ack_status(u64 ack)
+{
+ wrmsrl(MSR_KNC_IA32_PERF_GLOBAL_OVF_CONTROL, ack);
+}
+
+static int knc_pmu_handle_irq(struct pt_regs *regs)
+{
+ struct perf_sample_data data;
+ struct cpu_hw_events *cpuc;
+ int handled = 0;
+ int bit, loops;
+ u64 status;
+
+ cpuc = &__get_cpu_var(cpu_hw_events);
+
+ knc_pmu_disable_all();
+
+ status = knc_pmu_get_status();
+ if (!status) {
+ knc_pmu_enable_all(0);
+ return handled;
+ }
+
+ loops = 0;
+again:
+ knc_pmu_ack_status(status);
+ if (++loops > 100) {
+ WARN_ONCE(1, "perf: irq loop stuck!\n");
+ perf_event_print_debug();
+ goto done;
+ }
+
+ inc_irq_stat(apic_perf_irqs);
+
+ for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
+ struct perf_event *event = cpuc->events[bit];
+
+ handled++;
+
+ if (!test_bit(bit, cpuc->active_mask))
+ continue;
+
+ if (!intel_pmu_save_and_restart(event))
+ continue;
+
+ perf_sample_data_init(&data, 0, event->hw.last_period);
+
+ if (perf_event_overflow(event, &data, regs))
+ x86_pmu_stop(event, 0);
+ }
+
+ /*
+ * Repeat if there is more work to be done:
+ */
+ status = knc_pmu_get_status();
+ if (status)
+ goto again;
+
+done:
+ knc_pmu_enable_all(0);
+
+ return handled;
+}
+
+
PMU_FORMAT_ATTR(event, "config:0-7" );
PMU_FORMAT_ATTR(umask, "config:8-15" );
PMU_FORMAT_ATTR(edge, "config:18" );
static __initconst struct x86_pmu knc_pmu = {
.name = "knc",
- .handle_irq = x86_pmu_handle_irq,
+ .handle_irq = knc_pmu_handle_irq,
.disable_all = knc_pmu_disable_all,
.enable_all = knc_pmu_enable_all,
.enable = knc_pmu_enable_event,
.event_map = knc_pmu_event_map,
.max_events = ARRAY_SIZE(knc_perfmon_event_map),
.apic = 1,
- .max_period = (1ULL << 31) - 1,
+ .max_period = (1ULL << 39) - 1,
.version = 0,
.num_counters = 2,
- /* in theory 40 bits, early silicon is buggy though */
- .cntval_bits = 32,
- .cntval_mask = (1ULL << 32) - 1,
+ .cntval_bits = 40,
+ .cntval_mask = (1ULL << 40) - 1,
.get_event_constraints = x86_get_event_constraints,
.event_constraints = knc_event_constraints,
.format_attrs = intel_knc_formats_attr,
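The max_period bump simply tracks the wider counters. A minimal sketch of the convention this presumably relies on (an assumption drawn from the generic x86 perf code, not stated in this hunk): the counter is programmed with the negated period and counts up, so the period must stay below half the counter range for the overflow and the sign-extended delta in x86_perf_event_update() to behave.

	u64 cntval_mask = (1ULL << 40) - 1;            /* 40-bit KNC counters */
	u64 max_period  = (1ULL << 39) - 1;            /* half the range */
	u64 left        = 100000;                      /* hypothetical sampling period */
	u64 initial     = (u64)(-left) & cntval_mask;  /* overflows after 'left' events */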
*/
static const u64 p6_perfmon_event_map[] =
{
- [PERF_COUNT_HW_CPU_CYCLES] = 0x0079,
- [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
- [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0f2e,
- [PERF_COUNT_HW_CACHE_MISSES] = 0x012e,
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
- [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
- [PERF_COUNT_HW_BUS_CYCLES] = 0x0062,
+ [PERF_COUNT_HW_CPU_CYCLES] = 0x0079, /* CPU_CLK_UNHALTED */
+ [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, /* INST_RETIRED */
+ [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0f2e, /* L2_RQSTS:M:E:S:I */
+ [PERF_COUNT_HW_CACHE_MISSES] = 0x012e, /* L2_RQSTS:I */
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4, /* BR_INST_RETIRED */
+ [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5, /* BR_MISS_PRED_RETIRED */
+ [PERF_COUNT_HW_BUS_CYCLES] = 0x0062, /* BUS_DRDY_CLOCKS */
+ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00a2, /* RESOURCE_STALLS */
+
+};
+
+static __initconst u64 p6_hw_cache_event_ids
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] =
+{
+ [ C(L1D) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0043, /* DATA_MEM_REFS */
+ [ C(RESULT_MISS) ] = 0x0045, /* DCU_LINES_IN */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0x0f29, /* L2_LD:M:E:S:I */
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+ [ C(L1I ) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0080, /* IFU_IFETCH */
+ [ C(RESULT_MISS) ] = 0x0f28, /* L2_IFETCH:M:E:S:I */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+ [ C(LL ) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0x0025, /* L2_M_LINES_INM */
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+ [ C(DTLB) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0043, /* DATA_MEM_REFS */
+ [ C(RESULT_MISS) ] = 0,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+ [ C(ITLB) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0080, /* IFU_IFETCH */
+ [ C(RESULT_MISS) ] = 0x0085, /* ITLB_MISS */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+ [ C(BPU ) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED */
+ [ C(RESULT_MISS) ] = 0x00c5, /* BR_MISS_PRED_RETIRED */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
};
static u64 p6_pmu_event_map(int hw_event)
{
INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FLOPS */
INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
- INTEL_EVENT_CONSTRAINT(0x11, 0x1), /* FP_ASSIST */
+ INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
static inline void
p6_pmu_disable_event(struct perf_event *event)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
u64 val = P6_NOP_EVENT;
- if (cpuc->enabled)
- val |= ARCH_PERFMON_EVENTSEL_ENABLE;
-
(void)wrmsrl_safe(hwc->config_base, val);
}
static void p6_pmu_enable_event(struct perf_event *event)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
u64 val;
val = hwc->config;
- if (cpuc->enabled)
- val |= ARCH_PERFMON_EVENTSEL_ENABLE;
+
+ /*
+ * p6 only has a global event enable, set on PerfEvtSel0.
+ * We "disable" events by programming P6_NOP_EVENT
+ * and we rely on p6_pmu_enable_all() being called
+ * to actually enable the events.
+ */
(void)wrmsrl_safe(hwc->config_base, val);
}
x86_pmu = p6_pmu;
+ memcpy(hw_cache_event_ids, p6_hw_cache_event_ids,
+ sizeof(hw_cache_event_ids));
+
+
return 0;
}
memblock_add(ei->addr, ei->size);
}
+ /* throw away partial pages */
+ memblock_trim_memory(PAGE_SIZE);
+
memblock_dump_all();
}
ENTRY(xen_hypervisor_callback)
CFI_STARTPROC
- pushl_cfi $0
+ pushl_cfi $-1 /* orig_ax = -1 => not a system call */
SAVE_ALL
TRACE_IRQS_OFF
2: mov 8(%esp),%es
3: mov 12(%esp),%fs
4: mov 16(%esp),%gs
+ /* EAX == 0 => Category 1 (Bad segment)
+ EAX != 0 => Category 2 (Bad IRET) */
testl %eax,%eax
popl_cfi %eax
lea 16(%esp),%esp
CFI_ADJUST_CFA_OFFSET -16
jz 5f
addl $16,%esp
- jmp iret_exc # EAX != 0 => Category 2 (Bad IRET)
-5: pushl_cfi $0 # EAX == 0 => Category 1 (Bad segment)
+ jmp iret_exc
+5: pushl_cfi $-1 /* orig_ax = -1 => not a system call */
SAVE_ALL
jmp ret_from_exception
CFI_ENDPROC
*/
.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
- ASM_CLAC
XCPT_FRAME
+ ASM_CLAC
addq $-0x80,(%rsp) /* Adjust vector to [-256,-1] range */
interrupt do_IRQ
/* 0(%rsp): old_rsp-ARGOFFSET */
*/
.macro apicinterrupt num sym do_sym
ENTRY(\sym)
- ASM_CLAC
INTR_FRAME
+ ASM_CLAC
pushq_cfi $~(\num)
.Lcommon_\sym:
interrupt \do_sym
*/
.macro zeroentry sym do_sym
ENTRY(\sym)
- ASM_CLAC
INTR_FRAME
+ ASM_CLAC
PARAVIRT_ADJUST_EXCEPTION_FRAME
pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */
subq $ORIG_RAX-R15, %rsp
.macro paranoidzeroentry sym do_sym
ENTRY(\sym)
- ASM_CLAC
INTR_FRAME
+ ASM_CLAC
PARAVIRT_ADJUST_EXCEPTION_FRAME
pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */
subq $ORIG_RAX-R15, %rsp
#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
.macro paranoidzeroentry_ist sym do_sym ist
ENTRY(\sym)
- ASM_CLAC
INTR_FRAME
+ ASM_CLAC
PARAVIRT_ADJUST_EXCEPTION_FRAME
pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */
subq $ORIG_RAX-R15, %rsp
.macro errorentry sym do_sym
ENTRY(\sym)
- ASM_CLAC
XCPT_FRAME
+ ASM_CLAC
PARAVIRT_ADJUST_EXCEPTION_FRAME
subq $ORIG_RAX-R15, %rsp
CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
/* error code is on the stack already */
.macro paranoiderrorentry sym do_sym
ENTRY(\sym)
- ASM_CLAC
XCPT_FRAME
+ ASM_CLAC
PARAVIRT_ADJUST_EXCEPTION_FRAME
subq $ORIG_RAX-R15, %rsp
CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
CFI_RESTORE r11
addq $0x30,%rsp
CFI_ADJUST_CFA_OFFSET -0x30
- pushq_cfi $0
+ pushq_cfi $-1 /* orig_ax = -1 => not a system call */
SAVE_ALL
jmp error_exit
CFI_ENDPROC
break;
case KVM_PV_REASON_PAGE_NOT_PRESENT:
/* page is swapped out by the host. */
+ rcu_irq_enter();
+ exit_idle();
kvm_async_pf_task_wait((u32)read_cr2());
+ rcu_irq_exit();
break;
case KVM_PV_REASON_PAGE_READY:
rcu_irq_enter();
* Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
*
* Maintainers:
- * Andreas Herrmann <andreas.herrmann3@amd.com>
- * Borislav Petkov <borislav.petkov@amd.com>
+ * Andreas Herrmann <herrmann.der.user@googlemail.com>
+ * Borislav Petkov <bp@alien8.de>
*
* This driver allows to upgrade microcode on F10h AMD
* CPUs and later.
#define F1XH_MPB_MAX_SIZE 2048
#define F14H_MPB_MAX_SIZE 1824
#define F15H_MPB_MAX_SIZE 4096
+#define F16H_MPB_MAX_SIZE 3458
switch (c->x86) {
case 0x14:
case 0x15:
max_size = F15H_MPB_MAX_SIZE;
break;
+ case 0x16:
+ max_size = F16H_MPB_MAX_SIZE;
+ break;
default:
max_size = F1XH_MPB_MAX_SIZE;
break;
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/rcupdate.h>
+#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#define FLAG_MASK FLAG_MASK_32
+/*
+ * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
+ * when it traps. The previous stack will be directly underneath the saved
+ * registers, and 'sp/ss' won't even have been saved. Thus the '&regs->sp'.
+ *
+ * Now, if the stack is empty, '&regs->sp' is out of range. In this
+ * case we try to take the previous stack. To always return a non-null
+ * stack pointer we fall back to regs as stack if no previous stack
+ * exists.
+ *
+ * This is valid only for kernel mode traps.
+ */
+unsigned long kernel_stack_pointer(struct pt_regs *regs)
+{
+ unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
+ unsigned long sp = (unsigned long)&regs->sp;
+ struct thread_info *tinfo;
+
+ if (context == (sp & ~(THREAD_SIZE - 1)))
+ return sp;
+
+ tinfo = (struct thread_info *)context;
+ if (tinfo->previous_esp)
+ return tinfo->previous_esp;
+
+ return (unsigned long)regs;
+}
+EXPORT_SYMBOL_GPL(kernel_stack_pointer);
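A hypothetical layout makes the containment check concrete (a THREAD_SIZE of 8 KiB assumed, addresses invented):

	/* regs      = 0xc1234f80 -> context = regs & ~0x1fff = 0xc1234000
	 * &regs->sp = 0xc1234fbc -> same 8 KiB region, so it is returned as-is
	 *
	 * If regs sat at the very top of the stack, &regs->sp would point past
	 * the region; the code then falls back to previous_esp, and finally to
	 * regs itself, so callers always get a non-NULL stack pointer.
	 */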
+
static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
{
BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
#ifdef CONFIG_X86_64
if (max_pfn > max_low_pfn) {
int i;
- for (i = 0; i < e820.nr_map; i++) {
- struct e820entry *ei = &e820.map[i];
+ unsigned long start, end;
+ unsigned long start_pfn, end_pfn;
- if (ei->addr + ei->size <= 1UL << 32)
- continue;
+ for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn,
+ NULL) {
- if (ei->type == E820_RESERVED)
+ end = PFN_PHYS(end_pfn);
+ if (end <= (1UL<<32))
continue;
+ start = PFN_PHYS(start_pfn);
max_pfn_mapped = init_memory_mapping(
- ei->addr < 1UL << 32 ? 1UL << 32 : ei->addr,
- ei->addr + ei->size);
+ max((1UL<<32), start), end);
}
/* can we preserve max_low_pfn? */
arch_init_ideal_nops();
register_refined_jiffies(CLOCK_TICK_RATE);
+
+#ifdef CONFIG_EFI
+ /* Once setup is done above, disable efi_enabled on mismatched
+ * firmware/kernel architectures since there is no support for
+ * runtime services.
+ */
+ if (efi_enabled && IS_ENABLED(CONFIG_X86_64) != efi_64bit) {
+ pr_info("efi: Setup done, disabling due to 32/64-bit mismatch\n");
+ efi_unmap_memmap();
+ efi_enabled = 0;
+ }
+#endif
}
#ifdef CONFIG_X86_32
mce_notify_process();
#endif /* CONFIG_X86_64 && CONFIG_X86_MCE */
- if (thread_info_flags & _TIF_UPROBE) {
- clear_thread_flag(TIF_UPROBE);
+ if (thread_info_flags & _TIF_UPROBE)
uprobe_notify_resume(regs);
- }
/* deal with pending signal delivery */
if (thread_info_flags & _TIF_SIGPENDING)
/*
* Skip these instructions as per the currently known x86 ISA.
- * 0x66* { 0x90 | 0x0f 0x1f | 0x0f 0x19 | 0x87 0xc0 }
+ * rep=0x66*; nop=0x90
*/
static bool __skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
int i;
for (i = 0; i < MAX_UINSN_BYTES; i++) {
- if ((auprobe->insn[i] == 0x66))
+ if (auprobe->insn[i] == 0x66)
continue;
if (auprobe->insn[i] == 0x90)
return true;
- if (i == (MAX_UINSN_BYTES - 1))
- break;
-
- if ((auprobe->insn[i] == 0x0f) && (auprobe->insn[i+1] == 0x1f))
- return true;
-
- if ((auprobe->insn[i] == 0x0f) && (auprobe->insn[i+1] == 0x19))
- return true;
-
- if ((auprobe->insn[i] == 0x87) && (auprobe->insn[i+1] == 0xc0))
- return true;
-
break;
}
return false;
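For illustration, how the simplified check classifies a few encodings (byte sequences taken from the opcodes named in this hunk):

	/* {0x90}             -> true  (single-byte nop)
	 * {0x66, 0x66, 0x90} -> true  (0x66 operand-size prefixes are stepped over)
	 * {0x0f, 0x1f, 0x00} -> false (multi-byte nop, no longer special-cased)
	 * {0x87, 0xc0}       -> false (xchg %eax,%eax form, likewise dropped)
	 */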
{
struct kvm_cpuid_entry2 *best;
+ if (!static_cpu_has(X86_FEATURE_XSAVE))
+ return 0;
+
best = kvm_find_cpuid_entry(vcpu, 1, 0);
return best && (best->ecx & bit(X86_FEATURE_XSAVE));
}
vcpu->arch.apic_base = value;
if (apic_x2apic_mode(apic)) {
u32 id = kvm_apic_id(apic);
- u32 ldr = ((id & ~0xf) << 16) | (1 << (id & 0xf));
+ u32 ldr = ((id >> 4) << 16) | (1 << (id & 0xf));
kvm_apic_set_ldr(apic, ldr);
}
apic->base_address = apic->vcpu->arch.apic_base &
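A worked instance of the corrected derivation (APIC ID invented; the x2APIC logical ID packs the cluster in bits 31:16 and a one-hot member bit in bits 15:0):

	u32 id  = 0x26;                                  /* hypothetical x2APIC ID */
	u32 ldr = ((id >> 4) << 16) | (1 << (id & 0xf)); /* (0x2 << 16) | (1 << 6) = 0x00020040 */
	/* the old '(id & ~0xf) << 16' would have produced 0x00200040 - wrong cluster field */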
}
}
- if (!is_error_pfn(pfn))
- kvm_release_pfn_clean(pfn);
+ kvm_release_pfn_clean(pfn);
}
static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
}
}
- exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
/* Exposing INVPCID only when PCID is exposed */
best = kvm_find_cpuid_entry(vcpu, 0x7, 0);
if (vmx_invpcid_supported() &&
best && (best->ebx & bit(X86_FEATURE_INVPCID)) &&
guest_cpuid_has_pcid(vcpu)) {
+ exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
exec_control |= SECONDARY_EXEC_ENABLE_INVPCID;
vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
exec_control);
} else {
- exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID;
- vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
- exec_control);
+ if (cpu_has_secondary_exec_ctrls()) {
+ exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
+ exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID;
+ vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
+ exec_control);
+ }
if (best)
best->ebx &= ~bit(X86_FEATURE_INVPCID);
}
{
struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0];
- memcpy(vcpu->run->mmio.data, frag->data, frag->len);
+ memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len));
return X86EMUL_CONTINUE;
}
bytes -= handled;
val += handled;
- while (bytes) {
- unsigned now = min(bytes, 8U);
-
- frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++];
- frag->gpa = gpa;
- frag->data = val;
- frag->len = now;
-
- gpa += now;
- val += now;
- bytes -= now;
- }
+ WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS);
+ frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++];
+ frag->gpa = gpa;
+ frag->data = val;
+ frag->len = bytes;
return X86EMUL_CONTINUE;
}
vcpu->mmio_needed = 1;
vcpu->mmio_cur_fragment = 0;
- vcpu->run->mmio.len = vcpu->mmio_fragments[0].len;
+ vcpu->run->mmio.len = min(8u, vcpu->mmio_fragments[0].len);
vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write;
vcpu->run->exit_reason = KVM_EXIT_MMIO;
vcpu->run->mmio.phys_addr = gpa;
*
* read:
* for each fragment
- * write gpa, len
- * exit
- * copy data
+ * for each mmio piece in the fragment
+ * write gpa, len
+ * exit
+ * copy data
* execute insn
*
* write:
* for each fragment
- * write gpa, len
- * copy data
- * exit
+ * for each mmio piece in the fragment
+ * write gpa, len
+ * copy data
+ * exit
*/
static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
struct kvm_mmio_fragment *frag;
+ unsigned len;
BUG_ON(!vcpu->mmio_needed);
/* Complete previous fragment */
- frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment++];
+ frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment];
+ len = min(8u, frag->len);
if (!vcpu->mmio_is_write)
- memcpy(frag->data, run->mmio.data, frag->len);
+ memcpy(frag->data, run->mmio.data, len);
+
+ if (frag->len <= 8) {
+ /* Switch to the next fragment. */
+ frag++;
+ vcpu->mmio_cur_fragment++;
+ } else {
+ /* Go forward to the next mmio piece. */
+ frag->data += len;
+ frag->gpa += len;
+ frag->len -= len;
+ }
+
if (vcpu->mmio_cur_fragment == vcpu->mmio_nr_fragments) {
vcpu->mmio_needed = 0;
if (vcpu->mmio_is_write)
vcpu->mmio_read_completed = 1;
return complete_emulated_io(vcpu);
}
- /* Initiate next fragment */
- ++frag;
+
run->exit_reason = KVM_EXIT_MMIO;
run->mmio.phys_addr = frag->gpa;
if (vcpu->mmio_is_write)
- memcpy(run->mmio.data, frag->data, frag->len);
- run->mmio.len = frag->len;
+ memcpy(run->mmio.data, frag->data, min(8u, frag->len));
+ run->mmio.len = min(8u, frag->len);
run->mmio.is_write = vcpu->mmio_is_write;
vcpu->arch.complete_userspace_io = complete_emulated_mmio;
return 0;
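A hypothetical trace of one 16-byte write fragment shows the piece-wise walk (addresses invented; run->mmio.data holds 8 bytes, hence the min(8u, ...) clamps):

	/* frag = { .gpa = 0x1000, .data = buf, .len = 16 }
	 *
	 * exit 1: run->mmio = { .phys_addr = 0x1000, .len = 8 }
	 *   on completion, frag->len (16) > 8: advance the piece ->
	 *   frag = { .gpa = 0x1008, .data = buf + 8, .len = 8 }
	 * exit 2: run->mmio = { .phys_addr = 0x1008, .len = 8 }
	 *   on completion, frag->len (8) <= 8: switch to the next fragment
	 */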
int pending_vec, max_bits, idx;
struct desc_ptr dt;
+ if (!guest_cpuid_has_xsave(vcpu) && (sregs->cr4 & X86_CR4_OSXSAVE))
+ return -EINVAL;
+
dt.size = sregs->idt.limit;
dt.address = sregs->idt.base;
kvm_x86_ops->set_idt(vcpu, &dt);
unsigned page_size_mask;
};
-static void __init find_early_table_space(struct map_range *mr, unsigned long end,
- int use_pse, int use_gbpages)
+/*
+ * First calculate space needed for kernel direct mapping page tables to cover
+ * mr[0].start to mr[nr_range - 1].end, while accounting for possible 2M and 1GB
+ * pages. Then find enough contiguous space for those page tables.
+ */
+static void __init find_early_table_space(struct map_range *mr, int nr_range)
{
- unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
+ int i;
+ unsigned long puds = 0, pmds = 0, ptes = 0, tables;
+ unsigned long start = 0, good_end;
phys_addr_t base;
- puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
- tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
-
- if (use_gbpages) {
- unsigned long extra;
-
- extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT);
- pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
- } else
- pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
+ for (i = 0; i < nr_range; i++) {
+ unsigned long range, extra;
- tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
+ range = mr[i].end - mr[i].start;
+ puds += (range + PUD_SIZE - 1) >> PUD_SHIFT;
- if (use_pse) {
- unsigned long extra;
+ if (mr[i].page_size_mask & (1 << PG_LEVEL_1G)) {
+ extra = range - ((range >> PUD_SHIFT) << PUD_SHIFT);
+ pmds += (extra + PMD_SIZE - 1) >> PMD_SHIFT;
+ } else {
+ pmds += (range + PMD_SIZE - 1) >> PMD_SHIFT;
+ }
- extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
+ if (mr[i].page_size_mask & (1 << PG_LEVEL_2M)) {
+ extra = range - ((range >> PMD_SHIFT) << PMD_SHIFT);
#ifdef CONFIG_X86_32
- extra += PMD_SIZE;
+ extra += PMD_SIZE;
#endif
- /* The first 2/4M doesn't use large pages. */
- if (mr->start < PMD_SIZE)
- extra += mr->end - mr->start;
-
- ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
- } else
- ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ ptes += (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ } else {
+ ptes += (range + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ }
+ }
+ tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
+ tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);
#ifdef CONFIG_X86_32
pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT);
printk(KERN_DEBUG "kernel direct mapping tables up to %#lx @ [mem %#010lx-%#010lx]\n",
- end - 1, pgt_buf_start << PAGE_SHIFT,
+ mr[nr_range - 1].end - 1, pgt_buf_start << PAGE_SHIFT,
(pgt_buf_top << PAGE_SHIFT) - 1);
}
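A worked instance of the sizing loop, assuming x86-64 shifts (PUD_SHIFT 30, PMD_SHIFT 21) and 8-byte table entries:

	/* one 1 GiB-aligned range with 2M pages enabled and 1G pages disabled:
	 *   puds  = (1 GiB + PUD_SIZE - 1) >> 30 = 1   -> 8 B, rounds up to 4 KiB
	 *   pmds  = 1 GiB >> 21 = 512                  -> 4 KiB
	 *   extra = 0 (the range is 2M-aligned)        -> ptes = 0
	 *   tables = 8 KiB of early page-table space for this range
	 */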
* nodes are discovered.
*/
if (!after_bootmem)
- find_early_table_space(&mr[0], end, use_pse, use_gbpages);
+ find_early_table_space(mr, nr_range);
for (i = 0; i < nr_range; i++)
ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
* these mappings are more intelligent.
*/
if (pte_val(*pte)) {
- pages++;
+ if (!after_bootmem)
+ pages++;
continue;
}
* attributes.
*/
if (page_size_mask & (1 << PG_LEVEL_2M)) {
+ if (!after_bootmem)
+ pages++;
last_map_addr = next;
continue;
}
* attributes.
*/
if (page_size_mask & (1 << PG_LEVEL_1G)) {
+ if (!after_bootmem)
+ pages++;
last_map_addr = next;
continue;
}
}
if (end == TLB_FLUSH_ALL || tlb_flushall_shift == -1
- || vmflag == VM_HUGETLB) {
+ || vmflag & VM_HUGETLB) {
local_flush_tlb();
goto flush_all;
}
val |= counter_config->extra;
event &= model->event_mask ? model->event_mask : 0xFF;
val |= event & 0xFF;
- val |= (event & 0x0F00) << 24;
+ val |= (u64)(event & 0x0F00) << 24;
return val;
}
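The new (u64) cast matters because the high nibble of the event select lands above bit 31; a minimal sketch (event value invented, destination bits per the AMD PerfEvtSel layout):

	u32 event = 0x1d6;                        /* hypothetical 12-bit event select */
	u64 lost  = (event & 0x0f00) << 24;       /* 32-bit shift: 0x100 << 24 wraps to 0 */
	u64 kept  = (u64)(event & 0x0f00) << 24;  /* 0x100000000: EventSelect[11:8] -> bits 35:32 */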
reg_read(reg, value);
}
+static void reg_noirq_read(struct sim_dev_reg *reg, u32 *value)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&pci_config_lock, flags);
+ /* force interrupt pin value to 0 */
+ *value = reg->sim_reg.value & 0xfff00ff;
+ raw_spin_unlock_irqrestore(&pci_config_lock, flags);
+}
+
static struct sim_dev_reg bus1_fixups[] = {
DEFINE_REG(2, 0, 0x10, (16*MB), reg_init, reg_read, reg_write)
DEFINE_REG(2, 0, 0x14, (256), reg_init, reg_read, reg_write)
DEFINE_REG(11, 5, 0x10, (64*KB), reg_init, reg_read, reg_write)
DEFINE_REG(11, 6, 0x10, (256), reg_init, reg_read, reg_write)
DEFINE_REG(11, 7, 0x10, (64*KB), reg_init, reg_read, reg_write)
+ DEFINE_REG(11, 7, 0x3c, 256, reg_init, reg_noirq_read, reg_write)
DEFINE_REG(12, 0, 0x10, (128*KB), reg_init, reg_read, reg_write)
DEFINE_REG(12, 0, 0x14, (256), reg_init, reg_read, reg_write)
DEFINE_REG(12, 1, 0x10, (1024), reg_init, reg_read, reg_write)
DEFINE_REG(16, 0, 0x10, (64*KB), reg_init, reg_read, reg_write)
DEFINE_REG(16, 0, 0x14, (64*MB), reg_init, reg_read, reg_write)
DEFINE_REG(16, 0, 0x18, (64*MB), reg_init, reg_read, reg_write)
+ DEFINE_REG(16, 0, 0x3c, 256, reg_init, reg_noirq_read, reg_write)
DEFINE_REG(17, 0, 0x10, (128*KB), reg_init, reg_read, reg_write)
DEFINE_REG(18, 0, 0x10, (1*KB), reg_init, reg_read, reg_write)
+ DEFINE_REG(18, 0, 0x3c, 256, reg_init, reg_noirq_read, reg_write)
};
static void __init init_sim_regs(void)
#include <asm/i8259.h>
#include <asm/io.h>
#include <asm/io_apic.h>
+#include <asm/emergency-restart.h>
static int ce4100_i8042_detect(void)
{
return 0;
}
+/*
+ * The CE4100 platform has an internal 8051 Microcontroller which is
+ * responsible for signaling to the external Power Management Unit the
+ * intention to reset, reboot or power off the system. This 8051 device has
+ * its command register mapped at I/O port 0xcf9 and the value 0x4 is used
+ * to power off the system.
+ */
+static void ce4100_power_off(void)
+{
+ outb(0x4, 0xcf9);
+}
+
#ifdef CONFIG_SERIAL_8250
static unsigned int mem_serial_in(struct uart_port *p, int offset)
x86_init.mpparse.find_smp_config = x86_init_noop;
x86_init.pci.init = ce4100_pci_init;
+ /*
+ * By default, the reboot method is ACPI, which the CE4100
+ * bootloader CEFDK supports via FADT.ResetReg Address and
+ * ResetValue. The bootloader will, however, issue a system
+ * power off instead of a reboot. Using BOOT_KBD ensures the
+ * system reboots as expected.
+ */
+ reboot_type = BOOT_KBD;
+
#ifdef CONFIG_X86_IO_APIC
x86_init.pci.init_irq = sdv_pci_init;
x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc_nocheck;
#endif
+
+ pm_power_off = ce4100_power_off;
}
struct efi_memory_map memmap;
bool efi_64bit;
-static bool efi_native;
static struct efi efi_phys __initdata;
static efi_system_table_t efi_systab __initdata;
+static inline bool efi_is_native(void)
+{
+ return IS_ENABLED(CONFIG_X86_64) == efi_64bit;
+}
+
static int __init setup_noefi(char *arg)
{
efi_enabled = 0;
}
}
-static void __init efi_unmap_memmap(void)
+void __init efi_unmap_memmap(void)
{
if (memmap.map) {
early_iounmap(memmap.map, memmap.nr_map * memmap.desc_size);
{
void *p;
- if (!efi_native)
+ if (!efi_is_native())
return;
for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
return;
}
efi_phys.systab = (efi_system_table_t *)boot_params.efi_info.efi_systab;
- efi_native = !efi_64bit;
#else
efi_phys.systab = (efi_system_table_t *)
(boot_params.efi_info.efi_systab |
((__u64)boot_params.efi_info.efi_systab_hi<<32));
- efi_native = efi_64bit;
#endif
if (efi_systab_init(efi_phys.systab)) {
* that doesn't match the kernel 32/64-bit mode.
*/
- if (!efi_native)
+ if (!efi_is_native())
pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n");
else if (efi_runtime_init()) {
efi_enabled = 0;
return;
}
#ifdef CONFIG_X86_32
- if (efi_native) {
+ if (efi_is_native()) {
x86_platform.get_wallclock = efi_get_time;
x86_platform.set_wallclock = efi_set_rtc_mmss;
}
return NULL;
}
+void efi_memory_uc(u64 addr, unsigned long size)
+{
+ unsigned long page_shift = 1UL << EFI_PAGE_SHIFT;
+ u64 npages;
+
+ npages = round_up(size, page_shift) / page_shift;
+ memrange_efi_to_native(&addr, &npages);
+ set_memory_uc(addr, npages);
+}
+
/*
* This function will switch the EFI runtime services to virtual mode.
* Essentially, look through the EFI memmap and map every region that
efi_memory_desc_t *md, *prev_md = NULL;
efi_status_t status;
unsigned long size;
- u64 end, systab, addr, npages, end_pfn;
+ u64 end, systab, end_pfn;
void *p, *va, *new_memmap = NULL;
int count = 0;
* non-native EFI
*/
- if (!efi_native) {
+ if (!efi_is_native()) {
efi_unmap_memmap();
return;
}
end_pfn = PFN_UP(end);
if (end_pfn <= max_low_pfn_mapped
|| (end_pfn > (1UL << (32 - PAGE_SHIFT))
- && end_pfn <= max_pfn_mapped))
+ && end_pfn <= max_pfn_mapped)) {
va = __va(md->phys_addr);
- else
- va = efi_ioremap(md->phys_addr, size, md->type);
+
+ if (!(md->attribute & EFI_MEMORY_WB))
+ efi_memory_uc((u64)(unsigned long)va, size);
+ } else
+ va = efi_ioremap(md->phys_addr, size,
+ md->type, md->attribute);
md->virt_addr = (u64) (unsigned long) va;
continue;
}
- if (!(md->attribute & EFI_MEMORY_WB)) {
- addr = md->virt_addr;
- npages = md->num_pages;
- memrange_efi_to_native(&addr, &npages);
- set_memory_uc(addr, npages);
- }
-
systab = (u64) (unsigned long) efi_phys.systab;
if (md->phys_addr <= systab && systab < end) {
systab += md->virt_addr - md->phys_addr;
}
void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
- u32 type)
+ u32 type, u64 attribute)
{
unsigned long last_map_pfn;
last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size);
if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) {
unsigned long top = last_map_pfn << PAGE_SHIFT;
- efi_ioremap(top, size - (top - phys_addr), type);
+ efi_ioremap(top, size - (top - phys_addr), type, attribute);
}
+ if (!(attribute & EFI_MEMORY_WB))
+ efi_memory_uc((u64)(unsigned long)__va(phys_addr), size);
+
return (void __iomem *)__va(phys_addr);
}
#include "smp.h"
#include "multicalls.h"
-#include <xen/events.h>
-
EXPORT_SYMBOL_GPL(hypercall_page);
DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
return this_cpu_read(xen_vcpu_info.arch.cr2);
}
+void xen_flush_tlb_all(void)
+{
+ struct mmuext_op *op;
+ struct multicall_space mcs;
+
+ trace_xen_mmu_flush_tlb_all(0);
+
+ preempt_disable();
+
+ mcs = xen_mc_entry(sizeof(*op));
+
+ op = mcs.args;
+ op->cmd = MMUEXT_TLB_FLUSH_ALL;
+ MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+
+ xen_mc_issue(PARAVIRT_LAZY_MMU);
+
+ preempt_enable();
+}
static void xen_flush_tlb(void)
{
struct mmuext_op *op;
err = 0;
out:
- flush_tlb_all();
+ xen_flush_tlb_all();
return err;
}
select GENERIC_CPU_DEVICES
select MODULES_USE_ELF_RELA
select GENERIC_PCI_IOMAP
+ select GENERIC_KERNEL_THREAD
+ select GENERIC_KERNEL_EXECVE
select ARCH_WANT_OPTIONAL_GPIOLIB
help
Xtensa processors are 32-bit RISC machines designed by Tensilica
static inline void iounmap(volatile void __iomem *addr)
{
}
+
+#define virt_to_bus virt_to_phys
+#define bus_to_virt phys_to_virt
+
#endif /* CONFIG_MMU */
/*
/* Clearing a0 terminates the backtrace. */
#define start_thread(regs, new_pc, new_sp) \
+ memset(regs, 0, sizeof(*regs)); \
regs->pc = new_pc; \
regs->ps = USER_PS_VALUE; \
regs->areg[1] = new_sp; \
/* Free all resources held by a thread. */
#define release_thread(thread) do { } while(0)
-/* Create a kernel thread without removing it from tasklists */
-extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
-
/* Copy and release all segment info associated with a VM */
#define copy_segments(p, mm) do { } while(0)
#define release_segments(mm) do { } while(0)
struct pt_regs;
struct sigaction;
-asmlinkage long xtensa_execve(char*, char**, char**, struct pt_regs*);
+asmlinkage long sys_execve(char*, char**, char**, struct pt_regs*);
asmlinkage long xtensa_clone(unsigned long, unsigned long, struct pt_regs*);
asmlinkage long xtensa_ptrace(long, long, long, long);
asmlinkage long xtensa_sigreturn(struct pt_regs*);
-/*
- * include/asm-xtensa/unistd.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
+#ifndef _XTENSA_UNISTD_H
+#define _XTENSA_UNISTD_H
+#define __ARCH_WANT_SYS_EXECVE
#include <uapi/asm/unistd.h>
-
/*
* "Conditional" syscalls
*
#define __IGNORE_mmap /* use mmap2 */
#define __IGNORE_vfork /* use clone */
#define __IGNORE_fadvise64 /* use fadvise64_64 */
+
+#endif /* _XTENSA_UNISTD_H */
-/*
- * include/asm-xtensa/unistd.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2012 Tensilica Inc.
- */
-
-#ifndef _UAPI_XTENSA_UNISTD_H
+#if !defined(_UAPI_XTENSA_UNISTD_H) || defined(__SYSCALL)
#define _UAPI_XTENSA_UNISTD_H
#ifndef __SYSCALL
#define __NR_clone 116
__SYSCALL(116, xtensa_clone, 5)
#define __NR_execve 117
-__SYSCALL(117, xtensa_execve, 3)
+__SYSCALL(117, sys_execve, 3)
#define __NR_exit 118
__SYSCALL(118, sys_exit, 1)
#define __NR_exit_group 119
#define SYS_XTENSA_COUNT 5 /* count */
+#undef __SYSCALL
+
#endif /* _UAPI_XTENSA_UNISTD_H */
/*
- * Create a kernel thread
- *
- * int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
- * a2 a2 a3 a4
- */
-
-ENTRY(kernel_thread)
- entry a1, 16
-
- mov a5, a2 # preserve fn over syscall
- mov a7, a3 # preserve args over syscall
-
- movi a3, _CLONE_VM | _CLONE_UNTRACED
- movi a2, __NR_clone
- or a6, a4, a3 # arg0: flags
- mov a3, a1 # arg1: sp
- syscall
-
- beq a3, a1, 1f # branch if parent
- mov a6, a7 # args
- callx4 a5 # fn(args)
-
- movi a2, __NR_exit
- syscall # return value of fn(args) still in a6
-
-1: retw
-
-/*
- * Do a system call from kernel instead of calling sys_execve, so we end up
- * with proper pt_regs.
- *
- * int kernel_execve(const char *fname, char *const argv[], charg *const envp[])
- * a2 a2 a3 a4
- */
-
-ENTRY(kernel_execve)
- entry a1, 16
- mov a6, a2 # arg0 is in a6
- movi a2, __NR_execve
- syscall
-
- retw
-
-/*
* Task switch.
*
* struct task* _switch_to (struct task* prev, struct task* next)
j common_exception_return
+/*
+ * Kernel thread creation helper
+ * On entry, set up by copy_thread: a2 = thread_fn, a3 = thread_fn arg
+ * left from _switch_to: a6 = prev
+ */
+ENTRY(ret_from_kernel_thread)
+
+ call4 schedule_tail
+ mov a6, a3
+ callx4 a2
+ j common_exception_return
+
+ENDPROC(ret_from_kernel_thread)
#include <asm/regs.h>
extern void ret_from_fork(void);
+extern void ret_from_kernel_thread(void);
struct task_struct *current_set[NR_CPUS] = {&init_task, };
/*
* Copy thread.
*
+ * There are two modes in which this function is called:
+ * 1) Userspace thread creation,
+ * regs != NULL, usp_thread_fn is userspace stack pointer.
+ * It is expected to copy parent regs (in case CLONE_VM is not set
+ * in the clone_flags) and set up passed usp in the childregs.
+ * 2) Kernel thread creation,
+ * regs == NULL, usp_thread_fn is the function to run in the new thread
+ * and thread_fn_arg is its parameter.
+ * childregs are not used for the kernel threads.
+ *
* The stack layout for the new thread looks like this:
*
- * +------------------------+ <- sp in childregs (= tos)
+ * +------------------------+
* | childregs |
* +------------------------+ <- thread.sp = sp in dummy-frame
* | dummy-frame | (saved in dummy-frame spill-area)
* +------------------------+
*
- * We create a dummy frame to return to ret_from_fork:
- * a0 points to ret_from_fork (simulating a call4)
+ * We create a dummy frame to return to either ret_from_fork or
+ * ret_from_kernel_thread:
+ * a0 points to ret_from_fork/ret_from_kernel_thread (simulating a call4)
* sp points to itself (thread.sp)
- * a2, a3 are unused.
+ * a2, a3 are unused for userspace threads,
+ * a2 points to thread_fn, a3 holds thread_fn arg for kernel threads.
*
* Note: This is a pristine frame, so we don't need any spill region on top of
* childregs.
* involved. Much simpler to just not copy those live frames across.
*/
-int copy_thread(unsigned long clone_flags, unsigned long usp,
- unsigned long unused,
- struct task_struct * p, struct pt_regs * regs)
+int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
+ unsigned long thread_fn_arg,
+ struct task_struct *p, struct pt_regs *unused)
{
- struct pt_regs *childregs;
- unsigned long tos;
- int user_mode = user_mode(regs);
+ struct pt_regs *childregs = task_pt_regs(p);
#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
struct thread_info *ti;
#endif
- /* Set up new TSS. */
- tos = (unsigned long)task_stack_page(p) + THREAD_SIZE;
- if (user_mode)
- childregs = (struct pt_regs*)(tos - PT_USER_SIZE);
- else
- childregs = (struct pt_regs*)tos - 1;
-
- /* This does not copy all the regs. In a bout of brilliance or madness,
- ARs beyond a0-a15 exist past the end of the struct. */
- *childregs = *regs;
-
/* Create a call4 dummy-frame: a0 = 0, a1 = childregs. */
*((int*)childregs - 3) = (unsigned long)childregs;
*((int*)childregs - 4) = 0;
- childregs->areg[2] = 0;
- p->set_child_tid = p->clear_child_tid = NULL;
- p->thread.ra = MAKE_RA_FOR_CALL((unsigned long)ret_from_fork, 0x1);
p->thread.sp = (unsigned long)childregs;
- if (user_mode(regs)) {
+ if (!(p->flags & PF_KTHREAD)) {
+ struct pt_regs *regs = current_pt_regs();
+ unsigned long usp = usp_thread_fn ?
+ usp_thread_fn : regs->areg[1];
+ p->thread.ra = MAKE_RA_FOR_CALL(
+ (unsigned long)ret_from_fork, 0x1);
+
+ /* This does not copy all the regs.
+ * In a bout of brilliance or madness,
+ * ARs beyond a0-a15 exist past the end of the struct.
+ */
+ *childregs = *regs;
childregs->areg[1] = usp;
+ childregs->areg[2] = 0;
+
+ /* When sharing memory with the parent thread, the child
+ usually starts on a pristine stack, so we have to reset
+ windowbase, windowstart and wmask.
+ (Note that such a new thread is required to always create
+ an initial call4 frame)
+ The exception is vfork, where the new thread continues to
+ run on the parent's stack until it calls execve. This could
+ be a call8 or call12, which requires a legal stack frame
+ of the previous caller for the overflow handlers to work.
+ (Note that it's always legal to overflow live registers).
+ In this case, ensure to spill at least the stack pointer
+ of that frame. */
+
if (clone_flags & CLONE_VM) {
- childregs->wmask = 1; /* can't share live windows */
+ /* check that caller window is live and same stack */
+ int len = childregs->wmask & ~0xf;
+ if (regs->areg[1] == usp && len != 0) {
+ int callinc = (regs->areg[0] >> 30) & 3;
+ int caller_ars = XCHAL_NUM_AREGS - callinc * 4;
+ put_user(regs->areg[caller_ars+1],
+ (unsigned __user*)(usp - 12));
+ }
+ childregs->wmask = 1;
+ childregs->windowstart = 1;
+ childregs->windowbase = 0;
} else {
int len = childregs->wmask & ~0xf;
memcpy(&childregs->areg[XCHAL_NUM_AREGS - len/4],
// FIXME: we need to set THREADPTR in thread_info...
if (clone_flags & CLONE_SETTLS)
childregs->areg[2] = childregs->areg[6];
-
} else {
- /* In kernel space, we start a new thread with a new stack. */
- childregs->wmask = 1;
- childregs->areg[1] = tos;
+ p->thread.ra = MAKE_RA_FOR_CALL(
+ (unsigned long)ret_from_kernel_thread, 1);
+
+ /* pass parameters to ret_from_kernel_thread:
+ * a2 = thread_fn, a3 = thread_fn arg
+ */
+ *((int *)childregs - 1) = thread_fn_arg;
+ *((int *)childregs - 2) = usp_thread_fn;
+
+ /* Childregs are only used when we're going to userspace
+ * in which case start_thread will set them up.
+ */
}
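The four words stored below childregs form the dummy frame's spill area; reconstructed here for illustration (the register mapping is assumed from the windowed-ABI a0..a3 save order and the ret_from_kernel_thread comment above):

	/* ((int *)childregs)[-4] = 0              -> a0, terminates the backtrace
	 * ((int *)childregs)[-3] = childregs      -> a1, the stack pointer
	 * ((int *)childregs)[-2] = thread_fn      -> a2 (kernel threads only)
	 * ((int *)childregs)[-1] = thread_fn_arg  -> a3 (kernel threads only)
	 */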
#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
void __user *child_tid, long a5,
struct pt_regs *regs)
{
- if (!newsp)
- newsp = regs->areg[1];
return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
}
-
-/*
- * xtensa_execve() executes a new program.
- */
-
-asmlinkage
-long xtensa_execve(const char __user *name,
- const char __user *const __user *argv,
- const char __user *const __user *envp,
- long a3, long a4, long a5,
- struct pt_regs *regs)
-{
- long error;
- struct filename *filename;
-
- filename = getname(name);
- error = PTR_ERR(filename);
- if (IS_ERR(filename))
- goto out;
- error = do_execve(filename->name, argv, envp, regs);
- putname(filename);
-out:
- return error;
-}
-
syscall_t sys_call_table[__NR_syscall_count] /* FIXME __cacheline_aligned */= {
[0 ... __NR_syscall_count - 1] = (syscall_t)&sys_ni_syscall,
-#undef __SYSCALL
#define __SYSCALL(nr,symbol,nargs) [ nr ] = (syscall_t)symbol,
-#undef __KERNEL_SYSCALLS__
-#include <asm/unistd.h>
+#include <uapi/asm/unistd.h>
};
asmlinkage long xtensa_shmat(int shmid, char __user *shmaddr, int shmflg)
return (long)ret;
}
-asmlinkage long xtensa_fadvise64_64(int fd, int advice, unsigned long long offset, unsigned long long len)
+asmlinkage long xtensa_fadvise64_64(int fd, int advice,
+ unsigned long long offset, unsigned long long len)
{
return sys_fadvise64_64(fd, offset, len, advice);
}
EXPORT_SYMBOL(clear_page);
EXPORT_SYMBOL(copy_page);
-EXPORT_SYMBOL(kernel_thread);
EXPORT_SYMBOL(empty_zero_page);
/*
config BLK_DEV_THROTTLING
bool "Block layer bio throttling support"
- depends on BLK_CGROUP=y && EXPERIMENTAL
+ depends on BLK_CGROUP=y
default n
---help---
Block layer bio throttling support. It can be used to limit
blkg_destroy(blkg);
spin_unlock(&blkcg->lock);
}
+
+ /*
+ * The root blkg is destroyed. Just clear the pointer since
+ * root_rl does not take a reference on the root blkg.
+ */
+ q->root_blkg = NULL;
+ q->root_rl.blkg = NULL;
}
static void blkg_rcu_free(struct rcu_head *rcu_head)
*/
if (rl == &q->root_rl) {
ent = &q->blkg_list;
+ /* There are no more block groups, hence no request lists */
+ if (list_empty(ent))
+ return NULL;
} else {
blkg = container_of(rl, struct blkcg_gq, rl);
ent = &blkg->q_node;
struct request *rqa = container_of(a, struct request, queuelist);
struct request *rqb = container_of(b, struct request, queuelist);
- return !(rqa->q <= rqb->q);
+ return !(rqa->q < rqb->q ||
+ (rqa->q == rqb->q && blk_rq_pos(rqa) < blk_rq_pos(rqb)));
}
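For illustration (sector positions invented; list_sort() places a after b when the cmp callback returns a positive value):

	/* two requests on the same queue, rqa at sector 2048, rqb at sector 1024:
	 *   rqa->q < rqb->q                  -> false
	 *   rqa->q == rqb->q && 2048 < 1024  -> false
	 *   return !(false || false) = 1     -> rqa sorts after rqb
	 */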
/*
rq_end_io_fn *done)
{
int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
+ bool is_pm_resume;
WARN_ON(irqs_disabled());
rq->rq_disk = bd_disk;
rq->end_io = done;
+ /*
+ * need to check this before __blk_run_queue(), because rq can
+ * be freed before that returns.
+ */
+ is_pm_resume = rq->cmd_type == REQ_TYPE_PM_RESUME;
spin_lock_irq(q->queue_lock);
__elv_add_request(q, rq, where);
__blk_run_queue(q);
/* the queue is stopped so it won't be run */
- if (rq->cmd_type == REQ_TYPE_PM_RESUME)
+ if (is_pm_resume)
q->request_fn(q);
spin_unlock_irq(q->queue_lock);
}
struct crypto_async_request *req, *backlog;
cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
- /* Only handle one request at a time to avoid hogging crypto
- * workqueue. preempt_disable/enable is used to prevent
- * being preempted by cryptd_enqueue_request() */
+ /*
+ * Only handle one request at a time to avoid hogging crypto workqueue.
+ * preempt_disable/enable is used to prevent being preempted by
+ * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent
+ * cryptd_enqueue_request() being accessed from software interrupts.
+ */
+ local_bh_disable();
preempt_disable();
backlog = crypto_get_backlog(&cpu_queue->queue);
req = crypto_dequeue_request(&cpu_queue->queue);
preempt_enable();
+ local_bh_enable();
if (!req)
return;
if (physical_node->node_id >= ACPI_MAX_PHYSICAL_NODE) {
retval = -ENOSPC;
mutex_unlock(&acpi_dev->physical_node_lock);
+ kfree(physical_node);
goto err;
}
acpi_bus_generate_proc_event(device, event, 0);
acpi_bus_generate_netlink_event(device->pnp.device_class,
dev_name(&device->dev), event, 0);
+ break;
default:
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Unsupported event [0x%x]\n", event));
acpi_video_bus_get_devices(struct acpi_video_bus *video,
struct acpi_device *device)
{
- int status;
+ int status = 0;
struct acpi_device *dev;
- status = acpi_video_device_enumerate(video);
- if (status)
- return status;
+ /*
+ * There are systems where the video module is known to work fine
+ * regardless of a broken _DOD, and ignoring the returned value here
+ * doesn't cause any issues later.
+ */
+ acpi_video_device_enumerate(video);
list_for_each_entry(dev, &device->children, node) {
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int ahci_suspend(struct device *dev)
{
struct ahci_platform_data *pdata = dev_get_platdata(dev);
struct acpi_device *acpi_dev;
struct acpi_device_power_state *states;
- if (ap->flags & ATA_FLAG_ACPI_SATA)
- ata_dev = &ap->link.device[sdev->channel];
- else
+ if (ap->flags & ATA_FLAG_ACPI_SATA) {
+ if (!sata_pmp_attached(ap))
+ ata_dev = &ap->link.device[sdev->id];
+ else
+ ata_dev = &ap->pmp_link[sdev->channel].device[sdev->id];
+ } else {
ata_dev = &ap->link.device[sdev->id];
+ }
*handle = ata_dev_acpi_handle(ata_dev);
if (xfer_mode == t->mode)
return t;
+
+ WARN_ONCE(true, "%s: unable to find timing for xfer_mode 0x%x\n",
+ __func__, xfer_mode);
+
return NULL;
}
{
sdev->use_10_for_rw = 1;
sdev->use_10_for_ms = 1;
+ sdev->no_report_opcodes = 1;
+ sdev->no_write_same = 1;
/* Schedule policy is determined by ->qc_defer() callback and
* it needs to see every deferred qc. Set dev_blocked to 1 to
return ret;
}
+ ret = clk_set_rate(acdev->clk, 166000000);
+ if (ret) {
+ dev_warn(acdev->host->dev, "clock set rate failed");
+ return ret;
+ }
+
spin_lock_irqsave(&acdev->host->lock, flags);
/* configure CF interface clock */
writel((pdata->cf_if_clk <= CF_IF_CLK_200M) ? pdata->cf_if_clk :
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int arasan_cf_suspend(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
};
MODULE_DEVICE_TABLE(of, ahci_of_match);
-static int __init ahci_highbank_probe(struct platform_device *pdev)
+static int __devinit ahci_highbank_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ahci_host_priv *hpriv;
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int ahci_highbank_suspend(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
return 0;
}
+static int k2_sata_softreset(struct ata_link *link,
+ unsigned int *class, unsigned long deadline)
+{
+ u8 dmactl;
+ void __iomem *mmio = link->ap->ioaddr.bmdma_addr;
+
+ dmactl = readb(mmio + ATA_DMA_CMD);
+
+ /* Clear the start bit */
+ if (dmactl & ATA_DMA_START) {
+ dmactl &= ~ATA_DMA_START;
+ writeb(dmactl, mmio + ATA_DMA_CMD);
+ }
+
+ return ata_sff_softreset(link, class, deadline);
+}
+
+static int k2_sata_hardreset(struct ata_link *link,
+ unsigned int *class, unsigned long deadline)
+{
+ u8 dmactl;
+ void __iomem *mmio = link->ap->ioaddr.bmdma_addr;
+
+ dmactl = readb(mmio + ATA_DMA_CMD);
+
+ /* Clear the start bit */
+ if (dmactl & ATA_DMA_START) {
+ dmactl &= ~ATA_DMA_START;
+ writeb(dmactl, mmio + ATA_DMA_CMD);
+ }
+
+ return sata_sff_hardreset(link, class, deadline);
+}
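The two resets clear ATA_DMA_START identically; a hedged refactor could share the sequence (helper name invented):

	static void k2_sata_stop_dma(struct ata_link *link)  /* hypothetical helper */
	{
		void __iomem *mmio = link->ap->ioaddr.bmdma_addr;
		u8 dmactl = readb(mmio + ATA_DMA_CMD);

		/* Clear the start bit if the engine was left running */
		if (dmactl & ATA_DMA_START)
			writeb(dmactl & ~ATA_DMA_START, mmio + ATA_DMA_CMD);
	}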
static void k2_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
{
static struct ata_port_operations k2_sata_ops = {
.inherits = &ata_bmdma_port_ops,
+ .softreset = k2_sata_softreset,
+ .hardreset = k2_sata_hardreset,
.sff_tf_load = k2_sata_tf_load,
.sff_tf_read = k2_sata_tf_read,
.sff_check_status = k2_stat_check_status,
res = loader_verify(lb, dev, rec);
if (res)
break;
+ rec = ihex_next_binrec(rec);
}
release_firmware(fw);
if (!res)
choice
prompt "Selected region size"
- default CMA_SIZE_SEL_ABSOLUTE
+ default CMA_SIZE_SEL_MBYTES
config CMA_SIZE_SEL_MBYTES
bool "Use mega bytes value only"
* This checks whether the memory was allocated from the per-device
* coherent memory pool and if so, maps that memory to the provided vma.
*
- * Returns 1 if we correctly mapped the memory, or 0 if
- * dma_release_coherent() should proceed with mapping memory from
- * generic pools.
+ * Returns 1 if we correctly mapped the memory, or 0 if the caller should
+ * proceed with mapping memory from generic pools.
*/
int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
void *vaddr, size_t size, int *ret)
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/page-isolation.h>
+#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/mm_types.h>
#include <linux/dma-contiguous.h>
-#ifndef SZ_1M
-#define SZ_1M (1 << 20)
-#endif
-
struct cma {
unsigned long base_pfn;
unsigned long count;
MODULE_DESCRIPTION("Multi purpose firmware loading support");
MODULE_LICENSE("GPL");
-static const char *fw_path[] = {
- "/lib/firmware/updates/" UTS_RELEASE,
- "/lib/firmware/updates",
- "/lib/firmware/" UTS_RELEASE,
- "/lib/firmware"
-};
-
-/* Don't inline this: 'struct kstat' is biggish */
-static noinline long fw_file_size(struct file *file)
-{
- struct kstat st;
- if (vfs_getattr(file->f_path.mnt, file->f_path.dentry, &st))
- return -1;
- if (!S_ISREG(st.mode))
- return -1;
- if (st.size != (long)st.size)
- return -1;
- return st.size;
-}
-
-static bool fw_read_file_contents(struct file *file, struct firmware *fw)
-{
- long size;
- char *buf;
-
- size = fw_file_size(file);
- if (size < 0)
- return false;
- buf = vmalloc(size);
- if (!buf)
- return false;
- if (kernel_read(file, 0, buf, size) != size) {
- vfree(buf);
- return false;
- }
- fw->data = buf;
- fw->size = size;
- return true;
-}
-
-static bool fw_get_filesystem_firmware(struct firmware *fw, const char *name)
-{
- int i;
- bool success = false;
- char *path = __getname();
-
- for (i = 0; i < ARRAY_SIZE(fw_path); i++) {
- struct file *file;
- snprintf(path, PATH_MAX, "%s/%s", fw_path[i], name);
-
- file = filp_open(path, O_RDONLY, 0);
- if (IS_ERR(file))
- continue;
- success = fw_read_file_contents(file, fw);
- fput(file);
- if (success)
- break;
- }
- __putname(path);
- return success;
-}
-
/* Builtin firmware support */
#ifdef CONFIG_FW_LOADER
FW_STATUS_ABORT,
};
+enum fw_buf_fmt {
+ VMALLOC_BUF, /* used in direct loading */
+ PAGE_BUF, /* used in loading via userspace */
+};
+
static int loading_timeout = 60; /* In seconds */
static inline long firmware_loading_timeout(void)
spinlock_t name_lock;
struct list_head fw_names;
- wait_queue_head_t wait_queue;
- int cnt;
struct delayed_work work;
struct notifier_block pm_notify;
struct completion completion;
struct firmware_cache *fwc;
unsigned long status;
+ enum fw_buf_fmt fmt;
void *data;
size_t size;
struct page **pages;
strcpy(buf->fw_id, fw_name);
buf->fwc = fwc;
init_completion(&buf->completion);
+ buf->fmt = VMALLOC_BUF;
pr_debug("%s: fw-%s buf=%p\n", __func__, fw_name, buf);
list_del(&buf->list);
spin_unlock(&fwc->lock);
- vunmap(buf->data);
- for (i = 0; i < buf->nr_pages; i++)
- __free_page(buf->pages[i]);
- kfree(buf->pages);
+
+ if (buf->fmt == PAGE_BUF) {
+ vunmap(buf->data);
+ for (i = 0; i < buf->nr_pages; i++)
+ __free_page(buf->pages[i]);
+ kfree(buf->pages);
+ } else
+ vfree(buf->data);
kfree(buf);
}
kref_put(&buf->ref, __fw_free_buf);
}
+/* direct firmware loading support */
+static const char *fw_path[] = {
+ "/lib/firmware/updates/" UTS_RELEASE,
+ "/lib/firmware/updates",
+ "/lib/firmware/" UTS_RELEASE,
+ "/lib/firmware"
+};
+
+/* Don't inline this: 'struct kstat' is biggish */
+static noinline long fw_file_size(struct file *file)
+{
+ struct kstat st;
+ if (vfs_getattr(file->f_path.mnt, file->f_path.dentry, &st))
+ return -1;
+ if (!S_ISREG(st.mode))
+ return -1;
+ if (st.size != (long)st.size)
+ return -1;
+ return st.size;
+}
+
+static bool fw_read_file_contents(struct file *file, struct firmware_buf *fw_buf)
+{
+ long size;
+ char *buf;
+
+ size = fw_file_size(file);
+ if (size < 0)
+ return false;
+ buf = vmalloc(size);
+ if (!buf)
+ return false;
+ if (kernel_read(file, 0, buf, size) != size) {
+ vfree(buf);
+ return false;
+ }
+ fw_buf->data = buf;
+ fw_buf->size = size;
+ return true;
+}
+
+static bool fw_get_filesystem_firmware(struct firmware_buf *buf)
+{
+ int i;
+ bool success = false;
+ char *path = __getname();
+
+ for (i = 0; i < ARRAY_SIZE(fw_path); i++) {
+ struct file *file;
+ snprintf(path, PATH_MAX, "%s/%s", fw_path[i], buf->fw_id);
+
+ file = filp_open(path, O_RDONLY, 0);
+ if (IS_ERR(file))
+ continue;
+ success = fw_read_file_contents(file, buf);
+ fput(file);
+ if (success)
+ break;
+ }
+ __putname(path);
+ return success;
+}
+
static struct firmware_priv *to_firmware_priv(struct device *dev)
{
return container_of(dev, struct firmware_priv, dev);
#ifndef PAGE_KERNEL_RO
#define PAGE_KERNEL_RO PAGE_KERNEL
#endif
+
+/* a given pages buffer should be mapped/unmapped only once */
+static int fw_map_pages_buf(struct firmware_buf *buf)
+{
+ if (buf->fmt != PAGE_BUF)
+ return 0;
+
+ if (buf->data)
+ vunmap(buf->data);
+ buf->data = vmap(buf->pages, buf->nr_pages, 0, PAGE_KERNEL_RO);
+ if (!buf->data)
+ return -ENOMEM;
+ return 0;
+}
+
/**
* firmware_loading_store - set value in the 'loading' control file
* @dev: device pointer
if (test_bit(FW_STATUS_LOADING, &fw_buf->status)) {
set_bit(FW_STATUS_DONE, &fw_buf->status);
clear_bit(FW_STATUS_LOADING, &fw_buf->status);
+
+ /*
+ * Several loading requests may be pending on
+ * the same firmware buf, so let all requests
+ * see the mapped 'buf->data' once the loading
+ * is completed.
+ */
+ fw_map_pages_buf(fw_buf);
complete_all(&fw_buf->completion);
break;
}
return fw_priv;
}
-/* one pages buffer is mapped/unmapped only once */
-static int fw_map_pages_buf(struct firmware_buf *buf)
-{
- buf->data = vmap(buf->pages, buf->nr_pages, 0, PAGE_KERNEL_RO);
- if (!buf->data)
- return -ENOMEM;
- return 0;
-}
-
/* store the pages buffer info firmware from buf */
static void fw_set_page_data(struct firmware_buf *buf, struct firmware *fw)
{
return NULL;
}
- if (fw_get_filesystem_firmware(firmware, name)) {
- dev_dbg(device, "firmware: direct-loading firmware %s\n", name);
- return NULL;
- }
-
ret = fw_lookup_and_allocate_buf(name, &fw_cache, &buf);
if (!ret)
fw_priv = fw_create_instance(firmware, name, device,
struct device *f_dev = &fw_priv->dev;
struct firmware_buf *buf = fw_priv->buf;
struct firmware_cache *fwc = &fw_cache;
+ int direct_load = 0;
+
+ /* try direct loading from fs first */
+ if (fw_get_filesystem_firmware(buf)) {
+ dev_dbg(f_dev->parent, "firmware: direct-loading"
+ " firmware %s\n", buf->fw_id);
+
+ set_bit(FW_STATUS_DONE, &buf->status);
+ complete_all(&buf->completion);
+ direct_load = 1;
+ goto handle_fw;
+ }
+
+ /* fall back on userspace loading */
+ buf->fmt = PAGE_BUF;
dev_set_uevent_suppress(f_dev, true);
del_timer_sync(&fw_priv->timeout);
+handle_fw:
mutex_lock(&fw_lock);
if (!buf->size || test_bit(FW_STATUS_ABORT, &buf->status))
retval = -ENOENT;
if (!retval && f_dev->parent)
fw_add_devm_name(f_dev->parent, buf->fw_id);
- if (!retval)
- retval = fw_map_pages_buf(buf);
-
/*
* After caching firmware image is started, let it piggyback
* on request firmware.
fw_priv->buf = NULL;
mutex_unlock(&fw_lock);
+ if (direct_load)
+ goto err_put_dev;
+
device_remove_file(f_dev, &dev_attr_loading);
err_del_bin_attr:
device_remove_bin_file(f_dev, &firmware_attr_data);
}
#ifdef CONFIG_PM_SLEEP
+static ASYNC_DOMAIN_EXCLUSIVE(fw_cache_domain);
+
static struct fw_cache_entry *alloc_fw_cache_entry(const char *name)
{
struct fw_cache_entry *fce;
return fce;
}
-static int fw_cache_piggyback_on_request(const char *name)
+static int __fw_entry_found(const char *name)
{
struct firmware_cache *fwc = &fw_cache;
struct fw_cache_entry *fce;
- int ret = 0;
- spin_lock(&fwc->name_lock);
list_for_each_entry(fce, &fwc->fw_names, list) {
if (!strcmp(fce->name, name))
- goto found;
+ return 1;
}
+ return 0;
+}
+
+static int fw_cache_piggyback_on_request(const char *name)
+{
+ struct firmware_cache *fwc = &fw_cache;
+ struct fw_cache_entry *fce;
+ int ret = 0;
+
+ spin_lock(&fwc->name_lock);
+ if (__fw_entry_found(name))
+ goto found;
fce = alloc_fw_cache_entry(name);
if (fce) {
free_fw_cache_entry(fce);
}
-
- spin_lock(&fwc->name_lock);
- fwc->cnt--;
- spin_unlock(&fwc->name_lock);
-
- wake_up(&fwc->wait_queue);
}
/* called with dev->devres_lock held */
list_del(&fce->list);
spin_lock(&fwc->name_lock);
- fwc->cnt++;
- list_add(&fce->list, &fwc->fw_names);
+ /* only one cache entry for one firmware */
+ if (!__fw_entry_found(fce->name)) {
+ list_add(&fce->list, &fwc->fw_names);
+ } else {
+ free_fw_cache_entry(fce);
+ fce = NULL;
+ }
spin_unlock(&fwc->name_lock);
- async_schedule(__async_dev_cache_fw_image, (void *)fce);
+ if (fce)
+ async_schedule_domain(__async_dev_cache_fw_image,
+ (void *)fce,
+ &fw_cache_domain);
}
}
pr_debug("%s\n", __func__);
+ /* cancel uncache work */
+ cancel_delayed_work_sync(&fwc->work);
+
/*
* use small loading timeout for caching devices' firmware
* because all these firmware images have been loaded
mutex_unlock(&fw_lock);
/* wait for completion of caching firmware for all devices */
- spin_lock(&fwc->name_lock);
- for (;;) {
- prepare_to_wait(&fwc->wait_queue, &wait,
- TASK_UNINTERRUPTIBLE);
- if (!fwc->cnt)
- break;
-
- spin_unlock(&fwc->name_lock);
-
- schedule();
-
- spin_lock(&fwc->name_lock);
- }
- spin_unlock(&fwc->name_lock);
- finish_wait(&fwc->wait_queue, &wait);
+ async_synchronize_full_domain(&fw_cache_domain);
loading_timeout = old_timeout;
}
#ifdef CONFIG_PM_SLEEP
spin_lock_init(&fw_cache.name_lock);
INIT_LIST_HEAD(&fw_cache.fw_names);
- fw_cache.cnt = 0;
- init_waitqueue_head(&fw_cache.wait_queue);
INIT_DELAYED_WORK(&fw_cache.work,
device_uncache_fw_images_work);
*/
int platform_get_irq(struct platform_device *dev, unsigned int num)
{
+#ifdef CONFIG_SPARC
+ /* sparc does not have irqs represented as IORESOURCE_IRQ resources */
+ if (!dev || num >= dev->archdata.num_irqs)
+ return -ENXIO;
+ return dev->archdata.irqs[num];
+#else
struct resource *r = platform_get_resource(dev, IORESOURCE_IRQ, num);
return r ? r->start : -ENXIO;
+#endif
}
EXPORT_SYMBOL_GPL(platform_get_irq);
cpuidle_drv = cpuidle_driver_ref();
if (!cpuidle_drv) {
ret = -ENODEV;
- goto out;
+ goto err_drv;
}
if (cpuidle_drv->state_count <= state) {
ret = -EINVAL;
err:
cpuidle_driver_unref();
+
+ err_drv:
+ kfree(cpu_data);
goto out;
}
if (ancestor)
error = dev_pm_qos_add_request(ancestor, req, value);
- if (error)
+ if (error < 0)
req->dev = NULL;
return error;
# subsystems should select the appropriate symbols.
config REGMAP
- default y if (REGMAP_I2C || REGMAP_SPI)
+ default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_MMIO || REGMAP_IRQ)
select LZO_COMPRESS
select LZO_DECOMPRESS
select IRQ_DOMAIN if REGMAP_IRQ
static void bcma_unregister_cores(struct bcma_bus *bus)
{
- struct bcma_device *core;
+ struct bcma_device *core, *tmp;
- list_for_each_entry(core, &bus->cores, list) {
+ list_for_each_entry_safe(core, tmp, &bus->cores, list) {
+ list_del(&core->list);
if (core->dev_registered)
device_unregister(&core->dev);
}
config BLK_CPQ_CISS_DA
tristate "Compaq Smart Array 5xxx support"
depends on PCI
+ select CHECK_SIGNATURE
help
This is the driver for Compaq Smart Array 5xxx controllers.
Everyone using these boards should say Y here.
module will be called DAC960.
config BLK_DEV_UMEM
- tristate "Micro Memory MM5415 Battery Backed RAM support (EXPERIMENTAL)"
- depends on PCI && EXPERIMENTAL
+ tristate "Micro Memory MM5415 Battery Backed RAM support"
+ depends on PCI
---help---
Saying Y here will include support for the MM5415 family of
battery backed (Non-volatile) RAM cards.
a disc is opened for writing.
config CDROM_PKTCDVD_WCACHE
- bool "Enable write caching (EXPERIMENTAL)"
- depends on CDROM_PKTCDVD && EXPERIMENTAL
+ bool "Enable write caching"
+ depends on CDROM_PKTCDVD
help
If enabled, write caching will be set for the CD-R/W device. For now
this option is dangerous unless the CD-RW media is known good, as we
config VIRTIO_BLK
- tristate "Virtio block driver (EXPERIMENTAL)"
- depends on EXPERIMENTAL && VIRTIO
+ tristate "Virtio block driver"
+ depends on VIRTIO
---help---
This is the virtual block driver for virtio. It can be used with
lguest or QEMU based VMMs (like KVM or Xen). Say Y or M.
config BLK_DEV_RBD
tristate "Rados block device (RBD)"
- depends on INET && EXPERIMENTAL && BLOCK
+ depends on INET && BLOCK
select CEPH_LIB
select LIBCRC32C
select CRYPTO_AES
/* cf. http://lkml.org/lkml/2006/10/31/28 */
if (!fastfail)
- q->request_fn(q);
+ __blk_run_queue(q);
}
static void
return;
}
/* write all data in the battery backed cache to disk */
- memset(flush_buf, 0, 4);
return_code = sendcmd_withirq(h, CCISS_CACHE_FLUSH, flush_buf,
4, 0, CTLR_LUNID, TYPE_CMD);
kfree(flush_buf);
static struct platform_device floppy_device[N_DRIVE];
+static bool floppy_available(int drive)
+{
+ if (!(allowed_drive_mask & (1 << drive)))
+ return false;
+ if (fdc_state[FDC(drive)].version == FDC_NONE)
+ return false;
+ return true;
+}
+
static struct kobject *floppy_find(dev_t dev, int *part, void *data)
{
int drive = (*part & 3) | ((*part & 0x80) >> 5);
- if (drive >= N_DRIVE ||
- !(allowed_drive_mask & (1 << drive)) ||
- fdc_state[FDC(drive)].version == FDC_NONE)
+ if (drive >= N_DRIVE || !floppy_available(drive))
return NULL;
if (((*part >> 2) & 0x1f) >= ARRAY_SIZE(floppy_type))
return NULL;
static int __init do_floppy_init(void)
{
- int i, unit, drive;
- int err, dr;
+ int i, unit, drive, err;
set_debugt();
interruptjiffies = resultjiffies = jiffies;
raw_cmd = NULL;
- for (dr = 0; dr < N_DRIVE; dr++) {
- disks[dr] = alloc_disk(1);
- if (!disks[dr]) {
- err = -ENOMEM;
- goto out_put_disk;
- }
+ floppy_wq = alloc_ordered_workqueue("floppy", 0);
+ if (!floppy_wq)
+ return -ENOMEM;
- floppy_wq = alloc_ordered_workqueue("floppy", 0);
- if (!floppy_wq) {
+ for (drive = 0; drive < N_DRIVE; drive++) {
+ disks[drive] = alloc_disk(1);
+ if (!disks[drive]) {
err = -ENOMEM;
goto out_put_disk;
}
- disks[dr]->queue = blk_init_queue(do_fd_request, &floppy_lock);
- if (!disks[dr]->queue) {
+ disks[drive]->queue = blk_init_queue(do_fd_request, &floppy_lock);
+ if (!disks[drive]->queue) {
err = -ENOMEM;
- goto out_destroy_workq;
+ goto out_put_disk;
}
- blk_queue_max_hw_sectors(disks[dr]->queue, 64);
- disks[dr]->major = FLOPPY_MAJOR;
- disks[dr]->first_minor = TOMINOR(dr);
- disks[dr]->fops = &floppy_fops;
- sprintf(disks[dr]->disk_name, "fd%d", dr);
+ blk_queue_max_hw_sectors(disks[drive]->queue, 64);
+ disks[drive]->major = FLOPPY_MAJOR;
+ disks[drive]->first_minor = TOMINOR(drive);
+ disks[drive]->fops = &floppy_fops;
+ sprintf(disks[drive]->disk_name, "fd%d", drive);
- init_timer(&motor_off_timer[dr]);
- motor_off_timer[dr].data = dr;
- motor_off_timer[dr].function = motor_off_callback;
+ init_timer(&motor_off_timer[drive]);
+ motor_off_timer[drive].data = drive;
+ motor_off_timer[drive].function = motor_off_callback;
}
err = register_blkdev(FLOPPY_MAJOR, "fd");
}
for (drive = 0; drive < N_DRIVE; drive++) {
- if (!(allowed_drive_mask & (1 << drive)))
- continue;
- if (fdc_state[FDC(drive)].version == FDC_NONE)
+ if (!floppy_available(drive))
continue;
floppy_device[drive].name = floppy_device_name;
err = platform_device_register(&floppy_device[drive]);
if (err)
- goto out_release_dma;
+ goto out_remove_drives;
err = device_create_file(&floppy_device[drive].dev,
&dev_attr_cmos);
out_unreg_platform_dev:
platform_device_unregister(&floppy_device[drive]);
+out_remove_drives:
+ while (drive--) {
+ if (floppy_available(drive)) {
+ del_gendisk(disks[drive]);
+ device_remove_file(&floppy_device[drive].dev, &dev_attr_cmos);
+ platform_device_unregister(&floppy_device[drive]);
+ }
+ }
out_release_dma:
if (atomic_read(&usage_count))
floppy_release_irq_and_dma();
out_unreg_region:
blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
platform_driver_unregister(&floppy_driver);
-out_destroy_workq:
- destroy_workqueue(floppy_wq);
out_unreg_blkdev:
unregister_blkdev(FLOPPY_MAJOR, "fd");
out_put_disk:
- while (dr--) {
- del_timer_sync(&motor_off_timer[dr]);
- if (disks[dr]->queue) {
- blk_cleanup_queue(disks[dr]->queue);
- /*
- * put_disk() is not paired with add_disk() and
- * will put queue reference one extra time. fix it.
- */
- disks[dr]->queue = NULL;
+ destroy_workqueue(floppy_wq);
+ for (drive = 0; drive < N_DRIVE; drive++) {
+ if (!disks[drive])
+ break;
+ if (disks[drive]->queue) {
+ del_timer_sync(&motor_off_timer[drive]);
+ blk_cleanup_queue(disks[drive]->queue);
+ disks[drive]->queue = NULL;
}
- put_disk(disks[dr]);
+ put_disk(disks[drive]);
}
return err;
}
unregister_blkdev(FLOPPY_MAJOR, "fd");
platform_driver_unregister(&floppy_driver);
+ destroy_workqueue(floppy_wq);
+
for (drive = 0; drive < N_DRIVE; drive++) {
del_timer_sync(&motor_off_timer[drive]);
- if ((allowed_drive_mask & (1 << drive)) &&
- fdc_state[FDC(drive)].version != FDC_NONE) {
+ if (floppy_available(drive)) {
del_gendisk(disks[drive]);
device_remove_file(&floppy_device[drive].dev, &dev_attr_cmos);
platform_device_unregister(&floppy_device[drive]);
cancel_delayed_work_sync(&fd_timeout);
cancel_delayed_work_sync(&fd_timer);
- destroy_workqueue(floppy_wq);
if (atomic_read(&usage_count))
floppy_release_irq_and_dma();
if (lo->lo_state != Lo_bound)
return -ENXIO;
- if (lo->lo_refcnt > 1) /* we needed one fd for the ioctl */
- return -EBUSY;
+ /*
+ * If we've explicitly asked to tear down the loop device,
+ * and it has an elevated reference count, set it for auto-teardown when
+ * the last reference goes away. This stops $!~#$@ udev from
+ * preventing teardown because it decided that it needs to run blkid on
+	 * the loopback device whenever one appears. xfstests is notorious for
+	 * failing tests because blkid via udev races with the test's own
+	 * "losetup <dev>; mkfs; losetup -d <dev>" sequence, causing the
+	 * losetup -d command to fail with EBUSY.
+ */
+ if (lo->lo_refcnt > 1) {
+ lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
+ mutex_unlock(&lo->lo_ctl_mutex);
+ return 0;
+ }
if (filp == NULL)
return -EINVAL;
struct mtip_cmd *command;
int tag, cmdto_cnt = 0;
unsigned int bit, group;
- unsigned int num_command_slots = port->dd->slot_groups * 32;
+ unsigned int num_command_slots;
unsigned long to, tagaccum[SLOTBITS_IN_LONGS];
if (unlikely(!port))
}
/* clear the tag accumulator */
memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));
+ num_command_slots = port->dd->slot_groups * 32;
for (tag = 0; tag < num_command_slots; tag++) {
/*
}
return rv;
}
-
-static void mtip_set_timeout(struct host_to_dev_fis *fis, unsigned int *timeout)
+static void mtip_set_timeout(struct driver_data *dd,
+ struct host_to_dev_fis *fis,
+ unsigned int *timeout, u8 erasemode)
{
switch (fis->command) {
case ATA_CMD_DOWNLOAD_MICRO:
break;
case ATA_CMD_SEC_ERASE_UNIT:
case 0xFC:
- *timeout = 240000; /* 4 minutes */
+ if (erasemode)
+ *timeout = ((*(dd->port->identify + 90) * 2) * 60000);
+ else
+ *timeout = ((*(dd->port->identify + 89) * 2) * 60000);
break;
case ATA_CMD_STANDBYNOW1:
*timeout = 120000; /* 2 minutes */
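The identify-data arithmetic follows the ATA convention that IDENTIFY words 89/90 report the normal/enhanced secure-erase time in two-minute units, hence the "* 2 * 60000" conversion to milliseconds. A worked example with a hypothetical word value:

	/* IDENTIFY word 90 == 84 (enhanced erase, 2-minute units):
	 *   timeout = 84 * 2 * 60000 = 10080000 ms = 168 minutes
	 */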
unsigned int transfer_size;
unsigned long task_file_data;
int intotal = outtotal + req_task->out_size;
+ int erasemode = 0;
taskout = req_task->out_size;
taskin = req_task->in_size;
fis.lba_hi,
fis.device);
- mtip_set_timeout(&fis, &timeout);
+ /* check for erase mode support during secure erase.*/
+ if ((fis.command == ATA_CMD_SEC_ERASE_UNIT) && outbuf &&
+ (outbuf[0] & MTIP_SEC_ERASE_MODE)) {
+ erasemode = 1;
+ }
+
+ mtip_set_timeout(dd, &fis, &timeout, erasemode);
/* Determine the correct transfer size.*/
if (force_single_sector)
* return value
* None
*/
-static void mtip_hw_submit_io(struct driver_data *dd, sector_t start,
+static void mtip_hw_submit_io(struct driver_data *dd, sector_t sector,
int nsect, int nents, int tag, void *callback,
void *data, int dir)
{
struct mtip_port *port = dd->port;
struct mtip_cmd *command = &port->commands[tag];
int dma_dir = (dir == READ) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+ u64 start = sector;
/* Map the scatter list for DMA access */
nents = dma_map_sg(&dd->pdev->dev, command->sg, nents, dma_dir);
fis->opts = 1 << 7;
fis->command =
(dir == READ ? ATA_CMD_FPDMA_READ : ATA_CMD_FPDMA_WRITE);
- *((unsigned int *) &fis->lba_low) = (start & 0xFFFFFF);
- *((unsigned int *) &fis->lba_low_ex) = ((start >> 24) & 0xFFFFFF);
+ fis->lba_low = start & 0xFF;
+ fis->lba_mid = (start >> 8) & 0xFF;
+ fis->lba_hi = (start >> 16) & 0xFF;
+ fis->lba_low_ex = (start >> 24) & 0xFF;
+ fis->lba_mid_ex = (start >> 32) & 0xFF;
+ fis->lba_hi_ex = (start >> 40) & 0xFF;
fis->device = 1 << 6;
fis->features = nsect & 0xFF;
fis->features_ex = (nsect >> 8) & 0xFF;
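The field-by-field split mirrors the 48-bit LBA layout of a host-to-device FIS; for a hypothetical sector 0x0123456789AB the assignments come out as:

	/* start = 0x0000_0123_4567_89AB
	 *   lba_low    = 0xAB    lba_mid    = 0x89    lba_hi    = 0x67
	 *   lba_low_ex = 0x45    lba_mid_ex = 0x23    lba_hi_ex = 0x01
	 */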
/* offset of Device Control register in PCIe extended capabilites space */
#define PCIE_CONFIG_EXT_DEVICE_CONTROL_OFFSET 0x48
+/* check for erase mode support during secure erase */
+#define MTIP_SEC_ERASE_MODE 0x2
+
/* # of times to retry timed out/failed IOs */
#define MTIP_MAX_RETRIES 2
MTIP_DDF_REBUILD_FAILED_BIT = 8,
};
-__packed struct smart_attr{
+struct smart_attr {
u8 attr_id;
u16 flags;
u8 cur;
u8 worst;
u32 data;
u8 res[3];
-};
+} __packed;
/* Register Frame Information Structure (FIS), host to device. */
struct host_to_dev_fis {
struct block_device *bdev;
/* Cached size parameter. */
sector_t size;
- bool flush_support;
- bool discard_secure;
+ unsigned int flush_support:1;
+ unsigned int discard_secure:1;
};
struct backend_info;
{
struct xen_blkif *blkif;
- blkif = kmem_cache_alloc(xen_blkif_cachep, GFP_KERNEL);
+ blkif = kmem_cache_zalloc(xen_blkif_cachep, GFP_KERNEL);
if (!blkif)
return ERR_PTR(-ENOMEM);
- memset(blkif, 0, sizeof(*blkif));
blkif->domid = domid;
spin_lock_init(&blkif->blk_ring_lock);
atomic_set(&blkif->refcnt, 1);
}
}
-void xen_blkif_free(struct xen_blkif *blkif)
+static void xen_blkif_free(struct xen_blkif *blkif)
{
if (!atomic_dec_and_test(&blkif->refcnt))
BUG();
VBD_SHOW(physical_device, "%x:%x\n", be->major, be->minor);
VBD_SHOW(mode, "%s\n", be->mode);
-int xenvbd_sysfs_addif(struct xenbus_device *dev)
+static int xenvbd_sysfs_addif(struct xenbus_device *dev)
{
int error;
return error;
}
-void xenvbd_sysfs_delif(struct xenbus_device *dev)
+static void xenvbd_sysfs_delif(struct xenbus_device *dev)
{
sysfs_remove_group(&dev->dev.kobj, &xen_vbdstat_group);
device_remove_file(&dev->dev, &dev_attr_mode);
{ USB_DEVICE(0x13d3, 0x3304) },
{ USB_DEVICE(0x0930, 0x0215) },
{ USB_DEVICE(0x0489, 0xE03D) },
+ { USB_DEVICE(0x0489, 0xE027) },
/* Atheros AR9285 Malbec with sflash firmware */
{ USB_DEVICE(0x03F0, 0x311D) },
{ USB_DEVICE(0x13d3, 0x3304), .driver_info = BTUSB_IGNORE },
{ USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE },
{ USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE },
+ { USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE },
/* Atheros AR9285 Malbec with sflash firmware */
{ USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/platform_data/omap_ocp2scp.h>
+
+/**
+ * _count_resources - count the number of resources
+ * @res: struct resource *
+ *
+ * Count and return the number of resources populated for the device that is
+ * connected to ocp2scp.
+ */
+static unsigned _count_resources(struct resource *res)
+{
+ int cnt = 0;
+
+ while (res->start != res->end) {
+ cnt++;
+ res++;
+ }
+
+ return cnt;
+}
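Note that the walk stops at the first resource whose start equals its end, so the platform data must terminate each array with a zeroed sentinel entry (and cannot carry a trailing one-register or IRQ resource, where start == end as well). A hypothetical board-file array:

static struct resource foo_scp_resources[] = {
	{
		.start = 0x4a0ad080,		/* illustrative address */
		.end   = 0x4a0ad0ab,
		.flags = IORESOURCE_MEM,
	},
	{ },	/* sentinel: start == end == 0 ends _count_resources() */
};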
static int ocp2scp_remove_devices(struct device *dev, void *c)
{
static int __devinit omap_ocp2scp_probe(struct platform_device *pdev)
{
- int ret;
- struct device_node *np = pdev->dev.of_node;
+ int ret;
+ unsigned res_cnt, i;
+ struct device_node *np = pdev->dev.of_node;
+ struct platform_device *pdev_child;
+ struct omap_ocp2scp_platform_data *pdata = pdev->dev.platform_data;
+ struct omap_ocp2scp_dev *dev;
if (np) {
ret = of_platform_populate(np, NULL, NULL, &pdev->dev);
if (ret) {
- dev_err(&pdev->dev, "failed to add resources for ocp2scp child\n");
+ dev_err(&pdev->dev,
+ "failed to add resources for ocp2scp child\n");
goto err0;
}
+ } else if (pdata) {
+ for (i = 0, dev = *pdata->devices; i < pdata->dev_cnt; i++,
+ dev++) {
+ res_cnt = _count_resources(dev->res);
+
+ pdev_child = platform_device_alloc(dev->drv_name,
+ PLATFORM_DEVID_AUTO);
+ if (!pdev_child) {
+ dev_err(&pdev->dev,
+ "failed to allocate mem for ocp2scp child\n");
+ goto err0;
+ }
+
+ ret = platform_device_add_resources(pdev_child,
+ dev->res, res_cnt);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to add resources for ocp2scp child\n");
+ goto err1;
+ }
+
+ pdev_child->dev.parent = &pdev->dev;
+
+ ret = platform_device_add(pdev_child);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to register ocp2scp child device\n");
+ goto err1;
+ }
+ }
+ } else {
+ dev_err(&pdev->dev, "OCP2SCP initialized without plat data\n");
+ return -EINVAL;
}
+
pm_runtime_enable(&pdev->dev);
return 0;
+err1:
+ platform_device_put(pdev_child);
+
err0:
device_for_each_child(&pdev->dev, NULL, ocp2scp_remove_devices);
If unsure, say Y.
config HW_RANDOM_IXP4XX
- tristate "Intel IXP4xx NPU HW Random Number Generator support"
+ tristate "Intel IXP4xx NPU HW Pseudo-Random Number Generator support"
depends on HW_RANDOM && ARCH_IXP4XX
default HW_RANDOM
---help---
- This driver provides kernel-side support for the Random
- Number Generator hardware found on the Intel IXP4xx NPU.
+ This driver provides kernel-side support for the Pseudo-Random
+ Number Generator hardware found on the Intel IXP45x/46x NPU.
To compile this driver as a module, choose M here: the
module will be called ixp4xx-rng.
void __iomem * rng_base;
int err;
+ if (!cpu_is_ixp46x()) /* includes IXP455 */
+ return -ENOSYS;
+
rng_base = ioremap(0x70002100, 4);
if (!rng_base)
return -ENOMEM;
module_exit(ixp4xx_rng_exit);
MODULE_AUTHOR("Deepak Saxena <dsaxena@plexity.net>");
-MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver for IXP4xx");
+MODULE_DESCRIPTION("H/W Pseudo-Random Number Generator (RNG) driver for IXP45x/46x");
MODULE_LICENSE("GPL");
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int old_camera_power;
static int sonypi_suspend(struct device *dev)
CLK_IS_ROOT|CLK_IGNORE_UNUSED,
32768);
clk_register_clkdev(clk, "clk32k", NULL);
- clk_register_clkdev(clk, NULL, "rtc-pl031");
+ clk_register_clkdev(clk, "apb_pclk", "rtc-pl031");
/* PRCMU clocks */
fw_version = prcmu_get_fw_version();
clk = clk_reg_prcc_pclk("p1_pclk2", "per1clk", U8500_CLKRST1_BASE,
BIT(2), 0);
+ clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.1");
+
clk = clk_reg_prcc_pclk("p1_pclk3", "per1clk", U8500_CLKRST1_BASE,
BIT(3), 0);
+ clk_register_clkdev(clk, "apb_pclk", "msp0");
+ clk_register_clkdev(clk, "apb_pclk", "ux500-msp-i2s.0");
+
clk = clk_reg_prcc_pclk("p1_pclk4", "per1clk", U8500_CLKRST1_BASE,
BIT(4), 0);
+ clk_register_clkdev(clk, "apb_pclk", "msp1");
+ clk_register_clkdev(clk, "apb_pclk", "ux500-msp-i2s.1");
clk = clk_reg_prcc_pclk("p1_pclk5", "per1clk", U8500_CLKRST1_BASE,
BIT(5), 0);
clk = clk_reg_prcc_pclk("p1_pclk6", "per1clk", U8500_CLKRST1_BASE,
BIT(6), 0);
+ clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.2");
clk = clk_reg_prcc_pclk("p1_pclk7", "per1clk", U8500_CLKRST1_BASE,
BIT(7), 0);
clk = clk_reg_prcc_pclk("p1_pclk8", "per1clk", U8500_CLKRST1_BASE,
BIT(8), 0);
+ clk_register_clkdev(clk, "apb_pclk", "slimbus0");
clk = clk_reg_prcc_pclk("p1_pclk9", "per1clk", U8500_CLKRST1_BASE,
BIT(9), 0);
clk = clk_reg_prcc_pclk("p1_pclk10", "per1clk", U8500_CLKRST1_BASE,
BIT(10), 0);
+ clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.4");
+
clk = clk_reg_prcc_pclk("p1_pclk11", "per1clk", U8500_CLKRST1_BASE,
BIT(11), 0);
+ clk_register_clkdev(clk, "apb_pclk", "msp3");
+ clk_register_clkdev(clk, "apb_pclk", "ux500-msp-i2s.3");
clk = clk_reg_prcc_pclk("p2_pclk0", "per2clk", U8500_CLKRST2_BASE,
BIT(0), 0);
+ clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.3");
clk = clk_reg_prcc_pclk("p2_pclk1", "per2clk", U8500_CLKRST2_BASE,
BIT(1), 0);
clk = clk_reg_prcc_pclk("p2_pclk5", "per2clk", U8500_CLKRST2_BASE,
BIT(5), 0);
+ clk_register_clkdev(clk, "apb_pclk", "msp2");
+ clk_register_clkdev(clk, "apb_pclk", "ux500-msp-i2s.2");
clk = clk_reg_prcc_pclk("p2_pclk6", "per2clk", U8500_CLKRST2_BASE,
BIT(6), 0);
clk_register_clkdev(clk, "apb_pclk", "sdi1");
-
clk = clk_reg_prcc_pclk("p2_pclk7", "per2clk", U8500_CLKRST2_BASE,
BIT(7), 0);
clk_register_clkdev(clk, "apb_pclk", "sdi3");
clk = clk_reg_prcc_pclk("p3_pclk1", "per3clk", U8500_CLKRST3_BASE,
BIT(1), 0);
+ clk_register_clkdev(clk, "apb_pclk", "ssp0");
+
clk = clk_reg_prcc_pclk("p3_pclk2", "per3clk", U8500_CLKRST3_BASE,
BIT(2), 0);
+ clk_register_clkdev(clk, "apb_pclk", "ssp1");
+
clk = clk_reg_prcc_pclk("p3_pclk3", "per3clk", U8500_CLKRST3_BASE,
BIT(3), 0);
+ clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.0");
clk = clk_reg_prcc_pclk("p3_pclk4", "per3clk", U8500_CLKRST3_BASE,
BIT(4), 0);
clk = clk_reg_prcc_kclk("p1_i2c1_kclk", "i2cclk",
U8500_CLKRST1_BASE, BIT(2), CLK_SET_RATE_GATE);
+ clk_register_clkdev(clk, NULL, "nmk-i2c.1");
+
clk = clk_reg_prcc_kclk("p1_msp0_kclk", "msp02clk",
U8500_CLKRST1_BASE, BIT(3), CLK_SET_RATE_GATE);
+ clk_register_clkdev(clk, NULL, "msp0");
+ clk_register_clkdev(clk, NULL, "ux500-msp-i2s.0");
+
clk = clk_reg_prcc_kclk("p1_msp1_kclk", "msp1clk",
U8500_CLKRST1_BASE, BIT(4), CLK_SET_RATE_GATE);
+ clk_register_clkdev(clk, NULL, "msp1");
+ clk_register_clkdev(clk, NULL, "ux500-msp-i2s.1");
clk = clk_reg_prcc_kclk("p1_sdi0_kclk", "sdmmcclk",
U8500_CLKRST1_BASE, BIT(5), CLK_SET_RATE_GATE);
clk = clk_reg_prcc_kclk("p1_i2c2_kclk", "i2cclk",
U8500_CLKRST1_BASE, BIT(6), CLK_SET_RATE_GATE);
+ clk_register_clkdev(clk, NULL, "nmk-i2c.2");
+
clk = clk_reg_prcc_kclk("p1_slimbus0_kclk", "slimclk",
- U8500_CLKRST1_BASE, BIT(3), CLK_SET_RATE_GATE);
- /* FIXME: Redefinition of BIT(3). */
+ U8500_CLKRST1_BASE, BIT(8), CLK_SET_RATE_GATE);
+ clk_register_clkdev(clk, NULL, "slimbus0");
+
clk = clk_reg_prcc_kclk("p1_i2c4_kclk", "i2cclk",
U8500_CLKRST1_BASE, BIT(9), CLK_SET_RATE_GATE);
+ clk_register_clkdev(clk, NULL, "nmk-i2c.4");
+
clk = clk_reg_prcc_kclk("p1_msp3_kclk", "msp1clk",
U8500_CLKRST1_BASE, BIT(10), CLK_SET_RATE_GATE);
+ clk_register_clkdev(clk, NULL, "msp3");
+ clk_register_clkdev(clk, NULL, "ux500-msp-i2s.3");
/* Periph2 */
clk = clk_reg_prcc_kclk("p2_i2c3_kclk", "i2cclk",
U8500_CLKRST2_BASE, BIT(0), CLK_SET_RATE_GATE);
+ clk_register_clkdev(clk, NULL, "nmk-i2c.3");
clk = clk_reg_prcc_kclk("p2_sdi4_kclk", "sdmmcclk",
U8500_CLKRST2_BASE, BIT(2), CLK_SET_RATE_GATE);
clk = clk_reg_prcc_kclk("p2_msp2_kclk", "msp02clk",
U8500_CLKRST2_BASE, BIT(3), CLK_SET_RATE_GATE);
+ clk_register_clkdev(clk, NULL, "msp2");
+ clk_register_clkdev(clk, NULL, "ux500-msp-i2s.2");
clk = clk_reg_prcc_kclk("p2_sdi1_kclk", "sdmmcclk",
U8500_CLKRST2_BASE, BIT(4), CLK_SET_RATE_GATE);
/* Periph3 */
clk = clk_reg_prcc_kclk("p3_ssp0_kclk", "sspclk",
U8500_CLKRST3_BASE, BIT(1), CLK_SET_RATE_GATE);
+ clk_register_clkdev(clk, NULL, "ssp0");
+
clk = clk_reg_prcc_kclk("p3_ssp1_kclk", "sspclk",
U8500_CLKRST3_BASE, BIT(2), CLK_SET_RATE_GATE);
+ clk_register_clkdev(clk, NULL, "ssp1");
+
clk = clk_reg_prcc_kclk("p3_i2c0_kclk", "i2cclk",
U8500_CLKRST3_BASE, BIT(3), CLK_SET_RATE_GATE);
+ clk_register_clkdev(clk, NULL, "nmk-i2c.0");
clk = clk_reg_prcc_kclk("p3_sdi2_kclk", "sdmmcclk",
U8500_CLKRST3_BASE, BIT(4), CLK_SET_RATE_GATE);
cpufreq_update_policy(cpu);
break;
case CPU_DOWN_PREPARE:
+ case CPU_DOWN_PREPARE_FROZEN:
cpufreq_stats_free_sysfs(cpu);
break;
case CPU_DEAD:
* http://www.gnu.org/licenses/gpl.html
*
* Maintainer:
- * Andreas Herrmann <andreas.herrmann3@amd.com>
+ * Andreas Herrmann <herrmann.der.user@googlemail.com>
*
* Based on the powernow-k7.c module written by Dave Jones.
* (C) 2003 Dave Jones on behalf of SuSE Labs
struct powernowk8_target_arg pta = { .pol = pol, .targfreq = targfreq,
.relation = relation };
- /*
- * Must run on @pol->cpu. cpufreq core is responsible for ensuring
- * that we're bound to the current CPU and pol->cpu stays online.
- */
- if (smp_processor_id() == pol->cpu)
- return powernowk8_target_fn(&pta);
- else
- return work_on_cpu(pol->cpu, powernowk8_target_fn, &pta);
+ return work_on_cpu(pol->cpu, powernowk8_target_fn, &pta);
}
/* Driver entry point to verify the policy and range of frequencies */
config CRYPTO_DEV_IXP4XX
tristate "Driver for IXP4xx crypto hardware acceleration"
- depends on ARCH_IXP4XX
+ depends on ARCH_IXP4XX && IXP4XX_QMGR && IXP4XX_NPE
select CRYPTO_DES
select CRYPTO_ALGAPI
select CRYPTO_AUTHENC
}
if (cipher_cfg & MOD_AES) {
switch (key_len) {
- case 16: keylen_cfg = MOD_AES128 | KEYLEN_128; break;
- case 24: keylen_cfg = MOD_AES192 | KEYLEN_192; break;
- case 32: keylen_cfg = MOD_AES256 | KEYLEN_256; break;
- default:
- *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
- return -EINVAL;
+ case 16: keylen_cfg = MOD_AES128; break;
+ case 24: keylen_cfg = MOD_AES192; break;
+ case 32: keylen_cfg = MOD_AES256; break;
+ default:
+ *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ return -EINVAL;
}
cipher_cfg |= keylen_cfg;
} else if (cipher_cfg & MOD_3DES) {
Support the Synopsys DesignWare AHB DMA controller. This
can be integrated in chips such as the Atmel AT32ap7000.
+config DW_DMAC_BIG_ENDIAN_IO
+ bool "Use big endian I/O register access"
+ default y if AVR32
+ depends on DW_DMAC
+ help
+ Say yes here to use big endian I/O access when reading and writing
+ to the DMA controller registers. This is needed on some platforms,
+ like the Atmel AVR32 architecture.
+
+ If unsure, use the default setting.
+
config AT_HDMAC
tristate "Atmel AHB DMA support"
depends on ARCH_AT91
u32 DW_PARAMS;
};
+#ifdef CONFIG_DW_DMAC_BIG_ENDIAN_IO
+#define dma_readl_native ioread32be
+#define dma_writel_native iowrite32be
+#else
+#define dma_readl_native readl
+#define dma_writel_native writel
+#endif
+
/* To access the registers in early stage of probe */
#define dma_read_byaddr(addr, name) \
- readl((addr) + offsetof(struct dw_dma_regs, name))
+ dma_readl_native((addr) + offsetof(struct dw_dma_regs, name))
/* Bitfields in DW_PARAMS */
#define DW_PARAMS_NR_CHAN 8 /* number of channels */
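With the native accessors in place, the endianness switch is confined to those two defines; the register macros below expand unchanged, e.g. (register name illustrative):

	/* CONFIG_DW_DMAC_BIG_ENDIAN_IO=y:
	 *   channel_writel(dwc, CFG_LO, val)
	 *     -> iowrite32be(val, &__dwc_regs(dwc)->CFG_LO)
	 * otherwise:
	 *     -> writel(val, &__dwc_regs(dwc)->CFG_LO)
	 */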
}
#define channel_readl(dwc, name) \
- readl(&(__dwc_regs(dwc)->name))
+ dma_readl_native(&(__dwc_regs(dwc)->name))
#define channel_writel(dwc, name, val) \
- writel((val), &(__dwc_regs(dwc)->name))
+ dma_writel_native((val), &(__dwc_regs(dwc)->name))
static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan)
{
}
#define dma_readl(dw, name) \
- readl(&(__dw_regs(dw)->name))
+ dma_readl_native(&(__dw_regs(dw)->name))
#define dma_writel(dw, name, val) \
- writel((val), &(__dw_regs(dw)->name))
+ dma_writel_native((val), &(__dw_regs(dw)->name))
#define channel_set_bit(dw, reg, mask) \
dma_writel(dw, reg, ((mask) << 8) | (mask))
slot = i;
break;
}
- if (slot < 0)
+ if (slot < 0) {
+ spin_unlock_irqrestore(&imxdma->lock, flags);
return -EBUSY;
+ }
imxdma->slots_2d[slot].xsr = d->x;
imxdma->slots_2d[slot].ysr = d->y;
sdesc = list_first_entry(&schan->queued, struct sirfsoc_dma_desc,
node);
/* Move the first queued descriptor to active list */
- list_move_tail(&schan->queued, &schan->active);
+ list_move_tail(&sdesc->node, &schan->active);
/* Start the DMA transfer */
writel_relaxed(sdesc->width, sdma->base + SIRFSOC_DMA_WIDTH_0 +
unsigned long iflags;
int ret;
- if ((xt->dir != DMA_MEM_TO_DEV) || (xt->dir != DMA_DEV_TO_MEM)) {
+ if ((xt->dir != DMA_MEM_TO_DEV) && (xt->dir != DMA_DEV_TO_MEM)) {
ret = -EINVAL;
goto err_dir;
}
* memory controller and apply to register. Search for the first
* bandwidth entry that is greater or equal than the setting requested
* and program that. If at last entry, turn off DRAM scrubbing.
+ *
+ * If no suitable bandwidth is found, turn off DRAM scrubbing entirely
+ * by falling back to the last element in scrubrates[].
*/
- for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
+ for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
/*
* skip scrub rates which aren't recommended
* (see F10 BKDG, F3x58)
if (scrubrates[i].bandwidth <= new_bw)
break;
-
- /*
- * if no suitable bandwidth found, turn off DRAM scrubbing
- * entirely by falling back to the last element in the
- * scrubrates array.
- */
}
scrubval = scrubrates[i].scrubval;
* detection. The mods to Rev F required more family
* information detection.
*
- * Changes/Fixes by Borislav Petkov <borislav.petkov@amd.com>:
+ * Changes/Fixes by Borislav Petkov <bp@alien8.de>:
* - misc fixes and code cleanups
*
* This module is based on the following documents
*
* 2007 (c) MontaVista Software, Inc.
* 2010 (c) Advanced Micro Devices Inc.
- * Borislav Petkov <borislav.petkov@amd.com>
+ * Borislav Petkov <bp@alien8.de>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* This file may be distributed under the terms of the GNU General Public
* License version 2.
*
- * Copyright (c) 2010: Borislav Petkov <borislav.petkov@amd.com>
+ * Copyright (c) 2010: Borislav Petkov <bp@alien8.de>
* Advanced Micro Devices Inc.
*/
module_exit(edac_exit_mce_inject);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Borislav Petkov <borislav.petkov@amd.com>");
+MODULE_AUTHOR("Borislav Petkov <bp@alien8.de>");
MODULE_AUTHOR("AMD Inc.");
MODULE_DESCRIPTION("MCE injection facility for testing MCE decoding");
*
*/
+#include <linux/module.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/platform_device.h>
err = request_any_context_irq(data->irq, adc_jack_irq_thread,
pdata->irq_flags, pdata->name, data);
- if (err) {
+ if (err < 0) {
dev_err(&pdev->dev, "error: irq %d\n", data->irq);
- err = -EINVAL;
goto err_irq;
}
- goto out;
+ return 0;
err_irq:
extcon_dev_unregister(&data->edev);
};
module_platform_driver(adc_jack_driver);
+
+MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
+MODULE_DESCRIPTION("ADC Jack extcon driver");
+MODULE_LICENSE("GPL v2");
* every single port-type of the following cable names. Please choose cable
* names that are actually used in your extcon device.
*/
-const char *extcon_cable_name[] = {
+const char extcon_cable_name[][CABLE_NAME_MAX + 1] = {
[EXTCON_USB] = "USB",
[EXTCON_USB_HOST] = "USB-Host",
[EXTCON_TA] = "TA",
[EXTCON_VIDEO_IN] = "Video-in",
[EXTCON_VIDEO_OUT] = "Video-out",
[EXTCON_MECHANICAL] = "Mechanical",
-
- NULL,
};
static struct class *extcon_class;
return 0;
for (i = 0; edev->mutually_exclusive[i]; i++) {
- int count = 0, j;
+ int weight;
u32 correspondants = new_state & edev->mutually_exclusive[i];
- u32 exp = 1;
-
- for (j = 0; j < 32; j++) {
- if (exp & correspondants)
- count++;
- if (count > 1)
- return i + 1;
- exp <<= 1;
- }
+
+ /* calculate the total number of bits set */
+ weight = hweight32(correspondants);
+ if (weight > 1)
+ return i + 1;
}
return 0;
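hweight32() is the kernel's 32-bit population count, so the rewritten check rejects any exclusivity mask in which @new_state has two or more cables raised; for instance:

	/* new_state & mutually_exclusive[i] == 0x00000005
	 *   -> hweight32(0x5) == 2 -> cables 0 and 2 both attached: error
	 */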
EXPORT_SYMBOL_GPL(extcon_get_cable_state);
/**
- * extcon_get_cable_state_() - Set the status of a specific cable.
+ * extcon_set_cable_state_() - Set the status of a specific cable.
* @edev: the extcon device that has the cable.
* @index: cable index that can be retrieved by extcon_find_cable_index().
* @cable_state: the new cable status. The default semantics is
EXPORT_SYMBOL_GPL(extcon_set_cable_state_);
/**
- * extcon_get_cable_state() - Set the status of a specific cable.
+ * extcon_set_cable_state() - Set the status of a specific cable.
* @edev: the extcon device that has the cable.
* @cable_name: cable name.
* @cable_state: the new cable status. The default semantics is
* extcon device.
* @obj: an empty extcon_specific_cable_nb object to be returned.
* @extcon_name: the name of extcon device.
+ *		if NULL, extcon_register_interest() binds to the first
+ *		extcon device found to support the given cable_name.
* @cable_name: the target cable name.
* @nb: the notifier block to get notified.
*
const char *extcon_name, const char *cable_name,
struct notifier_block *nb)
{
- if (!obj || !extcon_name || !cable_name || !nb)
+ if (!obj || !cable_name || !nb)
return -EINVAL;
- obj->edev = extcon_get_extcon_dev(extcon_name);
- if (!obj->edev)
- return -ENODEV;
+ if (extcon_name) {
+ obj->edev = extcon_get_extcon_dev(extcon_name);
+ if (!obj->edev)
+ return -ENODEV;
- obj->cable_index = extcon_find_cable_index(obj->edev, cable_name);
- if (obj->cable_index < 0)
- return -ENODEV;
+ obj->cable_index = extcon_find_cable_index(obj->edev, cable_name);
+ if (obj->cable_index < 0)
+ return -ENODEV;
+
+ obj->user_nb = nb;
- obj->user_nb = nb;
+ obj->internal_nb.notifier_call = _call_per_cable;
- obj->internal_nb.notifier_call = _call_per_cable;
+ return raw_notifier_chain_register(&obj->edev->nh, &obj->internal_nb);
+ } else {
+ struct class_dev_iter iter;
+ struct extcon_dev *extd;
+ struct device *dev;
+
+ if (!extcon_class)
+ return -ENODEV;
+ class_dev_iter_init(&iter, extcon_class, NULL, NULL);
+ while ((dev = class_dev_iter_next(&iter))) {
+ extd = (struct extcon_dev *)dev_get_drvdata(dev);
+
+ if (extcon_find_cable_index(extd, cable_name) < 0)
+ continue;
+
+ class_dev_iter_exit(&iter);
+ return extcon_register_interest(obj, extd->name,
+ cable_name, nb);
+ }
- return raw_notifier_chain_register(&obj->edev->nh, &obj->internal_nb);
+ return -ENODEV;
+ }
}
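A hedged sketch of the new NULL-name path from a consumer's side (callback and names hypothetical):

static int usb_cable_event(struct notifier_block *nb,
			   unsigned long state, void *ptr)
{
	/* state carries the new cable attach state */
	return NOTIFY_OK;
}

static struct notifier_block usb_nb = { .notifier_call = usb_cable_event };
static struct extcon_specific_cable_nb usb_obj;

static int __init foo_init(void)
{
	/* no device name: bind to the first extcon device advertising "USB" */
	return extcon_register_interest(&usb_obj, NULL, "USB", &usb_nb);
}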
/**
return 0;
}
-static void extcon_cleanup(struct extcon_dev *edev, bool skip)
-{
- mutex_lock(&extcon_dev_list_lock);
- list_del(&edev->entry);
- mutex_unlock(&extcon_dev_list_lock);
-
- if (!skip && get_device(edev->dev)) {
- int index;
-
- if (edev->mutually_exclusive && edev->max_supported) {
- for (index = 0; edev->mutually_exclusive[index];
- index++)
- kfree(edev->d_attrs_muex[index].attr.name);
- kfree(edev->d_attrs_muex);
- kfree(edev->attrs_muex);
- }
-
- for (index = 0; index < edev->max_supported; index++)
- kfree(edev->cables[index].attr_g.name);
-
- if (edev->max_supported) {
- kfree(edev->extcon_dev_type.groups);
- kfree(edev->cables);
- }
-
- device_unregister(edev->dev);
- put_device(edev->dev);
- }
-
- kfree(edev->dev);
-}
-
static void extcon_dev_release(struct device *dev)
{
- struct extcon_dev *edev = (struct extcon_dev *) dev_get_drvdata(dev);
-
- extcon_cleanup(edev, true);
+ kfree(dev);
}
static const char *muex_name = "mutually_exclusive";
*/
void extcon_dev_unregister(struct extcon_dev *edev)
{
- extcon_cleanup(edev, false);
+ int index;
+
+ mutex_lock(&extcon_dev_list_lock);
+ list_del(&edev->entry);
+ mutex_unlock(&extcon_dev_list_lock);
+
+ if (IS_ERR_OR_NULL(get_device(edev->dev))) {
+ dev_err(edev->dev, "Failed to unregister extcon_dev (%s)\n",
+ dev_name(edev->dev));
+ return;
+ }
+
+ if (edev->mutually_exclusive && edev->max_supported) {
+ for (index = 0; edev->mutually_exclusive[index];
+ index++)
+ kfree(edev->d_attrs_muex[index].attr.name);
+ kfree(edev->d_attrs_muex);
+ kfree(edev->attrs_muex);
+ }
+
+ for (index = 0; index < edev->max_supported; index++)
+ kfree(edev->cables[index].attr_g.name);
+
+ if (edev->max_supported) {
+ kfree(edev->extcon_dev_type.groups);
+ kfree(edev->cables);
+ }
+
+#if defined(CONFIG_ANDROID)
+ if (switch_class)
+ class_compat_remove_link(switch_class, edev->dev, NULL);
+#endif
+ device_unregister(edev->dev);
+ put_device(edev->dev);
}
EXPORT_SYMBOL_GPL(extcon_dev_unregister);
static void __exit extcon_class_exit(void)
{
+#if defined(CONFIG_ANDROID)
+ class_compat_unregister(switch_class);
+#endif
class_destroy(extcon_class);
}
module_exit(extcon_class_exit);
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <linux/extcon.h>
#include <linux/workqueue.h>
#include <linux/gpio.h>
#include <linux/extcon.h>
static int max77693_muic_set_debounce_time(struct max77693_muic_info *info,
enum max77693_muic_adc_debounce_time time)
{
- int ret = 0;
- u8 ctrl3;
+ int ret;
switch (time) {
case ADC_DEBOUNCE_TIME_5MS:
case ADC_DEBOUNCE_TIME_10MS:
case ADC_DEBOUNCE_TIME_25MS:
case ADC_DEBOUNCE_TIME_38_62MS:
- ret = max77693_read_reg(info->max77693->regmap_muic,
- MAX77693_MUIC_REG_CTRL3, &ctrl3);
- ctrl3 &= ~CONTROL3_ADCDBSET_MASK;
- ctrl3 |= (time << CONTROL3_ADCDBSET_SHIFT);
-
- ret = max77693_write_reg(info->max77693->regmap_muic,
- MAX77693_MUIC_REG_CTRL3, ctrl3);
- if (ret) {
+ ret = max77693_update_reg(info->max77693->regmap_muic,
+ MAX77693_MUIC_REG_CTRL3,
+ time << CONTROL3_ADCDBSET_SHIFT,
+ CONTROL3_ADCDBSET_MASK);
+ if (ret)
dev_err(info->dev, "failed to set ADC debounce time\n");
- ret = -EINVAL;
- }
break;
default:
dev_err(info->dev, "invalid ADC debounce time\n");
static int __devinit max77693_muic_probe(struct platform_device *pdev)
{
struct max77693_dev *max77693 = dev_get_drvdata(pdev->dev.parent);
+ struct max77693_platform_data *pdata = dev_get_platdata(max77693->dev);
+ struct max77693_muic_platform_data *muic_pdata = pdata->muic_data;
struct max77693_muic_info *info;
int ret, i;
u8 id;
goto err_extcon;
}
+ /* Initialize MUIC register by using platform data */
+ for (i = 0 ; i < muic_pdata->num_init_data ; i++) {
+ enum max77693_irq_source irq_src = MAX77693_IRQ_GROUP_NR;
+
+ max77693_write_reg(info->max77693->regmap_muic,
+ muic_pdata->init_data[i].addr,
+ muic_pdata->init_data[i].data);
+
+ switch (muic_pdata->init_data[i].addr) {
+ case MAX77693_MUIC_REG_INTMASK1:
+ irq_src = MUIC_INT1;
+ break;
+ case MAX77693_MUIC_REG_INTMASK2:
+ irq_src = MUIC_INT2;
+ break;
+ case MAX77693_MUIC_REG_INTMASK3:
+ irq_src = MUIC_INT3;
+ break;
+ }
+
+ if (irq_src < MAX77693_IRQ_GROUP_NR)
+ info->max77693->irq_masks_cur[irq_src]
+ = muic_pdata->init_data[i].data;
+ }
+
/* Check revision number of MUIC device*/
ret = max77693_read_reg(info->max77693->regmap_muic,
MAX77693_MUIC_REG_ID, &id);
free_irq(muic_irqs[i].virq, info);
cancel_work_sync(&info->irq_work);
extcon_dev_unregister(info->edev);
+ kfree(info->edev);
kfree(info);
return 0;
static int max8997_muic_handle_charger_type_detach(
struct max8997_muic_info *info)
{
- int ret = 0;
-
switch (info->pre_charger_type) {
case MAX8997_CHARGER_TYPE_USB:
extcon_set_cable_state(info->edev, "USB", false);
extcon_set_cable_state(info->edev, "Fast-charger", false);
break;
default:
- ret = -EINVAL;
+ return -EINVAL;
break;
}
- return ret;
+ return 0;
}
static int max8997_muic_handle_charger_type(struct max8997_muic_info *info,
struct sbp2_logical_unit *lu = sdev->hostdata;
sdev->use_10_for_rw = 1;
+ sdev->no_report_opcodes = 1;
+ sdev->no_write_same = 1;
if (sbp2_param_exclusive_login)
sdev->manage_start_stop = 1;
config OF_GPIO
def_bool y
- depends on OF && !SPARC
+ depends on OF
config DEBUG_GPIO
bool "Debug GPIO calls"
config GPIO_ADNP
tristate "Avionic Design N-bit GPIO expander"
- depends on I2C && OF
+ depends on I2C && OF_GPIO
help
This option enables support for N GPIOs found on Avionic Design
I2C GPIO expanders. The register space will be extended by powers
}
chip->gpio_chip.ngpio = GEN_74X164_NUMBER_GPIOS * chip->registers;
- chip->buffer = devm_kzalloc(&spi->dev, chip->gpio_chip.ngpio, GFP_KERNEL);
+ chip->buffer = devm_kzalloc(&spi->dev, chip->registers, GFP_KERNEL);
if (!chip->buffer) {
ret = -ENOMEM;
goto exit_destroy;
/*----------------------------------------------------------------------*/
-#ifdef CONFIG_I2C
+#if IS_ENABLED(CONFIG_I2C)
static int mcp23008_read(struct mcp23s08 *mcp, unsigned reg)
{
break;
#endif /* CONFIG_SPI_MASTER */
-#ifdef CONFIG_I2C
+#if IS_ENABLED(CONFIG_I2C)
case MCP_TYPE_008:
mcp->ops = &mcp23008_ops;
mcp->chip.ngpio = 8;
/*----------------------------------------------------------------------*/
-#ifdef CONFIG_I2C
+#if IS_ENABLED(CONFIG_I2C)
static int __devinit mcp230xx_probe(struct i2c_client *client,
const struct i2c_device_id *id)
return mvchip->membase + GPIO_OUT_OFF;
}
+static inline void __iomem *mvebu_gpioreg_blink(struct mvebu_gpio_chip *mvchip)
+{
+ return mvchip->membase + GPIO_BLINK_EN_OFF;
+}
+
static inline void __iomem *mvebu_gpioreg_io_conf(struct mvebu_gpio_chip *mvchip)
{
return mvchip->membase + GPIO_IO_CONF_OFF;
return (u >> pin) & 1;
}
+static void mvebu_gpio_blink(struct gpio_chip *chip, unsigned pin, int value)
+{
+ struct mvebu_gpio_chip *mvchip =
+ container_of(chip, struct mvebu_gpio_chip, chip);
+ unsigned long flags;
+ u32 u;
+
+ spin_lock_irqsave(&mvchip->lock, flags);
+ u = readl_relaxed(mvebu_gpioreg_blink(mvchip));
+ if (value)
+ u |= 1 << pin;
+ else
+ u &= ~(1 << pin);
+ writel_relaxed(u, mvebu_gpioreg_blink(mvchip));
+ spin_unlock_irqrestore(&mvchip->lock, flags);
+}
+
static int mvebu_gpio_direction_input(struct gpio_chip *chip, unsigned pin)
{
struct mvebu_gpio_chip *mvchip =
if (ret)
return ret;
+ mvebu_gpio_blink(chip, pin, 0);
+ mvebu_gpio_set(chip, pin, value);
+
spin_lock_irqsave(&mvchip->lock, flags);
u = readl_relaxed(mvebu_gpioreg_io_conf(mvchip));
u &= ~(1 << pin);
ct->handler = handle_edge_irq;
ct->chip.name = mvchip->chip.label;
- irq_setup_generic_chip(gc, IRQ_MSK(ngpios), IRQ_GC_INIT_MASK_CACHE,
+ irq_setup_generic_chip(gc, IRQ_MSK(ngpios), 0,
IRQ_NOREQUEST, IRQ_LEVEL | IRQ_NOPROBE);
/* Setup irq domain on top of the generic chip. */
}
}
+/**
+ * _clear_gpio_debounce - clear debounce settings for a gpio
+ * @bank: the gpio bank we're acting upon
+ * @gpio: the gpio number on this @bank
+ *
+ * If a gpio is using debounce, clear its debounce enable bit. If it is
+ * the last gpio in the bank using debounce, also clear the debounce time
+ * and disable the debounce clock.
+ */
+static void _clear_gpio_debounce(struct gpio_bank *bank, unsigned gpio)
+{
+ u32 gpio_bit = GPIO_BIT(bank, gpio);
+
+ if (!bank->dbck_flag)
+ return;
+
+ if (!(bank->dbck_enable_mask & gpio_bit))
+ return;
+
+ bank->dbck_enable_mask &= ~gpio_bit;
+ bank->context.debounce_en &= ~gpio_bit;
+ __raw_writel(bank->context.debounce_en,
+ bank->base + bank->regs->debounce_en);
+
+ if (!bank->dbck_enable_mask) {
+ bank->context.debounce = 0;
+ __raw_writel(bank->context.debounce, bank->base +
+ bank->regs->debounce);
+ clk_disable(bank->dbck);
+ bank->dbck_enabled = false;
+ }
+}
+
static inline void set_gpio_trigger(struct gpio_bank *bank, int gpio,
unsigned trigger)
{
_set_gpio_irqenable(bank, gpio, 0);
_clear_gpio_irqstatus(bank, gpio);
_set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), IRQ_TYPE_NONE);
+ _clear_gpio_debounce(bank, gpio);
}
/* Use disable_irq_wake() and enable_irq_wake() functions from drivers */
unsigned long flags;
spin_lock_irqsave(&tgpio->lock, flags);
- tgpio->last_ier &= ~(1 << offset);
+ tgpio->last_ier &= ~(1UL << offset);
iowrite32(tgpio->last_ier, tgpio->membase + TGPIO_IER);
spin_unlock_irqrestore(&tgpio->lock, flags);
}
unsigned long flags;
spin_lock_irqsave(&tgpio->lock, flags);
- tgpio->last_ier |= 1 << offset;
+ tgpio->last_ier |= 1UL << offset;
iowrite32(tgpio->last_ier, tgpio->membase + TGPIO_IER);
spin_unlock_irqrestore(&tgpio->lock, flags);
}
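The UL suffix is the substance of this fix: with a plain int constant, a shift by 31 overflows a signed int (undefined behaviour in C), whereas the unsigned-long shift is well defined for every valid @offset:

	/* offset == 31:
	 *   1   << 31  signed overflow, formally undefined
	 *   1UL << 31  == 0x80000000UL, as intended for last_ier
	 */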
*/
status = gpio_request(gpio, "sysfs");
- if (status < 0)
+ if (status < 0) {
+ if (status == -EPROBE_DEFER)
+ status = -ENODEV;
goto done;
-
+ }
status = gpio_export(gpio, true);
if (status < 0)
gpio_free(gpio);
spin_lock_irqsave(&gpio_lock, flags);
- if (!gpio_is_valid(gpio))
+ if (!gpio_is_valid(gpio)) {
+ status = -EINVAL;
goto done;
+ }
desc = &gpio_desc[gpio];
chip = desc->chip;
if (chip == NULL)
size_t size;
int ret;
- DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d\n",
+ DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n",
sizes->surface_width, sizes->surface_height,
sizes->surface_bpp);
size = mode_cmd.pitches[0] * mode_cmd.height;
obj = drm_gem_cma_create(dev, size);
- if (!obj)
+ if (IS_ERR(obj))
return -ENOMEM;
fbi = framebuffer_alloc(0, dev->dev);
int minor_id = iminor(inode);
struct drm_minor *minor;
int retcode = 0;
+ int need_setup = 0;
+ struct address_space *old_mapping;
minor = idr_find(&drm_minors_idr, minor_id);
if (!minor)
if (drm_device_is_unplugged(dev))
return -ENODEV;
+ if (!dev->open_count++)
+ need_setup = 1;
+ mutex_lock(&dev->struct_mutex);
+ old_mapping = dev->dev_mapping;
+ if (old_mapping == NULL)
+ dev->dev_mapping = &inode->i_data;
+ /* ihold ensures nobody can remove inode with our i_data */
+ ihold(container_of(dev->dev_mapping, struct inode, i_data));
+ inode->i_mapping = dev->dev_mapping;
+ filp->f_mapping = dev->dev_mapping;
+ mutex_unlock(&dev->struct_mutex);
+
retcode = drm_open_helper(inode, filp, dev);
- if (!retcode) {
- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
- if (!dev->open_count++)
- retcode = drm_setup(dev);
- }
- if (!retcode) {
- mutex_lock(&dev->struct_mutex);
- if (dev->dev_mapping == NULL)
- dev->dev_mapping = &inode->i_data;
- /* ihold ensures nobody can remove inode with our i_data */
- ihold(container_of(dev->dev_mapping, struct inode, i_data));
- inode->i_mapping = dev->dev_mapping;
- filp->f_mapping = dev->dev_mapping;
- mutex_unlock(&dev->struct_mutex);
+ if (retcode)
+ goto err_undo;
+ atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
+ if (need_setup) {
+ retcode = drm_setup(dev);
+ if (retcode)
+ goto err_undo;
}
+ return 0;
+err_undo:
+ mutex_lock(&dev->struct_mutex);
+ filp->f_mapping = old_mapping;
+ inode->i_mapping = old_mapping;
+ iput(container_of(dev->dev_mapping, struct inode, i_data));
+ dev->dev_mapping = old_mapping;
+ mutex_unlock(&dev->struct_mutex);
+ dev->open_count--;
return retcode;
}
EXPORT_SYMBOL(drm_open);
struct drm_gem_object *obj = ptr;
struct seq_file *m = data;
- seq_printf(m, "name %d size %zd\n", obj->name, obj->size);
-
seq_printf(m, "%6d %8zd %7d %8d\n",
obj->name, obj->size,
atomic_read(&obj->handle_count),
}
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- dev_set_drvdata(&platdev->dev, dev);
ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
if (ret)
goto err_g1;
config DRM_EXYNOS
tristate "DRM Support for Samsung SoC EXYNOS Series"
- depends on DRM && PLAT_SAMSUNG
+ depends on DRM && (PLAT_SAMSUNG || ARCH_MULTIPLATFORM)
select DRM_KMS_HELPER
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
exynos_connector->encoder_id = encoder->base.id;
exynos_connector->manager = manager;
exynos_connector->dpms = DRM_MODE_DPMS_OFF;
+ connector->dpms = DRM_MODE_DPMS_OFF;
connector->encoder = encoder;
err = drm_mode_connector_attach_encoder(connector, encoder);
* @manager: specific encoder has its own manager to control a hardware
* appropriately and we can access a hardware drawing on this manager.
* @dpms: store the encoder dpms value.
+ * @updated: whether the overlay data has already been applied to the HW.
*/
struct exynos_drm_encoder {
struct drm_crtc *old_crtc;
struct drm_encoder drm_encoder;
struct exynos_drm_manager *manager;
- int dpms;
+ int dpms;
+ bool updated;
};
static void exynos_drm_connector_power(struct drm_encoder *encoder, int mode)
switch (mode) {
case DRM_MODE_DPMS_ON:
if (manager_ops && manager_ops->apply)
- manager_ops->apply(manager->dev);
+ if (!exynos_encoder->updated)
+ manager_ops->apply(manager->dev);
+
exynos_drm_connector_power(encoder, mode);
exynos_encoder->dpms = mode;
break;
case DRM_MODE_DPMS_OFF:
exynos_drm_connector_power(encoder, mode);
exynos_encoder->dpms = mode;
+ exynos_encoder->updated = false;
break;
default:
DRM_ERROR("unspecified mode %d\n", mode);
static void exynos_drm_encoder_commit(struct drm_encoder *encoder)
{
- struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
+ struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
+ struct exynos_drm_manager *manager = exynos_encoder->manager;
struct exynos_drm_manager_ops *manager_ops = manager->ops;
DRM_DEBUG_KMS("%s\n", __FILE__);
if (manager_ops && manager_ops->commit)
manager_ops->commit(manager->dev);
+
+ /*
+	 * This avoids the overlay data being pushed to the real
+	 * hardware twice: exynos_drm_encoder_dpms() checks this flag
+	 * to see whether the data has already been updated.
+ */
+ exynos_encoder->updated = true;
+
+ /*
+	 * In the setcrtc path there is no other place to update the
+	 * encoder's dpms state, so update it here.
+ */
+ exynos_encoder->dpms = DRM_MODE_DPMS_ON;
}
static void exynos_drm_encoder_disable(struct drm_encoder *encoder)
manager_ops->dpms(manager->dev, mode);
/*
- * set current mode to new one so that data aren't updated into
- * registers by drm_helper_connector_dpms two times.
- *
- * in case that drm_crtc_helper_set_mode() is called,
- * overlay_ops->commit() and manager_ops->commit() callbacks
- * can be called two times, first at drm_crtc_helper_set_mode()
- * and second at drm_helper_connector_dpms().
- * so with this setting, when drm_helper_connector_dpms() is called
- * encoder->funcs->dpms() will be ignored.
- */
- exynos_encoder->dpms = mode;
-
- /*
* if this condition is ok then it means that the crtc is already
* detached from encoder and last function for detaching is properly
* done, so clear pipe from manager to prevent repeated call.
* because the setting for disabling the overlay will be updated
* at vsync.
*/
- if (overlay_ops->wait_for_vblank)
+ if (overlay_ops && overlay_ops->wait_for_vblank)
overlay_ops->wait_for_vblank(manager->dev);
}
dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr;
fbi->screen_base = buffer->kvaddr + offset;
- fbi->fix.smem_start = (unsigned long)(buffer->dma_addr + offset);
+ fbi->fix.smem_start = (unsigned long)(page_to_phys(buffer->pages[0]) +
+ offset);
fbi->screen_size = size;
fbi->fix.smem_len = size;
unsigned int timing_base;
};
-struct fimd_driver_data exynos4_fimd_driver_data = {
+static struct fimd_driver_data exynos4_fimd_driver_data = {
.timing_base = 0x0,
};
-struct fimd_driver_data exynos5_fimd_driver_data = {
+static struct fimd_driver_data exynos5_fimd_driver_data = {
.timing_base = 0x20000,
};
return ret;
plane->crtc = crtc;
- plane->fb = crtc->fb;
exynos_plane_commit(plane);
exynos_plane_dpms(plane, DRM_MODE_DPMS_ON);
const struct of_device_id *match;
match = of_match_node(of_match_ptr(mixer_match_types),
pdev->dev.of_node);
- drv = match->data;
+ drv = (struct mixer_drv_data *)match->data;
} else {
drv = (struct mixer_drv_data *)
platform_get_device_id(pdev)->driver_data;
goto put_gmch;
}
- i915_kick_out_firmware_fb(dev_priv);
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ i915_kick_out_firmware_fb(dev_priv);
pci_set_master(dev->pdev);
MODULE_PARM_DESC(i915_enable_ppgtt,
"Enable PPGTT (default: true)");
+unsigned int i915_preliminary_hw_support __read_mostly = 0;
+module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 0600);
+MODULE_PARM_DESC(preliminary_hw_support,
+ "Enable preliminary hardware support. "
+ "Enable Haswell and ValleyView Support. "
+ "(default: false)");
+
static struct drm_driver driver;
extern int intel_agp_enabled;
struct intel_device_info *intel_info =
(struct intel_device_info *) ent->driver_data;
+ if (intel_info->is_haswell || intel_info->is_valleyview)
+		if (!i915_preliminary_hw_support) {
+ DRM_ERROR("Preliminary hardware support disabled\n");
+ return -ENODEV;
+ }
+
/* Only bind to function 0 of the device. Early generations
* used function 1 as a placeholder for multi-head. This causes
* us confusion instead, especially on the systems where both
extern int i915_enable_fbc __read_mostly;
extern bool i915_enable_hangcheck __read_mostly;
extern int i915_enable_ppgtt __read_mostly;
+extern unsigned int i915_preliminary_hw_support __read_mostly;
extern int i915_suspend(struct drm_device *dev, pm_message_t state);
extern int i915_resume(struct drm_device *dev);
return VM_FAULT_NOPAGE;
case -ENOMEM:
return VM_FAULT_OOM;
+ case -ENOSPC:
+ return VM_FAULT_SIGBUS;
default:
- WARN_ON_ONCE(ret);
+ WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
return VM_FAULT_SIGBUS;
}
}
sg_set_page(sg, page, PAGE_SIZE, 0);
}
+ obj->pages = st;
+
if (i915_gem_object_needs_bit17_swizzle(obj))
i915_gem_object_do_bit_17_swizzle(obj);
- obj->pages = st;
return 0;
err_pages:
edp = find_section(bdb, BDB_EDP);
if (!edp) {
- if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support) {
- DRM_DEBUG_KMS("No eDP BDB found but eDP panel "
- "supported, assume %dbpp panel color "
- "depth.\n",
- dev_priv->edp.bpp);
- }
+ if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support)
+ DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported.\n");
return;
}
dev_priv->lvds_use_ssc = 1;
dev_priv->lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1);
DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->lvds_ssc_freq);
-
- /* eDP data */
- dev_priv->edp.bpp = 18;
}
static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
int old_dpms;
/* PCH platforms and VLV only support on/off. */
- if (INTEL_INFO(dev)->gen < 5 && mode != DRM_MODE_DPMS_ON)
+ if (INTEL_INFO(dev)->gen >= 5 && mode != DRM_MODE_DPMS_ON)
mode = DRM_MODE_DPMS_OFF;
if (mode == connector->dpms)
intel_encoder_to_crt(to_intel_encoder(encoder));
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_i915_private *dev_priv = dev->dev_private;
- int dpll_md_reg;
- u32 adpa, dpll_md;
-
- dpll_md_reg = DPLL_MD(intel_crtc->pipe);
-
- /*
- * Disable separate mode multiplier used when cloning SDVO to CRT
- * XXX this needs to be adjusted when we really are cloning
- */
- if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
- dpll_md = I915_READ(dpll_md_reg);
- I915_WRITE(dpll_md_reg,
- dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
- }
+ u32 adpa;
adpa = ADPA_HOTPLUG_BITS;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
crt->base.type = INTEL_OUTPUT_ANALOG;
crt->base.cloneable = true;
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(dev) || IS_I830(dev))
crt->base.crtc_mask = (1 << 0);
else
crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
}
}
+ if (intel_encoder->type == INTEL_OUTPUT_EDP) {
+ /* Use VBT settings if we have an eDP panel */
+ unsigned int edp_bpc = dev_priv->edp.bpp / 3;
+
+ if (edp_bpc && edp_bpc < display_bpc) {
+ DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
+ display_bpc = edp_bpc;
+ }
+ continue;
+ }
+
/*
* HDMI is either 12 or 8, so if the display lets 10bpc sneak
* through, clamp it down. (Note: >12bpc will be caught below.)
void (*hook)(struct drm_device *dev);
};
+/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
+struct intel_dmi_quirk {
+ void (*hook)(struct drm_device *dev);
+ const struct dmi_system_id (*dmi_id_list)[];
+};
+
+static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
+{
+ DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
+ return 1;
+}
+
+static const struct intel_dmi_quirk intel_dmi_quirks[] = {
+ {
+ .dmi_id_list = &(const struct dmi_system_id[]) {
+ {
+ .callback = intel_dmi_reverse_brightness,
+ .ident = "NCR Corporation",
+ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, ""),
+ },
+ },
+ { } /* terminating entry */
+ },
+ .hook = quirk_invert_brightness,
+ },
+};
+
static struct intel_quirk intel_quirks[] = {
/* HP Mini needs pipe A force quirk (LP: #322104) */
{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
q->subsystem_device == PCI_ANY_ID))
q->hook(dev);
}
+ for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
+ if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
+ intel_dmi_quirks[i].hook(dev);
+ }
}
/* Disable the VGA plane that we never use */
if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
break;
if (i == intel_dp->lane_count && voltage_tries == 5) {
- if (++loop_tries == 5) {
+ ++loop_tries;
+ if (loop_tries == 5) {
DRM_DEBUG_KMS("too many full retries, give up\n");
break;
}
}
/* Check to see if we've tried the same voltage 5 times */
- if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) != voltage) {
- voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
- voltage_tries = 0;
- } else
+ if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
++voltage_tries;
+ if (voltage_tries == 5) {
+ DRM_DEBUG_KMS("too many voltage retries, give up\n");
+ break;
+ }
+ } else
+ voltage_tries = 0;
+ voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
/* Compute new intel_dp->train_set as requested by target */
intel_get_adjust_train(intel_dp, link_status);
DMI_MATCH(DMI_BOARD_NAME, "D525TUD"),
},
},
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Supermicro X7SPA-H",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X7SPA-H"),
+ },
+ },
{ } /* terminating entry */
};
intel_ring_emit(ring, flip_addr);
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
/* turn overlay off */
- intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
- intel_ring_emit(ring, flip_addr);
- intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ if (IS_I830(dev)) {
+ /* Workaround: Don't disable the overlay fully, since otherwise
+ * it dies on the next OVERLAY_ON cmd. */
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_NOOP);
+ } else {
+ intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
+ intel_ring_emit(ring, flip_addr);
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ }
intel_ring_advance(ring);
return intel_overlay_do_wait_request(overlay, intel_overlay_off_tail);
props.type = BACKLIGHT_RAW;
props.max_brightness = _intel_panel_get_max_backlight(dev);
if (props.max_brightness == 0) {
- DRM_ERROR("Failed to get maximum backlight value\n");
+ DRM_DEBUG_DRIVER("Failed to get maximum backlight value\n");
return -ENODEV;
}
dev_priv->backlight =
if (i915_enable_rc6 >= 0)
return i915_enable_rc6;
- if (INTEL_INFO(dev)->gen == 5) {
-#ifdef CONFIG_INTEL_IOMMU
- /* Disable rc6 on ilk if VT-d is on. */
- if (intel_iommu_gfx_mapped)
- return false;
-#endif
- DRM_DEBUG_DRIVER("Ironlake: only RC6 available\n");
- return INTEL_RC6_ENABLE;
- }
+ /* Disable RC6 on Ironlake */
+ if (INTEL_INFO(dev)->gen == 5)
+ return 0;
if (IS_HASWELL(dev)) {
DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
/* DDC bus used by this SDVO encoder */
uint8_t ddc_bus;
+
+ /*
+ * the sdvo flag gets lost in round trip: dtd->adjusted_mode->dtd
+ */
+ uint8_t dtd_sdvo_flags;
};
struct intel_sdvo_connector {
}
#endif
+static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
+ unsigned if_index, uint8_t tx_rate,
+ uint8_t *data, unsigned length)
+{
+ uint8_t set_buf_index[2] = { if_index, 0 };
+ uint8_t hbuf_size, tmp[8];
+ int i;
+
+ if (!intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_HBUF_INDEX,
+ set_buf_index, 2))
+ return false;
+
+ if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HBUF_INFO,
+ &hbuf_size, 1))
+ return false;
+
+ /* Buffer size is 0 based, hooray! */
+ hbuf_size++;
+
+	DRM_DEBUG_KMS("writing sdvo hbuf: %i, length %u, hbuf_size: %i\n",
+ if_index, length, hbuf_size);
+
+ for (i = 0; i < hbuf_size; i += 8) {
+ memset(tmp, 0, 8);
+ if (i < length)
+ memcpy(tmp, data + i, min_t(unsigned, 8, length - i));
+
+ if (!intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_HBUF_DATA,
+ tmp, 8))
+ return false;
+ }
+
+ return intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_HBUF_TXRATE,
+ &tx_rate, 1);
+}
+
static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
{
struct dip_infoframe avi_if = {
.ver = DIP_VERSION_AVI,
.len = DIP_LEN_AVI,
};
- uint8_t tx_rate = SDVO_HBUF_TX_VSYNC;
- uint8_t set_buf_index[2] = { 1, 0 };
uint8_t sdvo_data[4 + sizeof(avi_if.body.avi)];
- uint64_t *data = (uint64_t *)sdvo_data;
- unsigned i;
intel_dip_infoframe_csum(&avi_if);
sdvo_data[3] = avi_if.checksum;
memcpy(&sdvo_data[4], &avi_if.body, sizeof(avi_if.body.avi));
- if (!intel_sdvo_set_value(intel_sdvo,
- SDVO_CMD_SET_HBUF_INDEX,
- set_buf_index, 2))
- return false;
-
- for (i = 0; i < sizeof(sdvo_data); i += 8) {
- if (!intel_sdvo_set_value(intel_sdvo,
- SDVO_CMD_SET_HBUF_DATA,
- data, 8))
- return false;
- data++;
- }
-
- return intel_sdvo_set_value(intel_sdvo,
- SDVO_CMD_SET_HBUF_TXRATE,
- &tx_rate, 1);
+ return intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF,
+ SDVO_HBUF_TX_VSYNC,
+ sdvo_data, sizeof(sdvo_data));
}
static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo)
return false;
intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
+ intel_sdvo->dtd_sdvo_flags = input_dtd.part2.sdvo_flags;
return true;
}
* adjusted_mode.
*/
intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
+ if (intel_sdvo->is_tv || intel_sdvo->is_lvds)
+ input_dtd.part2.sdvo_flags = intel_sdvo->dtd_sdvo_flags;
if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd))
DRM_INFO("Setting input timings on %s failed\n",
SDVO_NAME(intel_sdvo));
connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
intel_sdvo->is_hdmi = true;
}
- intel_sdvo->base.cloneable = true;
intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
if (intel_sdvo->is_hdmi)
intel_sdvo->is_tv = true;
intel_sdvo->base.needs_tv_clock = true;
- intel_sdvo->base.cloneable = false;
intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
}
- intel_sdvo->base.cloneable = true;
-
intel_sdvo_connector_init(intel_sdvo_connector,
intel_sdvo);
return true;
intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
}
- /* SDVO LVDS is cloneable because the SDVO encoder does the upscaling,
- * as opposed to native LVDS, where we upscale with the panel-fitter
- * (and hence only the native LVDS resolution could be cloned). */
- intel_sdvo->base.cloneable = true;
-
intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
goto err;
return true;
}
+static void intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo)
+{
+ struct drm_device *dev = intel_sdvo->base.base.dev;
+ struct drm_connector *connector, *tmp;
+
+ list_for_each_entry_safe(connector, tmp,
+ &dev->mode_config.connector_list, head) {
+ if (intel_attached_encoder(connector) == &intel_sdvo->base)
+ intel_sdvo_destroy(connector);
+ }
+}
+
static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_connector *intel_sdvo_connector,
int type)
intel_sdvo->caps.output_flags) != true) {
DRM_DEBUG_KMS("SDVO output failed to setup on %s\n",
SDVO_NAME(intel_sdvo));
- goto err;
+ /* Output_setup can leave behind connectors! */
+ goto err_output;
}
+ /*
+ * Cloning SDVO with anything is often impossible, since the SDVO
+ * encoder can request a special input timing mode. And even if that's
+ * not the case we have evidence that cloning a plain unscaled mode with
+ * VGA doesn't really work. Furthermore the cloning flags are way too
+ * simplistic anyway to express such constraints, so just give up on
+ * cloning for SDVO encoders.
+ */
+ intel_sdvo->base.cloneable = false;
+
/* Only enable the hotplug irq if we need it, to work around noisy
* hotplug lines.
*/
/* Set the input timing to the screen. Assume always input 0. */
if (!intel_sdvo_set_target_input(intel_sdvo))
- goto err;
+ goto err_output;
if (!intel_sdvo_get_input_pixel_clock_range(intel_sdvo,
&intel_sdvo->pixel_clock_min,
&intel_sdvo->pixel_clock_max))
- goto err;
+ goto err_output;
DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, "
"clock range %dMHz - %dMHz, "
(SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
return true;
+err_output:
+ intel_sdvo_output_cleanup(intel_sdvo);
+
err:
drm_encoder_cleanup(&intel_encoder->base);
i2c_del_adapter(&intel_sdvo->ddc);
#define SDVO_CMD_SET_AUDIO_STAT 0x91
#define SDVO_CMD_GET_AUDIO_STAT 0x92
#define SDVO_CMD_SET_HBUF_INDEX 0x93
+ #define SDVO_HBUF_INDEX_ELD 0
+ #define SDVO_HBUF_INDEX_AVI_IF 1
#define SDVO_CMD_GET_HBUF_INDEX 0x94
#define SDVO_CMD_GET_HBUF_INFO 0x95
#define SDVO_CMD_SET_HBUF_AV_SPLIT 0x96
nv_wo32(gpuobj, i, 0x00000000);
}
+ if (gpuobj->node) {
+ nouveau_mm_free(&nv_gpuobj(gpuobj->parent)->heap,
+ &gpuobj->node);
+ }
+
if (gpuobj->heap.block_size)
nouveau_mm_fini(&gpuobj->heap);
node = kzalloc(sizeof(*node), GFP_KERNEL);
if (!node)
return -ENOMEM;
- node->offset = roundup(offset, mm->block_size);
- node->length = rounddown(offset + length, mm->block_size) - node->offset;
+
+ if (length) {
+ node->offset = roundup(offset, mm->block_size);
+ node->length = rounddown(offset + length, mm->block_size);
+ node->length -= node->offset;
+ }
list_add_tail(&node->nl_entry, &mm->nodes);
list_add_tail(&node->fl_entry, &mm->free);
mm->heap_nodes++;
- mm->heap_size += length;
return 0;
}
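A minimal sketch of what the new rounding computes, under assumed values
(block_size = 0x1000, offset = 0x1234, length = 0x5000) that are not taken
from the patch; roundup() and rounddown() are the usual kernel helpers:

    u32 off = roundup(0x1234, 0x1000);            /* 0x2000 */
    u32 len = rounddown(0x1234 + 0x5000, 0x1000); /* 0x6000 */
    len -= off;                                   /* node spans 0x2000..0x5fff */

The added "if (length)" guard matters because with length == 0 the old code
computed rounddown(offset) - roundup(offset), which underflows a u32 whenever
offset is not already block-aligned.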
int nodes = 0;
list_for_each_entry(node, &mm->nodes, nl_entry) {
- if (nodes++ == mm->heap_nodes)
+ if (WARN_ON(nodes++ == mm->heap_nodes))
return -EBUSY;
}
* Authors: Ben Skeggs
*/
+#include <subdev/bar.h>
+
#include <engine/software.h>
#include <engine/disp.h>
static void
nv50_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc)
{
+ struct nouveau_bar *bar = nouveau_bar(priv);
struct nouveau_disp *disp = &priv->base;
struct nouveau_software_chan *chan, *temp;
unsigned long flags;
if (chan->vblank.crtc != crtc)
continue;
- nv_wr32(priv, 0x001704, chan->vblank.channel);
- nv_wr32(priv, 0x001710, 0x80000000 | chan->vblank.ctxdma);
-
- if (nv_device(priv)->chipset == 0x50) {
- nv_wr32(priv, 0x001570, chan->vblank.offset);
- nv_wr32(priv, 0x001574, chan->vblank.value);
+ if (nv_device(priv)->chipset >= 0xc0) {
+ nv_wr32(priv, 0x001718, 0x80000000 | chan->vblank.channel);
+ bar->flush(bar);
+ nv_wr32(priv, 0x06000c,
+ upper_32_bits(chan->vblank.offset));
+ nv_wr32(priv, 0x060010,
+ lower_32_bits(chan->vblank.offset));
+ nv_wr32(priv, 0x060014, chan->vblank.value);
} else {
- if (nv_device(priv)->chipset >= 0xc0) {
- nv_wr32(priv, 0x06000c,
- upper_32_bits(chan->vblank.offset));
+ nv_wr32(priv, 0x001704, chan->vblank.channel);
+ nv_wr32(priv, 0x001710, 0x80000000 | chan->vblank.ctxdma);
+ bar->flush(bar);
+ if (nv_device(priv)->chipset == 0x50) {
+ nv_wr32(priv, 0x001570, chan->vblank.offset);
+ nv_wr32(priv, 0x001574, chan->vblank.value);
+ } else {
+ nv_wr32(priv, 0x060010, chan->vblank.offset);
+ nv_wr32(priv, 0x060014, chan->vblank.value);
}
- nv_wr32(priv, 0x060010, chan->vblank.offset);
- nv_wr32(priv, 0x060014, chan->vblank.value);
}
list_del(&chan->vblank.head);
});
}
-void
+int
nv40_grctx_init(struct nouveau_device *device, u32 *size)
{
- u32 ctxprog[256], i;
+ u32 *ctxprog = kmalloc(256 * 4, GFP_KERNEL), i;
struct nouveau_grctx ctx = {
.device = device,
.mode = NOUVEAU_GRCTX_PROG,
.data = ctxprog,
- .ctxprog_max = ARRAY_SIZE(ctxprog)
+ .ctxprog_max = 256,
};
+ if (!ctxprog)
+ return -ENOMEM;
+
nv40_grctx_generate(&ctx);
nv_wr32(device, 0x400324, 0);
for (i = 0; i < ctx.ctxprog_len; i++)
nv_wr32(device, 0x400328, ctxprog[i]);
*size = ctx.ctxvals_pos * 4;
+
+ kfree(ctxprog);
+ return 0;
}
static int
nv40_graph_context_fini(struct nouveau_object *object, bool suspend)
{
- struct nv04_graph_priv *priv = (void *)object->engine;
- struct nv04_graph_chan *chan = (void *)object;
+ struct nv40_graph_priv *priv = (void *)object->engine;
+ struct nv40_graph_chan *chan = (void *)object;
u32 inst = 0x01000000 | nv_gpuobj(chan)->addr >> 4;
int ret = 0;
return ret;
/* generate and upload context program */
- nv40_grctx_init(nv_device(priv), &priv->size);
+ ret = nv40_grctx_init(nv_device(priv), &priv->size);
+ if (ret)
+ return ret;
/* No context present currently */
nv_wr32(priv, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
return !(0x0baf & (1 << (device->chipset & 0x0f)));
}
-void nv40_grctx_init(struct nouveau_device *, u32 *size);
+int nv40_grctx_init(struct nouveau_device *, u32 *size);
void nv40_grctx_fill(struct nouveau_device *, struct nouveau_gpuobj *);
#endif
};
struct nv40_mpeg_chan {
- struct nouveau_mpeg base;
+ struct nouveau_mpeg_chan base;
};
/*******************************************************************************
u32 block_size;
int heap_nodes;
- u32 heap_size;
};
int nouveau_mm_init(struct nouveau_mm *, u32 offset, u32 length, u32 block);
return temp;
}
-static inline bool
-nv_strncmp(void *obj, u32 addr, u32 len, const char *str)
+static inline int
+nv_memcmp(void *obj, u32 addr, const char *str, u32 len)
{
+ unsigned char c1, c2;
+
while (len--) {
- if (nv_ro08(obj, addr++) != *(str++))
- return false;
+ c1 = nv_ro08(obj, addr++);
+ c2 = *(str++);
+ if (c1 != c2)
+ return c1 - c2;
}
- return true;
+ return 0;
}
#endif
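The helper now returns a memcmp-style difference instead of a bool, so
callers test for equality with == 0, as the DCB probe further down in this
series does:

    if (!nv_memcmp(bios, dcb - 7, "DEV_REC", 7))
            /* the 7 bytes before the DCB table spell "DEV_REC" */;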
int clk, struct nouveau_pll_vals *);
int nv04_clock_pll_prog(struct nouveau_clock *, u32 reg1,
struct nouveau_pll_vals *);
-
+int nva3_clock_pll_calc(struct nouveau_clock *, struct nvbios_pll *,
+ int clk, struct nouveau_pll_vals *);
#endif
}
data = of_get_property(dn, "NVDA,BMP", &size);
- if (data) {
+ if (data && size) {
bios->size = size;
bios->data = kmalloc(bios->size, GFP_KERNEL);
if (bios->data)
goto out;
bios->size = nv_rd08(bios, 0x700002) * 512;
+ if (!bios->size)
+ goto out;
+
bios->data = kmalloc(bios->size, GFP_KERNEL);
if (bios->data) {
for (i = 0; i < bios->size; i++)
/* read entire bios image to system memory */
bios->size = nv_rd08(bios, 0x300002) * 512;
+ if (!bios->size)
+ goto out;
+
bios->data = kmalloc(bios->size, GFP_KERNEL);
if (bios->data) {
for (i = 0; i < bios->size; i++)
{
struct pci_dev *pdev = nv_device(bios)->pdev;
int ret, cnt, i;
- u8 data[3];
- if (!nouveau_acpi_rom_supported(pdev))
+ if (!nouveau_acpi_rom_supported(pdev)) {
+ bios->data = NULL;
return;
+ }
bios->size = 0;
- if (nouveau_acpi_get_bios_chunk(data, 0, 3) == 3)
- bios->size = data[2] * 512;
+ bios->data = kmalloc(4096, GFP_KERNEL);
+ if (bios->data) {
+ if (nouveau_acpi_get_bios_chunk(bios->data, 0, 4096) == 4096)
+ bios->size = bios->data[2] * 512;
+ kfree(bios->data);
+ }
+
+ if (!bios->size)
+ return;
bios->data = kmalloc(bios->size, GFP_KERNEL);
for (i = 0; bios->data && i < bios->size; i += cnt) {
static int
nouveau_bios_score(struct nouveau_bios *bios, const bool writeable)
{
- if (!bios->data || bios->data[0] != 0x55 || bios->data[1] != 0xAA) {
+ if (bios->size < 3 || !bios->data || bios->data[0] != 0x55 ||
+ bios->data[1] != 0xAA) {
nv_info(bios, "... signature not found\n");
return 0;
}
- if (nvbios_checksum(bios->data, bios->data[2] * 512)) {
+ if (nvbios_checksum(bios->data,
+ min_t(u32, bios->data[2] * 512, bios->size))) {
nv_info(bios, "... checksum invalid\n");
/* if a ro image is somewhat bad, it's probably all rubbish */
return writeable ? 2 : 1;
}
} else
if (*ver >= 0x15) {
- if (!nv_strncmp(bios, dcb - 7, 7, "DEV_REC")) {
+ if (!nv_memcmp(bios, dcb - 7, "DEV_REC", 7)) {
u16 i2c = nv_ro16(bios, dcb + 2);
*hdr = 4;
*cnt = (i2c - dcb) / 10;
while (map->reg) {
if (map->reg == reg && *ver >= 0x20) {
u16 addr = (data += hdr);
+ *type = map->type;
while (cnt--) {
- if (nv_ro32(bios, data) == map->reg) {
- *type = map->type;
+ if (nv_ro32(bios, data) == map->reg)
return data;
- }
data += *len;
}
return addr;
while (map->reg) {
if (map->type == type && *ver >= 0x20) {
u16 addr = (data += hdr);
+ *reg = map->reg;
while (cnt--) {
- if (nv_ro32(bios, data) == map->reg) {
- *reg = map->reg;
+ if (nv_ro32(bios, data) == map->reg)
return data;
- }
data += *len;
}
return addr;
return ret;
}
+int
+nva3_clock_pll_calc(struct nouveau_clock *clock, struct nvbios_pll *info,
+ int clk, struct nouveau_pll_vals *pv)
+{
+ int ret, N, M, P;
+
+ ret = nva3_pll_calc(clock, info, clk, &N, NULL, &M, &P);
+
+ if (ret > 0) {
+ pv->refclk = info->refclk;
+ pv->N1 = N;
+ pv->M1 = M;
+ pv->log2P = P;
+ }
+ return ret;
+}
+
+
static int
nva3_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
return ret;
priv->base.pll_set = nva3_clock_pll_set;
+ priv->base.pll_calc = nva3_clock_pll_calc;
return 0;
}
return ret;
priv->base.pll_set = nvc0_clock_pll_set;
+ priv->base.pll_calc = nva3_clock_pll_calc;
return 0;
}
((priv->base.ram.size & 0x000000ff) << 32);
tags = nv_rd32(priv, 0x100320);
- if (tags) {
- ret = nouveau_mm_init(&priv->base.tags, 0, tags, 1);
- if (ret)
- return ret;
+ ret = nouveau_mm_init(&priv->base.tags, 0, tags, 1);
+ if (ret)
+ return ret;
- nv_debug(priv, "%d compression tags\n", tags);
- }
+ nv_debug(priv, "%d compression tags\n", tags);
size = (priv->base.ram.size >> 12) - rsvd_head - rsvd_tail;
switch (device->chipset) {
return ret;
priv->base.ram.stolen = (u64)nv_rd32(priv, 0x100e10) << 12;
+ priv->base.ram.type = NV_MEM_TYPE_STOLEN;
break;
default:
ret = nouveau_mm_init(&priv->base.vram, rsvd_head, size,
case DCB_I2C_NVIO_BIT:
port->drive = info.drive & 0x0f;
if (device->card_type < NV_D0) {
- if (info.drive >= ARRAY_SIZE(nv50_i2c_port))
+ if (port->drive >= ARRAY_SIZE(nv50_i2c_port))
break;
port->drive = nv50_i2c_port[port->drive];
port->sense = port->drive;
static void
nv41_vm_flush(struct nouveau_vm *vm)
{
- struct nv04_vm_priv *priv = (void *)vm->vmm;
+ struct nv04_vmmgr_priv *priv = (void *)vm->vmm;
mutex_lock(&nv_subdev(priv)->mutex);
nv_wr32(priv, 0x100810, 0x00000022);
struct nv04_vmmgr_priv *priv;
int ret;
- if (!nouveau_boolopt(device->cfgopt, "NvPCIE", true)) {
+ if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP) ||
+ !nouveau_boolopt(device->cfgopt, "NvPCIE", true)) {
return nouveau_object_ctor(parent, engine, &nv04_vmmgr_oclass,
data, size, pobject);
}
struct nv04_vmmgr_priv *priv;
int ret;
- if (!nouveau_boolopt(device->cfgopt, "NvPCIE", true)) {
+ if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP) ||
+ !nouveau_boolopt(device->cfgopt, "NvPCIE", true)) {
return nouveau_object_ctor(parent, engine, &nv04_vmmgr_oclass,
data, size, pobject);
}
if (unlikely(!abi16))
return -ENOMEM;
+
+ if (!drm->channel)
+ return nouveau_abi16_put(abi16, -ENODEV);
+
client = nv_client(abi16->client);
if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
* valid - it's not (rh#613284)
*/
if (nv_encoder->dcb->lvdsconf.use_acpi_for_edid) {
- if (!(nv_connector->edid = nouveau_acpi_edid(dev, connector))) {
+ if ((nv_connector->edid = nouveau_acpi_edid(dev, connector))) {
status = connector_status_connected;
goto out;
}
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_disp *pdisp = nouveau_disp(drm->device);
struct nouveau_display *disp;
+ u32 pclass = dev->pdev->class >> 8;
int ret, gen;
disp = drm->display = kzalloc(sizeof(*disp), GFP_KERNEL);
drm_kms_helper_poll_init(dev);
drm_kms_helper_poll_disable(dev);
- if (nv_device(drm->device)->card_type < NV_50)
- ret = nv04_display_create(dev);
- else
- if (nv_device(drm->device)->card_type < NV_D0)
- ret = nv50_display_create(dev);
- else
- ret = nvd0_display_create(dev);
- if (ret)
- goto disp_create_err;
-
- if (dev->mode_config.num_crtc) {
- ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
+ if (nouveau_modeset == 1 ||
+ (nouveau_modeset < 0 && pclass == PCI_CLASS_DISPLAY_VGA)) {
+ if (nv_device(drm->device)->card_type < NV_50)
+ ret = nv04_display_create(dev);
+ else
+ if (nv_device(drm->device)->card_type < NV_D0)
+ ret = nv50_display_create(dev);
+ else
+ ret = nvd0_display_create(dev);
if (ret)
- goto vblank_err;
+ goto disp_create_err;
+
+ if (dev->mode_config.num_crtc) {
+ ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
+ if (ret)
+ goto vblank_err;
+ }
+
+ nouveau_backlight_init(dev);
}
- nouveau_backlight_init(dev);
return 0;
vblank_err:
nouveau_backlight_exit(dev);
drm_vblank_cleanup(dev);
- disp->dtor(dev);
+ if (disp->dtor)
+ disp->dtor(dev);
drm_kms_helper_poll_fini(dev);
drm_mode_config_cleanup(dev);
if (ret)
goto fail;
- ret = ttm_bo_reserve(&old_bo->bo, false, false, false, 0);
- if (ret)
- goto fail_unreserve;
+ if (likely(old_bo != new_bo)) {
+ ret = ttm_bo_reserve(&old_bo->bo, false, false, false, 0);
+ if (ret)
+ goto fail_unreserve;
+ }
return 0;
nouveau_bo_fence(new_bo, fence);
ttm_bo_unreserve(&new_bo->bo);
- nouveau_bo_fence(old_bo, fence);
- ttm_bo_unreserve(&old_bo->bo);
+ if (likely(old_bo != new_bo)) {
+ nouveau_bo_fence(old_bo, fence);
+ ttm_bo_unreserve(&old_bo->bo);
+ }
nouveau_bo_unpin(old_bo);
}
static int nouveau_noaccel = 0;
module_param_named(noaccel, nouveau_noaccel, int, 0400);
-MODULE_PARM_DESC(modeset, "enable driver");
-static int nouveau_modeset = -1;
+MODULE_PARM_DESC(modeset, "enable driver (default: auto, "
+ "0 = disabled, 1 = enabled, 2 = headless)");
+int nouveau_modeset = -1;
module_param_named(modeset, nouveau_modeset, int, 0400);
static struct drm_driver driver;
/* initialise synchronisation routines */
if (device->card_type < NV_10) ret = nv04_fence_create(drm);
- else if (device->chipset < 0x84) ret = nv10_fence_create(drm);
+ else if (device->card_type < NV_50) ret = nv10_fence_create(drm);
+ else if (device->chipset < 0x84) ret = nv50_fence_create(drm);
else if (device->card_type < NV_C0) ret = nv84_fence_create(drm);
else ret = nvc0_fence_create(drm);
if (ret) {
nouveau_pm_fini(dev);
- nouveau_display_fini(dev);
+ if (dev->mode_config.num_crtc)
+ nouveau_display_fini(dev);
nouveau_display_destroy(dev);
nouveau_irq_fini(dev);
pm_state.event == PM_EVENT_PRETHAW)
return 0;
- NV_INFO(drm, "suspending fbcon...\n");
- nouveau_fbcon_set_suspend(dev, 1);
+ if (dev->mode_config.num_crtc) {
+ NV_INFO(drm, "suspending fbcon...\n");
+ nouveau_fbcon_set_suspend(dev, 1);
- NV_INFO(drm, "suspending display...\n");
- ret = nouveau_display_suspend(dev);
- if (ret)
- return ret;
+ NV_INFO(drm, "suspending display...\n");
+ ret = nouveau_display_suspend(dev);
+ if (ret)
+ return ret;
+ }
NV_INFO(drm, "evicting buffers...\n");
ttm_bo_evict_mm(&drm->ttm.bdev, TTM_PL_VRAM);
nouveau_client_init(&cli->base);
}
- NV_INFO(drm, "resuming display...\n");
- nouveau_display_resume(dev);
+ if (dev->mode_config.num_crtc) {
+ NV_INFO(drm, "resuming display...\n");
+ nouveau_display_resume(dev);
+ }
return ret;
}
nouveau_irq_postinstall(dev);
nouveau_pm_resume(dev);
- NV_INFO(drm, "resuming display...\n");
- nouveau_display_resume(dev);
+ if (dev->mode_config.num_crtc) {
+ NV_INFO(drm, "resuming display...\n");
+ nouveau_display_resume(dev);
+ }
return 0;
}
#ifdef CONFIG_VGA_CONSOLE
if (vgacon_text_force())
nouveau_modeset = 0;
- else
#endif
- nouveau_modeset = 1;
}
if (!nouveau_modeset)
nv_info((cli), fmt, ##args); \
} while (0)
+extern int nouveau_modeset;
+
#endif
nv_subdev(pmc)->intr(nv_subdev(pmc));
- if (device->card_type >= NV_D0) {
- if (nv_rd32(device, 0x000100) & 0x04000000)
- nvd0_display_intr(dev);
- } else
- if (device->card_type >= NV_50) {
- if (nv_rd32(device, 0x000100) & 0x04000000)
- nv50_display_intr(dev);
+ if (dev->mode_config.num_crtc) {
+ if (device->card_type >= NV_D0) {
+ if (nv_rd32(device, 0x000100) & 0x04000000)
+ nvd0_display_intr(dev);
+ } else
+ if (device->card_type >= NV_50) {
+ if (nv_rd32(device, 0x000100) & 0x04000000)
+ nv50_display_intr(dev);
+ }
}
return IRQ_HANDLED;
NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode);
if (blue == 0x18) {
- NV_INFO(drm, "Load detected on head A\n");
+ NV_DEBUG(drm, "Load detected on head A\n");
return connector_status_connected;
}
if (nv17_dac_sample_load(encoder) &
NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI) {
- NV_INFO(drm, "Load detected on output %c\n",
- '@' + ffs(dcb->or));
+ NV_DEBUG(drm, "Load detected on output %c\n",
+ '@' + ffs(dcb->or));
return connector_status_connected;
} else {
return connector_status_disconnected;
helper->dpms(encoder, DRM_MODE_DPMS_ON);
- NV_INFO(drm, "Output %s is running on CRTC %d using output %c\n",
- drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base),
- nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
+ NV_DEBUG(drm, "Output %s is running on CRTC %d using output %c\n",
+ drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base),
+ nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
}
void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable)
return;
nv_encoder->last_dpms = mode;
- NV_INFO(drm, "Setting dpms mode %d on vga encoder (output %d)\n",
- mode, nv_encoder->dcb->index);
+ NV_DEBUG(drm, "Setting dpms mode %d on vga encoder (output %d)\n",
+ mode, nv_encoder->dcb->index);
nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON);
}
helper->dpms(encoder, DRM_MODE_DPMS_ON);
- NV_INFO(drm, "Output %s is running on CRTC %d using output %c\n",
- drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base),
- nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
+ NV_DEBUG(drm, "Output %s is running on CRTC %d using output %c\n",
+ drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base),
+ nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
}
static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
return;
nv_encoder->last_dpms = mode;
- NV_INFO(drm, "Setting dpms mode %d on lvds encoder (output %d)\n",
- mode, nv_encoder->dcb->index);
+ NV_DEBUG(drm, "Setting dpms mode %d on lvds encoder (output %d)\n",
+ mode, nv_encoder->dcb->index);
if (was_powersaving && is_powersaving_dpms(mode))
return;
return;
nv_encoder->last_dpms = mode;
- NV_INFO(drm, "Setting dpms mode %d on tmds encoder (output %d)\n",
- mode, nv_encoder->dcb->index);
+ NV_DEBUG(drm, "Setting dpms mode %d on tmds encoder (output %d)\n",
+ mode, nv_encoder->dcb->index);
nv04_dfp_update_backlight(encoder, mode);
nv04_dfp_update_fp_control(encoder, mode);
struct nv04_mode_state *state = &nv04_display(dev)->mode_reg;
uint8_t crtc1A;
- NV_INFO(drm, "Setting dpms mode %d on TV encoder (output %d)\n",
- mode, nv_encoder->dcb->index);
+ NV_DEBUG(drm, "Setting dpms mode %d on TV encoder (output %d)\n",
+ mode, nv_encoder->dcb->index);
state->pllsel &= ~(PLLSEL_TV_CRTC1_MASK | PLLSEL_TV_CRTC2_MASK);
helper->dpms(encoder, DRM_MODE_DPMS_ON);
- NV_INFO(drm, "Output %s is running on CRTC %d using output %c\n",
- drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), nv_crtc->index,
- '@' + ffs(nv_encoder->dcb->or));
+ NV_DEBUG(drm, "Output %s is running on CRTC %d using output %c\n",
+ drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
}
static void nv04_tv_destroy(struct drm_encoder *encoder)
DRM_ERROR("unable to allocate a PPLL\n");
return ATOM_PPLL_INVALID;
} else {
- if (ASIC_IS_AVIVO(rdev)) {
- /* in DP mode, the DP ref clock can come from either PPLL
- * depending on the asic:
- * DCE3: PPLL1 or PPLL2
- */
- if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) {
- /* use the same PPLL for all DP monitors */
- pll = radeon_get_shared_dp_ppll(crtc);
- if (pll != ATOM_PPLL_INVALID)
- return pll;
- } else {
- /* use the same PPLL for all monitors with the same clock */
- pll = radeon_get_shared_nondp_ppll(crtc);
- if (pll != ATOM_PPLL_INVALID)
- return pll;
- }
- /* all other cases */
- pll_in_use = radeon_get_pll_use_mask(crtc);
- if (!(pll_in_use & (1 << ATOM_PPLL1)))
- return ATOM_PPLL1;
- if (!(pll_in_use & (1 << ATOM_PPLL2)))
- return ATOM_PPLL2;
- DRM_ERROR("unable to allocate a PPLL\n");
- return ATOM_PPLL_INVALID;
- } else {
- /* on pre-R5xx asics, the crtc to pll mapping is hardcoded */
- return radeon_crtc->crtc_id;
- }
+ /* on pre-R5xx asics, the crtc to pll mapping is hardcoded */
+ /* some atombios code (observed on some DCE2/DCE3 boards) has a bug:
+ * the matching between pll and crtc is done through
+ * PCLK_CRTC[1|2]_CNTL (0x480/0x484), but the atombios code uses the
+ * pll id (1 or 2) to select which register to write, i.e. if using
+ * pll1 it will use PCLK_CRTC1_CNTL (0x480) and if using pll2
+ * it will use PCLK_CRTC2_CNTL (0x484); it then uses the crtc id to
+ * choose which value to write, which is the reverse of the
+ * register logic. So the only cases that work are when the pll id
+ * is the same as the crtc id, or when both plls and both crtcs are
+ * enabled and use the same clock.
+ *
+ * So just return the crtc id as if crtc and pll were hard linked
+ * together even if they aren't.
+ */
+ return radeon_crtc->crtc_id;
}
}
struct radeon_backlight_privdata *pdata;
struct radeon_encoder_atom_dig *dig;
u8 backlight_level;
+ char bl_name[16];
if (!radeon_encoder->enc_priv)
return;
memset(&props, 0, sizeof(props));
props.max_brightness = RADEON_MAX_BL_LEVEL;
props.type = BACKLIGHT_RAW;
- bd = backlight_device_register("radeon_bl", &drm_connector->kdev,
+ snprintf(bl_name, sizeof(bl_name),
+ "radeon_bl%d", dev->primary->index);
+ bd = backlight_device_register(bl_name, &drm_connector->kdev,
pdata, &radeon_atom_backlight_ops, &props);
if (IS_ERR(bd)) {
DRM_ERROR("Backlight registration failed\n");
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
/* some early dce3.2 boards have a bug in their transmitter control table */
- if ((rdev->family != CHIP_RV710) || (rdev->family != CHIP_RV730))
+ if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730))
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
}
if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
break;
udelay(1);
}
+ } else {
+ save->crtc_enabled[i] = false;
}
}
WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
for (i = 0; i < rdev->num_crtc; i++) {
- if (save->crtc_enabled) {
+ if (save->crtc_enabled[i]) {
if (ASIC_IS_DCE6(rdev)) {
tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
/* macro tile width & height */
palign = (8 * surf->bankw * track->npipes) * surf->mtilea;
halign = (8 * surf->bankh * surf->nbanks) / surf->mtilea;
- mtileb = (palign / 8) * (halign / 8) * tileb;;
+ mtileb = (palign / 8) * (halign / 8) * tileb;
mtile_pr = surf->nbx / palign;
mtile_ps = (mtile_pr * surf->nby) / halign;
surf->layer_size = mtile_ps * mtileb * slice_pt;
/* check config regs */
switch (reg) {
case GRBM_GFX_INDEX:
+ case CP_STRMOUT_CNTL:
+ case CP_COHER_CNTL:
+ case CP_COHER_SIZE:
case VGT_VTX_VECT_EJECT_REG:
case VGT_CACHE_INVALIDATION:
case VGT_GS_VERTEX_REUSE:
case CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS:
return true;
default:
+ DRM_ERROR("Invalid register 0x%x in CS\n", reg);
return false;
}
}
#define FB_READ_EN (1 << 0)
#define FB_WRITE_EN (1 << 1)
+#define CP_STRMOUT_CNTL 0x84FC
+
+#define CP_COHER_CNTL 0x85F0
+#define CP_COHER_SIZE 0x85F4
#define CP_COHER_BASE 0x85F8
#define CP_STALLED_STAT1 0x8674
#define CP_STALLED_STAT2 0x8678
{
struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
- int i;
- radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, 1 + count * 2));
- radeon_ring_write(ring, pe);
- radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
- for (i = 0; i < count; ++i) {
- uint64_t value = 0;
- if (flags & RADEON_VM_PAGE_SYSTEM) {
- value = radeon_vm_map_gart(rdev, addr);
- value &= 0xFFFFFFFFFFFFF000ULL;
- addr += incr;
-
- } else if (flags & RADEON_VM_PAGE_VALID) {
- value = addr;
- addr += incr;
- }
+ while (count) {
+ unsigned ndw = 1 + count * 2;
+ if (ndw > 0x3FFF)
+ ndw = 0x3FFF;
+
+ radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, ndw));
+ radeon_ring_write(ring, pe);
+ radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+ for (; ndw > 1; ndw -= 2, --count, pe += 8) {
+ uint64_t value = 0;
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ value = radeon_vm_map_gart(rdev, addr);
+ value &= 0xFFFFFFFFFFFFF000ULL;
+ addr += incr;
+
+ } else if (flags & RADEON_VM_PAGE_VALID) {
+ value = addr;
+ addr += incr;
+ }
- value |= r600_flags;
- radeon_ring_write(ring, value);
- radeon_ring_write(ring, upper_32_bits(value));
+ value |= r600_flags;
+ radeon_ring_write(ring, value);
+ radeon_ring_write(ring, upper_32_bits(value));
+ }
}
}
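The 0x3FFF cap matches the 14-bit count mask that the PACKET3() header
macro applies, so a single ME_WRITE cannot carry an arbitrarily large
page-table update. A sketch of the resulting per-packet budget (an
illustration with the figures from the hunk, not code from the patch):

    unsigned max_ndw  = 0x3FFF;               /* 14-bit PACKET3 count  */
    unsigned max_ptes = (max_ndw - 1) / 2;    /* 1-dword pe prefix,
                                                 2 dwords per PTE      */
    unsigned packets  = DIV_ROUND_UP(count, max_ptes);

Larger updates simply loop, emitting one header per chunk; the SI-style
WRITE_DATA variant later in the series does the same with a 0x3FFE cap,
since its ndw accounting starts at 2 + count * 2.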
/* bits 0-7 are the VM contexts0-7 */
radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0));
radeon_ring_write(ring, 1 << vm->id);
+
+ /* sync PFP to ME, otherwise we might get invalid PFP reads */
+ radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
+ radeon_ring_write(ring, 0x0);
}
#define PACKET3_MPEG_INDEX 0x3A
#define PACKET3_WAIT_REG_MEM 0x3C
#define PACKET3_MEM_WRITE 0x3D
+#define PACKET3_PFP_SYNC_ME 0x42
#define PACKET3_SURFACE_SYNC 0x43
# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
# define PACKET3_CB1_DEST_BASE_ENA (1 << 7)
/* Intel 82830 830 Chipset Host Bridge / Mobility M6 LY Needs AGPMode 2 (fdo #17360)*/
{ PCI_VENDOR_ID_INTEL, 0x3575, PCI_VENDOR_ID_ATI, 0x4c59,
PCI_VENDOR_ID_DELL, 0x00e3, 2},
- /* Intel 82852/82855 host bridge / Mobility FireGL 9000 R250 Needs AGPMode 1 (lp #296617) */
+ /* Intel 82852/82855 host bridge / Mobility FireGL 9000 RV250 Needs AGPMode 1 (lp #296617) */
{ PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4c66,
PCI_VENDOR_ID_DELL, 0x0149, 1},
+ /* Intel 82855PM host bridge / Mobility FireGL 9000 RV250 Needs AGPMode 1 for suspend/resume */
+ { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c66,
+ PCI_VENDOR_ID_IBM, 0x0531, 1},
/* Intel 82852/82855 host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (deb #467460) */
{ PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50,
0x1025, 0x0061, 1},
atpx_arg_elements[1].integer.value = 0;
}
- status = acpi_evaluate_object(handle, "ATPX", &atpx_arg, &buffer);
+ status = acpi_evaluate_object(handle, NULL, &atpx_arg, &buffer);
/* Fail only if calling the method fails and ATPX is supported */
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
}
/**
- * radeon_atpx_switchto - switch to the requested GPU
+ * radeon_atpx_power_state - power down/up the requested GPU
*
- * @id: GPU to switch to
+ * @id: GPU to power down/up
* @state: requested power state (0 = off, 1 = on)
*
* Execute the necessary ATPX function to power down/up the discrete GPU
}
/**
- * radeon_atpx_pci_probe_handle - look up the ATRM and ATPX handles
+ * radeon_atpx_pci_probe_handle - look up the ATPX handle
*
* @pdev: pci device
*
- * Look up the ATPX and ATRM handles (all asics).
+ * Look up the ATPX handles (all asics).
* Returns true if the handles are found, false if not.
*/
static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
struct drm_mode_object *obj;
int i;
enum drm_connector_status ret = connector_status_disconnected;
- bool dret = false;
+ bool dret = false, broken_edid = false;
if (!force && radeon_check_hpd_status_unchanged(connector))
return connector->status;
ret = connector_status_disconnected;
DRM_ERROR("%s: detected RS690 floating bus bug, stopping ddc detect\n", drm_get_connector_name(connector));
radeon_connector->ddc_bus = NULL;
+ } else {
+ ret = connector_status_connected;
+ broken_edid = true; /* defer use_digital to later */
}
} else {
radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
encoder_funcs = encoder->helper_private;
if (encoder_funcs->detect) {
- if (ret != connector_status_connected) {
- ret = encoder_funcs->detect(encoder, connector);
- if (ret == connector_status_connected) {
- radeon_connector->use_digital = false;
+ if (!broken_edid) {
+ if (ret != connector_status_connected) {
+ /* deal with analog monitors without DDC */
+ ret = encoder_funcs->detect(encoder, connector);
+ if (ret == connector_status_connected) {
+ radeon_connector->use_digital = false;
+ }
+ if (ret != connector_status_disconnected)
+ radeon_connector->detected_by_load = true;
}
- if (ret != connector_status_disconnected)
- radeon_connector->detected_by_load = true;
+ } else {
+ enum drm_connector_status lret;
+ /* assume digital unless load detected otherwise */
+ radeon_connector->use_digital = true;
+ lret = encoder_funcs->detect(encoder, connector);
+ DRM_DEBUG_KMS("load_detect %x returned: %x\n", encoder->encoder_type, lret);
+ if (lret == connector_status_connected)
+ radeon_connector->use_digital = false;
}
break;
}
*/
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
+ uint64_t limit = (uint64_t)radeon_vram_limit << 20;
+
mc->vram_start = base;
if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) {
dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
mc->mc_vram_size = mc->aper_size;
}
mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
- if (radeon_vram_limit && radeon_vram_limit < mc->real_vram_size)
- mc->real_vram_size = radeon_vram_limit;
+ if (limit && limit < mc->real_vram_size)
+ mc->real_vram_size = limit;
dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
mc->mc_vram_size >> 20, mc->vram_start,
mc->vram_end, mc->real_vram_size >> 20);
}
/**
+ * radeon_check_pot_argument - check that argument is a power of two
+ *
+ * @arg: value to check
+ *
+ * Validates that a certain argument is a power of two (all asics).
+ * Returns true if argument is valid.
+ */
+static bool radeon_check_pot_argument(int arg)
+{
+ return (arg & (arg - 1)) == 0;
+}
+
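The test relies on the classic bit trick: a power of two has a single set
bit, so subtracting one flips that bit and sets every bit below it, and the
AND clears everything. Two assumed values as a worked example:

    radeon_check_pot_argument(512);  /* 0x200 & 0x1ff == 0    -> true  */
    radeon_check_pot_argument(96);   /* 0x60  & 0x5f  == 0x40 -> false */

Note that 0 also passes; that suits the vram limit (0 means no limit), and
for the gart size the preceding "< 32" test already catches it.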
+/**
* radeon_check_arguments - validate module params
*
* @rdev: radeon_device pointer
static void radeon_check_arguments(struct radeon_device *rdev)
{
/* vramlimit must be a power of two */
- switch (radeon_vram_limit) {
- case 0:
- case 4:
- case 8:
- case 16:
- case 32:
- case 64:
- case 128:
- case 256:
- case 512:
- case 1024:
- case 2048:
- case 4096:
- break;
- default:
+ if (!radeon_check_pot_argument(radeon_vram_limit)) {
dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
radeon_vram_limit);
radeon_vram_limit = 0;
- break;
}
- radeon_vram_limit = radeon_vram_limit << 20;
+
/* gtt size must be power of two and greater or equal to 32M */
- switch (radeon_gart_size) {
- case 4:
- case 8:
- case 16:
+ if (radeon_gart_size < 32) {
dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
radeon_gart_size);
radeon_gart_size = 512;
- break;
- case 32:
- case 64:
- case 128:
- case 256:
- case 512:
- case 1024:
- case 2048:
- case 4096:
- break;
- default:
+
+ } else if (!radeon_check_pot_argument(radeon_gart_size)) {
dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
radeon_gart_size);
radeon_gart_size = 512;
- break;
}
- rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
+ rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
+
/* AGP mode can only be -1, 1, 2, 4, 8 */
switch (radeon_agpmode) {
case -1:
DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages);
/* Allocate pages table */
- rdev->gart.pages = kzalloc(sizeof(void *) * rdev->gart.num_cpu_pages,
- GFP_KERNEL);
+ rdev->gart.pages = vzalloc(sizeof(void *) * rdev->gart.num_cpu_pages);
if (rdev->gart.pages == NULL) {
radeon_gart_fini(rdev);
return -ENOMEM;
}
- rdev->gart.pages_addr = kzalloc(sizeof(dma_addr_t) *
- rdev->gart.num_cpu_pages, GFP_KERNEL);
+ rdev->gart.pages_addr = vzalloc(sizeof(dma_addr_t) *
+ rdev->gart.num_cpu_pages);
if (rdev->gart.pages_addr == NULL) {
radeon_gart_fini(rdev);
return -ENOMEM;
radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
}
rdev->gart.ready = false;
- kfree(rdev->gart.pages);
- kfree(rdev->gart.pages_addr);
+ vfree(rdev->gart.pages);
+ vfree(rdev->gart.pages_addr);
rdev->gart.pages = NULL;
rdev->gart.pages_addr = NULL;
*
* Global and local mutex must be locked!
*/
-int radeon_vm_evict(struct radeon_device *rdev, struct radeon_vm *vm)
+static int radeon_vm_evict(struct radeon_device *rdev, struct radeon_vm *vm)
{
struct radeon_vm *vm_evict;
pte = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);
pte += (addr & mask) * 8;
- if (((last_pte + 8 * count) != pte) ||
- ((count + nptes) > 1 << 11)) {
+ if ((last_pte + 8 * count) != pte) {
if (count) {
radeon_asic_vm_set_page(rdev, last_pte,
if (RADEON_VM_BLOCK_SIZE > 11)
/* reserve space for one header for every 2k dwords */
- ndw += (nptes >> 11) * 3;
+ ndw += (nptes >> 11) * 4;
else
/* reserve space for one header for
every (1 << BLOCK_SIZE) entries */
- ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 3;
+ ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 4;
/* reserve space for pte addresses */
ndw += nptes * 2;
/* reserve space for one header for every 2k dwords */
- ndw += (npdes >> 11) * 3;
+ ndw += (npdes >> 11) * 4;
/* reserve space for pde addresses */
ndw += npdes * 2;
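The reservation grows from 3 to 4 dwords per 2k-entry run because the
chunked SI-style header is one dword larger than the cayman one; roughly:

    /* worst-case header cost per chunk (illustrative accounting):  */
    /*   ME_WRITE:   PACKET3 + pe lo + pe hi           = 3 dwords   */
    /*   WRITE_DATA: PACKET3 + dst sel + pe lo + pe hi = 4 dwords   */

Reserving the larger figure keeps a single ring-space estimate valid for
both back-ends.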
struct drm_gem_object **obj)
{
struct radeon_bo *robj;
+ unsigned long max_size;
int r;
*obj = NULL;
if (alignment < PAGE_SIZE) {
alignment = PAGE_SIZE;
}
+
+ /* maximum bo size is the minimum between visible vram and gtt size */
+ max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
+ if (size > max_size) {
+ printk(KERN_WARNING "%s:%d alloc size %dMb bigger than %ldMb limit\n",
+ __func__, __LINE__, size >> 20, max_size >> 20);
+ return -ENOMEM;
+ }
+
+retry:
r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, NULL, &robj);
if (r) {
- if (r != -ERESTARTSYS)
+ if (r != -ERESTARTSYS) {
+ if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
+ initial_domain |= RADEON_GEM_DOMAIN_GTT;
+ goto retry;
+ }
DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
size, initial_domain, alignment, r);
+ }
return r;
}
*obj = &robj->gem_base;
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
+ uint32_t crtc_ext_cntl = 0;
uint32_t mask;
if (radeon_crtc->crtc_id)
RADEON_CRTC_VSYNC_DIS |
RADEON_CRTC_HSYNC_DIS);
+ /*
+ * On all dual CRTC GPUs this bit controls the CRTC of the primary DAC.
+ * Therefore it is set in the DAC DMPS function.
+ * This is different for GPUs with a single CRTC but a primary and a
+ * TV DAC: here it controls the single CRTC no matter where it is
+ * routed. Therefore we set it here.
+ */
+ if (rdev->flags & RADEON_SINGLE_CRTC)
+ crtc_ext_cntl = RADEON_CRTC_CRT_ON;
+
switch (mode) {
case DRM_MODE_DPMS_ON:
radeon_crtc->enabled = true;
else {
WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_EN, ~(RADEON_CRTC_EN |
RADEON_CRTC_DISP_REQ_EN_B));
- WREG32_P(RADEON_CRTC_EXT_CNTL, 0, ~mask);
+ WREG32_P(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl, ~(mask | crtc_ext_cntl));
}
drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
radeon_crtc_load_lut(crtc);
else {
WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_DISP_REQ_EN_B, ~(RADEON_CRTC_EN |
RADEON_CRTC_DISP_REQ_EN_B));
- WREG32_P(RADEON_CRTC_EXT_CNTL, mask, ~mask);
+ WREG32_P(RADEON_CRTC_EXT_CNTL, mask, ~(mask | crtc_ext_cntl));
}
radeon_crtc->enabled = false;
/* adjust pm to dpms changes AFTER disabling crtcs */
struct backlight_properties props;
struct radeon_backlight_privdata *pdata;
uint8_t backlight_level;
+ char bl_name[16];
if (!radeon_encoder->enc_priv)
return;
memset(&props, 0, sizeof(props));
props.max_brightness = RADEON_MAX_BL_LEVEL;
props.type = BACKLIGHT_RAW;
- bd = backlight_device_register("radeon_bl", &drm_connector->kdev,
+ snprintf(bl_name, sizeof(bl_name),
+ "radeon_bl%d", dev->primary->index);
+ bd = backlight_device_register(bl_name, &drm_connector->kdev,
pdata, &radeon_backlight_ops, &props);
if (IS_ERR(bd)) {
DRM_ERROR("Backlight registration failed\n");
break;
}
- WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
+ /* handled in radeon_crtc_dpms() */
+ if (!(rdev->flags & RADEON_SINGLE_CRTC))
+ WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
WREG32(RADEON_DAC_CNTL, dac_cntl);
WREG32(RADEON_DAC_MACRO_CNTL, dac_macro_cntl);
if (ASIC_IS_R300(rdev))
tmp |= (0x1b6 << RADEON_DAC_FORCE_DATA_SHIFT);
+ else if (ASIC_IS_RV100(rdev))
+ tmp |= (0x1ac << RADEON_DAC_FORCE_DATA_SHIFT);
else
tmp |= (0x180 << RADEON_DAC_FORCE_DATA_SHIFT);
tmp |= RADEON_DAC_RANGE_CNTL_PS2 | RADEON_DAC_CMP_EN;
WREG32(RADEON_DAC_CNTL, tmp);
+ tmp = dac_macro_cntl;
tmp &= ~(RADEON_DAC_PDWN_R |
RADEON_DAC_PDWN_G |
RADEON_DAC_PDWN_B);
} else {
if (is_tv)
WREG32(RADEON_TV_MASTER_CNTL, tv_master_cntl);
- else
+ /* handled in radeon_crtc_dpms() */
+ else if (!(rdev->flags & RADEON_SINGLE_CRTC))
WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
}
return found;
}
+static bool radeon_legacy_ext_dac_detect(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t gpio_monid, fp2_gen_cntl, disp_output_cntl, crtc2_gen_cntl;
+ uint32_t disp_lin_trans_grph_a, disp_lin_trans_grph_b, disp_lin_trans_grph_c;
+ uint32_t disp_lin_trans_grph_d, disp_lin_trans_grph_e, disp_lin_trans_grph_f;
+ uint32_t tmp, crtc2_h_total_disp, crtc2_v_total_disp;
+ uint32_t crtc2_h_sync_strt_wid, crtc2_v_sync_strt_wid;
+ bool found = false;
+ int i;
+
+ /* save the regs we need */
+ gpio_monid = RREG32(RADEON_GPIO_MONID);
+ fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
+ disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL);
+ crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
+ disp_lin_trans_grph_a = RREG32(RADEON_DISP_LIN_TRANS_GRPH_A);
+ disp_lin_trans_grph_b = RREG32(RADEON_DISP_LIN_TRANS_GRPH_B);
+ disp_lin_trans_grph_c = RREG32(RADEON_DISP_LIN_TRANS_GRPH_C);
+ disp_lin_trans_grph_d = RREG32(RADEON_DISP_LIN_TRANS_GRPH_D);
+ disp_lin_trans_grph_e = RREG32(RADEON_DISP_LIN_TRANS_GRPH_E);
+ disp_lin_trans_grph_f = RREG32(RADEON_DISP_LIN_TRANS_GRPH_F);
+ crtc2_h_total_disp = RREG32(RADEON_CRTC2_H_TOTAL_DISP);
+ crtc2_v_total_disp = RREG32(RADEON_CRTC2_V_TOTAL_DISP);
+ crtc2_h_sync_strt_wid = RREG32(RADEON_CRTC2_H_SYNC_STRT_WID);
+ crtc2_v_sync_strt_wid = RREG32(RADEON_CRTC2_V_SYNC_STRT_WID);
+
+ tmp = RREG32(RADEON_GPIO_MONID);
+ tmp &= ~RADEON_GPIO_A_0;
+ WREG32(RADEON_GPIO_MONID, tmp);
+
+ WREG32(RADEON_FP2_GEN_CNTL, (RADEON_FP2_ON |
+ RADEON_FP2_PANEL_FORMAT |
+ R200_FP2_SOURCE_SEL_TRANS_UNIT |
+ RADEON_FP2_DVO_EN |
+ R200_FP2_DVO_RATE_SEL_SDR));
+
+ WREG32(RADEON_DISP_OUTPUT_CNTL, (RADEON_DISP_DAC_SOURCE_RMX |
+ RADEON_DISP_TRANS_MATRIX_GRAPHICS));
+
+ WREG32(RADEON_CRTC2_GEN_CNTL, (RADEON_CRTC2_EN |
+ RADEON_CRTC2_DISP_REQ_EN_B));
+
+ WREG32(RADEON_DISP_LIN_TRANS_GRPH_A, 0x00000000);
+ WREG32(RADEON_DISP_LIN_TRANS_GRPH_B, 0x000003f0);
+ WREG32(RADEON_DISP_LIN_TRANS_GRPH_C, 0x00000000);
+ WREG32(RADEON_DISP_LIN_TRANS_GRPH_D, 0x000003f0);
+ WREG32(RADEON_DISP_LIN_TRANS_GRPH_E, 0x00000000);
+ WREG32(RADEON_DISP_LIN_TRANS_GRPH_F, 0x000003f0);
+
+ WREG32(RADEON_CRTC2_H_TOTAL_DISP, 0x01000008);
+ WREG32(RADEON_CRTC2_H_SYNC_STRT_WID, 0x00000800);
+ WREG32(RADEON_CRTC2_V_TOTAL_DISP, 0x00080001);
+ WREG32(RADEON_CRTC2_V_SYNC_STRT_WID, 0x00000080);
+
+ for (i = 0; i < 200; i++) {
+ tmp = RREG32(RADEON_GPIO_MONID);
+ if (tmp & RADEON_GPIO_Y_0)
+ found = true;
+
+ if (found)
+ break;
+
+ if (!drm_can_sleep())
+ mdelay(1);
+ else
+ msleep(1);
+ }
+
+ /* restore the regs we used */
+ WREG32(RADEON_DISP_LIN_TRANS_GRPH_A, disp_lin_trans_grph_a);
+ WREG32(RADEON_DISP_LIN_TRANS_GRPH_B, disp_lin_trans_grph_b);
+ WREG32(RADEON_DISP_LIN_TRANS_GRPH_C, disp_lin_trans_grph_c);
+ WREG32(RADEON_DISP_LIN_TRANS_GRPH_D, disp_lin_trans_grph_d);
+ WREG32(RADEON_DISP_LIN_TRANS_GRPH_E, disp_lin_trans_grph_e);
+ WREG32(RADEON_DISP_LIN_TRANS_GRPH_F, disp_lin_trans_grph_f);
+ WREG32(RADEON_CRTC2_H_TOTAL_DISP, crtc2_h_total_disp);
+ WREG32(RADEON_CRTC2_V_TOTAL_DISP, crtc2_v_total_disp);
+ WREG32(RADEON_CRTC2_H_SYNC_STRT_WID, crtc2_h_sync_strt_wid);
+ WREG32(RADEON_CRTC2_V_SYNC_STRT_WID, crtc2_v_sync_strt_wid);
+ WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
+ WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl);
+ WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
+ WREG32(RADEON_GPIO_MONID, gpio_monid);
+
+ return found;
+}
+
static enum drm_connector_status radeon_legacy_tv_dac_detect(struct drm_encoder *encoder,
struct drm_connector *connector)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
- uint32_t crtc2_gen_cntl, tv_dac_cntl, dac_cntl2, dac_ext_cntl;
- uint32_t disp_hw_debug, disp_output_cntl, gpiopad_a, pixclks_cntl, tmp;
+ uint32_t crtc2_gen_cntl = 0, tv_dac_cntl, dac_cntl2, dac_ext_cntl;
+ uint32_t gpiopad_a = 0, pixclks_cntl, tmp;
+ uint32_t disp_output_cntl = 0, disp_hw_debug = 0, crtc_ext_cntl = 0;
enum drm_connector_status found = connector_status_disconnected;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv;
return connector_status_disconnected;
}
+ /* R200 uses an external DAC for secondary DAC */
+ if (rdev->family == CHIP_R200) {
+ if (radeon_legacy_ext_dac_detect(encoder, connector))
+ found = connector_status_connected;
+ return found;
+ }
+
/* save the regs we need */
pixclks_cntl = RREG32_PLL(RADEON_PIXCLKS_CNTL);
- gpiopad_a = ASIC_IS_R300(rdev) ? RREG32(RADEON_GPIOPAD_A) : 0;
- disp_output_cntl = ASIC_IS_R300(rdev) ? RREG32(RADEON_DISP_OUTPUT_CNTL) : 0;
- disp_hw_debug = ASIC_IS_R300(rdev) ? 0 : RREG32(RADEON_DISP_HW_DEBUG);
- crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
+
+ if (rdev->flags & RADEON_SINGLE_CRTC) {
+ crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
+ } else {
+ if (ASIC_IS_R300(rdev)) {
+ gpiopad_a = RREG32(RADEON_GPIOPAD_A);
+ disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL);
+ } else {
+ disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
+ }
+ crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
+ }
tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
dac_ext_cntl = RREG32(RADEON_DAC_EXT_CNTL);
dac_cntl2 = RREG32(RADEON_DAC_CNTL2);
| RADEON_PIX2CLK_DAC_ALWAYS_ONb);
WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
- if (ASIC_IS_R300(rdev))
- WREG32_P(RADEON_GPIOPAD_A, 1, ~1);
-
- tmp = crtc2_gen_cntl & ~RADEON_CRTC2_PIX_WIDTH_MASK;
- tmp |= RADEON_CRTC2_CRT2_ON |
- (2 << RADEON_CRTC2_PIX_WIDTH_SHIFT);
-
- WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
-
- if (ASIC_IS_R300(rdev)) {
- tmp = disp_output_cntl & ~RADEON_DISP_TVDAC_SOURCE_MASK;
- tmp |= RADEON_DISP_TVDAC_SOURCE_CRTC2;
- WREG32(RADEON_DISP_OUTPUT_CNTL, tmp);
+ if (rdev->flags & RADEON_SINGLE_CRTC) {
+ tmp = crtc_ext_cntl | RADEON_CRTC_CRT_ON;
+ WREG32(RADEON_CRTC_EXT_CNTL, tmp);
} else {
- tmp = disp_hw_debug & ~RADEON_CRT2_DISP1_SEL;
- WREG32(RADEON_DISP_HW_DEBUG, tmp);
+ tmp = crtc2_gen_cntl & ~RADEON_CRTC2_PIX_WIDTH_MASK;
+ tmp |= RADEON_CRTC2_CRT2_ON |
+ (2 << RADEON_CRTC2_PIX_WIDTH_SHIFT);
+ WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
+
+ if (ASIC_IS_R300(rdev)) {
+ WREG32_P(RADEON_GPIOPAD_A, 1, ~1);
+ tmp = disp_output_cntl & ~RADEON_DISP_TVDAC_SOURCE_MASK;
+ tmp |= RADEON_DISP_TVDAC_SOURCE_CRTC2;
+ WREG32(RADEON_DISP_OUTPUT_CNTL, tmp);
+ } else {
+ tmp = disp_hw_debug & ~RADEON_CRT2_DISP1_SEL;
+ WREG32(RADEON_DISP_HW_DEBUG, tmp);
+ }
}
tmp = RADEON_TV_DAC_NBLANK |
WREG32(RADEON_DAC_CNTL2, dac_cntl2);
WREG32(RADEON_DAC_EXT_CNTL, dac_ext_cntl);
WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
- WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
- if (ASIC_IS_R300(rdev)) {
- WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl);
- WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1);
+ if (rdev->flags & RADEON_SINGLE_CRTC) {
+ WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
} else {
- WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
+ WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
+ if (ASIC_IS_R300(rdev)) {
+ WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl);
+ WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1);
+ } else {
+ WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
+ }
}
+
WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
return found;
struct radeon_bo *bo;
enum ttm_bo_type type;
unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
- unsigned long max_size = 0;
size_t acc_size;
int r;
}
*bo_ptr = NULL;
- /* maximun bo size is the minimun btw visible vram and gtt size */
- max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
- if ((page_align << PAGE_SHIFT) >= max_size) {
- printk(KERN_WARNING "%s:%d alloc size %ldM bigger than %ldMb limit\n",
- __func__, __LINE__, page_align >> (20 - PAGE_SHIFT), max_size >> 20);
- return -ENOMEM;
- }
-
acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
sizeof(struct radeon_bo));
-retry:
bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
if (bo == NULL)
return -ENOMEM;
acc_size, sg, &radeon_ttm_bo_destroy);
up_read(&rdev->pm.mclk_lock);
if (unlikely(r != 0)) {
- if (r != -ERESTARTSYS) {
- if (domain == RADEON_GEM_DOMAIN_VRAM) {
- domain |= RADEON_GEM_DOMAIN_GTT;
- goto retry;
- }
- dev_err(rdev->dev,
- "object_init failed for (%lu, 0x%08X)\n",
- size, domain);
- }
return r;
}
*bo_ptr = bo;
/* check config regs */
switch (reg) {
case GRBM_GFX_INDEX:
+ case CP_STRMOUT_CNTL:
case VGT_VTX_VECT_EJECT_REG:
case VGT_CACHE_INVALIDATION:
case VGT_ESGS_RING_SIZE:
{
struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
- int i;
- uint64_t value;
- radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 2 + count * 2));
- radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
- WRITE_DATA_DST_SEL(1)));
- radeon_ring_write(ring, pe);
- radeon_ring_write(ring, upper_32_bits(pe));
- for (i = 0; i < count; ++i) {
- if (flags & RADEON_VM_PAGE_SYSTEM) {
- value = radeon_vm_map_gart(rdev, addr);
- value &= 0xFFFFFFFFFFFFF000ULL;
- } else if (flags & RADEON_VM_PAGE_VALID)
- value = addr;
- else
- value = 0;
- addr += incr;
- value |= r600_flags;
- radeon_ring_write(ring, value);
- radeon_ring_write(ring, upper_32_bits(value));
+ while (count) {
+ unsigned ndw = 2 + count * 2;
+ if (ndw > 0x3FFE)
+ ndw = 0x3FFE;
+
+ radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, ndw));
+ radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+ WRITE_DATA_DST_SEL(1)));
+ radeon_ring_write(ring, pe);
+ radeon_ring_write(ring, upper_32_bits(pe));
+ for (; ndw > 2; ndw -= 2, --count, pe += 8) {
+ uint64_t value;
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ value = radeon_vm_map_gart(rdev, addr);
+ value &= 0xFFFFFFFFFFFFF000ULL;
+ } else if (flags & RADEON_VM_PAGE_VALID)
+ value = addr;
+ else
+ value = 0;
+ addr += incr;
+ value |= r600_flags;
+ radeon_ring_write(ring, value);
+ radeon_ring_write(ring, upper_32_bits(value));
+ }
}
}
radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 1 << vm->id);
+
+ /* sync PFP to ME, otherwise we might get invalid PFP reads */
+ radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
+ radeon_ring_write(ring, 0x0);
}
/*
# define RDERR_INT_ENABLE (1 << 0)
# define GUI_IDLE_INT_ENABLE (1 << 19)
+#define CP_STRMOUT_CNTL 0x84FC
#define SCRATCH_REG0 0x8500
#define SCRATCH_REG1 0x8504
#define SCRATCH_REG2 0x8508
goto done;
}
+ platform_set_drvdata(pdev, sdev);
+
done:
if (ret)
shmob_drm_unload(dev);
#if CONFIG_PM_SLEEP
static int shmob_drm_pm_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct drm_device *ddev = platform_get_drvdata(pdev);
- struct shmob_drm_device *sdev = ddev->dev_private;
+ struct shmob_drm_device *sdev = dev_get_drvdata(dev);
- drm_kms_helper_poll_disable(ddev);
+ drm_kms_helper_poll_disable(sdev->ddev);
shmob_drm_crtc_suspend(&sdev->crtc);
return 0;
static int shmob_drm_pm_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct drm_device *ddev = platform_get_drvdata(pdev);
- struct shmob_drm_device *sdev = ddev->dev_private;
+ struct shmob_drm_device *sdev = dev_get_drvdata(dev);
mutex_lock(&sdev->ddev->mode_config.mutex);
shmob_drm_crtc_resume(&sdev->crtc);
if (unlikely(ret != 0))
return ret;
+retry_reserve:
spin_lock(&glob->lru_lock);
if (unlikely(list_empty(&bo->ddestroy))) {
return 0;
}
- ret = ttm_bo_reserve_locked(bo, interruptible,
- no_wait_reserve, false, 0);
+ ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
- if (unlikely(ret != 0)) {
+ if (unlikely(ret == -EBUSY)) {
spin_unlock(&glob->lru_lock);
- return ret;
+ if (likely(!no_wait_reserve))
+ ret = ttm_bo_wait_unreserved(bo, interruptible);
+ if (unlikely(ret != 0))
+ return ret;
+
+ goto retry_reserve;
}
+ BUG_ON(ret != 0);
+
/**
* We can re-check for sync object without taking
* the bo::lock since setting the sync object requires
no_wait_reserve, no_wait_gpu);
kref_put(&bo->list_kref, ttm_bo_release_list);
- if (likely(ret == 0 || ret == -ERESTARTSYS))
- return ret;
-
- goto retry;
+ return ret;
}
- ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0);
+ ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
if (unlikely(ret == -EBUSY)) {
spin_unlock(&glob->lru_lock);
- if (likely(!no_wait_gpu))
+ if (likely(!no_wait_reserve))
ret = ttm_bo_wait_unreserved(bo, interruptible);
kref_put(&bo->list_kref, ttm_bo_release_list);
/* clear the pages coming from the pool if requested */
if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
list_for_each_entry(p, &plist, lru) {
- clear_page(page_address(p));
+ if (PageHighMem(p))
+ clear_highpage(p);
+ else
+ clear_page(page_address(p));
}
}
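clear_page(page_address(p)) is only safe for lowmem: a highmem page has no
permanent kernel mapping, so page_address() can return NULL there. A sketch
of what clear_highpage() does instead, per the common highmem helpers:

    void *kaddr = kmap_atomic(p);   /* temporarily map the highmem page */
    clear_page(kaddr);
    kunmap_atomic(kaddr);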
if (unlikely(to_page == NULL))
goto out_err;
- preempt_disable();
copy_highpage(to_page, from_page);
- preempt_enable();
page_cache_release(from_page);
}
ret = PTR_ERR(to_page);
goto out_err;
}
- preempt_disable();
copy_highpage(to_page, from_page);
- preempt_enable();
set_page_dirty(to_page);
mark_page_accessed(to_page);
page_cache_release(to_page);
int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr,
const char *front, char **urb_buf_ptr,
- u32 byte_offset, u32 byte_width,
+ u32 byte_offset, u32 device_byte_offset, u32 byte_width,
int *ident_ptr, int *sent_ptr);
int udl_dumb_create(struct drm_file *file_priv,
list_for_each_entry(cur, &fbdefio->pagelist, lru) {
if (udl_render_hline(dev, (ufbdev->ufb.base.bits_per_pixel / 8),
- &urb, (char *) info->fix.smem_start,
- &cmd, cur->index << PAGE_SHIFT,
- PAGE_SIZE, &bytes_identical, &bytes_sent))
+ &urb, (char *) info->fix.smem_start,
+ &cmd, cur->index << PAGE_SHIFT,
+ cur->index << PAGE_SHIFT,
+ PAGE_SIZE, &bytes_identical, &bytes_sent))
goto error;
bytes_rendered += PAGE_SIZE;
}
for (i = y; i < y + height ; i++) {
const int line_offset = fb->base.pitches[0] * i;
const int byte_offset = line_offset + (x * bpp);
-
+ const int dev_byte_offset = (fb->base.width * bpp * i) + (x * bpp);
if (udl_render_hline(dev, bpp, &urb,
(char *) fb->obj->vmapping,
- &cmd, byte_offset, width * bpp,
+ &cmd, byte_offset, dev_byte_offset,
+ width * bpp,
&bytes_identical, &bytes_sent))
goto error;
}
*/
int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr,
const char *front, char **urb_buf_ptr,
- u32 byte_offset, u32 byte_width,
+ u32 byte_offset, u32 device_byte_offset,
+ u32 byte_width,
int *ident_ptr, int *sent_ptr)
{
const u8 *line_start, *line_end, *next_pixel;
- u32 base16 = 0 + (byte_offset / bpp) * 2;
+ u32 base16 = 0 + (device_byte_offset / bpp) * 2;
struct urb *urb = *urb_ptr;
u8 *cmd = *urb_buf_ptr;
u8 *cmd_end = (u8 *) urb->transfer_buffer + urb->transfer_buffer_length;
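The fix separates where a line is read from where it lands on the device:
byte_offset follows the framebuffer pitch, while device_byte_offset assumes
a device pitch of width * bpp. A worked sketch for pixel (x, y), assuming a
16bpp device scanout (values illustrative, not from the patch):

    byte_offset        = fb->base.pitches[0] * y + x * bpp;  /* CPU read   */
    device_byte_offset = fb->base.width * bpp * y + x * bpp; /* device dst */
    base16             = (device_byte_offset / bpp) * 2;     /* 16bpp cell */

When pitch > width * bpp (padded framebuffers), the two offsets diverge and
the old code wrote damage to the wrong device address.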
BUG_ON(!atomic_read(&bo->reserved));
BUG_ON(old_mem_type != TTM_PL_VRAM &&
- old_mem_type != VMW_PL_FLAG_GMR);
+ old_mem_type != VMW_PL_GMR);
pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED;
if (pin)
struct drm_device *dev = pci_get_drvdata(pdev);
struct vmw_private *dev_priv = vmw_priv(dev);
+ mutex_lock(&dev_priv->hw_mutex);
+ vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
+ (void) vmw_read(dev_priv, SVGA_REG_ID);
+ mutex_unlock(&dev_priv->hw_mutex);
+
/**
* Reclaim 3d reference held by fbdev and potentially
* start fifo.
memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size);
ret = copy_to_user(buffer, bounce, size);
+ if (ret)
+ ret = -EFAULT;
vfree(bounce);
if (unlikely(ret != 0))
.driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI),
+ .driver_data = APPLE_HAS_FN },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO),
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS),
+ .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO),
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
{ }
#define USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI 0x0252
#define USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO 0x0253
#define USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS 0x0254
+#define USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI 0x0259
+#define USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO 0x025a
+#define USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS 0x025b
#define USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI 0x0249
#define USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO 0x024a
#define USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS 0x024b
#define MS_RDESC 0x08
#define MS_NOGET 0x10
#define MS_DUPLICATE_USAGES 0x20
+#define MS_RDESC_3K 0x40
-/*
- * Microsoft Wireless Desktop Receiver (Model 1028) has
- * 'Usage Min/Max' where it ought to have 'Physical Min/Max'
- */
static __u8 *ms_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
+ /*
+ * Microsoft Wireless Desktop Receiver (Model 1028) has
+ * 'Usage Min/Max' where it ought to have 'Physical Min/Max'
+ */
if ((quirks & MS_RDESC) && *rsize == 571 && rdesc[557] == 0x19 &&
rdesc[559] == 0x29) {
hid_info(hdev, "fixing up Microsoft Wireless Receiver Model 1028 report descriptor\n");
rdesc[557] = 0x35;
rdesc[559] = 0x45;
}
+ /* the same as above (s/usage/physical/) */
+ if ((quirks & MS_RDESC_3K) && *rsize == 106 && rdesc[94] == 0x19 &&
+ rdesc[95] == 0x00 && rdesc[96] == 0x29 &&
+ rdesc[97] == 0xff) {
+ rdesc[94] = 0x35;
+ rdesc[96] = 0x45;
+ }
return rdesc;
}
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB),
.driver_data = MS_PRESENTER },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K),
- .driver_data = MS_ERGONOMY },
+ .driver_data = MS_ERGONOMY | MS_RDESC_3K },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0),
.driver_data = MS_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500),
},
{ .name = MT_CLS_GENERALTOUCH_PWT_TENFINGERS,
.quirks = MT_QUIRK_NOT_SEEN_MEANS_UP |
- MT_QUIRK_SLOT_IS_CONTACTNUMBER,
- .maxcontacts = 10
+ MT_QUIRK_SLOT_IS_CONTACTNUMBER
},
{ .name = MT_CLS_FLATFROG,
* contact max are global to the report */
td->last_field_index = field->index;
return -1;
- }
case HID_DG_TOUCH:
/* Legacy devices use TIPSWITCH and not TOUCH.
* Let's just ignore this field. */
return -1;
+ }
/* let hid-input decide for the others */
return 0;
static struct class *hidraw_class;
static struct hidraw *hidraw_table[HIDRAW_MAX_DEVICES];
static DEFINE_MUTEX(minors_lock);
-static void drop_ref(struct hidraw *hid, int exists_bit);
static ssize_t hidraw_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
{
__u8 *buf;
int ret = 0;
- if (!hidraw_table[minor] || !hidraw_table[minor]->exist) {
+ if (!hidraw_table[minor]) {
ret = -ENODEV;
goto out;
}
}
mutex_lock(&minors_lock);
- if (!hidraw_table[minor] || !hidraw_table[minor]->exist) {
+ if (!hidraw_table[minor]) {
err = -ENODEV;
goto out_unlock;
}
static int hidraw_release(struct inode * inode, struct file * file)
{
unsigned int minor = iminor(inode);
+ struct hidraw *dev;
struct hidraw_list *list = file->private_data;
+ int ret;
+ int i;
+
+ mutex_lock(&minors_lock);
+ if (!hidraw_table[minor]) {
+ ret = -ENODEV;
+ goto unlock;
+ }
- drop_ref(hidraw_table[minor], 0);
list_del(&list->node);
+ dev = hidraw_table[minor];
+ if (!--dev->open) {
+ if (list->hidraw->exist) {
+ hid_hw_power(dev->hid, PM_HINT_NORMAL);
+ hid_hw_close(dev->hid);
+ } else {
+ kfree(list->hidraw);
+ }
+ }
+
+ for (i = 0; i < HIDRAW_BUFFER_SIZE; ++i)
+ kfree(list->buffer[i].value);
kfree(list);
- return 0;
+ ret = 0;
+unlock:
+ mutex_unlock(&minors_lock);
+
+ return ret;
}
static long hidraw_ioctl(struct file *file, unsigned int cmd,
void hidraw_disconnect(struct hid_device *hid)
{
struct hidraw *hidraw = hid->hidraw;
- drop_ref(hidraw, 1);
+
+ mutex_lock(&minors_lock);
+ hidraw->exist = 0;
+
+ device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor));
+
+ hidraw_table[hidraw->minor] = NULL;
+
+ if (hidraw->open) {
+ hid_hw_close(hid);
+ wake_up_interruptible(&hidraw->wait);
+ } else {
+ kfree(hidraw);
+ }
+ mutex_unlock(&minors_lock);
}
EXPORT_SYMBOL_GPL(hidraw_disconnect);
unregister_chrdev_region(dev_id, HIDRAW_MAX_DEVICES);
}
-
-static void drop_ref(struct hidraw *hidraw, int exists_bit)
-{
- mutex_lock(&minors_lock);
- if (exists_bit) {
- hid_hw_close(hidraw->hid);
- hidraw->exist = 0;
- if (hidraw->open)
- wake_up_interruptible(&hidraw->wait);
- } else {
- --hidraw->open;
- }
-
- if (!hidraw->open && !hidraw->exist) {
- device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor));
- hidraw_table[hidraw->minor] = NULL;
- kfree(hidraw);
- }
- mutex_unlock(&minors_lock);
-}
if (ret != 0) {
err = ret;
- goto errorout;
+ goto error0;
}
ret = hv_ringbuffer_init(
&newchannel->inbound, in, recv_ringbuffer_size);
if (ret != 0) {
err = ret;
- goto errorout;
+ goto error0;
}
if (ret != 0) {
err = ret;
- goto errorout;
+ goto error0;
}
/* Create and init the channel open message */
GFP_KERNEL);
if (!open_info) {
err = -ENOMEM;
- goto errorout;
+ goto error0;
}
init_completion(&open_info->waitevent);
if (userdatalen > MAX_USER_DEFINED_BYTES) {
err = -EINVAL;
- goto errorout;
+ goto error0;
}
if (userdatalen)
sizeof(struct vmbus_channel_open_channel));
if (ret != 0)
- goto cleanup;
+ goto error1;
t = wait_for_completion_timeout(&open_info->waitevent, 5*HZ);
if (t == 0) {
err = -ETIMEDOUT;
- goto errorout;
+ goto error1;
}
if (open_info->response.open_result.status)
err = open_info->response.open_result.status;
-cleanup:
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_del(&open_info->msglistentry);
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
kfree(open_info);
return err;
-errorout:
- hv_ringbuffer_cleanup(&newchannel->outbound);
- hv_ringbuffer_cleanup(&newchannel->inbound);
+error1:
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+ list_del(&open_info->msglistentry);
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+
+error0:
free_pages((unsigned long)out,
get_order(send_ringbuffer_size + recv_ringbuffer_size));
kfree(open_info);
* ASB100-A supports pwm1, while plain ASB100 does not. There is no known
* way for the driver to tell which one is there.
*
- * Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA
+ * Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA
* asb100 7 3 1 4 0x31 0x0694 yes no
*/
* fam15h_power.c - AMD Family 15h processor power monitoring
*
* Copyright (c) 2011 Advanced Micro Devices, Inc.
- * Author: Andreas Herrmann <andreas.herrmann3@amd.com>
+ * Author: Andreas Herrmann <herrmann.der.user@googlemail.com>
*
*
* This driver is free software; you can redistribute it and/or
#include <asm/processor.h>
MODULE_DESCRIPTION("AMD Family 15h CPU processor power monitor");
-MODULE_AUTHOR("Andreas Herrmann <andreas.herrmann3@amd.com>");
+MODULE_AUTHOR("Andreas Herrmann <herrmann.der.user@googlemail.com>");
MODULE_LICENSE("GPL");
/* D18F3 */
.driver = {
.name = "gpio-fan",
.pm = GPIO_FAN_PM,
+#ifdef CONFIG_OF_GPIO
.of_match_table = of_match_ptr(of_gpio_fan_match),
+#endif
},
};
mutex_init(&data->lock);
mutex_init(&data->update_lock);
data->name = w83627ehf_device_names[sio_data->kind];
+ data->bank = 0xff; /* Force initial bank selection */
platform_set_drvdata(pdev, data);
/* 627EHG and 627EHF have 10 voltage inputs; 627DHG and 667HG have 9 */
/*
* Supports following chips:
*
- * Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA
+ * Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA
* w83627hf 9 3 2 3 0x20 0x5ca3 no yes(LPC)
* w83627thf 7 3 3 3 0x90 0x5ca3 no yes(LPC)
* w83637hf 7 3 3 3 0x80 0x5ca3 no yes(LPC)
/*
* Supports following chips:
*
- * Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA
+ * Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA
* as99127f 7 3 0 3 0x31 0x12c3 yes no
* as99127f rev.2 (type_name = as99127f) 0x31 0x5ca3 yes no
* w83781d 7 3 0 3 0x10-1 0x5ca3 yes yes
/*
* Supports following chips:
*
- * Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA
+ * Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA
* w83791d 10 5 5 3 0x71 0x5ca3 yes no
*
* The w83791d chip appears to be part way between the 83781d and the
/*
* Supports following chips:
*
- * Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA
+ * Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA
* w83792d 9 7 7 3 0x7a 0x5ca3 yes no
*/
/*
* Supports following chips:
*
- * Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA
+ * Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA
* w83l786ng 3 2 2 2 0x7b 0x5ca3 yes no
*/
obj-$(CONFIG_I2C_CHARDEV) += i2c-dev.o
obj-$(CONFIG_I2C_MUX) += i2c-mux.o
obj-y += algos/ busses/ muxes/
+obj-$(CONFIG_I2C_STUB) += i2c-stub.o
ccflags-$(CONFIG_I2C_DEBUG_CORE) := -DDEBUG
CFLAGS_i2c-core.o := -Wno-deprecated-declarations
tristate "Intel 82801 (ICH/PCH)"
depends on PCI
select CHECK_SIGNATURE if X86 && DMI
- select GPIOLIB if I2C_MUX
help
If you say yes to this option, support will be included for the Intel
801 family of mainboard I2C interfaces. Specifically, the following
obj-$(CONFIG_I2C_ELEKTOR) += i2c-elektor.o
obj-$(CONFIG_I2C_PCA_ISA) += i2c-pca-isa.o
obj-$(CONFIG_I2C_SIBYTE) += i2c-sibyte.o
-obj-$(CONFIG_I2C_STUB) += i2c-stub.o
obj-$(CONFIG_SCx200_ACB) += scx200_acb.o
obj-$(CONFIG_SCx200_I2C) += scx200_i2c.o
#define AT91_TWI_STOP 0x0002 /* Send a Stop Condition */
#define AT91_TWI_MSEN 0x0004 /* Master Transfer Enable */
#define AT91_TWI_SVDIS 0x0020 /* Slave Transfer Disable */
+#define AT91_TWI_QUICK 0x0040 /* SMBus quick command */
#define AT91_TWI_SWRST 0x0080 /* Software Reset */
#define AT91_TWI_MMR 0x0004 /* Master Mode Register */
INIT_COMPLETION(dev->cmd_complete);
dev->transfer_status = 0;
- if (dev->msg->flags & I2C_M_RD) {
+
+ if (!dev->buf_len) {
+ at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_QUICK);
+ at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
+ } else if (dev->msg->flags & I2C_M_RD) {
unsigned start_flags = AT91_TWI_START;
if (at91_twi_read(dev, AT91_TWI_SR) & AT91_TWI_RXRDY) {
#include <linux/wait.h>
#include <linux/err.h>
-#if defined CONFIG_I2C_MUX || defined CONFIG_I2C_MUX_MODULE
+#if (defined CONFIG_I2C_MUX_GPIO || defined CONFIG_I2C_MUX_GPIO_MODULE) && \
+ defined CONFIG_DMI
#include <linux/gpio.h>
#include <linux/i2c-mux-gpio.h>
#include <linux/platform_device.h>
int len;
u8 *data;
-#if defined CONFIG_I2C_MUX || defined CONFIG_I2C_MUX_MODULE
+#if (defined CONFIG_I2C_MUX_GPIO || defined CONFIG_I2C_MUX_GPIO_MODULE) && \
+ defined CONFIG_DMI
const struct i801_mux_config *mux_drvdata;
struct platform_device *mux_pdev;
#endif
static void __devinit i801_probe_optional_slaves(struct i801_priv *priv) {}
#endif /* CONFIG_X86 && CONFIG_DMI */
-#if defined CONFIG_I2C_MUX || defined CONFIG_I2C_MUX_MODULE
+#if (defined CONFIG_I2C_MUX_GPIO || defined CONFIG_I2C_MUX_GPIO_MODULE) && \
+ defined CONFIG_DMI
static struct i801_mux_config i801_mux_config_asus_z8_d12 = {
.gpio_chip = "gpio_ich",
.values = { 0x02, 0x03 },
id = dmi_first_match(mux_dmi_table);
if (id) {
- /* Remove from branch classes from trunk */
+ /* Remove branch classes from trunk */
mux_config = id->driver_data;
for (i = 0; i < mux_config->n_values; i++)
class &= ~mux_config->classes[i];
/*
* Freescale MXS I2C bus driver
*
- * Copyright (C) 2011 Wolfram Sang, Pengutronix e.K.
+ * Copyright (C) 2011-2012 Wolfram Sang, Pengutronix e.K.
*
* based on a (non-working) driver which was:
*
#define DRIVER_NAME "mxs-i2c"
-static bool use_pioqueue;
-module_param(use_pioqueue, bool, 0);
-MODULE_PARM_DESC(use_pioqueue, "Use PIOQUEUE mode for transfer instead of DMA");
-
#define MXS_I2C_CTRL0 (0x00)
#define MXS_I2C_CTRL0_SET (0x04)
MXS_I2C_CTRL1_SLAVE_STOP_IRQ | \
MXS_I2C_CTRL1_SLAVE_IRQ)
-#define MXS_I2C_QUEUECTRL (0x60)
-#define MXS_I2C_QUEUECTRL_SET (0x64)
-#define MXS_I2C_QUEUECTRL_CLR (0x68)
-
-#define MXS_I2C_QUEUECTRL_QUEUE_RUN 0x20
-#define MXS_I2C_QUEUECTRL_PIO_QUEUE_MODE 0x04
-
-#define MXS_I2C_QUEUESTAT (0x70)
-#define MXS_I2C_QUEUESTAT_RD_QUEUE_EMPTY 0x00002000
-#define MXS_I2C_QUEUESTAT_WRITE_QUEUE_CNT_MASK 0x0000001F
-
-#define MXS_I2C_QUEUECMD (0x80)
-
-#define MXS_I2C_QUEUEDATA (0x90)
-
-#define MXS_I2C_DATA (0xa0)
-
#define MXS_CMD_I2C_SELECT (MXS_I2C_CTRL0_RETAIN_CLOCK | \
MXS_I2C_CTRL0_PRE_SEND_START | \
const struct mxs_i2c_speed_config *speed;
/* DMA support components */
- bool dma_mode;
int dma_channel;
struct dma_chan *dmach;
struct mxs_dma_data dma_data;
writel(i2c->speed->timing2, i2c->regs + MXS_I2C_TIMING2);
writel(MXS_I2C_IRQ_MASK << 8, i2c->regs + MXS_I2C_CTRL1_SET);
- if (i2c->dma_mode)
- writel(MXS_I2C_QUEUECTRL_PIO_QUEUE_MODE,
- i2c->regs + MXS_I2C_QUEUECTRL_CLR);
- else
- writel(MXS_I2C_QUEUECTRL_PIO_QUEUE_MODE,
- i2c->regs + MXS_I2C_QUEUECTRL_SET);
-}
-
-static void mxs_i2c_pioq_setup_read(struct mxs_i2c_dev *i2c, u8 addr, int len,
- int flags)
-{
- u32 data;
-
- writel(MXS_CMD_I2C_SELECT, i2c->regs + MXS_I2C_QUEUECMD);
-
- data = (addr << 1) | I2C_SMBUS_READ;
- writel(data, i2c->regs + MXS_I2C_DATA);
-
- data = MXS_CMD_I2C_READ | MXS_I2C_CTRL0_XFER_COUNT(len) | flags;
- writel(data, i2c->regs + MXS_I2C_QUEUECMD);
-}
-
-static void mxs_i2c_pioq_setup_write(struct mxs_i2c_dev *i2c,
- u8 addr, u8 *buf, int len, int flags)
-{
- u32 data;
- int i, shifts_left;
-
- data = MXS_CMD_I2C_WRITE | MXS_I2C_CTRL0_XFER_COUNT(len + 1) | flags;
- writel(data, i2c->regs + MXS_I2C_QUEUECMD);
-
- /*
- * We have to copy the slave address (u8) and buffer (arbitrary number
- * of u8) into the data register (u32). To achieve that, the u8 are put
- * into the MSBs of 'data' which is then shifted for the next u8. When
- * appropriate, 'data' is written to MXS_I2C_DATA. So, the first u32
- * looks like this:
- *
- * 3 2 1 0
- * 10987654|32109876|54321098|76543210
- * --------+--------+--------+--------
- * buffer+2|buffer+1|buffer+0|slave_addr
- */
-
- data = ((addr << 1) | I2C_SMBUS_WRITE) << 24;
-
- for (i = 0; i < len; i++) {
- data >>= 8;
- data |= buf[i] << 24;
- if ((i & 3) == 2)
- writel(data, i2c->regs + MXS_I2C_DATA);
- }
-
- /* Write out the remaining bytes if any */
- shifts_left = 24 - (i & 3) * 8;
- if (shifts_left)
- writel(data >> shifts_left, i2c->regs + MXS_I2C_DATA);
-}
-
-/*
- * TODO: should be replaceable with a waitqueue and RD_QUEUE_IRQ (setting the
- * rd_threshold to 1). Couldn't get this to work, though.
- */
-static int mxs_i2c_wait_for_data(struct mxs_i2c_dev *i2c)
-{
- unsigned long timeout = jiffies + msecs_to_jiffies(1000);
-
- while (readl(i2c->regs + MXS_I2C_QUEUESTAT)
- & MXS_I2C_QUEUESTAT_RD_QUEUE_EMPTY) {
- if (time_after(jiffies, timeout))
- return -ETIMEDOUT;
- cond_resched();
- }
-
- return 0;
-}
-
-static int mxs_i2c_finish_read(struct mxs_i2c_dev *i2c, u8 *buf, int len)
-{
- u32 uninitialized_var(data);
- int i;
-
- for (i = 0; i < len; i++) {
- if ((i & 3) == 0) {
- if (mxs_i2c_wait_for_data(i2c))
- return -ETIMEDOUT;
- data = readl(i2c->regs + MXS_I2C_QUEUEDATA);
- }
- buf[i] = data & 0xff;
- data >>= 8;
- }
-
- return 0;
}
static void mxs_i2c_dma_finish(struct mxs_i2c_dev *i2c)
select_init_dma_fail:
dma_unmap_sg(i2c->dev, &i2c->sg_io[0], 1, DMA_TO_DEVICE);
select_init_pio_fail:
+ dmaengine_terminate_all(i2c->dmach);
return -EINVAL;
/* Write failpath. */
write_init_dma_fail:
dma_unmap_sg(i2c->dev, i2c->sg_io, 2, DMA_TO_DEVICE);
write_init_pio_fail:
+ dmaengine_terminate_all(i2c->dmach);
return -EINVAL;
}
init_completion(&i2c->cmd_complete);
i2c->cmd_err = 0;
- if (i2c->dma_mode) {
- ret = mxs_i2c_dma_setup_xfer(adap, msg, flags);
- if (ret)
- return ret;
- } else {
- if (msg->flags & I2C_M_RD) {
- mxs_i2c_pioq_setup_read(i2c, msg->addr,
- msg->len, flags);
- } else {
- mxs_i2c_pioq_setup_write(i2c, msg->addr, msg->buf,
- msg->len, flags);
- }
-
- writel(MXS_I2C_QUEUECTRL_QUEUE_RUN,
- i2c->regs + MXS_I2C_QUEUECTRL_SET);
- }
+ ret = mxs_i2c_dma_setup_xfer(adap, msg, flags);
+ if (ret)
+ return ret;
ret = wait_for_completion_timeout(&i2c->cmd_complete,
msecs_to_jiffies(1000));
if (ret == 0)
goto timeout;
- if (!i2c->dma_mode && !i2c->cmd_err && (msg->flags & I2C_M_RD)) {
- ret = mxs_i2c_finish_read(i2c, msg->buf, msg->len);
- if (ret)
- goto timeout;
- }
-
if (i2c->cmd_err == -ENXIO)
mxs_i2c_reset(i2c);
- else
- writel(MXS_I2C_QUEUECTRL_QUEUE_RUN,
- i2c->regs + MXS_I2C_QUEUECTRL_CLR);
dev_dbg(i2c->dev, "Done with err=%d\n", i2c->cmd_err);
timeout:
dev_dbg(i2c->dev, "Timeout!\n");
- if (i2c->dma_mode)
- mxs_i2c_dma_finish(i2c);
+ mxs_i2c_dma_finish(i2c);
mxs_i2c_reset(i2c);
return -ETIMEDOUT;
}
{
struct mxs_i2c_dev *i2c = dev_id;
u32 stat = readl(i2c->regs + MXS_I2C_CTRL1) & MXS_I2C_IRQ_MASK;
- bool is_last_cmd;
if (!stat)
return IRQ_NONE;
/* MXS_I2C_CTRL1_OVERSIZE_XFER_TERM_IRQ is only for slaves */
i2c->cmd_err = -EIO;
- if (!i2c->dma_mode) {
- is_last_cmd = (readl(i2c->regs + MXS_I2C_QUEUESTAT) &
- MXS_I2C_QUEUESTAT_WRITE_QUEUE_CNT_MASK) == 0;
-
- if (is_last_cmd || i2c->cmd_err)
- complete(&i2c->cmd_complete);
- }
-
writel(stat, i2c->regs + MXS_I2C_CTRL1_CLR);
return IRQ_HANDLED;
int ret;
/*
- * The MXS I2C DMA mode is prefered and enabled by default.
- * The PIO mode is still supported, but should be used only
- * for debuging purposes etc.
- */
- i2c->dma_mode = !use_pioqueue;
- if (!i2c->dma_mode)
- dev_info(dev, "Using PIOQUEUE mode for I2C transfers!\n");
-
- /*
* TODO: This is a temporary solution and should be changed
* to use generic DMA binding later when the helpers get in.
*/
ret = of_property_read_u32(node, "fsl,i2c-dma-channel",
&i2c->dma_channel);
if (ret) {
- dev_warn(dev, "Failed to get DMA channel, using PIOQUEUE!\n");
- i2c->dma_mode = 0;
+ dev_err(dev, "Failed to get DMA channel!\n");
+ return -ENODEV;
}
ret = of_property_read_u32(node, "clock-frequency", &speed);
}
/* Setup the DMA */
- if (i2c->dma_mode) {
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
- i2c->dma_data.chan_irq = dmairq;
- i2c->dmach = dma_request_channel(mask, mxs_i2c_dma_filter, i2c);
- if (!i2c->dmach) {
- dev_err(dev, "Failed to request dma\n");
- return -ENODEV;
- }
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ i2c->dma_data.chan_irq = dmairq;
+ i2c->dmach = dma_request_channel(mask, mxs_i2c_dma_filter, i2c);
+ if (!i2c->dmach) {
+ dev_err(dev, "Failed to request dma\n");
+ return -ENODEV;
}
platform_set_drvdata(pdev, i2c);
pm_runtime_get_sync(&dev->adev->dev);
- clk_enable(dev->clk);
+ status = clk_prepare_enable(dev->clk);
+ if (status) {
+ dev_err(&dev->adev->dev, "can't prepare_enable clock\n");
+ goto out_clk;
+ }
status = init_hw(dev);
if (status)
}
out:
- clk_disable(dev->clk);
+ clk_disable_unprepare(dev->clk);
+out_clk:
pm_runtime_put_sync(&dev->adev->dev);
dev->busy = false;
#include <linux/slab.h>
#include <linux/i2c-omap.h>
#include <linux/pm_runtime.h>
-#include <linux/pm_qos.h>
/* I2C controller revisions */
#define OMAP_I2C_OMAP1_REV_2 0x20
int reg_shift; /* bit shift for I2C register addresses */
struct completion cmd_complete;
struct resource *ioarea;
- u32 latency; /* maximum MPU wkup latency */
- struct pm_qos_request pm_qos_request;
+ u32 latency; /* maximum mpu wkup latency */
+ void (*set_mpu_wkup_lat)(struct device *dev,
+ long latency);
u32 speed; /* Speed of bus in kHz */
u32 dtrev; /* extra revision from DT */
u32 flags;
dev->b_hw = 1; /* Enable hardware fixes */
/* calculate wakeup latency constraint for MPU */
- dev->latency = (1000000 * dev->threshold) / (1000 * dev->speed / 8);
+ if (dev->set_mpu_wkup_lat != NULL)
+ dev->latency = (1000000 * dev->threshold) /
+ (1000 * dev->speed / 8);
}
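A worked example of the constraint computed above (a sketch, assuming
dev->speed is in kHz and the threshold counts bytes): draining a 16-byte FIFO
at 400 kHz takes

	(1000000 * 16) / (1000 * 400 / 8) = 320

microseconds, so the MPU must be able to wake from idle within roughly 320 us
for the transfer not to stall.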
/*
dev->buf = msg->buf;
dev->buf_len = msg->len;
+ /* make sure writes to dev->buf_len are ordered */
+ barrier();
+
omap_i2c_write_reg(dev, OMAP_I2C_CNT_REG, dev->buf_len);
/* Clear the FIFO Buffers */
*/
timeout = wait_for_completion_timeout(&dev->cmd_complete,
OMAP_I2C_TIMEOUT);
- dev->buf_len = 0;
if (timeout == 0) {
dev_err(dev->dev, "controller timed out\n");
omap_i2c_init(dev);
if (r < 0)
goto out;
- /*
- * When waiting for completion of a i2c transfer, we need to
- * set a wake up latency constraint for the MPU. This is to
- * ensure quick enough wakeup from idle, when transfer
- * completes.
- */
- if (dev->latency)
- pm_qos_add_request(&dev->pm_qos_request,
- PM_QOS_CPU_DMA_LATENCY,
- dev->latency);
+ if (dev->set_mpu_wkup_lat != NULL)
+ dev->set_mpu_wkup_lat(dev->dev, dev->latency);
for (i = 0; i < num; i++) {
r = omap_i2c_xfer_msg(adap, &msgs[i], (i == (num - 1)));
break;
}
- if (dev->latency)
- pm_qos_remove_request(&dev->pm_qos_request);
+ if (dev->set_mpu_wkup_lat != NULL)
+ dev->set_mpu_wkup_lat(dev->dev, -1);
if (r == 0)
r = num;
} else if (pdata != NULL) {
dev->speed = pdata->clkrate;
dev->flags = pdata->flags;
+ dev->set_mpu_wkup_lat = pdata->set_mpu_wkup_lat;
dev->dtrev = pdata->rev;
}
dev->b_hw = 1; /* Enable hardware fixes */
/* calculate wakeup latency constraint for MPU */
- dev->latency = (1000000 * dev->fifo_size) /
- (1000 * dev->speed / 8);
+ if (dev->set_mpu_wkup_lat != NULL)
+ dev->latency = (1000000 * dev->fifo_size) /
+ (1000 * dev->speed / 8);
}
/* reset ASAP, clearing any IRQs */
dev_err(i2c->dev, "invalid gpio[%d]: %d\n", idx, gpio);
goto free_gpio;
}
+ i2c->gpios[idx] = gpio;
ret = gpio_request(gpio, "i2c-bus");
if (ret) {
}
ret = devm_request_irq(&pdev->dev, i2c_dev->irq,
- tegra_i2c_isr, 0, pdev->name, i2c_dev);
+ tegra_i2c_isr, 0, dev_name(&pdev->dev), i2c_dev);
if (ret) {
dev_err(&pdev->dev, "Failed to request irq %i\n", i2c_dev->irq);
return ret;
i2c-stub.c - I2C/SMBus chip emulator
Copyright (c) 2004 Mark M. Hoffman <mhoffman@lightlink.com>
- Copyright (C) 2007 Jean Delvare <khali@linux-fr.org>
+ Copyright (C) 2007, 2012 Jean Delvare <khali@linux-fr.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
static struct stub_chip *stub_chips;
/* Return negative errno on error. */
-static s32 stub_xfer(struct i2c_adapter * adap, u16 addr, unsigned short flags,
- char read_write, u8 command, int size, union i2c_smbus_data * data)
+static s32 stub_xfer(struct i2c_adapter *adap, u16 addr, unsigned short flags,
+ char read_write, u8 command, int size, union i2c_smbus_data *data)
{
s32 ret;
int i, len;
case I2C_SMBUS_BYTE:
if (read_write == I2C_SMBUS_WRITE) {
chip->pointer = command;
- dev_dbg(&adap->dev, "smbus byte - addr 0x%02x, "
- "wrote 0x%02x.\n",
- addr, command);
+ dev_dbg(&adap->dev,
+ "smbus byte - addr 0x%02x, wrote 0x%02x.\n",
+ addr, command);
} else {
data->byte = chip->words[chip->pointer++] & 0xff;
- dev_dbg(&adap->dev, "smbus byte - addr 0x%02x, "
- "read 0x%02x.\n",
- addr, data->byte);
+ dev_dbg(&adap->dev,
+ "smbus byte - addr 0x%02x, read 0x%02x.\n",
+ addr, data->byte);
}
ret = 0;
if (read_write == I2C_SMBUS_WRITE) {
chip->words[command] &= 0xff00;
chip->words[command] |= data->byte;
- dev_dbg(&adap->dev, "smbus byte data - addr 0x%02x, "
- "wrote 0x%02x at 0x%02x.\n",
- addr, data->byte, command);
+ dev_dbg(&adap->dev,
+ "smbus byte data - addr 0x%02x, wrote 0x%02x at 0x%02x.\n",
+ addr, data->byte, command);
} else {
data->byte = chip->words[command] & 0xff;
- dev_dbg(&adap->dev, "smbus byte data - addr 0x%02x, "
- "read 0x%02x at 0x%02x.\n",
- addr, data->byte, command);
+ dev_dbg(&adap->dev,
+ "smbus byte data - addr 0x%02x, read 0x%02x at 0x%02x.\n",
+ addr, data->byte, command);
}
chip->pointer = command + 1;
case I2C_SMBUS_WORD_DATA:
if (read_write == I2C_SMBUS_WRITE) {
chip->words[command] = data->word;
- dev_dbg(&adap->dev, "smbus word data - addr 0x%02x, "
- "wrote 0x%04x at 0x%02x.\n",
- addr, data->word, command);
+ dev_dbg(&adap->dev,
+ "smbus word data - addr 0x%02x, wrote 0x%04x at 0x%02x.\n",
+ addr, data->word, command);
} else {
data->word = chip->words[command];
- dev_dbg(&adap->dev, "smbus word data - addr 0x%02x, "
- "read 0x%04x at 0x%02x.\n",
- addr, data->word, command);
+ dev_dbg(&adap->dev,
+ "smbus word data - addr 0x%02x, read 0x%04x at 0x%02x.\n",
+ addr, data->word, command);
}
ret = 0;
chip->words[command + i] &= 0xff00;
chip->words[command + i] |= data->block[1 + i];
}
- dev_dbg(&adap->dev, "i2c block data - addr 0x%02x, "
- "wrote %d bytes at 0x%02x.\n",
- addr, len, command);
+ dev_dbg(&adap->dev,
+ "i2c block data - addr 0x%02x, wrote %d bytes at 0x%02x.\n",
+ addr, len, command);
} else {
for (i = 0; i < len; i++) {
data->block[1 + i] =
chip->words[command + i] & 0xff;
}
- dev_dbg(&adap->dev, "i2c block data - addr 0x%02x, "
- "read %d bytes at 0x%02x.\n",
- addr, len, command);
+ dev_dbg(&adap->dev,
+ "i2c block data - addr 0x%02x, read %d bytes at 0x%02x.\n",
+ addr, len, command);
}
ret = 0;
int i, ret;
if (!chip_addr[0]) {
- printk(KERN_ERR "i2c-stub: Please specify a chip address\n");
+ pr_err("i2c-stub: Please specify a chip address\n");
return -ENODEV;
}
for (i = 0; i < MAX_CHIPS && chip_addr[i]; i++) {
if (chip_addr[i] < 0x03 || chip_addr[i] > 0x77) {
- printk(KERN_ERR "i2c-stub: Invalid chip address "
- "0x%02x\n", chip_addr[i]);
+ pr_err("i2c-stub: Invalid chip address 0x%02x\n",
+ chip_addr[i]);
return -EINVAL;
}
- printk(KERN_INFO "i2c-stub: Virtual chip at 0x%02x\n",
- chip_addr[i]);
+ pr_info("i2c-stub: Virtual chip at 0x%02x\n", chip_addr[i]);
}
/* Allocate memory for all chips at once */
stub_chips = kzalloc(i * sizeof(struct stub_chip), GFP_KERNEL);
if (!stub_chips) {
- printk(KERN_ERR "i2c-stub: Out of memory\n");
+ pr_err("i2c-stub: Out of memory\n");
return -ENOMEM;
}
module_init(i2c_stub_init);
module_exit(i2c_stub_exit);
-
mux->busses = devm_kzalloc(&pdev->dev,
sizeof(mux->busses) * mux->pdata->bus_count,
GFP_KERNEL);
- if (!mux->states) {
+ if (!mux->busses) {
dev_err(&pdev->dev, "Cannot allocate busses\n");
ret = -ENOMEM;
goto err;
source "drivers/iio/dac/Kconfig"
source "drivers/iio/common/Kconfig"
source "drivers/iio/gyro/Kconfig"
-source "drivers/iio/light/Kconfig"
source "drivers/iio/magnetometer/Kconfig"
endif # IIO
obj-y += dac/
obj-y += common/
obj-y += gyro/
-obj-y += light/
obj-y += magnetometer/
ret = alloc_pbl(mhp, npages);
if (ret) {
kfree(page_list);
- goto err_pbl;
+ goto err;
}
ret = write_pbl(&mhp->rhp->rdev, page_list, mhp->attr.pbl_addr,
{
if (index >= NUM_ALIAS_GUID_PER_PORT) {
pr_err("%s: ERROR: asked for index:%d\n", __func__, index);
- return (__force __be64) ((u64) 0xFFFFFFFFFFFFFFFFUL);
+ return (__force __be64) -1;
}
return *(__be64 *)&dev->sriov.demux[port - 1].guid_cache[index];
}
}
-static int get_pkey_phys_indices(struct mlx4_ib_dev *ibdev, u8 port, u8 ph_pkey_ix,
- u8 *full_pk_ix, u8 *partial_pk_ix,
- int *is_full_member)
+static int find_slave_port_pkey_ix(struct mlx4_ib_dev *dev, int slave,
+ u8 port, u16 pkey, u16 *ix)
{
- u16 search_pkey;
- int fm;
- int err = 0;
- u16 pk;
+ int i, ret;
+ u8 unassigned_pkey_ix, pkey_ix, partial_ix = 0xFF;
+ u16 slot_pkey;
- err = ib_get_cached_pkey(&ibdev->ib_dev, port, ph_pkey_ix, &search_pkey);
- if (err)
- return err;
+ if (slave == mlx4_master_func_num(dev->dev))
+ return ib_find_cached_pkey(&dev->ib_dev, port, pkey, ix);
- fm = (search_pkey & 0x8000) ? 1 : 0;
- if (fm) {
- *full_pk_ix = ph_pkey_ix;
- search_pkey &= 0x7FFF;
- } else {
- *partial_pk_ix = ph_pkey_ix;
- search_pkey |= 0x8000;
- }
+ unassigned_pkey_ix = dev->dev->phys_caps.pkey_phys_table_len[port] - 1;
- if (ib_find_exact_cached_pkey(&ibdev->ib_dev, port, search_pkey, &pk))
- pk = 0xFFFF;
+ for (i = 0; i < dev->dev->caps.pkey_table_len[port]; i++) {
+ if (dev->pkeys.virt2phys_pkey[slave][port - 1][i] == unassigned_pkey_ix)
+ continue;
- if (fm)
- *partial_pk_ix = (pk & 0xFF);
- else
- *full_pk_ix = (pk & 0xFF);
+ pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][i];
- *is_full_member = fm;
- return err;
+ ret = ib_get_cached_pkey(&dev->ib_dev, port, pkey_ix, &slot_pkey);
+ if (ret)
+ continue;
+ if ((slot_pkey & 0x7FFF) == (pkey & 0x7FFF)) {
+ if (slot_pkey & 0x8000) {
+ *ix = (u16) pkey_ix;
+ return 0;
+ } else {
+ /* take first partial pkey index found */
+ if (partial_ix == 0xFF)
+ partial_ix = pkey_ix;
+ }
+ }
+ }
+
+ if (partial_ix < 0xFF) {
+ *ix = (u16) partial_ix;
+ return 0;
+ }
+
+ return -EINVAL;
}
int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
unsigned tun_tx_ix = 0;
int dqpn;
int ret = 0;
- int i;
- int is_full_member = 0;
u16 tun_pkey_ix;
- u8 ph_pkey_ix, full_pk_ix = 0, partial_pk_ix = 0;
+ u16 cached_pkey;
if (dest_qpt > IB_QPT_GSI)
return -EINVAL;
else
tun_qp = &tun_ctx->qp[1];
- /* compute pkey index for slave */
- /* get physical pkey -- virtualized Dom0 pkey to phys*/
+ /* compute P_Key index to put in tunnel header for slave */
if (dest_qpt) {
- ph_pkey_ix =
- dev->pkeys.virt2phys_pkey[mlx4_master_func_num(dev->dev)][port - 1][wc->pkey_index];
-
- /* now, translate this to the slave pkey index */
- ret = get_pkey_phys_indices(dev, port, ph_pkey_ix, &full_pk_ix,
- &partial_pk_ix, &is_full_member);
+ u16 pkey_ix;
+ ret = ib_get_cached_pkey(&dev->ib_dev, port, wc->pkey_index, &cached_pkey);
if (ret)
return -EINVAL;
- for (i = 0; i < dev->dev->caps.pkey_table_len[port]; i++) {
- if ((dev->pkeys.virt2phys_pkey[slave][port - 1][i] == full_pk_ix) ||
- (is_full_member &&
- (dev->pkeys.virt2phys_pkey[slave][port - 1][i] == partial_pk_ix)))
- break;
- }
- if (i == dev->dev->caps.pkey_table_len[port])
+ ret = find_slave_port_pkey_ix(dev, slave, port, cached_pkey, &pkey_ix);
+ if (ret)
return -EINVAL;
- tun_pkey_ix = i;
+ tun_pkey_ix = pkey_ix;
} else
tun_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][0];
ib_query_ah(dev->sm_ah[ctx->port - 1], &ah_attr);
- wc.pkey_index = 0;
+ if (ib_find_cached_pkey(&dev->ib_dev, ctx->port, IB_DEFAULT_PKEY_FULL, &wc.pkey_index))
+ return -EINVAL;
wc.sl = 0;
wc.dlid_path_bits = 0;
wc.port_num = ctx->port;
unsigned long end;
int count;
- if (ctx->flushing)
- return;
-
- ctx->flushing = 1;
for (i = 0; i < MAX_VFS; ++i)
clean_vf_mcast(ctx, i);
force_clean_group(group);
}
mutex_unlock(&ctx->mcg_table_lock);
-
- if (!destroy_wq)
- ctx->flushing = 0;
}
struct clean_work {
struct clean_work *cw = container_of(work, struct clean_work, work);
_mlx4_ib_mcg_port_cleanup(cw->ctx, cw->destroy_wq);
+ cw->ctx->flushing = 0;
kfree(cw);
}
{
struct clean_work *work;
+ if (ctx->flushing)
+ return;
+
+ ctx->flushing = 1;
+
if (destroy_wq) {
_mlx4_ib_mcg_port_cleanup(ctx, destroy_wq);
+ ctx->flushing = 0;
return;
}
work = kmalloc(sizeof *work, GFP_KERNEL);
if (!work) {
+ ctx->flushing = 0;
mcg_warn("failed allocating work for cleanup\n");
return;
}
kfree(client);
evdev_close_device(evdev);
- put_device(&evdev->dev);
return 0;
}
file->private_data = client;
nonseekable_open(inode, file);
- get_device(&evdev->dev);
return 0;
err_free_client:
goto err_free_evdev;
cdev_init(&evdev->cdev, &evdev_fops);
+ evdev->cdev.kobj.parent = &evdev->dev.kobj;
error = cdev_add(&evdev->cdev, evdev->dev.devt, 1);
if (error)
goto err_unregister_handle;
* input_mt_init_slots() - initialize MT input slots
* @dev: input device supporting MT events and finger tracking
* @num_slots: number of slots used by the device
+ * @flags: mt tasks to handle in core
*
* This function allocates all necessary memory for MT slot handling
* in the input device, prepares the ABS_MT_SLOT and
* ABS_MT_TRACKING_ID events for use and sets up appropriate buffers.
+ * Depending on the flags set, it also performs pointer emulation and
+ * frame synchronization.
+ *
* May be called repeatedly. Returns -EINVAL if attempting to
* reinitialize with a different number of slots.
*/
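A minimal usage sketch for the extended interface (the flag names below are
taken from include/linux/input/mt.h of this kernel generation and are an
assumption here, not part of the patch):

	/* let the core handle pointer emulation and drop unused slots */
	error = input_mt_init_slots(dev, num_slots,
				    INPUT_MT_POINTER | INPUT_MT_DROP_UNUSED);
	if (error)
		return error;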
kfree(client);
joydev_close_device(joydev);
- put_device(&joydev->dev);
return 0;
}
file->private_data = client;
nonseekable_open(inode, file);
- get_device(&joydev->dev);
return 0;
err_free_client:
goto err_free_joydev;
cdev_init(&joydev->cdev, &joydev_fops);
+ joydev->cdev.kobj.parent = &joydev->dev.kobj;
error = cdev_add(&joydev->cdev, joydev->dev.devt, 1);
if (error)
goto err_unregister_handle;
config KEYBOARD_LPC32XX
tristate "LPC32XX matrix key scanner support"
depends on ARCH_LPC32XX && OF
+ select INPUT_MATRIXKMAP
help
Say Y here if you want to use NXP LPC32XX SoC key scanner interface,
connected to a key matrix.
unsigned int mask = 0, direct_key_num = 0;
unsigned long kpc = 0;
+ /* clear pending interrupt bit */
+ keypad_readl(KPC);
+
/* enable matrix keys with automatic scan */
if (pdata->matrix_key_rows && pdata->matrix_key_cols) {
kpc |= KPC_ASACT | KPC_MIE | KPC_ME | KPC_MS_ALL;
case XenbusStateReconfiguring:
case XenbusStateReconfigured:
case XenbusStateUnknown:
- case XenbusStateClosed:
break;
case XenbusStateInitWait:
break;
+ case XenbusStateClosed:
+ if (dev->state == XenbusStateClosed)
+ break;
+ /* Missed the backend's CLOSING state -- fallthrough */
case XenbusStateClosing:
xenbus_frontend_closed(dev);
break;
#define USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI 0x0262
#define USB_DEVICE_ID_APPLE_WELLSPRING7_ISO 0x0263
#define USB_DEVICE_ID_APPLE_WELLSPRING7_JIS 0x0264
+/* MacbookPro10,2 (unibody, October 2012) */
+#define USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI 0x0259
+#define USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO 0x025a
+#define USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS 0x025b
#define BCM5974_DEVICE(prod) { \
.match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI),
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7_ISO),
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7_JIS),
+ /* MacbookPro10,2 */
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI),
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO),
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS),
/* Terminating entry */
{}
};
{ SN_COORD, -150, 6730 },
{ SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION }
},
+ {
+ USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI,
+ USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO,
+ USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS,
+ HAS_INTEGRATED_BUTTON,
+ 0x84, sizeof(struct bt_data),
+ 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+ { SN_PRESSURE, 0, 300 },
+ { SN_WIDTH, 0, 2048 },
+ { SN_COORD, -4750, 5280 },
+ { SN_COORD, -150, 6730 },
+ { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION }
+ },
{}
};
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define MOUSEDEV_MINOR_BASE 32
-#define MOUSEDEV_MINORS 32
-#define MOUSEDEV_MIX 31
+#define MOUSEDEV_MINORS 31
+#define MOUSEDEV_MIX 63
#include <linux/sched.h>
#include <linux/slab.h>
kfree(client);
mousedev_close_device(mousedev);
- put_device(&mousedev->dev);
return 0;
}
file->private_data = client;
nonseekable_open(inode, file);
- get_device(&mousedev->dev);
return 0;
err_free_client:
}
cdev_init(&mousedev->cdev, &mousedev_fops);
+ mousedev->cdev.kobj.parent = &mousedev->dev.kobj;
error = cdev_add(&mousedev->cdev, mousedev->dev.devt, 1);
if (error)
goto err_unregister_handle;
features->pktlen = WACOM_PKGLEN_TPC2FG;
}
- if (features->type == MTSCREEN)
+ if (features->type == MTSCREEN || features->type == WACOM_24HDT)
features->pktlen = WACOM_PKGLEN_MTOUCH;
if (features->type == BAMBOO_PT) {
features->x_max =
get_unaligned_le16(&report[i + 8]);
i += 15;
+ } else if (features->type == WACOM_24HDT) {
+ features->x_max =
+ get_unaligned_le16(&report[i + 3]);
+ features->x_phy =
+ get_unaligned_le16(&report[i + 8]);
+ features->unit = report[i - 1];
+ features->unitExpo = report[i - 3];
+ i += 12;
} else {
features->x_max =
get_unaligned_le16(&report[i + 3]);
features->y_phy =
get_unaligned_le16(&report[i + 6]);
i += 7;
+ } else if (type == WACOM_24HDT) {
+ features->y_max =
+ get_unaligned_le16(&report[i + 3]);
+ features->y_phy =
+ get_unaligned_le16(&report[i - 2]);
+ i += 7;
} else if (type == BAMBOO_PT) {
features->y_phy =
get_unaligned_le16(&report[i + 3]);
/* MT Tablet PC touch */
return wacom_set_device_mode(intf, 3, 4, 4);
}
+ else if (features->type == WACOM_24HDT) {
+ return wacom_set_device_mode(intf, 18, 3, 2);
+ }
} else if (features->device_type == BTN_TOOL_PEN) {
if (features->type <= BAMBOO_PT && features->type != WIRELESS) {
return wacom_set_device_mode(intf, 2, 2, 2);
static LIST_HEAD(wacom_udev_list);
static DEFINE_MUTEX(wacom_udev_list_lock);
+static struct usb_device *wacom_get_sibling(struct usb_device *dev,
+					    int vendor, int product)
+{
+ int port1;
+ struct usb_device *sibling;
+
+ if (vendor == 0 && product == 0)
+ return dev;
+
+ if (dev->parent == NULL)
+ return NULL;
+
+ usb_hub_for_each_child(dev->parent, port1, sibling) {
+ struct usb_device_descriptor *d;
+ if (sibling == NULL)
+ continue;
+
+ d = &sibling->descriptor;
+ if (d->idVendor == vendor && d->idProduct == product)
+ return sibling;
+ }
+
+ return NULL;
+}
+
static struct wacom_usbdev_data *wacom_get_usbdev_data(struct usb_device *dev)
{
struct wacom_usbdev_data *data;
strlcpy(wacom_wac->name, features->name, sizeof(wacom_wac->name));
if (features->quirks & WACOM_QUIRK_MULTI_INPUT) {
+ struct usb_device *other_dev;
+
/* Append the device type to the name */
strlcat(wacom_wac->name,
features->device_type == BTN_TOOL_PEN ?
" Pen" : " Finger",
sizeof(wacom_wac->name));
- error = wacom_add_shared_data(wacom_wac, dev);
+
+ other_dev = wacom_get_sibling(dev, features->oVid, features->oPid);
+ if (other_dev == NULL || wacom_get_usbdev_data(other_dev) == NULL)
+ other_dev = dev;
+ error = wacom_add_shared_data(wacom_wac, other_dev);
if (error)
goto fail3;
}
return -1;
}
+static int int_dist(int x1, int y1, int x2, int y2)
+{
+ int x = x2 - x1;
+ int y = y2 - y1;
+
+ return int_sqrt(x*x + y*y);
+}
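int_dist() is an integer Euclidean distance built on int_sqrt(); a quick
worked example: int_dist(0, 0, 3, 4) = int_sqrt(9 + 16) = 5, the classic
3-4-5 triangle.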
+
+static int wacom_24hdt_irq(struct wacom_wac *wacom)
+{
+ struct input_dev *input = wacom->input;
+ char *data = wacom->data;
+ int i;
+ int current_num_contacts = data[61];
+ int contacts_to_send = 0;
+
+ /*
+	 * The first packet in a series resets the counter, since only
+	 * the first packet carries a non-zero current_num_contacts.
+ */
+ if (current_num_contacts)
+ wacom->num_contacts_left = current_num_contacts;
+
+ /* There are at most 4 contacts per packet */
+ contacts_to_send = min(4, wacom->num_contacts_left);
+
+ for (i = 0; i < contacts_to_send; i++) {
+ int offset = (WACOM_BYTES_PER_24HDT_PACKET * i) + 1;
+ bool touch = data[offset] & 0x1 && !wacom->shared->stylus_in_proximity;
+ int id = data[offset + 1];
+ int slot = find_slot_from_contactid(wacom, id);
+
+ if (slot < 0)
+ continue;
+ input_mt_slot(input, slot);
+ input_mt_report_slot_state(input, MT_TOOL_FINGER, touch);
+
+ if (touch) {
+ int t_x = le16_to_cpup((__le16 *)&data[offset + 2]);
+ int c_x = le16_to_cpup((__le16 *)&data[offset + 4]);
+ int t_y = le16_to_cpup((__le16 *)&data[offset + 6]);
+ int c_y = le16_to_cpup((__le16 *)&data[offset + 8]);
+ int w = le16_to_cpup((__le16 *)&data[offset + 10]);
+ int h = le16_to_cpup((__le16 *)&data[offset + 12]);
+
+ input_report_abs(input, ABS_MT_POSITION_X, t_x);
+ input_report_abs(input, ABS_MT_POSITION_Y, t_y);
+ input_report_abs(input, ABS_MT_TOUCH_MAJOR, min(w,h));
+			input_report_abs(input, ABS_MT_TOUCH_MAJOR, min(w, h));
+ input_report_abs(input, ABS_MT_WIDTH_MINOR, min(w, h));
+ input_report_abs(input, ABS_MT_ORIENTATION, w > h);
+ }
+ wacom->slots[slot] = touch ? id : -1;
+ }
+
+ input_mt_report_pointer_emulation(input, true);
+
+ wacom->num_contacts_left -= contacts_to_send;
+ if (wacom->num_contacts_left <= 0)
+ wacom->num_contacts_left = 0;
+
+ return 1;
+}
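The offsets used by the parser above imply the following per-contact record
layout (a sketch reconstructed from the code, not an official descriptor):

	struct wacom_24hdt_contact {	/* 14 bytes = WACOM_BYTES_PER_24HDT_PACKET */
		u8	flags;		/* bit 0: touch down */
		u8	id;		/* contact id */
		__le16	t_x, c_x;	/* touch point x, contact-center x */
		__le16	t_y, c_y;	/* touch point y, contact-center y */
		__le16	w, h;		/* contact width and height */
	} __packed;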
+
static int wacom_mt_touch(struct wacom_wac *wacom)
{
struct input_dev *input = wacom->input;
sync = wacom_intuos_irq(wacom_wac);
break;
+ case WACOM_24HDT:
+ sync = wacom_24hdt_irq(wacom_wac);
+ break;
+
case INTUOS5S:
case INTUOS5:
case INTUOS5L:
/* these device have multiple inputs */
if (features->type >= WIRELESS ||
- (features->type >= INTUOS5S && features->type <= INTUOS5L))
+ (features->type >= INTUOS5S && features->type <= INTUOS5L) ||
+ (features->oVid && features->oPid))
features->quirks |= WACOM_QUIRK_MULTI_INPUT;
/* quirk for bamboo touch with 2 low res touches */
input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
input_set_abs_params(input_dev, ABS_THROTTLE, 0, 71, 0, 0);
+
+ __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
+
wacom_setup_cintiq(wacom_wac);
break;
__set_bit(INPUT_PROP_POINTER, input_dev->propbit);
break;
+ case WACOM_24HDT:
+ if (features->device_type == BTN_TOOL_FINGER) {
+ input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0, features->x_max, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_WIDTH_MAJOR, 0, features->x_max, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_WIDTH_MINOR, 0, features->y_max, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_ORIENTATION, 0, 1, 0, 0);
+ }
+ /* fall through */
+
case MTSCREEN:
if (features->device_type == BTN_TOOL_FINGER) {
wacom_wac->slots = kmalloc(features->touch_max *
{ "Wacom Cintiq 24HD", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047,
63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
static const struct wacom_features wacom_features_0xF8 =
- { "Wacom Cintiq 24HD touch", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047,
- 63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+ { "Wacom Cintiq 24HD touch", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047, /* Pen */
+ 63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, .oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf6 };
+static const struct wacom_features wacom_features_0xF6 =
+ { "Wacom Cintiq 24HD touch", .type = WACOM_24HDT, /* Touch */
+ .oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf8, .touch_max = 10 };
static const struct wacom_features wacom_features_0x3F =
{ "Wacom Cintiq 21UX", WACOM_PKGLEN_INTUOS, 87200, 65600, 1023,
63, CINTIQ, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
{ USB_DEVICE_WACOM(0x47) },
{ USB_DEVICE_WACOM(0xF4) },
{ USB_DEVICE_WACOM(0xF8) },
+ { USB_DEVICE_WACOM(0xF6) },
{ USB_DEVICE_WACOM(0xFA) },
{ USB_DEVICE_LENOVO(0x6004) },
{ }
/* wacom data size per MT contact */
#define WACOM_BYTES_PER_MT_PACKET 11
+#define WACOM_BYTES_PER_24HDT_PACKET 14
/* device IDs */
#define STYLUS_DEVICE_ID 0x02
#define WACOM_REPORT_TPCHID 15
#define WACOM_REPORT_TPCST 16
#define WACOM_REPORT_TPC1FGE 18
+#define WACOM_REPORT_24HDT 1
/* device quirks */
#define WACOM_QUIRK_MULTI_INPUT 0x0001
WACOM_MO,
WIRELESS,
BAMBOO_PT,
+ WACOM_24HDT,
TABLETPC, /* add new TPC below */
TABLETPCE,
TABLETPC2FG,
int distance_fuzz;
unsigned quirks;
unsigned touch_max;
+ int oVid;
+ int oPid;
};
struct wacom_shared {
config TOUCHSCREEN_EGALAX
tristate "EETI eGalax multi-touch panel support"
- depends on I2C
+ depends on I2C && OF
help
Say Y here to enable support for I2C connected EETI
eGalax multi-touch panels.
static SIMPLE_DEV_PM_OPS(ads7846_pm, ads7846_suspend, ads7846_resume);
-static int __devinit ads7846_setup_pendown(struct spi_device *spi, struct ads7846 *ts)
+static int __devinit ads7846_setup_pendown(struct spi_device *spi,
+ struct ads7846 *ts)
{
struct ads7846_platform_data *pdata = spi->dev.platform_data;
int err;
ts->gpio_pendown = pdata->gpio_pendown;
+ if (pdata->gpio_pendown_debounce)
+ gpio_set_debounce(pdata->gpio_pendown,
+ pdata->gpio_pendown_debounce);
} else {
dev_err(&spi->dev, "no get_pendown_state nor gpio_pendown?\n");
return -EINVAL;
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/input/mt.h>
+#include <linux/of_gpio.h>
/*
* Mouse Mode: some panel may configure the controller to mouse mode,
/* wake up controller by an falling edge of interrupt gpio. */
static int egalax_wake_up_device(struct i2c_client *client)
{
- int gpio = irq_to_gpio(client->irq);
+ struct device_node *np = client->dev.of_node;
+ int gpio;
int ret;
+ if (!np)
+ return -ENODEV;
+
+ gpio = of_get_named_gpio(np, "wakeup-gpios", 0);
+ if (!gpio_is_valid(gpio))
+ return -ENODEV;
+
ret = gpio_request(gpio, "egalax_irq");
if (ret < 0) {
dev_err(&client->dev,
ts->input_dev = input_dev;
/* controller may be in sleep, wake it up. */
- egalax_wake_up_device(client);
+ error = egalax_wake_up_device(client);
+ if (error) {
+ dev_err(&client->dev, "Failed to wake up the controller\n");
+ goto err_free_dev;
+ }
ret = egalax_firmware_version(client);
if (ret < 0) {
static SIMPLE_DEV_PM_OPS(egalax_ts_pm_ops, egalax_ts_suspend, egalax_ts_resume);
+static struct of_device_id egalax_ts_dt_ids[] = {
+ { .compatible = "eeti,egalax_ts" },
+ { /* sentinel */ }
+};
+
static struct i2c_driver egalax_ts_driver = {
.driver = {
.name = "egalax_ts",
.owner = THIS_MODULE,
.pm = &egalax_ts_pm_ops,
+ .of_match_table = of_match_ptr(egalax_ts_dt_ids),
},
.id_table = egalax_ts_id,
.probe = egalax_ts_probe,
__set_bit(BTN_TOUCH, input_dev->keybit);
input_set_abs_params(ptsc->dev, ABS_X, 0, 0x3ff, 0, 0);
input_set_abs_params(ptsc->dev, ABS_Y, 0, 0x3ff, 0, 0);
- input_set_abs_params(ptsc->dev, ABS_PRESSURE, 0, 0, 0, 0);
serio_set_drvdata(serio, ptsc);
#endif
}
+/* SB IOAPIC is always on this device in AMD systems */
+#define IOAPIC_SB_DEVID ((0x00 << 8) | PCI_DEVFN(0x14, 0))
+
static bool __init check_ioapic_information(void)
{
+ bool ret, has_sb_ioapic;
int idx;
- for (idx = 0; idx < nr_ioapics; idx++) {
- int id = mpc_ioapic_id(idx);
+ has_sb_ioapic = false;
+ ret = false;
- if (get_ioapic_devid(id) < 0) {
- pr_err(FW_BUG "AMD-Vi: IO-APIC[%d] not in IVRS table\n", id);
- pr_err("AMD-Vi: Disabling interrupt remapping due to BIOS Bug\n");
- return false;
+ for (idx = 0; idx < nr_ioapics; idx++) {
+ int devid, id = mpc_ioapic_id(idx);
+
+ devid = get_ioapic_devid(id);
+ if (devid < 0) {
+ pr_err(FW_BUG "AMD-Vi: IOAPIC[%d] not in IVRS table\n", id);
+ ret = false;
+ } else if (devid == IOAPIC_SB_DEVID) {
+ has_sb_ioapic = true;
+ ret = true;
}
}
- return true;
+ if (!has_sb_ioapic) {
+ /*
+ * We expect the SB IOAPIC to be listed in the IVRS
+ * table. The system timer is connected to the SB IOAPIC
+		 * and if it is missing from the list the system will
+		 * panic at boot time. This usually happens when a
+		 * buggy BIOS provides the wrong device id for the
+		 * IOAPIC in the system.
+ */
+ pr_err(FW_BUG "AMD-Vi: No southbridge IOAPIC found in IVRS table\n");
+ }
+
+ if (!ret)
+ pr_err("AMD-Vi: Disabling interrupt remapping due to BIOS Bug(s)\n");
+
+ return ret;
}
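For context, IVRS device ids follow the PCI encoding devid = (bus << 8) |
devfn (an assumption based on the AMD IOMMU specification), so the
IOAPIC_SB_DEVID above names 00:14.0, the traditional southbridge/FCH
location: (0x00 << 8) | PCI_DEVFN(0x14, 0) = 0x00a0.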
static void __init free_dma_resources(void)
static int intel_iommu_add_device(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
- struct pci_dev *bridge, *dma_pdev;
+ struct pci_dev *bridge, *dma_pdev = NULL;
struct iommu_group *group;
int ret;
dma_pdev = pci_get_domain_bus_and_slot(
pci_domain_nr(pdev->bus),
bridge->subordinate->number, 0);
- else
+ if (!dma_pdev)
dma_pdev = pci_dev_get(bridge);
} else
dma_pdev = pci_dev_get(pdev);
#define SMMU_ADDR_TO_PFN(addr) ((addr) >> 12)
#define SMMU_ADDR_TO_PDN(addr) ((addr) >> 22)
-#define SMMU_PDN_TO_ADDR(addr) ((pdn) << 22)
+#define SMMU_PDN_TO_ADDR(pdn) ((pdn) << 22)
#define _READABLE (1 << SMMU_PTB_DATA_ASID_READABLE_SHIFT)
#define _WRITABLE (1 << SMMU_PTB_DATA_ASID_WRITABLE_SHIFT)
stats[i], val, offs);
}
seq_printf(s, "\n");
+ dput(dent);
return 0;
}
}
static struct of_device_id irq_of_match[] __initconst = {
- { .compatible = "brcm,bcm2835-armctrl-ic", .data = armctrl_of_init }
+ { .compatible = "brcm,bcm2835-armctrl-ic", .data = armctrl_of_init },
+ { }
};
void __init bcm2835_init_irq(void)
menuconfig ISDN
bool "ISDN support"
- depends on NET
+ depends on NET && NETDEVICES
depends on !S390 && !UML
---help---
ISDN ("Integrated Services Digital Network", called RNIS in France)
if (rc == 0)
/* success, resubmit interrupt read URB */
rc = usb_submit_urb(urb, GFP_ATOMIC);
- if (rc != 0 && rc != -ENODEV) {
+
+ switch (rc) {
+ case 0: /* success */
+ case -ENODEV: /* device gone */
+ case -EINVAL: /* URB already resubmitted, or terminal badness */
+ break;
+ default: /* failure: try to recover by resetting the device */
dev_err(cs->dev, "clear halt failed: %s\n", get_usb_rcmsg(rc));
rc = usb_lock_device_for_reset(ucs->udev, ucs->interface);
if (rc == 0) {
}
/* gigaset_suspend
- * This function is called before the USB connection is suspended.
+ * This function is called before the USB connection is suspended
+ * or before the USB device is reset.
+ * In the latter case, message == PMSG_ON.
*/
static int gigaset_suspend(struct usb_interface *intf, pm_message_t message)
{
del_timer_sync(&ucs->timer_atrdy);
del_timer_sync(&ucs->timer_cmd_in);
del_timer_sync(&ucs->timer_int_in);
- cancel_work_sync(&ucs->int_in_wq);
+
+ /* don't try to cancel int_in_wq from within reset as it
+ * might be the one requesting the reset
+ */
+ if (message.event != PM_EVENT_ON)
+ cancel_work_sync(&ucs->int_in_wq);
gig_dbg(DEBUG_SUSPEND, "suspend complete");
return 0;
config ISDN_PPP
bool "Support synchronous PPP"
- depends on INET && NETDEVICES
+ depends on INET
select SLHC
help
Over digital connections such as ISDN, there is no need to
} else
return -EINVAL;
break;
-#ifdef CONFIG_NETDEVICES
case IIOCNETGPN:
/* Get peer phone number of a connected
* isdn network interface */
return isdn_net_getpeer(&phone, argp);
} else
return -EINVAL;
-#endif
default:
return -EINVAL;
}
case IIOCNETLCR:
printk(KERN_INFO "INFO: ISDN_ABC_LCR_SUPPORT not enabled\n");
return -ENODEV;
-#ifdef CONFIG_NETDEVICES
case IIOCNETAIF:
/* Add a network-interface */
if (arg) {
return -EFAULT;
return isdn_net_force_hangup(name);
break;
-#endif /* CONFIG_NETDEVICES */
case IIOCSETVER:
dev->net_verbose = arg;
printk(KERN_INFO "isdn: Verbose-Level is %d\n", dev->net_verbose);
struct led_trigger_cpu {
char name[MAX_NAME_LEN];
struct led_trigger *_trig;
- struct mutex lock;
- int lock_is_inited;
};
static DEFINE_PER_CPU(struct led_trigger_cpu, cpu_trig);
{
struct led_trigger_cpu *trig = &__get_cpu_var(cpu_trig);
- /* mutex lock should be initialized before calling mutex_call() */
- if (!trig->lock_is_inited)
- return;
-
- mutex_lock(&trig->lock);
-
/* Locate the correct CPU LED */
switch (ledevt) {
case CPU_LED_IDLE_END:
/* Will leave the LED as it is */
break;
}
-
- mutex_unlock(&trig->lock);
}
EXPORT_SYMBOL(ledtrig_cpu);
for_each_possible_cpu(cpu) {
struct led_trigger_cpu *trig = &per_cpu(cpu_trig, cpu);
- mutex_init(&trig->lock);
-
snprintf(trig->name, MAX_NAME_LEN, "cpu%d", cpu);
- mutex_lock(&trig->lock);
led_trigger_register_simple(trig->name, &trig->_trig);
- trig->lock_is_inited = 1;
- mutex_unlock(&trig->lock);
}
register_syscore_ops(&ledtrig_cpu_syscore_ops);
for_each_possible_cpu(cpu) {
struct led_trigger_cpu *trig = &per_cpu(cpu_trig, cpu);
- mutex_lock(&trig->lock);
-
led_trigger_unregister_simple(trig->_trig);
trig->_trig = NULL;
memset(trig->name, 0, MAX_NAME_LEN);
- trig->lock_is_inited = 0;
-
- mutex_unlock(&trig->lock);
- mutex_destroy(&trig->lock);
}
unregister_syscore_ops(&ledtrig_cpu_syscore_ops);
if (!md_in_flight(md))
wake_up(&md->wait);
+ /*
+	 * Run the queue asynchronously, off this call path: drivers may
+	 * invoke end_io from inside their request_fn (holding the queue
+	 * lock), and calling back into ->request_fn() here would deadlock
+	 * trying to take the queue lock again.
+ */
if (run_queue)
- blk_run_queue(md->queue);
+ blk_run_queue_async(md->queue);
/*
* dm_put() must be at the end of this function. See the comment above
}
conf->nfaults = 0;
- rdev_for_each(rdev, mddev)
+ rdev_for_each(rdev, mddev) {
conf->rdev = rdev;
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->data_offset << 9);
+ }
md_set_array_sectors(mddev, faulty_size(mddev, 0, 0));
mddev->private = conf;
memset(bbp, 0xff, PAGE_SIZE);
for (i = 0 ; i < bb->count ; i++) {
- u64 internal_bb = *p++;
+ u64 internal_bb = p[i];
u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
| BB_LEN(internal_bb));
- *bbp++ = cpu_to_le64(store_bb);
+ bbp[i] = cpu_to_le64(store_bb);
}
bb->changed = 0;
if (read_seqretry(&bb->lock, seq))
}
EXPORT_SYMBOL_GPL(md_stop_writes);
-void md_stop(struct mddev *mddev)
+static void __md_stop(struct mddev *mddev)
{
mddev->ready = 0;
mddev->pers->stop(mddev);
mddev->pers = NULL;
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
}
+
+void md_stop(struct mddev *mddev)
+{
+	/* stop the array and free any attached data structures.
+	 * This is called from dm-raid.
+ */
+ __md_stop(mddev);
+ bitmap_destroy(mddev);
+ if (mddev->bio_set)
+ bioset_free(mddev->bio_set);
+}
+
EXPORT_SYMBOL_GPL(md_stop);
static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
set_disk_ro(disk, 0);
__md_stop_writes(mddev);
- md_stop(mddev);
+ __md_stop(mddev);
mddev->queue->merge_bvec_fn = NULL;
mddev->queue->backing_dev_info.congested_fn = NULL;
sector_t *first_bad, int *bad_sectors)
{
int hi;
- int lo = 0;
+ int lo;
u64 *p = bb->page;
- int rv = 0;
+ int rv;
sector_t target = s + sectors;
unsigned seq;
retry:
seq = read_seqbegin(&bb->lock);
-
+ lo = 0;
+ rv = 0;
hi = bb->count;
/* Binary search between lo and hi for 'target'
|| disk_idx < 0)
continue;
if (test_bit(Replacement, &rdev->flags))
- disk = conf->mirrors + conf->raid_disks + disk_idx;
+ disk = conf->mirrors + mddev->raid_disks + disk_idx;
else
disk = conf->mirrors + disk_idx;
*/
one_write_done(r10_bio);
if (dec_rdev)
- rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
+ rdev_dec_pending(rdev, conf->mddev);
}
/*
blocked_rdev = rrdev;
break;
}
+ if (rdev && (test_bit(Faulty, &rdev->flags)
+ || test_bit(Unmerged, &rdev->flags)))
+ rdev = NULL;
if (rrdev && (test_bit(Faulty, &rrdev->flags)
|| test_bit(Unmerged, &rrdev->flags)))
rrdev = NULL;
r10_bio->devs[i].bio = NULL;
r10_bio->devs[i].repl_bio = NULL;
- if (!rdev || test_bit(Faulty, &rdev->flags) ||
- test_bit(Unmerged, &rdev->flags)) {
+
+ if (!rdev && !rrdev) {
set_bit(R10BIO_Degraded, &r10_bio->state);
continue;
}
- if (test_bit(WriteErrorSeen, &rdev->flags)) {
+ if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
sector_t first_bad;
sector_t dev_sector = r10_bio->devs[i].addr;
int bad_sectors;
max_sectors = good_sectors;
}
}
- r10_bio->devs[i].bio = bio;
- atomic_inc(&rdev->nr_pending);
+ if (rdev) {
+ r10_bio->devs[i].bio = bio;
+ atomic_inc(&rdev->nr_pending);
+ }
if (rrdev) {
r10_bio->devs[i].repl_bio = bio;
atomic_inc(&rrdev->nr_pending);
for (i = 0; i < conf->copies; i++) {
struct bio *mbio;
int d = r10_bio->devs[i].devnum;
- if (!r10_bio->devs[i].bio)
- continue;
+ if (r10_bio->devs[i].bio) {
+ struct md_rdev *rdev = conf->mirrors[d].rdev;
+ mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
+ md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
+ max_sectors);
+ r10_bio->devs[i].bio = mbio;
+
+ mbio->bi_sector = (r10_bio->devs[i].addr+
+ choose_data_offset(r10_bio,
+ rdev));
+ mbio->bi_bdev = rdev->bdev;
+ mbio->bi_end_io = raid10_end_write_request;
+ mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
+ mbio->bi_private = r10_bio;
- mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
- md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
- max_sectors);
- r10_bio->devs[i].bio = mbio;
+ atomic_inc(&r10_bio->remaining);
- mbio->bi_sector = (r10_bio->devs[i].addr+
- choose_data_offset(r10_bio,
- conf->mirrors[d].rdev));
- mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
- mbio->bi_end_io = raid10_end_write_request;
- mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
- mbio->bi_private = r10_bio;
+ cb = blk_check_plugged(raid10_unplug, mddev,
+ sizeof(*plug));
+ if (cb)
+ plug = container_of(cb, struct raid10_plug_cb,
+ cb);
+ else
+ plug = NULL;
+ spin_lock_irqsave(&conf->device_lock, flags);
+ if (plug) {
+ bio_list_add(&plug->pending, mbio);
+ plug->pending_cnt++;
+ } else {
+ bio_list_add(&conf->pending_bio_list, mbio);
+ conf->pending_count++;
+ }
+ spin_unlock_irqrestore(&conf->device_lock, flags);
+ if (!plug)
+ md_wakeup_thread(mddev->thread);
+ }
- atomic_inc(&r10_bio->remaining);
+ if (r10_bio->devs[i].repl_bio) {
+ struct md_rdev *rdev = conf->mirrors[d].replacement;
+ if (rdev == NULL) {
+ /* Replacement just got moved to main 'rdev' */
+ smp_mb();
+ rdev = conf->mirrors[d].rdev;
+ }
+ mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
+ md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
+ max_sectors);
+ r10_bio->devs[i].repl_bio = mbio;
+
+ mbio->bi_sector = (r10_bio->devs[i].addr +
+ choose_data_offset(
+ r10_bio, rdev));
+ mbio->bi_bdev = rdev->bdev;
+ mbio->bi_end_io = raid10_end_write_request;
+ mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
+ mbio->bi_private = r10_bio;
- cb = blk_check_plugged(raid10_unplug, mddev, sizeof(*plug));
- if (cb)
- plug = container_of(cb, struct raid10_plug_cb, cb);
- else
- plug = NULL;
- spin_lock_irqsave(&conf->device_lock, flags);
- if (plug) {
- bio_list_add(&plug->pending, mbio);
- plug->pending_cnt++;
- } else {
+ atomic_inc(&r10_bio->remaining);
+ spin_lock_irqsave(&conf->device_lock, flags);
bio_list_add(&conf->pending_bio_list, mbio);
conf->pending_count++;
+ spin_unlock_irqrestore(&conf->device_lock, flags);
+ if (!mddev_check_plugged(mddev))
+ md_wakeup_thread(mddev->thread);
}
- spin_unlock_irqrestore(&conf->device_lock, flags);
- if (!plug)
- md_wakeup_thread(mddev->thread);
-
- if (!r10_bio->devs[i].repl_bio)
- continue;
-
- mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
- md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
- max_sectors);
- r10_bio->devs[i].repl_bio = mbio;
-
- /* We are actively writing to the original device
- * so it cannot disappear, so the replacement cannot
- * become NULL here
- */
- mbio->bi_sector = (r10_bio->devs[i].addr +
- choose_data_offset(
- r10_bio,
- conf->mirrors[d].replacement));
- mbio->bi_bdev = conf->mirrors[d].replacement->bdev;
- mbio->bi_end_io = raid10_end_write_request;
- mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
- mbio->bi_private = r10_bio;
-
- atomic_inc(&r10_bio->remaining);
- spin_lock_irqsave(&conf->device_lock, flags);
- bio_list_add(&conf->pending_bio_list, mbio);
- conf->pending_count++;
- spin_unlock_irqrestore(&conf->device_lock, flags);
- if (!mddev_check_plugged(mddev))
- md_wakeup_thread(mddev->thread);
}
/* Don't remove the bias on 'remaining' (one_write_done) until
clear_bit(Unmerged, &rdev->flags);
}
md_integrity_add_rdev(rdev, mddev);
- if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
+ if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
print_conf(conf);
discard_supported = true;
}
- if (discard_supported)
- queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
- else
- queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
-
+ if (mddev->queue) {
+ if (discard_supported)
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
+ mddev->queue);
+ else
+ queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
+ mddev->queue);
+ }
/* need to check that every block has at least one working mirror */
if (!enough(conf, -1)) {
printk(KERN_ERR "md/raid10:%s: not enough operational mirrors.\n",
dev = &sh->dev[i];
if (!test_bit(R5_LOCKED, &dev->flags) &&
(test_bit(R5_UPTODATE, &dev->flags) ||
- test_and_clear_bit(R5_Discard, &dev->flags))) {
+ test_bit(R5_Discard, &dev->flags))) {
/* We can return any write requests */
struct bio *wbi, *wbi2;
pr_debug("Return write for disc %d\n", i);
+ if (test_and_clear_bit(R5_Discard, &dev->flags))
+ clear_bit(R5_UPTODATE, &dev->flags);
wbi = dev->written;
dev->written = NULL;
while (wbi && wbi->bi_sector <
!test_bit(STRIPE_DEGRADED, &sh->state),
0);
}
- }
+ } else if (test_bit(R5_Discard, &sh->dev[i].flags))
+ clear_bit(R5_Discard, &sh->dev[i].flags);
if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
if (atomic_dec_and_test(&conf->pending_full_writes))
handle_failed_sync(conf, sh, &s);
}
- /*
- * might be able to return some write requests if the parity blocks
- * are safe, or on a failed drive
- */
- pdev = &sh->dev[sh->pd_idx];
- s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx)
- || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx);
- qdev = &sh->dev[sh->qd_idx];
- s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx)
- || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx)
- || conf->level < 6;
-
- if (s.written &&
- (s.p_failed || ((test_bit(R5_Insync, &pdev->flags)
- && !test_bit(R5_LOCKED, &pdev->flags)
- && (test_bit(R5_UPTODATE, &pdev->flags) ||
- test_bit(R5_Discard, &pdev->flags))))) &&
- (s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
- && !test_bit(R5_LOCKED, &qdev->flags)
- && (test_bit(R5_UPTODATE, &qdev->flags) ||
- test_bit(R5_Discard, &qdev->flags))))))
- handle_stripe_clean_event(conf, sh, disks, &s.return_bi);
-
- /* Now we might consider reading some blocks, either to check/generate
- * parity, or to satisfy requests
- * or to load a block that is being partially written.
- */
- if (s.to_read || s.non_overwrite
- || (conf->level == 6 && s.to_write && s.failed)
- || (s.syncing && (s.uptodate + s.compute < disks))
- || s.replacing
- || s.expanding)
- handle_stripe_fill(sh, &s, disks);
-
/* Now we check to see if any write operations have recently
* completed
*/
s.dec_preread_active = 1;
}
+ /*
+ * might be able to return some write requests if the parity blocks
+ * are safe, or on a failed drive
+ */
+ pdev = &sh->dev[sh->pd_idx];
+ s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx)
+ || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx);
+ qdev = &sh->dev[sh->qd_idx];
+ s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx)
+ || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx)
+ || conf->level < 6;
+
+ if (s.written &&
+ (s.p_failed || ((test_bit(R5_Insync, &pdev->flags)
+ && !test_bit(R5_LOCKED, &pdev->flags)
+ && (test_bit(R5_UPTODATE, &pdev->flags) ||
+ test_bit(R5_Discard, &pdev->flags))))) &&
+ (s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
+ && !test_bit(R5_LOCKED, &qdev->flags)
+ && (test_bit(R5_UPTODATE, &qdev->flags) ||
+ test_bit(R5_Discard, &qdev->flags))))))
+ handle_stripe_clean_event(conf, sh, disks, &s.return_bi);
+
+ /* Now we might consider reading some blocks, either to check/generate
+ * parity, or to satisfy requests
+ * or to load a block that is being partially written.
+ */
+ if (s.to_read || s.non_overwrite
+ || (conf->level == 6 && s.to_write && s.failed)
+ || (s.syncing && (s.uptodate + s.compute < disks))
+ || s.replacing
+ || s.expanding)
+ handle_stripe_fill(sh, &s, disks);
+
/* Now to consider new write requests and what else, if anything
* should be read. We do not handle new writes when:
* 1/ A 'write' operation (copy+xor) is already in flight.
* discard data disk but write parity disk
*/
stripe = stripe * PAGE_SIZE;
+ /* Round up to power of 2, as discard handling
+ * currently assumes that */
+ while ((stripe-1) & stripe)
+ stripe = (stripe | (stripe-1)) + 1;
mddev->queue->limits.discard_alignment = stripe;
mddev->queue->limits.discard_granularity = stripe;
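The round-up loop added here is a classic bit trick: while stripe is not a power of two, (stripe - 1) & stripe is non-zero, and stripe | (stripe - 1) sets every bit below the highest set bit, so adding 1 carries into the next power of two. A minimal standalone sketch of the same loop (the helper name is illustrative, not part of the patch):

	#include <assert.h>

	/* Mirrors the raid5 round-up loop above (illustrative helper). */
	static unsigned long round_up_pow2(unsigned long x)
	{
		while ((x - 1) & x)		/* non-zero while x is not a power of 2 */
			x = (x | (x - 1)) + 1;	/* fill the low bits, then carry */
		return x;
	}

	int main(void)
	{
		/* e.g. a 3-data-disk stripe of 3 * 4096 = 12288 rounds to 16384 */
		assert(round_up_pow2(12288) == 16384);
		assert(round_up_pow2(4096) == 4096);
		return 0;
	}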
/*
{
u32 m_div, clk_sel;
- dprintk("%s: Mclk set to %d, Quartz = %d\n", __func__, mclk,
- intp->quartz);
-
if (intp == NULL)
return STV0900_INVALID_HANDLE;
if (intp->errs)
return STV0900_I2C_ERROR;
+ dprintk("%s: Mclk set to %d, Quartz = %d\n", __func__, mclk,
+ intp->quartz);
+
clk_sel = ((stv0900_get_bits(intp, F0900_SELX1RATIO) == 1) ? 4 : 6);
m_div = ((clk_sel * mclk) / intp->quartz) - 1;
stv0900_write_bits(intp, F0900_M_DIV, m_div);
/* ADV7604 system clock frequency */
#define ADV7604_fsc (28636360)
-#define DIGITAL_INPUT ((state->prim_mode == ADV7604_PRIM_MODE_HDMI_COMP) || \
- (state->prim_mode == ADV7604_PRIM_MODE_HDMI_GR))
+#define DIGITAL_INPUT (state->mode == ADV7604_MODE_HDMI)
/*
**********************************************************************
struct v4l2_subdev sd;
struct media_pad pad;
struct v4l2_ctrl_handler hdl;
- enum adv7604_prim_mode prim_mode;
+ enum adv7604_mode mode;
struct v4l2_dv_timings timings;
u8 edid[256];
unsigned edid_blocks;
struct workqueue_struct *work_queues;
struct delayed_work delayed_work_enable_hotplug;
bool connector_hdmi;
+ bool restart_stdi_once;
/* i2c clients */
struct i2c_client *i2c_avlink;
V4L2_DV_BT_CEA_720X576P50,
V4L2_DV_BT_CEA_1280X720P24,
V4L2_DV_BT_CEA_1280X720P25,
- V4L2_DV_BT_CEA_1280X720P30,
V4L2_DV_BT_CEA_1280X720P50,
V4L2_DV_BT_CEA_1280X720P60,
V4L2_DV_BT_CEA_1920X1080P24,
V4L2_DV_BT_CEA_1920X1080P50,
V4L2_DV_BT_CEA_1920X1080P60,
+ /* sorted by DMT ID */
V4L2_DV_BT_DMT_640X350P85,
V4L2_DV_BT_DMT_640X400P85,
V4L2_DV_BT_DMT_720X400P85,
{ },
};
+struct adv7604_video_standards {
+ struct v4l2_dv_timings timings;
+ u8 vid_std;
+ u8 v_freq;
+};
+
+/* sorted by number of lines */
+static const struct adv7604_video_standards adv7604_prim_mode_comp[] = {
+ /* { V4L2_DV_BT_CEA_720X480P59_94, 0x0a, 0x00 }, TODO flickering */
+ { V4L2_DV_BT_CEA_720X576P50, 0x0b, 0x00 },
+ { V4L2_DV_BT_CEA_1280X720P50, 0x19, 0x01 },
+ { V4L2_DV_BT_CEA_1280X720P60, 0x19, 0x00 },
+ { V4L2_DV_BT_CEA_1920X1080P24, 0x1e, 0x04 },
+ { V4L2_DV_BT_CEA_1920X1080P25, 0x1e, 0x03 },
+ { V4L2_DV_BT_CEA_1920X1080P30, 0x1e, 0x02 },
+ { V4L2_DV_BT_CEA_1920X1080P50, 0x1e, 0x01 },
+ { V4L2_DV_BT_CEA_1920X1080P60, 0x1e, 0x00 },
+ /* TODO add 1920x1080P60_RB (CVT timing) */
+ { },
+};
+
+/* sorted by number of lines */
+static const struct adv7604_video_standards adv7604_prim_mode_gr[] = {
+ { V4L2_DV_BT_DMT_640X480P60, 0x08, 0x00 },
+ { V4L2_DV_BT_DMT_640X480P72, 0x09, 0x00 },
+ { V4L2_DV_BT_DMT_640X480P75, 0x0a, 0x00 },
+ { V4L2_DV_BT_DMT_640X480P85, 0x0b, 0x00 },
+ { V4L2_DV_BT_DMT_800X600P56, 0x00, 0x00 },
+ { V4L2_DV_BT_DMT_800X600P60, 0x01, 0x00 },
+ { V4L2_DV_BT_DMT_800X600P72, 0x02, 0x00 },
+ { V4L2_DV_BT_DMT_800X600P75, 0x03, 0x00 },
+ { V4L2_DV_BT_DMT_800X600P85, 0x04, 0x00 },
+ { V4L2_DV_BT_DMT_1024X768P60, 0x0c, 0x00 },
+ { V4L2_DV_BT_DMT_1024X768P70, 0x0d, 0x00 },
+ { V4L2_DV_BT_DMT_1024X768P75, 0x0e, 0x00 },
+ { V4L2_DV_BT_DMT_1024X768P85, 0x0f, 0x00 },
+ { V4L2_DV_BT_DMT_1280X1024P60, 0x05, 0x00 },
+ { V4L2_DV_BT_DMT_1280X1024P75, 0x06, 0x00 },
+ { V4L2_DV_BT_DMT_1360X768P60, 0x12, 0x00 },
+ { V4L2_DV_BT_DMT_1366X768P60, 0x13, 0x00 },
+ { V4L2_DV_BT_DMT_1400X1050P60, 0x14, 0x00 },
+ { V4L2_DV_BT_DMT_1400X1050P75, 0x15, 0x00 },
+ { V4L2_DV_BT_DMT_1600X1200P60, 0x16, 0x00 }, /* TODO not tested */
+ /* TODO add 1600X1200P60_RB (not a DMT timing) */
+ { V4L2_DV_BT_DMT_1680X1050P60, 0x18, 0x00 },
+ { V4L2_DV_BT_DMT_1920X1200P60_RB, 0x19, 0x00 }, /* TODO not tested */
+ { },
+};
+
+/* sorted by number of lines */
+static const struct adv7604_video_standards adv7604_prim_mode_hdmi_comp[] = {
+ { V4L2_DV_BT_CEA_720X480P59_94, 0x0a, 0x00 },
+ { V4L2_DV_BT_CEA_720X576P50, 0x0b, 0x00 },
+ { V4L2_DV_BT_CEA_1280X720P50, 0x13, 0x01 },
+ { V4L2_DV_BT_CEA_1280X720P60, 0x13, 0x00 },
+ { V4L2_DV_BT_CEA_1920X1080P24, 0x1e, 0x04 },
+ { V4L2_DV_BT_CEA_1920X1080P25, 0x1e, 0x03 },
+ { V4L2_DV_BT_CEA_1920X1080P30, 0x1e, 0x02 },
+ { V4L2_DV_BT_CEA_1920X1080P50, 0x1e, 0x01 },
+ { V4L2_DV_BT_CEA_1920X1080P60, 0x1e, 0x00 },
+ { },
+};
+
+/* sorted by number of lines */
+static const struct adv7604_video_standards adv7604_prim_mode_hdmi_gr[] = {
+ { V4L2_DV_BT_DMT_640X480P60, 0x08, 0x00 },
+ { V4L2_DV_BT_DMT_640X480P72, 0x09, 0x00 },
+ { V4L2_DV_BT_DMT_640X480P75, 0x0a, 0x00 },
+ { V4L2_DV_BT_DMT_640X480P85, 0x0b, 0x00 },
+ { V4L2_DV_BT_DMT_800X600P56, 0x00, 0x00 },
+ { V4L2_DV_BT_DMT_800X600P60, 0x01, 0x00 },
+ { V4L2_DV_BT_DMT_800X600P72, 0x02, 0x00 },
+ { V4L2_DV_BT_DMT_800X600P75, 0x03, 0x00 },
+ { V4L2_DV_BT_DMT_800X600P85, 0x04, 0x00 },
+ { V4L2_DV_BT_DMT_1024X768P60, 0x0c, 0x00 },
+ { V4L2_DV_BT_DMT_1024X768P70, 0x0d, 0x00 },
+ { V4L2_DV_BT_DMT_1024X768P75, 0x0e, 0x00 },
+ { V4L2_DV_BT_DMT_1024X768P85, 0x0f, 0x00 },
+ { V4L2_DV_BT_DMT_1280X1024P60, 0x05, 0x00 },
+ { V4L2_DV_BT_DMT_1280X1024P75, 0x06, 0x00 },
+ { },
+};
+
/* ----------------------------------------------------------------------- */
static inline struct adv7604_state *to_state(struct v4l2_subdev *sd)
((io_read(sd, 0x6f) & 0x10) >> 4));
}
-static void configure_free_run(struct v4l2_subdev *sd, const struct v4l2_bt_timings *timings)
+static int find_and_set_predefined_video_timings(struct v4l2_subdev *sd,
+ u8 prim_mode,
+ const struct adv7604_video_standards *predef_vid_timings,
+ const struct v4l2_dv_timings *timings)
+{
+ struct adv7604_state *state = to_state(sd);
+ int i;
+
+ for (i = 0; predef_vid_timings[i].timings.bt.width; i++) {
+ if (!v4l_match_dv_timings(timings, &predef_vid_timings[i].timings,
+ DIGITAL_INPUT ? 250000 : 1000000))
+ continue;
+ io_write(sd, 0x00, predef_vid_timings[i].vid_std); /* video std */
+ io_write(sd, 0x01, (predef_vid_timings[i].v_freq << 4) +
+ prim_mode); /* v_freq and prim mode */
+ return 0;
+ }
+
+ return -1;
+}
+
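The standards tables above are sentinel-terminated: the trailing empty { } entry zero-initializes all members, so the lookup loop stops when timings.bt.width reads back 0 and no explicit table length is needed. A minimal standalone sketch of the same idiom, with made-up entries (written as { 0 } here for strict C conformance):

	#include <stdio.h>

	struct std_entry {
		unsigned int width;	/* 0 marks the sentinel entry */
		unsigned char vid_std;
	};

	static const struct std_entry table[] = {
		{ 720,  0x0b },
		{ 1280, 0x19 },
		{ 1920, 0x1e },
		{ 0 },			/* zero-filled sentinel ends the walk */
	};

	int main(void)
	{
		int i;

		for (i = 0; table[i].width; i++)
			printf("%u -> 0x%02x\n", table[i].width, table[i].vid_std);
		return 0;
	}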
+static int configure_predefined_video_timings(struct v4l2_subdev *sd,
+ struct v4l2_dv_timings *timings)
{
+ struct adv7604_state *state = to_state(sd);
+ int err;
+
+ v4l2_dbg(1, debug, sd, "%s\n", __func__);
+
+ /* reset to default values */
+ io_write(sd, 0x16, 0x43);
+ io_write(sd, 0x17, 0x5a);
+ /* disable embedded syncs for auto graphics mode */
+ cp_write_and_or(sd, 0x81, 0xef, 0x00);
+ cp_write(sd, 0x8f, 0x00);
+ cp_write(sd, 0x90, 0x00);
+ cp_write(sd, 0xa2, 0x00);
+ cp_write(sd, 0xa3, 0x00);
+ cp_write(sd, 0xa4, 0x00);
+ cp_write(sd, 0xa5, 0x00);
+ cp_write(sd, 0xa6, 0x00);
+ cp_write(sd, 0xa7, 0x00);
+ cp_write(sd, 0xab, 0x00);
+ cp_write(sd, 0xac, 0x00);
+
+ switch (state->mode) {
+ case ADV7604_MODE_COMP:
+ case ADV7604_MODE_GR:
+ err = find_and_set_predefined_video_timings(sd,
+ 0x01, adv7604_prim_mode_comp, timings);
+ if (err)
+ err = find_and_set_predefined_video_timings(sd,
+ 0x02, adv7604_prim_mode_gr, timings);
+ break;
+ case ADV7604_MODE_HDMI:
+ err = find_and_set_predefined_video_timings(sd,
+ 0x05, adv7604_prim_mode_hdmi_comp, timings);
+ if (err)
+ err = find_and_set_predefined_video_timings(sd,
+ 0x06, adv7604_prim_mode_hdmi_gr, timings);
+ break;
+ default:
+ v4l2_dbg(2, debug, sd, "%s: Unknown mode %d\n",
+ __func__, state->mode);
+ err = -1;
+ break;
+ }
+
+ return err;
+}
+
+static void configure_custom_video_timings(struct v4l2_subdev *sd,
+ const struct v4l2_bt_timings *bt)
+{
+ struct adv7604_state *state = to_state(sd);
struct i2c_client *client = v4l2_get_subdevdata(sd);
- u32 width = htotal(timings);
- u32 height = vtotal(timings);
- u16 ch1_fr_ll = (((u32)timings->pixelclock / 100) > 0) ?
- ((width * (ADV7604_fsc / 100)) / ((u32)timings->pixelclock / 100)) : 0;
+ u32 width = htotal(bt);
+ u32 height = vtotal(bt);
+ u16 cp_start_sav = bt->hsync + bt->hbackporch - 4;
+ u16 cp_start_eav = width - bt->hfrontporch;
+ u16 cp_start_vbi = height - bt->vfrontporch;
+ u16 cp_end_vbi = bt->vsync + bt->vbackporch;
+ u16 ch1_fr_ll = (((u32)bt->pixelclock / 100) > 0) ?
+ ((width * (ADV7604_fsc / 100)) / ((u32)bt->pixelclock / 100)) : 0;
+ const u8 pll[2] = {
+ 0xc0 | ((width >> 8) & 0x1f),
+ width & 0xff
+ };
v4l2_dbg(2, debug, sd, "%s\n", __func__);
- cp_write(sd, 0x8f, (ch1_fr_ll >> 8) & 0x7); /* CH1_FR_LL */
- cp_write(sd, 0x90, ch1_fr_ll & 0xff); /* CH1_FR_LL */
- cp_write(sd, 0xab, (height >> 4) & 0xff); /* CP_LCOUNT_MAX */
- cp_write(sd, 0xac, (height & 0x0f) << 4); /* CP_LCOUNT_MAX */
- /* TODO support interlaced */
- cp_write(sd, 0x91, 0x10); /* INTERLACED */
-
- /* Should only be set in auto-graphics mode [REF_02 p. 91-92] */
- if ((io_read(sd, 0x00) == 0x07) && (io_read(sd, 0x01) == 0x02)) {
- u16 cp_start_sav, cp_start_eav, cp_start_vbi, cp_end_vbi;
- const u8 pll[2] = {
- (0xc0 | ((width >> 8) & 0x1f)),
- (width & 0xff)
- };
+ switch (state->mode) {
+ case ADV7604_MODE_COMP:
+ case ADV7604_MODE_GR:
+ /* auto graphics */
+ io_write(sd, 0x00, 0x07); /* video std */
+ io_write(sd, 0x01, 0x02); /* prim mode */
+ /* enable embedded syncs for auto graphics mode */
+ cp_write_and_or(sd, 0x81, 0xef, 0x10);
+ /* Should only be set in auto-graphics mode [REF_02, p. 91-92] */
/* setup PLL_DIV_MAN_EN and PLL_DIV_RATIO */
/* IO-map reg. 0x16 and 0x17 should be written in sequence */
if (adv_smbus_write_i2c_block_data(client, 0x16, 2, pll)) {
v4l2_err(sd, "writing to reg 0x16 and 0x17 failed\n");
- return;
+ break;
}
/* active video - horizontal timing */
- cp_start_sav = timings->hsync + timings->hbackporch - 4;
- cp_start_eav = width - timings->hfrontporch;
cp_write(sd, 0xa2, (cp_start_sav >> 4) & 0xff);
- cp_write(sd, 0xa3, ((cp_start_sav & 0x0f) << 4) | ((cp_start_eav >> 8) & 0x0f));
+ cp_write(sd, 0xa3, ((cp_start_sav & 0x0f) << 4) |
+ ((cp_start_eav >> 8) & 0x0f));
cp_write(sd, 0xa4, cp_start_eav & 0xff);
/* active video - vertical timing */
- cp_start_vbi = height - timings->vfrontporch;
- cp_end_vbi = timings->vsync + timings->vbackporch;
cp_write(sd, 0xa5, (cp_start_vbi >> 4) & 0xff);
- cp_write(sd, 0xa6, ((cp_start_vbi & 0xf) << 4) | ((cp_end_vbi >> 8) & 0xf));
+ cp_write(sd, 0xa6, ((cp_start_vbi & 0xf) << 4) |
+ ((cp_end_vbi >> 8) & 0xf));
cp_write(sd, 0xa7, cp_end_vbi & 0xff);
- } else {
- /* reset to default values */
- io_write(sd, 0x16, 0x43);
- io_write(sd, 0x17, 0x5a);
- cp_write(sd, 0xa2, 0x00);
- cp_write(sd, 0xa3, 0x00);
- cp_write(sd, 0xa4, 0x00);
- cp_write(sd, 0xa5, 0x00);
- cp_write(sd, 0xa6, 0x00);
- cp_write(sd, 0xa7, 0x00);
+ break;
+ case ADV7604_MODE_HDMI:
+ /* set default prim_mode/vid_std for HDMI
+ according to [REF_03, c. 4.2] */
+ io_write(sd, 0x00, 0x02); /* video std */
+ io_write(sd, 0x01, 0x06); /* prim mode */
+ break;
+ default:
+ v4l2_dbg(2, debug, sd, "%s: Unknown mode %d\n",
+ __func__, state->mode);
+ break;
}
-}
+ cp_write(sd, 0x8f, (ch1_fr_ll >> 8) & 0x7);
+ cp_write(sd, 0x90, ch1_fr_ll & 0xff);
+ cp_write(sd, 0xab, (height >> 4) & 0xff);
+ cp_write(sd, 0xac, (height & 0x0f) << 4);
+}
static void set_rgb_quantization_range(struct v4l2_subdev *sd)
{
switch (state->rgb_quantization_range) {
case V4L2_DV_RGB_RANGE_AUTO:
/* automatic */
- if ((hdmi_read(sd, 0x05) & 0x80) ||
- (state->prim_mode == ADV7604_PRIM_MODE_COMP) ||
- (state->prim_mode == ADV7604_PRIM_MODE_RGB)) {
- /* receiving HDMI or analog signal */
- io_write_and_or(sd, 0x02, 0x0f, 0xf0);
- } else {
+ if (DIGITAL_INPUT && !(hdmi_read(sd, 0x05) & 0x80)) {
/* receiving DVI-D signal */
/* ADV7604 selects RGB limited range regardless of
/* RGB full range (0-255) */
io_write_and_or(sd, 0x02, 0x0f, 0x10);
}
+ } else {
+ /* receiving HDMI or analog signal, set automode */
+ io_write_and_or(sd, 0x02, 0x0f, 0xf0);
}
break;
case V4L2_DV_RGB_RANGE_LIMITED:
state->aspect_ratio, timings))
return 0;
- v4l2_dbg(2, debug, sd, "%s: No format candidate found for lcf=%d, bl = %d\n",
- __func__, stdi->lcf, stdi->bl);
+ v4l2_dbg(2, debug, sd,
+ "%s: No format candidate found for lcvs = %d, lcf=%d, bl = %d, %chsync, %cvsync\n",
+ __func__, stdi->lcvs, stdi->lcf, stdi->bl,
+ stdi->hs_pol, stdi->vs_pol);
return -1;
}
adv7604_fill_optional_dv_timings_fields(sd, timings);
} else {
/* find format
- * Since LCVS values are inaccurate (REF_03, page 275-276),
+ * Since LCVS values are inaccurate [REF_03, p. 275-276],
* stdi2dv_timings() is called with lcvs +-1 if the first attempt fails.
*/
if (!stdi2dv_timings(sd, &stdi, timings))
stdi.lcvs -= 2;
v4l2_dbg(1, debug, sd, "%s: lcvs - 1 = %d\n", __func__, stdi.lcvs);
if (stdi2dv_timings(sd, &stdi, timings)) {
+ /*
+ * The STDI block may measure wrong values, especially
+ * for lcvs and lcf. If the driver cannot find any
+ * valid timing, the STDI block is restarted to measure
+ * the video timings again. The function will return an
+ * error, but the restart of STDI will generate a new
+ * STDI interrupt and the format detection process will
+ * restart.
+ */
+ if (state->restart_stdi_once) {
+ v4l2_dbg(1, debug, sd, "%s: restart STDI\n", __func__);
+ /* TODO restart STDI for Sync Channel 2 */
+ /* enter one-shot mode */
+ cp_write_and_or(sd, 0x86, 0xf9, 0x00);
+ /* trigger STDI restart */
+ cp_write_and_or(sd, 0x86, 0xf9, 0x04);
+ /* reset to continuous mode */
+ cp_write_and_or(sd, 0x86, 0xf9, 0x02);
+ state->restart_stdi_once = false;
+ return -ENOLINK;
+ }
v4l2_dbg(1, debug, sd, "%s: format not supported\n", __func__);
return -ERANGE;
}
+ state->restart_stdi_once = true;
}
found:
{
struct adv7604_state *state = to_state(sd);
struct v4l2_bt_timings *bt;
+ int err;
if (!timings)
return -EINVAL;
__func__, (u32)bt->pixelclock);
return -ERANGE;
}
+
adv7604_fill_optional_dv_timings_fields(sd, timings);
state->timings = *timings;
- /* freerun */
- configure_free_run(sd, bt);
+ cp_write(sd, 0x91, bt->interlaced ? 0x50 : 0x10);
+
+ /* Use prim_mode and vid_std when available */
+ err = configure_predefined_video_timings(sd, timings);
+ if (err) {
+ /* custom settings when the video format
+ does not have prim_mode/vid_std */
+ configure_custom_video_timings(sd, bt);
+ }
set_rgb_quantization_range(sd);
return 0;
}
-static void enable_input(struct v4l2_subdev *sd, enum adv7604_prim_mode prim_mode)
+static void enable_input(struct v4l2_subdev *sd)
{
- switch (prim_mode) {
- case ADV7604_PRIM_MODE_COMP:
- case ADV7604_PRIM_MODE_RGB:
+ struct adv7604_state *state = to_state(sd);
+
+ switch (state->mode) {
+ case ADV7604_MODE_COMP:
+ case ADV7604_MODE_GR:
/* enable */
io_write(sd, 0x15, 0xb0); /* Disable Tristate of Pins (no audio) */
break;
- case ADV7604_PRIM_MODE_HDMI_COMP:
- case ADV7604_PRIM_MODE_HDMI_GR:
+ case ADV7604_MODE_HDMI:
/* enable */
hdmi_write(sd, 0x1a, 0x0a); /* Unmute audio */
hdmi_write(sd, 0x01, 0x00); /* Enable HDMI clock terminators */
io_write(sd, 0x15, 0xa0); /* Disable Tristate of Pins */
break;
default:
- v4l2_err(sd, "%s: reserved primary mode 0x%0x\n",
- __func__, prim_mode);
+ v4l2_dbg(2, debug, sd, "%s: Unknown mode %d\n",
+ __func__, state->mode);
break;
}
}
hdmi_write(sd, 0x01, 0x78); /* Disable HDMI clock terminators */
}
-static void select_input(struct v4l2_subdev *sd, enum adv7604_prim_mode prim_mode)
+static void select_input(struct v4l2_subdev *sd)
{
- switch (prim_mode) {
- case ADV7604_PRIM_MODE_COMP:
- case ADV7604_PRIM_MODE_RGB:
- /* set mode and select free run resolution */
- io_write(sd, 0x00, 0x07); /* video std */
- io_write(sd, 0x01, 0x02); /* prim mode */
- /* enable embedded syncs for auto graphics mode */
- cp_write_and_or(sd, 0x81, 0xef, 0x10);
+ struct adv7604_state *state = to_state(sd);
+ switch (state->mode) {
+ case ADV7604_MODE_COMP:
+ case ADV7604_MODE_GR:
/* reset ADI recommended settings for HDMI: */
/* "ADV7604 Register Settings Recommendations (rev. 2.5, June 2010)" p. 4. */
hdmi_write(sd, 0x0d, 0x04); /* HDMI filter optimization */
cp_write(sd, 0x40, 0x5c); /* CP core pre-gain control. Graphics mode */
break;
- case ADV7604_PRIM_MODE_HDMI_COMP:
- case ADV7604_PRIM_MODE_HDMI_GR:
- /* set mode and select free run resolution */
- /* video std */
- io_write(sd, 0x00,
- (prim_mode == ADV7604_PRIM_MODE_HDMI_GR) ? 0x02 : 0x1e);
- io_write(sd, 0x01, prim_mode); /* prim mode */
- /* disable embedded syncs for auto graphics mode */
- cp_write_and_or(sd, 0x81, 0xef, 0x00);
-
+ case ADV7604_MODE_HDMI:
/* set ADI recommended settings for HDMI: */
/* "ADV7604 Register Settings Recommendations (rev. 2.5, June 2010)" p. 4. */
hdmi_write(sd, 0x0d, 0x84); /* HDMI filter optimization */
break;
default:
- v4l2_err(sd, "%s: reserved primary mode 0x%0x\n", __func__, prim_mode);
+ v4l2_dbg(2, debug, sd, "%s: Unknown mode %d\n",
+ __func__, state->mode);
break;
}
}
v4l2_dbg(2, debug, sd, "%s: input %d", __func__, input);
- switch (input) {
- case 0:
- /* TODO select HDMI_COMP or HDMI_GR */
- state->prim_mode = ADV7604_PRIM_MODE_HDMI_COMP;
- break;
- case 1:
- state->prim_mode = ADV7604_PRIM_MODE_RGB;
- break;
- case 2:
- state->prim_mode = ADV7604_PRIM_MODE_COMP;
- break;
- default:
- return -EINVAL;
- }
+ state->mode = input;
disable_input(sd);
- select_input(sd, state->prim_mode);
+ select_input(sd);
- enable_input(sd, state->prim_mode);
+ enable_input(sd);
return 0;
}
v4l2_info(sd, "CP locked: %s\n", no_lock_cp(sd) ? "false" : "true");
v4l2_info(sd, "CP free run: %s\n",
(!!(cp_read(sd, 0xff) & 0x10) ? "on" : "off"));
- v4l2_info(sd, "Prim-mode = 0x%x, video std = 0x%x\n",
- io_read(sd, 0x01) & 0x0f, io_read(sd, 0x00) & 0x3f);
+ v4l2_info(sd, "Prim-mode = 0x%x, video std = 0x%x, v_freq = 0x%x\n",
+ io_read(sd, 0x01) & 0x0f, io_read(sd, 0x00) & 0x3f,
+ (io_read(sd, 0x01) & 0x70) >> 4);
v4l2_info(sd, "-----Video Timings-----\n");
if (read_stdi(sd, &stdi))
cp_write(sd, 0xba, (pdata->hdmi_free_run_mode << 1) | 0x01); /* HDMI free run */
cp_write(sd, 0xf3, 0xdc); /* Low threshold to enter/exit free run mode */
cp_write(sd, 0xf9, 0x23); /* STDI ch. 1 - LCVS change threshold -
- ADI recommended setting [REF_01 c. 2.3.3] */
+ ADI recommended setting [REF_01, c. 2.3.3] */
cp_write(sd, 0x45, 0x23); /* STDI ch. 2 - LCVS change threshold -
- ADI recommended setting [REF_01 c. 2.3.3] */
+ ADI recommended setting [REF_01, c. 2.3.3] */
cp_write(sd, 0xc9, 0x2d); /* use prim_mode and vid_std as free run resolution
for digital formats */
afe_write(sd, 0x02, pdata->ain_sel); /* Select analog input muxing mode */
io_write_and_or(sd, 0x30, ~(1 << 4), pdata->output_bus_lsb_to_msb << 4);
- state->prim_mode = pdata->prim_mode;
- select_input(sd, pdata->prim_mode);
-
- enable_input(sd, pdata->prim_mode);
-
/* interrupts */
io_write(sd, 0x40, 0xc2); /* Configure INT1 */
io_write(sd, 0x41, 0xd7); /* STDI irq for any change, disable INT2 */
v4l2_err(sd, "failed to create all i2c clients\n");
goto err_i2c;
}
+ state->restart_stdi_once = true;
/* work queues */
state->work_queues = create_singlethread_workqueue(client->name);
if (ret & 1) /* Autoexposure */
ret = reg_write(client, mt9v022->reg->max_total_shutter_width,
rect.height + mt9v022->y_skip_top + 43);
- else
- ret = reg_write(client, MT9V022_TOTAL_SHUTTER_WIDTH,
- rect.height + mt9v022->y_skip_top + 43);
+ /*
+ * If autoexposure is off, there is no need to set
+ * MT9V022_TOTAL_SHUTTER_WIDTH here. Autoexposure can be off
+ * only if the user has set exposure manually, using the
+ * V4L2_CID_EXPOSURE_AUTO control with the value V4L2_EXPOSURE_MANUAL.
+ * In this case the register MT9V022_TOTAL_SHUTTER_WIDTH
+ * already contains the correct value.
+ */
}
/* Setup frame format: defaults apart from width and height */
if (!ret)
MODULE_DEVICE_TABLE(platform, gsc_driver_ids);
static const struct of_device_id exynos_gsc_match[] = {
- { .compatible = "samsung,exynos5250-gsc",
- .data = &gsc_v_100_drvdata, },
+ {
+ .compatible = "samsung,exynos5-gsc",
+ .data = &gsc_v_100_drvdata,
+ },
{},
};
MODULE_DEVICE_TABLE(of, exynos_gsc_match);
}
static int ccdc_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
- const struct v4l2_event_subscription *sub)
+ struct v4l2_event_subscription *sub)
{
if (sub->type != V4L2_EVENT_FRAME_SYNC)
return -EINVAL;
}
static int ccdc_unsubscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
- const struct v4l2_event_subscription *sub)
+ struct v4l2_event_subscription *sub)
{
return v4l2_event_unsubscribe(fh, sub);
}
int omap3isp_stat_subscribe_event(struct v4l2_subdev *subdev,
struct v4l2_fh *fh,
- const struct v4l2_event_subscription *sub)
+ struct v4l2_event_subscription *sub)
{
struct ispstat *stat = v4l2_get_subdevdata(subdev);
int omap3isp_stat_unsubscribe_event(struct v4l2_subdev *subdev,
struct v4l2_fh *fh,
- const struct v4l2_event_subscription *sub)
+ struct v4l2_event_subscription *sub)
{
return v4l2_event_unsubscribe(fh, sub);
}
void omap3isp_stat_cleanup(struct ispstat *stat);
int omap3isp_stat_subscribe_event(struct v4l2_subdev *subdev,
struct v4l2_fh *fh,
- const struct v4l2_event_subscription *sub);
+ struct v4l2_event_subscription *sub);
int omap3isp_stat_unsubscribe_event(struct v4l2_subdev *subdev,
struct v4l2_fh *fh,
- const struct v4l2_event_subscription *sub);
+ struct v4l2_event_subscription *sub);
int omap3isp_stat_s_stream(struct v4l2_subdev *subdev, int enable);
int omap3isp_stat_busy(struct ispstat *stat);
}
static int
-isp_video_set_crop(struct file *file, void *fh, struct v4l2_crop *crop)
+isp_video_set_crop(struct file *file, void *fh, const struct v4l2_crop *crop)
{
struct isp_video *video = video_drvdata(file);
struct v4l2_subdev *subdev;
config VIDEO_S5P_MIPI_CSIS
tristate "S5P/EXYNOS MIPI-CSI2 receiver (MIPI-CSIS) driver"
depends on REGULATOR
+ select S5P_SETUP_MIPIPHY
help
This is a V4L2 driver for Samsung S5P and EXYNOS4 SoC MIPI-CSI2
receiver (MIPI-CSIS) devices.
q->mem_ops = &vb2_dma_contig_memops;
q->buf_struct_size = sizeof(struct fimc_vid_buffer);
- vb2_queue_init(q);
+ ret = vb2_queue_init(q);
+ if (ret)
+ goto err_ent;
vid_cap->vd_pad.flags = MEDIA_PAD_FL_SINK;
ret = media_entity_init(&vfd->entity, 1, &vid_cap->vd_pad, 0);
q->buf_struct_size = sizeof(struct flite_buffer);
q->drv_priv = fimc;
- vb2_queue_init(q);
+ ret = vb2_queue_init(q);
+ if (ret < 0)
+ return ret;
fimc->vd_pad.flags = MEDIA_PAD_FL_SINK;
ret = media_entity_init(&vfd->entity, 1, &fimc->vd_pad, 0);
static int fimc_register_callback(struct device *dev, void *p)
{
struct fimc_dev *fimc = dev_get_drvdata(dev);
- struct v4l2_subdev *sd = &fimc->vid_cap.subdev;
+ struct v4l2_subdev *sd;
struct fimc_md *fmd = p;
- int ret = 0;
-
- if (!fimc || !fimc->pdev)
- return 0;
+ int ret;
- if (fimc->pdev->id < 0 || fimc->pdev->id >= FIMC_MAX_DEVS)
+ if (fimc == NULL || fimc->id >= FIMC_MAX_DEVS)
return 0;
- fimc->pipeline_ops = &fimc_pipeline_ops;
- fmd->fimc[fimc->pdev->id] = fimc;
+ sd = &fimc->vid_cap.subdev;
sd->grp_id = FIMC_GROUP_ID;
ret = v4l2_device_register_subdev(&fmd->v4l2_dev, sd);
if (ret) {
v4l2_err(&fmd->v4l2_dev, "Failed to register FIMC.%d (%d)\n",
fimc->id, ret);
+ return ret;
}
- return ret;
+ fimc->pipeline_ops = &fimc_pipeline_ops;
+ fmd->fimc[fimc->id] = fimc;
+ return 0;
}
static int fimc_lite_register_callback(struct device *dev, void *p)
{
struct fimc_lite *fimc = dev_get_drvdata(dev);
- struct v4l2_subdev *sd = &fimc->subdev;
struct fimc_md *fmd = p;
int ret;
- if (fimc == NULL)
+ if (fimc == NULL || fimc->index >= FIMC_LITE_MAX_DEVS)
return 0;
- if (fimc->index >= FIMC_LITE_MAX_DEVS)
- return 0;
+ fimc->subdev.grp_id = FLITE_GROUP_ID;
- fimc->pipeline_ops = &fimc_pipeline_ops;
- fmd->fimc_lite[fimc->index] = fimc;
- sd->grp_id = FLITE_GROUP_ID;
-
- ret = v4l2_device_register_subdev(&fmd->v4l2_dev, sd);
+ ret = v4l2_device_register_subdev(&fmd->v4l2_dev, &fimc->subdev);
if (ret) {
v4l2_err(&fmd->v4l2_dev,
"Failed to register FIMC-LITE.%d (%d)\n",
fimc->index, ret);
+ return ret;
}
- return ret;
+
+ fimc->pipeline_ops = &fimc_pipeline_ops;
+ fmd->fimc_lite[fimc->index] = fimc;
+ return 0;
}
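Both FIMC callbacks are reworked to the same rule: the pointers that publish the device (pipeline_ops and the fmd array slot) are assigned only after v4l2_device_register_subdev() has succeeded, so a failed registration can no longer leave a half-initialized entry visible to the rest of the driver. A generic standalone sketch of this publish-on-success ordering, with illustrative names:

	/* Illustrative stub standing in for the last step that can fail. */
	static int do_register(void) { return 0; }

	struct table { void *slot[8]; };

	static int register_and_publish(struct table *tbl, int id, void *dev)
	{
		int ret = do_register();

		if (ret)
			return ret;	/* nothing published, nothing to undo */

		tbl->slot[id] = dev;	/* publish only after success */
		return 0;
	}

	int main(void)
	{
		struct table t = { { 0 } };
		int dev = 42;

		return register_and_publish(&t, 0, &dev);
	}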
static int csis_register_callback(struct device *dev, void *p)
v4l2_info(sd, "csis%d sd: %s\n", pdev->id, sd->name);
id = pdev->id < 0 ? 0 : pdev->id;
- fmd->csis[id].sd = sd;
sd->grp_id = CSIS_GROUP_ID;
+
ret = v4l2_device_register_subdev(&fmd->v4l2_dev, sd);
- if (ret)
+ if (!ret)
+ fmd->csis[id].sd = sd;
+ else
v4l2_err(&fmd->v4l2_dev,
"Failed to register CSIS subdevice: %d\n", ret);
return ret;
/* Assume a dull encoder, do all the work ourselves. */
static int sh_vou_s_crop(struct file *file, void *fh, const struct v4l2_crop *a)
{
+ struct v4l2_crop a_writable = *a;
struct video_device *vdev = video_devdata(file);
struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
- struct v4l2_rect *rect = &a->c;
+ struct v4l2_rect *rect = &a_writable.c;
struct v4l2_crop sd_crop = {.type = V4L2_BUF_TYPE_VIDEO_OUTPUT};
struct v4l2_pix_format *pix = &vou_dev->pix;
struct sh_vou_geometry geo;
pcdev->icd = NULL;
}
-static int mx1_camera_set_crop(struct soc_camera_device *icd,
- struct v4l2_crop *a)
-{
- struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
-
- return v4l2_subdev_call(sd, video, s_crop, a);
-}
-
static int mx1_camera_set_bus_param(struct soc_camera_device *icd)
{
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
.add = mx1_camera_add_device,
.remove = mx1_camera_remove_device,
.set_bus_param = mx1_camera_set_bus_param,
- .set_crop = mx1_camera_set_crop,
.set_fmt = mx1_camera_set_fmt,
.try_fmt = mx1_camera_try_fmt,
.init_videobuf = mx1_camera_init_videobuf,
bytesperline = soc_mbus_bytes_per_line(icd->user_width,
icd->current_fmt->host_fmt);
- if (bytesperline < 0)
+ if (bytesperline < 0) {
+ spin_unlock_irqrestore(&pcdev->lock, flags);
return bytesperline;
+ }
/*
* I didn't manage to properly enable/disable the prp
pcdev->discard_buffer = dma_alloc_coherent(ici->v4l2_dev.dev,
pcdev->discard_size, &pcdev->discard_buffer_dma,
GFP_KERNEL);
- if (!pcdev->discard_buffer)
+ if (!pcdev->discard_buffer) {
+ spin_unlock_irqrestore(&pcdev->lock, flags);
return -ENOMEM;
+ }
pcdev->buf_discard[0].discard = true;
list_add_tail(&pcdev->buf_discard[0].queue,
}
static int mx2_camera_set_crop(struct soc_camera_device *icd,
- struct v4l2_crop *a)
+ const struct v4l2_crop *a)
{
- struct v4l2_rect *rect = &a->c;
+ struct v4l2_crop a_writable = *a;
+ struct v4l2_rect *rect = &a_writable.c;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct v4l2_mbus_framefmt mf;
int ret;
* default g_crop and cropcap from soc_camera.c
*/
static int mx3_camera_set_crop(struct soc_camera_device *icd,
- struct v4l2_crop *a)
+ const struct v4l2_crop *a)
{
- struct v4l2_rect *rect = &a->c;
+ struct v4l2_crop a_writable = *a;
+ struct v4l2_rect *rect = &a_writable.c;
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx3_camera_dev *mx3_cam = ici->priv;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
}
static int omap1_cam_set_crop(struct soc_camera_device *icd,
- struct v4l2_crop *crop)
+ const struct v4l2_crop *crop)
{
- struct v4l2_rect *rect = &crop->c;
+ const struct v4l2_rect *rect = &crop->c;
const struct soc_camera_format_xlate *xlate = icd->current_fmt;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct device *dev = icd->parent;
}
static int pxa_camera_set_crop(struct soc_camera_device *icd,
- struct v4l2_crop *a)
+ const struct v4l2_crop *a)
{
- struct v4l2_rect *rect = &a->c;
+ const struct v4l2_rect *rect = &a->c;
struct device *dev = icd->parent;
struct soc_camera_host *ici = to_soc_camera_host(dev);
struct pxa_camera_dev *pcdev = ici->priv;
}
/* Check if any dimension of r1 is smaller than respective one of r2 */
-static bool is_smaller(struct v4l2_rect *r1, struct v4l2_rect *r2)
+static bool is_smaller(const struct v4l2_rect *r1, const struct v4l2_rect *r2)
{
return r1->width < r2->width || r1->height < r2->height;
}
/* Check if r1 fails to cover r2 */
-static bool is_inside(struct v4l2_rect *r1, struct v4l2_rect *r2)
+static bool is_inside(const struct v4l2_rect *r1, const struct v4l2_rect *r2)
{
return r1->left > r2->left || r1->top > r2->top ||
r1->left + r1->width < r2->left + r2->width ||
* 3. if (2) failed, try to request the maximum image
*/
static int client_s_crop(struct soc_camera_device *icd, struct v4l2_crop *crop,
- const struct v4l2_crop *cam_crop)
+ struct v4l2_crop *cam_crop)
{
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct v4l2_rect *rect = &crop->c, *cam_rect = &cam_crop->c;
static int sh_mobile_ceu_set_crop(struct soc_camera_device *icd,
const struct v4l2_crop *a)
{
- struct v4l2_rect *rect = &a->c;
+ struct v4l2_crop a_writable = *a;
+ const struct v4l2_rect *rect = &a_writable.c;
struct device *dev = icd->parent;
struct soc_camera_host *ici = to_soc_camera_host(dev);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
* 1. - 2. Apply iterative camera S_CROP for new input window, read back
* actual camera rectangle.
*/
- ret = client_s_crop(icd, a, &cam_crop);
+ ret = client_s_crop(icd, &a_writable, &cam_crop);
if (ret < 0)
return ret;
}
static int sh_mobile_ceu_set_livecrop(struct soc_camera_device *icd,
- struct v4l2_crop *a)
+ const struct v4l2_crop *a)
{
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
/* activate the pid on the device pid filter */
if (adap->props->caps & DVB_USB_ADAP_HAS_PID_FILTER &&
- adap->pid_filtering &&
- adap->props->pid_filter)
+ adap->pid_filtering && adap->props->pid_filter) {
ret = adap->props->pid_filter(adap, dvbdmxfeed->index,
dvbdmxfeed->pid, (count == 1) ? 1 : 0);
- if (ret < 0)
- dev_err(&d->udev->dev, "%s: pid_filter() " \
- "failed=%d\n", KBUILD_MODNAME,
- ret);
+ if (ret < 0)
+ dev_err(&d->udev->dev, "%s: pid_filter() failed=%d\n",
+ KBUILD_MODNAME, ret);
+ }
/* start feeding if it is first pid */
if (adap->feed_count == 1 && count == 1) {
return -EINVAL;
}
- ret = mutex_lock_interruptible(&d->usb_mutex);
- if (ret < 0)
- return ret;
+ mutex_lock(&d->usb_mutex);
dev_dbg(&d->udev->dev, "%s: >>> %*ph\n", __func__, wlen, wbuf);
&rtl2832u_props, "DigitalNow Quad DVB-T Receiver", NULL) },
{ DVB_USB_DEVICE(USB_VID_TERRATEC, 0x00d3,
&rtl2832u_props, "TerraTec Cinergy T Stick RC (Rev. 3)", NULL) },
+ { DVB_USB_DEVICE(USB_VID_DEXATEK, 0x1102,
+ &rtl2832u_props, "Dexatek DK mini DVB-T Dongle", NULL) },
+ { DVB_USB_DEVICE(USB_VID_TERRATEC, 0x00d7,
+ &rtl2832u_props, "TerraTec Cinergy T Stick+", NULL) },
{ }
};
MODULE_DEVICE_TABLE(usb, rtl28xxu_id_table);
if (val & ARIZONA_AIF3_UNDERCLOCKED_STS)
dev_err(arizona->dev, "AIF3 underclocked\n");
- if (val & ARIZONA_AIF3_UNDERCLOCKED_STS)
- dev_err(arizona->dev, "AIF3 underclocked\n");
if (val & ARIZONA_AIF2_UNDERCLOCKED_STS)
+ dev_err(arizona->dev, "AIF2 underclocked\n");
+ if (val & ARIZONA_AIF1_UNDERCLOCKED_STS)
dev_err(arizona->dev, "AIF1 underclocked\n");
if (val & ARIZONA_ISRC2_UNDERCLOCKED_STS)
dev_err(arizona->dev, "ISRC2 underclocked\n");
/* If we have a /RESET GPIO we'll already be reset */
if (!arizona->pdata.reset) {
+ regcache_mark_dirty(arizona->regmap);
+
ret = regmap_write(arizona->regmap, ARIZONA_SOFTWARE_RESET, 0);
if (ret != 0) {
dev_err(dev, "Failed to reset device: %d\n", ret);
goto err_reset;
}
+
+ ret = regcache_sync(arizona->regmap);
+ if (ret != 0) {
+ dev_err(dev, "Failed to sync device: %d\n", ret);
+ goto err_reset;
+ }
}
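The mark-dirty/sync pair around the soft reset is the standard regmap cache sequence: regcache_mark_dirty() flags the cached registers as out of sync with the (about to be reset) hardware, and regcache_sync() afterwards replays the cached values into the chip. A minimal sketch of the same sequence, assuming a device with a register cache; MYDEV_SOFT_RESET is a hypothetical register, not from this patch:

	#include <linux/regmap.h>

	#define MYDEV_SOFT_RESET 0x00	/* hypothetical reset register */

	/* Sketch: carry a regmap cache across a device soft reset. */
	static int mydev_soft_reset(struct regmap *map)
	{
		int ret;

		/* Mark all cached registers as needing a rewrite. */
		regcache_mark_dirty(map);

		ret = regmap_write(map, MYDEV_SOFT_RESET, 0);
		if (ret != 0)
			return ret;

		/* Replay the cached values into the freshly reset chip. */
		return regcache_sync(map);
	}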
ret = arizona_wait_for_boot(arizona);
break;
case WM5110:
ret = mfd_add_devices(arizona->dev, -1, wm5110_devs,
- ARRAY_SIZE(wm5102_devs), NULL, 0, NULL);
+ ARRAY_SIZE(wm5110_devs), NULL, 0, NULL);
break;
}
switch (arizona->rev) {
case 0:
+ case 1:
ctrlif_error = false;
break;
default:
}
if (IS_ENABLED(CONFIG_PWM_TWL6030) && twl_class_is_6030()) {
- child = add_child(TWL6030_MODULE_ID1, "twl6030-pwm", NULL, 0,
+ child = add_child(SUB_CHIP_ID1, "twl6030-pwm", NULL, 0,
false, 0, 0);
if (IS_ERR(child))
return PTR_ERR(child);
irq = sih_mod + twl4030_irq_base;
irq_set_handler_data(irq, agent);
agent->irq_name = kasprintf(GFP_KERNEL, "twl4030_%s", sih->name);
- status = request_threaded_irq(irq, NULL, handle_twl4030_sih, 0,
+ status = request_threaded_irq(irq, NULL, handle_twl4030_sih,
+ IRQF_EARLY_RESUME,
agent->irq_name ?: sih->name, NULL);
dev_info(dev, "%s (irq %d) chaining IRQs %d..%d\n", sih->name,
{ 0x479, 0x0A30 },
{ 0x47B, 0x0810 },
{ 0x47D, 0x0510 },
+ { 0x4D1, 0x017F },
{ 0x500, 0x000D },
{ 0x507, 0x1820 },
{ 0x508, 0x1820 },
{ 0x580, 0x000D },
{ 0x587, 0x1820 },
{ 0x588, 0x1820 },
- { 0x101, 0x8140 },
- { 0x3000, 0x2225 },
- { 0x3001, 0x3a03 },
- { 0x3002, 0x0225 },
- { 0x3003, 0x0801 },
- { 0x3004, 0x6249 },
- { 0x3005, 0x0c04 },
- { 0x3006, 0x0225 },
- { 0x3007, 0x5901 },
- { 0x3008, 0xe249 },
- { 0x3009, 0x030d },
- { 0x300a, 0x0249 },
- { 0x300b, 0x2c01 },
- { 0x300c, 0xe249 },
- { 0x300d, 0x4342 },
- { 0x300e, 0xe249 },
- { 0x300f, 0x73c0 },
- { 0x3010, 0x4249 },
- { 0x3011, 0x0c00 },
- { 0x3012, 0x0225 },
- { 0x3013, 0x1f01 },
- { 0x3014, 0x0225 },
- { 0x3015, 0x1e01 },
- { 0x3016, 0x0225 },
- { 0x3017, 0xfa00 },
- { 0x3018, 0x0000 },
- { 0x3019, 0xf000 },
- { 0x301a, 0x0000 },
- { 0x301b, 0xf000 },
- { 0x301c, 0x0000 },
- { 0x301d, 0xf000 },
- { 0x301e, 0x0000 },
- { 0x301f, 0xf000 },
- { 0x3020, 0x0000 },
- { 0x3021, 0xf000 },
- { 0x3022, 0x0000 },
- { 0x3023, 0xf000 },
- { 0x3024, 0x0000 },
- { 0x3025, 0xf000 },
- { 0x3026, 0x0000 },
- { 0x3027, 0xf000 },
- { 0x3028, 0x0000 },
- { 0x3029, 0xf000 },
- { 0x302a, 0x0000 },
- { 0x302b, 0xf000 },
- { 0x302c, 0x0000 },
- { 0x302d, 0xf000 },
- { 0x302e, 0x0000 },
- { 0x302f, 0xf000 },
- { 0x3030, 0x0225 },
- { 0x3031, 0x1a01 },
- { 0x3032, 0x0225 },
- { 0x3033, 0x1e00 },
- { 0x3034, 0x0225 },
- { 0x3035, 0x1f00 },
- { 0x3036, 0x6225 },
- { 0x3037, 0xf800 },
- { 0x3038, 0x0000 },
- { 0x3039, 0xf000 },
- { 0x303a, 0x0000 },
- { 0x303b, 0xf000 },
- { 0x303c, 0x0000 },
- { 0x303d, 0xf000 },
- { 0x303e, 0x0000 },
- { 0x303f, 0xf000 },
- { 0x3040, 0x2226 },
- { 0x3041, 0x3a03 },
- { 0x3042, 0x0226 },
- { 0x3043, 0x0801 },
- { 0x3044, 0x6249 },
- { 0x3045, 0x0c06 },
- { 0x3046, 0x0226 },
- { 0x3047, 0x5901 },
- { 0x3048, 0xe249 },
- { 0x3049, 0x030d },
- { 0x304a, 0x0249 },
- { 0x304b, 0x2c01 },
- { 0x304c, 0xe249 },
- { 0x304d, 0x4342 },
- { 0x304e, 0xe249 },
- { 0x304f, 0x73c0 },
- { 0x3050, 0x4249 },
- { 0x3051, 0x0c00 },
- { 0x3052, 0x0226 },
- { 0x3053, 0x1f01 },
- { 0x3054, 0x0226 },
- { 0x3055, 0x1e01 },
- { 0x3056, 0x0226 },
- { 0x3057, 0xfa00 },
- { 0x3058, 0x0000 },
- { 0x3059, 0xf000 },
- { 0x305a, 0x0000 },
- { 0x305b, 0xf000 },
- { 0x305c, 0x0000 },
- { 0x305d, 0xf000 },
- { 0x305e, 0x0000 },
- { 0x305f, 0xf000 },
- { 0x3060, 0x0000 },
- { 0x3061, 0xf000 },
- { 0x3062, 0x0000 },
- { 0x3063, 0xf000 },
- { 0x3064, 0x0000 },
- { 0x3065, 0xf000 },
- { 0x3066, 0x0000 },
- { 0x3067, 0xf000 },
- { 0x3068, 0x0000 },
- { 0x3069, 0xf000 },
- { 0x306a, 0x0000 },
- { 0x306b, 0xf000 },
- { 0x306c, 0x0000 },
- { 0x306d, 0xf000 },
- { 0x306e, 0x0000 },
- { 0x306f, 0xf000 },
- { 0x3070, 0x0226 },
- { 0x3071, 0x1a01 },
- { 0x3072, 0x0226 },
- { 0x3073, 0x1e00 },
- { 0x3074, 0x0226 },
- { 0x3075, 0x1f00 },
- { 0x3076, 0x6226 },
- { 0x3077, 0xf800 },
- { 0x3078, 0x0000 },
- { 0x3079, 0xf000 },
- { 0x307a, 0x0000 },
- { 0x307b, 0xf000 },
- { 0x307c, 0x0000 },
- { 0x307d, 0xf000 },
- { 0x307e, 0x0000 },
- { 0x307f, 0xf000 },
- { 0x3080, 0x2227 },
- { 0x3081, 0x3a03 },
- { 0x3082, 0x0227 },
- { 0x3083, 0x0801 },
- { 0x3084, 0x6255 },
- { 0x3085, 0x0c04 },
- { 0x3086, 0x0227 },
- { 0x3087, 0x5901 },
- { 0x3088, 0xe255 },
- { 0x3089, 0x030d },
- { 0x308a, 0x0255 },
- { 0x308b, 0x2c01 },
- { 0x308c, 0xe255 },
- { 0x308d, 0x4342 },
- { 0x308e, 0xe255 },
- { 0x308f, 0x73c0 },
- { 0x3090, 0x4255 },
- { 0x3091, 0x0c00 },
- { 0x3092, 0x0227 },
- { 0x3093, 0x1f01 },
- { 0x3094, 0x0227 },
- { 0x3095, 0x1e01 },
- { 0x3096, 0x0227 },
- { 0x3097, 0xfa00 },
- { 0x3098, 0x0000 },
- { 0x3099, 0xf000 },
- { 0x309a, 0x0000 },
- { 0x309b, 0xf000 },
- { 0x309c, 0x0000 },
- { 0x309d, 0xf000 },
- { 0x309e, 0x0000 },
- { 0x309f, 0xf000 },
- { 0x30a0, 0x0000 },
- { 0x30a1, 0xf000 },
- { 0x30a2, 0x0000 },
- { 0x30a3, 0xf000 },
- { 0x30a4, 0x0000 },
- { 0x30a5, 0xf000 },
- { 0x30a6, 0x0000 },
- { 0x30a7, 0xf000 },
- { 0x30a8, 0x0000 },
- { 0x30a9, 0xf000 },
- { 0x30aa, 0x0000 },
- { 0x30ab, 0xf000 },
- { 0x30ac, 0x0000 },
- { 0x30ad, 0xf000 },
- { 0x30ae, 0x0000 },
- { 0x30af, 0xf000 },
- { 0x30b0, 0x0227 },
- { 0x30b1, 0x1a01 },
- { 0x30b2, 0x0227 },
- { 0x30b3, 0x1e00 },
- { 0x30b4, 0x0227 },
- { 0x30b5, 0x1f00 },
- { 0x30b6, 0x6227 },
- { 0x30b7, 0xf800 },
- { 0x30b8, 0x0000 },
- { 0x30b9, 0xf000 },
- { 0x30ba, 0x0000 },
- { 0x30bb, 0xf000 },
- { 0x30bc, 0x0000 },
- { 0x30bd, 0xf000 },
- { 0x30be, 0x0000 },
- { 0x30bf, 0xf000 },
- { 0x30c0, 0x2228 },
- { 0x30c1, 0x3a03 },
- { 0x30c2, 0x0228 },
- { 0x30c3, 0x0801 },
- { 0x30c4, 0x6255 },
- { 0x30c5, 0x0c06 },
- { 0x30c6, 0x0228 },
- { 0x30c7, 0x5901 },
- { 0x30c8, 0xe255 },
- { 0x30c9, 0x030d },
- { 0x30ca, 0x0255 },
- { 0x30cb, 0x2c01 },
- { 0x30cc, 0xe255 },
- { 0x30cd, 0x4342 },
- { 0x30ce, 0xe255 },
- { 0x30cf, 0x73c0 },
- { 0x30d0, 0x4255 },
- { 0x30d1, 0x0c00 },
- { 0x30d2, 0x0228 },
- { 0x30d3, 0x1f01 },
- { 0x30d4, 0x0228 },
- { 0x30d5, 0x1e01 },
- { 0x30d6, 0x0228 },
- { 0x30d7, 0xfa00 },
- { 0x30d8, 0x0000 },
- { 0x30d9, 0xf000 },
- { 0x30da, 0x0000 },
- { 0x30db, 0xf000 },
- { 0x30dc, 0x0000 },
- { 0x30dd, 0xf000 },
- { 0x30de, 0x0000 },
- { 0x30df, 0xf000 },
- { 0x30e0, 0x0000 },
- { 0x30e1, 0xf000 },
- { 0x30e2, 0x0000 },
- { 0x30e3, 0xf000 },
- { 0x30e4, 0x0000 },
- { 0x30e5, 0xf000 },
- { 0x30e6, 0x0000 },
- { 0x30e7, 0xf000 },
- { 0x30e8, 0x0000 },
- { 0x30e9, 0xf000 },
- { 0x30ea, 0x0000 },
- { 0x30eb, 0xf000 },
- { 0x30ec, 0x0000 },
- { 0x30ed, 0xf000 },
- { 0x30ee, 0x0000 },
- { 0x30ef, 0xf000 },
- { 0x30f0, 0x0228 },
- { 0x30f1, 0x1a01 },
- { 0x30f2, 0x0228 },
- { 0x30f3, 0x1e00 },
- { 0x30f4, 0x0228 },
- { 0x30f5, 0x1f00 },
- { 0x30f6, 0x6228 },
- { 0x30f7, 0xf800 },
- { 0x30f8, 0x0000 },
- { 0x30f9, 0xf000 },
- { 0x30fa, 0x0000 },
- { 0x30fb, 0xf000 },
- { 0x30fc, 0x0000 },
- { 0x30fd, 0xf000 },
- { 0x30fe, 0x0000 },
- { 0x30ff, 0xf000 },
- { 0x3100, 0x222b },
- { 0x3101, 0x3a03 },
- { 0x3102, 0x222b },
- { 0x3103, 0x5803 },
- { 0x3104, 0xe26f },
- { 0x3105, 0x030d },
- { 0x3106, 0x626f },
- { 0x3107, 0x2c01 },
- { 0x3108, 0xe26f },
- { 0x3109, 0x4342 },
- { 0x310a, 0xe26f },
- { 0x310b, 0x73c0 },
- { 0x310c, 0x026f },
- { 0x310d, 0x0c00 },
- { 0x310e, 0x022b },
- { 0x310f, 0x1f01 },
- { 0x3110, 0x022b },
- { 0x3111, 0x1e01 },
- { 0x3112, 0x022b },
- { 0x3113, 0xfa00 },
- { 0x3114, 0x0000 },
- { 0x3115, 0xf000 },
- { 0x3116, 0x0000 },
- { 0x3117, 0xf000 },
- { 0x3118, 0x0000 },
- { 0x3119, 0xf000 },
- { 0x311a, 0x0000 },
- { 0x311b, 0xf000 },
- { 0x311c, 0x0000 },
- { 0x311d, 0xf000 },
- { 0x311e, 0x0000 },
- { 0x311f, 0xf000 },
- { 0x3120, 0x022b },
- { 0x3121, 0x0a01 },
- { 0x3122, 0x022b },
- { 0x3123, 0x1e00 },
- { 0x3124, 0x022b },
- { 0x3125, 0x1f00 },
- { 0x3126, 0x622b },
- { 0x3127, 0xf800 },
- { 0x3128, 0x0000 },
- { 0x3129, 0xf000 },
- { 0x312a, 0x0000 },
- { 0x312b, 0xf000 },
- { 0x312c, 0x0000 },
- { 0x312d, 0xf000 },
- { 0x312e, 0x0000 },
- { 0x312f, 0xf000 },
- { 0x3130, 0x0000 },
- { 0x3131, 0xf000 },
- { 0x3132, 0x0000 },
- { 0x3133, 0xf000 },
- { 0x3134, 0x0000 },
- { 0x3135, 0xf000 },
- { 0x3136, 0x0000 },
- { 0x3137, 0xf000 },
- { 0x3138, 0x0000 },
- { 0x3139, 0xf000 },
- { 0x313a, 0x0000 },
- { 0x313b, 0xf000 },
- { 0x313c, 0x0000 },
- { 0x313d, 0xf000 },
- { 0x313e, 0x0000 },
- { 0x313f, 0xf000 },
- { 0x3140, 0x0000 },
- { 0x3141, 0xf000 },
- { 0x3142, 0x0000 },
- { 0x3143, 0xf000 },
- { 0x3144, 0x0000 },
- { 0x3145, 0xf000 },
- { 0x3146, 0x0000 },
- { 0x3147, 0xf000 },
- { 0x3148, 0x0000 },
- { 0x3149, 0xf000 },
- { 0x314a, 0x0000 },
- { 0x314b, 0xf000 },
- { 0x314c, 0x0000 },
- { 0x314d, 0xf000 },
- { 0x314e, 0x0000 },
- { 0x314f, 0xf000 },
- { 0x3150, 0x0000 },
- { 0x3151, 0xf000 },
- { 0x3152, 0x0000 },
- { 0x3153, 0xf000 },
- { 0x3154, 0x0000 },
- { 0x3155, 0xf000 },
- { 0x3156, 0x0000 },
- { 0x3157, 0xf000 },
- { 0x3158, 0x0000 },
- { 0x3159, 0xf000 },
- { 0x315a, 0x0000 },
- { 0x315b, 0xf000 },
- { 0x315c, 0x0000 },
- { 0x315d, 0xf000 },
- { 0x315e, 0x0000 },
- { 0x315f, 0xf000 },
- { 0x3160, 0x0000 },
- { 0x3161, 0xf000 },
- { 0x3162, 0x0000 },
- { 0x3163, 0xf000 },
- { 0x3164, 0x0000 },
- { 0x3165, 0xf000 },
- { 0x3166, 0x0000 },
- { 0x3167, 0xf000 },
- { 0x3168, 0x0000 },
- { 0x3169, 0xf000 },
- { 0x316a, 0x0000 },
- { 0x316b, 0xf000 },
- { 0x316c, 0x0000 },
- { 0x316d, 0xf000 },
- { 0x316e, 0x0000 },
- { 0x316f, 0xf000 },
- { 0x3170, 0x0000 },
- { 0x3171, 0xf000 },
- { 0x3172, 0x0000 },
- { 0x3173, 0xf000 },
- { 0x3174, 0x0000 },
- { 0x3175, 0xf000 },
- { 0x3176, 0x0000 },
- { 0x3177, 0xf000 },
- { 0x3178, 0x0000 },
- { 0x3179, 0xf000 },
- { 0x317a, 0x0000 },
- { 0x317b, 0xf000 },
- { 0x317c, 0x0000 },
- { 0x317d, 0xf000 },
- { 0x317e, 0x0000 },
- { 0x317f, 0xf000 },
- { 0x3180, 0x2001 },
- { 0x3181, 0xf101 },
- { 0x3182, 0x0000 },
- { 0x3183, 0xf000 },
- { 0x3184, 0x0000 },
- { 0x3185, 0xf000 },
- { 0x3186, 0x0000 },
- { 0x3187, 0xf000 },
- { 0x3188, 0x0000 },
- { 0x3189, 0xf000 },
- { 0x318a, 0x0000 },
- { 0x318b, 0xf000 },
- { 0x318c, 0x0000 },
- { 0x318d, 0xf000 },
- { 0x318e, 0x0000 },
- { 0x318f, 0xf000 },
- { 0x3190, 0x0000 },
- { 0x3191, 0xf000 },
- { 0x3192, 0x0000 },
- { 0x3193, 0xf000 },
- { 0x3194, 0x0000 },
- { 0x3195, 0xf000 },
- { 0x3196, 0x0000 },
- { 0x3197, 0xf000 },
- { 0x3198, 0x0000 },
- { 0x3199, 0xf000 },
- { 0x319a, 0x0000 },
- { 0x319b, 0xf000 },
- { 0x319c, 0x0000 },
- { 0x319d, 0xf000 },
- { 0x319e, 0x0000 },
- { 0x319f, 0xf000 },
- { 0x31a0, 0x0000 },
- { 0x31a1, 0xf000 },
- { 0x31a2, 0x0000 },
- { 0x31a3, 0xf000 },
- { 0x31a4, 0x0000 },
- { 0x31a5, 0xf000 },
- { 0x31a6, 0x0000 },
- { 0x31a7, 0xf000 },
- { 0x31a8, 0x0000 },
- { 0x31a9, 0xf000 },
- { 0x31aa, 0x0000 },
- { 0x31ab, 0xf000 },
- { 0x31ac, 0x0000 },
- { 0x31ad, 0xf000 },
- { 0x31ae, 0x0000 },
- { 0x31af, 0xf000 },
- { 0x31b0, 0x0000 },
- { 0x31b1, 0xf000 },
- { 0x31b2, 0x0000 },
- { 0x31b3, 0xf000 },
- { 0x31b4, 0x0000 },
- { 0x31b5, 0xf000 },
- { 0x31b6, 0x0000 },
- { 0x31b7, 0xf000 },
- { 0x31b8, 0x0000 },
- { 0x31b9, 0xf000 },
- { 0x31ba, 0x0000 },
- { 0x31bb, 0xf000 },
- { 0x31bc, 0x0000 },
- { 0x31bd, 0xf000 },
- { 0x31be, 0x0000 },
- { 0x31bf, 0xf000 },
- { 0x31c0, 0x0000 },
- { 0x31c1, 0xf000 },
- { 0x31c2, 0x0000 },
- { 0x31c3, 0xf000 },
- { 0x31c4, 0x0000 },
- { 0x31c5, 0xf000 },
- { 0x31c6, 0x0000 },
- { 0x31c7, 0xf000 },
- { 0x31c8, 0x0000 },
- { 0x31c9, 0xf000 },
- { 0x31ca, 0x0000 },
- { 0x31cb, 0xf000 },
- { 0x31cc, 0x0000 },
- { 0x31cd, 0xf000 },
- { 0x31ce, 0x0000 },
- { 0x31cf, 0xf000 },
- { 0x31d0, 0x0000 },
- { 0x31d1, 0xf000 },
- { 0x31d2, 0x0000 },
- { 0x31d3, 0xf000 },
- { 0x31d4, 0x0000 },
- { 0x31d5, 0xf000 },
- { 0x31d6, 0x0000 },
- { 0x31d7, 0xf000 },
- { 0x31d8, 0x0000 },
- { 0x31d9, 0xf000 },
- { 0x31da, 0x0000 },
- { 0x31db, 0xf000 },
- { 0x31dc, 0x0000 },
- { 0x31dd, 0xf000 },
- { 0x31de, 0x0000 },
- { 0x31df, 0xf000 },
- { 0x31e0, 0x0000 },
- { 0x31e1, 0xf000 },
- { 0x31e2, 0x0000 },
- { 0x31e3, 0xf000 },
- { 0x31e4, 0x0000 },
- { 0x31e5, 0xf000 },
- { 0x31e6, 0x0000 },
- { 0x31e7, 0xf000 },
- { 0x31e8, 0x0000 },
- { 0x31e9, 0xf000 },
- { 0x31ea, 0x0000 },
- { 0x31eb, 0xf000 },
- { 0x31ec, 0x0000 },
- { 0x31ed, 0xf000 },
- { 0x31ee, 0x0000 },
- { 0x31ef, 0xf000 },
- { 0x31f0, 0x0000 },
- { 0x31f1, 0xf000 },
- { 0x31f2, 0x0000 },
- { 0x31f3, 0xf000 },
- { 0x31f4, 0x0000 },
- { 0x31f5, 0xf000 },
- { 0x31f6, 0x0000 },
- { 0x31f7, 0xf000 },
- { 0x31f8, 0x0000 },
- { 0x31f9, 0xf000 },
- { 0x31fa, 0x0000 },
- { 0x31fb, 0xf000 },
- { 0x31fc, 0x0000 },
- { 0x31fd, 0xf000 },
- { 0x31fe, 0x0000 },
- { 0x31ff, 0xf000 },
- { 0x024d, 0xff50 },
- { 0x0252, 0xff50 },
- { 0x0259, 0x0112 },
- { 0x025e, 0x0112 },
- { 0x101, 0x0304 },
{ 0x80, 0x0000 },
};
MMC_CAP_CMD23,
};
-static struct dw_mci_drv_data exynos5250_drv_data = {
+static const struct dw_mci_drv_data exynos5250_drv_data = {
.caps = exynos5250_dwmmc_caps,
.init = dw_mci_exynos_priv_init,
.setup_clock = dw_mci_exynos_setup_clock,
static const struct of_device_id dw_mci_exynos_match[] = {
{ .compatible = "samsung,exynos5250-dw-mshc",
- .data = (void *)&exynos5250_drv_data, },
+ .data = &exynos5250_drv_data, },
{},
};
-MODULE_DEVICE_TABLE(of, dw_mci_pltfm_match);
+MODULE_DEVICE_TABLE(of, dw_mci_exynos_match);
int dw_mci_exynos_probe(struct platform_device *pdev)
{
- struct dw_mci_drv_data *drv_data;
+ const struct dw_mci_drv_data *drv_data;
const struct of_device_id *match;
match = of_match_node(dw_mci_exynos_match, pdev->dev.of_node);
#include "dw_mmc.h"
int dw_mci_pltfm_register(struct platform_device *pdev,
- struct dw_mci_drv_data *drv_data)
+ const struct dw_mci_drv_data *drv_data)
{
struct dw_mci *host;
struct resource *regs;
if (!host->regs)
return -ENOMEM;
- if (host->drv_data->init) {
- ret = host->drv_data->init(host);
+ if (drv_data && drv_data->init) {
+ ret = drv_data->init(host);
if (ret)
return ret;
}
#define _DW_MMC_PLTFM_H_
extern int dw_mci_pltfm_register(struct platform_device *pdev,
- struct dw_mci_drv_data *drv_data);
+ const struct dw_mci_drv_data *drv_data);
extern int __devexit dw_mci_pltfm_remove(struct platform_device *pdev);
extern const struct dev_pm_ops dw_mci_pltfm_pmops;
{
struct mmc_data *data;
struct dw_mci_slot *slot = mmc_priv(mmc);
+ struct dw_mci_drv_data *drv_data = slot->host->drv_data;
u32 cmdr;
cmd->error = -EINPROGRESS;
cmdr |= SDMMC_CMD_DAT_WR;
}
- if (slot->host->drv_data->prepare_command)
- slot->host->drv_data->prepare_command(slot->host, &cmdr);
+ if (drv_data && drv_data->prepare_command)
+ drv_data->prepare_command(slot->host, &cmdr);
return cmdr;
}
return 0;
}
-static struct dw_mci_dma_ops dw_mci_idmac_ops = {
+static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
.init = dw_mci_idmac_init,
.start = dw_mci_idmac_start_dma,
.stop = dw_mci_idmac_stop_dma,
static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct dw_mci_slot *slot = mmc_priv(mmc);
+ struct dw_mci_drv_data *drv_data = slot->host->drv_data;
u32 regs;
/* set default 1 bit mode */
slot->clock = ios->clock;
}
- if (slot->host->drv_data->set_ios)
- slot->host->drv_data->set_ios(slot->host, ios);
+ if (drv_data && drv_data->set_ios)
+ drv_data->set_ios(slot->host, ios);
switch (ios->power_mode) {
case MMC_POWER_UP:
{
struct mmc_host *mmc;
struct dw_mci_slot *slot;
+ struct dw_mci_drv_data *drv_data = host->drv_data;
int ctrl_id, ret;
u8 bus_width;
} else {
ctrl_id = to_platform_device(host->dev)->id;
}
- if (host->drv_data && host->drv_data->caps)
- mmc->caps |= host->drv_data->caps[ctrl_id];
+ if (drv_data && drv_data->caps)
+ mmc->caps |= drv_data->caps[ctrl_id];
if (host->pdata->caps2)
mmc->caps2 = host->pdata->caps2;
else
bus_width = 1;
- if (host->drv_data->setup_bus) {
+ if (drv_data && drv_data->setup_bus) {
struct device_node *slot_np;
slot_np = dw_mci_of_find_slot_node(host->dev, slot->id);
- ret = host->drv_data->setup_bus(host, slot_np, bus_width);
+ ret = drv_data->setup_bus(host, slot_np, bus_width);
if (ret)
goto err_setup_bus;
}
/* Determine which DMA interface to use */
#ifdef CONFIG_MMC_DW_IDMAC
host->dma_ops = &dw_mci_idmac_ops;
- dev_info(&host->dev, "Using internal DMA controller.\n");
+ dev_info(host->dev, "Using internal DMA controller.\n");
#endif
if (!host->dma_ops)
struct dw_mci_board *pdata;
struct device *dev = host->dev;
struct device_node *np = dev->of_node;
+ struct dw_mci_drv_data *drv_data = host->drv_data;
int idx, ret;
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
- if (host->drv_data->parse_dt) {
- ret = host->drv_data->parse_dt(host);
+ if (drv_data && drv_data->parse_dt) {
+ ret = drv_data->parse_dt(host);
if (ret)
return ERR_PTR(ret);
}
int dw_mci_probe(struct dw_mci *host)
{
+ struct dw_mci_drv_data *drv_data = host->drv_data;
int width, i, ret = 0;
u32 fifo_size;
int init_slots = 0;
else
host->bus_hz = clk_get_rate(host->ciu_clk);
- if (host->drv_data->setup_clock) {
- ret = host->drv_data->setup_clock(host);
+ if (drv_data && drv_data->setup_clock) {
+ ret = drv_data->setup_clock(host);
if (ret) {
dev_err(host->dev,
"implementation specific clock setup failed\n");
else
host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
+ /*
+ * Enable interrupts for command done, data over, data empty, card det,
+ * receive ready and error such as transmit, receive timeout, crc error
+ */
+ mci_writel(host, RINTSTS, 0xFFFFFFFF);
+ mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
+ SDMMC_INT_TXDR | SDMMC_INT_RXDR |
+ DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
+ mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
+
+ dev_info(host->dev, "DW MMC controller at irq %d, "
+ "%d bit host data width, "
+ "%u deep fifo\n",
+ host->irq, width, fifo_size);
+
/* We need at least one slot to succeed */
for (i = 0; i < host->num_slots; i++) {
ret = dw_mci_init_slot(host, i);
else
host->data_offset = DATA_240A_OFFSET;
- /*
- * Enable interrupts for command done, data over, data empty, card det,
- * receive ready and error such as transmit, receive timeout, crc error
- */
- mci_writel(host, RINTSTS, 0xFFFFFFFF);
- mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
- SDMMC_INT_TXDR | SDMMC_INT_RXDR |
- DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
- mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
-
- dev_info(host->dev, "DW MMC controller at irq %d, "
- "%d bit host data width, "
- "%u deep fifo\n",
- host->irq, width, fifo_size);
if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
MODULE_DESCRIPTION("i.MX Multimedia Card Interface Driver");
MODULE_AUTHOR("Sascha Hauer, Pengutronix");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:imx-mmc");
+MODULE_ALIAS("platform:mxc-mmc");
static int omap_hsmmc_card_detect(struct device *dev, int slot)
{
- struct omap_mmc_platform_data *mmc = dev->platform_data;
+ struct omap_hsmmc_host *host = dev_get_drvdata(dev);
+ struct omap_mmc_platform_data *mmc = host->pdata;
/* NOTE: assumes card detect signal is active-low */
return !gpio_get_value_cansleep(mmc->slots[0].switch_pin);
static int omap_hsmmc_get_wp(struct device *dev, int slot)
{
- struct omap_mmc_platform_data *mmc = dev->platform_data;
+ struct omap_hsmmc_host *host = dev_get_drvdata(dev);
+ struct omap_mmc_platform_data *mmc = host->pdata;
/* NOTE: assumes write protect signal is active-high */
return gpio_get_value_cansleep(mmc->slots[0].gpio_wp);
static int omap_hsmmc_get_cover_state(struct device *dev, int slot)
{
- struct omap_mmc_platform_data *mmc = dev->platform_data;
+ struct omap_hsmmc_host *host = dev_get_drvdata(dev);
+ struct omap_mmc_platform_data *mmc = host->pdata;
/* NOTE: assumes card detect signal is active-low */
return !gpio_get_value_cansleep(mmc->slots[0].switch_pin);
static int omap_hsmmc_suspend_cdirq(struct device *dev, int slot)
{
- struct omap_mmc_platform_data *mmc = dev->platform_data;
+ struct omap_hsmmc_host *host = dev_get_drvdata(dev);
+ struct omap_mmc_platform_data *mmc = host->pdata;
disable_irq(mmc->slots[0].card_detect_irq);
return 0;
static int omap_hsmmc_resume_cdirq(struct device *dev, int slot)
{
- struct omap_mmc_platform_data *mmc = dev->platform_data;
+ struct omap_hsmmc_host *host = dev_get_drvdata(dev);
+ struct omap_mmc_platform_data *mmc = host->pdata;
enable_irq(mmc->slots[0].card_detect_irq);
return 0;
clk_put(host->dbclk);
}
- mmc_free_host(host->mmc);
+ omap_hsmmc_gpio_free(host->pdata);
iounmap(host->base);
- omap_hsmmc_gpio_free(pdev->dev.platform_data);
+ mmc_free_host(host->mmc);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res)
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include <linux/err.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/err.h>
struct sdhci_dove_priv *priv;
int ret;
- ret = sdhci_pltfm_register(pdev, &sdhci_dove_pdata);
- if (ret)
- goto sdhci_dove_register_fail;
-
priv = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_dove_priv),
GFP_KERNEL);
if (!priv) {
dev_err(&pdev->dev, "unable to allocate private data");
- ret = -ENOMEM;
- goto sdhci_dove_allocate_fail;
+ return -ENOMEM;
}
+ priv->clk = clk_get(&pdev->dev, NULL);
+ if (!IS_ERR(priv->clk))
+ clk_prepare_enable(priv->clk);
+
+ ret = sdhci_pltfm_register(pdev, &sdhci_dove_pdata);
+ if (ret)
+ goto sdhci_dove_register_fail;
+
host = platform_get_drvdata(pdev);
pltfm_host = sdhci_priv(host);
pltfm_host->priv = priv;
- priv->clk = clk_get(&pdev->dev, NULL);
- if (!IS_ERR(priv->clk))
- clk_prepare_enable(priv->clk);
return 0;
-sdhci_dove_allocate_fail:
- sdhci_pltfm_unregister(pdev);
sdhci_dove_register_fail:
+ if (!IS_ERR(priv->clk)) {
+ clk_disable_unprepare(priv->clk);
+ clk_put(priv->clk);
+ }
return ret;
}
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_dove_priv *priv = pltfm_host->priv;
- if (priv->clk) {
- if (!IS_ERR(priv->clk)) {
- clk_disable_unprepare(priv->clk);
- clk_put(priv->clk);
- }
- devm_kfree(&pdev->dev, priv->clk);
+ sdhci_pltfm_unregister(pdev);
+
+ if (!IS_ERR(priv->clk)) {
+ clk_disable_unprepare(priv->clk);
+ clk_put(priv->clk);
}
- return sdhci_pltfm_unregister(pdev);
+ return 0;
}
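The reordered probe and remove now mirror each other: the clock is acquired and enabled before sdhci_pltfm_register(), and both the failure path and sdhci_dove_remove() undo the steps in reverse order. A standalone sketch of this acquire/unwind mirroring, with illustrative stubs in place of clk_prepare_enable() and sdhci_pltfm_register():

	#include <stdio.h>

	static int enable_clock(void)        { return 0; }  /* stub, succeeds */
	static void disable_clock(void)      { }
	static int register_controller(void) { return -1; } /* stub, fails */

	static int probe_sketch(void)
	{
		int ret;

		ret = enable_clock();		/* acquired first ... */
		if (ret)
			return ret;

		ret = register_controller();
		if (ret) {
			disable_clock();	/* ... released last on failure */
			return ret;
		}
		return 0;
	}

	int main(void)
	{
		printf("probe: %d\n", probe_sketch());	/* prints -1 */
		return 0;
	}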
static const struct of_device_id sdhci_dove_of_match_table[] __devinitdata = {
}
#endif
+static void esdhc_of_platform_init(struct sdhci_host *host)
+{
+ u32 vvn;
+
+ vvn = in_be32(host->ioaddr + SDHCI_SLOT_INT_STATUS);
+ vvn = (vvn & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT;
+ if (vvn == VENDOR_V_22)
+ host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23;
+}
+
static struct sdhci_ops sdhci_esdhc_ops = {
.read_l = esdhc_readl,
.read_w = esdhc_readw,
.enable_dma = esdhc_of_enable_dma,
.get_max_clock = esdhc_of_get_max_clock,
.get_min_clock = esdhc_of_get_min_clock,
+ .platform_init = esdhc_of_platform_init,
#ifdef CONFIG_PM
.platform_suspend = esdhc_of_suspend,
.platform_resume = esdhc_of_resume,
return ERR_PTR(-ENODEV);
}
- if (pci_resource_len(pdev, bar) != 0x100) {
+ if (pci_resource_len(pdev, bar) < 0x100) {
dev_err(&pdev->dev, "Invalid iomem size. You may "
"experience problems.\n");
}
goto err_remap;
}
+ /*
+ * Some platforms need to probe the controller to be able to
+ * determine which caps should be used.
+ */
+ if (host->ops && host->ops->platform_init)
+ host->ops->platform_init(host);
+
platform_set_drvdata(pdev, host);
return host;
if (ourhost->cur_clk != best_src) {
struct clk *clk = ourhost->clk_bus[best_src];
- clk_enable(clk);
- clk_disable(ourhost->clk_bus[ourhost->cur_clk]);
+ clk_prepare_enable(clk);
+ clk_disable_unprepare(ourhost->clk_bus[ourhost->cur_clk]);
/* turn clock off to card before changing clock source */
writew(0, host->ioaddr + SDHCI_CLOCK_CONTROL);
}
/* enable the local io clock and keep it running for the moment. */
- clk_enable(sc->clk_io);
+ clk_prepare_enable(sc->clk_io);
for (clks = 0, ptr = 0; ptr < MAX_BUS_CLK; ptr++) {
struct clk *clk;
}
#ifndef CONFIG_PM_RUNTIME
- clk_enable(sc->clk_bus[sc->cur_clk]);
+ clk_prepare_enable(sc->clk_bus[sc->cur_clk]);
#endif
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
sdhci_s3c_setup_card_detect_gpio(sc);
#ifdef CONFIG_PM_RUNTIME
- clk_disable(sc->clk_io);
+ if (pdata->cd_type != S3C_SDHCI_CD_INTERNAL)
+ clk_disable_unprepare(sc->clk_io);
#endif
return 0;
err_req_regs:
#ifndef CONFIG_PM_RUNTIME
- clk_disable(sc->clk_bus[sc->cur_clk]);
+ clk_disable_unprepare(sc->clk_bus[sc->cur_clk]);
#endif
for (ptr = 0; ptr < MAX_BUS_CLK; ptr++) {
if (sc->clk_bus[ptr]) {
}
err_no_busclks:
- clk_disable(sc->clk_io);
+ clk_disable_unprepare(sc->clk_io);
clk_put(sc->clk_io);
err_io_clk:
gpio_free(sc->ext_cd_gpio);
#ifdef CONFIG_PM_RUNTIME
- clk_enable(sc->clk_io);
+ if (pdata->cd_type != S3C_SDHCI_CD_INTERNAL)
+ clk_prepare_enable(sc->clk_io);
#endif
sdhci_remove_host(host, 1);
pm_runtime_disable(&pdev->dev);
#ifndef CONFIG_PM_RUNTIME
- clk_disable(sc->clk_bus[sc->cur_clk]);
+ clk_disable_unprepare(sc->clk_bus[sc->cur_clk]);
#endif
for (ptr = 0; ptr < MAX_BUS_CLK; ptr++) {
if (sc->clk_bus[ptr]) {
clk_put(sc->clk_bus[ptr]);
}
}
- clk_disable(sc->clk_io);
+ clk_disable_unprepare(sc->clk_io);
clk_put(sc->clk_io);
if (pdev->dev.of_node) {
ret = sdhci_runtime_suspend_host(host);
- clk_disable(ourhost->clk_bus[ourhost->cur_clk]);
- clk_disable(busclk);
+ clk_disable_unprepare(ourhost->clk_bus[ourhost->cur_clk]);
+ clk_disable_unprepare(busclk);
return ret;
}
struct clk *busclk = ourhost->clk_io;
int ret;
- clk_enable(busclk);
- clk_enable(ourhost->clk_bus[ourhost->cur_clk]);
+ clk_prepare_enable(busclk);
+ clk_prepare_enable(ourhost->clk_bus[ourhost->cur_clk]);
ret = sdhci_runtime_resume_host(host);
return ret;
}
*/
if ((host->flags & SDHCI_NEEDS_RETUNING) &&
!(present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ))) {
- /* eMMC uses cmd21 while sd and sdio use cmd19 */
- tuning_opcode = mmc->card->type == MMC_TYPE_MMC ?
- MMC_SEND_TUNING_BLOCK_HS200 :
- MMC_SEND_TUNING_BLOCK;
- spin_unlock_irqrestore(&host->lock, flags);
- sdhci_execute_tuning(mmc, tuning_opcode);
- spin_lock_irqsave(&host->lock, flags);
-
- /* Restore original mmc_request structure */
- host->mrq = mrq;
+ if (mmc->card) {
+ /* eMMC uses cmd21 but sd and sdio use cmd19 */
+ tuning_opcode =
+ mmc->card->type == MMC_TYPE_MMC ?
+ MMC_SEND_TUNING_BLOCK_HS200 :
+ MMC_SEND_TUNING_BLOCK;
+ spin_unlock_irqrestore(&host->lock, flags);
+ sdhci_execute_tuning(mmc, tuning_opcode);
+ spin_lock_irqsave(&host->lock, flags);
+
+ /* Restore original mmc_request structure */
+ host->mrq = mrq;
+ }
}
if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
mmc->caps |= MMC_CAP_4_BIT_DATA;
+ if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
+ mmc->caps &= ~MMC_CAP_CMD23;
+
if (caps[0] & SDHCI_CAN_DO_HISPD)
mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
/* If vqmmc regulator and no 1.8V signalling, then there's no UHS */
host->vqmmc = regulator_get(mmc_dev(mmc), "vqmmc");
- if (IS_ERR(host->vqmmc)) {
- pr_info("%s: no vqmmc regulator found\n", mmc_hostname(mmc));
- host->vqmmc = NULL;
+ if (IS_ERR_OR_NULL(host->vqmmc)) {
+ if (PTR_ERR(host->vqmmc) < 0) {
+ pr_info("%s: no vqmmc regulator found\n",
+ mmc_hostname(mmc));
+ host->vqmmc = NULL;
+ }
}
else if (regulator_is_supported_voltage(host->vqmmc, 1800000, 1800000))
regulator_enable(host->vqmmc);
ocr_avail = 0;
host->vmmc = regulator_get(mmc_dev(mmc), "vmmc");
- if (IS_ERR(host->vmmc)) {
- pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc));
- host->vmmc = NULL;
+ if (IS_ERR_OR_NULL(host->vmmc)) {
+ if (PTR_ERR(host->vmmc) < 0) {
+ pr_info("%s: no vmmc regulator found\n",
+ mmc_hostname(mmc));
+ host->vmmc = NULL;
+ }
} else
regulator_enable(host->vmmc);
void (*hw_reset)(struct sdhci_host *host);
void (*platform_suspend)(struct sdhci_host *host);
void (*platform_resume)(struct sdhci_host *host);
+ void (*platform_init)(struct sdhci_host *host);
};
#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
platform_set_drvdata(pdev, NULL);
+ clk_disable(host->hclk);
mmc_free_host(host->mmc);
pm_runtime_put_sync(&pdev->dev);
- clk_disable(host->hclk);
pm_runtime_disable(&pdev->dev);
return 0;
if (*(szlength) != '+') {
devlength = simple_strtoul(szlength, &buffer, 0);
- devlength = handle_unit(devlength, buffer) - devstart;
+ devlength = handle_unit(devlength, buffer);
if (devlength < devstart)
goto err_out;
/*
* Field definitions are in the following datasheets:
* Old style (4,5 byte ID): Samsung K9GAG08U0M (p.32)
- * New style (6 byte ID): Samsung K9GAG08U0F (p.44)
+ * New Samsung (6 byte ID): Samsung K9GAG08U0F (p.44)
* Hynix MLC (6 byte ID): Hynix H27UBG8T2B (p.22)
*
- * Check for ID length, cell type, and Hynix/Samsung ID to decide what
- * to do.
+ * Check for ID length, non-zero 6th byte, cell type, and Hynix/Samsung
+ * ID to decide what to do.
*/
- if (id_len == 6 && id_data[0] == NAND_MFR_SAMSUNG) {
+ if (id_len == 6 && id_data[0] == NAND_MFR_SAMSUNG &&
+ (chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
+ id_data[5] != 0x00) {
/* Calc pagesize */
mtd->writesize = 2048 << (extid & 0x03);
extid >>= 2;
nr_parts = plen / sizeof(part[0]);
*pparts = kzalloc(nr_parts * sizeof(*(*pparts)), GFP_KERNEL);
- if (!pparts)
+ if (!*pparts)
return -ENOMEM;
names = of_get_property(dp, "partition-names", &plen);
* flexonenand_set_boundary - Writes the SLC boundary
* @param mtd - mtd info structure
*/
-int flexonenand_set_boundary(struct mtd_info *mtd, int die,
+static int flexonenand_set_boundary(struct mtd_info *mtd, int die,
int boundary, int lock)
{
struct onenand_chip *this = mtd->priv;
struct net_device *bond_dev = bond->dev;
netdev_features_t vlan_features = BOND_VLAN_FEATURES;
unsigned short max_hard_header_len = ETH_HLEN;
+ unsigned int gso_max_size = GSO_MAX_SIZE;
+ u16 gso_max_segs = GSO_MAX_SEGS;
int i;
unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE;
dst_release_flag &= slave->dev->priv_flags;
if (slave->dev->hard_header_len > max_hard_header_len)
max_hard_header_len = slave->dev->hard_header_len;
+
+ gso_max_size = min(gso_max_size, slave->dev->gso_max_size);
+ gso_max_segs = min(gso_max_segs, slave->dev->gso_max_segs);
}
done:
bond_dev->vlan_features = vlan_features;
bond_dev->hard_header_len = max_hard_header_len;
+ bond_dev->gso_max_segs = gso_max_segs;
+ netif_set_gso_max_size(bond_dev, gso_max_size);
flags = bond_dev->priv_flags & ~IFF_XMIT_DST_RELEASE;
bond_dev->priv_flags = flags | dst_release_flag;
goto out;
}
- sscanf(buf, "%16s", ifname); /* IFNAMSIZ */
+ sscanf(buf, "%15s", ifname); /* IFNAMSIZ */
/* check to see if we are clearing primary */
if (!strlen(ifname) || buf[0] == '\n') {
goto out;
}
- sscanf(buf, "%16s", ifname); /* IFNAMSIZ */
+ sscanf(buf, "%15s", ifname); /* IFNAMSIZ */
/* check to see if we are clearing active */
if (!strlen(ifname) || buf[0] == '\n') {
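
The %16s -> %15s changes above matter because a %Ns conversion stores up to N characters plus a terminating NUL, so with a 16-byte IFNAMSIZ buffer the width must be 15. A standalone user-space illustration (hypothetical buffer contents):

	#include <stdio.h>

	int main(void)
	{
		char ifname[16];		/* IFNAMSIZ */
		const char *buf = "eth0-very-long-name";

		/* "%15s" stores at most 15 chars + '\0', which exactly fits
		 * ifname; "%16s" could write a 17th byte (the NUL) past the
		 * end of the buffer. */
		sscanf(buf, "%15s", ifname);
		printf("%s\n", ifname);
		return 0;
	}
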
#define FLEXCAN_MB_CODE_MASK (0xf0ffffff)
-/* FLEXCAN hardware feature flags */
+/*
+ * FLEXCAN hardware feature flags
+ *
+ * Below is some version info we got:
+ * SOC Version IP-Version Glitch- [TR]WRN_INT
+ * Filter? connected?
+ * MX25 FlexCAN2 03.00.00.00 no no
+ * MX28 FlexCAN2 03.00.04.00 yes yes
+ * MX35 FlexCAN2 03.00.00.00 no no
+ * MX53 FlexCAN2 03.00.00.00 yes no
+ * MX6s FlexCAN3 10.00.12.00 yes yes
+ *
+ * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected.
+ */
#define FLEXCAN_HAS_V10_FEATURES BIT(1) /* For core version >= 10 */
-#define FLEXCAN_HAS_BROKEN_ERR_STATE BIT(2) /* Broken error state handling */
+#define FLEXCAN_HAS_BROKEN_ERR_STATE BIT(2) /* [TR]WRN_INT not connected */
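
A hedged sketch of how the renamed flag is typically consumed (the priv->devtype_data layout and the FLEXCAN_CTRL_ERR_MSK fallback are assumptions here; the digest only shows the flag definitions):

	/* Hypothetical consumer: on SoCs whose [TR]WRN_INT lines are not
	 * connected, fall back to taking state changes from the error IRQ. */
	if (priv->devtype_data->features & FLEXCAN_HAS_BROKEN_ERR_STATE)
		reg_ctrl |= FLEXCAN_CTRL_ERR_MSK;	/* assumed mask name */
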
/* Structure of the message buffer */
struct flexcan_mb {
};
static struct flexcan_devtype_data fsl_imx28_devtype_data;
static struct flexcan_devtype_data fsl_imx6q_devtype_data = {
- .features = FLEXCAN_HAS_V10_FEATURES | FLEXCAN_HAS_BROKEN_ERR_STATE,
+ .features = FLEXCAN_HAS_V10_FEATURES,
};
static const struct can_bittiming_const flexcan_bittiming_const = {
#include "sja1000.h"
-MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
+MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>");
MODULE_DESCRIPTION("Socket-CAN driver for PEAK PCAN PCI family cards");
MODULE_SUPPORTED_DEVICE("PEAK PCAN PCI/PCIe/PCIeC miniPCI CAN cards");
+MODULE_SUPPORTED_DEVICE("PEAK PCAN miniPCIe/cPCI PC/104+ PCI/104e CAN Cards");
MODULE_LICENSE("GPL v2");
#define DRV_NAME "peak_pci"
#define PEAK_PCI_DEVICE_ID 0x0001 /* for PCI/PCIe slot cards */
#define PEAK_PCIEC_DEVICE_ID 0x0002 /* for ExpressCard slot cards */
#define PEAK_PCIE_DEVICE_ID 0x0003 /* for nextgen PCIe slot cards */
-#define PEAK_MPCI_DEVICE_ID 0x0008 /* The miniPCI slot cards */
+#define PEAK_CPCI_DEVICE_ID 0x0004 /* for nextgen cPCI slot cards */
+#define PEAK_MPCI_DEVICE_ID 0x0005 /* for nextgen miniPCI slot cards */
+#define PEAK_PC_104P_DEVICE_ID 0x0006 /* PCAN-PC/104+ cards */
+#define PEAK_PCI_104E_DEVICE_ID 0x0007 /* PCAN-PCI/104 Express cards */
+#define PEAK_MPCIE_DEVICE_ID 0x0008 /* The miniPCIe slot cards */
#define PEAK_PCI_CHAN_MAX 4
{PEAK_PCI_VENDOR_ID, PEAK_PCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
{PEAK_PCI_VENDOR_ID, PEAK_PCIE_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
{PEAK_PCI_VENDOR_ID, PEAK_MPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
+ {PEAK_PCI_VENDOR_ID, PEAK_MPCIE_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
+ {PEAK_PCI_VENDOR_ID, PEAK_PC_104P_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
+ {PEAK_PCI_VENDOR_ID, PEAK_PCI_104E_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
+ {PEAK_PCI_VENDOR_ID, PEAK_CPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
#ifdef CONFIG_CAN_PEAK_PCIEC
{PEAK_PCI_VENDOR_ID, PEAK_PCIEC_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
#endif
mc->pdev->dev.can.state = new_state;
if (status_len & PCAN_USB_STATUSLEN_TIMESTAMP) {
+ struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb);
+
peak_usb_get_ts_tv(&mc->pdev->time_ref, mc->ts16, &tv);
- skb->tstamp = timeval_to_ktime(tv);
+ hwts->hwtstamp = timeval_to_ktime(tv);
}
netif_rx(skb);
struct sk_buff *skb;
struct can_frame *cf;
struct timeval tv;
+ struct skb_shared_hwtstamps *hwts;
skb = alloc_can_skb(mc->netdev, &cf);
if (!skb)
/* convert timestamp into kernel time */
peak_usb_get_ts_tv(&mc->pdev->time_ref, mc->ts16, &tv);
- skb->tstamp = timeval_to_ktime(tv);
+ hwts = skb_hwtstamps(skb);
+ hwts->hwtstamp = timeval_to_ktime(tv);
/* push the skb */
netif_rx(skb);
struct can_frame *can_frame;
struct sk_buff *skb;
struct timeval tv;
+ struct skb_shared_hwtstamps *hwts;
skb = alloc_can_skb(netdev, &can_frame);
if (!skb)
memcpy(can_frame->data, rx->data, can_frame->can_dlc);
peak_usb_get_ts_tv(&usb_if->time_ref, le32_to_cpu(rx->ts32), &tv);
- skb->tstamp = timeval_to_ktime(tv);
+ hwts = skb_hwtstamps(skb);
+ hwts->hwtstamp = timeval_to_ktime(tv);
netif_rx(skb);
netdev->stats.rx_packets++;
u8 err_mask = 0;
struct sk_buff *skb;
struct timeval tv;
+ struct skb_shared_hwtstamps *hwts;
/* nothing should be sent while in BUS_OFF state */
if (dev->can.state == CAN_STATE_BUS_OFF)
dev->can.state = new_state;
peak_usb_get_ts_tv(&usb_if->time_ref, le32_to_cpu(er->ts32), &tv);
- skb->tstamp = timeval_to_ktime(tv);
+ hwts = skb_hwtstamps(skb);
+ hwts->hwtstamp = timeval_to_ktime(tv);
netif_rx(skb);
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += can_frame->can_dlc;
dev->irq = irq[this_dev];
dev->mem_end = bad[this_dev];
}
+ SET_NETDEV_DEV(dev, &pdev->dev);
err = do_ne_probe(dev);
if (err) {
free_netdev(dev);
SHMEM_EEE_ADV_STATUS_SHIFT);
if ((advertised != (eee_cfg & SHMEM_EEE_ADV_STATUS_MASK))) {
DP(BNX2X_MSG_ETHTOOL,
- "Direct manipulation of EEE advertisment is not supported\n");
+ "Direct manipulation of EEE advertisement is not supported\n");
return -EINVAL;
}
#define LINK_20GTFD LINK_STATUS_SPEED_AND_DUPLEX_20GTFD
#define LINK_20GXFD LINK_STATUS_SPEED_AND_DUPLEX_20GXFD
-
+#define LINK_UPDATE_MASK \
+ (LINK_STATUS_SPEED_AND_DUPLEX_MASK | \
+ LINK_STATUS_LINK_UP | \
+ LINK_STATUS_PHYSICAL_LINK_FLAG | \
+ LINK_STATUS_AUTO_NEGOTIATE_COMPLETE | \
+ LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK | \
+ LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK | \
+ LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK | \
+ LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE | \
+ LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE)
#define SFP_EEPROM_CON_TYPE_ADDR 0x2
#define SFP_EEPROM_CON_TYPE_VAL_LC 0x7
DEFAULT_PHY_DEV_ADDR);
}
+static void bnx2x_xgxs_specific_func(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u32 action)
+{
+ struct bnx2x *bp = params->bp;
+ switch (action) {
+ case PHY_INIT:
+ /* Set correct devad */
+ REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST + params->port*0x18, 0);
+ REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + params->port*0x18,
+ phy->def_md_devad);
+ break;
+ }
+}
+
static void bnx2x_xgxs_deassert(struct link_params *params)
{
struct bnx2x *bp = params->bp;
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
udelay(500);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
-
- REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST + port*0x18, 0);
- REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
- params->phy[INT_PHY].def_md_devad);
+ bnx2x_xgxs_specific_func(&params->phy[INT_PHY], params,
+ PHY_INIT);
}
static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars) {
- u16 val16 = 0, lane, i;
+ u16 lane, i, cl72_ctrl, an_adv = 0;
+ u16 ucode_ver;
struct bnx2x *bp = params->bp;
static struct bnx2x_reg_set reg_set[] = {
{MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7},
- {MDIO_AN_DEVAD, MDIO_WC_REG_PAR_DET_10G_CTRL, 0},
- {MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 0},
- {MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0xff},
- {MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0x5555},
{MDIO_PMA_DEVAD, MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0x0},
{MDIO_WC_DEVAD, MDIO_WC_REG_RX66_CONTROL, 0x7415},
{MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x6190},
bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
reg_set[i].val);
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, &cl72_ctrl);
+ cl72_ctrl &= 0xf8ff;
+ cl72_ctrl |= 0x3800;
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, cl72_ctrl);
+
/* Check adding advertisement for 1G KX */
if (((vars->line_speed == SPEED_AUTO_NEG) &&
(phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
(vars->line_speed == SPEED_1000)) {
u32 addr = MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2;
- val16 |= (1<<5);
+ an_adv |= (1<<5);
/* Enable CL37 1G Parallel Detect */
bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, addr, 0x1);
(phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) ||
(vars->line_speed == SPEED_10000)) {
/* Check adding advertisement for 10G KR */
- val16 |= (1<<7);
+ an_adv |= (1<<7);
/* Enable 10G Parallel Detect */
+ CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+ MDIO_AER_BLOCK_AER_REG, 0);
+
bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
MDIO_WC_REG_PAR_DET_10G_CTRL, 1);
-
+ bnx2x_set_aer_mmd(params, phy);
DP(NETIF_MSG_LINK, "Advertize 10G\n");
}
/* Advertised speeds */
bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
- MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, val16);
+ MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, an_adv);
/* Advertised and set FEC (Forward Error Correction) */
bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
/* Set KR Autoneg Work-Around flag for Warpcore version older than D108
*/
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_UC_INFO_B1_VERSION, &val16);
- if (val16 < 0xd108) {
- DP(NETIF_MSG_LINK, "Enable AN KR work-around\n");
+ MDIO_WC_REG_UC_INFO_B1_VERSION, &ucode_ver);
+ if (ucode_ver < 0xd108) {
+ DP(NETIF_MSG_LINK, "Enable AN KR work-around. WC ver:0x%x\n",
+ ucode_ver);
vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
}
bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
- u16 i;
+ u16 val16, i, lane;
static struct bnx2x_reg_set reg_set[] = {
/* Disable Autoneg */
{MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7},
- {MDIO_AN_DEVAD, MDIO_WC_REG_PAR_DET_10G_CTRL, 0},
{MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
0x3f00},
{MDIO_AN_DEVAD, MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, 0},
{MDIO_AN_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0},
{MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL3_UP1, 0x1},
{MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL5_MISC7, 0xa},
- /* Disable CL36 PCS Tx */
- {MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0x0},
- /* Double Wide Single Data Rate @ pll rate */
- {MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0xFFFF},
/* Leave cl72 training enable, needed for KR */
{MDIO_PMA_DEVAD,
MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150,
bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
reg_set[i].val);
- /* Leave CL72 enabled */
- bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
- 0x3800);
+ lane = bnx2x_get_warpcore_lane(phy, params);
+ /* Global registers */
+ CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+ MDIO_AER_BLOCK_AER_REG, 0);
+ /* Disable CL36 PCS Tx */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK1_LANECTRL0, &val16);
+ val16 &= ~(0x0011 << lane);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK1_LANECTRL0, val16);
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK1_LANECTRL1, &val16);
+ val16 |= (0x0303 << (lane << 1));
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK1_LANECTRL1, val16);
+ /* Restore AER */
+ bnx2x_set_aer_mmd(params, phy);
/* Set speed via PMA/PMD register */
bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040);
struct link_params *params)
{
struct bnx2x *bp = params->bp;
- u16 val16;
+ u16 val16, lane;
bnx2x_sfp_e3_set_transmitter(params, phy, 0);
bnx2x_set_mdio_clk(bp, params->chip_id, params->port);
bnx2x_set_aer_mmd(params, phy);
MDIO_WC_REG_XGXSBLK1_LANECTRL2,
val16 & 0xff00);
+ lane = bnx2x_get_warpcore_lane(phy, params);
+ /* Disable CL36 PCS Tx */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK1_LANECTRL0, &val16);
+ val16 |= (0x11 << lane);
+ if (phy->flags & FLAGS_WC_DUAL_MODE)
+ val16 |= (0x22 << lane);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK1_LANECTRL0, val16);
+
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK1_LANECTRL1, &val16);
+ val16 &= ~(0x0303 << (lane << 1));
+ val16 |= (0x0101 << (lane << 1));
+ if (phy->flags & FLAGS_WC_DUAL_MODE) {
+ val16 &= ~(0x0c0c << (lane << 1));
+ val16 |= (0x0404 << (lane << 1));
+ }
+
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK1_LANECTRL1, val16);
+ /* Restore AER */
+ bnx2x_set_aer_mmd(params, phy);
+
}
static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy,
vars->mac_type = MAC_TYPE_NONE;
/* Update shared memory */
- vars->link_status &= ~(LINK_STATUS_SPEED_AND_DUPLEX_MASK |
- LINK_STATUS_LINK_UP |
- LINK_STATUS_PHYSICAL_LINK_FLAG |
- LINK_STATUS_AUTO_NEGOTIATE_COMPLETE |
- LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK |
- LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK |
- LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK |
- LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE |
- LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE);
+ vars->link_status &= ~LINK_UPDATE_MASK;
vars->line_speed = 0;
bnx2x_update_mng(params, vars->link_status);
u16 ext_phy_line_speed = 0, prev_line_speed = vars->line_speed;
u8 active_external_phy = INT_PHY;
vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG;
+ vars->link_status &= ~LINK_UPDATE_MASK;
for (phy_index = INT_PHY; phy_index < params->num_phys;
phy_index++) {
phy_vars[phy_index].flow_ctrl = 0;
static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
struct link_params *params,
u16 addr, u8 byte_cnt,
- u8 *o_buf)
+ u8 *o_buf, u8 is_init)
{
int rc = 0;
u8 i, j = 0, cnt = 0;
/* 4 byte aligned address */
addr32 = addr & (~0x3);
do {
- if (cnt == I2C_WA_PWR_ITER) {
+ if ((!is_init) && (cnt == I2C_WA_PWR_ITER)) {
bnx2x_warpcore_power_module(params, phy, 0);
/* Note that 100us is not enough here */
- usleep_range(1000,1000);
+ usleep_range(1000, 2000);
bnx2x_warpcore_power_module(params, phy, 1);
}
rc = bnx2x_bsc_read(params, phy, 0xa0, addr32, 0, byte_cnt,
break;
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
rc = bnx2x_warpcore_read_sfp_module_eeprom(phy, params, addr,
- byte_cnt, o_buf);
+ byte_cnt, o_buf, 0);
break;
}
return rc;
{
u8 val;
+ int rc;
struct bnx2x *bp = params->bp;
u16 timeout;
/* Initialization time after hot-plug may take up to 300ms for
*/
for (timeout = 0; timeout < 60; timeout++) {
- if (bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, &val)
- == 0) {
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
+ rc = bnx2x_warpcore_read_sfp_module_eeprom(phy,
+ params, 1,
+ 1, &val, 1);
+ else
+ rc = bnx2x_read_sfp_module_eeprom(phy, params, 1, 1,
+ &val);
+ if (rc == 0) {
DP(NETIF_MSG_LINK,
"SFP+ module initialization took %d ms\n",
timeout * 5);
}
usleep_range(5000, 10000);
}
- return -EINVAL;
+ rc = bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, &val);
+ return rc;
}
static void bnx2x_8727_power_module(struct bnx2x *bp,
else
rc = bnx2x_8483x_disable_eee(phy, params, vars);
if (rc) {
- DP(NETIF_MSG_LINK, "Failed to set EEE advertisment\n");
+ DP(NETIF_MSG_LINK, "Failed to set EEE advertisement\n");
return rc;
}
} else {
.format_fw_ver = (format_fw_ver_t)NULL,
.hw_reset = (hw_reset_t)NULL,
.set_link_led = (set_link_led_t)NULL,
- .phy_specific_func = (phy_specific_func_t)NULL
+ .phy_specific_func = (phy_specific_func_t)bnx2x_xgxs_specific_func
};
static struct bnx2x_phy phy_warpcore = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
phy->media_type = ETH_PHY_BASE_T;
break;
case PORT_HW_CFG_NET_SERDES_IF_XFI:
+ phy->supported &= (SUPPORTED_1000baseT_Full |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause);
phy->media_type = ETH_PHY_XFP_FIBER;
break;
case PORT_HW_CFG_NET_SERDES_IF_SFI:
DP(NETIF_MSG_LINK, "Analyze TX Fault\n");
break;
default:
- DP(NETIF_MSG_LINK, "Analyze UNKOWN\n");
+ DP(NETIF_MSG_LINK, "Analyze UNKNOWN\n");
}
DP(NETIF_MSG_LINK, "Link changed:[%x %x]->%x\n", vars->link_up,
old_status, status);
bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
+ bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
+
if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
- bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
if (IS_MF(bp))
low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
*/
static void __devinit bnx2x_prev_interrupted_dmae(struct bnx2x *bp)
{
- u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
- if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
- BNX2X_ERR("was error bit was found to be set in pglueb upon startup. Clearing");
- REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << BP_FUNC(bp));
+ if (!CHIP_IS_E1x(bp)) {
+ u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
+ if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
+ BNX2X_ERR("was error bit was found to be set in pglueb upon startup. Clearing");
+ REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
+ 1 << BP_FUNC(bp));
+ }
}
}
/* disable FCOE L2 queue for E1x */
if (CHIP_IS_E1x(bp))
bp->flags |= NO_FCOE_FLAG;
-
+ /* disable FCOE for 57840 device, until FW supports it */
+ switch (ent->driver_data) {
+ case BCM57840_O:
+ case BCM57840_4_10:
+ case BCM57840_2_20:
+ case BCM57840_MFO:
+ case BCM57840_MF:
+ bp->flags |= NO_FCOE_FLAG;
+ }
#endif
unsigned char rev; /* chip revision */
unsigned char offload;
+ unsigned char bypass;
+
unsigned int ofldq_wr_cred;
};
#define for_each_port(adapter, iter) \
for (iter = 0; iter < (adapter)->params.nports; ++iter)
+static inline int is_bypass(struct adapter *adap)
+{
+ return adap->params.bypass;
+}
+
+static inline int is_bypass_device(int device)
+{
+ /* this should be set based upon device capabilities */
+ switch (device) {
+ case 0x440b:
+ case 0x440c:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
{
return adap->params.vpd.cclk / 1000;
finicsum, cfcsum);
/*
- * If we're a pure NIC driver then disable all offloading facilities.
- * This will allow the firmware to optimize aspects of the hardware
- * configuration which will result in improved performance.
- */
- caps_cmd.ofldcaps = 0;
- caps_cmd.iscsicaps = 0;
- caps_cmd.rdmacaps = 0;
- caps_cmd.fcoecaps = 0;
-
- /*
* And now tell the firmware to use the configuration we just loaded.
*/
caps_cmd.op_to_write =
if (ret < 0)
goto bye;
-#ifndef CONFIG_CHELSIO_T4_OFFLOAD
- /*
- * If we're a pure NIC driver then disable all offloading facilities.
- * This will allow the firmware to optimize aspects of the hardware
- * configuration which will result in improved performance.
- */
- caps_cmd.ofldcaps = 0;
- caps_cmd.iscsicaps = 0;
- caps_cmd.rdmacaps = 0;
- caps_cmd.fcoecaps = 0;
-#endif
-
if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
if (!vf_acls)
caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
u32 v, port_vec;
enum dev_state state;
u32 params[7], val[7];
+ struct fw_caps_config_cmd caps_cmd;
int reset = 1, j;
/*
goto bye;
}
+ if (is_bypass_device(adap->pdev->device))
+ adap->params.bypass = 1;
+
/*
* Grab some of our basic fundamental operating parameters.
*/
adap->tids.aftid_end = val[1];
}
-#ifdef CONFIG_CHELSIO_T4_OFFLOAD
/*
* Get device capabilities so we can determine what resources we need
* to manage.
*/
memset(&caps_cmd, 0, sizeof(caps_cmd));
- caps_cmd.op_to_write = htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+ caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
FW_CMD_REQUEST | FW_CMD_READ);
caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd));
ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
adap->vres.ddp.size = val[4] - val[3] + 1;
adap->params.ofldq_wr_cred = val[5];
- params[0] = FW_PARAM_PFVF(ETHOFLD_START);
- params[1] = FW_PARAM_PFVF(ETHOFLD_END);
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
- params, val);
- if ((val[0] != val[1]) && (ret >= 0)) {
- adap->tids.uotid_base = val[0];
- adap->tids.nuotids = val[1] - val[0] + 1;
- }
-
adap->params.offload = 1;
}
if (caps_cmd.rdmacaps) {
}
#undef FW_PARAM_PFVF
#undef FW_PARAM_DEV
-#endif /* CONFIG_CHELSIO_T4_OFFLOAD */
/*
* These are finalized by FW initialization, load their values now.
unsigned int ftid_base;
unsigned int aftid_base;
unsigned int aftid_end;
+ /* Server filter region */
+ unsigned int sftid_base;
+ unsigned int nsftids;
spinlock_t atid_lock ____cacheline_aligned_in_smp;
union aopen_entry *afree;
{
struct fw_bye_cmd c;
+ memset(&c, 0, sizeof(c));
INIT_CMD(c, BYE, WRITE);
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
{
struct fw_initialize_cmd c;
+ memset(&c, 0, sizeof(c));
INIT_CMD(c, INITIALIZE, WRITE);
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
{
struct fw_reset_cmd c;
+ memset(&c, 0, sizeof(c));
INIT_CMD(c, RESET, WRITE);
c.val = htonl(reset);
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
HOSTPAGESIZEPF7(sge_hps));
t4_set_reg_field(adap, SGE_CONTROL,
- INGPADBOUNDARY(INGPADBOUNDARY_MASK) |
+ INGPADBOUNDARY_MASK |
EGRSTATUSPAGESIZE_MASK,
INGPADBOUNDARY(fl_align_log - 5) |
EGRSTATUSPAGESIZE(stat_len != 64));
{
struct fw_vi_enable_cmd c;
+ memset(&c, 0, sizeof(c));
c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
struct gfar_private *priv = dev_get_drvdata(dev);
struct net_device *ndev = priv->ndev;
- if (!netif_running(ndev))
+ if (!netif_running(ndev)) {
+ netif_device_attach(ndev);
+
return 0;
+ }
gfar_init_bds(ndev);
init_registers(ndev);
pr_err("no resource\n");
goto no_resource;
}
- if (request_resource(&ioport_resource, etsects->rsrc)) {
+ if (request_resource(&iomem_resource, etsects->rsrc)) {
pr_err("resource busy\n");
goto no_resource;
}
case ixgbe_mac_X540:
case ixgbe_mac_82599EB:
info->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
jme_clear_pm(jme);
JME_NAPI_ENABLE(jme);
- tasklet_enable(&jme->linkch_task);
- tasklet_enable(&jme->txclean_task);
- tasklet_hi_enable(&jme->rxclean_task);
- tasklet_hi_enable(&jme->rxempty_task);
+ tasklet_init(&jme->linkch_task, jme_link_change_tasklet,
+ (unsigned long) jme);
+ tasklet_init(&jme->txclean_task, jme_tx_clean_tasklet,
+ (unsigned long) jme);
+ tasklet_init(&jme->rxclean_task, jme_rx_clean_tasklet,
+ (unsigned long) jme);
+ tasklet_init(&jme->rxempty_task, jme_rx_empty_tasklet,
+ (unsigned long) jme);
rc = jme_request_irq(jme);
if (rc)
JME_NAPI_DISABLE(jme);
- tasklet_disable(&jme->linkch_task);
- tasklet_disable(&jme->txclean_task);
- tasklet_disable(&jme->rxclean_task);
- tasklet_disable(&jme->rxempty_task);
+ tasklet_kill(&jme->linkch_task);
+ tasklet_kill(&jme->txclean_task);
+ tasklet_kill(&jme->rxclean_task);
+ tasklet_kill(&jme->rxempty_task);
jme_disable_rx_engine(jme);
jme_disable_tx_engine(jme);
tasklet_init(&jme->pcc_task,
jme_pcc_tasklet,
(unsigned long) jme);
- tasklet_init(&jme->linkch_task,
- jme_link_change_tasklet,
- (unsigned long) jme);
- tasklet_init(&jme->txclean_task,
- jme_tx_clean_tasklet,
- (unsigned long) jme);
- tasklet_init(&jme->rxclean_task,
- jme_rx_clean_tasklet,
- (unsigned long) jme);
- tasklet_init(&jme->rxempty_task,
- jme_rx_empty_tasklet,
- (unsigned long) jme);
- tasklet_disable_nosync(&jme->linkch_task);
- tasklet_disable_nosync(&jme->txclean_task);
- tasklet_disable_nosync(&jme->rxclean_task);
- tasklet_disable_nosync(&jme->rxempty_task);
jme->dpi.cur = PCC_P1;
jme->reg_ghc = 0;
dev0 = hw->dev[0];
unregister_netdev(dev0);
- tasklet_disable(&hw->phy_task);
+ tasklet_kill(&hw->phy_task);
spin_lock_irq(&hw->hw_lock);
hw->intr_mask = 0;
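
The jme and skge hunks above replace enable/disable pairs with an init-at-open, kill-at-teardown lifecycle: tasklet_kill() waits for a running tasklet and removes a pending schedule, while tasklet_disable() merely blocks execution and can leave a scheduled tasklet re-queuing in the softirq. A minimal sketch of the pattern (hypothetical driver names):

	static void my_rx_tasklet(unsigned long data)
	{
		struct my_priv *p = (struct my_priv *)data;
		/* process completed RX descriptors for p */
	}

	static int my_open(struct my_priv *p)
	{
		tasklet_init(&p->rx_task, my_rx_tasklet, (unsigned long)p);
		return 0;
	}

	static void my_close(struct my_priv *p)
	{
		/* waits for a running instance and drops any pending schedule */
		tasklet_kill(&p->rx_task);
	}
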
if (err)
return err;
- memcpy(priv->maxrate, tmp, sizeof(*priv->maxrate));
+ memcpy(priv->maxrate, tmp, sizeof(priv->maxrate));
return 0;
}
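
The one-character fix above is the classic array-member sizeof pitfall: for an array field, sizeof(p->arr) is the whole array while sizeof(*p->arr) is a single element. A standalone illustration (hypothetical struct):

	#include <stdio.h>
	#include <string.h>

	struct cfg {
		unsigned short maxrate[8];
	};

	int main(void)
	{
		struct cfg c;
		unsigned short tmp[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };

		memcpy(c.maxrate, tmp, sizeof(c.maxrate));	/* 16 bytes: whole array */
		/* sizeof(*c.maxrate) is 2: only the first element would be copied */
		printf("%zu vs %zu\n", sizeof(c.maxrate), sizeof(*c.maxrate));
		return 0;
	}
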
mlx4_bf_free(mdev->dev, &ring->bf);
mlx4_qp_remove(mdev->dev, &ring->qp);
mlx4_qp_free(mdev->dev, &ring->qp);
- mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
mlx4_en_unmap_buffer(&ring->wqres.buf);
mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
kfree(ring->bounce_buf);
if (bounce)
tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size);
- if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tag) {
+ if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tx_tag_present(skb)) {
*(__be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn);
op_own |= htonl((bf_index & 0xffff) << 8);
/* Ensure new descirptor hits memory
ctx = &priv->mfunc.master.slave_state[slave];
spin_lock_irqsave(&ctx->lock, flags);
- mlx4_dbg(dev, "%s: slave: %d, current state: %d new event :%d\n",
- __func__, slave, cur_state, event);
-
switch (cur_state) {
case SLAVE_PORT_DOWN:
if (MLX4_PORT_STATE_DEV_EVENT_PORT_UP == event)
goto out;
}
ret = mlx4_get_slave_port_state(dev, slave, port);
- mlx4_dbg(dev, "%s: slave: %d, current state: %d new event"
- " :%d gen_event: %d\n",
- __func__, slave, cur_state, event, *gen_event);
out:
spin_unlock_irqrestore(&ctx->lock, flags);
return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4);
}
+static void mlx4_unmap_uar(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int i;
+
+ for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
+ if (priv->eq_table.uar_map[i]) {
+ iounmap(priv->eq_table.uar_map[i]);
+ priv->eq_table.uar_map[i] = NULL;
+ }
+}
+
static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
u8 intr, struct mlx4_eq *eq)
{
mlx4_free_irqs(dev);
err_out_bitmap:
+ mlx4_unmap_uar(dev);
mlx4_bitmap_cleanup(&priv->eq_table.bitmap);
err_out_free:
if (!mlx4_is_slave(dev))
mlx4_unmap_clr_int(dev);
- for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
- if (priv->eq_table.uar_map[i])
- iounmap(priv->eq_table.uar_map[i]);
-
+ mlx4_unmap_uar(dev);
mlx4_bitmap_cleanup(&priv->eq_table.bitmap);
kfree(priv->eq_table.uar_map);
unmap_bf_area(dev);
err_close:
- mlx4_close_hca(dev);
+ if (mlx4_is_slave(dev))
+ mlx4_slave_exit(dev);
+ else
+ mlx4_CLOSE_HCA(dev, 0);
err_free_icm:
if (!mlx4_is_slave(dev))
new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
*(u8 *)(inbox->buf + 35) = new_index;
-
- mlx4_dbg(dev, "port = %d, orig pkey index = %d, "
- "new pkey index = %d\n", port, orig_index, new_index);
}
static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
qp_ctx->alt_path.mgid_index = slave & 0x7F;
}
-
- mlx4_dbg(dev, "slave %d, new gid index: 0x%x ",
- slave, qp_ctx->pri_path.mgid_index);
}
static int mpt_mask(struct mlx4_dev *dev)
/* Delay for receive task to stop scheduling itself. */
msleep(2000 / HZ);
- tasklet_disable(&hw_priv->rx_tasklet);
- tasklet_disable(&hw_priv->tx_tasklet);
+ tasklet_kill(&hw_priv->rx_tasklet);
+ tasklet_kill(&hw_priv->tx_tasklet);
free_irq(dev->irq, hw_priv->dev);
transmit_cleanup(hw_priv, 0);
rc = request_irq(dev->irq, netdev_intr, IRQF_SHARED, dev->name, dev);
if (rc)
return rc;
- tasklet_enable(&hw_priv->rx_tasklet);
- tasklet_enable(&hw_priv->tx_tasklet);
+ tasklet_init(&hw_priv->rx_tasklet, rx_proc_task,
+ (unsigned long) hw_priv);
+ tasklet_init(&hw_priv->tx_tasklet, tx_proc_task,
+ (unsigned long) hw_priv);
hw->promiscuous = 0;
hw->all_multi = 0;
spin_lock_init(&hw_priv->hwlock);
mutex_init(&hw_priv->lock);
- /* tasklet is enabled. */
- tasklet_init(&hw_priv->rx_tasklet, rx_proc_task,
- (unsigned long) hw_priv);
- tasklet_init(&hw_priv->tx_tasklet, tx_proc_task,
- (unsigned long) hw_priv);
-
- /* tasklet_enable will decrement the atomic counter. */
- tasklet_disable(&hw_priv->rx_tasklet);
- tasklet_disable(&hw_priv->tx_tasklet);
-
for (i = 0; i < TOTAL_PORT_NUM; i++)
init_waitqueue_head(&hw_priv->counter[i].counter);
pldat->dma_buff_base_p);
free_irq(ndev->irq, ndev);
iounmap(pldat->net_base);
+ mdiobus_unregister(pldat->mii_bus);
mdiobus_free(pldat->mii_bus);
clk_disable(pldat->clk);
clk_put(pldat->clk);
}
/**
- * pch_gbe_wait_clr_bit_irq - Wait to clear a bit for interrupt context
- * @reg: Pointer of register
- * @busy: Busy bit
- */
-static int pch_gbe_wait_clr_bit_irq(void *reg, u32 bit)
-{
- u32 tmp;
- int ret = -1;
- /* wait busy */
- tmp = 20;
- while ((ioread32(reg) & bit) && --tmp)
- udelay(5);
- if (!tmp)
- pr_err("Error: busy bit is not cleared\n");
- else
- ret = 0;
- return ret;
-}
-
-/**
* pch_gbe_mac_mar_set - Set MAC address register
* @hw: Pointer to the HW structure
* @addr: Pointer to the MAC address
return;
}
-static void pch_gbe_mac_reset_rx(struct pch_gbe_hw *hw)
+static void pch_gbe_disable_mac_rx(struct pch_gbe_hw *hw)
{
- /* Read the MAC addresses. and store to the private data */
- pch_gbe_mac_read_mac_addr(hw);
- iowrite32(PCH_GBE_RX_RST, &hw->reg->RESET);
- pch_gbe_wait_clr_bit_irq(&hw->reg->RESET, PCH_GBE_RX_RST);
- /* Setup the MAC addresses */
- pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
- return;
+ u32 rctl;
+ /* Disables Receive MAC */
+ rctl = ioread32(&hw->reg->MAC_RX_EN);
+ iowrite32((rctl & ~PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN);
+}
+
+static void pch_gbe_enable_mac_rx(struct pch_gbe_hw *hw)
+{
+ u32 rctl;
+ /* Enables Receive MAC */
+ rctl = ioread32(&hw->reg->MAC_RX_EN);
+ iowrite32((rctl | PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN);
}
/**
static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter)
{
struct pch_gbe_hw *hw = &adapter->hw;
- u32 rdba, rdlen, rctl, rxdma;
+ u32 rdba, rdlen, rxdma;
pr_debug("dma adr = 0x%08llx size = 0x%08x\n",
(unsigned long long)adapter->rx_ring->dma,
pch_gbe_mac_force_mac_fc(hw);
- /* Disables Receive MAC */
- rctl = ioread32(&hw->reg->MAC_RX_EN);
- iowrite32((rctl & ~PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN);
+ pch_gbe_disable_mac_rx(hw);
/* Disables Receive DMA */
rxdma = ioread32(&hw->reg->DMA_CTRL);
spin_unlock_irqrestore(&adapter->stats_lock, flags);
}
-static void pch_gbe_stop_receive(struct pch_gbe_adapter *adapter)
+static void pch_gbe_disable_dma_rx(struct pch_gbe_hw *hw)
{
- struct pch_gbe_hw *hw = &adapter->hw;
u32 rxdma;
- u16 value;
- int ret;
/* Disable Receive DMA */
rxdma = ioread32(&hw->reg->DMA_CTRL);
rxdma &= ~PCH_GBE_RX_DMA_EN;
iowrite32(rxdma, &hw->reg->DMA_CTRL);
- /* Wait Rx DMA BUS is IDLE */
- ret = pch_gbe_wait_clr_bit_irq(&hw->reg->RX_DMA_ST, PCH_GBE_IDLE_CHECK);
- if (ret) {
- /* Disable Bus master */
- pci_read_config_word(adapter->pdev, PCI_COMMAND, &value);
- value &= ~PCI_COMMAND_MASTER;
- pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
- /* Stop Receive */
- pch_gbe_mac_reset_rx(hw);
- /* Enable Bus master */
- value |= PCI_COMMAND_MASTER;
- pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
- } else {
- /* Stop Receive */
- pch_gbe_mac_reset_rx(hw);
- }
- /* reprogram multicast address register after reset */
- pch_gbe_set_multi(adapter->netdev);
}
-static void pch_gbe_start_receive(struct pch_gbe_hw *hw)
+static void pch_gbe_enable_dma_rx(struct pch_gbe_hw *hw)
{
u32 rxdma;
rxdma = ioread32(&hw->reg->DMA_CTRL);
rxdma |= PCH_GBE_RX_DMA_EN;
iowrite32(rxdma, &hw->reg->DMA_CTRL);
- /* Enables Receive */
- iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN);
- return;
}
/**
int_en = ioread32(&hw->reg->INT_EN);
iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR),
&hw->reg->INT_EN);
- pch_gbe_stop_receive(adapter);
+ pch_gbe_disable_dma_rx(&adapter->hw);
int_st |= ioread32(&hw->reg->INT_ST);
int_st = int_st & ioread32(&hw->reg->INT_EN);
}
struct net_device *netdev = adapter->netdev;
struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
- int err;
+ int err = -EINVAL;
/* Ensure we have a valid MAC */
if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
pr_err("Error: Invalid MAC address\n");
- return -EINVAL;
+ goto out;
}
/* hardware has been reset, we need to reload some things */
err = pch_gbe_request_irq(adapter);
if (err) {
- pr_err("Error: can't bring device up\n");
- return err;
+ pr_err("Error: can't bring device up - irq request failed\n");
+ goto out;
}
err = pch_gbe_alloc_rx_buffers_pool(adapter, rx_ring, rx_ring->count);
if (err) {
- pr_err("Error: can't bring device up\n");
- return err;
+ pr_err("Error: can't bring device up - alloc rx buffers pool failed\n");
+ goto freeirq;
}
pch_gbe_alloc_tx_buffers(adapter, tx_ring);
pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count);
adapter->tx_queue_len = netdev->tx_queue_len;
- pch_gbe_start_receive(&adapter->hw);
+ pch_gbe_enable_dma_rx(&adapter->hw);
+ pch_gbe_enable_mac_rx(&adapter->hw);
mod_timer(&adapter->watchdog_timer, jiffies);
netif_start_queue(adapter->netdev);
return 0;
+
+freeirq:
+ pch_gbe_free_irq(adapter);
+out:
+ return err;
}
/**
int work_done = 0;
bool poll_end_flag = false;
bool cleaned = false;
- u32 int_en;
pr_debug("budget : %d\n", budget);
if (poll_end_flag) {
napi_complete(napi);
- if (adapter->rx_stop_flag) {
- adapter->rx_stop_flag = false;
- pch_gbe_start_receive(&adapter->hw);
- }
pch_gbe_irq_enable(adapter);
- } else
- if (adapter->rx_stop_flag) {
- adapter->rx_stop_flag = false;
- pch_gbe_start_receive(&adapter->hw);
- int_en = ioread32(&adapter->hw.reg->INT_EN);
- iowrite32((int_en | PCH_GBE_INT_RX_FIFO_ERR),
- &adapter->hw.reg->INT_EN);
- }
+ }
+
+ if (adapter->rx_stop_flag) {
+ adapter->rx_stop_flag = false;
+ pch_gbe_enable_dma_rx(&adapter->hw);
+ }
pr_debug("poll_end_flag : %d work_done : %d budget : %d\n",
poll_end_flag, work_done, budget);
qdev->req_q_size =
(u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req));
+ qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb);
+
+ /* The barrier is required to ensure that the request and response
+ * queue address writes reach the registers in order.
+ */
+ wmb();
+
qdev->req_q_virt_addr =
pci_alloc_consistent(qdev->pdev,
(size_t) qdev->req_q_size,
return -ENOMEM;
}
- qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb);
-
qdev->rsp_q_virt_addr =
pci_alloc_consistent(qdev->pdev,
(size_t) qdev->rsp_q_size,
cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
- cpw32_f(HiTxRingAddr, 0);
- cpw32_f(HiTxRingAddr + 4, 0);
-
- ring_dma = cp->ring_dma;
- cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
- cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
-
- ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
- cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
- cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
-
cp_start_hw(cp);
cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */
cpw8(Config5, cpr8(Config5) & PMEStatus);
+ cpw32_f(HiTxRingAddr, 0);
+ cpw32_f(HiTxRingAddr + 4, 0);
+
+ ring_dma = cp->ring_dma;
+ cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
+ cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
+
+ ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
+ cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
+ cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
+
cpw16(MultiIntr, 0);
cpw8_f(Cfg9346, Cfg9346_Lock);
void __iomem *ioaddr = tp->mmio_addr;
switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_25:
+ case RTL_GIGA_MAC_VER_26:
case RTL_GIGA_MAC_VER_29:
case RTL_GIGA_MAC_VER_30:
case RTL_GIGA_MAC_VER_32:
mc_filter[1] = swab32(data);
}
+ if (tp->mac_version == RTL_GIGA_MAC_VER_35)
+ mc_filter[1] = mc_filter[0] = 0xffffffff;
+
RTL_W32(MAR0 + 4, mc_filter[1]);
RTL_W32(MAR0 + 0, mc_filter[0]);
netif_start_queue(net_dev);
/* Workaround for EDB */
- sis900_set_mode(ioaddr, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);
+ sis900_set_mode(sis_priv, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);
/* Enable all known interrupts by setting the interrupt mask. */
sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE);
static int __devinit smsc911x_init(struct net_device *dev)
{
struct smsc911x_data *pdata = netdev_priv(dev);
- unsigned int byte_test;
+ unsigned int byte_test, mask;
unsigned int to = 100;
SMSC_TRACE(pdata, probe, "Driver Parameters:");
/*
* poll the READY bit in PMT_CTRL. Any other access to the device is
* forbidden while this bit isn't set. Try for 100ms
+ *
+ * Note that this test is done before the WORD_SWAP register is
+ * programmed. So in some configurations the READY bit is at bit 16 before
+ * WORD_SWAP is written to. This issue is worked around by waiting
+ * until either bit 0 or bit 16 gets set in PMT_CTRL.
+ *
+ * SMSC has confirmed that checking bit 16 (marked as reserved in
+ * the datasheet) is fine since these bits "will either never be set
+ * or can only go high after READY does (so also indicate the device
+ * is ready)".
*/
- while (!(smsc911x_reg_read(pdata, PMT_CTRL) & PMT_CTRL_READY_) && --to)
+
+ mask = PMT_CTRL_READY_ | swahw32(PMT_CTRL_READY_);
+ while (!(smsc911x_reg_read(pdata, PMT_CTRL) & mask) && --to)
udelay(1000);
+
if (to == 0) {
pr_err("Device not READY in 100ms aborting\n");
return -ENODEV;
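
For reference, swahw32() (from linux/swab.h) swaps the two 16-bit halfwords of a 32-bit value, so assuming PMT_CTRL_READY_ is bit 0 (the define itself is not shown in this digest), the computed mask covers bits 0 and 16 at once:

	/* Sketch of the mask construction, assuming PMT_CTRL_READY_ == BIT(0). */
	u32 ready = 0x00000001;			/* PMT_CTRL_READY_ (assumed value) */
	u32 mask  = ready | swahw32(ready);	/* 0x00000001 | 0x00010000 = 0x00010001 */
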
config NET_VENDOR_TI
bool "Texas Instruments (TI) devices"
default y
- depends on PCI || EISA || AR7 || (ARM && (ARCH_DAVINCI || ARCH_OMAP3))
+ depends on PCI || EISA || AR7 || (ARM && (ARCH_DAVINCI || ARCH_OMAP3 || SOC_AM33XX))
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
ingress_irq = rc;
tile_irq_activate(ingress_irq, TILE_IRQ_PERCPU);
rc = request_irq(ingress_irq, tile_net_handle_ingress_irq,
- 0, NULL, NULL);
+ 0, "tile_net", NULL);
if (rc != 0) {
netdev_err(dev, "request_irq failed: %d\n", rc);
destroy_irq(ingress_irq);
{
struct skb_shared_info *sh = skb_shinfo(skb);
unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
- unsigned int data_len = skb->data_len + skb->hdr_len - sh_len;
+ unsigned int data_len = skb->len - sh_len;
unsigned int p_len = sh->gso_size;
long f_id = -1; /* id of the current fragment */
- long f_size = skb->hdr_len; /* size of the current fragment */
- long f_used = sh_len; /* bytes used from the current fragment */
+ long f_size = skb_headlen(skb) - sh_len; /* current fragment size */
+ long f_used = 0; /* bytes used from the current fragment */
long n; /* size of the current piece of payload */
int num_edescs = 0;
int segment;
/* Advance as needed. */
while (f_used >= f_size) {
f_id++;
- f_size = sh->frags[f_id].size;
+ f_size = skb_frag_size(&sh->frags[f_id]);
f_used = 0;
}
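
The recurring change in these TSO hunks is the payload accounting: everything past the protocol headers counts as payload, and the first "fragment" is the payload portion of the linear area, not skb->hdr_len. A worked example with assumed sizes:

	/* Assumed TSO skb: skb->len = 7240 total bytes, sh_len = 54 header
	 * bytes, skb_headlen(skb) = 120 bytes in the linear area. */
	unsigned int data_len = 7240 - 54;	/* 7186 payload bytes overall */
	long f_size = 120 - 54;			/* 66 payload bytes in the linear area */
	long f_used = 0;			/* nothing consumed yet */
	/* Each segment then consumes gso_size payload bytes: the first 66
	 * come from the linear area, the rest from sh->frags[] via
	 * skb_frag_size(). */
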
struct iphdr *ih;
struct tcphdr *th;
unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
- unsigned int data_len = skb->data_len + skb->hdr_len - sh_len;
+ unsigned int data_len = skb->len - sh_len;
unsigned char *data = skb->data;
unsigned int ih_off, th_off, p_len;
unsigned int isum_seed, tsum_seed, id, seq;
long f_id = -1; /* id of the current fragment */
- long f_size = skb->hdr_len; /* size of the current fragment */
- long f_used = sh_len; /* bytes used from the current fragment */
+ long f_size = skb_headlen(skb) - sh_len; /* current fragment size */
+ long f_used = 0; /* bytes used from the current fragment */
long n; /* size of the current piece of payload */
int segment;
isum_seed = ((0xFFFF - ih->check) +
(0xFFFF - ih->tot_len) +
(0xFFFF - ih->id));
- tsum_seed = th->check + (0xFFFF ^ htons(sh_len + data_len));
+ tsum_seed = th->check + (0xFFFF ^ htons(skb->len));
id = ntohs(ih->id);
seq = ntohl(th->seq);
/* Advance as needed. */
while (f_used >= f_size) {
f_id++;
- f_size = sh->frags[f_id].size;
+ f_size = skb_frag_size(&sh->frags[f_id]);
f_used = 0;
}
struct tile_net_priv *priv = netdev_priv(dev);
struct skb_shared_info *sh = skb_shinfo(skb);
unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
- unsigned int data_len = skb->data_len + skb->hdr_len - sh_len;
+ unsigned int data_len = skb->len - sh_len;
unsigned int p_len = sh->gso_size;
gxio_mpipe_edesc_t edesc_head = { { 0 } };
gxio_mpipe_edesc_t edesc_body = { { 0 } };
long f_id = -1; /* id of the current fragment */
- long f_size = skb->hdr_len; /* size of the current fragment */
- long f_used = sh_len; /* bytes used from the current fragment */
- void *f_data = skb->data;
+ long f_size = skb_headlen(skb) - sh_len; /* current fragment size */
+ long f_used = 0; /* bytes used from the current fragment */
+ void *f_data = skb->data + sh_len;
long n; /* size of the current piece of payload */
unsigned long tx_packets = 0, tx_bytes = 0;
unsigned int csum_start;
/* Egress the payload. */
while (p_used < p_len) {
+ void *va;
/* Advance as needed. */
while (f_used >= f_size) {
f_id++;
- f_size = sh->frags[f_id].size;
- f_used = 0;
+ f_size = skb_frag_size(&sh->frags[f_id]);
f_data = tile_net_frag_buf(&sh->frags[f_id]);
+ f_used = 0;
}
+ va = f_data + f_used;
+
/* Use bytes from the current fragment. */
n = p_len - p_used;
if (n > f_size - f_used)
p_used += n;
/* Egress a piece of the payload. */
- edesc_body.va = va_to_tile_io_addr(f_data) + f_used;
+ edesc_body.va = va_to_tile_io_addr(va);
edesc_body.xfer_size = n;
edesc_body.bound = !(p_used < p_len);
gxio_mpipe_equeue_put_at(equeue, edesc_body, slot);
return IRQ_HANDLED;
}
+static void axienet_dma_err_handler(unsigned long data);
+
/**
* axienet_open - Driver open routine.
* @ndev: Pointer to net_device structure
phy_start(lp->phy_dev);
}
+ /* Enable tasklets for Axi DMA error handling */
+ tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler,
+ (unsigned long) lp);
+
/* Enable interrupts for Axi DMA Tx */
ret = request_irq(lp->tx_irq, axienet_tx_irq, 0, ndev->name, ndev);
if (ret)
ret = request_irq(lp->rx_irq, axienet_rx_irq, 0, ndev->name, ndev);
if (ret)
goto err_rx_irq;
- /* Enable tasklets for Axi DMA error handling */
- tasklet_enable(&lp->dma_err_tasklet);
+
return 0;
err_rx_irq:
if (lp->phy_dev)
phy_disconnect(lp->phy_dev);
lp->phy_dev = NULL;
+ tasklet_kill(&lp->dma_err_tasklet);
dev_err(lp->dev, "request_irq() failed\n");
return ret;
}
axienet_setoptions(ndev, lp->options &
~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
- tasklet_disable(&lp->dma_err_tasklet);
+ tasklet_kill(&lp->dma_err_tasklet);
free_irq(lp->tx_irq, ndev);
free_irq(lp->rx_irq, ndev);
goto err_iounmap_2;
}
- tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler,
- (unsigned long) lp);
- tasklet_disable(&lp->dma_err_tasklet);
-
return 0;
err_iounmap_2:
{
int i;
- if (!ports_open)
- if (!(dma_pool = dma_pool_create(DRV_NAME, NULL,
- POOL_ALLOC_SIZE, 32, 0)))
+ if (!ports_open) {
+ dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev,
+ POOL_ALLOC_SIZE, 32, 0);
+ if (!dma_pool)
return -ENOMEM;
+ }
if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
&port->desc_tab_phys)))
break;
case SIRDEV_STATE_DONGLE_SPEED:
- if (dev->dongle_drv->reset) {
+ if (dev->dongle_drv->set_speed) {
ret = dev->dongle_drv->set_speed(dev, fsm->param);
if (ret < 0) {
fsm->result = ret;
comment "MII PHY device drivers"
+config AT803X_PHY
+ tristate "Drivers for Atheros AT803X PHYs"
+ ---help---
+ Currently supports the AT8030 and AT8035 models
+
config AMD_PHY
tristate "Drivers for the AMD PHYs"
---help---
obj-$(CONFIG_MICREL_PHY) += micrel.o
obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o
obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o
+obj-$(CONFIG_AT803X_PHY) += at803x.o
obj-$(CONFIG_AMD_PHY) += amd.o
obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o
obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o
--- /dev/null
+/*
+ * drivers/net/phy/at803x.c
+ *
+ * Driver for Atheros 803x PHY
+ *
+ * Author: Matus Ujhelyi <ujhelyi.m@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/phy.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#define AT803X_INTR_ENABLE 0x12
+#define AT803X_INTR_STATUS 0x13
+#define AT803X_WOL_ENABLE 0x01
+#define AT803X_DEVICE_ADDR 0x03
+#define AT803X_LOC_MAC_ADDR_0_15_OFFSET 0x804C
+#define AT803X_LOC_MAC_ADDR_16_31_OFFSET 0x804B
+#define AT803X_LOC_MAC_ADDR_32_47_OFFSET 0x804A
+#define AT803X_MMD_ACCESS_CONTROL 0x0D
+#define AT803X_MMD_ACCESS_CONTROL_DATA 0x0E
+#define AT803X_FUNC_DATA 0x4003
+
+MODULE_DESCRIPTION("Atheros 803x PHY driver");
+MODULE_AUTHOR("Matus Ujhelyi");
+MODULE_LICENSE("GPL");
+
+static void at803x_set_wol_mac_addr(struct phy_device *phydev)
+{
+ struct net_device *ndev = phydev->attached_dev;
+ const u8 *mac;
+ unsigned int i, offsets[] = {
+ AT803X_LOC_MAC_ADDR_32_47_OFFSET,
+ AT803X_LOC_MAC_ADDR_16_31_OFFSET,
+ AT803X_LOC_MAC_ADDR_0_15_OFFSET,
+ };
+
+ if (!ndev)
+ return;
+
+ mac = (const u8 *) ndev->dev_addr;
+
+ if (!is_valid_ether_addr(mac))
+ return;
+
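+ /* Indirect MMD write sequence (IEEE 802.3 Clause 22 Annex 22D):
+ * 1. reg 0x0D <- devad (function bits = address)
+ * 2. reg 0x0E <- register offset within that MMD
+ * 3. reg 0x0D <- 0x4000 | devad (function bits = data)
+ * 4. reg 0x0E <- the data word itself
+ */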
+ for (i = 0; i < 3; i++) {
+ phy_write(phydev, AT803X_MMD_ACCESS_CONTROL,
+ AT803X_DEVICE_ADDR);
+ phy_write(phydev, AT803X_MMD_ACCESS_CONTROL_DATA,
+ offsets[i]);
+ phy_write(phydev, AT803X_MMD_ACCESS_CONTROL,
+ AT803X_FUNC_DATA);
+ phy_write(phydev, AT803X_MMD_ACCESS_CONTROL_DATA,
+ mac[(i * 2) + 1] | (mac[(i * 2)] << 8));
+ }
+}
+
+static int at803x_config_init(struct phy_device *phydev)
+{
+ int val;
+ u32 features;
+ int status;
+
+ features = SUPPORTED_TP | SUPPORTED_MII | SUPPORTED_AUI |
+ SUPPORTED_FIBRE | SUPPORTED_BNC;
+
+ val = phy_read(phydev, MII_BMSR);
+ if (val < 0)
+ return val;
+
+ if (val & BMSR_ANEGCAPABLE)
+ features |= SUPPORTED_Autoneg;
+ if (val & BMSR_100FULL)
+ features |= SUPPORTED_100baseT_Full;
+ if (val & BMSR_100HALF)
+ features |= SUPPORTED_100baseT_Half;
+ if (val & BMSR_10FULL)
+ features |= SUPPORTED_10baseT_Full;
+ if (val & BMSR_10HALF)
+ features |= SUPPORTED_10baseT_Half;
+
+ if (val & BMSR_ESTATEN) {
+ val = phy_read(phydev, MII_ESTATUS);
+ if (val < 0)
+ return val;
+
+ if (val & ESTATUS_1000_TFULL)
+ features |= SUPPORTED_1000baseT_Full;
+ if (val & ESTATUS_1000_THALF)
+ features |= SUPPORTED_1000baseT_Half;
+ }
+
+ phydev->supported = features;
+ phydev->advertising = features;
+
+ /* enable WOL */
+ at803x_set_wol_mac_addr(phydev);
+ status = phy_write(phydev, AT803X_INTR_ENABLE, AT803X_WOL_ENABLE);
+ status = phy_read(phydev, AT803X_INTR_STATUS);
+
+ return 0;
+}
+
+/* ATHEROS 8035 */
+static struct phy_driver at8035_driver = {
+ .phy_id = 0x004dd072,
+ .name = "Atheros 8035 ethernet",
+ .phy_id_mask = 0xffffffef,
+ .config_init = at803x_config_init,
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_aneg = &genphy_config_aneg,
+ .read_status = &genphy_read_status,
+ .driver = {
+ .owner = THIS_MODULE,
+ },
+};
+
+/* ATHEROS 8030 */
+static struct phy_driver at8030_driver = {
+ .phy_id = 0x004dd076,
+ .name = "Atheros 8030 ethernet",
+ .phy_id_mask = 0xffffffef,
+ .config_init = at803x_config_init,
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_aneg = &genphy_config_aneg,
+ .read_status = &genphy_read_status,
+ .driver = {
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init atheros_init(void)
+{
+ int ret;
+
+ ret = phy_driver_register(&at8035_driver);
+ if (ret)
+ goto err1;
+
+ ret = phy_driver_register(&at8030_driver);
+ if (ret)
+ goto err2;
+
+ return 0;
+
+err2:
+ phy_driver_unregister(&at8035_driver);
+err1:
+ return ret;
+}
+
+static void __exit atheros_exit(void)
+{
+ phy_driver_unregister(&at8035_driver);
+ phy_driver_unregister(&at8030_driver);
+}
+
+module_init(atheros_init);
+module_exit(atheros_exit);
+
+static struct mdio_device_id __maybe_unused atheros_tbl[] = {
+ { 0x004dd076, 0xffffffef },
+ { 0x004dd072, 0xffffffef },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, atheros_tbl);
{
struct mdio_gpio_platform_data *pdata;
struct mii_bus *new_bus;
- int ret;
+ int ret, bus_id;
- if (pdev->dev.of_node)
+ if (pdev->dev.of_node) {
pdata = mdio_gpio_of_get_data(pdev);
- else
+ bus_id = of_alias_get_id(pdev->dev.of_node, "mdio-gpio");
+ } else {
pdata = pdev->dev.platform_data;
+ bus_id = pdev->id;
+ }
if (!pdata)
return -ENODEV;
- new_bus = mdio_gpio_bus_init(&pdev->dev, pdata, pdev->id);
+ new_bus = mdio_gpio_bus_init(&pdev->dev, pdata, bus_id);
if (!new_bus)
return -ENODEV;
dev->features |= NETIF_F_LLTX;
dev->features |= NETIF_F_GRO;
- dev->hw_features = NETIF_F_HW_VLAN_TX |
+ dev->hw_features = TEAM_VLAN_FEATURES |
+ NETIF_F_HW_VLAN_TX |
NETIF_F_HW_VLAN_RX |
NETIF_F_HW_VLAN_FILTER;
+ dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM);
dev->features |= dev->hw_features;
}
if (last) {
skb2 = skb_clone(skb, GFP_ATOMIC);
if (skb2) {
- ret = team_dev_queue_xmit(team, last,
- skb2);
+ ret = !team_dev_queue_xmit(team, last,
+ skb2);
if (!sum_ret)
sum_ret = ret;
}
}
}
if (last) {
- ret = team_dev_queue_xmit(team, last, skb);
+ ret = !team_dev_queue_xmit(team, last, skb);
if (!sum_ret)
sum_ret = ret;
}
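
The added '!' above converts the 0-on-success convention of the queue-xmit path into the boolean that sum_ret accumulates. In shorthand (hypothetical one-liner):

	/* team_dev_queue_xmit() follows dev_queue_xmit(): 0 means the skb was
	 * queued successfully, so '!' yields true on success. */
	bool sent = !team_dev_queue_xmit(team, port, skb2);
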
#include <linux/usb/cdc.h>
#include <linux/usb/usbnet.h>
#include <linux/gfp.h>
+#include <linux/if_vlan.h>
/*
/* no jumbogram (16K) support for now */
- dev->net->hard_header_len += EEM_HEAD + ETH_FCS_LEN;
+ dev->net->hard_header_len += EEM_HEAD + ETH_FCS_LEN + VLAN_HLEN;
dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
return 0;
.driver_info = 0,
},
+/* Novatel USB551L and MC551 - handled by qmi_wwan */
+{
+ .match_flags = USB_DEVICE_ID_MATCH_VENDOR
+ | USB_DEVICE_ID_MATCH_PRODUCT
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = NOVATEL_VENDOR_ID,
+ .idProduct = 0xB001,
+ .bInterfaceClass = USB_CLASS_COMM,
+ .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
+ .bInterfaceProtocol = USB_CDC_PROTO_NONE,
+ .driver_info = 0,
+},
+
+/* Novatel E362 - handled by qmi_wwan */
+{
+ .match_flags = USB_DEVICE_ID_MATCH_VENDOR
+ | USB_DEVICE_ID_MATCH_PRODUCT
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = NOVATEL_VENDOR_ID,
+ .idProduct = 0x9010,
+ .bInterfaceClass = USB_CLASS_COMM,
+ .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
+ .bInterfaceProtocol = USB_CDC_PROTO_NONE,
+ .driver_info = 0,
+},
+
/*
* WHITELIST!!!
*
* because of bugs/quirks in a given product (like Zaurus, above).
*/
{
- /* Novatel USB551L */
- /* This match must come *before* the generic CDC-ETHER match so that
- * we get FLAG_WWAN set on the device, since it's descriptors are
- * generic CDC-ETHER.
- */
- .match_flags = USB_DEVICE_ID_MATCH_VENDOR
- | USB_DEVICE_ID_MATCH_PRODUCT
- | USB_DEVICE_ID_MATCH_INT_INFO,
- .idVendor = NOVATEL_VENDOR_ID,
- .idProduct = 0xB001,
- .bInterfaceClass = USB_CLASS_COMM,
- .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
- .bInterfaceProtocol = USB_CDC_PROTO_NONE,
- .driver_info = (unsigned long)&wwan_info,
-}, {
/* ZTE (Vodafone) K3805-Z */
.match_flags = USB_DEVICE_ID_MATCH_VENDOR
| USB_DEVICE_ID_MATCH_PRODUCT
(ctx->ether_desc == NULL) || (ctx->control != intf))
goto error;
- /* claim interfaces, if any */
- temp = usb_driver_claim_interface(driver, ctx->data, dev);
- if (temp)
- goto error;
+ /* claim data interface, if different from control */
+ if (ctx->data != ctx->control) {
+ temp = usb_driver_claim_interface(driver, ctx->data, dev);
+ if (temp)
+ goto error;
+ }
iface_no = ctx->data->cur_altsetting->desc.bInterfaceNumber;
tasklet_kill(&ctx->bh);
+ /* handle devices with combined control and data interface */
+ if (ctx->control == ctx->data)
+ ctx->data = NULL;
+
/* disconnect master --> disconnect slave */
if (intf == ctx->control && ctx->data) {
usb_set_intfdata(ctx->data, NULL);
.driver_info = (unsigned long) &wwan_info,
},
+ /* Huawei NCM devices disguised as vendor specific */
+ { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x16),
+ .driver_info = (unsigned long)&wwan_info,
+ },
+ { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x46),
+ .driver_info = (unsigned long)&wwan_info,
+ },
+
/* Generic CDC-NCM devices */
{ USB_INTERFACE_INFO(USB_CLASS_COMM,
USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
#define USB_PRODUCT_IPAD 0x129a
#define USB_PRODUCT_IPHONE_4_VZW 0x129c
#define USB_PRODUCT_IPHONE_4S 0x12a0
+#define USB_PRODUCT_IPHONE_5 0x12a8
#define IPHETH_USBINTF_CLASS 255
#define IPHETH_USBINTF_SUBCLASS 253
USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4S,
IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
IPHETH_USBINTF_PROTO) },
+ { USB_DEVICE_AND_INTERFACE_INFO(
+ USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_5,
+ IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
+ IPHETH_USBINTF_PROTO) },
{ }
};
MODULE_DEVICE_TABLE(usb, ipheth_table);
USB_VENDOR_AND_INTERFACE_INFO(0x106c, USB_CLASS_VENDOR_SPEC, 0xf1, 0xff),
.driver_info = (unsigned long)&qmi_wwan_info,
},
+ { /* Novatel USB551L and MC551 */
+ USB_DEVICE_AND_INTERFACE_INFO(0x1410, 0xb001,
+ USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&qmi_wwan_info,
+ },
+ { /* Novatel E362 */
+ USB_DEVICE_AND_INTERFACE_INFO(0x1410, 0x9010,
+ USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&qmi_wwan_info,
+ },
/* 3. Combined interface devices matching on interface number */
+ {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
+ {QMI_FIXED_INTF(0x19d2, 0x0002, 1)},
+ {QMI_FIXED_INTF(0x19d2, 0x0012, 1)},
+ {QMI_FIXED_INTF(0x19d2, 0x0017, 3)},
+ {QMI_FIXED_INTF(0x19d2, 0x0021, 4)},
+ {QMI_FIXED_INTF(0x19d2, 0x0025, 1)},
+ {QMI_FIXED_INTF(0x19d2, 0x0031, 4)},
+ {QMI_FIXED_INTF(0x19d2, 0x0042, 4)},
+ {QMI_FIXED_INTF(0x19d2, 0x0049, 5)},
+ {QMI_FIXED_INTF(0x19d2, 0x0052, 4)},
{QMI_FIXED_INTF(0x19d2, 0x0055, 1)}, /* ZTE (Vodafone) K3520-Z */
+ {QMI_FIXED_INTF(0x19d2, 0x0058, 4)},
{QMI_FIXED_INTF(0x19d2, 0x0063, 4)}, /* ZTE (Vodafone) K3565-Z */
{QMI_FIXED_INTF(0x19d2, 0x0104, 4)}, /* ZTE (Vodafone) K4505-Z */
+ {QMI_FIXED_INTF(0x19d2, 0x0113, 5)},
+ {QMI_FIXED_INTF(0x19d2, 0x0118, 5)},
+ {QMI_FIXED_INTF(0x19d2, 0x0121, 5)},
+ {QMI_FIXED_INTF(0x19d2, 0x0123, 4)},
+ {QMI_FIXED_INTF(0x19d2, 0x0124, 5)},
+ {QMI_FIXED_INTF(0x19d2, 0x0125, 6)},
+ {QMI_FIXED_INTF(0x19d2, 0x0126, 5)},
+ {QMI_FIXED_INTF(0x19d2, 0x0130, 1)},
+ {QMI_FIXED_INTF(0x19d2, 0x0133, 3)},
+ {QMI_FIXED_INTF(0x19d2, 0x0141, 5)},
{QMI_FIXED_INTF(0x19d2, 0x0157, 5)}, /* ZTE MF683 */
+ {QMI_FIXED_INTF(0x19d2, 0x0158, 3)},
{QMI_FIXED_INTF(0x19d2, 0x0167, 4)}, /* ZTE MF820D */
+ {QMI_FIXED_INTF(0x19d2, 0x0168, 4)},
+ {QMI_FIXED_INTF(0x19d2, 0x0176, 3)},
+ {QMI_FIXED_INTF(0x19d2, 0x0178, 3)},
+ {QMI_FIXED_INTF(0x19d2, 0x0191, 4)}, /* ZTE EuFi890 */
+ {QMI_FIXED_INTF(0x19d2, 0x0199, 1)}, /* ZTE MF820S */
+ {QMI_FIXED_INTF(0x19d2, 0x0200, 1)},
+ {QMI_FIXED_INTF(0x19d2, 0x0257, 3)}, /* ZTE MF821 */
{QMI_FIXED_INTF(0x19d2, 0x0326, 4)}, /* ZTE MF821D */
{QMI_FIXED_INTF(0x19d2, 0x1008, 4)}, /* ZTE (Vodafone) K3570-Z */
{QMI_FIXED_INTF(0x19d2, 0x1010, 4)}, /* ZTE (Vodafone) K3571-Z */
+ {QMI_FIXED_INTF(0x19d2, 0x1012, 4)},
{QMI_FIXED_INTF(0x19d2, 0x1018, 3)}, /* ZTE (Vodafone) K5006-Z */
+ {QMI_FIXED_INTF(0x19d2, 0x1021, 2)},
+ {QMI_FIXED_INTF(0x19d2, 0x1245, 4)},
+ {QMI_FIXED_INTF(0x19d2, 0x1247, 4)},
+ {QMI_FIXED_INTF(0x19d2, 0x1252, 4)},
+ {QMI_FIXED_INTF(0x19d2, 0x1254, 4)},
+ {QMI_FIXED_INTF(0x19d2, 0x1255, 3)},
+ {QMI_FIXED_INTF(0x19d2, 0x1255, 4)},
+ {QMI_FIXED_INTF(0x19d2, 0x1256, 4)},
+ {QMI_FIXED_INTF(0x19d2, 0x1401, 2)},
{QMI_FIXED_INTF(0x19d2, 0x1402, 2)}, /* ZTE MF60 */
+ {QMI_FIXED_INTF(0x19d2, 0x1424, 2)},
+ {QMI_FIXED_INTF(0x19d2, 0x1425, 2)},
+ {QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */
{QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
{QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
/* set the address, index & direction (read from PHY) */
phy_id &= dev->mii.phy_id_mask;
idx &= dev->mii.reg_num_mask;
- addr = (phy_id << 11) | (idx << 6) | MII_READ_;
+ addr = (phy_id << 11) | (idx << 6) | MII_READ_ | MII_BUSY_;
ret = smsc95xx_write_reg(dev, MII_ADDR, addr);
check_warn_goto_done(ret, "Error writing MII_ADDR");
/* set the address, index & direction (write to PHY) */
phy_id &= dev->mii.phy_id_mask;
idx &= dev->mii.reg_num_mask;
- addr = (phy_id << 11) | (idx << 6) | MII_WRITE_;
+ addr = (phy_id << 11) | (idx << 6) | MII_WRITE_ | MII_BUSY_;
ret = smsc95xx_write_reg(dev, MII_ADDR, addr);
check_warn_goto_done(ret, "Error writing MII_ADDR");
} else {
u32 csum_preamble = smsc95xx_calc_csum_preamble(skb);
skb_push(skb, 4);
+ cpu_to_le32s(&csum_preamble);
memcpy(skb->data, &csum_preamble, 4);
}
}
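
cpu_to_le32s() byte-swaps in place, so the checksum preamble reaches the wire little-endian on big-endian CPUs as well. The same idea with the value-returning helper, as a self-contained sketch:

#include <linux/types.h>
#include <linux/string.h>
#include <asm/byteorder.h>

/* Sketch: serialise a CPU-order u32 into a little-endian wire field
 * without assuming anything about the destination's alignment. */
static void put_le32(void *dst, u32 val)
{
	__le32 le = cpu_to_le32(val);

	memcpy(dst, &le, sizeof(le));
}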
void usbnet_defer_kevent (struct usbnet *dev, int work)
{
set_bit (work, &dev->flags);
- if (!schedule_work (&dev->kevent))
- netdev_err(dev->net, "kevent %d may have been dropped\n", work);
- else
+ if (!schedule_work (&dev->kevent)) {
+ if (net_ratelimit())
+ netdev_err(dev->net, "kevent %d may have been dropped\n", work);
+ } else {
netdev_dbg(dev->net, "kevent %d scheduled\n", work);
+ }
}
EXPORT_SYMBOL_GPL(usbnet_defer_kevent);
usb_anchor_urb(urb, &dev->deferred);
/* no use to process more packets */
netif_stop_queue(net);
+ usb_put_urb(urb);
spin_unlock_irqrestore(&dev->txq.lock, flags);
netdev_dbg(dev->net, "Delaying transmission for resumption\n");
goto deferred;
cancel_work_sync(&dev->kevent);
+ usb_scuttle_anchored_urbs(&dev->deferred);
+
if (dev->driver_info->unbind)
dev->driver_info->unbind (dev, intf);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+ u32 buf_size;
- tbi = tq->buf_info + tq->tx_ring.next2fill;
- tbi->map_type = VMXNET3_MAP_PAGE;
- tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
- 0, skb_frag_size(frag),
- DMA_TO_DEVICE);
+ buf_offset = 0;
+ len = skb_frag_size(frag);
+ while (len) {
+ tbi = tq->buf_info + tq->tx_ring.next2fill;
+ if (len < VMXNET3_MAX_TX_BUF_SIZE) {
+ buf_size = len;
+ dw2 |= len;
+ } else {
+ buf_size = VMXNET3_MAX_TX_BUF_SIZE;
+ /* spec says that for TxDesc.len, 0 == 2^14 */
+ }
+ tbi->map_type = VMXNET3_MAP_PAGE;
+ tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
+ buf_offset, buf_size,
+ DMA_TO_DEVICE);
- tbi->len = skb_frag_size(frag);
+ tbi->len = buf_size;
- gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
- BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
+ gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
+ BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
- gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
- gdesc->dword[2] = cpu_to_le32(dw2 | skb_frag_size(frag));
- gdesc->dword[3] = 0;
+ gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
+ gdesc->dword[2] = cpu_to_le32(dw2);
+ gdesc->dword[3] = 0;
- dev_dbg(&adapter->netdev->dev,
- "txd[%u]: 0x%llu %u %u\n",
- tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
- le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
- vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
- dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
+ dev_dbg(&adapter->netdev->dev,
+ "txd[%u]: 0x%llu %u %u\n",
+ tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
+ le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
+ vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
+ dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
+
+ len -= buf_size;
+ buf_offset += buf_size;
+ }
}
ctx->eop_txd = gdesc;
}
}
+static int txd_estimate(const struct sk_buff *skb)
+{
+ int count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
+ int i;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+
+ count += VMXNET3_TXD_NEEDED(skb_frag_size(frag));
+ }
+ return count;
+}
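
The old estimate charged one descriptor per fragment; txd_estimate() charges VMXNET3_TXD_NEEDED() per fragment because the loop above now splits any frag larger than the descriptor limit. A worked restatement, assuming VMXNET3_TXD_NEEDED() is a ceiling division by VMXNET3_MAX_TX_BUF_SIZE (16384):

#include <linux/kernel.h>

/* Hypothetical restatement of the per-frag descriptor math. */
static int txd_needed(unsigned int bytes)
{
	return DIV_ROUND_UP(bytes, 16384);	/* VMXNET3_MAX_TX_BUF_SIZE */
}
/* txd_needed(30000) == 2, where the old "+1 per frag" estimate saw 1. */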
/*
* Transmits a pkt thru a given tq
union Vmxnet3_GenericDesc tempTxDesc;
#endif
- /* conservatively estimate # of descriptors to use */
- count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) +
- skb_shinfo(skb)->nr_frags + 1;
+ count = txd_estimate(skb);
ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP));
/*
- * VXLAN: Virtual eXtensiable Local Area Network
+ * VXLAN: Virtual eXtensible Local Area Network
*
* Copyright (c) 2012 Vyatta Inc.
*
#define VXLAN_N_VID (1u << 24)
#define VXLAN_VID_MASK (VXLAN_N_VID - 1)
-/* VLAN + IP header + UDP + VXLAN */
-#define VXLAN_HEADROOM (4 + 20 + 8 + 8)
+/* IP header + UDP + VXLAN + Ethernet header */
+#define VXLAN_HEADROOM (20 + 8 + 8 + 14)
#define VXLAN_FLAGS 0x08000000 /* struct vxlanhdr.vx_flags required value. */
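
The corrected constant budgets the full outer encapsulation instead of a VLAN tag: outer IPv4 (20) + UDP (8) + VXLAN (8) + inner Ethernet (14) = 50 bytes. Restated with the standard header types (a sketch; the driver keeps the literal sum):

#include <linux/if_ether.h>	/* ETH_HLEN == 14 */
#include <linux/ip.h>		/* sizeof(struct iphdr) == 20, no options */
#include <linux/udp.h>		/* sizeof(struct udphdr) == 8 */

#define VXLAN_HLEN_ALT	8	/* VXLAN header: flags + VNI + reserved */

/* 20 + 8 + 8 + 14 == 50, matching the literal definition above */
#define VXLAN_HEADROOM_ALT \
	(sizeof(struct iphdr) + sizeof(struct udphdr) + VXLAN_HLEN_ALT + ETH_HLEN)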
= container_of(p, struct vxlan_fdb, hlist);
unsigned long timeout;
- if (f->state == NUD_PERMANENT)
+ if (f->state & NUD_PERMANENT)
continue;
timeout = f->used + vxlan->age_interval * HZ;
if (!tb[IFLA_MTU])
dev->mtu = lowerdev->mtu - VXLAN_HEADROOM;
+
+ /* update header length based on lower device */
+ dev->hard_header_len = lowerdev->hard_header_len +
+ VXLAN_HEADROOM;
}
if (data[IFLA_VXLAN_TOS])
{
int i;
- if (!ports_open)
- if (!(dma_pool = dma_pool_create(DRV_NAME, NULL,
- POOL_ALLOC_SIZE, 32, 0)))
+ if (!ports_open) {
+ dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev,
+ POOL_ALLOC_SIZE, 32, 0);
+ if (!dma_pool)
return -ENOMEM;
+ }
if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
&port->desc_tab_phys)))
platform_set_drvdata(pdev, port);
- netdev_info(dev, "HSS-%i\n", port->id);
+ netdev_info(dev, "initialized\n");
return 0;
err_free_netdev:
static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x0000a2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
- {0x0000a2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584},
- {0x0000a2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800},
{0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
{0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
{0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
{0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
- {0x0000a510, 0x15000028, 0x15000028, 0x0f000202, 0x0f000202},
- {0x0000a514, 0x1b00002b, 0x1b00002b, 0x12000400, 0x12000400},
- {0x0000a518, 0x1f020028, 0x1f020028, 0x16000402, 0x16000402},
- {0x0000a51c, 0x2502002b, 0x2502002b, 0x19000404, 0x19000404},
- {0x0000a520, 0x2a04002a, 0x2a04002a, 0x1c000603, 0x1c000603},
- {0x0000a524, 0x2e06002a, 0x2e06002a, 0x21000a02, 0x21000a02},
- {0x0000a528, 0x3302202d, 0x3302202d, 0x25000a04, 0x25000a04},
- {0x0000a52c, 0x3804202c, 0x3804202c, 0x28000a20, 0x28000a20},
- {0x0000a530, 0x3c06202c, 0x3c06202c, 0x2c000e20, 0x2c000e20},
- {0x0000a534, 0x4108202d, 0x4108202d, 0x30000e22, 0x30000e22},
- {0x0000a538, 0x4506402d, 0x4506402d, 0x34000e24, 0x34000e24},
- {0x0000a53c, 0x4906222d, 0x4906222d, 0x38001640, 0x38001640},
- {0x0000a540, 0x4d062231, 0x4d062231, 0x3c001660, 0x3c001660},
- {0x0000a544, 0x50082231, 0x50082231, 0x3f001861, 0x3f001861},
- {0x0000a548, 0x5608422e, 0x5608422e, 0x43001a81, 0x43001a81},
- {0x0000a54c, 0x5a08442e, 0x5a08442e, 0x47001a83, 0x47001a83},
- {0x0000a550, 0x5e0a4431, 0x5e0a4431, 0x4a001c84, 0x4a001c84},
- {0x0000a554, 0x640a4432, 0x640a4432, 0x4e001ce3, 0x4e001ce3},
- {0x0000a558, 0x680a4434, 0x680a4434, 0x52001ce5, 0x52001ce5},
- {0x0000a55c, 0x6c0a6434, 0x6c0a6434, 0x56001ce9, 0x56001ce9},
- {0x0000a560, 0x6f0a6633, 0x6f0a6633, 0x5a001ceb, 0x5a001ceb},
- {0x0000a564, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
- {0x0000a568, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
- {0x0000a56c, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
- {0x0000a570, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
- {0x0000a574, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
- {0x0000a578, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
- {0x0000a57c, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
+ {0x0000a518, 0x21002220, 0x21002220, 0x16000402, 0x16000402},
+ {0x0000a51c, 0x27002223, 0x27002223, 0x19000404, 0x19000404},
+ {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
+ {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
+ {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
+ {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
+ {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
+ {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
+ {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
+ {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
+ {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
+ {0x0000a544, 0x52022470, 0x52022470, 0x3f001861, 0x3f001861},
+ {0x0000a548, 0x55022490, 0x55022490, 0x43001a81, 0x43001a81},
+ {0x0000a54c, 0x59022492, 0x59022492, 0x47001a83, 0x47001a83},
+ {0x0000a550, 0x5d022692, 0x5d022692, 0x4a001c84, 0x4a001c84},
+ {0x0000a554, 0x61022892, 0x61022892, 0x4e001ce3, 0x4e001ce3},
+ {0x0000a558, 0x65024890, 0x65024890, 0x52001ce5, 0x52001ce5},
+ {0x0000a55c, 0x69024892, 0x69024892, 0x56001ce9, 0x56001ce9},
+ {0x0000a560, 0x6e024c92, 0x6e024c92, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a564, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
+ {0x0000a568, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
+ {0x0000a56c, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
+ {0x0000a570, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
+ {0x0000a574, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
+ {0x0000a578, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
+ {0x0000a57c, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
{0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
{0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
{0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
{0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
- {0x0000a590, 0x15800028, 0x15800028, 0x0f800202, 0x0f800202},
- {0x0000a594, 0x1b80002b, 0x1b80002b, 0x12800400, 0x12800400},
- {0x0000a598, 0x1f820028, 0x1f820028, 0x16800402, 0x16800402},
- {0x0000a59c, 0x2582002b, 0x2582002b, 0x19800404, 0x19800404},
- {0x0000a5a0, 0x2a84002a, 0x2a84002a, 0x1c800603, 0x1c800603},
- {0x0000a5a4, 0x2e86002a, 0x2e86002a, 0x21800a02, 0x21800a02},
- {0x0000a5a8, 0x3382202d, 0x3382202d, 0x25800a04, 0x25800a04},
- {0x0000a5ac, 0x3884202c, 0x3884202c, 0x28800a20, 0x28800a20},
- {0x0000a5b0, 0x3c86202c, 0x3c86202c, 0x2c800e20, 0x2c800e20},
- {0x0000a5b4, 0x4188202d, 0x4188202d, 0x30800e22, 0x30800e22},
- {0x0000a5b8, 0x4586402d, 0x4586402d, 0x34800e24, 0x34800e24},
- {0x0000a5bc, 0x4986222d, 0x4986222d, 0x38801640, 0x38801640},
- {0x0000a5c0, 0x4d862231, 0x4d862231, 0x3c801660, 0x3c801660},
- {0x0000a5c4, 0x50882231, 0x50882231, 0x3f801861, 0x3f801861},
- {0x0000a5c8, 0x5688422e, 0x5688422e, 0x43801a81, 0x43801a81},
- {0x0000a5cc, 0x5a88442e, 0x5a88442e, 0x47801a83, 0x47801a83},
- {0x0000a5d0, 0x5e8a4431, 0x5e8a4431, 0x4a801c84, 0x4a801c84},
- {0x0000a5d4, 0x648a4432, 0x648a4432, 0x4e801ce3, 0x4e801ce3},
- {0x0000a5d8, 0x688a4434, 0x688a4434, 0x52801ce5, 0x52801ce5},
- {0x0000a5dc, 0x6c8a6434, 0x6c8a6434, 0x56801ce9, 0x56801ce9},
- {0x0000a5e0, 0x6f8a6633, 0x6f8a6633, 0x5a801ceb, 0x5a801ceb},
- {0x0000a5e4, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
- {0x0000a5e8, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
- {0x0000a5ec, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
- {0x0000a5f0, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
- {0x0000a5f4, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
- {0x0000a5f8, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
- {0x0000a5fc, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+ {0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x1c800223, 0x1c800223, 0x12800400, 0x12800400},
+ {0x0000a598, 0x21802220, 0x21802220, 0x16800402, 0x16800402},
+ {0x0000a59c, 0x27802223, 0x27802223, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1c800603, 0x1c800603},
+ {0x0000a5a4, 0x2f822222, 0x2f822222, 0x21800a02, 0x21800a02},
+ {0x0000a5a8, 0x34822225, 0x34822225, 0x25800a04, 0x25800a04},
+ {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x28800a20, 0x28800a20},
+ {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2c800e20, 0x2c800e20},
+ {0x0000a5b4, 0x4282242a, 0x4282242a, 0x30800e22, 0x30800e22},
+ {0x0000a5b8, 0x4782244a, 0x4782244a, 0x34800e24, 0x34800e24},
+ {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x38801640, 0x38801640},
+ {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x3c801660, 0x3c801660},
+ {0x0000a5c4, 0x52822470, 0x52822470, 0x3f801861, 0x3f801861},
+ {0x0000a5c8, 0x55822490, 0x55822490, 0x43801a81, 0x43801a81},
+ {0x0000a5cc, 0x59822492, 0x59822492, 0x47801a83, 0x47801a83},
+ {0x0000a5d0, 0x5d822692, 0x5d822692, 0x4a801c84, 0x4a801c84},
+ {0x0000a5d4, 0x61822892, 0x61822892, 0x4e801ce3, 0x4e801ce3},
+ {0x0000a5d8, 0x65824890, 0x65824890, 0x52801ce5, 0x52801ce5},
+ {0x0000a5dc, 0x69824892, 0x69824892, 0x56801ce9, 0x56801ce9},
+ {0x0000a5e0, 0x6e824c92, 0x6e824c92, 0x5a801ceb, 0x5a801ceb},
+ {0x0000a5e4, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
+ {0x0000a5e8, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
+ {0x0000a5ec, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f0, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f4, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f8, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
+ {0x0000a5fc, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
{0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a608, 0x01804601, 0x01804601, 0x00000000, 0x00000000},
- {0x0000a60c, 0x01804601, 0x01804601, 0x00000000, 0x00000000},
- {0x0000a610, 0x01804601, 0x01804601, 0x00000000, 0x00000000},
- {0x0000a614, 0x01804601, 0x01804601, 0x01404000, 0x01404000},
- {0x0000a618, 0x01804601, 0x01804601, 0x01404501, 0x01404501},
- {0x0000a61c, 0x01804601, 0x01804601, 0x02008501, 0x02008501},
- {0x0000a620, 0x03408d02, 0x03408d02, 0x0280ca03, 0x0280ca03},
- {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
- {0x0000a628, 0x03410d04, 0x03410d04, 0x04014c04, 0x04014c04},
- {0x0000a62c, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
- {0x0000a630, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
- {0x0000a634, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
- {0x0000a638, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
- {0x0000a63c, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
- {0x0000b2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
- {0x0000b2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584},
- {0x0000b2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x02004000, 0x02004000, 0x01404000, 0x01404000},
+ {0x0000a618, 0x02004801, 0x02004801, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x02808a02, 0x02808a02, 0x02008501, 0x02008501},
+ {0x0000a620, 0x0380ce03, 0x0380ce03, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x04411104, 0x04411104, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x04411104, 0x04411104, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
+ {0x0000a630, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
+ {0x0000a634, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
+ {0x0000a638, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800},
{0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
- {0x0000c2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
- {0x0000c2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584},
- {0x0000c2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800},
+ {0x0000c2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352},
+ {0x0000c2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584},
+ {0x0000c2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800},
{0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
{0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
- {0x00016048, 0x61200001, 0x61200001, 0x66480001, 0x66480001},
+ {0x00016048, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
{0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
{0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
- {0x00016448, 0x61200001, 0x61200001, 0x66480001, 0x66480001},
+ {0x00016448, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
{0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
{0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
- {0x00016848, 0x61200001, 0x61200001, 0x66480001, 0x66480001},
+ {0x00016848, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
{0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
};
{ USB_DEVICE(0x04CA, 0x4605) }, /* Liteon */
{ USB_DEVICE(0x040D, 0x3801) }, /* VIA */
{ USB_DEVICE(0x0cf3, 0xb003) }, /* Ubiquiti WifiStation Ext */
+ { USB_DEVICE(0x0cf3, 0xb002) }, /* Ubiquiti WifiStation */
{ USB_DEVICE(0x057c, 0x8403) }, /* AVM FRITZ!WLAN 11N v2 USB */
{ USB_DEVICE(0x0cf3, 0x7015),
switch (type) {
case ATH9K_RESET_POWER_ON:
ret = ath9k_hw_set_reset_power_on(ah);
- if (!ret)
+ if (ret)
ah->reset_power_on = true;
break;
case ATH9K_RESET_WARM:
}
bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
+ bf->bf_next = NULL;
list_del(&bf->list);
spin_unlock_bh(&sc->tx.txbuflock);
u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0, seq_first;
u32 ba[WME_BA_BMP_SIZE >> 5];
int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
- bool rc_update = true;
+ bool rc_update = true, isba;
struct ieee80211_tx_rate rates[4];
struct ath_frame_info *fi;
int nframes;
tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
tid = ATH_AN_2_TID(an, tidno);
seq_first = tid->seq_start;
+ isba = ts->ts_flags & ATH9K_TX_BA;
/*
* The hardware occasionally sends a tx status for the wrong TID.
* In this case, the BA status cannot be considered valid and all
* subframes need to be retransmitted
+ *
+ * Only BlockAcks have a TID and therefore normal Acks cannot be
+ * checked
*/
- if (tidno != ts->tid)
+ if (isba && tidno != ts->tid)
txok = false;
isaggr = bf_isaggr(bf);
list_add_tail(&bf->list, &bf_head);
bf->bf_state.bf_type = 0;
+ bf->bf_next = NULL;
bf->bf_lastbf = bf;
ath_tx_fill_desc(sc, bf, txq, fi->framelen);
ath_tx_txqaddbuf(sc, txq, &bf_head, false);
cancel_work_sync(&wldev->restart_work);
B43_WARN_ON(!wl);
+ if (!wldev->fw.ucode.data)
+ return; /* NULL if firmware never loaded */
if (wl->current_dev == wldev && wl->hw_registred) {
b43_leds_stop(wldev);
ieee80211_unregister_hw(wl->hw);
cancel_work_sync(&wldev->restart_work);
B43_WARN_ON(!wl);
+ if (!wldev->fw.ucode.data)
+ return; /* NULL if firmware never loaded */
if (wl->current_dev == wldev && wl->hw_registred) {
b43_leds_stop(wldev);
ieee80211_unregister_hw(wl->hw);
{
struct b43legacy_pio_txpacket *packet, *tmp_packet;
- tasklet_disable(&queue->txtask);
+ tasklet_kill(&queue->txtask);
list_for_each_entry_safe(packet, tmp_packet, &queue->txrunning, list)
free_txpacket(packet, 0);
}
ret = brcmf_bus_start(dev);
- if (ret == -ENOLINK) {
+ if (ret) {
brcmf_dbg(ERROR, "dongle is not responding\n");
brcmf_detach(dev);
goto fail;
if (!request || !request->n_ssids || !request->n_match_sets) {
WL_ERR("Invalid sched scan req!! n_ssids:%d\n",
- request->n_ssids);
+ request ? request->n_ssids : 0);
return -EINVAL;
}
u8 *iovar_ie_buf;
u8 *curr_ie_buf;
u8 *mgmt_ie_buf = NULL;
- u32 mgmt_ie_buf_len = 0;
+ int mgmt_ie_buf_len;
u32 *mgmt_ie_len = 0;
u32 del_add_ie_buf_len = 0;
u32 total_ie_buf_len = 0;
struct parsed_vndr_ie_info *vndrie_info;
s32 i;
u8 *ptr;
- u32 remained_buf_len;
+ int remained_buf_len;
WL_TRACE("bssidx %d, pktflag : 0x%02X\n", bssidx, pktflag);
iovar_ie_buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL);
static void brcmf_wiphy_pno_params(struct wiphy *wiphy)
{
-#ifndef CONFIG_BRCMFISCAN
+#ifndef CONFIG_BRCMISCAN
/* scheduled scan settings */
wiphy->max_sched_scan_ssids = BRCMF_PNO_MAX_PFN_COUNT;
wiphy->max_match_sets = BRCMF_PNO_MAX_PFN_COUNT;
struct brcmf_cfg80211_profile *profile = cfg->profile;
struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg);
struct wiphy *wiphy = cfg_to_wiphy(cfg);
- struct brcmf_channel_info_le channel_le;
- struct ieee80211_channel *notify_channel;
+ struct ieee80211_channel *notify_channel = NULL;
struct ieee80211_supported_band *band;
+ struct brcmf_bss_info_le *bi;
u32 freq;
s32 err = 0;
u32 target_channel;
+ u8 *buf;
WL_TRACE("Enter\n");
memcpy(profile->bssid, e->addr, ETH_ALEN);
brcmf_update_bss_info(cfg);
- brcmf_exec_dcmd(ndev, BRCMF_C_GET_CHANNEL, &channel_le,
- sizeof(channel_le));
+ buf = kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL);
+ if (buf == NULL) {
+ err = -ENOMEM;
+ goto done;
+ }
- target_channel = le32_to_cpu(channel_le.target_channel);
- WL_CONN("Roamed to channel %d\n", target_channel);
+ /* data sent to dongle has to be little endian */
+ *(__le32 *)buf = cpu_to_le32(WL_BSS_INFO_MAX);
+ err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_BSS_INFO, buf, WL_BSS_INFO_MAX);
+
+ if (err)
+ goto done;
+
+ bi = (struct brcmf_bss_info_le *)(buf + 4);
+ target_channel = bi->ctl_ch ? bi->ctl_ch :
+ CHSPEC_CHANNEL(le16_to_cpu(bi->chanspec));
if (target_channel <= CH_MAX_2G_CHANNEL)
band = wiphy->bands[IEEE80211_BAND_2GHZ];
freq = ieee80211_channel_to_frequency(target_channel, band->band);
notify_channel = ieee80211_get_channel(wiphy, freq);
+done:
+ kfree(buf);
cfg80211_roamed(ndev, notify_channel, (u8 *)profile->bssid,
conn_info->req_ie, conn_info->req_ie_len,
conn_info->resp_ie, conn_info->resp_ie_len, GFP_KERNEL);
schedule_work(&cfg->event_work);
}
-static s32 brcmf_dongle_mode(struct net_device *ndev, s32 iftype)
-{
- s32 infra = 0;
- s32 err = 0;
-
- switch (iftype) {
- case NL80211_IFTYPE_MONITOR:
- case NL80211_IFTYPE_WDS:
- WL_ERR("type (%d) : currently we do not support this mode\n",
- iftype);
- err = -EINVAL;
- return err;
- case NL80211_IFTYPE_ADHOC:
- infra = 0;
- break;
- case NL80211_IFTYPE_STATION:
- infra = 1;
- break;
- case NL80211_IFTYPE_AP:
- infra = 1;
- break;
- default:
- err = -EINVAL;
- WL_ERR("invalid type (%d)\n", iftype);
- return err;
- }
- err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_INFRA, &infra);
- if (err) {
- WL_ERR("WLC_SET_INFRA error (%d)\n", err);
- return err;
- }
-
- return 0;
-}
-
static s32 brcmf_dongle_eventmsg(struct net_device *ndev)
{
/* Room for "event_msgs" + '\0' + bitvec */
WL_BEACON_TIMEOUT);
if (err)
goto default_conf_out;
- err = brcmf_dongle_mode(ndev, wdev->iftype);
+ err = brcmf_cfg80211_change_iface(wdev->wiphy, ndev, wdev->iftype,
+ NULL, NULL);
if (err && err != -EINPROGRESS)
goto default_conf_out;
err = brcmf_dongle_probecap(cfg);
} else
len = src->len;
- dst = alloc_skb(len + sizeof(*rt_hdr), GFP_ATOMIC);
+ dst = alloc_skb(len + sizeof(*rt_hdr) + sizeof(u16)*2, GFP_ATOMIC);
if (!dst)
continue;
* See iwlagn_mac_channel_switch.
*/
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- struct iwl6000_channel_switch_cmd cmd;
+ struct iwl6000_channel_switch_cmd *cmd;
u32 switch_time_in_usec, ucode_switch_time;
u16 ch;
u32 tsf_low;
struct ieee80211_vif *vif = ctx->vif;
struct iwl_host_cmd hcmd = {
.id = REPLY_CHANNEL_SWITCH,
- .len = { sizeof(cmd), },
+ .len = { sizeof(*cmd), },
.flags = CMD_SYNC,
- .data = { &cmd, },
+ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
};
+ int err;
- cmd.band = priv->band == IEEE80211_BAND_2GHZ;
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ hcmd.data[0] = cmd;
+
+ cmd->band = priv->band == IEEE80211_BAND_2GHZ;
ch = ch_switch->channel->hw_value;
IWL_DEBUG_11H(priv, "channel switch from %u to %u\n",
ctx->active.channel, ch);
- cmd.channel = cpu_to_le16(ch);
- cmd.rxon_flags = ctx->staging.flags;
- cmd.rxon_filter_flags = ctx->staging.filter_flags;
+ cmd->channel = cpu_to_le16(ch);
+ cmd->rxon_flags = ctx->staging.flags;
+ cmd->rxon_filter_flags = ctx->staging.filter_flags;
switch_count = ch_switch->count;
tsf_low = ch_switch->timestamp & 0x0ffffffff;
/*
switch_count = 0;
}
if (switch_count <= 1)
- cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
+ cmd->switch_time = cpu_to_le32(priv->ucode_beacon_time);
else {
switch_time_in_usec =
vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
ucode_switch_time = iwl_usecs_to_beacons(priv,
switch_time_in_usec,
beacon_interval);
- cmd.switch_time = iwl_add_beacon_time(priv,
- priv->ucode_beacon_time,
- ucode_switch_time,
- beacon_interval);
+ cmd->switch_time = iwl_add_beacon_time(priv,
+ priv->ucode_beacon_time,
+ ucode_switch_time,
+ beacon_interval);
}
IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
- cmd.switch_time);
- cmd.expect_beacon = ch_switch->channel->flags & IEEE80211_CHAN_RADAR;
+ cmd->switch_time);
+ cmd->expect_beacon = ch_switch->channel->flags & IEEE80211_CHAN_RADAR;
- return iwl_dvm_send_cmd(priv, &hcmd);
+ err = iwl_dvm_send_cmd(priv, &hcmd);
+ kfree(cmd);
+ return err;
}
struct iwl_lib_ops iwl6000_lib = {
ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
if (iwlagn_tx_skb(priv, control->sta, skb))
- dev_kfree_skb_any(skb);
+ ieee80211_free_txskb(hw, skb);
}
static void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
vif_priv->ctx = ctx;
ctx->vif = vif;
+ /*
+ * In SNIFFER device type, the firmware reports the FCS to
+ * the host, rather than snipping it off. Unfortunately,
+ * mac80211 doesn't (yet) provide a per-packet flag for
+ * this, so we have to set the hardware flag based

+ * on the interfaces added. As the monitor interface can
+ * only be present by itself, and will be removed before
+ * other interfaces are added, this is safe.
+ */
+ if (vif->type == NL80211_IFTYPE_MONITOR)
+ priv->hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS;
+ else
+ priv->hw->flags &= ~IEEE80211_HW_RX_INCLUDES_FCS;
+
err = iwl_setup_interface(priv, ctx);
if (!err || reset)
goto out;
info = IEEE80211_SKB_CB(skb);
iwl_trans_free_tx_cmd(priv->trans, info->driver_data[1]);
- dev_kfree_skb_any(skb);
+ ieee80211_free_txskb(priv->hw, skb);
}
static void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
dma_map_page(trans->dev, page, 0,
PAGE_SIZE << trans_pcie->rx_page_order,
DMA_FROM_DEVICE);
+ if (dma_mapping_error(trans->dev, rxb->page_dma)) {
+ rxb->page = NULL;
+ spin_lock_irqsave(&rxq->lock, flags);
+ list_add(&rxb->list, &rxq->rx_used);
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ __free_pages(page, trans_pcie->rx_page_order);
+ return;
+ }
/* dma address must be no more than 36 bits */
BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
/* and also 256 byte aligned! */
dma_map_page(trans->dev, rxb->page, 0,
PAGE_SIZE << trans_pcie->rx_page_order,
DMA_FROM_DEVICE);
- list_add_tail(&rxb->list, &rxq->rx_free);
- rxq->free_count++;
+ if (dma_mapping_error(trans->dev, rxb->page_dma)) {
+ /*
+ * free the page(s) as well to not break
+ * the invariant that the items on the used
+ * list have no page(s)
+ */
+ __free_pages(rxb->page, trans_pcie->rx_page_order);
+ rxb->page = NULL;
+ list_add_tail(&rxb->list, &rxq->rx_used);
+ } else {
+ list_add_tail(&rxb->list, &rxq->rx_free);
+ rxq->free_count++;
+ }
} else
list_add_tail(&rxb->list, &rxq->rx_used);
spin_unlock_irqrestore(&rxq->lock, flags);
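
Both hunks enforce the same rule: a dma_map_page() result must pass dma_mapping_error() before use, and on failure the buffer goes back to the used list with its page freed so the list invariant holds. Reduced to a sketch with illustrative names:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Sketch: map a receive page, recovering cleanly if the IOMMU/SWIOTLB
 * cannot satisfy the mapping. Returns true and fills *dma on success. */
static bool map_rx_page(struct device *dev, struct page *page,
			unsigned int order, dma_addr_t *dma)
{
	*dma = dma_map_page(dev, page, 0, PAGE_SIZE << order,
			    DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *dma)) {
		__free_pages(page, order);	/* keep the "no page" invariant */
		return false;
	}
	return true;
}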
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- u16 rd_ptr, wr_ptr;
- int n_bd = trans_pcie->txq[txq_id].q.n_bd;
if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
WARN_ONCE(1, "queue %d not used", txq_id);
return;
}
- rd_ptr = iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) & (n_bd - 1);
- wr_ptr = iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id));
-
- WARN_ONCE(rd_ptr != wr_ptr, "queue %d isn't empty: [%d,%d]",
- txq_id, rd_ptr, wr_ptr);
-
iwl_txq_set_inactive(trans, txq_id);
IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
}
return -EBUSY;
}
- priv->scan_request = request;
-
priv->user_scan_cfg = kzalloc(sizeof(struct mwifiex_user_scan_cfg),
GFP_KERNEL);
if (!priv->user_scan_cfg) {
return -ENOMEM;
}
+ priv->scan_request = request;
+
priv->user_scan_cfg->num_ssids = request->n_ssids;
priv->user_scan_cfg->ssid_list = request->ssids;
ret = mwifiex_scan_networks(priv, priv->user_scan_cfg);
if (ret) {
dev_err(priv->adapter->dev, "scan failed: %d\n", ret);
+ priv->scan_request = NULL;
+ kfree(priv->user_scan_cfg);
+ priv->user_scan_cfg = NULL;
return ret;
}
return;
}
cmd_node = adapter->curr_cmd;
- if (cmd_node->wait_q_enabled)
- adapter->cmd_wait_q.status = -ETIMEDOUT;
-
if (cmd_node) {
adapter->dbg.timeout_cmd_id =
adapter->dbg.last_cmd_id[adapter->dbg.last_cmd_index];
dev_err(adapter->dev, "ps_mode=%d ps_state=%d\n",
adapter->ps_mode, adapter->ps_state);
+
+ if (cmd_node->wait_q_enabled) {
+ adapter->cmd_wait_q.status = -ETIMEDOUT;
+ wake_up_interruptible(&adapter->cmd_wait_q.wait);
+ mwifiex_cancel_pending_ioctl(adapter);
+ /* reset cmd_sent flag to unblock new commands */
+ adapter->cmd_sent = false;
+ }
}
if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING)
mwifiex_init_fw_complete(adapter);
struct cfg80211_ssid *req_ssid)
{
struct mwifiex_adapter *adapter = priv->adapter;
- int ret = 0;
+ int ret;
struct mwifiex_user_scan_cfg *scan_cfg;
- if (!req_ssid)
- return -1;
-
if (adapter->scan_processing) {
- dev_dbg(adapter->dev, "cmd: Scan already in process...\n");
- return ret;
+ dev_err(adapter->dev, "cmd: Scan already in process...\n");
+ return -EBUSY;
}
if (priv->scan_block) {
- dev_dbg(adapter->dev,
+ dev_err(adapter->dev,
"cmd: Scan is blocked during association...\n");
- return ret;
+ return -EBUSY;
}
scan_cfg = kzalloc(sizeof(struct mwifiex_user_scan_cfg), GFP_KERNEL);
struct sdio_mmc_card *card;
struct mwifiex_adapter *adapter;
mmc_pm_flag_t pm_flag = 0;
- int hs_actived = 0;
int i;
int ret = 0;
adapter = card->adapter;
/* Enable the Host Sleep */
- hs_actived = mwifiex_enable_hs(adapter);
- if (hs_actived) {
- pr_debug("cmd: suspend with MMC_PM_KEEP_POWER\n");
- ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
+ if (!mwifiex_enable_hs(adapter)) {
+ dev_err(adapter->dev, "cmd: failed to suspend\n");
+ return -EFAULT;
}
+ dev_dbg(adapter->dev, "cmd: suspend with MMC_PM_KEEP_POWER\n");
+ ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
+
/* Indicate device suspended */
adapter->is_suspended = true;
.disconnect = rt2x00usb_disconnect,
.suspend = rt2x00usb_suspend,
.resume = rt2x00usb_resume,
+ .reset_resume = rt2x00usb_resume,
.disable_hub_initiated_lpm = 1,
};
/*
* Check if temperature compensation is supported.
*/
- if (tssi_bounds[4] == 0xff)
+ if (tssi_bounds[4] == 0xff || step == 0xff)
return 0;
/*
.disconnect = rt2x00usb_disconnect,
.suspend = rt2x00usb_suspend,
.resume = rt2x00usb_resume,
+ .reset_resume = rt2x00usb_resume,
.disable_hub_initiated_lpm = 1,
};
.disconnect = rt2x00usb_disconnect,
.suspend = rt2x00usb_suspend,
.resume = rt2x00usb_resume,
+ .reset_resume = rt2x00usb_resume,
.disable_hub_initiated_lpm = 1,
};
/*=== Customer ID ===*/
/****** 8188CU ********/
{RTL_USB_DEVICE(0x050d, 0x1102, rtl92cu_hal_cfg)}, /*Belkin - Edimax*/
+ {RTL_USB_DEVICE(0x050d, 0x11f2, rtl92cu_hal_cfg)}, /*Belkin - ISY*/
{RTL_USB_DEVICE(0x06f8, 0xe033, rtl92cu_hal_cfg)}, /*Hercules - Edimax*/
{RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
{RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
set_hal_start(rtlhal);
/* Start bulk IN */
- _rtl_usb_receive(hw);
+ err = _rtl_usb_receive(hw);
}
return err;
/* Grant backend access to each skb fragment page. */
for (i = 0; i < frags; i++) {
skb_frag_t *frag = skb_shinfo(skb)->frags + i;
+ struct page *page = skb_frag_page(frag);
- tx->flags |= XEN_NETTXF_more_data;
+ len = skb_frag_size(frag);
+ offset = frag->page_offset;
- id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
- np->tx_skbs[id].skb = skb_get(skb);
- tx = RING_GET_REQUEST(&np->tx, prod++);
- tx->id = id;
- ref = gnttab_claim_grant_reference(&np->gref_tx_head);
- BUG_ON((signed short)ref < 0);
+ /* Data must not cross a page boundary. */
+ BUG_ON(len + offset > PAGE_SIZE<<compound_order(page));
- mfn = pfn_to_mfn(page_to_pfn(skb_frag_page(frag)));
- gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
- mfn, GNTMAP_readonly);
+ /* Skip unused frames from start of page */
+ page += offset >> PAGE_SHIFT;
+ offset &= ~PAGE_MASK;
- tx->gref = np->grant_tx_ref[id] = ref;
- tx->offset = frag->page_offset;
- tx->size = skb_frag_size(frag);
- tx->flags = 0;
+ while (len > 0) {
+ unsigned long bytes;
+
+ BUG_ON(offset >= PAGE_SIZE);
+
+ bytes = PAGE_SIZE - offset;
+ if (bytes > len)
+ bytes = len;
+
+ tx->flags |= XEN_NETTXF_more_data;
+
+ id = get_id_from_freelist(&np->tx_skb_freelist,
+ np->tx_skbs);
+ np->tx_skbs[id].skb = skb_get(skb);
+ tx = RING_GET_REQUEST(&np->tx, prod++);
+ tx->id = id;
+ ref = gnttab_claim_grant_reference(&np->gref_tx_head);
+ BUG_ON((signed short)ref < 0);
+
+ mfn = pfn_to_mfn(page_to_pfn(page));
+ gnttab_grant_foreign_access_ref(ref,
+ np->xbdev->otherend_id,
+ mfn, GNTMAP_readonly);
+
+ tx->gref = np->grant_tx_ref[id] = ref;
+ tx->offset = offset;
+ tx->size = bytes;
+ tx->flags = 0;
+
+ offset += bytes;
+ len -= bytes;
+
+ /* Next frame */
+ if (offset == PAGE_SIZE && len) {
+ BUG_ON(!PageCompound(page));
+ page++;
+ offset = 0;
+ }
+ }
}
np->tx.req_prod_pvt = prod;
}
+/*
+ * Count how many ring slots are required to send the frags of this
+ * skb. Each frag might be a compound page.
+ */
+static int xennet_count_skb_frag_slots(struct sk_buff *skb)
+{
+ int i, frags = skb_shinfo(skb)->nr_frags;
+ int pages = 0;
+
+ for (i = 0; i < frags; i++) {
+ skb_frag_t *frag = skb_shinfo(skb)->frags + i;
+ unsigned long size = skb_frag_size(frag);
+ unsigned long offset = frag->page_offset;
+
+ /* Skip unused frames from start of page */
+ offset &= ~PAGE_MASK;
+
+ pages += PFN_UP(offset + size);
+ }
+
+ return pages;
+}
+
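
A quick worked example of the slot math above, with hypothetical numbers:

/*
 * Worked example (4 KiB pages, numbers hypothetical):
 *   page_offset = 5000, size = 9000
 *   offset &= ~PAGE_MASK   ->  5000 % 4096 == 904
 *   PFN_UP(904 + 9000)     ->  DIV_ROUND_UP(9904, 4096) == 3 slots
 * The old per-frag count of 1 under-reserved ring space here.
 */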
static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
unsigned short id;
grant_ref_t ref;
unsigned long mfn;
int notify;
- int frags = skb_shinfo(skb)->nr_frags;
+ int slots;
unsigned int offset = offset_in_page(data);
unsigned int len = skb_headlen(skb);
unsigned long flags;
- frags += DIV_ROUND_UP(offset + len, PAGE_SIZE);
- if (unlikely(frags > MAX_SKB_FRAGS + 1)) {
- printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n",
- frags);
- dump_stack();
+ slots = DIV_ROUND_UP(offset + len, PAGE_SIZE) +
+ xennet_count_skb_frag_slots(skb);
+ if (unlikely(slots > MAX_SKB_FRAGS + 1)) {
+ net_alert_ratelimited(
+ "xennet: skb rides the rocket: %d slots\n", slots);
goto drop;
}
spin_lock_irqsave(&np->tx_lock, flags);
if (unlikely(!netif_carrier_ok(dev) ||
- (frags > 1 && !xennet_can_sg(dev)) ||
+ (slots > 1 && !xennet_can_sg(dev)) ||
netif_needs_gso(skb, netif_skb_features(skb)))) {
spin_unlock_irqrestore(&np->tx_lock, flags);
goto drop;
cmd = list_first_entry(&dev->cmd_queue, struct pn533_cmd, queue);
+ list_del(&cmd->queue);
+
mutex_unlock(&dev->cmd_lock);
__pn533_send_cmd_frame_async(dev, cmd->out_frame, cmd->in_frame,
cmd->in_frame_len, cmd->cmd_complete,
cmd->arg, cmd->flags);
- list_del(&cmd->queue);
kfree(cmd);
}
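
Moving list_del() above mutex_unlock() closes the window in which a second caller could observe and process the same queue head. The safe shape is pop-under-lock, process-outside; a sketch with an illustrative stand-in type:

#include <linux/list.h>
#include <linux/mutex.h>

struct cmd_entry {			/* illustrative stand-in for pn533_cmd */
	struct list_head queue;
	/* ... frames, completion callback, flags ... */
};

/* Sketch: detach the head while holding the lock, process it after. */
static struct cmd_entry *pop_cmd(struct mutex *lock, struct list_head *q)
{
	struct cmd_entry *cmd = NULL;

	mutex_lock(lock);
	if (!list_empty(q)) {
		cmd = list_first_entry(q, struct cmd_entry, queue);
		list_del(&cmd->queue);	/* no longer reachable from q */
	}
	mutex_unlock(lock);
	return cmd;
}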
static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg,
u8 *params, int params_len)
{
- struct pn533_cmd_jump_dep *cmd;
struct pn533_cmd_jump_dep_response *resp;
struct nfc_target nfc_target;
u8 target_gt_len;
int rc;
+ struct pn533_cmd_jump_dep *cmd = (struct pn533_cmd_jump_dep *)arg;
+ u8 active = cmd->active;
+
+ kfree(arg);
if (params_len == -ENOENT) {
nfc_dev_dbg(&dev->interface->dev, "");
}
resp = (struct pn533_cmd_jump_dep_response *) params;
- cmd = (struct pn533_cmd_jump_dep *) arg;
rc = resp->status & PN533_CMD_RET_MASK;
if (rc != PN533_CMD_RET_SUCCESS) {
nfc_dev_err(&dev->interface->dev,
if (rc == 0)
rc = nfc_dep_link_is_up(dev->nfc_dev,
dev->nfc_dev->targets[0].idx,
- !cmd->active, NFC_RF_INITIATOR);
+ !active, NFC_RF_INITIATOR);
return 0;
}
rc = pn533_send_cmd_frame_async(dev, dev->out_frame, dev->in_frame,
dev->in_maxlen, pn533_in_dep_link_up_complete,
cmd, GFP_KERNEL);
- if (rc)
- goto out;
-
-
-out:
- kfree(cmd);
+ if (rc < 0)
+ kfree(cmd);
return rc;
}
static int pn533_tm_send_complete(struct pn533 *dev, void *arg,
u8 *params, int params_len)
{
+ struct sk_buff *skb_out = arg;
+
nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+ dev_kfree_skb(skb_out);
+
if (params_len < 0) {
nfc_dev_err(&dev->interface->dev,
"Error %d when sending data",
rc = pn533_send_cmd_frame_async(dev, out_frame, dev->in_frame,
dev->in_maxlen, pn533_tm_send_complete,
- NULL, GFP_KERNEL);
+ skb, GFP_KERNEL);
if (rc) {
nfc_dev_err(&dev->interface->dev,
"Error %d when trying to send data", rc);
} else
next = dev->bus_list.next;
- /* Run device routines with the device locked */
- device_lock(&dev->dev);
retval = cb(dev, userdata);
- device_unlock(&dev->dev);
if (retval)
break;
}
struct pci_dev *pci_dev = to_pci_dev(dev);
struct pci_driver *drv = pci_dev->driver;
+ pm_runtime_resume(dev);
+
if (drv && drv->shutdown)
drv->shutdown(pci_dev);
pci_msi_shutdown(pci_dev);
* continue to do DMA
*/
pci_disable_device(pci_dev);
-
- /*
- * Devices may be enabled to wake up by runtime PM, but they need not
- * be supposed to wake up the system from its "power off" state (e.g.
- * ACPI S5). Therefore disable wakeup for all devices that aren't
- * supposed to wake up the system at this point. The state argument
- * will be ignored by pci_enable_wake().
- */
- if (!device_may_wakeup(dev))
- pci_enable_wake(pci_dev, PCI_UNKNOWN, false);
}
#ifdef CONFIG_PM
}
struct device_attribute vga_attr = __ATTR_RO(boot_vga);
-static void
-pci_config_pm_runtime_get(struct pci_dev *pdev)
-{
- struct device *dev = &pdev->dev;
- struct device *parent = dev->parent;
-
- if (parent)
- pm_runtime_get_sync(parent);
- pm_runtime_get_noresume(dev);
- /*
- * pdev->current_state is set to PCI_D3cold during suspending,
- * so wait until suspending completes
- */
- pm_runtime_barrier(dev);
- /*
- * Only need to resume devices in D3cold, because config
- * registers are still accessible for devices suspended but
- * not in D3cold.
- */
- if (pdev->current_state == PCI_D3cold)
- pm_runtime_resume(dev);
-}
-
-static void
-pci_config_pm_runtime_put(struct pci_dev *pdev)
-{
- struct device *dev = &pdev->dev;
- struct device *parent = dev->parent;
-
- pm_runtime_put(dev);
- if (parent)
- pm_runtime_put_sync(parent);
-}
-
static ssize_t
pci_read_config(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
}
EXPORT_SYMBOL_GPL(pci_dev_run_wake);
+void pci_config_pm_runtime_get(struct pci_dev *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device *parent = dev->parent;
+
+ if (parent)
+ pm_runtime_get_sync(parent);
+ pm_runtime_get_noresume(dev);
+ /*
+ * pdev->current_state is set to PCI_D3cold during suspending,
+ * so wait until suspending completes
+ */
+ pm_runtime_barrier(dev);
+ /*
+ * Only need to resume devices in D3cold, because config
+ * registers are still accessible for devices suspended but
+ * not in D3cold.
+ */
+ if (pdev->current_state == PCI_D3cold)
+ pm_runtime_resume(dev);
+}
+
+void pci_config_pm_runtime_put(struct pci_dev *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device *parent = dev->parent;
+
+ pm_runtime_put(dev);
+ if (parent)
+ pm_runtime_put_sync(parent);
+}
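
With the helpers moved into the PCI core and declared in its internal header (next hunk), every config-space entry point can bracket its accesses the same way; the proc read/write hunks further down do exactly that. A minimal usage sketch, using the public accessor for self-containment:

#include <linux/pci.h>

/* Sketch: resume a device parked in D3cold before touching its
 * config space, and drop the PM references afterwards. */
static u32 config_dword_resumed(struct pci_dev *pdev, int pos)
{
	u32 val = 0;

	pci_config_pm_runtime_get(pdev);
	pci_read_config_dword(pdev, pos, &val);
	pci_config_pm_runtime_put(pdev);
	return val;
}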
+
/**
* pci_pm_init - Initialize PM functions of given PCI device
* @dev: PCI device to handle.
extern int pci_finish_runtime_suspend(struct pci_dev *dev);
extern int __pci_pme_wakeup(struct pci_dev *dev, void *ign);
extern void pci_wakeup_bus(struct pci_bus *bus);
+extern void pci_config_pm_runtime_get(struct pci_dev *dev);
+extern void pci_config_pm_runtime_put(struct pci_dev *dev);
extern void pci_pm_init(struct pci_dev *dev);
extern void platform_pci_wakeup_init(struct pci_dev *dev);
extern void pci_allocate_cap_save_buffers(struct pci_dev *dev);
struct aer_broadcast_data *result_data;
result_data = (struct aer_broadcast_data *) data;
+ device_lock(&dev->dev);
dev->error_state = result_data->state;
if (!dev->driver ||
dev->driver ?
"no AER-aware driver" : "no driver");
}
- return 0;
+ goto out;
}
err_handler = dev->driver->err_handler;
vote = err_handler->error_detected(dev, result_data->state);
result_data->result = merge_result(result_data->result, vote);
+out:
+ device_unlock(&dev->dev);
return 0;
}
struct aer_broadcast_data *result_data;
result_data = (struct aer_broadcast_data *) data;
+ device_lock(&dev->dev);
if (!dev->driver ||
!dev->driver->err_handler ||
!dev->driver->err_handler->mmio_enabled)
- return 0;
+ goto out;
err_handler = dev->driver->err_handler;
vote = err_handler->mmio_enabled(dev);
result_data->result = merge_result(result_data->result, vote);
+out:
+ device_unlock(&dev->dev);
return 0;
}
struct aer_broadcast_data *result_data;
result_data = (struct aer_broadcast_data *) data;
+ device_lock(&dev->dev);
if (!dev->driver ||
!dev->driver->err_handler ||
!dev->driver->err_handler->slot_reset)
- return 0;
+ goto out;
err_handler = dev->driver->err_handler;
vote = err_handler->slot_reset(dev);
result_data->result = merge_result(result_data->result, vote);
+out:
+ device_unlock(&dev->dev);
return 0;
}
{
const struct pci_error_handlers *err_handler;
+ device_lock(&dev->dev);
dev->error_state = pci_channel_io_normal;
if (!dev->driver ||
!dev->driver->err_handler ||
!dev->driver->err_handler->resume)
- return 0;
+ goto out;
err_handler = dev->driver->err_handler;
err_handler->resume(dev);
+out:
+ device_unlock(&dev->dev);
return 0;
}
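
All four AER broadcast callbacks now hold the device lock across the dev->driver dereference and the handler invocation, so a concurrent unbind cannot free the handlers mid-call; error paths funnel to a common unlock label. The shape, reduced to a sketch:

#include <linux/device.h>
#include <linux/pci.h>

/* Sketch: pin the driver binding while invoking an optional callback. */
static int call_locked(struct pci_dev *dev, int (*handler)(struct pci_dev *))
{
	int ret = 0;

	device_lock(&dev->dev);		/* blocks unbind until we are done */
	if (dev->driver && handler)
		ret = handler(dev);
	device_unlock(&dev->dev);
	return ret;
}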
}
/* Hot-Plug Capable */
- if (cap_mask & PCIE_PORT_SERVICE_HP) {
+ if ((cap_mask & PCIE_PORT_SERVICE_HP) &&
+ dev->pcie_flags_reg & PCI_EXP_FLAGS_SLOT) {
pcie_capability_read_dword(dev, PCI_EXP_SLTCAP, &reg32);
if (reg32 & PCI_EXP_SLTCAP_HPC) {
services |= PCIE_PORT_SERVICE_HP;
if (!access_ok(VERIFY_WRITE, buf, cnt))
return -EINVAL;
+ pci_config_pm_runtime_get(dev);
+
if ((pos & 1) && cnt) {
unsigned char val;
pci_user_read_config_byte(dev, pos, &val);
cnt--;
}
+ pci_config_pm_runtime_put(dev);
+
*ppos = pos;
return nbytes;
}
if (!access_ok(VERIFY_READ, buf, cnt))
return -EINVAL;
+ pci_config_pm_runtime_get(dev);
+
if ((pos & 1) && cnt) {
unsigned char val;
__get_user(val, buf);
cnt--;
}
+ pci_config_pm_runtime_put(dev);
+
*ppos = pos;
i_size_write(ino, dp->size);
return nbytes;
ports of 8 GPIO pins each.
config PINCTRL_SAMSUNG
- bool "Samsung pinctrl driver"
+ bool
+ depends on OF && GPIOLIB
select PINMUX
select PINCONF
config PINCTRL_EXYNOS4
bool "Pinctrl driver data for Exynos4 SoC"
+ depends on OF && GPIOLIB
select PINCTRL_SAMSUNG
config PINCTRL_MVEBU
seq_printf(s, "group: %s\n", gname);
for (i = 0; i < num_pins; i++) {
pname = pin_get_name(pctldev, pins[i]);
- if (WARN_ON(!pname))
+ if (WARN_ON(!pname)) {
+ mutex_unlock(&pinctrl_mutex);
return -EINVAL;
+ }
seq_printf(s, "pin %d (%s)\n", pins[i], pname);
}
seq_puts(s, "\n");
seq_puts(s, "Pin config settings per pin group\n");
seq_puts(s, "Format: group (name): configs\n");
- mutex_lock(&pinctrl_mutex);
-
while (selector < ngroups) {
const char *gname = pctlops->get_group_name(pctldev, selector);
selector++;
}
- mutex_unlock(&pinctrl_mutex);
-
return 0;
}
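
The first hunk fixes a path that returned -EINVAL with pinctrl_mutex still held; the second drops a lock/unlock pair made redundant by a caller that already holds the mutex. A single exit label keeps such functions balanced by construction (a sketch, not the file's actual layout):

#include <linux/mutex.h>
#include <linux/seq_file.h>

static DEFINE_MUTEX(example_mutex);

/* Sketch: one unlock site makes it hard to leak the lock on errors. */
static int show_something(struct seq_file *s, void *unused)
{
	int ret = 0;

	mutex_lock(&example_mutex);
	if (WARN_ON(!s->private)) {	/* stand-in for the pname check */
		ret = -EINVAL;
		goto unlock;
	}
	seq_puts(s, "ok\n");
unlock:
	mutex_unlock(&example_mutex);
	return ret;
}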
struct nmk_gpio_chip *nmk_chip =
container_of(chip, struct nmk_gpio_chip, chip);
- return irq_find_mapping(nmk_chip->domain, offset);
+ return irq_create_mapping(nmk_chip->domain, offset);
}
#ifdef CONFIG_DEBUG_FS
struct clk *clk;
int secondary_irq;
void __iomem *base;
- int irq_start = -1;
+ int irq_start = 0;
int irq;
int ret;
if (!np)
irq_start = NOMADIK_GPIO_TO_IRQ(pdata->first_gpio);
- nmk_chip->domain = irq_domain_add_simple(NULL,
+ nmk_chip->domain = irq_domain_add_simple(np,
NMK_GPIO_PER_CHIP, irq_start,
&nmk_gpio_irq_simple_ops, nmk_chip);
if (!nmk_chip->domain) {
*bank = g->drv_bank;
*reg = g->drv_reg;
*bit = g->lpmd_bit;
- *width = 1;
+ *width = 2;
break;
case TEGRA_PINCONF_PARAM_DRIVE_DOWN_STRENGTH:
*bank = g->drv_bank;
FUNCTION(vi_alt3),
};
-#define MUXCTL_REG_A 0x3000
-#define PINGROUP_REG_A 0x868
+#define DRV_PINGROUP_REG_A 0x868 /* bank 0 */
+#define PINGROUP_REG_A 0x3000 /* bank 1 */
-#define PINGROUP_REG_Y(r) ((r) - MUXCTL_REG_A)
+#define PINGROUP_REG_Y(r) ((r) - PINGROUP_REG_A)
#define PINGROUP_REG_N(r) -1
#define PINGROUP(pg_name, f0, f1, f2, f3, f_safe, r, od, ior) \
}, \
.func_safe = TEGRA_MUX_ ## f_safe, \
.mux_reg = PINGROUP_REG_Y(r), \
- .mux_bank = 0, \
+ .mux_bank = 1, \
.mux_bit = 0, \
.pupd_reg = PINGROUP_REG_Y(r), \
- .pupd_bank = 0, \
+ .pupd_bank = 1, \
.pupd_bit = 2, \
.tri_reg = PINGROUP_REG_Y(r), \
- .tri_bank = 0, \
+ .tri_bank = 1, \
.tri_bit = 4, \
.einput_reg = PINGROUP_REG_Y(r), \
- .einput_bank = 0, \
+ .einput_bank = 1, \
.einput_bit = 5, \
.odrain_reg = PINGROUP_REG_##od(r), \
- .odrain_bank = 0, \
+ .odrain_bank = 1, \
.odrain_bit = 6, \
.lock_reg = PINGROUP_REG_Y(r), \
- .lock_bank = 0, \
+ .lock_bank = 1, \
.lock_bit = 7, \
.ioreset_reg = PINGROUP_REG_##ior(r), \
- .ioreset_bank = 0, \
+ .ioreset_bank = 1, \
.ioreset_bit = 8, \
.drv_reg = -1, \
}
.odrain_reg = -1, \
.lock_reg = -1, \
.ioreset_reg = -1, \
- .drv_reg = ((r) - PINGROUP_REG_A), \
- .drv_bank = 1, \
+ .drv_reg = ((r) - DRV_PINGROUP_REG_A), \
+ .drv_bank = 0, \
.hsm_bit = hsm_b, \
.schmitt_bit = schmitt_b, \
.lpmd_bit = lpmd_b, \
else
temp = ~muxreg->val;
- val |= temp;
+ val |= muxreg->mask & temp;
pmx_writel(pmx, val, muxreg->reg);
}
}
};
/* registers */
-#define PERIP_CFG 0x32C
- #define MCIF_SEL_SHIFT 3
+#define PERIP_CFG 0x3B0
+ #define MCIF_SEL_SHIFT 5
#define MCIF_SEL_SD (0x1 << MCIF_SEL_SHIFT)
#define MCIF_SEL_CF (0x2 << MCIF_SEL_SHIFT)
#define MCIF_SEL_XD (0x3 << MCIF_SEL_SHIFT)
#define PMX_SSP0_CS0_MASK (1 << 29)
#define PMX_SSP0_CS1_2_MASK (1 << 30)
+#define PAD_DIRECTION_SEL_0 0x65C
+#define PAD_DIRECTION_SEL_1 0x660
+#define PAD_DIRECTION_SEL_2 0x664
+
/* combined macros */
#define PMX_GMII_MASK (PMX_GMIICLK_MASK | \
PMX_GMIICOL_CRS_XFERER_MIITXCLK_MASK | \
.reg = PAD_FUNCTION_EN_0,
.mask = PMX_I2C0_MASK,
.val = PMX_I2C0_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_0,
+ .mask = PMX_I2C0_MASK,
+ .val = PMX_I2C0_MASK,
},
};
.reg = PAD_FUNCTION_EN_0,
.mask = PMX_SSP0_MASK,
.val = PMX_SSP0_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_0,
+ .mask = PMX_SSP0_MASK,
+ .val = PMX_SSP0_MASK,
},
};
.reg = PAD_FUNCTION_EN_2,
.mask = PMX_SSP0_CS0_MASK,
.val = PMX_SSP0_CS0_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_2,
+ .mask = PMX_SSP0_CS0_MASK,
+ .val = PMX_SSP0_CS0_MASK,
},
};
.reg = PAD_FUNCTION_EN_2,
.mask = PMX_SSP0_CS1_2_MASK,
.val = PMX_SSP0_CS1_2_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_2,
+ .mask = PMX_SSP0_CS1_2_MASK,
+ .val = PMX_SSP0_CS1_2_MASK,
},
};
.reg = PAD_FUNCTION_EN_0,
.mask = PMX_I2S0_MASK,
.val = PMX_I2S0_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_0,
+ .mask = PMX_I2S0_MASK,
+ .val = PMX_I2S0_MASK,
},
};
.reg = PAD_FUNCTION_EN_1,
.mask = PMX_I2S1_MASK,
.val = PMX_I2S1_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_1,
+ .mask = PMX_I2S1_MASK,
+ .val = PMX_I2S1_MASK,
},
};
.reg = PAD_FUNCTION_EN_0,
.mask = PMX_CLCD1_MASK,
.val = PMX_CLCD1_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_0,
+ .mask = PMX_CLCD1_MASK,
+ .val = PMX_CLCD1_MASK,
},
};
.reg = PAD_FUNCTION_EN_1,
.mask = PMX_CLCD2_MASK,
.val = PMX_CLCD2_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_1,
+ .mask = PMX_CLCD2_MASK,
+ .val = PMX_CLCD2_MASK,
},
};
.nmodemuxs = ARRAY_SIZE(clcd_high_res_modemux),
};
-static const char *const clcd_grps[] = { "clcd_grp", "clcd_high_res" };
+static const char *const clcd_grps[] = { "clcd_grp", "clcd_high_res_grp" };
static struct spear_function clcd_function = {
.name = "clcd",
.groups = clcd_grps,
.reg = PAD_FUNCTION_EN_1,
.mask = PMX_EGPIO_1_GRP_MASK,
.val = PMX_EGPIO_1_GRP_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_0,
+ .mask = PMX_EGPIO_0_GRP_MASK,
+ .val = PMX_EGPIO_0_GRP_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_1,
+ .mask = PMX_EGPIO_1_GRP_MASK,
+ .val = PMX_EGPIO_1_GRP_MASK,
},
};
.reg = PAD_FUNCTION_EN_0,
.mask = PMX_SMI_MASK,
.val = PMX_SMI_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_0,
+ .mask = PMX_SMI_MASK,
+ .val = PMX_SMI_MASK,
},
};
.reg = PAD_FUNCTION_EN_1,
.mask = PMX_SMINCS2_MASK | PMX_SMINCS3_MASK,
.val = PMX_SMINCS2_MASK | PMX_SMINCS3_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_0,
+ .mask = PMX_SMI_MASK,
+ .val = PMX_SMI_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_1,
+ .mask = PMX_SMINCS2_MASK | PMX_SMINCS3_MASK,
+ .val = PMX_SMINCS2_MASK | PMX_SMINCS3_MASK,
},
};
.reg = PAD_FUNCTION_EN_0,
.mask = PMX_GMII_MASK,
.val = PMX_GMII_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_0,
+ .mask = PMX_GMII_MASK,
+ .val = PMX_GMII_MASK,
},
};
.reg = PAD_FUNCTION_EN_2,
.mask = PMX_RGMII_REG2_MASK,
.val = 0,
+ }, {
+ .reg = PAD_DIRECTION_SEL_0,
+ .mask = PMX_RGMII_REG0_MASK,
+ .val = PMX_RGMII_REG0_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_1,
+ .mask = PMX_RGMII_REG1_MASK,
+ .val = PMX_RGMII_REG1_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_2,
+ .mask = PMX_RGMII_REG2_MASK,
+ .val = PMX_RGMII_REG2_MASK,
},
};
.reg = PAD_FUNCTION_EN_1,
.mask = PMX_SMII_0_1_2_MASK,
.val = 0,
+ }, {
+ .reg = PAD_DIRECTION_SEL_1,
+ .mask = PMX_SMII_0_1_2_MASK,
+ .val = PMX_SMII_0_1_2_MASK,
},
};
.reg = PAD_FUNCTION_EN_1,
.mask = PMX_NFCE2_MASK,
.val = 0,
+ }, {
+ .reg = PAD_DIRECTION_SEL_1,
+ .mask = PMX_NFCE2_MASK,
+ .val = PMX_NFCE2_MASK,
},
};
.reg = PAD_FUNCTION_EN_1,
.mask = PMX_NAND8BIT_1_MASK,
.val = PMX_NAND8BIT_1_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_0,
+ .mask = PMX_NAND8BIT_0_MASK,
+ .val = PMX_NAND8BIT_0_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_1,
+ .mask = PMX_NAND8BIT_1_MASK,
+ .val = PMX_NAND8BIT_1_MASK,
},
};
.reg = PAD_FUNCTION_EN_1,
.mask = PMX_NAND16BIT_1_MASK,
.val = PMX_NAND16BIT_1_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_1,
+ .mask = PMX_NAND16BIT_1_MASK,
+ .val = PMX_NAND16BIT_1_MASK,
},
};
.reg = PAD_FUNCTION_EN_1,
.mask = PMX_NAND_4CHIPS_MASK,
.val = PMX_NAND_4CHIPS_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_1,
+ .mask = PMX_NAND_4CHIPS_MASK,
+ .val = PMX_NAND_4CHIPS_MASK,
},
};
.reg = PAD_FUNCTION_EN_1,
.mask = PMX_KBD_ROWCOL68_MASK,
.val = PMX_KBD_ROWCOL68_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_1,
+ .mask = PMX_KBD_ROWCOL68_MASK,
+ .val = PMX_KBD_ROWCOL68_MASK,
},
};
.reg = PAD_FUNCTION_EN_0,
.mask = PMX_UART0_MASK,
.val = PMX_UART0_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_0,
+ .mask = PMX_UART0_MASK,
+ .val = PMX_UART0_MASK,
},
};
.reg = PAD_FUNCTION_EN_1,
.mask = PMX_UART0_MODEM_MASK,
.val = PMX_UART0_MODEM_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_1,
+ .mask = PMX_UART0_MODEM_MASK,
+ .val = PMX_UART0_MODEM_MASK,
},
};
.reg = PAD_FUNCTION_EN_1,
.mask = PMX_GPT0_TMR0_MASK,
.val = PMX_GPT0_TMR0_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_1,
+ .mask = PMX_GPT0_TMR0_MASK,
+ .val = PMX_GPT0_TMR0_MASK,
},
};
.reg = PAD_FUNCTION_EN_1,
.mask = PMX_GPT0_TMR1_MASK,
.val = PMX_GPT0_TMR1_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_1,
+ .mask = PMX_GPT0_TMR1_MASK,
+ .val = PMX_GPT0_TMR1_MASK,
},
};
.reg = PAD_FUNCTION_EN_1,
.mask = PMX_GPT1_TMR0_MASK,
.val = PMX_GPT1_TMR0_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_1,
+ .mask = PMX_GPT1_TMR0_MASK,
+ .val = PMX_GPT1_TMR0_MASK,
},
};
.reg = PAD_FUNCTION_EN_1,
.mask = PMX_GPT1_TMR1_MASK,
.val = PMX_GPT1_TMR1_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_1,
+ .mask = PMX_GPT1_TMR1_MASK,
+ .val = PMX_GPT1_TMR1_MASK,
},
};
.reg = PAD_FUNCTION_EN_2, \
.mask = PMX_MCIFALL_2_MASK, \
.val = PMX_MCIFALL_2_MASK, \
+ }, { \
+ .reg = PAD_DIRECTION_SEL_0, \
+ .mask = PMX_MCI_DATA8_15_MASK, \
+ .val = PMX_MCI_DATA8_15_MASK, \
+ }, { \
+ .reg = PAD_DIRECTION_SEL_1, \
+ .mask = PMX_MCIFALL_1_MASK | PMX_NFWPRT1_MASK | \
+ PMX_NFWPRT2_MASK, \
+ .val = PMX_MCIFALL_1_MASK | PMX_NFWPRT1_MASK | \
+ PMX_NFWPRT2_MASK, \
+ }, { \
+ .reg = PAD_DIRECTION_SEL_2, \
+ .mask = PMX_MCIFALL_2_MASK, \
+ .val = PMX_MCIFALL_2_MASK, \
}
/* sdhci device */
.reg = PAD_FUNCTION_EN_2,
.mask = PMX_TOUCH_XY_MASK,
.val = PMX_TOUCH_XY_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_2,
+ .mask = PMX_TOUCH_XY_MASK,
+ .val = PMX_TOUCH_XY_MASK,
},
};
.reg = PAD_FUNCTION_EN_0,
.mask = PMX_I2C0_MASK,
.val = 0,
+ }, {
+ .reg = PAD_DIRECTION_SEL_0,
+ .mask = PMX_I2C0_MASK,
+ .val = PMX_I2C0_MASK,
},
};
.mask = PMX_MCIDATA1_MASK |
PMX_MCIDATA2_MASK,
.val = 0,
+ }, {
+ .reg = PAD_DIRECTION_SEL_1,
+ .mask = PMX_MCIDATA1_MASK |
+ PMX_MCIDATA2_MASK,
+ .val = PMX_MCIDATA1_MASK |
+ PMX_MCIDATA2_MASK,
},
};
.reg = PAD_FUNCTION_EN_0,
.mask = PMX_I2S0_MASK,
.val = 0,
+ }, {
+ .reg = PAD_DIRECTION_SEL_0,
+ .mask = PMX_I2S0_MASK,
+ .val = PMX_I2S0_MASK,
},
};
.reg = PAD_FUNCTION_EN_0,
.mask = PMX_I2S0_MASK | PMX_CLCD1_MASK,
.val = 0,
+ }, {
+ .reg = PAD_DIRECTION_SEL_0,
+ .mask = PMX_I2S0_MASK | PMX_CLCD1_MASK,
+ .val = PMX_I2S0_MASK | PMX_CLCD1_MASK,
},
};
.reg = PAD_FUNCTION_EN_0,
.mask = PMX_CLCD1_MASK,
.val = 0,
+ }, {
+ .reg = PAD_DIRECTION_SEL_0,
+ .mask = PMX_CLCD1_MASK,
+ .val = PMX_CLCD1_MASK,
},
};
.reg = PAD_FUNCTION_EN_0,
.mask = PMX_CLCD1_MASK,
.val = 0,
+ }, {
+ .reg = PAD_DIRECTION_SEL_0,
+ .mask = PMX_CLCD1_MASK,
+ .val = PMX_CLCD1_MASK,
},
};
.reg = PAD_FUNCTION_EN_0,
.mask = PMX_CLCD1_MASK,
.val = 0,
+ }, {
+ .reg = PAD_DIRECTION_SEL_0,
+ .mask = PMX_CLCD1_MASK,
+ .val = PMX_CLCD1_MASK,
},
};
.reg = PAD_FUNCTION_EN_0,
.mask = PMX_CLCD1_MASK | PMX_SMI_MASK,
.val = 0,
+ }, {
+ .reg = PAD_DIRECTION_SEL_0,
+ .mask = PMX_CLCD1_MASK | PMX_SMI_MASK,
+ .val = PMX_CLCD1_MASK | PMX_SMI_MASK,
},
};
.reg = PAD_FUNCTION_EN_1,
.mask = PMX_I2S1_MASK | PMX_MCIDATA3_MASK,
.val = 0,
+ }, {
+ .reg = PAD_DIRECTION_SEL_1,
+ .mask = PMX_I2S1_MASK | PMX_MCIDATA3_MASK,
+ .val = PMX_I2S1_MASK | PMX_MCIDATA3_MASK,
},
};
.reg = PAD_FUNCTION_EN_0,
.mask = PMX_SMI_MASK,
.val = 0,
+ }, {
+ .reg = PAD_DIRECTION_SEL_0,
+ .mask = PMX_SMI_MASK,
+ .val = PMX_SMI_MASK,
},
};
.reg = PAD_FUNCTION_EN_2,
.mask = PMX_MCIDATA5_MASK,
.val = 0,
+ }, {
+ .reg = PAD_DIRECTION_SEL_1,
+ .mask = PMX_MCIDATA4_MASK,
+ .val = PMX_MCIDATA4_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_2,
+ .mask = PMX_MCIDATA5_MASK,
+ .val = PMX_MCIDATA5_MASK,
},
};
.mask = PMX_MCIDATA6_MASK |
PMX_MCIDATA7_MASK,
.val = 0,
+ }, {
+ .reg = PAD_DIRECTION_SEL_2,
+ .mask = PMX_MCIDATA6_MASK |
+ PMX_MCIDATA7_MASK,
+ .val = PMX_MCIDATA6_MASK |
+ PMX_MCIDATA7_MASK,
},
};
.reg = PAD_FUNCTION_EN_1,
.mask = PMX_KBD_ROWCOL25_MASK,
.val = 0,
+ }, {
+ .reg = PAD_DIRECTION_SEL_1,
+ .mask = PMX_KBD_ROWCOL25_MASK,
+ .val = PMX_KBD_ROWCOL25_MASK,
},
};
.mask = PMX_MCIIORDRE_MASK |
PMX_MCIIOWRWE_MASK,
.val = 0,
+ }, {
+ .reg = PAD_DIRECTION_SEL_2,
+ .mask = PMX_MCIIORDRE_MASK |
+ PMX_MCIIOWRWE_MASK,
+ .val = PMX_MCIIORDRE_MASK |
+ PMX_MCIIOWRWE_MASK,
},
};
.mask = PMX_MCIRESETCF_MASK |
PMX_MCICS0CE_MASK,
.val = 0,
+ }, {
+ .reg = PAD_DIRECTION_SEL_2,
+ .mask = PMX_MCIRESETCF_MASK |
+ PMX_MCICS0CE_MASK,
+ .val = PMX_MCIRESETCF_MASK |
+ PMX_MCICS0CE_MASK,
},
};
.reg = PAD_FUNCTION_EN_1,
.mask = PMX_NFRSTPWDWN3_MASK,
.val = 0,
+ }, {
+ .reg = PAD_DIRECTION_SEL_0,
+ .mask = PMX_NFRSTPWDWN2_MASK,
+ .val = PMX_NFRSTPWDWN2_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_1,
+ .mask = PMX_NFRSTPWDWN3_MASK,
+ .val = PMX_NFRSTPWDWN3_MASK,
},
};
.reg = PAD_FUNCTION_EN_2,
.mask = PMX_MCICFINTR_MASK | PMX_MCIIORDY_MASK,
.val = 0,
+ }, {
+ .reg = PAD_DIRECTION_SEL_2,
+ .mask = PMX_MCICFINTR_MASK | PMX_MCIIORDY_MASK,
+ .val = PMX_MCICFINTR_MASK | PMX_MCIIORDY_MASK,
},
};
.reg = PAD_FUNCTION_EN_2,
.mask = PMX_MCICS1_MASK | PMX_MCIDMAACK_MASK,
.val = 0,
+ }, {
+ .reg = PAD_DIRECTION_SEL_2,
+ .mask = PMX_MCICS1_MASK | PMX_MCIDMAACK_MASK,
+ .val = PMX_MCICS1_MASK | PMX_MCIDMAACK_MASK,
},
};
.reg = PAD_FUNCTION_EN_1,
.mask = PMX_KBD_ROWCOL25_MASK,
.val = 0,
+ }, {
+ .reg = PAD_DIRECTION_SEL_1,
+ .mask = PMX_KBD_ROWCOL25_MASK,
+ .val = PMX_KBD_ROWCOL25_MASK,
},
};
.ngroups = ARRAY_SIZE(can1_grps),
};
-/* Pad multiplexing for pci device */
-static const unsigned pci_sata_pins[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 18,
+/* Pad multiplexing for (ras-ip) pci device */
+static const unsigned pci_pins[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 18,
19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
55, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99 };
-#define PCI_SATA_MUXREG \
- { \
- .reg = PAD_FUNCTION_EN_0, \
- .mask = PMX_MCI_DATA8_15_MASK, \
- .val = 0, \
- }, { \
- .reg = PAD_FUNCTION_EN_1, \
- .mask = PMX_PCI_REG1_MASK, \
- .val = 0, \
- }, { \
- .reg = PAD_FUNCTION_EN_2, \
- .mask = PMX_PCI_REG2_MASK, \
- .val = 0, \
- }
-/* pad multiplexing for pcie0 device */
+static struct spear_muxreg pci_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_MCI_DATA8_15_MASK,
+ .val = 0,
+ }, {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_PCI_REG1_MASK,
+ .val = 0,
+ }, {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = PMX_PCI_REG2_MASK,
+ .val = 0,
+ }, {
+ .reg = PAD_DIRECTION_SEL_0,
+ .mask = PMX_MCI_DATA8_15_MASK,
+ .val = PMX_MCI_DATA8_15_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_1,
+ .mask = PMX_PCI_REG1_MASK,
+ .val = PMX_PCI_REG1_MASK,
+ }, {
+ .reg = PAD_DIRECTION_SEL_2,
+ .mask = PMX_PCI_REG2_MASK,
+ .val = PMX_PCI_REG2_MASK,
+ },
+};
+
+static struct spear_modemux pci_modemux[] = {
+ {
+ .muxregs = pci_muxreg,
+ .nmuxregs = ARRAY_SIZE(pci_muxreg),
+ },
+};
+
+static struct spear_pingroup pci_pingroup = {
+ .name = "pci_grp",
+ .pins = pci_pins,
+ .npins = ARRAY_SIZE(pci_pins),
+ .modemuxs = pci_modemux,
+ .nmodemuxs = ARRAY_SIZE(pci_modemux),
+};
+
+static const char *const pci_grps[] = { "pci_grp" };
+static struct spear_function pci_function = {
+ .name = "pci",
+ .groups = pci_grps,
+ .ngroups = ARRAY_SIZE(pci_grps),
+};
+
+/* pad multiplexing for (fix-part) pcie0 device */
static struct spear_muxreg pcie0_muxreg[] = {
- PCI_SATA_MUXREG,
{
.reg = PCIE_SATA_CFG,
.mask = PCIE_CFG_VAL(0),
static struct spear_pingroup pcie0_pingroup = {
.name = "pcie0_grp",
- .pins = pci_sata_pins,
- .npins = ARRAY_SIZE(pci_sata_pins),
.modemuxs = pcie0_modemux,
.nmodemuxs = ARRAY_SIZE(pcie0_modemux),
};
-/* pad multiplexing for pcie1 device */
+/* pad multiplexing for (fix-part) pcie1 device */
static struct spear_muxreg pcie1_muxreg[] = {
- PCI_SATA_MUXREG,
{
.reg = PCIE_SATA_CFG,
.mask = PCIE_CFG_VAL(1),
static struct spear_pingroup pcie1_pingroup = {
.name = "pcie1_grp",
- .pins = pci_sata_pins,
- .npins = ARRAY_SIZE(pci_sata_pins),
.modemuxs = pcie1_modemux,
.nmodemuxs = ARRAY_SIZE(pcie1_modemux),
};
-/* pad multiplexing for pcie2 device */
+/* pad multiplexing for (fix-part) pcie2 device */
static struct spear_muxreg pcie2_muxreg[] = {
- PCI_SATA_MUXREG,
{
.reg = PCIE_SATA_CFG,
.mask = PCIE_CFG_VAL(2),
static struct spear_pingroup pcie2_pingroup = {
.name = "pcie2_grp",
- .pins = pci_sata_pins,
- .npins = ARRAY_SIZE(pci_sata_pins),
.modemuxs = pcie2_modemux,
.nmodemuxs = ARRAY_SIZE(pcie2_modemux),
};
-static const char *const pci_grps[] = { "pcie0_grp", "pcie1_grp", "pcie2_grp" };
-static struct spear_function pci_function = {
- .name = "pci",
- .groups = pci_grps,
- .ngroups = ARRAY_SIZE(pci_grps),
+static const char *const pcie_grps[] = { "pcie0_grp", "pcie1_grp",
+	"pcie2_grp" };
+static struct spear_function pcie_function = {
+ .name = "pci_express",
+ .groups = pcie_grps,
+ .ngroups = ARRAY_SIZE(pcie_grps),
};
/* pad multiplexing for sata0 device */
static struct spear_muxreg sata0_muxreg[] = {
- PCI_SATA_MUXREG,
{
.reg = PCIE_SATA_CFG,
.mask = SATA_CFG_VAL(0),
static struct spear_pingroup sata0_pingroup = {
.name = "sata0_grp",
- .pins = pci_sata_pins,
- .npins = ARRAY_SIZE(pci_sata_pins),
.modemuxs = sata0_modemux,
.nmodemuxs = ARRAY_SIZE(sata0_modemux),
};
/* pad multiplexing for sata1 device */
static struct spear_muxreg sata1_muxreg[] = {
- PCI_SATA_MUXREG,
{
.reg = PCIE_SATA_CFG,
.mask = SATA_CFG_VAL(1),
static struct spear_pingroup sata1_pingroup = {
.name = "sata1_grp",
- .pins = pci_sata_pins,
- .npins = ARRAY_SIZE(pci_sata_pins),
.modemuxs = sata1_modemux,
.nmodemuxs = ARRAY_SIZE(sata1_modemux),
};
/* pad multiplexing for sata2 device */
static struct spear_muxreg sata2_muxreg[] = {
- PCI_SATA_MUXREG,
{
.reg = PCIE_SATA_CFG,
.mask = SATA_CFG_VAL(2),
static struct spear_pingroup sata2_pingroup = {
.name = "sata2_grp",
- .pins = pci_sata_pins,
- .npins = ARRAY_SIZE(pci_sata_pins),
.modemuxs = sata2_modemux,
.nmodemuxs = ARRAY_SIZE(sata2_modemux),
};
PMX_KBD_COL0_MASK | PMX_NFIO8_15_MASK | PMX_NFCE1_MASK |
PMX_NFCE2_MASK,
.val = 0,
+ }, {
+ .reg = PAD_DIRECTION_SEL_1,
+ .mask = PMX_KBD_ROWCOL25_MASK | PMX_KBD_COL1_MASK |
+ PMX_KBD_COL0_MASK | PMX_NFIO8_15_MASK | PMX_NFCE1_MASK |
+ PMX_NFCE2_MASK,
+ .val = PMX_KBD_ROWCOL25_MASK | PMX_KBD_COL1_MASK |
+ PMX_KBD_COL0_MASK | PMX_NFIO8_15_MASK | PMX_NFCE1_MASK |
+ PMX_NFCE2_MASK,
},
};
.mask = PMX_MCIADDR0ALE_MASK | PMX_MCIADDR2_MASK |
PMX_MCICECF_MASK | PMX_MCICEXD_MASK,
.val = 0,
+ }, {
+ .reg = PAD_DIRECTION_SEL_2,
+ .mask = PMX_MCIADDR0ALE_MASK | PMX_MCIADDR2_MASK |
+ PMX_MCICECF_MASK | PMX_MCICEXD_MASK,
+ .val = PMX_MCIADDR0ALE_MASK | PMX_MCIADDR2_MASK |
+ PMX_MCICECF_MASK | PMX_MCICEXD_MASK,
},
};
.mask = PMX_MCICDCF1_MASK | PMX_MCICDCF2_MASK | PMX_MCICDXD_MASK
| PMX_MCILEDS_MASK,
.val = 0,
+ }, {
+ .reg = PAD_DIRECTION_SEL_2,
+ .mask = PMX_MCICDCF1_MASK | PMX_MCICDCF2_MASK | PMX_MCICDXD_MASK
+ | PMX_MCILEDS_MASK,
+ .val = PMX_MCICDCF1_MASK | PMX_MCICDCF2_MASK | PMX_MCICDXD_MASK
+ | PMX_MCILEDS_MASK,
},
};
&can0_dis_sd_pingroup,
&can1_dis_sd_pingroup,
&can1_dis_kbd_pingroup,
+ &pci_pingroup,
&pcie0_pingroup,
&pcie1_pingroup,
&pcie2_pingroup,
&can0_function,
&can1_function,
&pci_function,
+ &pcie_function,
&sata_function,
&ssp1_function,
&gpt64_function,
* Pad multiplexing for making all pads as gpio's. This is done to override the
* values passed from bootloader and start from scratch.
*/
-static const unsigned pads_as_gpio_pins[] = { 251 };
+static const unsigned pads_as_gpio_pins[] = { 12, 88, 89, 251 };
static struct spear_muxreg pads_as_gpio_muxreg[] = {
{
.reg = PAD_FUNCTION_EN_1,
.nmodemuxs = ARRAY_SIZE(clcd_modemux),
};
-static const char *const clcd_grps[] = { "clcd_grp" };
+/* Disable clcd at runtime to prevent panel damage */
+static struct spear_muxreg clcd_sleep_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = ARM_TRACE_MASK | MIPHY_DBG_MASK,
+ .val = 0,
+ }, {
+ .reg = PAD_FUNCTION_EN_5,
+ .mask = CLCD_REG4_MASK | CLCD_AND_ARM_TRACE_REG4_MASK,
+ .val = 0x0,
+ }, {
+ .reg = PAD_FUNCTION_EN_6,
+ .mask = CLCD_AND_ARM_TRACE_REG5_MASK,
+ .val = 0x0,
+ }, {
+ .reg = PAD_FUNCTION_EN_7,
+ .mask = CLCD_AND_ARM_TRACE_REG6_MASK,
+ .val = 0x0,
+ },
+};
+
+static struct spear_modemux clcd_sleep_modemux[] = {
+ {
+ .muxregs = clcd_sleep_muxreg,
+ .nmuxregs = ARRAY_SIZE(clcd_sleep_muxreg),
+ },
+};
+
+static struct spear_pingroup clcd_sleep_pingroup = {
+ .name = "clcd_sleep_grp",
+ .pins = clcd_pins,
+ .npins = ARRAY_SIZE(clcd_pins),
+ .modemuxs = clcd_sleep_modemux,
+ .nmodemuxs = ARRAY_SIZE(clcd_sleep_modemux),
+};
+
+static const char *const clcd_grps[] = { "clcd_grp", "clcd_sleep_grp" };
static struct spear_function clcd_function = {
.name = "clcd",
.groups = clcd_grps,
&sdhci_pingroup,
&cf_pingroup,
&xd_pingroup,
+ &clcd_sleep_pingroup,
&clcd_pingroup,
&arm_trace_pingroup,
&miphy_dbg_pingroup,
.mask = PMX_SSP_CS_MASK,
.val = 0,
}, {
+ .reg = MODE_CONFIG_REG,
+ .mask = PMX_PWM_MASK,
+ .val = PMX_PWM_MASK,
+ }, {
.reg = IP_SEL_PAD_30_39_REG,
.mask = PMX_PL_34_MASK,
.val = PMX_PWM2_PL_34_VAL,
};
/* Pad multiplexing for cadence mii 1_2 as smii or rmii device */
-static const unsigned smii0_1_pins[] = { 10, 11, 13, 14, 15, 16, 17, 18, 19, 20,
+static const unsigned rmii0_1_pins[] = { 10, 11, 13, 14, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25, 26, 27 };
-static const unsigned rmii0_1_pins[] = { 10, 11, 21, 22, 23, 24, 25, 26, 27 };
+static const unsigned smii0_1_pins[] = { 10, 11, 21, 22, 23, 24, 25, 26, 27 };
static struct spear_muxreg mii0_1_muxreg[] = {
{
.reg = PMX_CONFIG_REG,
#include "pinctrl-spear.h"
/* pad mux declarations */
+#define PMX_PWM_MASK (1 << 16)
#define PMX_FIRDA_MASK (1 << 14)
#define PMX_I2C_MASK (1 << 13)
#define PMX_SSP_CS_MASK (1 << 12)
/**
* rio_map_inb_region -- Map inbound memory region.
* @mport: Master port.
- * @lstart: physical address of memory region to be mapped
+ * @local: physical address of memory region to be mapped
* @rbase: RIO base address assigned to this window
* @size: Size of the memory region
* @rflags: Flags for mapping.
}
EXPORT_SYMBOL_GPL(regulator_get_exclusive);
-/**
- * regulator_put - "free" the regulator source
- * @regulator: regulator source
- *
- * Note: drivers must ensure that all regulator_enable calls made on this
- * regulator source are balanced by regulator_disable calls prior to calling
- * this function.
- */
-void regulator_put(struct regulator *regulator)
+/* Caller must hold regulator_list_mutex, as regulator_put() does */
+static void _regulator_put(struct regulator *regulator)
{
struct regulator_dev *rdev;
if (regulator == NULL || IS_ERR(regulator))
return;
- mutex_lock(®ulator_list_mutex);
rdev = regulator->rdev;
debugfs_remove_recursive(regulator->debugfs);
rdev->exclusive = 0;
module_put(rdev->owner);
+}
+
+/**
+ * regulator_put - "free" the regulator source
+ * @regulator: regulator source
+ *
+ * Note: drivers must ensure that all regulator_enable calls made on this
+ * regulator source are balanced by regulator_disable calls prior to calling
+ * this function.
+ */
+void regulator_put(struct regulator *regulator)
+{
+ mutex_lock(®ulator_list_mutex);
+ _regulator_put(regulator);
mutex_unlock(®ulator_list_mutex);
}
EXPORT_SYMBOL_GPL(regulator_put);
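The split above is the usual locked/unlocked pairing: the public entry point
takes regulator_list_mutex, while the underscore-prefixed variant assumes the
caller already holds it, which is what lets the registration error path (the
scrub label below) drop a supply without deadlocking on a mutex it already
owns. A minimal sketch of the pattern, with hypothetical names:

	static DEFINE_MUTEX(obj_list_mutex);

	/* Caller must hold obj_list_mutex. */
	static void _obj_put(struct obj *o)
	{
		/* tear-down that touches state guarded by obj_list_mutex */
	}

	void obj_put(struct obj *o)
	{
		mutex_lock(&obj_list_mutex);
		_obj_put(o);
		mutex_unlock(&obj_list_mutex);
	}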
if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) {
ret = regulator_get_voltage(regulator);
if (ret >= 0)
- return (min_uV >= ret && ret <= max_uV);
+ return (min_uV <= ret && ret <= max_uV);
else
return ret;
}
if (ret != 0) {
rdev_err(rdev, "Failed to request enable GPIO%d: %d\n",
config->ena_gpio, ret);
- goto clean;
+ goto wash;
}
rdev->ena_gpio = config->ena_gpio;
scrub:
if (rdev->supply)
- regulator_put(rdev->supply);
+ _regulator_put(rdev->supply);
if (rdev->ena_gpio)
gpio_free(rdev->ena_gpio);
kfree(rdev->constraints);
+wash:
device_unregister(&rdev->dev);
/* device core frees rdev */
rdev = ERR_PTR(ret);
if (imxdi->ioaddr == NULL)
return -ENOMEM;
+ spin_lock_init(&imxdi->irq_lock);
+
imxdi->irq = platform_get_irq(pdev, 0);
if (imxdi->irq < 0)
return imxdi->irq;
static int __devexit tps65910_rtc_remove(struct platform_device *pdev)
{
/* leave rtc running, but disable irqs */
- struct rtc_device *rtc = platform_get_drvdata(pdev);
+ struct tps65910_rtc *tps_rtc = platform_get_drvdata(pdev);
- tps65910_rtc_alarm_irq_enable(&rtc->dev, 0);
+ tps65910_rtc_alarm_irq_enable(&pdev->dev, 0);
- rtc_device_unregister(rtc);
+ rtc_device_unregister(tps_rtc->rtc);
return 0;
}
#define RAW3215_NR_CCWS 3
#define RAW3215_TIMEOUT HZ/10 /* time for delayed output */
-#define RAW3215_FIXED 1 /* 3215 console device is not be freed */
#define RAW3215_WORKING 4 /* set if a request is being worked on */
#define RAW3215_THROTTLED 8 /* set if reading is disabled */
#define RAW3215_STOPPED 16 /* set if writing is disabled */
struct tty_struct *tty;
tty = tty_port_tty_get(&raw->port);
- tty_wakeup(tty);
- tty_kref_put(tty);
+ if (tty) {
+ tty_wakeup(tty);
+ tty_kref_put(tty);
+ }
}
/*
DECLARE_WAITQUEUE(wait, current);
unsigned long flags;
- if (!(raw->port.flags & ASYNC_INITIALIZED) ||
- (raw->flags & RAW3215_FIXED))
+ if (!(raw->port.flags & ASYNC_INITIALIZED))
return;
/* Wait for outstanding requests, then free irq */
spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
dev_set_drvdata(&cdev->dev, raw);
cdev->handler = raw3215_irq;
- raw->flags |= RAW3215_FIXED;
-
/* Request the console irq */
if (raw3215_startup(raw) != 0) {
raw3215_free_info(raw);
/* Will be done on the slow path. */
return -EAGAIN;
}
- if (stsch_err(schid, &schib) || !css_sch_is_valid(&schib)) {
+ if (stsch_err(schid, &schib)) {
+ /* Subchannel is not provided. */
+ return -ENXIO;
+ }
+ if (!css_sch_is_valid(&schib)) {
/* Unusable - ignore. */
return 0;
}
case -ENOMEM:
case -EIO:
/* These should abort looping */
+ idset_sch_del_subseq(slow_subchannel_set, schid);
break;
default:
rc = 0;
extern void css_reiterate_subchannels(void);
void css_update_ssd_info(struct subchannel *sch);
-#define __MAX_SUBCHANNEL 65535
-#define __MAX_SSID 3
-
struct channel_subsystem {
u8 cssid;
int valid;
}
if (device_is_disconnected(cdev))
return IO_SCH_REPROBE;
- if (cdev->online)
+ if (cdev->online && !cdev->private->flags.resuming)
return IO_SCH_VERIFY;
if (cdev->private->state == DEV_STATE_NOT_OPER)
return IO_SCH_UNREG_ATTACH;
rc = 0;
goto out_unlock;
case IO_SCH_VERIFY:
- if (cdev->private->flags.resuming == 1) {
- if (cio_enable_subchannel(sch, (u32)(addr_t)sch)) {
- ccw_device_set_notoper(cdev);
- break;
- }
- }
/* Trigger path verification. */
io_subchannel_verify(sch);
rc = 0;
/*
- * Copyright IBM Corp. 2007
+ * Copyright IBM Corp. 2007, 2012
* Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
#include <linux/vmalloc.h>
+#include <linux/bitmap.h>
#include <linux/bitops.h>
#include "idset.h"
#include "css.h"
idset_del(set, schid.ssid, schid.sch_no);
}
+/* Clear ids starting from @schid up to end of subchannel set. */
+void idset_sch_del_subseq(struct idset *set, struct subchannel_id schid)
+{
+ int pos = schid.ssid * set->num_id + schid.sch_no;
+
+ bitmap_clear(set->bitmap, pos, set->num_id - schid.sch_no);
+}
+
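The bitmap behind an idset is laid out as num_ssid consecutive rows of num_id
bits, so (ssid, sch_no) maps to bit ssid * num_id + sch_no, and clearing
num_id - sch_no bits wipes everything from schid to the end of that
subchannel set without touching the next ssid's row. A worked example,
assuming num_id is 65536:

	/* schid = { .ssid = 1, .sch_no = 0xff00 }
	 * pos = 1 * 65536 + 0xff00 = 130816
	 * len = 65536 - 0xff00    = 256
	 * -> clears subchannels 0xff00..0xffff of ssid 1 only
	 */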
int idset_sch_contains(struct idset *set, struct subchannel_id schid)
{
return idset_contains(set, schid.ssid, schid.sch_no);
int idset_is_empty(struct idset *set)
{
- int bitnum;
-
- bitnum = find_first_bit(set->bitmap, set->num_ssid * set->num_id);
- if (bitnum >= set->num_ssid * set->num_id)
- return 1;
- return 0;
+ return bitmap_empty(set->bitmap, set->num_ssid * set->num_id);
}
void idset_add_set(struct idset *to, struct idset *from)
{
- unsigned long i, len;
+ int len = min(to->num_ssid * to->num_id, from->num_ssid * from->num_id);
- len = min(__BITOPS_WORDS(to->num_ssid * to->num_id),
- __BITOPS_WORDS(from->num_ssid * from->num_id));
- for (i = 0; i < len ; i++)
- to->bitmap[i] |= from->bitmap[i];
+ bitmap_or(to->bitmap, to->bitmap, from->bitmap, len);
}
/*
- * Copyright IBM Corp. 2007
+ * Copyright IBM Corp. 2007, 2012
* Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
struct idset *idset_sch_new(void);
void idset_sch_add(struct idset *set, struct subchannel_id id);
void idset_sch_del(struct idset *set, struct subchannel_id id);
+void idset_sch_del_subseq(struct idset *set, struct subchannel_id schid);
int idset_sch_contains(struct idset *set, struct subchannel_id id);
int idset_sch_get_first(struct idset *set, struct subchannel_id *id);
int idset_is_empty(struct idset *set);
QETH_DBF_TEXT(SETUP, 2, "qipasscb");
cmd = (struct qeth_ipa_cmd *) data;
+
+ switch (cmd->hdr.return_code) {
+ case IPA_RC_NOTSUPP:
+ case IPA_RC_L2_UNSUPPORTED_CMD:
+ QETH_DBF_TEXT(SETUP, 2, "ipaunsup");
+ card->options.ipa4.supported_funcs |= IPA_SETADAPTERPARMS;
+ card->options.ipa6.supported_funcs |= IPA_SETADAPTERPARMS;
+		return 0;
+ default:
+ if (cmd->hdr.return_code) {
+ QETH_DBF_MESSAGE(1, "%s IPA_CMD_QIPASSIST: Unhandled "
+ "rc=%d\n",
+ dev_name(&card->gdev->dev),
+ cmd->hdr.return_code);
+ return 0;
+ }
+ }
+
if (cmd->hdr.prot_version == QETH_PROT_IPV4) {
card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported;
card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
- } else {
+ } else if (cmd->hdr.prot_version == QETH_PROT_IPV6) {
card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;
card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
- }
+ } else
+ QETH_DBF_MESSAGE(1, "%s IPA_CMD_QIPASSIST: Flawed LIC detected"
+ "\n", dev_name(&card->gdev->dev));
QETH_DBF_TEXT(SETUP, 2, "suppenbl");
QETH_DBF_TEXT_(SETUP, 2, "%08x", (__u32)cmd->hdr.ipa_supported);
QETH_DBF_TEXT_(SETUP, 2, "%08x", (__u32)cmd->hdr.ipa_enabled);
QETH_DBF_TEXT(SETUP, 2, "doL2init");
QETH_DBF_TEXT_(SETUP, 2, "doL2%s", CARD_BUS_ID(card));
- rc = qeth_query_setadapterparms(card);
- if (rc) {
- QETH_DBF_MESSAGE(2, "could not query adapter parameters on "
- "device %s: x%x\n", CARD_BUS_ID(card), rc);
+ if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
+ rc = qeth_query_setadapterparms(card);
+ if (rc) {
+ QETH_DBF_MESSAGE(2, "could not query adapter "
+ "parameters on device %s: x%x\n",
+ CARD_BUS_ID(card), rc);
+ }
}
if (card->info.type == QETH_CARD_TYPE_IQD ||
return -ERESTARTSYS;
}
rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]);
- if (!rc)
+ if (!rc || (rc == IPA_RC_L2_MAC_NOT_FOUND))
rc = qeth_l2_send_setmac(card, addr->sa_data);
return rc ? -EINVAL : 0;
}
frame_index,
(void **)&frame_buffer);
- sci_controller_copy_sata_response(&ireq->stp.req,
+ sci_controller_copy_sata_response(&ireq->stp.rsp,
frame_header,
frame_buffer);
int
qla24xx_disable_vp(scsi_qla_host_t *vha)
{
+ unsigned long flags;
int ret;
ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
/* Remove port id from vp target map */
+ spin_lock_irqsave(&vha->hw->vport_slock, flags);
qlt_update_vp_map(vha, RESET_AL_PA);
+ spin_unlock_irqrestore(&vha->hw->vport_slock, flags);
qla2x00_mark_vp_devices_dead(vha);
atomic_set(&vha->vp_state, VP_FAILED);
int pmap_len;
fc_port_t *fcport;
int global_resets;
+ unsigned long flags;
retry:
global_resets = atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count);
sess->s_id.b.area, sess->loop_id, fcport->d_id.b.domain,
fcport->d_id.b.al_pa, fcport->d_id.b.area, fcport->loop_id);
- sess->s_id = fcport->d_id;
- sess->loop_id = fcport->loop_id;
- sess->conf_compl_supported = !!(fcport->flags &
- FCF_CONF_COMP_SUPPORTED);
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
+ (fcport->flags & FCF_CONF_COMP_SUPPORTED));
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
res = true;
qlt_undelete_sess(sess);
kref_get(&sess->se_sess->sess_kref);
- sess->s_id = fcport->d_id;
- sess->loop_id = fcport->loop_id;
- sess->conf_compl_supported = !!(fcport->flags &
- FCF_CONF_COMP_SUPPORTED);
+ ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
+ (fcport->flags & FCF_CONF_COMP_SUPPORTED));
+
if (sess->local && !local)
sess->local = 0;
spin_unlock_irqrestore(&ha->hardware_lock, flags);
*/
kref_get(&sess->se_sess->sess_kref);
- sess->conf_compl_supported = !!(fcport->flags &
- FCF_CONF_COMP_SUPPORTED);
+ sess->conf_compl_supported = (fcport->flags & FCF_CONF_COMP_SUPPORTED);
BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name));
memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
"Reappeared sess %p\n", sess);
}
- sess->s_id = fcport->d_id;
- sess->loop_id = fcport->loop_id;
- sess->conf_compl_supported = !!(fcport->flags &
- FCF_CONF_COMP_SUPPORTED);
+ ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
+ (fcport->flags & FCF_CONF_COMP_SUPPORTED));
}
if (sess && sess->local) {
int (*check_initiator_node_acl)(struct scsi_qla_host *, unsigned char *,
void *, uint8_t *, uint16_t);
+ void (*update_sess)(struct qla_tgt_sess *, port_id_t, uint16_t, bool);
struct qla_tgt_sess *(*find_sess_by_loop_id)(struct scsi_qla_host *,
const uint16_t);
struct qla_tgt_sess *(*find_sess_by_s_id)(struct scsi_qla_host *,
struct tcm_qla2xxx_tpg, se_tpg);
struct tcm_qla2xxx_lport *lport = tpg->lport;
- return &lport->lport_name[0];
+ return lport->lport_naa_name;
}
static char *tcm_qla2xxx_npiv_get_fabric_wwn(struct se_portal_group *se_tpg)
return 0;
}
+static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id,
+ uint16_t loop_id, bool conf_compl_supported)
+{
+ struct qla_tgt *tgt = sess->tgt;
+ struct qla_hw_data *ha = tgt->ha;
+ struct tcm_qla2xxx_lport *lport = ha->tgt.target_lport_ptr;
+ struct se_node_acl *se_nacl = sess->se_sess->se_node_acl;
+ struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
+ struct tcm_qla2xxx_nacl, se_node_acl);
+ u32 key;
+
+
+ if (sess->loop_id != loop_id || sess->s_id.b24 != s_id.b24)
+ pr_info("Updating session %p from port %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x loop_id %d -> %d s_id %x:%x:%x -> %x:%x:%x\n",
+ sess,
+ sess->port_name[0], sess->port_name[1],
+ sess->port_name[2], sess->port_name[3],
+ sess->port_name[4], sess->port_name[5],
+ sess->port_name[6], sess->port_name[7],
+ sess->loop_id, loop_id,
+ sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa,
+ s_id.b.domain, s_id.b.area, s_id.b.al_pa);
+
+ if (sess->loop_id != loop_id) {
+ /*
+ * Because we can shuffle loop IDs around and we
+ * update different sessions non-atomically, we might
+ * have overwritten this session's old loop ID
+ * already, and we might end up overwriting some other
+ * session that will be updated later. So we have to
+ * be extra careful and we can't warn about those things...
+ */
+ if (lport->lport_loopid_map[sess->loop_id].se_nacl == se_nacl)
+ lport->lport_loopid_map[sess->loop_id].se_nacl = NULL;
+
+ lport->lport_loopid_map[loop_id].se_nacl = se_nacl;
+
+ sess->loop_id = loop_id;
+ }
+
+ if (sess->s_id.b24 != s_id.b24) {
+ key = (((u32) sess->s_id.b.domain << 16) |
+ ((u32) sess->s_id.b.area << 8) |
+ ((u32) sess->s_id.b.al_pa));
+
+ if (btree_lookup32(&lport->lport_fcport_map, key))
+ WARN(btree_remove32(&lport->lport_fcport_map, key) != se_nacl,
+ "Found wrong se_nacl when updating s_id %x:%x:%x\n",
+ sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa);
+ else
+ WARN(1, "No lport_fcport_map entry for s_id %x:%x:%x\n",
+ sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa);
+
+ key = (((u32) s_id.b.domain << 16) |
+ ((u32) s_id.b.area << 8) |
+ ((u32) s_id.b.al_pa));
+
+ if (btree_lookup32(&lport->lport_fcport_map, key)) {
+ WARN(1, "Already have lport_fcport_map entry for s_id %x:%x:%x\n",
+ s_id.b.domain, s_id.b.area, s_id.b.al_pa);
+ btree_update32(&lport->lport_fcport_map, key, se_nacl);
+ } else {
+ btree_insert32(&lport->lport_fcport_map, key, se_nacl, GFP_ATOMIC);
+ }
+
+ sess->s_id = s_id;
+ nacl->nport_id = key;
+ }
+
+ sess->conf_compl_supported = conf_compl_supported;
+}
+
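The lport_fcport_map btree is keyed by the 24-bit FC port ID, packed exactly
as in the function above. A sketch of the packing, with fc_port_id_to_key as
a hypothetical helper name:

	/* Pack a 24-bit FC port ID (domain.area.al_pa) into a btree key. */
	static u32 fc_port_id_to_key(port_id_t s_id)
	{
		return ((u32)s_id.b.domain << 16) |
		       ((u32)s_id.b.area << 8) |
		       (u32)s_id.b.al_pa;
	}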
/*
* Calls into tcm_qla2xxx used by qla2xxx LLD I/O path.
*/
.free_cmd = tcm_qla2xxx_free_cmd,
.free_mcmd = tcm_qla2xxx_free_mcmd,
.free_session = tcm_qla2xxx_free_session,
+ .update_sess = tcm_qla2xxx_update_sess,
.check_initiator_node_acl = tcm_qla2xxx_check_initiator_node_acl,
.find_sess_by_s_id = tcm_qla2xxx_find_sess_by_s_id,
.find_sess_by_loop_id = tcm_qla2xxx_find_sess_by_loop_id,
lport->lport_wwpn = wwpn;
tcm_qla2xxx_format_wwn(&lport->lport_name[0], TCM_QLA2XXX_NAMELEN,
wwpn);
+ sprintf(lport->lport_naa_name, "naa.%016llx", (unsigned long long) wwpn);
ret = tcm_qla2xxx_init_lport(lport);
if (ret != 0)
lport->lport_npiv_wwnn = npiv_wwnn;
tcm_qla2xxx_npiv_format_wwn(&lport->lport_npiv_name[0],
TCM_QLA2XXX_NAMELEN, npiv_wwpn, npiv_wwnn);
+ sprintf(lport->lport_naa_name, "naa.%016llx", (unsigned long long) npiv_wwpn);
/* FIXME: tcm_qla2xxx_npiv_make_lport */
ret = -ENOSYS;
u64 lport_npiv_wwnn;
/* ASCII formatted WWPN for FC Target Lport */
char lport_name[TCM_QLA2XXX_NAMELEN];
+ /* ASCII formatted naa WWPN for VPD page 83 etc */
+ char lport_naa_name[TCM_QLA2XXX_NAMELEN];
/* ASCII formatted WWPN+WWNN for NPIV FC Target Lport */
char lport_npiv_name[TCM_QLA2XXX_NPIV_NAMELEN];
/* map for fc_port pointers in 24-bit FC Port ID space */
static const struct of_device_id qpti_match[];
static int __devinit qpti_sbus_probe(struct platform_device *op)
{
- const struct of_device_id *match;
- struct scsi_host_template *tpnt;
struct device_node *dp = op->dev.of_node;
struct Scsi_Host *host;
struct qlogicpti *qpti;
static int nqptis;
const char *fcode;
- match = of_match_device(qpti_match, &op->dev);
- if (!match)
- return -EINVAL;
- tpnt = match->data;
-
/* Sometimes Antares cards come up not completely
* setup, and we get a report of a zero IRQ.
*/
if (op->archdata.irqs[0] == 0)
return -ENODEV;
- host = scsi_host_alloc(tpnt, sizeof(struct qlogicpti));
+ host = scsi_host_alloc(&qpti_template, sizeof(struct qlogicpti));
if (!host)
return -ENOMEM;
static const struct of_device_id qpti_match[] = {
{
.name = "ptisp",
- .data = &qpti_template,
},
{
.name = "PTI,ptisp",
- .data = &qpti_template,
},
{
.name = "QLGC,isp",
- .data = &qpti_template,
},
{
.name = "SUNW,isp",
- .data = &qpti_template,
},
{},
};
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/async.h>
+#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
EXPORT_SYMBOL_GPL(scsi_get_vpd_page);
/**
+ * scsi_report_opcode - Find out if a given command opcode is supported
+ * @sdev: scsi device to query
+ * @buffer: scratch buffer (must be at least 20 bytes long)
+ * @len: length of buffer
+ * @opcode: opcode for command to look up
+ *
+ * Uses the REPORT SUPPORTED OPERATION CODES to look up the given
+ * opcode. Returns 0 if RSOC fails or if the command opcode is
+ * unsupported. Returns 1 if the device claims to support the command.
+ */
+int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer,
+ unsigned int len, unsigned char opcode)
+{
+ unsigned char cmd[16];
+ struct scsi_sense_hdr sshdr;
+ int result;
+
+ if (sdev->no_report_opcodes || sdev->scsi_level < SCSI_SPC_3)
+ return 0;
+
+ memset(cmd, 0, 16);
+ cmd[0] = MAINTENANCE_IN;
+ cmd[1] = MI_REPORT_SUPPORTED_OPERATION_CODES;
+ cmd[2] = 1; /* One command format */
+ cmd[3] = opcode;
+ put_unaligned_be32(len, &cmd[6]);
+ memset(buffer, 0, len);
+
+ result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
+ &sshdr, 30 * HZ, 3, NULL);
+
+ if (result && scsi_sense_valid(&sshdr) &&
+ sshdr.sense_key == ILLEGAL_REQUEST &&
+ (sshdr.asc == 0x20 || sshdr.asc == 0x24) && sshdr.ascq == 0x00)
+ return 0;
+
+ if ((buffer[1] & 3) == 3) /* Command supported */
+ return 1;
+
+ return 0;
+}
+EXPORT_SYMBOL(scsi_report_opcode);
+
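The (buffer[1] & 3) == 3 test reads the SUPPORT field of the returned
one-command parameter data; in SPC-4, 001b means the opcode is not supported
and 011b means it is supported in conformance with the standard, so masking
the low two bits and comparing against 3 only accepts a positive answer. A
minimal sketch of that decode, assuming the SPC-4 encoding:

	/* SUPPORT field, byte 1 of RSOC one-command parameter data:
	 * 0 = unknown, 1 = not supported, 3 = supported per the standard.
	 */
	static bool rsoc_opcode_supported(const unsigned char *buffer)
	{
		return (buffer[1] & 3) == 3;
	}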
+/**
* scsi_device_get - get an additional reference to a scsi_device
* @sdev: device to get a reference to
*
action = ACTION_FAIL;
error = -EILSEQ;
/* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
- } else if ((sshdr.asc == 0x20 || sshdr.asc == 0x24) &&
- (cmd->cmnd[0] == UNMAP ||
- cmd->cmnd[0] == WRITE_SAME_16 ||
- cmd->cmnd[0] == WRITE_SAME)) {
- description = "Discard failure";
+ } else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) {
+ switch (cmd->cmnd[0]) {
+ case UNMAP:
+ description = "Discard failure";
+ break;
+ case WRITE_SAME:
+ case WRITE_SAME_16:
+ if (cmd->cmnd[1] & 0x8)
+ description = "Discard failure";
+ else
+ description =
+ "Write same failure";
+ break;
+ default:
+ description = "Invalid command failure";
+ break;
+ }
action = ACTION_FAIL;
error = -EREMOTEIO;
} else
#endif
static void sd_config_discard(struct scsi_disk *, unsigned int);
+static void sd_config_write_same(struct scsi_disk *);
static int sd_revalidate_disk(struct gendisk *);
static void sd_unlock_native_capacity(struct gendisk *disk);
static int sd_probe(struct device *);
return err ? err : count;
}
+static ssize_t
+sd_show_write_same_blocks(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+
+ return snprintf(buf, 20, "%u\n", sdkp->max_ws_blocks);
+}
+
+static ssize_t
+sd_store_write_same_blocks(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+ struct scsi_device *sdp = sdkp->device;
+ unsigned long max;
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ if (sdp->type != TYPE_DISK)
+ return -EINVAL;
+
+ err = kstrtoul(buf, 10, &max);
+
+ if (err)
+ return err;
+
+ if (max == 0)
+ sdp->no_write_same = 1;
+ else if (max <= SD_MAX_WS16_BLOCKS)
+ sdkp->max_ws_blocks = max;
+
+ sd_config_write_same(sdkp);
+
+ return count;
+}
+
static struct device_attribute sd_disk_attrs[] = {
__ATTR(cache_type, S_IRUGO|S_IWUSR, sd_show_cache_type,
sd_store_cache_type),
__ATTR(thin_provisioning, S_IRUGO, sd_show_thin_provisioning, NULL),
__ATTR(provisioning_mode, S_IRUGO|S_IWUSR, sd_show_provisioning_mode,
sd_store_provisioning_mode),
+ __ATTR(max_write_same_blocks, S_IRUGO|S_IWUSR,
+ sd_show_write_same_blocks, sd_store_write_same_blocks),
__ATTR(max_medium_access_timeouts, S_IRUGO|S_IWUSR,
sd_show_max_medium_access_timeouts,
sd_store_max_medium_access_timeouts),
return;
case SD_LBP_UNMAP:
- max_blocks = min_not_zero(sdkp->max_unmap_blocks, 0xffffffff);
+ max_blocks = min_not_zero(sdkp->max_unmap_blocks,
+ (u32)SD_MAX_WS16_BLOCKS);
break;
case SD_LBP_WS16:
- max_blocks = min_not_zero(sdkp->max_ws_blocks, 0xffffffff);
+ max_blocks = min_not_zero(sdkp->max_ws_blocks,
+ (u32)SD_MAX_WS16_BLOCKS);
break;
case SD_LBP_WS10:
- max_blocks = min_not_zero(sdkp->max_ws_blocks, (u32)0xffff);
+ max_blocks = min_not_zero(sdkp->max_ws_blocks,
+ (u32)SD_MAX_WS10_BLOCKS);
break;
case SD_LBP_ZERO:
- max_blocks = min_not_zero(sdkp->max_ws_blocks, (u32)0xffff);
+ max_blocks = min_not_zero(sdkp->max_ws_blocks,
+ (u32)SD_MAX_WS10_BLOCKS);
q->limits.discard_zeroes_data = 1;
break;
}
}
/**
- * scsi_setup_discard_cmnd - unmap blocks on thinly provisioned device
+ * sd_setup_discard_cmnd - unmap blocks on thinly provisioned device
 * @sdp: scsi device to operate on
* @rq: Request to prepare
*
* Will issue either UNMAP or WRITE SAME(16) depending on preference
* indicated by target device.
**/
-static int scsi_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq)
+static int sd_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq)
{
struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
- struct bio *bio = rq->bio;
- sector_t sector = bio->bi_sector;
- unsigned int nr_sectors = bio_sectors(bio);
+ sector_t sector = blk_rq_pos(rq);
+ unsigned int nr_sectors = blk_rq_sectors(rq);
+ unsigned int nr_bytes = blk_rq_bytes(rq);
unsigned int len;
int ret;
char *buf;
struct page *page;
- if (sdkp->device->sector_size == 4096) {
- sector >>= 3;
- nr_sectors >>= 3;
- }
-
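+	/*
+	 * Convert the 512-byte units used by the block layer to logical
+	 * blocks for any power-of-two sector size; e.g. 4096-byte sectors
+	 * give ilog2(4096) - 9 = 3, matching the old hard-coded ">>= 3".
+	 */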
+ sector >>= ilog2(sdp->sector_size) - 9;
+ nr_sectors >>= ilog2(sdp->sector_size) - 9;
rq->timeout = SD_TIMEOUT;
memset(rq->cmd, 0, rq->cmd_len);
blk_add_request_payload(rq, page, len);
ret = scsi_setup_blk_pc_cmnd(sdp, rq);
rq->buffer = page_address(page);
+ rq->__data_len = nr_bytes;
out:
if (ret != BLKPREP_OK) {
return ret;
}
+static void sd_config_write_same(struct scsi_disk *sdkp)
+{
+ struct request_queue *q = sdkp->disk->queue;
+ unsigned int logical_block_size = sdkp->device->sector_size;
+ unsigned int blocks = 0;
+
+ if (sdkp->device->no_write_same) {
+ sdkp->max_ws_blocks = 0;
+ goto out;
+ }
+
+ /* Some devices can not handle block counts above 0xffff despite
+ * supporting WRITE SAME(16). Consequently we default to 64k
+ * blocks per I/O unless the device explicitly advertises a
+ * bigger limit.
+ */
+ if (sdkp->max_ws_blocks == 0)
+ sdkp->max_ws_blocks = SD_MAX_WS10_BLOCKS;
+
+ if (sdkp->ws16 || sdkp->max_ws_blocks > SD_MAX_WS10_BLOCKS)
+ blocks = min_not_zero(sdkp->max_ws_blocks,
+ (u32)SD_MAX_WS16_BLOCKS);
+ else
+ blocks = min_not_zero(sdkp->max_ws_blocks,
+ (u32)SD_MAX_WS10_BLOCKS);
+
+out:
+ blk_queue_max_write_same_sectors(q, blocks * (logical_block_size >> 9));
+}
+
+/**
+ * sd_setup_write_same_cmnd - write the same data to multiple blocks
+ * @sdp: scsi device to operate on
+ * @rq: Request to prepare
+ *
+ * Will issue either WRITE SAME(10) or WRITE SAME(16) depending on
+ * preference indicated by target device.
+ **/
+static int sd_setup_write_same_cmnd(struct scsi_device *sdp, struct request *rq)
+{
+ struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+ struct bio *bio = rq->bio;
+ sector_t sector = blk_rq_pos(rq);
+ unsigned int nr_sectors = blk_rq_sectors(rq);
+ unsigned int nr_bytes = blk_rq_bytes(rq);
+ int ret;
+
+ if (sdkp->device->no_write_same)
+ return BLKPREP_KILL;
+
+ BUG_ON(bio_offset(bio) || bio_iovec(bio)->bv_len != sdp->sector_size);
+
+ sector >>= ilog2(sdp->sector_size) - 9;
+ nr_sectors >>= ilog2(sdp->sector_size) - 9;
+
+ rq->__data_len = sdp->sector_size;
+ rq->timeout = SD_WRITE_SAME_TIMEOUT;
+ memset(rq->cmd, 0, rq->cmd_len);
+
+ if (sdkp->ws16 || sector > 0xffffffff || nr_sectors > 0xffff) {
+ rq->cmd_len = 16;
+ rq->cmd[0] = WRITE_SAME_16;
+ put_unaligned_be64(sector, &rq->cmd[2]);
+ put_unaligned_be32(nr_sectors, &rq->cmd[10]);
+ } else {
+ rq->cmd_len = 10;
+ rq->cmd[0] = WRITE_SAME;
+ put_unaligned_be32(sector, &rq->cmd[2]);
+ put_unaligned_be16(nr_sectors, &rq->cmd[7]);
+ }
+
+ ret = scsi_setup_blk_pc_cmnd(sdp, rq);
+ rq->__data_len = nr_bytes;
+
+ return ret;
+}
+
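The CDB choice above follows the command formats: WRITE SAME(10) carries a
32-bit LBA and a 16-bit block count, so any start beyond 0xffffffff logical
blocks or any count above 0xffff forces the 16-byte variant. A worked
example (values in logical blocks):

	/* sector = 0x100000000 or nr_sectors = 0x10000 -> WRITE SAME(16)
	 * sector = 0x1000 and nr_sectors = 0x100, !ws16 -> WRITE SAME(10)
	 */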
static int scsi_setup_flush_cmnd(struct scsi_device *sdp, struct request *rq)
{
rq->timeout = SD_FLUSH_TIMEOUT;
* block PC requests to make life easier.
*/
if (rq->cmd_flags & REQ_DISCARD) {
- ret = scsi_setup_discard_cmnd(sdp, rq);
+ ret = sd_setup_discard_cmnd(sdp, rq);
+ goto out;
+ } else if (rq->cmd_flags & REQ_WRITE_SAME) {
+ ret = sd_setup_write_same_cmnd(sdp, rq);
goto out;
} else if (rq->cmd_flags & REQ_FLUSH) {
ret = scsi_setup_flush_cmnd(sdp, rq);
unsigned int good_bytes = result ? 0 : scsi_bufflen(SCpnt);
struct scsi_sense_hdr sshdr;
struct scsi_disk *sdkp = scsi_disk(SCpnt->request->rq_disk);
+ struct request *req = SCpnt->request;
int sense_valid = 0;
int sense_deferred = 0;
unsigned char op = SCpnt->cmnd[0];
+ unsigned char unmap = SCpnt->cmnd[1] & 8;
- if ((SCpnt->request->cmd_flags & REQ_DISCARD) && !result)
- scsi_set_resid(SCpnt, 0);
+ if (req->cmd_flags & REQ_DISCARD || req->cmd_flags & REQ_WRITE_SAME) {
+ if (!result) {
+ good_bytes = blk_rq_bytes(req);
+ scsi_set_resid(SCpnt, 0);
+ } else {
+ good_bytes = 0;
+ scsi_set_resid(SCpnt, blk_rq_bytes(req));
+ }
+ }
if (result) {
sense_valid = scsi_command_normalize_sense(SCpnt, &sshdr);
if (sshdr.asc == 0x10) /* DIX: Host detected corruption */
good_bytes = sd_completed_bytes(SCpnt);
/* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
- if ((sshdr.asc == 0x20 || sshdr.asc == 0x24) &&
- (op == UNMAP || op == WRITE_SAME_16 || op == WRITE_SAME))
- sd_config_discard(sdkp, SD_LBP_DISABLE);
+ if (sshdr.asc == 0x20 || sshdr.asc == 0x24) {
+ switch (op) {
+ case UNMAP:
+ sd_config_discard(sdkp, SD_LBP_DISABLE);
+ break;
+ case WRITE_SAME_16:
+ case WRITE_SAME:
+ if (unmap)
+ sd_config_discard(sdkp, SD_LBP_DISABLE);
+ else {
+ sdkp->device->no_write_same = 1;
+ sd_config_write_same(sdkp);
+
+ good_bytes = 0;
+ req->__data_len = blk_rq_bytes(req);
+ req->cmd_flags |= REQ_QUIET;
+ }
+ }
+ }
break;
default:
break;
if (buffer[3] == 0x3c) {
unsigned int lba_count, desc_count;
- sdkp->max_ws_blocks =
- (u32) min_not_zero(get_unaligned_be64(&buffer[36]),
- (u64)0xffffffff);
+ sdkp->max_ws_blocks = (u32)get_unaligned_be64(&buffer[36]);
if (!sdkp->lbpme)
goto out;
kfree(buffer);
}
+static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
+{
+ if (scsi_report_opcode(sdkp->device, buffer, SD_BUF_SIZE,
+ WRITE_SAME_16))
+ sdkp->ws16 = 1;
+}
+
static int sd_try_extended_inquiry(struct scsi_device *sdp)
{
/*
sd_read_write_protect_flag(sdkp, buffer);
sd_read_cache_type(sdkp, buffer);
sd_read_app_tag_own(sdkp, buffer);
+ sd_read_write_same(sdkp, buffer);
}
sdkp->first_scan = 0;
blk_queue_flush(sdkp->disk->queue, flush);
set_capacity(disk, sdkp->capacity);
+ sd_config_write_same(sdkp);
kfree(buffer);
out:
#define SD_TIMEOUT (30 * HZ)
#define SD_MOD_TIMEOUT (75 * HZ)
#define SD_FLUSH_TIMEOUT (60 * HZ)
+#define SD_WRITE_SAME_TIMEOUT (120 * HZ)
/*
* Number of allowed retries
};
enum {
+ SD_MAX_WS10_BLOCKS = 0xffff,
+ SD_MAX_WS16_BLOCKS = 0x7fffff,
+};
+
+enum {
SD_LBP_FULL = 0, /* Full logical block provisioning */
SD_LBP_UNMAP, /* Use UNMAP command */
SD_LBP_WS16, /* Use WRITE SAME(16) with UNMAP bit */
unsigned lbpws : 1;
unsigned lbpws10 : 1;
unsigned lbpvpd : 1;
+ unsigned ws16 : 1;
};
#define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev)
if (!ret) {
dev_err(ssp->dev, "DMA transfer timeout\n");
ret = -ETIMEDOUT;
+ dmaengine_terminate_all(ssp->dmach);
goto err_vmalloc;
}
first = last = 0;
}
- m->status = 0;
+ m->status = status;
spi_finalize_current_message(master);
return status;
printk(KERN_INFO "pl022: mapped registers from 0x%08x to %p\n",
adev->res.start, pl022->virtbase);
- pm_runtime_resume(dev);
-
pl022->clk = devm_clk_get(&adev->dev, NULL);
if (IS_ERR(pl022->clk)) {
status = PTR_ERR(pl022->clk);
clk_disable(pl022->clk);
clk_unprepare(pl022->clk);
- pm_runtime_disable(&adev->dev);
amba_release_regions(adev);
tasklet_disable(&pl022->pump_transfers);
spi_unregister_master(pl022->master);
unsigned char spsr;
/* for dmaengine */
- struct sh_dmae_slave dma_tx;
- struct sh_dmae_slave dma_rx;
struct dma_chan *chan_tx;
struct dma_chan *chan_rx;
int irq;
return ret;
}
-static bool rspi_filter(struct dma_chan *chan, void *filter_param)
-{
- chan->private = filter_param;
- return true;
-}
-
-static void __devinit rspi_request_dma(struct rspi_data *rspi,
- struct platform_device *pdev)
+static int __devinit rspi_request_dma(struct rspi_data *rspi,
+ struct platform_device *pdev)
{
struct rspi_plat_data *rspi_pd = pdev->dev.platform_data;
dma_cap_mask_t mask;
+ struct dma_slave_config cfg;
+ int ret;
if (!rspi_pd)
- return;
+ return 0; /* The driver assumes no error. */
rspi->dma_width_16bit = rspi_pd->dma_width_16bit;
if (rspi_pd->dma_rx_id && rspi_pd->dma_tx_id) {
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
- rspi->dma_rx.slave_id = rspi_pd->dma_rx_id;
- rspi->chan_rx = dma_request_channel(mask, rspi_filter,
- &rspi->dma_rx);
- if (rspi->chan_rx)
- dev_info(&pdev->dev, "Use DMA when rx.\n");
+ rspi->chan_rx = dma_request_channel(mask, shdma_chan_filter,
+ (void *)rspi_pd->dma_rx_id);
+ if (rspi->chan_rx) {
+ cfg.slave_id = rspi_pd->dma_rx_id;
+ cfg.direction = DMA_DEV_TO_MEM;
+ ret = dmaengine_slave_config(rspi->chan_rx, &cfg);
+ if (!ret)
+ dev_info(&pdev->dev, "Use DMA when rx.\n");
+ else
+ return ret;
+ }
}
if (rspi_pd->dma_tx_id) {
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
- rspi->dma_tx.slave_id = rspi_pd->dma_tx_id;
- rspi->chan_tx = dma_request_channel(mask, rspi_filter,
- &rspi->dma_tx);
- if (rspi->chan_tx)
- dev_info(&pdev->dev, "Use DMA when tx\n");
+ rspi->chan_tx = dma_request_channel(mask, shdma_chan_filter,
+ (void *)rspi_pd->dma_tx_id);
+ if (rspi->chan_tx) {
+ cfg.slave_id = rspi_pd->dma_tx_id;
+ cfg.direction = DMA_MEM_TO_DEV;
+ ret = dmaengine_slave_config(rspi->chan_tx, &cfg);
+ if (!ret)
+ dev_info(&pdev->dev, "Use DMA when tx\n");
+ else
+ return ret;
+ }
}
+
+ return 0;
}
static void __devexit rspi_release_dma(struct rspi_data *rspi)
}
rspi->irq = irq;
- rspi_request_dma(rspi, pdev);
+ ret = rspi_request_dma(rspi, pdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "rspi_request_dma failed.\n");
+ goto error4;
+ }
ret = spi_register_master(master);
if (ret < 0) {
#define ANDROID_ALARM_WAIT _IO('a', 1)
#define ALARM_IOW(c, type, size) _IOW('a', (c) | ((type) << 4), size)
-#define ALARM_IOR(c, type, size) _IOR('a', (c) | ((type) << 4), size)
-
/* Set alarm */
#define ANDROID_ALARM_SET(type) ALARM_IOW(2, type, struct timespec)
#define ANDROID_ALARM_SET_AND_WAIT(type) ALARM_IOW(3, type, struct timespec)
-#define ANDROID_ALARM_GET_TIME(type) ALARM_IOR(4, type, struct timespec)
+#define ANDROID_ALARM_GET_TIME(type) ALARM_IOW(4, type, struct timespec)
#define ANDROID_ALARM_SET_RTC _IOW('a', 5, struct timespec)
#define ANDROID_ALARM_BASE_CMD(cmd) (cmd & ~(_IOC(0, 0, 0xf0, 0)))
#define ANDROID_ALARM_IOCTL_TO_TYPE(cmd) (_IOC_NR(cmd) >> 4)
page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
BUG_ON(*page);
- *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
if (*page == NULL) {
pr_err("binder: %d: binder_alloc_buf failed "
"for page at %p\n", proc->pid, page_addr);
struct binder_transaction *t;
t = container_of(w, struct binder_transaction, work);
- if (t->buffer->target_node && !(t->flags & TF_ONE_WAY))
+ if (t->buffer->target_node &&
+ !(t->flags & TF_ONE_WAY)) {
binder_send_failed_reply(t, BR_DEAD_REPLY);
+ } else {
+ binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
+ "binder: undelivered transaction %d\n",
+ t->debug_id);
+ t->buffer->transaction = NULL;
+ kfree(t);
+ binder_stats_deleted(BINDER_STAT_TRANSACTION);
+ }
} break;
case BINDER_WORK_TRANSACTION_COMPLETE: {
+ binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
+ "binder: undelivered TRANSACTION_COMPLETE\n");
kfree(w);
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
} break;
+ case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
+ case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
+ struct binder_ref_death *death;
+
+ death = container_of(w, struct binder_ref_death, work);
+ binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
+ "binder: undelivered death notification, %p\n",
+ death->cookie);
+ kfree(death);
+ binder_stats_deleted(BINDER_STAT_DEATH);
+ } break;
default:
+ pr_err("binder: unexpected work type, %d, not freed\n",
+ w->type);
break;
}
}
nodes++;
rb_erase(&node->rb_node, &proc->nodes);
list_del_init(&node->work.entry);
+ binder_release_work(&node->async_todo);
if (hlist_empty(&node->refs)) {
kfree(node);
binder_stats_deleted(BINDER_STAT_NODE);
binder_delete_ref(ref);
}
binder_release_work(&proc->todo);
+ binder_release_work(&proc->delivered_death);
buffers = 0;
while ((n = rb_first(&proc->allocated_buffers))) {
struct comedi_subdevice *s;
int i;
+ if (!board || !devpriv)
+ return;
if (dev->subdevices) {
for (i = 0; i < board->n_8255; i++) {
s = &dev->subdevices[i];
const struct dio200_layout_struct *layout;
unsigned n;
+ if (!thisboard)
+ return;
if (dev->irq)
free_irq(dev->irq, dev);
if (dev->subdevices) {
static void pc236_detach(struct comedi_device *dev)
{
const struct pc236_board *thisboard = comedi_board(dev);
- struct pc236_private *devpriv = dev->private;
- if (devpriv)
+ if (!thisboard)
+ return;
+ if (dev->iobase)
pc236_intr_disable(dev);
if (dev->irq)
free_irq(dev->irq, dev);
{
const struct pc263_board *thisboard = comedi_board(dev);
+ if (!thisboard)
+ return;
if (is_isa_board(thisboard)) {
if (dev->iobase)
release_region(dev->iobase, PC263_IO_SIZE);
{
const struct das08_board_struct *thisboard = comedi_board(dev);
+ if (!thisboard)
+ return;
das08_common_detach(dev);
if (is_isa_board(thisboard)) {
if (dev->iobase)
}
data[1] = s->state & 0xff;
- data[1] |= inb(dev->iobase + DIO_R);
+ data[1] |= inb(dev->iobase + DIO_R) << 8;
return insn->n;
}
{
struct comedi_subdevice *s;
+ if (!thisboard)
+ return;
if (dev->subdevices) {
s = &dev->subdevices[2];
subdev_8255_cleanup(dev, s);
case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_VOLTAGE:
- *val = 0;
- if (chan->channel == 0)
- *val2 = 1220;
- else
- *val2 = 610;
+ if (chan->channel == 0) {
+ *val = 1;
+ *val2 = 220000; /* 1.22 mV */
+ } else {
+ *val = 0;
+ *val2 = 610000; /* 0.610 mV */
+ }
return IIO_VAL_INT_PLUS_MICRO;
case IIO_TEMP:
- *val = 0;
- *val2 = -470000;
+		*val = -470; /* -0.47 C */
+ *val2 = 0;
return IIO_VAL_INT_PLUS_MICRO;
case IIO_ACCEL:
*val = 0;
- *val2 = 462500;
- return IIO_VAL_INT_PLUS_MICRO;
+ *val2 = IIO_G_TO_M_S_2(462400); /* 0.4624 mg */
+ return IIO_VAL_INT_PLUS_NANO;
case IIO_INCLI:
*val = 0;
- *val2 = 100000;
+ *val2 = 100000; /* 0.1 degree */
return IIO_VAL_INT_PLUS_MICRO;
default:
return -EINVAL;
}
break;
case IIO_CHAN_INFO_OFFSET:
- *val = 25;
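+		/*
+		 * The offset is chosen so that (raw + offset) * scale yields
+		 * milli-degrees C: the part maps raw 1278 to 25 C at
+		 * -0.47 C/LSB, hence 25000 / -470 - 1278.
+		 */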
+ *val = 25000 / -470 - 1278; /* 25 C = 1278 */
return IIO_VAL_INT;
case IIO_CHAN_INFO_CALIBBIAS:
switch (chan->type) {
case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_VOLTAGE:
- *val = 0;
- if (chan->channel == 0)
- *val2 = 1220;
- else
- *val2 = 610;
+ if (chan->channel == 0) {
+ *val = 1;
+ *val2 = 220000; /* 1.22 mV */
+ } else {
+ *val = 0;
+ *val2 = 610000; /* 0.61 mV */
+ }
return IIO_VAL_INT_PLUS_MICRO;
case IIO_TEMP:
- *val = 0;
- *val2 = -470000;
+ *val = -470; /* -0.47 C */
+ *val2 = 0;
return IIO_VAL_INT_PLUS_MICRO;
case IIO_INCLI:
*val = 0;
- *val2 = 25000;
+ *val2 = 25000; /* 0.025 degree */
return IIO_VAL_INT_PLUS_MICRO;
default:
return -EINVAL;
}
case IIO_CHAN_INFO_OFFSET:
- *val = 25;
+ *val = 25000 / -470 - 1278; /* 25 C = 1278 */
return IIO_VAL_INT;
case IIO_CHAN_INFO_CALIBBIAS:
bits = 14;
case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_VOLTAGE:
- *val = 0;
- if (chan->channel == 0)
- *val2 = 1220;
- else
- *val2 = 610;
+ if (chan->channel == 0) {
+ *val = 1;
+ *val2 = 220000; /* 1.22 mV */
+ } else {
+ *val = 0;
+ *val2 = 610000; /* 0.61 mV */
+ }
return IIO_VAL_INT_PLUS_MICRO;
case IIO_TEMP:
- *val = 0;
- *val2 = -470000;
+		*val = -470; /* -0.47 C */
+ *val2 = 0;
return IIO_VAL_INT_PLUS_MICRO;
case IIO_ACCEL:
*val = 0;
switch (chan->channel2) {
case IIO_MOD_X:
case IIO_MOD_ROOT_SUM_SQUARED_X_Y:
- *val2 = 17125;
+ *val2 = IIO_G_TO_M_S_2(17125); /* 17.125 mg */
break;
case IIO_MOD_Y:
case IIO_MOD_Z:
- *val2 = 8407;
+ *val2 = IIO_G_TO_M_S_2(8407); /* 8.407 mg */
break;
}
return IIO_VAL_INT_PLUS_MICRO;
}
break;
case IIO_CHAN_INFO_OFFSET:
- *val = 25;
+ *val = 25000 / -470 - 1278; /* 25 C = 1278 */
return IIO_VAL_INT;
case IIO_CHAN_INFO_CALIBBIAS:
case IIO_CHAN_INFO_PEAK:
case IIO_VOLTAGE:
*val = 0;
if (chan->channel == 0)
- *val2 = 305180;
+ *val2 = 305180; /* 0.30518 mV */
else
- *val2 = 610500;
+ *val2 = 610500; /* 0.6105 mV */
return IIO_VAL_INT_PLUS_MICRO;
case IIO_TEMP:
- *val = 0;
- *val2 = -470000;
+ *val = -470; /* -0.47 C */
+ *val2 = 0;
return IIO_VAL_INT_PLUS_MICRO;
case IIO_ACCEL:
*val = 0;
- *val2 = 2394;
- return IIO_VAL_INT_PLUS_MICRO;
+ *val2 = IIO_G_TO_M_S_2(244140); /* 0.244140 mg */
+ return IIO_VAL_INT_PLUS_NANO;
case IIO_INCLI:
+ case IIO_ROT:
*val = 0;
- *val2 = 436;
+ *val2 = 25000; /* 0.025 degree */
return IIO_VAL_INT_PLUS_MICRO;
default:
return -EINVAL;
}
break;
case IIO_CHAN_INFO_OFFSET:
- *val = 25;
+ *val = 25000 / -470 - 0x4FE; /* 25 C = 0x4FE */
return IIO_VAL_INT;
case IIO_CHAN_INFO_CALIBBIAS:
switch (chan->type) {
.modified = 1,
.channel2 = IIO_MOD_X,
-	.info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT,
+	.info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
+		IIO_CHAN_INFO_SCALE_SHARED_BIT,
.address = rot,
.scan_index = ADIS16209_SCAN_ROT,
.scan_type = {
break;
case IIO_CHAN_INFO_OFFSET:
if (chan->type == IIO_TEMP) {
- *val = 25;
+ *val = 25000 / -470 - 1278; /* 25 C = 1278 */
return IIO_VAL_INT;
}
addrind = 1;
addrind = 2;
break;
case IIO_CHAN_INFO_SCALE:
- *val = 0;
switch (chan->type) {
case IIO_TEMP:
- *val2 = -470000;
+ *val = -470; /* -0.47 C */
+ *val2 = 0;
return IIO_VAL_INT_PLUS_MICRO;
case IIO_ACCEL:
- *val2 = 1887042;
+		*val2 = IIO_G_TO_M_S_2(19073); /* 19.073 mg */
return IIO_VAL_INT_PLUS_MICRO;
case IIO_VOLTAGE:
- if (chan->channel == 0)
- *val2 = 0012221;
- else /* Should really be dependent on VDD */
- *val2 = 305;
+ if (chan->channel == 0) {
+ *val = 1;
+ *val2 = 220700; /* 1.2207 mV */
+ } else {
+ /* Should really be dependent on VDD */
+ *val2 = 305180; /* 305.18 uV */
+ }
return IIO_VAL_INT_PLUS_MICRO;
default:
return -EINVAL;
case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_VOLTAGE:
- *val = 0;
- if (chan->channel == 0)
- *val2 = 4880;
- else
+ if (chan->channel == 0) {
+ *val = 4;
+ *val2 = 880000; /* 4.88 mV */
+ return IIO_VAL_INT_PLUS_MICRO;
+ } else {
return -EINVAL;
- return IIO_VAL_INT_PLUS_MICRO;
+ }
case IIO_TEMP:
- *val = 0;
- *val2 = 244000;
+ *val = 244; /* 0.244 C */
+ *val2 = 0;
return IIO_VAL_INT_PLUS_MICRO;
case IIO_ACCEL:
*val = 0;
- *val2 = 504062;
+ *val2 = IIO_G_TO_M_S_2(51400); /* 51.4 mg */
return IIO_VAL_INT_PLUS_MICRO;
default:
return -EINVAL;
}
break;
case IIO_CHAN_INFO_PEAK_SCALE:
- *val = 6;
- *val2 = 629295;
+ *val = 0;
+ *val2 = IIO_G_TO_M_S_2(51400); /* 51.4 mg */
return IIO_VAL_INT_PLUS_MICRO;
case IIO_CHAN_INFO_OFFSET:
- *val = 25;
+ *val = 25000 / 244 - 0x133; /* 25 C = 0x133 */
return IIO_VAL_INT;
case IIO_CHAN_INFO_CALIBBIAS:
bits = 10;
switch (chan->type) {
case IIO_ANGL_VEL:
*val = 0;
- if (spi_get_device_id(st->us)->driver_data)
- *val2 = 320;
- else
- *val2 = 1278;
+ if (spi_get_device_id(st->us)->driver_data) {
+ /* 0.01832 degree / sec */
+ *val2 = IIO_DEGREE_TO_RAD(18320);
+ } else {
+ /* 0.07326 degree / sec */
+ *val2 = IIO_DEGREE_TO_RAD(73260);
+ }
return IIO_VAL_INT_PLUS_MICRO;
case IIO_VOLTAGE:
- *val = 0;
- if (chan->channel == 0)
- *val2 = 18315;
- else
- *val2 = 610500;
+ if (chan->channel == 0) {
+ *val = 1;
+ *val2 = 831500; /* 1.8315 mV */
+ } else {
+ *val = 0;
+ *val2 = 610500; /* 610.5 uV */
+ }
return IIO_VAL_INT_PLUS_MICRO;
case IIO_TEMP:
- *val = 0;
- *val2 = 145300;
+ *val = 145;
+ *val2 = 300000; /* 0.1453 C */
return IIO_VAL_INT_PLUS_MICRO;
default:
return -EINVAL;
}
break;
case IIO_CHAN_INFO_OFFSET:
- *val = 25;
+ *val = 250000 / 1453; /* 25 C = 0x00 */
return IIO_VAL_INT;
case IIO_CHAN_INFO_CALIBBIAS:
switch (chan->type) {
const long flags;
unsigned int gyro_scale_micro;
unsigned int accel_scale_micro;
+ int temp_scale_nano;
+ int temp_offset;
unsigned long default_scan_mask;
};
return IIO_VAL_INT_PLUS_MICRO;
case IIO_VOLTAGE:
*val = 0;
- if (chan->channel == 0)
- *val2 = 2418;
- else
- *val2 = 806;
+ if (chan->channel == 0) {
+ *val = 2;
+ *val2 = 418000; /* 2.418 mV */
+ } else {
+ *val = 0;
+ *val2 = 805800; /* 805.8 uV */
+ }
return IIO_VAL_INT_PLUS_MICRO;
case IIO_ACCEL:
*val = 0;
return IIO_VAL_INT_PLUS_MICRO;
case IIO_MAGN:
*val = 0;
- *val2 = 500;
+ *val2 = 500; /* 0.5 mgauss */
return IIO_VAL_INT_PLUS_MICRO;
case IIO_TEMP:
- *val = 0;
- *val2 = 140000;
+		*val = st->variant->temp_scale_nano / 1000000000;
+		*val2 = (st->variant->temp_scale_nano % 1000000000);
-		return IIO_VAL_INT_PLUS_MICRO;
+		return IIO_VAL_INT_PLUS_NANO;
default:
return -EINVAL;
return IIO_VAL_INT;
case IIO_CHAN_INFO_OFFSET:
/* currently only temperature */
- *val = 198;
- *val2 = 160000;
- return IIO_VAL_INT_PLUS_MICRO;
+ *val = st->variant->temp_offset;
+ return IIO_VAL_INT;
case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
mutex_lock(&indio_dev->mlock);
/* Need both the number of taps and the sampling frequency */
.indexed = 1,
.channel = 0,
.info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
- IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_OFFSET_SEPARATE_BIT |
IIO_CHAN_INFO_SCALE_SHARED_BIT,
.address = temp0,
.scan_index = ADIS16400_SCAN_TEMP,
[ADIS16300] = {
.channels = adis16300_channels,
.num_channels = ARRAY_SIZE(adis16300_channels),
- .gyro_scale_micro = 873,
+ .gyro_scale_micro = IIO_DEGREE_TO_RAD(50000), /* 0.05 deg/s */
.accel_scale_micro = 5884,
+ .temp_scale_nano = 140000000, /* 0.14 C */
+ .temp_offset = 25000000 / 140000, /* 25 C = 0x00 */
.default_scan_mask = (1 << ADIS16400_SCAN_SUPPLY) |
(1 << ADIS16400_SCAN_GYRO_X) | (1 << ADIS16400_SCAN_ACC_X) |
(1 << ADIS16400_SCAN_ACC_Y) | (1 << ADIS16400_SCAN_ACC_Z) |
[ADIS16334] = {
.channels = adis16334_channels,
.num_channels = ARRAY_SIZE(adis16334_channels),
- .gyro_scale_micro = 873,
- .accel_scale_micro = 981,
+ .gyro_scale_micro = IIO_DEGREE_TO_RAD(50000), /* 0.05 deg/s */
+ .accel_scale_micro = IIO_G_TO_M_S_2(1000), /* 1 mg */
+ .temp_scale_nano = 67850000, /* 0.06785 C */
+ .temp_offset = 25000000 / 67850, /* 25 C = 0x00 */
.default_scan_mask = (1 << ADIS16400_SCAN_GYRO_X) |
(1 << ADIS16400_SCAN_GYRO_Y) | (1 << ADIS16400_SCAN_GYRO_Z) |
(1 << ADIS16400_SCAN_ACC_X) | (1 << ADIS16400_SCAN_ACC_Y) |
[ADIS16350] = {
.channels = adis16350_channels,
.num_channels = ARRAY_SIZE(adis16350_channels),
- .gyro_scale_micro = 872664,
- .accel_scale_micro = 24732,
+ .gyro_scale_micro = IIO_DEGREE_TO_RAD(73260), /* 0.07326 deg/s */
+ .accel_scale_micro = IIO_G_TO_M_S_2(2522), /* 0.002522 g */
+ .temp_scale_nano = 145300000, /* 0.1453 C */
+ .temp_offset = 25000000 / 145300, /* 25 C = 0x00 */
.default_scan_mask = 0x7FF,
.flags = ADIS16400_NO_BURST,
},
.num_channels = ARRAY_SIZE(adis16350_channels),
.flags = ADIS16400_HAS_PROD_ID,
.product_id = 0x3FE8,
- .gyro_scale_micro = 1279,
- .accel_scale_micro = 24732,
+ .gyro_scale_micro = IIO_DEGREE_TO_RAD(50000), /* 0.05 deg/s */
+ .accel_scale_micro = IIO_G_TO_M_S_2(3333), /* 3.333 mg */
+ .temp_scale_nano = 136000000, /* 0.136 C */
+ .temp_offset = 25000000 / 136000, /* 25 C = 0x00 */
.default_scan_mask = 0x7FF,
},
[ADIS16362] = {
.num_channels = ARRAY_SIZE(adis16350_channels),
.flags = ADIS16400_HAS_PROD_ID,
.product_id = 0x3FEA,
- .gyro_scale_micro = 1279,
- .accel_scale_micro = 24732,
+ .gyro_scale_micro = IIO_DEGREE_TO_RAD(50000), /* 0.05 deg/s */
+ .accel_scale_micro = IIO_G_TO_M_S_2(333), /* 0.333 mg */
+ .temp_scale_nano = 136000000, /* 0.136 C */
+ .temp_offset = 25000000 / 136000, /* 25 C = 0x00 */
.default_scan_mask = 0x7FF,
},
[ADIS16364] = {
.num_channels = ARRAY_SIZE(adis16350_channels),
.flags = ADIS16400_HAS_PROD_ID,
.product_id = 0x3FEC,
- .gyro_scale_micro = 1279,
- .accel_scale_micro = 24732,
+ .gyro_scale_micro = IIO_DEGREE_TO_RAD(50000), /* 0.05 deg/s */
+ .accel_scale_micro = IIO_G_TO_M_S_2(1000), /* 1 mg */
+ .temp_scale_nano = 136000000, /* 0.136 C */
+ .temp_offset = 25000000 / 136000, /* 25 C = 0x00 */
.default_scan_mask = 0x7FF,
},
[ADIS16365] = {
.num_channels = ARRAY_SIZE(adis16350_channels),
.flags = ADIS16400_HAS_PROD_ID,
.product_id = 0x3FED,
- .gyro_scale_micro = 1279,
- .accel_scale_micro = 24732,
+ .gyro_scale_micro = IIO_DEGREE_TO_RAD(50000), /* 0.05 deg/s */
+ .accel_scale_micro = IIO_G_TO_M_S_2(1000), /* 1 mg */
+ .temp_scale_nano = 136000000, /* 0.136 C */
+ .temp_offset = 25000000 / 136000, /* 25 C = 0x00 */
.default_scan_mask = 0x7FF,
},
[ADIS16400] = {
.num_channels = ARRAY_SIZE(adis16400_channels),
.flags = ADIS16400_HAS_PROD_ID,
.product_id = 0x4015,
- .gyro_scale_micro = 873,
- .accel_scale_micro = 32656,
+ .gyro_scale_micro = IIO_DEGREE_TO_RAD(50000), /* 0.05 deg/s */
+ .accel_scale_micro = IIO_G_TO_M_S_2(3333), /* 3.333 mg */
.default_scan_mask = 0xFFF,
+ .temp_scale_nano = 140000000, /* 0.14 C */
+ .temp_offset = 25000000 / 140000, /* 25 C = 0x00 */
}
};
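These tables replace raw datasheet numbers with the IIO unit-conversion
macros so every variant reports SI units. As a rough sketch (the
authoritative definitions live in the IIO headers), the macros scale
micro-units, or nano-units when the caller returns IIO_VAL_INT_PLUS_NANO:

	/* Approximate effect of the conversion macros used above. */
	#define SKETCH_G_TO_M_S_2(ug)	((ug) * 980665ULL / 100000ULL)
	#define SKETCH_DEG_TO_RAD(ud)	(((ud) * 314159ULL + 9000000ULL) / 18000000ULL)

	/* e.g. 1000 micro-g (1 mg)        -> 9806 micro-m/s^2
	 * 50000 micro-degree (0.05 deg/s) -> 873 micro-rad/s, matching the
	 * hard-coded 873 these entries previously used.
	 */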
*/
#include <linux/module.h>
+#include <linux/slab.h>
#include "tpci200.h"
static u16 tpci200_status_timeout[] = {
* DSS, GPU, etc. are not cache coherent:
*/
if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
- addrs = kmalloc(npages * sizeof(addrs), GFP_KERNEL);
+ addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL);
if (!addrs) {
ret = -ENOMEM;
goto free_pages;
0, PAGE_SIZE, DMA_BIDIRECTIONAL);
}
} else {
- addrs = kzalloc(npages * sizeof(addrs), GFP_KERNEL);
+ addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL);
if (!addrs) {
ret = -ENOMEM;
goto free_pages;
config RAMSTER
bool "Cross-machine RAM capacity sharing, aka peer-to-peer tmem"
depends on CONFIGFS_FS=y && SYSFS=y && !HIGHMEM && ZCACHE2=y
+ depends on NET
# must ensure struct page is 8-byte aligned
select HAVE_ALIGNED_STRUCT_PAGE if !64_BIT
default n
u32 ul_num_bytes,
struct hw_mmu_map_attrs_t *hw_attrs);
-bool wait_for_start(struct bridge_dev_context *dev_context, u32 dw_sync_addr);
+bool wait_for_start(struct bridge_dev_context *dev_context,
+ void __iomem *sync_addr);
/* ----------------------------------- Globals */
{
int status = 0;
struct bridge_dev_context *dev_context = dev_ctxt;
- u32 dw_sync_addr = 0;
+ void __iomem *sync_addr;
u32 ul_shm_base; /* Gpp Phys SM base addr(byte) */
u32 ul_shm_base_virt; /* Dsp Virt SM base addr */
u32 ul_tlb_base_virt; /* Base of MMU TLB entry */
+ u32 shm_sync_pa;
/* Offset of shm_base_virt from tlb_base_virt */
u32 ul_shm_offset_virt;
s32 entry_ndx;
/* Kernel logical address */
ul_shm_base = dev_context->atlb_entry[0].gpp_va + ul_shm_offset_virt;
+ /* SHM physical sync address */
+ shm_sync_pa = dev_context->atlb_entry[0].gpp_pa + ul_shm_offset_virt +
+ SHMSYNCOFFSET;
+
/* 2nd wd is used as sync field */
- dw_sync_addr = ul_shm_base + SHMSYNCOFFSET;
+ sync_addr = ioremap(shm_sync_pa, SZ_32);
+ if (!sync_addr)
+ return -ENOMEM;
+
/* Write a signature into the shm base + offset; this will
* get cleared when the DSP program starts. */
if ((ul_shm_base_virt == 0) || (ul_shm_base == 0)) {
pr_err("%s: Illegal SM base\n", __func__);
status = -EPERM;
} else
- __raw_writel(0xffffffff, dw_sync_addr);
+ __raw_writel(0xffffffff, sync_addr);
if (!status) {
resources = dev_context->resources;
* function is made available.
*/
void __iomem *ctrl = ioremap(0x48002000, SZ_4K);
- if (!ctrl)
+ if (!ctrl) {
+ iounmap(sync_addr);
return -ENOMEM;
+ }
(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK,
OMAP3430_RST1_IVA2_MASK, OMAP3430_IVA2_MOD,
(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, 0,
OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
- dev_dbg(bridge, "Waiting for Sync @ 0x%x\n", dw_sync_addr);
+ dev_dbg(bridge, "Waiting for Sync @ 0x%x\n", *(u32 *)sync_addr);
dev_dbg(bridge, "DSP c_int00 Address = 0x%x\n", dsp_addr);
if (dsp_debug)
- while (__raw_readw(dw_sync_addr))
+ while (__raw_readw(sync_addr))
;
/* Wait for DSP to clear word in shared memory */
/* Read the Location */
- if (!wait_for_start(dev_context, dw_sync_addr))
+ if (!wait_for_start(dev_context, sync_addr))
status = -ETIMEDOUT;
dev_get_symbol(dev_context->dev_obj, "_WDT_enable", &wdt_en);
/* Write the synchronization bit to indicate the
* completion of OPP table update to DSP
*/
- __raw_writel(0XCAFECAFE, dw_sync_addr);
+ __raw_writel(0XCAFECAFE, sync_addr);
/* update board state */
dev_context->brd_state = BRD_RUNNING;
dev_context->brd_state = BRD_UNKNOWN;
}
}
+
+ iounmap(sync_addr);
+
return status;
}
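
Stripped of the surrounding driver logic, this hunk converges on the usual ioremap() pattern for MMIO access; a minimal sketch (names illustrative, other error paths trimmed):

	void __iomem *sync_addr;

	sync_addr = ioremap(shm_sync_pa, SZ_32);	/* map, don't cast */
	if (!sync_addr)
		return -ENOMEM;

	__raw_writel(0xffffffff, sync_addr);	/* signature, cleared by the DSP */
	/* ... poll until the DSP clears it ... */
	iounmap(sync_addr);			/* unmap on every exit path */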
* ======== wait_for_start ========
 *      Wait for the signal from the DSP that it has started, or time out.
*/
-bool wait_for_start(struct bridge_dev_context *dev_context, u32 dw_sync_addr)
+bool wait_for_start(struct bridge_dev_context *dev_context,
+ void __iomem *sync_addr)
{
u16 timeout = TIHELEN_ACKTIMEOUT;
/* Wait for response from board */
- while (__raw_readw(dw_sync_addr) && --timeout)
+ while (__raw_readw(sync_addr) && --timeout)
udelay(10);
/* If timed out: return false */
};
/*
- * FUNCTION : mmu_flush_entry
- *
- * INPUTS:
- *
- * Identifier : base_address
- * Type : const u32
- * Description : Base Address of instance of MMU module
- *
- * RETURNS:
- *
- * Type : hw_status
- * Description : 0 -- No errors occurred
- * RET_BAD_NULL_PARAM -- A Pointer
- * Parameter was set to NULL
- *
- * PURPOSE: : Flush the TLB entry pointed by the
- * lock counter register
- * even if this entry is set protected
- *
- * METHOD: : Check the Input parameter and Flush a
- * single entry in the TLB.
- */
-static hw_status mmu_flush_entry(const void __iomem *base_address);
-
-/*
* FUNCTION : mmu_set_cam_entry
*
* INPUTS:
*
* Identifier : base_address
- * TypE : const u32
+ * Type : void __iomem *
* Description : Base Address of instance of MMU module
*
* Identifier : page_sz
*
* METHOD: : Check the Input parameters and set the CAM entry.
*/
-static hw_status mmu_set_cam_entry(const void __iomem *base_address,
+static hw_status mmu_set_cam_entry(void __iomem *base_address,
const u32 page_sz,
const u32 preserved_bit,
const u32 valid_bit,
* INPUTS:
*
* Identifier : base_address
- * Type : const u32
+ * Type : void __iomem *
* Description : Base Address of instance of MMU module
*
* Identifier : physical_addr
*
* METHOD: : Check the Input parameters and set the RAM entry.
*/
-static hw_status mmu_set_ram_entry(const void __iomem *base_address,
+static hw_status mmu_set_ram_entry(void __iomem *base_address,
const u32 physical_addr,
enum hw_endianism_t endianism,
enum hw_element_size_t element_size,
/* HW FUNCTIONS */
-hw_status hw_mmu_enable(const void __iomem *base_address)
+hw_status hw_mmu_enable(void __iomem *base_address)
{
hw_status status = 0;
return status;
}
-hw_status hw_mmu_disable(const void __iomem *base_address)
+hw_status hw_mmu_disable(void __iomem *base_address)
{
hw_status status = 0;
return status;
}
-hw_status hw_mmu_num_locked_set(const void __iomem *base_address,
+hw_status hw_mmu_num_locked_set(void __iomem *base_address,
u32 num_locked_entries)
{
hw_status status = 0;
return status;
}
-hw_status hw_mmu_victim_num_set(const void __iomem *base_address,
+hw_status hw_mmu_victim_num_set(void __iomem *base_address,
u32 victim_entry_num)
{
hw_status status = 0;
return status;
}
-hw_status hw_mmu_event_ack(const void __iomem *base_address, u32 irq_mask)
+hw_status hw_mmu_event_ack(void __iomem *base_address, u32 irq_mask)
{
hw_status status = 0;
return status;
}
-hw_status hw_mmu_event_disable(const void __iomem *base_address, u32 irq_mask)
+hw_status hw_mmu_event_disable(void __iomem *base_address, u32 irq_mask)
{
hw_status status = 0;
u32 irq_reg;
return status;
}
-hw_status hw_mmu_event_enable(const void __iomem *base_address, u32 irq_mask)
+hw_status hw_mmu_event_enable(void __iomem *base_address, u32 irq_mask)
{
hw_status status = 0;
u32 irq_reg;
return status;
}
-hw_status hw_mmu_event_status(const void __iomem *base_address, u32 *irq_mask)
+hw_status hw_mmu_event_status(void __iomem *base_address, u32 *irq_mask)
{
hw_status status = 0;
return status;
}
-hw_status hw_mmu_fault_addr_read(const void __iomem *base_address, u32 *addr)
+hw_status hw_mmu_fault_addr_read(void __iomem *base_address, u32 *addr)
{
hw_status status = 0;
return status;
}
-hw_status hw_mmu_ttb_set(const void __iomem *base_address, u32 ttb_phys_addr)
+hw_status hw_mmu_ttb_set(void __iomem *base_address, u32 ttb_phys_addr)
{
hw_status status = 0;
u32 load_ttb;
return status;
}
-hw_status hw_mmu_twl_enable(const void __iomem *base_address)
+hw_status hw_mmu_twl_enable(void __iomem *base_address)
{
hw_status status = 0;
return status;
}
-hw_status hw_mmu_twl_disable(const void __iomem *base_address)
+hw_status hw_mmu_twl_disable(void __iomem *base_address)
{
hw_status status = 0;
return status;
}
-hw_status hw_mmu_tlb_flush(const void __iomem *base_address, u32 virtual_addr,
- u32 page_sz)
-{
- hw_status status = 0;
- u32 virtual_addr_tag;
- enum hw_mmu_page_size_t pg_size_bits;
-
- switch (page_sz) {
- case HW_PAGE_SIZE4KB:
- pg_size_bits = HW_MMU_SMALL_PAGE;
- break;
-
- case HW_PAGE_SIZE64KB:
- pg_size_bits = HW_MMU_LARGE_PAGE;
- break;
-
- case HW_PAGE_SIZE1MB:
- pg_size_bits = HW_MMU_SECTION;
- break;
-
- case HW_PAGE_SIZE16MB:
- pg_size_bits = HW_MMU_SUPERSECTION;
- break;
-
- default:
- return -EINVAL;
- }
-
- /* Generate the 20-bit tag from virtual address */
- virtual_addr_tag = ((virtual_addr & MMU_ADDR_MASK) >> 12);
-
- mmu_set_cam_entry(base_address, pg_size_bits, 0, 0, virtual_addr_tag);
-
- mmu_flush_entry(base_address);
-
- return status;
-}
-
-hw_status hw_mmu_tlb_add(const void __iomem *base_address,
+hw_status hw_mmu_tlb_add(void __iomem *base_address,
u32 physical_addr,
u32 virtual_addr,
u32 page_sz,
return status;
}
-/* mmu_flush_entry */
-static hw_status mmu_flush_entry(const void __iomem *base_address)
-{
- hw_status status = 0;
- u32 flush_entry_data = 0x1;
-
- /* write values to register */
- MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32(base_address, flush_entry_data);
-
- return status;
-}
-
/* mmu_set_cam_entry */
-static hw_status mmu_set_cam_entry(const void __iomem *base_address,
+static hw_status mmu_set_cam_entry(void __iomem *base_address,
const u32 page_sz,
const u32 preserved_bit,
const u32 valid_bit,
}
/* mmu_set_ram_entry */
-static hw_status mmu_set_ram_entry(const void __iomem *base_address,
+static hw_status mmu_set_ram_entry(void __iomem *base_address,
const u32 physical_addr,
enum hw_endianism_t endianism,
enum hw_element_size_t element_size,
}
-void hw_mmu_tlb_flush_all(const void __iomem *base)
+void hw_mmu_tlb_flush_all(void __iomem *base)
{
__raw_writel(1, base + MMU_GFLUSH);
}
bool donotlockmpupage;
};
-extern hw_status hw_mmu_enable(const void __iomem *base_address);
+extern hw_status hw_mmu_enable(void __iomem *base_address);
-extern hw_status hw_mmu_disable(const void __iomem *base_address);
+extern hw_status hw_mmu_disable(void __iomem *base_address);
-extern hw_status hw_mmu_num_locked_set(const void __iomem *base_address,
+extern hw_status hw_mmu_num_locked_set(void __iomem *base_address,
u32 num_locked_entries);
-extern hw_status hw_mmu_victim_num_set(const void __iomem *base_address,
+extern hw_status hw_mmu_victim_num_set(void __iomem *base_address,
u32 victim_entry_num);
/* For MMU faults */
-extern hw_status hw_mmu_event_ack(const void __iomem *base_address,
+extern hw_status hw_mmu_event_ack(void __iomem *base_address,
u32 irq_mask);
-extern hw_status hw_mmu_event_disable(const void __iomem *base_address,
+extern hw_status hw_mmu_event_disable(void __iomem *base_address,
u32 irq_mask);
-extern hw_status hw_mmu_event_enable(const void __iomem *base_address,
+extern hw_status hw_mmu_event_enable(void __iomem *base_address,
u32 irq_mask);
-extern hw_status hw_mmu_event_status(const void __iomem *base_address,
+extern hw_status hw_mmu_event_status(void __iomem *base_address,
u32 *irq_mask);
-extern hw_status hw_mmu_fault_addr_read(const void __iomem *base_address,
+extern hw_status hw_mmu_fault_addr_read(void __iomem *base_address,
u32 *addr);
/* Set the TT base address */
-extern hw_status hw_mmu_ttb_set(const void __iomem *base_address,
+extern hw_status hw_mmu_ttb_set(void __iomem *base_address,
u32 ttb_phys_addr);
-extern hw_status hw_mmu_twl_enable(const void __iomem *base_address);
+extern hw_status hw_mmu_twl_enable(void __iomem *base_address);
-extern hw_status hw_mmu_twl_disable(const void __iomem *base_address);
+extern hw_status hw_mmu_twl_disable(void __iomem *base_address);
-extern hw_status hw_mmu_tlb_flush(const void __iomem *base_address,
- u32 virtual_addr, u32 page_sz);
-
-extern hw_status hw_mmu_tlb_add(const void __iomem *base_address,
+extern hw_status hw_mmu_tlb_add(void __iomem *base_address,
u32 physical_addr,
u32 virtual_addr,
u32 page_sz,
extern hw_status hw_mmu_pte_clear(const u32 pg_tbl_va,
u32 virtual_addr, u32 page_size);
-void hw_mmu_tlb_flush_all(const void __iomem *base);
+void hw_mmu_tlb_flush_all(void __iomem *base);
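
The const comes off all of these __iomem cookies because the MMIO write accessors take a non-const pointer, so a const base address forces a cast at every store. Compare (illustrative sketch):

	/* __raw_writel(u32 v, volatile void __iomem *a): addr is not const */
	static void flush_all(void __iomem *base)
	{
		__raw_writel(1, base + MMU_GFLUSH);	/* clean, no cast */
	}

	static void flush_all_const(const void __iomem *base)
	{
		/* const base: must cast the qualifier away at each write */
		__raw_writel(1, (void __iomem *)(base + MMU_GFLUSH));
	}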
static inline u32 hw_mmu_pte_addr_l1(u32 l1_base, u32 va)
{
u32 chnl_buf_size;
u32 num_chnls;
void __iomem *per_base;
- u32 per_pm_base;
- u32 core_pm_base;
+ void __iomem *per_pm_base;
+ void __iomem *core_pm_base;
void __iomem *dmmu_base;
};
#include <asm/cacheflush.h>
#include <linux/dma-mapping.h>
-/* TODO -- Remove, once BP defines them */
-#define INT_DSP_MMU_IRQ 28
+/* TODO -- Remove, once omap-iommu is used */
+#define INT_DSP_MMU_IRQ (28 + NR_IRQS)
#define PRCM_VDD1 1
OMAP_DSP_MEM3_SIZE);
host_res->per_base = ioremap(OMAP_PER_CM_BASE,
OMAP_PER_CM_SIZE);
- host_res->per_pm_base = (u32) ioremap(OMAP_PER_PRM_BASE,
- OMAP_PER_PRM_SIZE);
- host_res->core_pm_base = (u32) ioremap(OMAP_CORE_PRM_BASE,
- OMAP_CORE_PRM_SIZE);
+ host_res->per_pm_base = ioremap(OMAP_PER_PRM_BASE,
+ OMAP_PER_PRM_SIZE);
+ host_res->core_pm_base = ioremap(OMAP_CORE_PRM_BASE,
+ OMAP_CORE_PRM_SIZE);
host_res->dmmu_base = ioremap(OMAP_DMMU_BASE,
OMAP_DMMU_SIZE);
u32 pul_value;
u32 dynext_base;
u32 off_set = 0;
- u32 ul_stack_seg_addr, ul_stack_seg_val;
- u32 ul_gpp_mem_base;
+ u32 ul_stack_seg_val;
struct cfg_hostres *host_res;
struct bridge_dev_context *pbridge_context;
u32 mapped_addr = 0;
if (strcmp((char *)
pnode->dcd_props.obj_data.node_obj.ndb_props.
stack_seg_name, STACKSEGLABEL) == 0) {
+ void __iomem *stack_seg;
+ u32 stack_seg_pa;
+
status =
hnode_mgr->nldr_fxns.
get_fxn_addr(pnode->nldr_node_obj, "DYNEXT_BEG",
goto func_end;
}
- ul_gpp_mem_base = (u32) host_res->mem_base[1];
off_set = pul_value - dynext_base;
- ul_stack_seg_addr = ul_gpp_mem_base + off_set;
- ul_stack_seg_val = readl(ul_stack_seg_addr);
+ stack_seg_pa = host_res->mem_phys[1] + off_set;
+ stack_seg = ioremap(stack_seg_pa, SZ_32);
+ if (!stack_seg) {
+ status = -ENOMEM;
+ goto func_end;
+ }
+
+ ul_stack_seg_val = readl(stack_seg);
+
+ iounmap(stack_seg);
dev_dbg(bridge, "%s: StackSegVal = 0x%x, StackSegAddr ="
" 0x%x\n", __func__, ul_stack_seg_val,
- ul_stack_seg_addr);
+ host_res->mem_base[1] + off_set);
pnode->create_args.asa.task_arg_obj.stack_seg =
ul_stack_seg_val;
cmem = zs_map_object(zram->mem_pool, zram->table[index].handle,
ZS_MM_RO);
- ret = lzo1x_decompress_safe(cmem, zram->table[index].size,
+ if (zram->table[index].size == PAGE_SIZE) {
+ memcpy(uncmem, cmem, PAGE_SIZE);
+ ret = LZO_E_OK;
+ } else {
+ ret = lzo1x_decompress_safe(cmem, zram->table[index].size,
uncmem, &clen);
+ }
if (is_partial_io(bvec)) {
memcpy(user_mem + bvec->bv_offset, uncmem + offset,
goto out;
}
- if (unlikely(clen > max_zpage_size))
+ if (unlikely(clen > max_zpage_size)) {
zram_stat_inc(&zram->stats.bad_compress);
+ src = uncmem;
+ clen = PAGE_SIZE;
+ }
handle = zs_malloc(zram->mem_pool, clen);
if (!handle) {
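
These two zram hunks are halves of one technique: a page whose compressed size exceeds max_zpage_size is stored verbatim, with size == PAGE_SIZE doubling as the "not compressed" marker the read path tests for. In outline (a sketch of the combined logic, not the full driver):

	/* write: give up on compression when it doesn't pay */
	if (unlikely(clen > max_zpage_size)) {
		src = uncmem;		/* store the raw page instead */
		clen = PAGE_SIZE;	/* PAGE_SIZE marks "not compressed" */
	}

	/* read: a PAGE_SIZE entry was stored raw, so skip the decompressor */
	if (zram->table[index].size == PAGE_SIZE)
		memcpy(uncmem, cmem, PAGE_SIZE);
	else
		ret = lzo1x_decompress_safe(cmem, zram->table[index].size,
					    uncmem, &clen);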
*/
iscsit_thread_check_cpumask(conn, current, 1);
- schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT);
+ wait_event_interruptible(conn->queues_wq,
+ !iscsit_conn_all_queues_empty(conn) ||
+ ts->status == ISCSI_THREAD_SET_RESET);
if ((ts->status == ISCSI_THREAD_SET_RESET) ||
signal_pending(current))
};
struct iscsi_conn {
+ wait_queue_head_t queues_wq;
/* Authentication Successful for this connection */
u8 auth_complete;
/* State connection is currently in */
static int iscsi_login_init_conn(struct iscsi_conn *conn)
{
+ init_waitqueue_head(&conn->queues_wq);
INIT_LIST_HEAD(&conn->conn_list);
INIT_LIST_HEAD(&conn->conn_cmd_list);
INIT_LIST_HEAD(&conn->immed_queue_list);
atomic_set(&conn->check_immediate_queue, 1);
spin_unlock_bh(&conn->immed_queue_lock);
- wake_up_process(conn->thread_set->tx_thread);
+ wake_up(&conn->queues_wq);
}
struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *conn)
atomic_inc(&cmd->response_queue_count);
spin_unlock_bh(&conn->response_queue_lock);
- wake_up_process(conn->thread_set->tx_thread);
+ wake_up(&conn->queues_wq);
}
struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *conn)
}
}
+bool iscsit_conn_all_queues_empty(struct iscsi_conn *conn)
+{
+ bool empty;
+
+ spin_lock_bh(&conn->immed_queue_lock);
+ empty = list_empty(&conn->immed_queue_list);
+ spin_unlock_bh(&conn->immed_queue_lock);
+
+ if (!empty)
+ return empty;
+
+ spin_lock_bh(&conn->response_queue_lock);
+ empty = list_empty(&conn->response_queue_list);
+ spin_unlock_bh(&conn->response_queue_lock);
+
+ return empty;
+}
+
void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *conn)
{
struct iscsi_queue_req *qr, *qr_tmp;
extern void iscsit_add_cmd_to_response_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
extern struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *);
extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_conn *);
+extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *);
extern void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *);
extern void iscsit_release_cmd(struct iscsi_cmd *);
extern void iscsit_free_cmd(struct iscsi_cmd *);
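
The iscsi-target change replaces an unconditional sleep plus wake_up_process() with the standard waitqueue pairing, in which the sleeper re-checks its condition on every wakeup. The bare pattern, as a minimal sketch (names illustrative):

	static DECLARE_WAIT_QUEUE_HEAD(queues_wq);
	static bool work_pending;

	/* consumer: sleeps until the condition is true or a signal arrives */
	wait_event_interruptible(queues_wq, work_pending);

	/* producer: make the condition true first, then wake the sleeper */
	work_pending = true;
	wake_up(&queues_wq);

This is also why iscsit_conn_all_queues_empty() takes both queue locks: the condition must be safe to evaluate from the waker's and the sleeper's context alike.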
if (ret < 0)
goto out;
- if (core_dev_setup_virtual_lun0() < 0)
+ ret = core_dev_setup_virtual_lun0();
+ if (ret < 0)
goto out;
return 0;
static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
- u32 tmp, aligned_max_sectors;
+ u32 aligned_max_sectors;
+ u32 alignment;
/*
* Limit max_sectors to a PAGE_SIZE aligned value for modern
* transport_allocate_data_tasks() operation.
*/
- tmp = rounddown((max_sectors * block_size), PAGE_SIZE);
- aligned_max_sectors = (tmp / block_size);
- if (max_sectors != aligned_max_sectors) {
- printk(KERN_INFO "Rounding down aligned max_sectors from %u"
- " to %u\n", max_sectors, aligned_max_sectors);
- return aligned_max_sectors;
- }
+ alignment = max(1ul, PAGE_SIZE / block_size);
+ aligned_max_sectors = rounddown(max_sectors, alignment);
+
+ if (max_sectors != aligned_max_sectors)
+ pr_info("Rounding down aligned max_sectors from %u to %u\n",
+ max_sectors, aligned_max_sectors);
- return max_sectors;
+ return aligned_max_sectors;
}
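
Worked example of the new rounding, assuming 512-byte blocks and 4 KiB pages:

	alignment = max(1ul, 4096 / 512);		/* 8 sectors per page */
	aligned_max_sectors = rounddown(1023, 8);	/* 1016 */

Besides always returning the aligned value, the rewrite also sidesteps the potential 32-bit overflow lurking in the old max_sectors * block_size product.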
void se_dev_set_default_attribs(
return 0;
}
+static int sbc_emulate_noop(struct se_cmd *cmd)
+{
+ target_complete_cmd(cmd, GOOD);
+ return 0;
+}
+
static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)
{
return cmd->se_dev->se_sub_dev->se_dev_attrib.block_size * sectors;
size = 0;
cmd->execute_cmd = sbc_emulate_verify;
break;
+ case REZERO_UNIT:
+ case SEEK_6:
+ case SEEK_10:
+ /*
+ * There are still clients out there which use these old SCSI-2
+ * commands. This mainly happens when running VMs with legacy
+ * guest systems, connected via SCSI command pass-through to
+ * iSCSI targets. Make them happy and return status GOOD.
+ */
+ size = 0;
+ cmd->execute_cmd = sbc_emulate_noop;
+ break;
default:
ret = spc_parse_cdb(cmd, &size);
if (ret)
unsigned char buf[SE_INQUIRY_BUF];
int p, ret;
+ memset(buf, 0, SE_INQUIRY_BUF);
+
if (dev == tpg->tpg_virt_lun0.lun_se_dev)
buf[0] = 0x3f; /* Not connected */
else
printk("ABORT_TASK: Found referenced %s task_tag: %u\n",
se_cmd->se_tfo->get_fabric_name(), ref_tag);
- spin_lock_irq(&se_cmd->t_state_lock);
+ spin_lock(&se_cmd->t_state_lock);
if (se_cmd->transport_state & CMD_T_COMPLETE) {
printk("ABORT_TASK: ref_tag: %u already complete, skipping\n", ref_tag);
- spin_unlock_irq(&se_cmd->t_state_lock);
+ spin_unlock(&se_cmd->t_state_lock);
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
goto out;
}
se_cmd->transport_state |= CMD_T_ABORTED;
- spin_unlock_irq(&se_cmd->t_state_lock);
+ spin_unlock(&se_cmd->t_state_lock);
list_del_init(&se_cmd->se_cmd_list);
kref_get(&se_cmd->cmd_kref);
se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
se_cmd->se_tfo->queue_tm_rsp(se_cmd);
- transport_generic_free_cmd(se_cmd, 0);
}
/**
/*
	 * If the received CDB has already been aborted, stop processing it here.
*/
- if (transport_check_aborted_status(cmd, 1))
+ if (transport_check_aborted_status(cmd, 1)) {
+ complete(&cmd->t_transport_stop_comp);
return;
+ }
/*
* Determine if IOCTL context caller in requesting the stopping of this
unsigned long flags;
spin_lock_irqsave(&cmd->t_state_lock, flags);
- if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
+ if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION | SCF_SENT_DELAYED_TAS)) {
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return;
}
},
{ },
};
-MODULE_DEVICE_TABLE(platform, exynos4_tmu_driver_ids);
+MODULE_DEVICE_TABLE(platform, exynos_tmu_driver_ids);
static inline struct exynos_tmu_platform_data *exynos_get_driver_data(
struct platform_device *pdev)
goto error_free_priv;
}
- zone = thermal_zone_device_register("rcar_thermal", 0, priv,
+ zone = thermal_zone_device_register("rcar_thermal", 0, 0, priv,
&rcar_thermal_zone_ops, 0, 0);
if (IS_ERR(zone)) {
dev_err(&pdev->dev, "thermal zone device is NULL\n");
{
struct hvc_struct *hp = tty->driver_data;
unsigned long flags;
- int temp_open_count;
if (!hp)
return;
return;
}
- temp_open_count = hp->port.count;
hp->port.count = 0;
spin_unlock_irqrestore(&hp->port.lock, flags);
tty_port_tty_set(&hp->port, NULL);
if (hp->ops->notifier_hangup)
hp->ops->notifier_hangup(hp, hp->data);
-
- while(temp_open_count) {
- --temp_open_count;
- tty_port_put(&hp->port);
- }
}
/*
static const struct spi_device_id max310x_id_table[] = {
{ "max3107", MAX310X_TYPE_MAX3107 },
{ "max3108", MAX310X_TYPE_MAX3108 },
+ { }
};
MODULE_DEVICE_TABLE(spi, max310x_id_table);
/*
* IXON Flag:
- * Flow control for OMAP.TX
- * OMAP.RX should listen for XON/XOFF
+ * Enable XON/XOFF flow control on output.
+ * Transmit XON1, XOFF1
*/
if (termios->c_iflag & IXON)
- up->efr |= OMAP_UART_SW_RX;
+ up->efr |= OMAP_UART_SW_TX;
/*
* IXOFF Flag:
- * Flow control for OMAP.RX
- * OMAP.TX should send XON/XOFF
+ * Enable XON/XOFF flow control on input.
+ * Receiver compares XON1, XOFF1.
*/
if (termios->c_iflag & IXOFF)
- up->efr |= OMAP_UART_SW_TX;
+ up->efr |= OMAP_UART_SW_RX;
serial_out(up, UART_EFR, up->efr | UART_EFR_ECB);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
{
unsigned short *p = (unsigned short *) vc->vc_pos;
- scr_memmovew(p + nr, p, vc->vc_cols - vc->vc_x);
+ scr_memmovew(p + nr, p, (vc->vc_cols - vc->vc_x) * 2);
scr_memsetw(p, vc->vc_video_erase_char, nr * 2);
vc->vc_need_wrap = 0;
if (DO_UPDATE(vc))
do_update_region(vc, (unsigned long) p,
- (vc->vc_cols - vc->vc_x) / 2 + 1);
+ vc->vc_cols - vc->vc_x);
}
static void delete_char(struct vc_data *vc, unsigned int nr)
{
unsigned short *p = (unsigned short *) vc->vc_pos;
- scr_memcpyw(p, p + nr, vc->vc_cols - vc->vc_x - nr);
+ scr_memcpyw(p, p + nr, (vc->vc_cols - vc->vc_x - nr) * 2);
scr_memsetw(p + vc->vc_cols - vc->vc_x - nr, vc->vc_video_erase_char,
nr * 2);
vc->vc_need_wrap = 0;
if (DO_UPDATE(vc))
do_update_region(vc, (unsigned long) p,
- (vc->vc_cols - vc->vc_x) / 2);
+ vc->vc_cols - vc->vc_x);
}
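
The console fixes are pure units bookkeeping: scr_memmovew()/scr_memcpyw() take byte counts while the screen buffer holds 16-bit character cells, and do_update_region() counts characters. With vc_cols = 80 and vc_x = 10, for example:

	scr_memmovew(p + nr, p, (80 - 10) * 2);		/* 70 cells = 140 bytes */
	do_update_region(vc, (unsigned long)p, 80 - 10);	/* 70 characters */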
static int softcursor_original;
irqreturn_t usb_hcd_irq (int irq, void *__hcd)
{
struct usb_hcd *hcd = __hcd;
+ unsigned long flags;
irqreturn_t rc;
+ /* IRQF_DISABLED doesn't work correctly with shared IRQs
+ * when the first handler doesn't use it. So let's just
+ * assume it's never used.
+ */
+ local_irq_save(flags);
+
if (unlikely(HCD_DEAD(hcd) || !HCD_HW_ACCESSIBLE(hcd)))
rc = IRQ_NONE;
else if (hcd->driver->irq(hcd) == IRQ_NONE)
else
rc = IRQ_HANDLED;
+ local_irq_restore(flags);
return rc;
}
EXPORT_SYMBOL_GPL(usb_hcd_irq);
int retval;
if (hcd->driver->irq) {
+
+ /* IRQF_DISABLED doesn't work as advertised when used together
+ * with IRQF_SHARED. As usb_hcd_irq() will always disable
+ * interrupts we can remove it here.
+ */
+ if (irqflags & IRQF_SHARED)
+ irqflags &= ~IRQF_DISABLED;
+
snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d",
hcd->driver->description, hcd->self.busnum);
retval = request_irq(irqnum, &usb_hcd_irq, irqflags,
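
The two usb_hcd_irq hunks cooperate: since the handler now masks local interrupts itself, IRQF_DISABLED can safely be dropped whenever the line is shared. The shape of the handler-side replacement, as a sketch (chip_handle_irq() is a hypothetical stand-in for hcd->driver->irq):

	static irqreturn_t my_hcd_irq(int irq, void *dev_id)
	{
		unsigned long flags;
		irqreturn_t rc;

		local_irq_save(flags);		/* what IRQF_DISABLED used to guarantee */
		rc = chip_handle_irq(dev_id);	/* hypothetical chip-level handler */
		local_irq_restore(flags);

		return rc;
	}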
int limit = 100;
spin_lock_irqsave (&hub->tt.lock, flags);
- while (--limit && !list_empty (&hub->tt.clear_list)) {
+ while (!list_empty(&hub->tt.clear_list)) {
struct list_head *next;
struct usb_tt_clear *clear;
struct usb_device *hdev = hub->hdev;
const struct hc_driver *drv;
int status;
+ if (!hub->quiescing && --limit < 0)
+ break;
+
next = hub->tt.clear_list.next;
clear = list_entry (next, struct usb_tt_clear, clear_list);
list_del (&clear->clear_list);
if (hub->has_indicators)
cancel_delayed_work_sync(&hub->leds);
if (hub->tt.hub)
- cancel_work_sync(&hub->tt.clear_work);
+ flush_work(&hub->tt.clear_work);
}
/* caller has locked the hub device */
#include <linux/usb/ehci_def.h>
#include <linux/delay.h>
#include <linux/serial_core.h>
+#include <linux/kconfig.h>
#include <linux/kgdb.h>
#include <linux/kthread.h>
#include <asm/io.h>
return -ENODEV;
}
-int dbgp_external_startup(struct usb_hcd *hcd)
-{
- return xen_dbgp_external_startup(hcd) ?: _dbgp_external_startup();
-}
-EXPORT_SYMBOL_GPL(dbgp_external_startup);
-
static int ehci_reset_port(int port)
{
u32 portsc;
.index = -1,
};
+#if IS_ENABLED(CONFIG_USB_EHCI_HCD)
int dbgp_reset_prep(struct usb_hcd *hcd)
{
int ret = xen_dbgp_reset_prep(hcd);
}
EXPORT_SYMBOL_GPL(dbgp_reset_prep);
+int dbgp_external_startup(struct usb_hcd *hcd)
+{
+ return xen_dbgp_external_startup(hcd) ?: _dbgp_external_startup();
+}
+EXPORT_SYMBOL_GPL(dbgp_external_startup);
+#endif /* USB_EHCI_HCD */
+
#ifdef CONFIG_KGDB
static char kgdbdbgp_buf[DBGP_MAX_PACKET];
#if defined(PLX_PCI_RDK2)
/* see if PCI int for us by checking irqstat */
intcsr = readl(dev->rdk2.fpga_base_addr + RDK2_IRQSTAT);
- if (!intcsr & (1 << NET2272_PCI_IRQ))
+	if (!(intcsr & (1 << NET2272_PCI_IRQ))) {
+ spin_unlock(&dev->lock);
return IRQ_NONE;
+ }
/* check dma interrupts */
#endif
/* Platform/device interrupt handler */
#include <linux/ctype.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
#include "u_ether.h"
while (skb2) {
if (status < 0
|| ETH_HLEN > skb2->len
- || skb2->len > ETH_FRAME_LEN) {
+ || skb2->len > VLAN_ETH_FRAME_LEN) {
dev->net->stats.rx_errors++;
dev->net->stats.rx_length_errors++;
DBG(dev, "rx length %d\n", skb2->len);
goto err_put_hcd;
}
- ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
if (ret)
goto err_put_hcd;
goto err3;
}
- retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ retval = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
if (retval != 0)
goto err4;
return retval;
/* Pegatron Lucid (Ordissimo AIRIS) */
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "M11JB"),
- DMI_MATCH(DMI_BIOS_VERSION, "Lucid-GE-133"),
+ DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"),
+ },
+ },
+ {
+ /* Pegatron Lucid (Ordissimo) */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "Ordissimo"),
+ DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"),
},
},
{ }
int i;
/* Fields are 32 bits wide, DMA addresses are in bytes */
int field_size = 32 / 8;
- struct xhci_slot_ctx *slot_ctx;
dma_addr_t dma = ctx->dma;
int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);
dbg_rsvd64(xhci, (u64 *)ctrl_ctx, dma);
}
- slot_ctx = xhci_get_slot_ctx(xhci, ctx);
xhci_dbg_slot_ctx(xhci, ctx);
xhci_dbg_ep_ctx(xhci, ctx, last_ep);
}
if (portsc & PORT_DEV_REMOVE)
port_removable |= 1 << (i + 1);
}
- memset(&desc->u.ss.DeviceRemovable,
- (__force __u16) cpu_to_le16(port_removable),
- sizeof(__u16));
+
+ desc->u.ss.DeviceRemovable = cpu_to_le16(port_removable);
}
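
The memset() this replaces only ever stores one byte, repeated, because memset()'s value argument is truncated to unsigned char. On a little-endian machine, illustratively:

	__le16 removable;
	u16 map = 0x0102;

	memset(&removable, (__force u16)cpu_to_le16(map), sizeof(__u16));
	/* fill byte is 0x02, so removable ends up 0x0202, not 0x0102 */

	removable = cpu_to_le16(map);	/* stores the intended 0x0102 */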
static void xhci_hub_descriptor(struct usb_hcd *hcd, struct xhci_hcd *xhci,
temp = xhci_readl(xhci, port_array[wIndex]);
xhci_dbg(xhci, "set port power, actual port %d status = 0x%x\n", wIndex, temp);
+ spin_unlock_irqrestore(&xhci->lock, flags);
temp = usb_acpi_power_manageable(hcd->self.root_hub,
wIndex);
if (temp)
usb_acpi_set_power_state(hcd->self.root_hub,
wIndex, true);
+ spin_lock_irqsave(&xhci->lock, flags);
break;
case USB_PORT_FEAT_RESET:
temp = (temp | PORT_RESET);
xhci_writel(xhci, temp & ~PORT_POWER,
port_array[wIndex]);
+ spin_unlock_irqrestore(&xhci->lock, flags);
temp = usb_acpi_power_manageable(hcd->self.root_hub,
wIndex);
if (temp)
usb_acpi_set_power_state(hcd->self.root_hub,
wIndex, false);
+ spin_lock_irqsave(&xhci->lock, flags);
break;
default:
goto error;
cur_seg = find_trb_seg(xhci->cmd_ring->first_seg,
xhci->cmd_ring->dequeue, &cycle_state);
+ if (!cur_seg) {
+ xhci_warn(xhci, "Command ring mismatch, dequeue = %p %llx (dma)\n",
+ xhci->cmd_ring->dequeue,
+ (unsigned long long)
+ xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
+ xhci->cmd_ring->dequeue));
+ xhci_debug_ring(xhci, xhci->cmd_ring);
+ xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
+ return;
+ }
+
/* find the command trb matched by cd from command ring */
for (cmd_trb = xhci->cmd_ring->dequeue;
cmd_trb != xhci->cmd_ring->enqueue;
struct xhci_hcd *xhci;
struct xhci_container_ctx *in_ctx, *out_ctx;
unsigned int ep_index;
- struct xhci_ep_ctx *ep_ctx;
struct xhci_slot_ctx *slot_ctx;
struct xhci_input_control_ctx *ctrl_ctx;
u32 added_ctxs;
out_ctx = virt_dev->out_ctx;
ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
ep_index = xhci_get_endpoint_index(&ep->desc);
- ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
/* If this endpoint is already in use, and the upper layers are trying
* to add it again without dropping it, reject the addition.
case COMP_EBADSLT:
dev_warn(&udev->dev, "WARN: slot not enabled for"
"evaluate context command.\n");
+ ret = -EINVAL;
+ break;
case COMP_CTX_STATE:
dev_warn(&udev->dev, "WARN: invalid context state for "
"evaluate context command.\n");
static unsigned long long xhci_service_interval_to_ns(
struct usb_endpoint_descriptor *desc)
{
- return (1 << (desc->bInterval - 1)) * 125 * 1000;
+ return (1ULL << (desc->bInterval - 1)) * 125 * 1000;
}
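
The 1ULL matters because bInterval may be as large as 16, and the arithmetic otherwise happens in 32-bit int before being widened to the u64 return type:

	/* bInterval = 16: the interval is 2^15 microframes of 125 us each */
	u64 bad  = (1 << 15) * 125 * 1000;	/* 4,096,000,000 overflows int first */
	u64 good = (1ULL << 15) * 125 * 1000;	/* whole product computed in 64 bits */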
static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev,
(xhci_service_interval_to_ns(desc) > timeout_ns))
timeout_ns = xhci_service_interval_to_ns(desc);
- u2_del_ns = udev->bos->ss_cap->bU2DevExitLat * 1000;
+ u2_del_ns = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat) * 1000ULL;
if (u2_del_ns > timeout_ns)
timeout_ns = u2_del_ns;
}
EXPORT_SYMBOL_GPL(ezusb_fx2_ihex_firmware_download);
+MODULE_LICENSE("GPL");
struct platform_device *musb;
struct resource *res;
struct resource resources[2];
- char res_name[10];
+ char res_name[11];
int ret, musbid;
/* get memory resource */
- sprintf(res_name, "musb%d", id);
+ snprintf(res_name, sizeof(res_name), "musb%d", id);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name);
if (!res) {
dev_err(dev, "%s get mem resource failed\n", res_name);
resources[0] = *res;
/* get irq resource */
- sprintf(res_name, "musb%d-irq", id);
+ snprintf(res_name, sizeof(res_name), "musb%d-irq", id);
res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, res_name);
if (!res) {
dev_err(dev, "%s get irq resource failed\n", res_name);
of_property_read_u32(np, "num-eps", (u32 *)&config->num_eps);
of_property_read_u32(np, "ram-bits", (u32 *)&config->ram_bits);
- sprintf(res_name, "port%d-mode", id);
+ snprintf(res_name, sizeof(res_name), "port%d-mode", id);
of_property_read_u32(np, res_name, (u32 *)&pdata->mode);
of_property_read_u32(np, "power", (u32 *)&pdata->power);
config->multipoint = of_property_read_bool(np, "multipoint");
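
The buffer sizing behind the snprintf() conversions above: the longest name formatted here is "musb%d-irq", and sizeof("musb99-irq") is 11, hence char res_name[11] (assuming two-digit instance ids); snprintf() then guards against anything larger.

	char res_name[11];				/* fits "musb99-irq" + NUL */
	snprintf(res_name, sizeof(res_name), "musb%d-irq", id);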
fifo_count = musb_readw(epio, MUSB_RXCOUNT);
/*
- * use mode 1 only if we expect data of at least ep packet_sz
- * and have not yet received a short packet
+	 * Enable Mode 1 on RX transfers only when the short_not_ok flag
+	 * is set. Currently the short_not_ok flag is set only by the
+	 * file_storage and f_mass_storage drivers.
*/
- if ((request->length - request->actual >= musb_ep->packet_sz) &&
- (fifo_count >= musb_ep->packet_sz))
+
+ if (request->short_not_ok && fifo_count == musb_ep->packet_sz)
use_mode_1 = 1;
else
use_mode_1 = 0;
c = musb->dma_controller;
channel = musb_ep->dma;
+	/* We use DMA Req mode 0 in rx_csr, and the DMA controller operates
+	 * in mode 0 only. So we do not get endpoint interrupts due to DMA
+	 * completion. We only get interrupts from the DMA controller.
+	 *
+	 * We could operate in DMA mode 1 if we knew the size of the transfer
+	 * in advance. For mass storage class, request->length = what the host
+	 * sends, so that'd work. But for pretty much everything else,
+	 * request->length is routinely more than what the host sends. For
+	 * most of these gadgets, end of transfer is signified either by a
+	 * short packet or by filling the last byte of the buffer. (Sending
+	 * extra data in that last packet should trigger an overflow fault.)
+	 * But in mode 1, we don't get a DMA completion interrupt for short
+	 * packets.
+ *
+ * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1),
+ * to get endpoint interrupt on every DMA req, but that didn't seem
+ * to work reliably.
+ *
+ * REVISIT an updated g_file_storage can set req->short_not_ok, which
+ * then becomes usable as a runtime "use mode 1" hint...
+ */
+
/* Experimental: Mode1 works with mass storage use cases */
if (use_mode_1) {
csr |= MUSB_RXCSR_AUTOCLEAR;
struct platform_device *musb;
struct ux500_glue *glue;
struct clk *clk;
-
+ int musbid;
int ret = -ENOMEM;
glue = kzalloc(sizeof(*glue), GFP_KERNEL);
config TWL4030_USB
tristate "TWL4030 USB Transceiver Driver"
- depends on TWL4030_CORE && REGULATOR_TWL4030
+ depends on TWL4030_CORE && REGULATOR_TWL4030 && USB_MUSB_OMAP2PLUS
select USB_OTG_UTILS
help
Enable this to support the USB OTG transceiver on TWL4030
config TWL6030_USB
tristate "TWL6030 USB Transceiver Driver"
- depends on TWL4030_CORE && OMAP_USB2
+ depends on TWL4030_CORE && OMAP_USB2 && USB_MUSB_OMAP2PLUS
select USB_OTG_UTILS
help
Enable this to support the USB OTG transceiver on TWL6030
dev_dbg(dev, " %s %d (%d/ %d)\n",
fifo->name, usbhs_pipe_number(pipe), pkt->length, pkt->zero);
+ usbhs_pipe_enable(pipe);
usbhsf_dma_start(pipe, fifo);
dma_async_issue_pending(chan);
}
struct device *dev = usbhs_priv_to_dev(priv);
unsigned long flags;
+ if (unlikely(!uep)) {
+ dev_err(dev, "no uep\n");
+ return;
+ }
+
/******************** spin lock ********************/
usbhs_lock(priv, flags);
return r;
}
-/* allocate private data */
-static int ch341_attach(struct usb_serial *serial)
+static int ch341_port_probe(struct usb_serial_port *port)
{
struct ch341_private *priv;
int r;
- /* private data */
priv = kzalloc(sizeof(struct ch341_private), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->baud_rate = DEFAULT_BAUD_RATE;
priv->line_control = CH341_BIT_RTS | CH341_BIT_DTR;
- r = ch341_configure(serial->dev, priv);
+ r = ch341_configure(port->serial->dev, priv);
if (r < 0)
goto error;
- usb_set_serial_port_data(serial->port[0], priv);
+ usb_set_serial_port_data(port, priv);
return 0;
error: kfree(priv);
return r;
}
+static int ch341_port_remove(struct usb_serial_port *port)
+{
+ struct ch341_private *priv;
+
+ priv = usb_get_serial_port_data(port);
+ kfree(priv);
+
+ return 0;
+}
+
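
This ch341 conversion is the template for the rest of the series (digi, keyspan, mct_u232, metro-usb, mos7720, mos7840 below): per-port state moves from .attach/.release, which iterated over serial->port[], into .port_probe/.port_remove, which the usb-serial core calls once per port. The skeleton, as a sketch (foo_private is illustrative):

	static int foo_port_probe(struct usb_serial_port *port)
	{
		struct foo_private *priv;

		priv = kzalloc(sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return -ENOMEM;

		usb_set_serial_port_data(port, priv);

		return 0;
	}

	static int foo_port_remove(struct usb_serial_port *port)
	{
		kfree(usb_get_serial_port_data(port));

		return 0;
	}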
static int ch341_carrier_raised(struct usb_serial_port *port)
{
struct ch341_private *priv = usb_get_serial_port_data(port);
static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
- struct ch341_private *priv = usb_get_serial_port_data(serial->port[0]);
+ struct ch341_private *priv = usb_get_serial_port_data(port);
int r;
priv->baud_rate = DEFAULT_BAUD_RATE;
.tiocmget = ch341_tiocmget,
.tiocmset = ch341_tiocmset,
.read_int_callback = ch341_read_int_callback,
- .attach = ch341_attach,
+ .port_probe = ch341_port_probe,
+ .port_remove = ch341_port_remove,
.reset_resume = ch341_reset_resume,
};
static int digi_startup(struct usb_serial *serial);
static void digi_disconnect(struct usb_serial *serial);
static void digi_release(struct usb_serial *serial);
+static int digi_port_probe(struct usb_serial_port *port);
+static int digi_port_remove(struct usb_serial_port *port);
static void digi_read_bulk_callback(struct urb *urb);
static int digi_read_inb_callback(struct urb *urb);
static int digi_read_oob_callback(struct urb *urb);
.attach = digi_startup,
.disconnect = digi_disconnect,
.release = digi_release,
+ .port_probe = digi_port_probe,
+ .port_remove = digi_port_remove,
};
static struct usb_serial_driver digi_acceleport_4_device = {
.attach = digi_startup,
.disconnect = digi_disconnect,
.release = digi_release,
+ .port_probe = digi_port_probe,
+ .port_remove = digi_port_remove,
};
static struct usb_serial_driver * const serial_drivers[] = {
return ret;
}
-
-static int digi_startup(struct usb_serial *serial)
+static int digi_port_init(struct usb_serial_port *port, unsigned port_num)
{
-
- int i;
struct digi_port *priv;
- struct digi_serial *serial_priv;
- /* allocate the private data structures for all ports */
- /* number of regular ports + 1 for the out-of-band port */
- for (i = 0; i < serial->type->num_ports + 1; i++) {
- /* allocate port private structure */
- priv = kmalloc(sizeof(struct digi_port), GFP_KERNEL);
- if (priv == NULL) {
- while (--i >= 0)
- kfree(usb_get_serial_port_data(serial->port[i]));
- return 1; /* error */
- }
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
- /* initialize port private structure */
- spin_lock_init(&priv->dp_port_lock);
- priv->dp_port_num = i;
- priv->dp_out_buf_len = 0;
- priv->dp_write_urb_in_use = 0;
- priv->dp_modem_signals = 0;
- init_waitqueue_head(&priv->dp_modem_change_wait);
- priv->dp_transmit_idle = 0;
- init_waitqueue_head(&priv->dp_transmit_idle_wait);
- priv->dp_throttled = 0;
- priv->dp_throttle_restart = 0;
- init_waitqueue_head(&priv->dp_flush_wait);
- init_waitqueue_head(&priv->dp_close_wait);
- INIT_WORK(&priv->dp_wakeup_work, digi_wakeup_write_lock);
- priv->dp_port = serial->port[i];
- /* initialize write wait queue for this port */
- init_waitqueue_head(&serial->port[i]->write_wait);
-
- usb_set_serial_port_data(serial->port[i], priv);
- }
+ spin_lock_init(&priv->dp_port_lock);
+ priv->dp_port_num = port_num;
+ init_waitqueue_head(&priv->dp_modem_change_wait);
+ init_waitqueue_head(&priv->dp_transmit_idle_wait);
+ init_waitqueue_head(&priv->dp_flush_wait);
+ init_waitqueue_head(&priv->dp_close_wait);
+ INIT_WORK(&priv->dp_wakeup_work, digi_wakeup_write_lock);
+ priv->dp_port = port;
- /* allocate serial private structure */
- serial_priv = kmalloc(sizeof(struct digi_serial), GFP_KERNEL);
- if (serial_priv == NULL) {
- for (i = 0; i < serial->type->num_ports + 1; i++)
- kfree(usb_get_serial_port_data(serial->port[i]));
- return 1; /* error */
- }
+ init_waitqueue_head(&port->write_wait);
+
+ usb_set_serial_port_data(port, priv);
+
+ return 0;
+}
+
+static int digi_startup(struct usb_serial *serial)
+{
+ struct digi_serial *serial_priv;
+ int ret;
+
+ serial_priv = kzalloc(sizeof(*serial_priv), GFP_KERNEL);
+ if (!serial_priv)
+ return -ENOMEM;
- /* initialize serial private structure */
spin_lock_init(&serial_priv->ds_serial_lock);
serial_priv->ds_oob_port_num = serial->type->num_ports;
serial_priv->ds_oob_port = serial->port[serial_priv->ds_oob_port_num];
- serial_priv->ds_device_started = 0;
+
+ ret = digi_port_init(serial_priv->ds_oob_port,
+ serial_priv->ds_oob_port_num);
+ if (ret) {
+ kfree(serial_priv);
+ return ret;
+ }
+
usb_set_serial_data(serial, serial_priv);
return 0;
static void digi_release(struct usb_serial *serial)
{
- int i;
+ struct digi_serial *serial_priv;
+ struct digi_port *priv;
+
+ serial_priv = usb_get_serial_data(serial);
+
+ priv = usb_get_serial_port_data(serial_priv->ds_oob_port);
+ kfree(priv);
- /* free the private data structures for all ports */
- /* number of regular ports + 1 for the out-of-band port */
- for (i = 0; i < serial->type->num_ports + 1; i++)
- kfree(usb_get_serial_port_data(serial->port[i]));
- kfree(usb_get_serial_data(serial));
+ kfree(serial_priv);
}
+static int digi_port_probe(struct usb_serial_port *port)
+{
+ unsigned port_num;
+
+ port_num = port->number - port->serial->minor;
+
+ return digi_port_init(port, port_num);
+}
+
+static int digi_port_remove(struct usb_serial_port *port)
+{
+ struct digi_port *priv;
+
+ priv = usb_get_serial_port_data(port);
+ kfree(priv);
+
+ return 0;
+}
static void digi_read_bulk_callback(struct urb *urb)
{
return 0;
}
-/* fake probe - only to allocate data structures */
-static int ipw_probe(struct usb_serial *serial, const struct usb_device_id *id)
+static int ipw_attach(struct usb_serial *serial)
{
struct usb_wwan_intf_private *data;
.num_ports = 1,
.open = ipw_open,
.close = ipw_close,
- .probe = ipw_probe,
- .attach = usb_wwan_startup,
+ .attach = ipw_attach,
.release = ipw_release,
+ .port_probe = usb_wwan_port_probe,
.port_remove = usb_wwan_port_remove,
.dtr_rts = ipw_dtr_rts,
.write = usb_wwan_write,
data in device_details */
static void keyspan_setup_urbs(struct usb_serial *serial)
{
- int i, j;
struct keyspan_serial_private *s_priv;
const struct keyspan_device_details *d_details;
- struct usb_serial_port *port;
- struct keyspan_port_private *p_priv;
struct callbacks *cback;
- int endp;
s_priv = usb_get_serial_data(serial);
d_details = s_priv->device_details;
(serial, d_details->glocont_endpoint, USB_DIR_OUT,
serial, s_priv->glocont_buf, GLOCONT_BUFLEN,
cback->glocont_callback);
-
- /* Setup endpoints for each port specific thing */
- for (i = 0; i < d_details->num_ports; i++) {
- port = serial->port[i];
- p_priv = usb_get_serial_port_data(port);
-
- /* Do indat endpoints first, once for each flip */
- endp = d_details->indat_endpoints[i];
- for (j = 0; j <= d_details->indat_endp_flip; ++j, ++endp) {
- p_priv->in_urbs[j] = keyspan_setup_urb
- (serial, endp, USB_DIR_IN, port,
- p_priv->in_buffer[j], 64,
- cback->indat_callback);
- }
- for (; j < 2; ++j)
- p_priv->in_urbs[j] = NULL;
-
- /* outdat endpoints also have flip */
- endp = d_details->outdat_endpoints[i];
- for (j = 0; j <= d_details->outdat_endp_flip; ++j, ++endp) {
- p_priv->out_urbs[j] = keyspan_setup_urb
- (serial, endp, USB_DIR_OUT, port,
- p_priv->out_buffer[j], 64,
- cback->outdat_callback);
- }
- for (; j < 2; ++j)
- p_priv->out_urbs[j] = NULL;
-
- /* inack endpoint */
- p_priv->inack_urb = keyspan_setup_urb
- (serial, d_details->inack_endpoints[i], USB_DIR_IN,
- port, p_priv->inack_buffer, 1, cback->inack_callback);
-
- /* outcont endpoint */
- p_priv->outcont_urb = keyspan_setup_urb
- (serial, d_details->outcont_endpoints[i], USB_DIR_OUT,
- port, p_priv->outcont_buffer, 64,
- cback->outcont_callback);
- }
}
/* usa19 function doesn't require prescaler */
static int keyspan_startup(struct usb_serial *serial)
{
int i, err;
- struct usb_serial_port *port;
struct keyspan_serial_private *s_priv;
- struct keyspan_port_private *p_priv;
const struct keyspan_device_details *d_details;
for (i = 0; (d_details = keyspan_devices[i]) != NULL; ++i)
s_priv->device_details = d_details;
usb_set_serial_data(serial, s_priv);
- /* Now setup per port private data */
- for (i = 0; i < serial->num_ports; i++) {
- port = serial->port[i];
- p_priv = kzalloc(sizeof(struct keyspan_port_private),
- GFP_KERNEL);
- if (!p_priv) {
- dev_dbg(&port->dev, "%s - kmalloc for keyspan_port_private (%d) failed!.\n", __func__, i);
- return 1;
- }
- p_priv->device_details = d_details;
- usb_set_serial_port_data(port, p_priv);
- }
-
keyspan_setup_urbs(serial);
if (s_priv->instat_urb != NULL) {
static void keyspan_disconnect(struct usb_serial *serial)
{
- int i, j;
- struct usb_serial_port *port;
- struct keyspan_serial_private *s_priv;
- struct keyspan_port_private *p_priv;
+ struct keyspan_serial_private *s_priv;
s_priv = usb_get_serial_data(serial);
- /* Stop reading/writing urbs */
stop_urb(s_priv->instat_urb);
stop_urb(s_priv->glocont_urb);
stop_urb(s_priv->indat_urb);
- for (i = 0; i < serial->num_ports; ++i) {
- port = serial->port[i];
- p_priv = usb_get_serial_port_data(port);
- stop_urb(p_priv->inack_urb);
- stop_urb(p_priv->outcont_urb);
- for (j = 0; j < 2; j++) {
- stop_urb(p_priv->in_urbs[j]);
- stop_urb(p_priv->out_urbs[j]);
- }
- }
+}
+
+static void keyspan_release(struct usb_serial *serial)
+{
+ struct keyspan_serial_private *s_priv;
+
+ s_priv = usb_get_serial_data(serial);
- /* Now free them */
usb_free_urb(s_priv->instat_urb);
usb_free_urb(s_priv->indat_urb);
usb_free_urb(s_priv->glocont_urb);
- for (i = 0; i < serial->num_ports; ++i) {
- port = serial->port[i];
- p_priv = usb_get_serial_port_data(port);
- usb_free_urb(p_priv->inack_urb);
- usb_free_urb(p_priv->outcont_urb);
- for (j = 0; j < 2; j++) {
- usb_free_urb(p_priv->in_urbs[j]);
- usb_free_urb(p_priv->out_urbs[j]);
- }
- }
+
+ kfree(s_priv);
}
-static void keyspan_release(struct usb_serial *serial)
+static int keyspan_port_probe(struct usb_serial_port *port)
{
- int i;
- struct usb_serial_port *port;
- struct keyspan_serial_private *s_priv;
+ struct usb_serial *serial = port->serial;
+ struct keyspan_serial_private *s_priv;
+ struct keyspan_port_private *p_priv;
+ const struct keyspan_device_details *d_details;
+ struct callbacks *cback;
+ int endp;
+ int port_num;
+ int i;
s_priv = usb_get_serial_data(serial);
+ d_details = s_priv->device_details;
- kfree(s_priv);
+ p_priv = kzalloc(sizeof(*p_priv), GFP_KERNEL);
+ if (!p_priv)
+ return -ENOMEM;
- /* Now free per port private data */
- for (i = 0; i < serial->num_ports; i++) {
- port = serial->port[i];
- kfree(usb_get_serial_port_data(port));
+ p_priv->device_details = d_details;
+
+ /* Setup values for the various callback routines */
+ cback = &keyspan_callbacks[d_details->msg_format];
+
+ port_num = port->number - port->serial->minor;
+
+ /* Do indat endpoints first, once for each flip */
+ endp = d_details->indat_endpoints[port_num];
+ for (i = 0; i <= d_details->indat_endp_flip; ++i, ++endp) {
+ p_priv->in_urbs[i] = keyspan_setup_urb(serial, endp,
+ USB_DIR_IN, port,
+ p_priv->in_buffer[i], 64,
+ cback->indat_callback);
+ }
+ /* outdat endpoints also have flip */
+ endp = d_details->outdat_endpoints[port_num];
+ for (i = 0; i <= d_details->outdat_endp_flip; ++i, ++endp) {
+ p_priv->out_urbs[i] = keyspan_setup_urb(serial, endp,
+ USB_DIR_OUT, port,
+ p_priv->out_buffer[i], 64,
+ cback->outdat_callback);
+ }
+ /* inack endpoint */
+ p_priv->inack_urb = keyspan_setup_urb(serial,
+ d_details->inack_endpoints[port_num],
+ USB_DIR_IN, port,
+ p_priv->inack_buffer, 1,
+ cback->inack_callback);
+ /* outcont endpoint */
+ p_priv->outcont_urb = keyspan_setup_urb(serial,
+ d_details->outcont_endpoints[port_num],
+ USB_DIR_OUT, port,
+ p_priv->outcont_buffer, 64,
+ cback->outcont_callback);
+
+ usb_set_serial_port_data(port, p_priv);
+
+ return 0;
+}
+
+static int keyspan_port_remove(struct usb_serial_port *port)
+{
+ struct keyspan_port_private *p_priv;
+ int i;
+
+ p_priv = usb_get_serial_port_data(port);
+
+ stop_urb(p_priv->inack_urb);
+ stop_urb(p_priv->outcont_urb);
+ for (i = 0; i < 2; i++) {
+ stop_urb(p_priv->in_urbs[i]);
+ stop_urb(p_priv->out_urbs[i]);
+ }
+
+ usb_free_urb(p_priv->inack_urb);
+ usb_free_urb(p_priv->outcont_urb);
+ for (i = 0; i < 2; i++) {
+ usb_free_urb(p_priv->in_urbs[i]);
+ usb_free_urb(p_priv->out_urbs[i]);
}
+
+ kfree(p_priv);
+
+ return 0;
}
MODULE_AUTHOR(DRIVER_AUTHOR);
static int keyspan_startup (struct usb_serial *serial);
static void keyspan_disconnect (struct usb_serial *serial);
static void keyspan_release (struct usb_serial *serial);
+static int keyspan_port_probe(struct usb_serial_port *port);
+static int keyspan_port_remove(struct usb_serial_port *port);
static int keyspan_write_room (struct tty_struct *tty);
static int keyspan_write (struct tty_struct *tty,
.attach = keyspan_startup,
.disconnect = keyspan_disconnect,
.release = keyspan_release,
+ .port_probe = keyspan_port_probe,
+ .port_remove = keyspan_port_remove,
};
static struct usb_serial_driver keyspan_2port_device = {
.attach = keyspan_startup,
.disconnect = keyspan_disconnect,
.release = keyspan_release,
+ .port_probe = keyspan_port_probe,
+ .port_remove = keyspan_port_remove,
};
static struct usb_serial_driver keyspan_4port_device = {
.attach = keyspan_startup,
.disconnect = keyspan_disconnect,
.release = keyspan_release,
+ .port_probe = keyspan_port_probe,
+ .port_remove = keyspan_port_remove,
};
static struct usb_serial_driver * const serial_drivers[] = {
* Function prototypes
*/
static int mct_u232_startup(struct usb_serial *serial);
-static void mct_u232_release(struct usb_serial *serial);
+static int mct_u232_port_probe(struct usb_serial_port *port);
+static int mct_u232_port_remove(struct usb_serial_port *port);
static int mct_u232_open(struct tty_struct *tty, struct usb_serial_port *port);
static void mct_u232_close(struct usb_serial_port *port);
static void mct_u232_dtr_rts(struct usb_serial_port *port, int on);
.tiocmget = mct_u232_tiocmget,
.tiocmset = mct_u232_tiocmset,
.attach = mct_u232_startup,
- .release = mct_u232_release,
+ .port_probe = mct_u232_port_probe,
+ .port_remove = mct_u232_port_remove,
.ioctl = mct_u232_ioctl,
.get_icount = mct_u232_get_icount,
};
static int mct_u232_startup(struct usb_serial *serial)
{
- struct mct_u232_private *priv;
struct usb_serial_port *port, *rport;
- priv = kzalloc(sizeof(struct mct_u232_private), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
- spin_lock_init(&priv->lock);
- init_waitqueue_head(&priv->msr_wait);
- usb_set_serial_port_data(serial->port[0], priv);
-
- init_waitqueue_head(&serial->port[0]->write_wait);
-
/* Puh, that's dirty */
port = serial->port[0];
rport = serial->port[1];
return 0;
} /* mct_u232_startup */
+static int mct_u232_port_probe(struct usb_serial_port *port)
+{
+ struct mct_u232_private *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ spin_lock_init(&priv->lock);
+ init_waitqueue_head(&priv->msr_wait);
+
+ usb_set_serial_port_data(port, priv);
-static void mct_u232_release(struct usb_serial *serial)
+ return 0;
+}
+
+static int mct_u232_port_remove(struct usb_serial_port *port)
{
struct mct_u232_private *priv;
- int i;
- for (i = 0; i < serial->num_ports; ++i) {
- /* My special items, the standard routines free my urbs */
- priv = usb_get_serial_port_data(serial->port[i]);
- kfree(priv);
- }
-} /* mct_u232_release */
+ priv = usb_get_serial_port_data(port);
+ kfree(priv);
+
+ return 0;
+}
static int mct_u232_open(struct tty_struct *tty, struct usb_serial_port *port)
{
static void mct_u232_close(struct usb_serial_port *port)
{
- if (port->serial->dev) {
- /* shutdown our urbs */
- usb_kill_urb(port->write_urb);
- usb_kill_urb(port->read_urb);
- usb_kill_urb(port->interrupt_in_urb);
- }
+ /*
+ * Must kill the read urb as it is actually an interrupt urb, which
+ * generic close thus fails to kill.
+ */
+ usb_kill_urb(port->read_urb);
+ usb_kill_urb(port->interrupt_in_urb);
+
+ usb_serial_generic_close(port);
} /* mct_u232_close */
{
dev_dbg(&port->dev, "%s\n", __func__);
- if (port->serial->dev) {
- /* Shutdown any interrupt in urbs. */
- if (port->interrupt_in_urb) {
- usb_unlink_urb(port->interrupt_in_urb);
- usb_kill_urb(port->interrupt_in_urb);
- }
-
- /* Send deactivate cmd to device */
+ usb_unlink_urb(port->interrupt_in_urb);
+ usb_kill_urb(port->interrupt_in_urb);
+
+ mutex_lock(&port->serial->disc_mutex);
+ if (!port->serial->disconnected)
metrousb_send_unidirectional_cmd(UNI_CMD_CLOSE, port);
- }
+ mutex_unlock(&port->serial->disc_mutex);
}
static int metrousb_open(struct tty_struct *tty, struct usb_serial_port *port)
return retval;
}
-static void metrousb_shutdown(struct usb_serial *serial)
+static int metrousb_port_probe(struct usb_serial_port *port)
{
- int i = 0;
+ struct metrousb_private *metro_priv;
- dev_dbg(&serial->dev->dev, "%s\n", __func__);
+ metro_priv = kzalloc(sizeof(*metro_priv), GFP_KERNEL);
+ if (!metro_priv)
+ return -ENOMEM;
- /* Stop reading and writing on all ports. */
- for (i = 0; i < serial->num_ports; ++i) {
- /* Close any open urbs. */
- metrousb_cleanup(serial->port[i]);
+ spin_lock_init(&metro_priv->lock);
- /* Free memory. */
- kfree(usb_get_serial_port_data(serial->port[i]));
- usb_set_serial_port_data(serial->port[i], NULL);
+ usb_set_serial_port_data(port, metro_priv);
- dev_dbg(&serial->dev->dev, "%s - freed port number=%d\n",
- __func__, serial->port[i]->number);
- }
+ return 0;
}
-static int metrousb_startup(struct usb_serial *serial)
+static int metrousb_port_remove(struct usb_serial_port *port)
{
struct metrousb_private *metro_priv;
- struct usb_serial_port *port;
- int i = 0;
- dev_dbg(&serial->dev->dev, "%s\n", __func__);
-
- /* Loop through the serial ports setting up the private structures.
- * Currently we only use one port. */
- for (i = 0; i < serial->num_ports; ++i) {
- port = serial->port[i];
-
- /* Declare memory. */
- metro_priv = kzalloc(sizeof(struct metrousb_private), GFP_KERNEL);
- if (!metro_priv)
- return -ENOMEM;
-
- /* Initialize memory. */
- spin_lock_init(&metro_priv->lock);
- usb_set_serial_port_data(port, metro_priv);
-
- dev_dbg(&serial->dev->dev, "%s - port number=%d\n ",
- __func__, port->number);
- }
+ metro_priv = usb_get_serial_port_data(port);
+ kfree(metro_priv);
return 0;
}
.close = metrousb_cleanup,
.read_int_callback = metrousb_read_int_callback,
.write_int_callback = metrousb_write_int_callback,
- .attach = metrousb_startup,
- .release = metrousb_shutdown,
+ .port_probe = metrousb_port_probe,
+ .port_remove = metrousb_port_remove,
.throttle = metrousb_throttle,
.unthrottle = metrousb_unthrottle,
.tiocmget = metrousb_tiocmget,
static int mos7720_startup(struct usb_serial *serial)
{
- struct moschip_port *mos7720_port;
struct usb_device *dev;
- int i;
char data;
u16 product;
int ret_val;
serial->port[1]->interrupt_in_buffer = NULL;
}
-
- /* set up serial port private structures */
- for (i = 0; i < serial->num_ports; ++i) {
- mos7720_port = kzalloc(sizeof(struct moschip_port), GFP_KERNEL);
- if (mos7720_port == NULL) {
- dev_err(&dev->dev, "%s - Out of memory\n", __func__);
- return -ENOMEM;
- }
-
- /* Initialize all port interrupt end point to port 0 int
- * endpoint. Our device has only one interrupt endpoint
- * common to all ports */
- serial->port[i]->interrupt_in_endpointAddress =
- serial->port[0]->interrupt_in_endpointAddress;
-
- mos7720_port->port = serial->port[i];
- usb_set_serial_port_data(serial->port[i], mos7720_port);
-
- dev_dbg(&dev->dev, "port number is %d\n", serial->port[i]->number);
- dev_dbg(&dev->dev, "serial number is %d\n", serial->minor);
- }
-
-
/* setting configuration feature to one */
usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
(__u8)0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5*HZ);
static void mos7720_release(struct usb_serial *serial)
{
- int i;
-
#ifdef CONFIG_USB_SERIAL_MOS7715_PARPORT
/* close the parallel port */
kref_put(&mos_parport->ref_count, destroy_mos_parport);
}
#endif
- /* free private structure allocated for serial port */
- for (i = 0; i < serial->num_ports; ++i)
- kfree(usb_get_serial_port_data(serial->port[i]));
+}
+
+static int mos7720_port_probe(struct usb_serial_port *port)
+{
+ struct moschip_port *mos7720_port;
+
+ mos7720_port = kzalloc(sizeof(*mos7720_port), GFP_KERNEL);
+ if (!mos7720_port)
+ return -ENOMEM;
+
+ /* Initialize all port interrupt end point to port 0 int endpoint.
+ * Our device has only one interrupt endpoint common to all ports.
+ */
+ port->interrupt_in_endpointAddress =
+ port->serial->port[0]->interrupt_in_endpointAddress;
+ mos7720_port->port = port;
+
+ usb_set_serial_port_data(port, mos7720_port);
+
+ return 0;
+}
+
+static int mos7720_port_remove(struct usb_serial_port *port)
+{
+ struct moschip_port *mos7720_port;
+
+ mos7720_port = usb_get_serial_port_data(port);
+ kfree(mos7720_port);
+
+ return 0;
}
static struct usb_serial_driver moschip7720_2port_driver = {
.probe = mos77xx_probe,
.attach = mos7720_startup,
.release = mos7720_release,
+ .port_probe = mos7720_port_probe,
+ .port_remove = mos7720_port_remove,
.ioctl = mos7720_ioctl,
.tiocmget = mos7720_tiocmget,
.tiocmset = mos7720_tiocmset,
int port_num; /*Actual port number in the device(1,2,etc) */
struct urb *write_urb; /* write URB for this port */
struct urb *read_urb; /* read URB for this port */
- struct urb *int_urb;
__u8 shadowLCR; /* last LCR value received */
__u8 shadowMCR; /* last MCR value received */
char open;
char open_ports;
- char zombie;
wait_queue_head_t wait_chase; /* for handling sleeping while waiting for chase to finish */
wait_queue_head_t delta_msr_wait; /* for handling sleeping while waiting for msr change to happen */
int delta_msr_cond;
struct moschip_port *mos7840_port;
struct device *dev = &urb->dev->dev;
__u8 regval = 0x0;
- int result = 0;
int status = urb->status;
mos7840_port = urb->context;
return;
default:
dev_dbg(dev, "%s - nonzero urb status received: %d\n", __func__, status);
- goto exit;
+ return;
}
dev_dbg(dev, "%s urb buffer size is %d\n", __func__, urb->actual_length);
mos7840_handle_new_msr(mos7840_port, regval);
else if (mos7840_port->MsrLsr == 1)
mos7840_handle_new_lsr(mos7840_port, regval);
-
-exit:
- spin_lock(&mos7840_port->pool_lock);
- if (!mos7840_port->zombie)
- result = usb_submit_urb(mos7840_port->int_urb, GFP_ATOMIC);
- spin_unlock(&mos7840_port->pool_lock);
- if (result) {
- dev_err(dev, "%s - Error %d submitting interrupt urb\n",
- __func__, result);
- }
}
static int mos7840_get_reg(struct moschip_port *mcs, __u16 Wval, __u16 reg,
wreg = MODEM_STATUS_REGISTER;
break;
}
- spin_lock(&mos7840_port->pool_lock);
- if (!mos7840_port->zombie) {
- rv = mos7840_get_reg(mos7840_port, wval, wreg, &Data);
- } else {
- spin_unlock(&mos7840_port->pool_lock);
- return;
- }
- spin_unlock(&mos7840_port->pool_lock);
+ rv = mos7840_get_reg(mos7840_port, wval, wreg, &Data);
}
}
}
return mos7840_num_ports;
}
-/****************************************************************************
- * mos7840_startup
- ****************************************************************************/
-
-static int mos7840_startup(struct usb_serial *serial)
+static int mos7840_port_probe(struct usb_serial_port *port)
{
+ struct usb_serial *serial = port->serial;
struct moschip_port *mos7840_port;
- struct usb_device *dev;
- int i, status;
+ int status;
+ int pnum;
__u16 Data;
- dev = serial->dev;
-
/* we set up the pointers to the endpoints in the mos7840_open *
* function, as the structures aren't created yet. */
- /* set up port private structures */
- for (i = 0; i < serial->num_ports; ++i) {
- dev_dbg(&dev->dev, "mos7840_startup: configuring port %d............\n", i);
- mos7840_port = kzalloc(sizeof(struct moschip_port), GFP_KERNEL);
- if (mos7840_port == NULL) {
- dev_err(&dev->dev, "%s - Out of memory\n", __func__);
- status = -ENOMEM;
- i--; /* don't follow NULL pointer cleaning up */
- goto error;
- }
-
- /* Initialize all port interrupt end point to port 0 int
- * endpoint. Our device has only one interrupt end point
- * common to all port */
-
- mos7840_port->port = serial->port[i];
- mos7840_set_port_private(serial->port[i], mos7840_port);
- spin_lock_init(&mos7840_port->pool_lock);
-
- /* minor is not initialised until later by
- * usb-serial.c:get_free_serial() and cannot therefore be used
- * to index device instances */
- mos7840_port->port_num = i + 1;
- dev_dbg(&dev->dev, "serial->port[i]->number = %d\n", serial->port[i]->number);
- dev_dbg(&dev->dev, "serial->port[i]->serial->minor = %d\n", serial->port[i]->serial->minor);
- dev_dbg(&dev->dev, "mos7840_port->port_num = %d\n", mos7840_port->port_num);
- dev_dbg(&dev->dev, "serial->minor = %d\n", serial->minor);
-
- if (mos7840_port->port_num == 1) {
- mos7840_port->SpRegOffset = 0x0;
- mos7840_port->ControlRegOffset = 0x1;
- mos7840_port->DcrRegOffset = 0x4;
- } else if ((mos7840_port->port_num == 2)
- && (serial->num_ports == 4)) {
- mos7840_port->SpRegOffset = 0x8;
- mos7840_port->ControlRegOffset = 0x9;
- mos7840_port->DcrRegOffset = 0x16;
- } else if ((mos7840_port->port_num == 2)
- && (serial->num_ports == 2)) {
- mos7840_port->SpRegOffset = 0xa;
- mos7840_port->ControlRegOffset = 0xb;
- mos7840_port->DcrRegOffset = 0x19;
- } else if ((mos7840_port->port_num == 3)
- && (serial->num_ports == 4)) {
- mos7840_port->SpRegOffset = 0xa;
- mos7840_port->ControlRegOffset = 0xb;
- mos7840_port->DcrRegOffset = 0x19;
- } else if ((mos7840_port->port_num == 4)
- && (serial->num_ports == 4)) {
- mos7840_port->SpRegOffset = 0xc;
- mos7840_port->ControlRegOffset = 0xd;
- mos7840_port->DcrRegOffset = 0x1c;
- }
- mos7840_dump_serial_port(serial->port[i], mos7840_port);
- mos7840_set_port_private(serial->port[i], mos7840_port);
+ pnum = port->number - serial->minor;
- /* enable rx_disable bit in control register */
- status = mos7840_get_reg_sync(serial->port[i],
- mos7840_port->ControlRegOffset, &Data);
- if (status < 0) {
- dev_dbg(&dev->dev, "Reading ControlReg failed status-0x%x\n", status);
- break;
- } else
- dev_dbg(&dev->dev, "ControlReg Reading success val is %x, status%d\n", Data, status);
- Data |= 0x08; /* setting driver done bit */
- Data |= 0x04; /* sp1_bit to have cts change reflect in
- modem status reg */
-
- /* Data |= 0x20; //rx_disable bit */
- status = mos7840_set_reg_sync(serial->port[i],
- mos7840_port->ControlRegOffset, Data);
- if (status < 0) {
- dev_dbg(&dev->dev, "Writing ControlReg failed(rx_disable) status-0x%x\n", status);
- break;
- } else
- dev_dbg(&dev->dev, "ControlReg Writing success(rx_disable) status%d\n", status);
+	dev_dbg(&port->dev, "%s: configuring port %d\n", __func__, pnum);
+ mos7840_port = kzalloc(sizeof(struct moschip_port), GFP_KERNEL);
+ if (mos7840_port == NULL) {
+ dev_err(&port->dev, "%s - Out of memory\n", __func__);
+ return -ENOMEM;
+ }
- /* Write default values in DCR (i.e 0x01 in DCR0, 0x05 in DCR2
- and 0x24 in DCR3 */
- Data = 0x01;
- status = mos7840_set_reg_sync(serial->port[i],
- (__u16) (mos7840_port->DcrRegOffset + 0), Data);
- if (status < 0) {
- dev_dbg(&dev->dev, "Writing DCR0 failed status-0x%x\n", status);
- break;
- } else
- dev_dbg(&dev->dev, "DCR0 Writing success status%d\n", status);
+	/* Initialize each port's interrupt endpoint to the port 0
+	 * interrupt endpoint. Our device has only one interrupt
+	 * endpoint, common to all ports. */
+
+ mos7840_port->port = port;
+ mos7840_set_port_private(port, mos7840_port);
+ spin_lock_init(&mos7840_port->pool_lock);
+
+ /* minor is not initialised until later by
+ * usb-serial.c:get_free_serial() and cannot therefore be used
+ * to index device instances */
+ mos7840_port->port_num = pnum + 1;
+ dev_dbg(&port->dev, "port->number = %d\n", port->number);
+ dev_dbg(&port->dev, "port->serial->minor = %d\n", port->serial->minor);
+ dev_dbg(&port->dev, "mos7840_port->port_num = %d\n", mos7840_port->port_num);
+ dev_dbg(&port->dev, "serial->minor = %d\n", serial->minor);
+
+ if (mos7840_port->port_num == 1) {
+ mos7840_port->SpRegOffset = 0x0;
+ mos7840_port->ControlRegOffset = 0x1;
+ mos7840_port->DcrRegOffset = 0x4;
+ } else if ((mos7840_port->port_num == 2) && (serial->num_ports == 4)) {
+ mos7840_port->SpRegOffset = 0x8;
+ mos7840_port->ControlRegOffset = 0x9;
+ mos7840_port->DcrRegOffset = 0x16;
+ } else if ((mos7840_port->port_num == 2) && (serial->num_ports == 2)) {
+ mos7840_port->SpRegOffset = 0xa;
+ mos7840_port->ControlRegOffset = 0xb;
+ mos7840_port->DcrRegOffset = 0x19;
+ } else if ((mos7840_port->port_num == 3) && (serial->num_ports == 4)) {
+ mos7840_port->SpRegOffset = 0xa;
+ mos7840_port->ControlRegOffset = 0xb;
+ mos7840_port->DcrRegOffset = 0x19;
+ } else if ((mos7840_port->port_num == 4) && (serial->num_ports == 4)) {
+ mos7840_port->SpRegOffset = 0xc;
+ mos7840_port->ControlRegOffset = 0xd;
+ mos7840_port->DcrRegOffset = 0x1c;
+ }
+ mos7840_dump_serial_port(port, mos7840_port);
+ mos7840_set_port_private(port, mos7840_port);
+
+ /* enable rx_disable bit in control register */
+ status = mos7840_get_reg_sync(port,
+ mos7840_port->ControlRegOffset, &Data);
+ if (status < 0) {
+ dev_dbg(&port->dev, "Reading ControlReg failed status-0x%x\n", status);
+ goto out;
+ } else
+		dev_dbg(&port->dev, "ControlReg Reading success val is %x, status %d\n", Data, status);
+ Data |= 0x08; /* setting driver done bit */
+	Data |= 0x04;	/* sp1_bit so that CTS changes are reflected in
+				   the modem status register */
- Data = 0x05;
- status = mos7840_set_reg_sync(serial->port[i],
- (__u16) (mos7840_port->DcrRegOffset + 1), Data);
- if (status < 0) {
- dev_dbg(&dev->dev, "Writing DCR1 failed status-0x%x\n", status);
- break;
- } else
- dev_dbg(&dev->dev, "DCR1 Writing success status%d\n", status);
+ /* Data |= 0x20; //rx_disable bit */
+ status = mos7840_set_reg_sync(port,
+ mos7840_port->ControlRegOffset, Data);
+ if (status < 0) {
+		dev_dbg(&port->dev, "Writing ControlReg failed (rx_disable) status-0x%x\n", status);
+		goto out;
+	} else
+		dev_dbg(&port->dev, "ControlReg Writing success (rx_disable) status %d\n", status);
- Data = 0x24;
- status = mos7840_set_reg_sync(serial->port[i],
- (__u16) (mos7840_port->DcrRegOffset + 2), Data);
- if (status < 0) {
- dev_dbg(&dev->dev, "Writing DCR2 failed status-0x%x\n", status);
- break;
- } else
- dev_dbg(&dev->dev, "DCR2 Writing success status%d\n", status);
+	/* Write default values in DCR (i.e. 0x01 in DCR0, 0x05 in DCR1
+	   and 0x24 in DCR2) */
+ Data = 0x01;
+ status = mos7840_set_reg_sync(port,
+ (__u16) (mos7840_port->DcrRegOffset + 0), Data);
+ if (status < 0) {
+ dev_dbg(&port->dev, "Writing DCR0 failed status-0x%x\n", status);
+ goto out;
+ } else
+		dev_dbg(&port->dev, "DCR0 Writing success status %d\n", status);
- /* write values in clkstart0x0 and clkmulti 0x20 */
- Data = 0x0;
- status = mos7840_set_reg_sync(serial->port[i],
- CLK_START_VALUE_REGISTER, Data);
- if (status < 0) {
- dev_dbg(&dev->dev, "Writing CLK_START_VALUE_REGISTER failed status-0x%x\n", status);
- break;
- } else
- dev_dbg(&dev->dev, "CLK_START_VALUE_REGISTER Writing success status%d\n", status);
+ Data = 0x05;
+ status = mos7840_set_reg_sync(port,
+ (__u16) (mos7840_port->DcrRegOffset + 1), Data);
+ if (status < 0) {
+ dev_dbg(&port->dev, "Writing DCR1 failed status-0x%x\n", status);
+ goto out;
+ } else
+		dev_dbg(&port->dev, "DCR1 Writing success status %d\n", status);
- Data = 0x20;
- status = mos7840_set_reg_sync(serial->port[i],
- CLK_MULTI_REGISTER, Data);
- if (status < 0) {
- dev_dbg(&dev->dev, "Writing CLK_MULTI_REGISTER failed status-0x%x\n", status);
- goto error;
- } else
- dev_dbg(&dev->dev, "CLK_MULTI_REGISTER Writing success status%d\n", status);
+ Data = 0x24;
+ status = mos7840_set_reg_sync(port,
+ (__u16) (mos7840_port->DcrRegOffset + 2), Data);
+ if (status < 0) {
+ dev_dbg(&port->dev, "Writing DCR2 failed status-0x%x\n", status);
+ goto out;
+ } else
+		dev_dbg(&port->dev, "DCR2 Writing success status %d\n", status);
- /* write value 0x0 to scratchpad register */
- Data = 0x00;
- status = mos7840_set_uart_reg(serial->port[i],
- SCRATCH_PAD_REGISTER, Data);
- if (status < 0) {
- dev_dbg(&dev->dev, "Writing SCRATCH_PAD_REGISTER failed status-0x%x\n", status);
- break;
- } else
- dev_dbg(&dev->dev, "SCRATCH_PAD_REGISTER Writing success status%d\n", status);
+	/* write 0x0 to the clock start register and 0x20 to the clock multiplier */
+ Data = 0x0;
+ status = mos7840_set_reg_sync(port, CLK_START_VALUE_REGISTER, Data);
+ if (status < 0) {
+ dev_dbg(&port->dev, "Writing CLK_START_VALUE_REGISTER failed status-0x%x\n", status);
+ goto out;
+ } else
+		dev_dbg(&port->dev, "CLK_START_VALUE_REGISTER Writing success status %d\n", status);
- /* Zero Length flag register */
- if ((mos7840_port->port_num != 1)
- && (serial->num_ports == 2)) {
+ Data = 0x20;
+ status = mos7840_set_reg_sync(port, CLK_MULTI_REGISTER, Data);
+ if (status < 0) {
+ dev_dbg(&port->dev, "Writing CLK_MULTI_REGISTER failed status-0x%x\n", status);
+ goto error;
+ } else
+		dev_dbg(&port->dev, "CLK_MULTI_REGISTER Writing success status %d\n", status);
- Data = 0xff;
- status = mos7840_set_reg_sync(serial->port[i],
- (__u16) (ZLP_REG1 +
- ((__u16)mos7840_port->port_num)), Data);
- dev_dbg(&dev->dev, "ZLIP offset %x\n",
+ /* write value 0x0 to scratchpad register */
+ Data = 0x00;
+ status = mos7840_set_uart_reg(port, SCRATCH_PAD_REGISTER, Data);
+ if (status < 0) {
+ dev_dbg(&port->dev, "Writing SCRATCH_PAD_REGISTER failed status-0x%x\n", status);
+ goto out;
+ } else
+		dev_dbg(&port->dev, "SCRATCH_PAD_REGISTER Writing success status %d\n", status);
+
+ /* Zero Length flag register */
+ if ((mos7840_port->port_num != 1) && (serial->num_ports == 2)) {
+ Data = 0xff;
+ status = mos7840_set_reg_sync(port,
+ (__u16) (ZLP_REG1 +
+ ((__u16)mos7840_port->port_num)), Data);
+		dev_dbg(&port->dev, "ZLP offset %x\n",
(__u16)(ZLP_REG1 + ((__u16) mos7840_port->port_num)));
- if (status < 0) {
- dev_dbg(&dev->dev, "Writing ZLP_REG%d failed status-0x%x\n", i + 2, status);
- break;
- } else
- dev_dbg(&dev->dev, "ZLP_REG%d Writing success status%d\n", i + 2, status);
- } else {
- Data = 0xff;
- status = mos7840_set_reg_sync(serial->port[i],
- (__u16) (ZLP_REG1 +
- ((__u16)mos7840_port->port_num) - 0x1), Data);
- dev_dbg(&dev->dev, "ZLIP offset %x\n",
+ if (status < 0) {
+ dev_dbg(&port->dev, "Writing ZLP_REG%d failed status-0x%x\n", pnum + 2, status);
+ goto out;
+ } else
+			dev_dbg(&port->dev, "ZLP_REG%d Writing success status %d\n", pnum + 2, status);
+ } else {
+ Data = 0xff;
+ status = mos7840_set_reg_sync(port,
+ (__u16) (ZLP_REG1 +
+ ((__u16)mos7840_port->port_num) - 0x1), Data);
+		dev_dbg(&port->dev, "ZLP offset %x\n",
(__u16)(ZLP_REG1 + ((__u16) mos7840_port->port_num) - 0x1));
- if (status < 0) {
- dev_dbg(&dev->dev, "Writing ZLP_REG%d failed status-0x%x\n", i + 1, status);
- break;
- } else
- dev_dbg(&dev->dev, "ZLP_REG%d Writing success status%d\n", i + 1, status);
+ if (status < 0) {
+ dev_dbg(&port->dev, "Writing ZLP_REG%d failed status-0x%x\n", pnum + 1, status);
+ goto out;
+ } else
+			dev_dbg(&port->dev, "ZLP_REG%d Writing success status %d\n", pnum + 1, status);
- }
- mos7840_port->control_urb = usb_alloc_urb(0, GFP_KERNEL);
- mos7840_port->ctrl_buf = kmalloc(16, GFP_KERNEL);
- mos7840_port->dr = kmalloc(sizeof(struct usb_ctrlrequest),
- GFP_KERNEL);
- if (!mos7840_port->control_urb || !mos7840_port->ctrl_buf ||
- !mos7840_port->dr) {
- status = -ENOMEM;
- goto error;
- }
+ }
+ mos7840_port->control_urb = usb_alloc_urb(0, GFP_KERNEL);
+ mos7840_port->ctrl_buf = kmalloc(16, GFP_KERNEL);
+ mos7840_port->dr = kmalloc(sizeof(struct usb_ctrlrequest),
+ GFP_KERNEL);
+ if (!mos7840_port->control_urb || !mos7840_port->ctrl_buf ||
+ !mos7840_port->dr) {
+ status = -ENOMEM;
+ goto error;
+ }
- mos7840_port->has_led = false;
+ mos7840_port->has_led = false;
- /* Initialize LED timers */
- if (device_type == MOSCHIP_DEVICE_ID_7810) {
- mos7840_port->has_led = true;
+ /* Initialize LED timers */
+ if (device_type == MOSCHIP_DEVICE_ID_7810) {
+ mos7840_port->has_led = true;
- init_timer(&mos7840_port->led_timer1);
- mos7840_port->led_timer1.function = mos7840_led_off;
- mos7840_port->led_timer1.expires =
- jiffies + msecs_to_jiffies(LED_ON_MS);
- mos7840_port->led_timer1.data =
- (unsigned long)mos7840_port;
+ init_timer(&mos7840_port->led_timer1);
+ mos7840_port->led_timer1.function = mos7840_led_off;
+ mos7840_port->led_timer1.expires =
+ jiffies + msecs_to_jiffies(LED_ON_MS);
+ mos7840_port->led_timer1.data = (unsigned long)mos7840_port;
- init_timer(&mos7840_port->led_timer2);
- mos7840_port->led_timer2.function =
- mos7840_led_flag_off;
- mos7840_port->led_timer2.expires =
- jiffies + msecs_to_jiffies(LED_OFF_MS);
- mos7840_port->led_timer2.data =
- (unsigned long)mos7840_port;
+ init_timer(&mos7840_port->led_timer2);
+ mos7840_port->led_timer2.function = mos7840_led_flag_off;
+ mos7840_port->led_timer2.expires =
+ jiffies + msecs_to_jiffies(LED_OFF_MS);
+ mos7840_port->led_timer2.data = (unsigned long)mos7840_port;
- mos7840_port->led_flag = false;
+ mos7840_port->led_flag = false;
- /* Turn off LED */
- mos7840_set_led_sync(serial->port[i],
- MODEM_CONTROL_REGISTER, 0x0300);
- }
+ /* Turn off LED */
+ mos7840_set_led_sync(port, MODEM_CONTROL_REGISTER, 0x0300);
}
+out:
+ if (pnum == serial->num_ports - 1) {
+ /* Zero Length flag enable */
+ Data = 0x0f;
+ status = mos7840_set_reg_sync(serial->port[0], ZLP_REG5, Data);
+ if (status < 0) {
+ dev_dbg(&port->dev, "Writing ZLP_REG5 failed status-0x%x\n", status);
+ goto error;
+ } else
+			dev_dbg(&port->dev, "ZLP_REG5 Writing success status %d\n", status);
- /* Zero Length flag enable */
- Data = 0x0f;
- status = mos7840_set_reg_sync(serial->port[0], ZLP_REG5, Data);
- if (status < 0) {
- dev_dbg(&dev->dev, "Writing ZLP_REG5 failed status-0x%x\n", status);
- goto error;
- } else
- dev_dbg(&dev->dev, "ZLP_REG5 Writing success status%d\n", status);
-
- /* setting configuration feature to one */
- usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
- (__u8) 0x03, 0x00, 0x01, 0x00, NULL, 0x00, MOS_WDR_TIMEOUT);
+		/* set the configuration feature to one */
+ usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
+ 0x03, 0x00, 0x01, 0x00, NULL, 0x00,
+ MOS_WDR_TIMEOUT);
+ }
return 0;
error:
- for (/* nothing */; i >= 0; i--) {
- mos7840_port = mos7840_get_port_private(serial->port[i]);
+ kfree(mos7840_port->dr);
+ kfree(mos7840_port->ctrl_buf);
+ usb_free_urb(mos7840_port->control_urb);
+ kfree(mos7840_port);
- kfree(mos7840_port->dr);
- kfree(mos7840_port->ctrl_buf);
- usb_free_urb(mos7840_port->control_urb);
- kfree(mos7840_port);
- serial->port[i] = NULL;
- }
return status;
}
-/****************************************************************************
- * mos7840_disconnect
- * This function is called whenever the device is removed from the usb bus.
- ****************************************************************************/
-
-static void mos7840_disconnect(struct usb_serial *serial)
+static int mos7840_port_remove(struct usb_serial_port *port)
{
- int i;
- unsigned long flags;
struct moschip_port *mos7840_port;
- /* check for the ports to be closed,close the ports and disconnect */
+ mos7840_port = mos7840_get_port_private(port);
- /* free private structure allocated for serial port *
- * stop reads and writes on all ports */
+ if (mos7840_port->has_led) {
+ /* Turn off LED */
+ mos7840_set_led_sync(port, MODEM_CONTROL_REGISTER, 0x0300);
- for (i = 0; i < serial->num_ports; ++i) {
- mos7840_port = mos7840_get_port_private(serial->port[i]);
- if (mos7840_port) {
- spin_lock_irqsave(&mos7840_port->pool_lock, flags);
- mos7840_port->zombie = 1;
- spin_unlock_irqrestore(&mos7840_port->pool_lock, flags);
- usb_kill_urb(mos7840_port->control_urb);
- }
+ del_timer_sync(&mos7840_port->led_timer1);
+ del_timer_sync(&mos7840_port->led_timer2);
}
-}
-
-/****************************************************************************
- * mos7840_release
- * This function is called when the usb_serial structure is freed.
- ****************************************************************************/
-
-static void mos7840_release(struct usb_serial *serial)
-{
- int i;
- struct moschip_port *mos7840_port;
-
- /* check for the ports to be closed,close the ports and disconnect */
+ usb_kill_urb(mos7840_port->control_urb);
+ usb_free_urb(mos7840_port->control_urb);
+ kfree(mos7840_port->ctrl_buf);
+ kfree(mos7840_port->dr);
+ kfree(mos7840_port);
- /* free private structure allocated for serial port *
- * stop reads and writes on all ports */
-
- for (i = 0; i < serial->num_ports; ++i) {
- mos7840_port = mos7840_get_port_private(serial->port[i]);
- if (mos7840_port) {
- if (mos7840_port->has_led) {
- /* Turn off LED */
- mos7840_set_led_sync(mos7840_port->port,
- MODEM_CONTROL_REGISTER, 0x0300);
-
- del_timer_sync(&mos7840_port->led_timer1);
- del_timer_sync(&mos7840_port->led_timer2);
- }
- kfree(mos7840_port->ctrl_buf);
- kfree(mos7840_port->dr);
- kfree(mos7840_port);
- }
- }
+ return 0;
}
static struct usb_serial_driver moschip7840_4port_device = {
.tiocmget = mos7840_tiocmget,
.tiocmset = mos7840_tiocmset,
.get_icount = mos7840_get_icount,
- .attach = mos7840_startup,
- .disconnect = mos7840_disconnect,
- .release = mos7840_release,
+ .port_probe = mos7840_port_probe,
+ .port_remove = mos7840_port_remove,
.read_bulk_callback = mos7840_bulk_in_callback,
.read_int_callback = mos7840_interrupt_callback,
};
const unsigned char *buf, int count);
static int omninet_write_room(struct tty_struct *tty);
static void omninet_disconnect(struct usb_serial *serial);
-static void omninet_release(struct usb_serial *serial);
-static int omninet_attach(struct usb_serial *serial);
+static int omninet_port_probe(struct usb_serial_port *port);
+static int omninet_port_remove(struct usb_serial_port *port);
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(ZYXEL_VENDOR_ID, ZYXEL_OMNINET_ID) },
.description = "ZyXEL - omni.net lcd plus usb",
.id_table = id_table,
.num_ports = 1,
- .attach = omninet_attach,
+ .port_probe = omninet_port_probe,
+ .port_remove = omninet_port_remove,
.open = omninet_open,
.close = omninet_close,
.write = omninet_write,
.read_bulk_callback = omninet_read_bulk_callback,
.write_bulk_callback = omninet_write_bulk_callback,
.disconnect = omninet_disconnect,
- .release = omninet_release,
};
static struct usb_serial_driver * const serial_drivers[] = {
__u8 od_outseq; /* Sequence number for bulk_out URBs */
};
-static int omninet_attach(struct usb_serial *serial)
+static int omninet_port_probe(struct usb_serial_port *port)
{
struct omninet_data *od;
- struct usb_serial_port *port = serial->port[0];
od = kmalloc(sizeof(struct omninet_data), GFP_KERNEL);
- if (!od) {
- dev_err(&port->dev, "%s- kmalloc(%Zd) failed.\n",
- __func__, sizeof(struct omninet_data));
+ if (!od)
return -ENOMEM;
- }
+
usb_set_serial_port_data(port, od);
+
+ return 0;
+}
+
+static int omninet_port_remove(struct usb_serial_port *port)
+{
+ struct omninet_data *od;
+
+ od = usb_get_serial_port_data(port);
+ kfree(od);
+
return 0;
}
usb_kill_urb(wport->write_urb);
}
-
-static void omninet_release(struct usb_serial *serial)
-{
- struct usb_serial_port *port = serial->port[0];
-
- kfree(usb_get_serial_port_data(port));
-}
-
module_usb_serial_driver(serial_drivers, id_table);
MODULE_AUTHOR(DRIVER_AUTHOR);
{
struct usb_serial *serial = port->serial;
int retval;
- u8 buffer[2];
+ u8 *buffer;
+
+ buffer = kzalloc(1, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
buffer[0] = val;
/* Send the message to the vendor control endpoint
requesttype,
USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
0, 0, buffer, 1, 0);
+ kfree(buffer);
return retval;
}
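
Note: the change above replaces an on-stack array with a heap allocation because
buffers handed to usb_control_msg() may be DMA-mapped, and DMA from stack memory
is not permitted. A hedged sketch of the safe form; the function name, request
value and timeout are illustrative:

#include <linux/slab.h>
#include <linux/usb.h>

static int example_send_byte(struct usb_device *udev, u8 val)
{
	u8 *buf;
	int ret;

	buf = kmalloc(1, GFP_KERNEL);	/* DMA-able, unlike a stack array */
	if (!buf)
		return -ENOMEM;

	*buf = val;
	ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
			      0x01,	/* illustrative bRequest */
			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
			      0, 0, buf, 1, 1000 /* ms, illustrative */);
	kfree(buf);

	return ret;
}
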
if (!dr) {
dev_err(&port->dev, "out of memory\n");
count = -ENOMEM;
- goto error;
+ goto error_no_dr;
}
dr->bRequestType = USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_OUT;
return count;
error:
+ kfree(dr);
+error_no_dr:
usb_free_urb(urb);
error_no_urb:
kfree(buffer);
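
Note: the added error_no_dr label above illustrates the kernel's ordered-goto
unwind: each allocation gets a label placed so that jumping to it frees exactly
what had already been allocated, in reverse order. A minimal standalone sketch
with illustrative names:

#include <linux/slab.h>

static int example_setup(void)
{
	void *buffer, *urb, *dr;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		goto err_no_buffer;

	urb = kmalloc(32, GFP_KERNEL);
	if (!urb)
		goto err_no_urb;

	dr = kmalloc(16, GFP_KERNEL);
	if (!dr)
		goto err_no_dr;

	/* in real code the three pointers would be stored somewhere here */
	return 0;

err_no_dr:
	kfree(urb);		/* dr allocation failed; free the rest */
err_no_urb:
	kfree(buffer);
err_no_buffer:
	return -ENOMEM;
}
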
/* Function prototypes */
static int option_probe(struct usb_serial *serial,
const struct usb_device_id *id);
+static int option_attach(struct usb_serial *serial);
static void option_release(struct usb_serial *serial);
static int option_send_setup(struct usb_serial_port *port);
static void option_instat_callback(struct urb *urb);
#define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED 0x8001
#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED 0x9000
#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED 0x9001
+#define NOVATELWIRELESS_PRODUCT_E362 0x9010
#define NOVATELWIRELESS_PRODUCT_G1 0xA001
#define NOVATELWIRELESS_PRODUCT_G1_M 0xA002
#define NOVATELWIRELESS_PRODUCT_G2 0xA010
#define DELL_PRODUCT_5730_MINICARD_TELUS 0x8181
#define DELL_PRODUCT_5730_MINICARD_VZW 0x8182
+#define DELL_PRODUCT_5800_MINICARD_VZW 0x8195 /* Novatel E362 */
+#define DELL_PRODUCT_5800_V2_MINICARD_VZW 0x8196 /* Novatel E362 */
+
#define KYOCERA_VENDOR_ID 0x0c88
#define KYOCERA_PRODUCT_KPC650 0x17da
#define KYOCERA_PRODUCT_KPC680 0x180a
/* ALCATEL PRODUCTS */
#define ALCATEL_VENDOR_ID 0x1bbb
#define ALCATEL_PRODUCT_X060S_X200 0x0000
+#define ALCATEL_PRODUCT_X220_X500D 0x0017
#define PIRELLI_VENDOR_ID 0x1266
#define PIRELLI_PRODUCT_C100_1 0x1002
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G2) },
/* Novatel Ovation MC551 a.k.a. Verizon USB551L */
{ USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E362, 0xff, 0xff, 0xff) },
{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) },
{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) },
{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_SPRINT) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */
{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_TELUS) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */
{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_VZW) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */
+ { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_MINICARD_VZW, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_V2_MINICARD_VZW, 0xff, 0xff, 0xff) },
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) },
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
.driver_info = (kernel_ulong_t)&alcatel_x200_blacklist
},
+ { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D) },
{ USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
{ USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
.tiocmget = usb_wwan_tiocmget,
.tiocmset = usb_wwan_tiocmset,
.ioctl = usb_wwan_ioctl,
- .attach = usb_wwan_startup,
+ .attach = option_attach,
.release = option_release,
+ .port_probe = usb_wwan_port_probe,
.port_remove = usb_wwan_port_remove,
.read_int_callback = option_instat_callback,
#ifdef CONFIG_PM
static int option_probe(struct usb_serial *serial,
const struct usb_device_id *id)
{
- struct usb_wwan_intf_private *data;
- struct option_private *priv;
struct usb_interface_descriptor *iface_desc =
&serial->interface->cur_altsetting->desc;
struct usb_device_descriptor *dev_desc = &serial->dev->descriptor;
iface_desc->bInterfaceClass != USB_CLASS_CDC_DATA)
return -ENODEV;
+ /* Store device id so we can use it during attach. */
+ usb_set_serial_data(serial, (void *)id);
+
+ return 0;
+}
+
+static int option_attach(struct usb_serial *serial)
+{
+ struct usb_interface_descriptor *iface_desc;
+ const struct usb_device_id *id;
+ struct usb_wwan_intf_private *data;
+ struct option_private *priv;
+
data = kzalloc(sizeof(struct usb_wwan_intf_private), GFP_KERNEL);
if (!data)
return -ENOMEM;
return -ENOMEM;
}
+ /* Retrieve device id stored at probe. */
+ id = usb_get_serial_data(serial);
+ iface_desc = &serial->interface->cur_altsetting->desc;
+
priv->bInterfaceNumber = iface_desc->bInterfaceNumber;
data->private = priv;
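
Note: probe() runs before any driver state exists, so the hunks above park the
matched usb_device_id in the serial data pointer and fetch it again in attach(),
which then replaces it with the real private data. A sketch of the handoff;
driver and struct names are illustrative:

#include <linux/slab.h>
#include <linux/usb/serial.h>

struct example_intf_data {		/* illustrative */
	unsigned long flags;
};

static int example_probe(struct usb_serial *serial,
			 const struct usb_device_id *id)
{
	/* no allocations yet; just park the id for attach() */
	usb_set_serial_data(serial, (void *)id);

	return 0;
}

static int example_attach(struct usb_serial *serial)
{
	const struct usb_device_id *id = usb_get_serial_data(serial);
	struct example_intf_data *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->flags = id->driver_info;		/* consume the parked id */
	usb_set_serial_data(serial, data);	/* now holds real state */

	return 0;
}
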
static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
{
- struct usb_wwan_intf_private *data;
struct usb_host_interface *intf = serial->interface->cur_altsetting;
struct device *dev = &serial->dev->dev;
int retval = -ENODEV;
ifnum = intf->desc.bInterfaceNumber;
dev_dbg(dev, "This Interface = %d\n", ifnum);
- data = kzalloc(sizeof(struct usb_wwan_intf_private),
- GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- spin_lock_init(&data->susp_lock);
-
if (nintf == 1) {
/* QDL mode */
/* Gobi 2000 has a single altsetting, older ones have two */
}
}
- /* Set serial->private if not returning error */
- if (retval == 0)
- usb_set_serial_data(serial, data);
- else
- kfree(data);
-
return retval;
}
+static int qc_attach(struct usb_serial *serial)
+{
+ struct usb_wwan_intf_private *data;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ spin_lock_init(&data->susp_lock);
+
+ usb_set_serial_data(serial, data);
+
+ return 0;
+}
+
static void qc_release(struct usb_serial *serial)
{
struct usb_wwan_intf_private *priv = usb_get_serial_data(serial);
- /* Free the private data allocated in qcprobe */
usb_set_serial_data(serial, NULL);
kfree(priv);
}
.write = usb_wwan_write,
.write_room = usb_wwan_write_room,
.chars_in_buffer = usb_wwan_chars_in_buffer,
- .attach = usb_wwan_startup,
+ .attach = qc_attach,
.release = qc_release,
+ .port_probe = usb_wwan_port_probe,
.port_remove = usb_wwan_port_remove,
#ifdef CONFIG_PM
.suspend = usb_wwan_suspend,
static void qt2_release(struct usb_serial *serial)
{
- int i;
+ struct qt2_serial_private *serial_priv;
- kfree(usb_get_serial_data(serial));
+ serial_priv = usb_get_serial_data(serial);
- for (i = 0; i < serial->num_ports; i++)
- kfree(usb_get_serial_port_data(serial->port[i]));
+ usb_free_urb(serial_priv->read_urb);
+ kfree(serial_priv);
}
static inline int calc_baud_divisor(int baudrate)
port_priv->is_open = false;
spin_lock_irqsave(&port_priv->urb_lock, flags);
- if (port_priv->write_urb->status == -EINPROGRESS)
- usb_kill_urb(port_priv->write_urb);
+ usb_kill_urb(port_priv->write_urb);
port_priv->urb_in_use = false;
spin_unlock_irqrestore(&port_priv->urb_lock, flags);
+ mutex_lock(&port->serial->disc_mutex);
+ if (port->serial->disconnected) {
+ mutex_unlock(&port->serial->disc_mutex);
+ return;
+ }
+
/* flush the port transmit buffer */
i = usb_control_msg(serial->dev,
usb_rcvctrlpipe(serial->dev, 0),
dev_err(&port->dev, "%s - close port failed %i\n",
__func__, i);
+ mutex_unlock(&port->serial->disc_mutex);
}
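
Note: the close() hunk above gains the standard disconnect guard: device I/O must
be skipped once the USB device is gone, and serial->disconnected has to be tested
under disc_mutex so the test cannot race with the disconnect() callback. A minimal
sketch:

#include <linux/mutex.h>
#include <linux/usb/serial.h>

static void example_close(struct usb_serial_port *port)
{
	struct usb_serial *serial = port->serial;

	mutex_lock(&serial->disc_mutex);
	if (!serial->disconnected) {
		/* safe to send control messages to the device here */
	}
	mutex_unlock(&serial->disc_mutex);
}
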
static void qt2_disconnect(struct usb_serial *serial)
{
struct qt2_serial_private *serial_priv = usb_get_serial_data(serial);
- struct qt2_port_private *port_priv;
- int i;
-
- if (serial_priv->read_urb->status == -EINPROGRESS)
- usb_kill_urb(serial_priv->read_urb);
-
- usb_free_urb(serial_priv->read_urb);
- for (i = 0; i < serial->num_ports; i++) {
- port_priv = usb_get_serial_port_data(serial->port[i]);
-
- if (port_priv->write_urb->status == -EINPROGRESS)
- usb_kill_urb(port_priv->write_urb);
- usb_free_urb(port_priv->write_urb);
- }
+ usb_kill_urb(serial_priv->read_urb);
}
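
Note: the removed "if (...->status == -EINPROGRESS)" guards above were both racy
(the status can change between the test and the call) and unnecessary;
usb_kill_urb() may be called on a URB in any state, including one that was never
submitted, where it is simply a no-op. Contrasting fragment:

	/* racy and redundant: status may change right after the test */
	if (urb->status == -EINPROGRESS)
		usb_kill_urb(urb);

	/* correct: usb_kill_urb() is a safe no-op on an idle URB */
	usb_kill_urb(urb);
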
static int get_serial_info(struct usb_serial_port *port,
static int qt2_setup_urbs(struct usb_serial *serial)
{
- struct usb_serial_port *port;
struct usb_serial_port *port0;
struct qt2_serial_private *serial_priv;
- struct qt2_port_private *port_priv;
- int pcount, status;
+ int status;
port0 = serial->port[0];
sizeof(serial_priv->read_buffer),
qt2_read_bulk_callback, serial);
- /* setup write_urb for each port */
- for (pcount = 0; pcount < serial->num_ports; pcount++) {
-
- port = serial->port[pcount];
- port_priv = usb_get_serial_port_data(port);
-
- port_priv->write_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!port_priv->write_urb) {
- dev_err(&serial->dev->dev,
- "failed to alloc write_urb for port %i\n",
- pcount);
- return -ENOMEM;
- }
-
- usb_fill_bulk_urb(port_priv->write_urb,
- serial->dev,
- usb_sndbulkpipe(serial->dev,
- port0->
- bulk_out_endpointAddress),
- port_priv->write_buffer,
- sizeof(port_priv->write_buffer),
- qt2_write_bulk_callback, port);
- }
-
status = usb_submit_urb(serial_priv->read_urb, GFP_KERNEL);
if (status != 0) {
dev_err(&serial->dev->dev,
"%s - submit read urb failed %i\n", __func__, status);
+ usb_free_urb(serial_priv->read_urb);
return status;
}
return 0;
-
}
static int qt2_attach(struct usb_serial *serial)
{
struct qt2_serial_private *serial_priv;
- struct qt2_port_private *port_priv;
- int status, pcount;
+ int status;
/* power on unit */
status = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
usb_set_serial_data(serial, serial_priv);
- for (pcount = 0; pcount < serial->num_ports; pcount++) {
- port_priv = kzalloc(sizeof(*port_priv), GFP_KERNEL);
- if (!port_priv) {
- dev_err(&serial->dev->dev,
- "%s- kmalloc(%Zd) failed.\n", __func__,
- sizeof(*port_priv));
- pcount--;
- status = -ENOMEM;
- goto attach_failed;
- }
-
- spin_lock_init(&port_priv->lock);
- spin_lock_init(&port_priv->urb_lock);
- init_waitqueue_head(&port_priv->delta_msr_wait);
-
- port_priv->port = serial->port[pcount];
-
- usb_set_serial_port_data(serial->port[pcount], port_priv);
- }
-
status = qt2_setup_urbs(serial);
if (status != 0)
goto attach_failed;
return 0;
attach_failed:
- for (/* empty */; pcount >= 0; pcount--) {
- port_priv = usb_get_serial_port_data(serial->port[pcount]);
- kfree(port_priv);
- }
kfree(serial_priv);
return status;
}
+static int qt2_port_probe(struct usb_serial_port *port)
+{
+ struct usb_serial *serial = port->serial;
+ struct qt2_port_private *port_priv;
+ u8 bEndpointAddress;
+
+ port_priv = kzalloc(sizeof(*port_priv), GFP_KERNEL);
+ if (!port_priv)
+ return -ENOMEM;
+
+ spin_lock_init(&port_priv->lock);
+ spin_lock_init(&port_priv->urb_lock);
+ init_waitqueue_head(&port_priv->delta_msr_wait);
+ port_priv->port = port;
+
+ port_priv->write_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!port_priv->write_urb) {
+ kfree(port_priv);
+ return -ENOMEM;
+ }
+ bEndpointAddress = serial->port[0]->bulk_out_endpointAddress;
+ usb_fill_bulk_urb(port_priv->write_urb, serial->dev,
+ usb_sndbulkpipe(serial->dev, bEndpointAddress),
+ port_priv->write_buffer,
+ sizeof(port_priv->write_buffer),
+ qt2_write_bulk_callback, port);
+
+ usb_set_serial_port_data(port, port_priv);
+
+ return 0;
+}
+
+static int qt2_port_remove(struct usb_serial_port *port)
+{
+ struct qt2_port_private *port_priv;
+
+ port_priv = usb_get_serial_port_data(port);
+ usb_free_urb(port_priv->write_urb);
+ kfree(port_priv);
+
+ return 0;
+}
+
static int qt2_tiocmget(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
.attach = qt2_attach,
.release = qt2_release,
.disconnect = qt2_disconnect,
+ .port_probe = qt2_port_probe,
+ .port_remove = qt2_port_remove,
.dtr_rts = qt2_dtr_rts,
.break_ctl = qt2_break_ctl,
.tiocmget = qt2_tiocmget,
{
int result = 0;
struct usb_device *udev;
- struct sierra_intf_private *data;
u8 ifnum;
udev = serial->dev;
return -ENODEV;
}
- data = serial->private = kzalloc(sizeof(struct sierra_intf_private), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
- spin_lock_init(&data->susp_lock);
-
return result;
}
static int sierra_startup(struct usb_serial *serial)
{
- struct usb_serial_port *port;
- struct sierra_port_private *portdata;
- struct sierra_iface_info *himemoryp = NULL;
- int i;
- u8 ifnum;
+ struct sierra_intf_private *intfdata;
+
+ intfdata = kzalloc(sizeof(*intfdata), GFP_KERNEL);
+ if (!intfdata)
+ return -ENOMEM;
+
+ spin_lock_init(&intfdata->susp_lock);
+
+ usb_set_serial_data(serial, intfdata);
/* Set Device mode to D0 */
sierra_set_power_state(serial->dev, 0x0000);
if (nmea)
sierra_vsc_set_nmea(serial->dev, 1);
- /* Now setup per port private data */
- for (i = 0; i < serial->num_ports; i++) {
- port = serial->port[i];
- portdata = kzalloc(sizeof(*portdata), GFP_KERNEL);
- if (!portdata) {
- dev_dbg(&port->dev, "%s: kmalloc for "
- "sierra_port_private (%d) failed!\n",
- __func__, i);
- return -ENOMEM;
- }
- spin_lock_init(&portdata->lock);
- init_usb_anchor(&portdata->active);
- init_usb_anchor(&portdata->delayed);
- ifnum = i;
- /* Assume low memory requirements */
- portdata->num_out_urbs = N_OUT_URB;
- portdata->num_in_urbs = N_IN_URB;
-
- /* Determine actual memory requirements */
- if (serial->num_ports == 1) {
- /* Get interface number for composite device */
- ifnum = sierra_calc_interface(serial);
- himemoryp =
- (struct sierra_iface_info *)&typeB_interface_list;
- if (is_himemory(ifnum, himemoryp)) {
- portdata->num_out_urbs = N_OUT_URB_HM;
- portdata->num_in_urbs = N_IN_URB_HM;
- }
- }
- else {
- himemoryp =
- (struct sierra_iface_info *)&typeA_interface_list;
- if (is_himemory(i, himemoryp)) {
- portdata->num_out_urbs = N_OUT_URB_HM;
- portdata->num_in_urbs = N_IN_URB_HM;
- }
- }
- dev_dbg(&serial->dev->dev,
- "Memory usage (urbs) interface #%d, in=%d, out=%d\n",
- ifnum,portdata->num_in_urbs, portdata->num_out_urbs );
- /* Set the port private data pointer */
- usb_set_serial_port_data(port, portdata);
- }
-
return 0;
}
static void sierra_release(struct usb_serial *serial)
{
- int i;
- struct usb_serial_port *port;
+ struct sierra_intf_private *intfdata;
+
+ intfdata = usb_get_serial_data(serial);
+ kfree(intfdata);
+}
+
+static int sierra_port_probe(struct usb_serial_port *port)
+{
+ struct usb_serial *serial = port->serial;
struct sierra_port_private *portdata;
+ const struct sierra_iface_info *himemoryp;
+ u8 ifnum;
- for (i = 0; i < serial->num_ports; ++i) {
- port = serial->port[i];
- if (!port)
- continue;
- portdata = usb_get_serial_port_data(port);
- if (!portdata)
- continue;
- kfree(portdata);
+ portdata = kzalloc(sizeof(*portdata), GFP_KERNEL);
+ if (!portdata)
+ return -ENOMEM;
+
+ spin_lock_init(&portdata->lock);
+ init_usb_anchor(&portdata->active);
+ init_usb_anchor(&portdata->delayed);
+
+ /* Assume low memory requirements */
+ portdata->num_out_urbs = N_OUT_URB;
+ portdata->num_in_urbs = N_IN_URB;
+
+ /* Determine actual memory requirements */
+ if (serial->num_ports == 1) {
+ /* Get interface number for composite device */
+ ifnum = sierra_calc_interface(serial);
+ himemoryp = &typeB_interface_list;
+ } else {
+ /* This is really the usb-serial port number of the interface
+ * rather than the interface number.
+ */
+ ifnum = port->number - serial->minor;
+ himemoryp = &typeA_interface_list;
}
+
+ if (is_himemory(ifnum, himemoryp)) {
+ portdata->num_out_urbs = N_OUT_URB_HM;
+ portdata->num_in_urbs = N_IN_URB_HM;
+ }
+
+ dev_dbg(&port->dev,
+ "Memory usage (urbs) interface #%d, in=%d, out=%d\n",
+ ifnum, portdata->num_in_urbs, portdata->num_out_urbs);
+
+ usb_set_serial_port_data(port, portdata);
+
+ return 0;
+}
+
+static int sierra_port_remove(struct usb_serial_port *port)
+{
+ struct sierra_port_private *portdata;
+
+ portdata = usb_get_serial_port_data(port);
+ kfree(portdata);
+
+ return 0;
}
#ifdef CONFIG_PM
.tiocmset = sierra_tiocmset,
.attach = sierra_startup,
.release = sierra_release,
+ .port_probe = sierra_port_probe,
+ .port_remove = sierra_port_remove,
.suspend = sierra_suspend,
.resume = sierra_resume,
.read_int_callback = sierra_instat_callback,
extern void usb_wwan_dtr_rts(struct usb_serial_port *port, int on);
extern int usb_wwan_open(struct tty_struct *tty, struct usb_serial_port *port);
extern void usb_wwan_close(struct usb_serial_port *port);
-extern int usb_wwan_startup(struct usb_serial *serial);
+extern int usb_wwan_port_probe(struct usb_serial_port *port);
extern int usb_wwan_port_remove(struct usb_serial_port *port);
extern int usb_wwan_write_room(struct tty_struct *tty);
extern void usb_wwan_set_termios(struct tty_struct *tty,
EXPORT_SYMBOL(usb_wwan_close);
/* Helper function used by usb_wwan_port_probe */
-static struct urb *usb_wwan_setup_urb(struct usb_serial *serial, int endpoint,
+static struct urb *usb_wwan_setup_urb(struct usb_serial_port *port,
+ int endpoint,
int dir, void *ctx, char *buf, int len,
void (*callback) (struct urb *))
{
+ struct usb_serial *serial = port->serial;
struct urb *urb;
- if (endpoint == -1)
- return NULL; /* endpoint not needed */
-
urb = usb_alloc_urb(0, GFP_KERNEL); /* No ISO */
if (urb == NULL) {
dev_dbg(&serial->interface->dev,
return urb;
}
-/* Setup urbs */
-static void usb_wwan_setup_urbs(struct usb_serial *serial)
+int usb_wwan_port_probe(struct usb_serial_port *port)
{
- int i, j;
- struct usb_serial_port *port;
struct usb_wwan_port_private *portdata;
+ struct urb *urb;
+ u8 *buffer;
+ int err;
+ int i;
- for (i = 0; i < serial->num_ports; i++) {
- port = serial->port[i];
- portdata = usb_get_serial_port_data(port);
+ portdata = kzalloc(sizeof(*portdata), GFP_KERNEL);
+ if (!portdata)
+ return -ENOMEM;
- /* Do indat endpoints first */
- for (j = 0; j < N_IN_URB; ++j) {
- portdata->in_urbs[j] = usb_wwan_setup_urb(serial,
- port->
- bulk_in_endpointAddress,
- USB_DIR_IN,
- port,
- portdata->
- in_buffer[j],
- IN_BUFLEN,
- usb_wwan_indat_callback);
- }
+ init_usb_anchor(&portdata->delayed);
- /* outdat endpoints */
- for (j = 0; j < N_OUT_URB; ++j) {
- portdata->out_urbs[j] = usb_wwan_setup_urb(serial,
- port->
- bulk_out_endpointAddress,
- USB_DIR_OUT,
- port,
- portdata->
- out_buffer
- [j],
- OUT_BUFLEN,
- usb_wwan_outdat_callback);
- }
- }
-}
+ for (i = 0; i < N_IN_URB; i++) {
+ if (!port->bulk_in_size)
+ break;
-int usb_wwan_startup(struct usb_serial *serial)
-{
- int i, j, err;
- struct usb_serial_port *port;
- struct usb_wwan_port_private *portdata;
- u8 *buffer;
+ buffer = (u8 *)__get_free_page(GFP_KERNEL);
+ if (!buffer)
+ goto bail_out_error;
+ portdata->in_buffer[i] = buffer;
- /* Now setup per port private data */
- for (i = 0; i < serial->num_ports; i++) {
- port = serial->port[i];
- portdata = kzalloc(sizeof(*portdata), GFP_KERNEL);
- if (!portdata) {
- dev_dbg(&port->dev, "%s: kmalloc for usb_wwan_port_private (%d) failed!.\n",
- __func__, i);
- return 1;
- }
- init_usb_anchor(&portdata->delayed);
+ urb = usb_wwan_setup_urb(port, port->bulk_in_endpointAddress,
+ USB_DIR_IN, port,
+ buffer, IN_BUFLEN,
+ usb_wwan_indat_callback);
+ portdata->in_urbs[i] = urb;
+ }
- for (j = 0; j < N_IN_URB; j++) {
- buffer = (u8 *) __get_free_page(GFP_KERNEL);
- if (!buffer)
- goto bail_out_error;
- portdata->in_buffer[j] = buffer;
- }
+ for (i = 0; i < N_OUT_URB; i++) {
+ if (!port->bulk_out_size)
+ break;
- for (j = 0; j < N_OUT_URB; j++) {
- buffer = kmalloc(OUT_BUFLEN, GFP_KERNEL);
- if (!buffer)
- goto bail_out_error2;
- portdata->out_buffer[j] = buffer;
- }
+ buffer = kmalloc(OUT_BUFLEN, GFP_KERNEL);
+ if (!buffer)
+ goto bail_out_error2;
+ portdata->out_buffer[i] = buffer;
- usb_set_serial_port_data(port, portdata);
+ urb = usb_wwan_setup_urb(port, port->bulk_out_endpointAddress,
+ USB_DIR_OUT, port,
+ buffer, OUT_BUFLEN,
+ usb_wwan_outdat_callback);
+ portdata->out_urbs[i] = urb;
+ }
- if (!port->interrupt_in_urb)
- continue;
+ usb_set_serial_port_data(port, portdata);
+
+ if (port->interrupt_in_urb) {
err = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (err)
dev_dbg(&port->dev, "%s: submit irq_in urb failed %d\n",
__func__, err);
}
- usb_wwan_setup_urbs(serial);
+
return 0;
bail_out_error2:
- for (j = 0; j < N_OUT_URB; j++)
- kfree(portdata->out_buffer[j]);
+ for (i = 0; i < N_OUT_URB; i++) {
+ usb_free_urb(portdata->out_urbs[i]);
+ kfree(portdata->out_buffer[i]);
+ }
bail_out_error:
- for (j = 0; j < N_IN_URB; j++)
- if (portdata->in_buffer[j])
- free_page((unsigned long)portdata->in_buffer[j]);
+ for (i = 0; i < N_IN_URB; i++) {
+ usb_free_urb(portdata->in_urbs[i]);
+ free_page((unsigned long)portdata->in_buffer[i]);
+ }
kfree(portdata);
- return 1;
+
+ return -ENOMEM;
}
-EXPORT_SYMBOL(usb_wwan_startup);
+EXPORT_SYMBOL_GPL(usb_wwan_port_probe);
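
Note: usb_wwan_port_probe() now returns -ENOMEM instead of the old bare 1, giving
callers a proper errno. Its unwind loops also rely on usb_free_urb(NULL),
kfree(NULL) and free_page(0) being safe no-ops, so they can sweep the whole
URB/buffer arrays without tracking how far allocation got. A sketch of that
idiom, with illustrative names:

#include <linux/slab.h>
#include <linux/usb.h>

static void example_free_all(struct urb *urbs[], u8 *bufs[], int n)
{
	int i;

	for (i = 0; i < n; i++) {
		usb_free_urb(urbs[i]);	/* NULL-safe if the slot was never set */
		kfree(bufs[i]);		/* likewise */
	}
}
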
int usb_wwan_port_remove(struct usb_serial_port *port)
{
/* function prototypes for the Connect Tech WhiteHEAT serial converter */
static int whiteheat_attach(struct usb_serial *serial);
static void whiteheat_release(struct usb_serial *serial);
+static int whiteheat_port_probe(struct usb_serial_port *port);
+static int whiteheat_port_remove(struct usb_serial_port *port);
static int whiteheat_open(struct tty_struct *tty,
struct usb_serial_port *port);
static void whiteheat_close(struct usb_serial_port *port);
.num_ports = 4,
.attach = whiteheat_attach,
.release = whiteheat_release,
+ .port_probe = whiteheat_port_probe,
+ .port_remove = whiteheat_port_remove,
.open = whiteheat_open,
.close = whiteheat_close,
.ioctl = whiteheat_ioctl,
{
struct usb_serial_port *command_port;
struct whiteheat_command_private *command_info;
- struct usb_serial_port *port;
- struct whiteheat_private *info;
struct whiteheat_hw_info *hw_info;
int pipe;
int ret;
int alen;
__u8 *command;
__u8 *result;
- int i;
command_port = serial->port[COMMAND_PORT];
serial->type->description,
hw_info->sw_major_rev, hw_info->sw_minor_rev);
- for (i = 0; i < serial->num_ports; i++) {
- port = serial->port[i];
-
- info = kmalloc(sizeof(struct whiteheat_private), GFP_KERNEL);
- if (info == NULL) {
- dev_err(&port->dev,
- "%s: Out of memory for port structures\n",
- serial->type->description);
- goto no_private;
- }
-
- info->mcr = 0;
-
- usb_set_serial_port_data(port, info);
- }
-
command_info = kmalloc(sizeof(struct whiteheat_command_private),
GFP_KERNEL);
if (command_info == NULL) {
"%s: please contact support@connecttech.com\n",
serial->type->description);
kfree(result);
+ kfree(command);
return -ENODEV;
no_command_private:
- for (i = serial->num_ports - 1; i >= 0; i--) {
- port = serial->port[i];
- info = usb_get_serial_port_data(port);
- kfree(info);
-no_private:
- ;
- }
kfree(result);
no_result_buffer:
kfree(command);
return -ENOMEM;
}
-
static void whiteheat_release(struct usb_serial *serial)
{
struct usb_serial_port *command_port;
- struct whiteheat_private *info;
- int i;
/* free up our private data for our command port */
command_port = serial->port[COMMAND_PORT];
kfree(usb_get_serial_port_data(command_port));
+}
- for (i = 0; i < serial->num_ports; i++) {
- info = usb_get_serial_port_data(serial->port[i]);
- kfree(info);
- }
+static int whiteheat_port_probe(struct usb_serial_port *port)
+{
+ struct whiteheat_private *info;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ usb_set_serial_port_data(port, info);
+
+ return 0;
+}
+
+static int whiteheat_port_remove(struct usb_serial_port *port)
+{
+ struct whiteheat_private *info;
+
+ info = usb_get_serial_port_data(port);
+ kfree(info);
+
+ return 0;
}
static int whiteheat_open(struct tty_struct *tty, struct usb_serial_port *port)
/* Some devices don't handle VPD pages correctly */
sdev->skip_vpd_pages = 1;
+ /* Do not attempt to use REPORT SUPPORTED OPERATION CODES */
+ sdev->no_report_opcodes = 1;
+
+ /* Do not attempt to use WRITE SAME */
+ sdev->no_write_same = 1;
+
/* Some disks return the total number of blocks in response
* to READ CAPACITY rather than the highest block number.
* If this device makes that mistake, tell the sd driver. */
USB_SC_8070, USB_PR_CB, NULL,
US_FL_NEED_OVERRIDE | US_FL_FIX_INQUIRY ),
+/* Submitted by Oleksandr Chumachenko <ledest@gmail.com> */
+UNUSUAL_DEV( 0x07cf, 0x1167, 0x0100, 0x0100,
+ "Casio",
+ "EX-N1 DigitalCamera",
+ USB_SC_8070, USB_PR_DEVICE, NULL, 0),
+
/* Submitted by Hartmut Wahl <hwahl@hwahl.de>*/
UNUSUAL_DEV( 0x0839, 0x000a, 0x0001, 0x0001,
"Samsung",
.hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE
};
size_t total_len = 0;
- int err, headcount, mergeable;
+ int err, mergeable;
+ s16 headcount;
size_t vhost_hlen, sock_hlen;
size_t vhost_len, sock_len;
/* TODO: check that we are running from vhost_worker? */
}
_iov = iov + ret;
size = reg->memory_size - addr + reg->guest_phys_addr;
- _iov->iov_len = min((u64)len, size);
+ _iov->iov_len = min((u64)len - s, size);
_iov->iov_base = (void __user *)(unsigned long)
(reg->userspace_addr + addr - reg->guest_phys_addr);
s += size;
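
Note: the vhost fix above caps each iovec at the remaining length (len - s)
rather than the total len; with the old code the later entries of a multi-region
transfer could claim more bytes than were actually left. A userspace illustration
of the splitting rule, with illustrative names:

#include <stdint.h>
#include <sys/uio.h>

/* Split "len" bytes of "base" across regions of at most "region" bytes each. */
static int split_iov(struct iovec *iov, int max, uint64_t len,
		     uint64_t region, void *base)
{
	uint64_t done = 0;
	int n = 0;

	while (done < len && n < max) {
		uint64_t left = len - done;	/* the crucial remaining-length cap */
		uint64_t chunk = left < region ? left : region;

		iov[n].iov_base = (char *)base + done;
		iov[n].iov_len = chunk;
		done += chunk;
		n++;
	}
	return n;
}
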
The LTV350QV panel is present on all ATSTK1000 boards.
config LCD_ILI9320
- tristate
+ tristate "ILI Technology ILI9320 controller support"
+ depends on SPI
help
If you have a panel based on the ILI9320 controller chip
then say y to include a power driver for it.
struct omap_dss_output *out;
enum omap_dss_output_id id;
- id = module == 0 ? OMAP_DSS_OUTPUT_DSI1 : OMAP_DSS_OUTPUT_DSI2;
+ switch (module) {
+ case 0:
+ id = OMAP_DSS_OUTPUT_DSI1;
+ break;
+ case 1:
+ id = OMAP_DSS_OUTPUT_DSI2;
+ break;
+ default:
+ return NULL;
+ }
out = omap_dss_get_output(id);
- return out->pdev;
+ return out ? out->pdev : NULL;
}
static inline void dsi_write_reg(struct platform_device *dsidev,
dss.dss_clk = clk;
- clk = clk_get(NULL, dss.feat->clk_name);
- if (IS_ERR(clk)) {
- DSSERR("Failed to get %s\n", dss.feat->clk_name);
- r = PTR_ERR(clk);
- goto err;
+ if (dss.feat->clk_name) {
+ clk = clk_get(NULL, dss.feat->clk_name);
+ if (IS_ERR(clk)) {
+ DSSERR("Failed to get %s\n", dss.feat->clk_name);
+ r = PTR_ERR(clk);
+ goto err;
+ }
+ } else {
+ clk = NULL;
}
dss.dpll4_m4_ck = clk;
if (cpu_is_omap24xx())
src = &omap24xx_dss_feats;
- else if (cpu_is_omap34xx())
- src = &omap34xx_dss_feats;
else if (cpu_is_omap3630())
src = &omap3630_dss_feats;
+ else if (cpu_is_omap34xx())
+ src = &omap34xx_dss_feats;
else if (cpu_is_omap44xx())
src = &omap44xx_dss_feats;
else if (soc_is_omap54xx())
{
mutex_lock(&hdmi.lock);
- if (hdmi_runtime_get())
+ if (hdmi_runtime_get()) {
+ mutex_unlock(&hdmi.lock);
return;
+ }
hdmi.ip_data.ops->dump_wrapper(&hdmi.ip_data, s);
hdmi.ip_data.ops->dump_pll(&hdmi.ip_data, s);
case OMAPFB_WAITFORVSYNC:
DBG("ioctl WAITFORVSYNC\n");
- if (!display && !display->output && !display->output->manager) {
+ if (!display || !display->output || !display->output->manager) {
r = -EINVAL;
break;
}
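
Note: the WAITFORVSYNC fix above is the classic &&/|| inversion: the old test
"!display && !display->output && ..." rejects only when every pointer is NULL,
and still dereferences display->output when display itself is NULL. Validating a
pointer chain needs ||, which short-circuits before the first bad dereference:

	/* broken: almost never true, and derefs display->output on NULL */
	if (!display && !display->output && !display->output->manager)
		return -EINVAL;

	/* fixed: rejects if any link is missing, stops at the first NULL */
	if (!display || !display->output || !display->output->manager)
		return -EINVAL;
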
case XenbusStateReconfiguring:
case XenbusStateReconfigured:
case XenbusStateUnknown:
- case XenbusStateClosed:
break;
case XenbusStateInitWait:
info->feature_resize = val;
break;
+ case XenbusStateClosed:
+ if (dev->state == XenbusStateClosed)
+ break;
+ /* Missed the backend's CLOSING state -- fallthrough */
case XenbusStateClosing:
xenbus_frontend_closed(dev);
break;
void unregister_virtio_device(struct virtio_device *dev)
{
+ int index = dev->index; /* save for after device release */
+
device_unregister(&dev->dev);
- ida_simple_remove(&virtio_index_ida, dev->index);
+ ida_simple_remove(&virtio_index_ida, index);
}
EXPORT_SYMBOL_GPL(unregister_virtio_device);
config XEN_BALLOON
bool "Xen memory balloon driver"
+ depends on !ARM
default y
help
The balloon driver allows the Xen domain to request more memory from
config XEN_TMEM
bool
+ depends on !ARM
default y if (CLEANCACHE || FRONTSWAP)
help
Shim to interface in-kernel Transcendent Memory hooks
obj-y += manage.o balloon.o
obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o
endif
+obj-$(CONFIG_X86) += fallback.o
obj-y += grant-table.o features.o events.o
obj-y += xenbus/
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
-#include <asm/e820.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
EXPORT_SYMBOL_GPL(balloon_stats);
/* We increase/decrease in batches which fit in a page */
-static unsigned long frame_list[PAGE_SIZE / sizeof(unsigned long)];
+static xen_pfn_t frame_list[PAGE_SIZE / sizeof(unsigned long)];
#ifdef CONFIG_HIGHMEM
#define inc_totalhigh_pages() (totalhigh_pages++)
static int xen_dbgp_op(struct usb_hcd *hcd, int op)
{
+#ifdef CONFIG_PCI
const struct device *ctrlr = hcd_to_bus(hcd)->controller;
+#endif
struct physdev_dbgp_op dbgp;
if (!xen_initial_domain())
#define PIRQ_SHAREABLE (1 << 1)
static int *evtchn_to_irq;
+#ifdef CONFIG_X86
static unsigned long *pirq_eoi_map;
+#endif
static bool (*pirq_needs_eoi)(unsigned irq);
static DEFINE_PER_CPU(unsigned long [NR_EVENT_CHANNELS/BITS_PER_LONG],
return ret;
}
+#ifdef CONFIG_X86
static bool pirq_check_eoi_map(unsigned irq)
{
return test_bit(pirq_from_irq(irq), pirq_eoi_map);
}
+#endif
static bool pirq_needs_eoi_flag(unsigned irq)
{
{
struct pt_regs *old_regs = set_irq_regs(regs);
+ irq_enter();
#ifdef CONFIG_X86
exit_idle();
#endif
- irq_enter();
__xen_evtchn_do_upcall();
--- /dev/null
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/bug.h>
+#include <linux/export.h>
+#include <asm/hypervisor.h>
+#include <asm/xen/hypercall.h>
+
+int xen_event_channel_op_compat(int cmd, void *arg)
+{
+ struct evtchn_op op;
+ int rc;
+
+ op.cmd = cmd;
+ memcpy(&op.u, arg, sizeof(op.u));
+ rc = _hypercall1(int, event_channel_op_compat, &op);
+
+ switch (cmd) {
+ case EVTCHNOP_close:
+ case EVTCHNOP_send:
+ case EVTCHNOP_bind_vcpu:
+ case EVTCHNOP_unmask:
+ /* no output */
+ break;
+
+#define COPY_BACK(eop) \
+ case EVTCHNOP_##eop: \
+ memcpy(arg, &op.u.eop, sizeof(op.u.eop)); \
+ break
+
+ COPY_BACK(bind_interdomain);
+ COPY_BACK(bind_virq);
+ COPY_BACK(bind_pirq);
+ COPY_BACK(status);
+ COPY_BACK(alloc_unbound);
+ COPY_BACK(bind_ipi);
+#undef COPY_BACK
+
+ default:
+ WARN_ON(rc != -ENOSYS);
+ break;
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(xen_event_channel_op_compat);
+
+int HYPERVISOR_physdev_op_compat(int cmd, void *arg)
+{
+ struct physdev_op op;
+ int rc;
+
+ op.cmd = cmd;
+ memcpy(&op.u, arg, sizeof(op.u));
+ rc = _hypercall1(int, physdev_op_compat, &op);
+
+ switch (cmd) {
+ case PHYSDEVOP_IRQ_UNMASK_NOTIFY:
+ case PHYSDEVOP_set_iopl:
+ case PHYSDEVOP_set_iobitmap:
+ case PHYSDEVOP_apic_write:
+ /* no output */
+ break;
+
+#define COPY_BACK(pop, fld) \
+ case PHYSDEVOP_##pop: \
+ memcpy(arg, &op.u.fld, sizeof(op.u.fld)); \
+ break
+
+ COPY_BACK(irq_status_query, irq_status_query);
+ COPY_BACK(apic_read, apic_op);
+ COPY_BACK(ASSIGN_VECTOR, irq_op);
+#undef COPY_BACK
+
+ default:
+ WARN_ON(rc != -ENOSYS);
+ break;
+ }
+
+ return rc;
+}
#endif
}
+static void gntdev_free_map(struct grant_map *map)
+{
+ if (map == NULL)
+ return;
+
+ if (map->pages)
+ free_xenballooned_pages(map->count, map->pages);
+ kfree(map->pages);
+ kfree(map->grants);
+ kfree(map->map_ops);
+ kfree(map->unmap_ops);
+ kfree(map->kmap_ops);
+ kfree(map);
+}
+
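
Note: gntdev_free_map() above follows the consolidated-teardown idiom: a
destructor that tolerates a NULL object and NULL members (kfree(NULL) is a no-op)
can be shared by the constructor's error path and the normal release path. A
sketch with illustrative names:

#include <linux/slab.h>

struct example_map {
	int *grants;
	int *ops;
};

static void example_free_map(struct example_map *map)
{
	if (!map)
		return;

	kfree(map->grants);	/* kfree(NULL) is a no-op */
	kfree(map->ops);
	kfree(map);
}

static struct example_map *example_alloc_map(void)
{
	struct example_map *map = kzalloc(sizeof(*map), GFP_KERNEL);

	if (!map)
		return NULL;

	map->grants = kcalloc(8, sizeof(*map->grants), GFP_KERNEL);
	map->ops = kcalloc(8, sizeof(*map->ops), GFP_KERNEL);
	if (!map->grants || !map->ops) {
		example_free_map(map);	/* frees whatever did succeed */
		return NULL;
	}
	return map;
}
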
static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
{
struct grant_map *add;
return add;
err:
- kfree(add->pages);
- kfree(add->grants);
- kfree(add->map_ops);
- kfree(add->unmap_ops);
- kfree(add->kmap_ops);
- kfree(add);
+ gntdev_free_map(add);
return NULL;
}
evtchn_put(map->notify.event);
}
- if (map->pages) {
- if (!use_ptemod)
- unmap_grant_pages(map, 0, map->count);
-
- free_xenballooned_pages(map->count, map->pages);
- }
- kfree(map->pages);
- kfree(map->grants);
- kfree(map->map_ops);
- kfree(map->unmap_ops);
- kfree(map);
+ if (map->pages && !use_ptemod)
+ unmap_grant_pages(map, 0, map->count);
+ gntdev_free_map(map);
}
/* ------------------------------------------------------------------ */
* nr_gframes is the number of frames to map grant table. Returning
* GNTST_okay means success and negative value means failure.
*/
- int (*map_frames)(unsigned long *frames, unsigned int nr_gframes);
+ int (*map_frames)(xen_pfn_t *frames, unsigned int nr_gframes);
/*
* Release a list of frames which are mapped in map_frames for grant
* entry status.
return (nr_grant_frames * GREFS_PER_GRANT_FRAME + SPP - 1) / SPP;
}
-static int gnttab_map_frames_v1(unsigned long *frames, unsigned int nr_gframes)
+static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
{
int rc;
arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
}
-static int gnttab_map_frames_v2(unsigned long *frames, unsigned int nr_gframes)
+static int gnttab_map_frames_v2(xen_pfn_t *frames, unsigned int nr_gframes)
{
uint64_t *sframes;
unsigned int nr_sframes;
static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
{
struct gnttab_setup_table setup;
- unsigned long *frames;
+ xen_pfn_t *frames;
unsigned int nr_gframes = end_idx + 1;
int rc;
down_write(&mm->mmap_sem);
vma = find_vma(mm, m.addr);
- ret = -EINVAL;
if (!vma ||
vma->vm_ops != &privcmd_vm_ops ||
(m.addr != vma->vm_start) ||
((m.addr + (nr_pages << PAGE_SHIFT)) != vma->vm_end) ||
!privcmd_enforce_singleshot_mapping(vma)) {
up_write(&mm->mmap_sem);
+ ret = -EINVAL;
goto out;
}
up_write(&mm->mmap_sem);
- if (state.global_error && (version == 1)) {
- /* Write back errors in second pass. */
- state.user_mfn = (xen_pfn_t *)m.arr;
- state.err = err_array;
- ret = traverse_pages(m.num, sizeof(xen_pfn_t),
- &pagelist, mmap_return_errors_v1, &state);
+ if (version == 1) {
+ if (state.global_error) {
+ /* Write back errors in second pass. */
+ state.user_mfn = (xen_pfn_t *)m.arr;
+ state.err = err_array;
+ ret = traverse_pages(m.num, sizeof(xen_pfn_t),
+ &pagelist, mmap_return_errors_v1, &state);
+ } else
+ ret = 0;
+
} else if (version == 2) {
ret = __copy_to_user(m.err, err_array, m.num * sizeof(int));
if (ret)
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kobject.h>
+#include <linux/err.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
ret = HYPERVISOR_xen_version(XENVER_platform_parameters,
parms);
if (!ret)
- ret = sprintf(buffer, "%lx\n", parms->virt_start);
+ ret = sprintf(buffer, "%"PRI_xen_ulong"\n",
+ parms->virt_start);
kfree(parms);
}
mutex_lock(&vpci_dev->lock);
- /* Keep multi-function devices together on the virtual PCI bus */
- for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
- if (!list_empty(&vpci_dev->dev_list[slot])) {
+ /*
+ * Keep multi-function devices together on the virtual PCI bus, except
+ * virtual functions.
+ */
+ if (!dev->is_virtfn) {
+ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
+ if (list_empty(&vpci_dev->dev_list[slot]))
+ continue;
+
t = list_entry(list_first(&vpci_dev->dev_list[slot]),
struct pci_dev_entry, list);
pci_name(dev), slot);
list_add_tail(&dev_entry->list,
&vpci_dev->dev_list[slot]);
- func = PCI_FUNC(dev->devfn);
+ func = dev->is_virtfn ? 0 : PCI_FUNC(dev->devfn);
goto unlock;
}
}
goto out;
	/* Can't write a xenbus message larger than we can buffer */
- if ((len + u->len) > sizeof(u->u.buffer)) {
+ if (len > sizeof(u->u.buffer) - u->len) {
/* On error, dump existing buffer */
u->len = 0;
rc = -EINVAL;
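
Note: the xenbus check above is rewritten because "len + u->len > sizeof(...)"
can wrap around when len is huge, while "len > sizeof(...) - u->len" cannot,
given the invariant that u->len never exceeds the buffer size. A userspace
illustration of the overflow-safe form:

#include <stdbool.h>
#include <stddef.h>

/* Overflow-safe capacity check; requires used <= cap. */
static bool fits(size_t len, size_t used, size_t cap)
{
	return len <= cap - used;	/* never wraps, unlike len + used */
}
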
*/
static bool xen_strict_xenbus_quirk(void)
{
+#ifdef CONFIG_X86
uint32_t eax, ebx, ecx, edx, base;
base = xen_cpuid_base();
if ((eax >> 16) < 4)
return true;
+#endif
return false;
}
unsigned int sz = sizeof(struct bio) + extra_size;
struct kmem_cache *slab = NULL;
struct bio_slab *bslab, *new_bio_slabs;
+ unsigned int new_bio_slab_max;
unsigned int i, entry = -1;
mutex_lock(&bio_slab_lock);
goto out_unlock;
if (bio_slab_nr == bio_slab_max && entry == -1) {
- bio_slab_max <<= 1;
+ new_bio_slab_max = bio_slab_max << 1;
new_bio_slabs = krealloc(bio_slabs,
- bio_slab_max * sizeof(struct bio_slab),
+ new_bio_slab_max * sizeof(struct bio_slab),
GFP_KERNEL);
if (!new_bio_slabs)
goto out_unlock;
+ bio_slab_max = new_bio_slab_max;
bio_slabs = new_bio_slabs;
}
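
Note: the bio_slab change above fixes a grow-in-place hazard: bio_slab_max was
doubled before calling krealloc(), so a failed reallocation left the recorded
capacity larger than the surviving array. Computing the new size in a temporary
and committing pointer and capacity together only on success avoids that. A
plain-C version of the idiom:

#include <stdlib.h>

/* Double the capacity of *buf; on failure *buf and *cap are left untouched. */
static int grow(int **buf, size_t *cap)
{
	size_t new_cap = *cap * 2;
	int *p = realloc(*buf, new_cap * sizeof(**buf));

	if (!p)
		return -1;	/* old buffer and recorded size still consistent */

	*buf = p;		/* commit pointer and size together */
	*cap = new_cap;
	return 0;
}
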
if (entry == -1)
return ret;
}
+static ssize_t blkdev_splice_read(struct file *file, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t len,
+ unsigned int flags)
+{
+ ssize_t ret;
+ struct block_device *bdev = I_BDEV(file->f_mapping->host);
+
+ percpu_down_read(&bdev->bd_block_size_semaphore);
+
+ ret = generic_file_splice_read(file, ppos, pipe, len, flags);
+
+ percpu_up_read(&bdev->bd_block_size_semaphore);
+
+ return ret;
+}
+
+static ssize_t blkdev_splice_write(struct pipe_inode_info *pipe,
+ struct file *file, loff_t *ppos, size_t len,
+ unsigned int flags)
+{
+ ssize_t ret;
+ struct block_device *bdev = I_BDEV(file->f_mapping->host);
+
+ percpu_down_read(&bdev->bd_block_size_semaphore);
+
+ ret = generic_file_splice_write(pipe, file, ppos, len, flags);
+
+ percpu_up_read(&bdev->bd_block_size_semaphore);
+
+ return ret;
+}
+
/*
* Try to release a page associated with block device when the system
* is under memory pressure.
#ifdef CONFIG_COMPAT
.compat_ioctl = compat_blkdev_ioctl,
#endif
- .splice_read = generic_file_splice_read,
- .splice_write = generic_file_splice_write,
+ .splice_read = blkdev_splice_read,
+ .splice_write = blkdev_splice_write,
};
int ioctl_by_bdev(struct block_device *bdev, unsigned cmd, unsigned long arg)
goto out;
}
- rcu_read_lock();
- root_level = btrfs_header_level(root->node);
- rcu_read_unlock();
+ root_level = btrfs_old_root_level(root, time_seq);
if (root_level + 1 == level)
goto out;
return ret;
}
-static char *ref_to_path(struct btrfs_root *fs_root,
- struct btrfs_path *path,
- u32 name_len, unsigned long name_off,
- struct extent_buffer *eb_in, u64 parent,
- char *dest, u32 size)
+char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
+ u32 name_len, unsigned long name_off,
+ struct extent_buffer *eb_in, u64 parent,
+ char *dest, u32 size)
{
int slot;
u64 next_inum;
int ret;
- s64 bytes_left = size - 1;
+ s64 bytes_left = ((s64)size) - 1;
struct extent_buffer *eb = eb_in;
struct btrfs_key found_key;
int leave_spinning = path->leave_spinning;
struct extent_buffer *eb_in, u64 parent,
char *dest, u32 size)
{
- return ref_to_path(fs_root, path,
- btrfs_inode_ref_name_len(eb_in, iref),
- (unsigned long)(iref + 1),
- eb_in, parent, dest, size);
+ return btrfs_ref_to_path(fs_root, path,
+ btrfs_inode_ref_name_len(eb_in, iref),
+ (unsigned long)(iref + 1),
+ eb_in, parent, dest, size);
}
/*
ipath->fspath->bytes_left - s_ptr : 0;
fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
- fspath = ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
- name_off, eb, inum, fspath_min,
- bytes_left);
+ fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
+ name_off, eb, inum, fspath_min, bytes_left);
if (IS_ERR(fspath))
return PTR_ERR(fspath);
char *btrfs_iref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
struct btrfs_inode_ref *iref, struct extent_buffer *eb,
u64 parent, char *dest, u32 size);
+char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
+ u32 name_len, unsigned long name_off,
+ struct extent_buffer *eb_in, u64 parent,
+ char *dest, u32 size);
struct btrfs_data_container *init_data_container(u32 total_bytes);
struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
if (tree_mod_dont_log(fs_info, eb))
return 0;
+ /*
+	 * When we overwrite something during the move, we log these removals.
+ * This can only happen when we move towards the beginning of the
+ * buffer, i.e. dst_slot < src_slot.
+ */
for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
ret = tree_mod_log_insert_key_locked(fs_info, eb, i + dst_slot,
MOD_LOG_KEY_REMOVE_WHILE_MOVING);
if (tree_mod_dont_log(fs_info, NULL))
return 0;
- __tree_mod_log_free_eb(fs_info, old_root);
-
ret = tree_mod_alloc(fs_info, flags, &tm);
if (ret < 0)
goto out;
ret = btrfs_dec_ref(trans, root, buf, 1, 1);
BUG_ON(ret); /* -ENOMEM */
}
- /*
- * don't log freeing in case we're freeing the root node, this
- * is done by tree_mod_log_set_root_pointer later
- */
- if (buf != root->node && btrfs_header_level(buf) != 0)
- tree_mod_log_free_eb(root->fs_info, buf);
+ tree_mod_log_free_eb(root->fs_info, buf);
clean_tree_block(trans, root, buf);
*last_ref = 1;
}
free_extent_buffer(eb);
__tree_mod_log_rewind(eb_rewin, time_seq, tm);
+ WARN_ON(btrfs_header_nritems(eb_rewin) >
+ BTRFS_NODEPTRS_PER_BLOCK(fs_info->fs_root));
return eb_rewin;
}
{
struct tree_mod_elem *tm;
struct extent_buffer *eb;
+ struct extent_buffer *old;
struct tree_mod_root *old_root = NULL;
u64 old_generation = 0;
u64 logical;
+ u32 blocksize;
eb = btrfs_read_lock_root_node(root);
tm = __tree_mod_log_oldest_root(root->fs_info, root, time_seq);
}
tm = tree_mod_log_search(root->fs_info, logical, time_seq);
- if (old_root)
+ if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
+ btrfs_tree_read_unlock(root->node);
+ free_extent_buffer(root->node);
+ blocksize = btrfs_level_size(root, old_root->level);
+ old = read_tree_block(root, logical, blocksize, 0);
+ if (!old) {
+ pr_warn("btrfs: failed to read tree block %llu from get_old_root\n",
+ logical);
+ WARN_ON(1);
+ } else {
+ eb = btrfs_clone_extent_buffer(old);
+ free_extent_buffer(old);
+ }
+ } else if (old_root) {
+ btrfs_tree_read_unlock(root->node);
+ free_extent_buffer(root->node);
eb = alloc_dummy_extent_buffer(logical, root->nodesize);
- else
+ } else {
eb = btrfs_clone_extent_buffer(root->node);
- btrfs_tree_read_unlock(root->node);
- free_extent_buffer(root->node);
+ btrfs_tree_read_unlock(root->node);
+ free_extent_buffer(root->node);
+ }
+
if (!eb)
return NULL;
+ extent_buffer_get(eb);
btrfs_tree_read_lock(eb);
if (old_root) {
btrfs_set_header_bytenr(eb, eb->start);
__tree_mod_log_rewind(eb, time_seq, tm);
else
WARN_ON(btrfs_header_level(eb) != 0);
- extent_buffer_get(eb);
+ WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(root));
return eb;
}
+int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq)
+{
+ struct tree_mod_elem *tm;
+ int level;
+
+ tm = __tree_mod_log_oldest_root(root->fs_info, root, time_seq);
+ if (tm && tm->op == MOD_LOG_ROOT_REPLACE) {
+ level = tm->old_root.level;
+ } else {
+ rcu_read_lock();
+ level = btrfs_header_level(root->node);
+ rcu_read_unlock();
+ }
+
+ return level;
+}
+
static inline int should_cow_block(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct extent_buffer *buf)
goto enospc;
}
+ tree_mod_log_free_eb(root->fs_info, root->node);
tree_mod_log_set_root_pointer(root, child);
rcu_assign_pointer(root->node, child);
push_items * sizeof(struct btrfs_key_ptr));
if (push_items < src_nritems) {
- tree_mod_log_eb_move(root->fs_info, src, 0, push_items,
- src_nritems - push_items);
+ /*
+ * don't call tree_mod_log_eb_move here, key removal was already
+ * fully logged by tree_mod_log_eb_copy above.
+ */
memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
btrfs_node_key_ptr_offset(push_items),
(src_nritems - push_items) *
{
return atomic_inc_return(&fs_info->tree_mod_seq);
}
+int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq);
/* root-item.c */
int btrfs_find_root_ref(struct btrfs_root *tree_root,
int btrfs_update_inode(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct inode *inode);
+int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, struct inode *inode);
int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode);
int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode);
int btrfs_orphan_cleanup(struct btrfs_root *root);
return eb;
err:
- for (i--; i >= 0; i--)
- __free_page(eb->pages[i]);
+ for (; i > 0; i--)
+ __free_page(eb->pages[i - 1]);
__free_extent_buffer(eb);
return NULL;
}
struct page *locked_page,
u64 start, u64 end, int *page_started,
unsigned long *nr_written, int unlock);
-static noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct inode *inode);
static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
struct inode *inode, struct inode *dir,
return btrfs_update_inode_item(trans, root, inode);
}
-static noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct inode *inode)
+noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct inode *inode)
{
int ret;
return -EOPNOTSUPP;
if (copy_from_user(&range, arg, sizeof(range)))
return -EFAULT;
- if (range.start > total_bytes)
+ if (range.start > total_bytes ||
+ range.len < fs_info->sb->s_blocksize)
return -EINVAL;
range.len = min(range.len, total_bytes - range.start);
ret = btrfs_commit_transaction(trans,
root->fs_info->extent_root);
}
- BUG_ON(ret);
+ if (ret)
+ goto fail;
ret = pending_snapshot->error;
if (ret)
}
path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
+ if (!path) {
+ ret = -ENOMEM;
+ goto out_free_root;
+ }
key.objectid = 0;
key.type = BTRFS_QGROUP_STATUS_KEY;
ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
sizeof(*ptr));
if (ret)
- goto out;
+ goto out_free_path;
leaf = path->nodes[0];
ptr = btrfs_item_ptr(leaf, path->slots[0],
fs_info->quota_root = quota_root;
fs_info->pending_quota_state = 1;
spin_unlock(&fs_info->qgroup_lock);
-out:
+out_free_path:
btrfs_free_path(path);
+out_free_root:
+ if (ret) {
+ free_extent_buffer(quota_root->node);
+ free_extent_buffer(quota_root->commit_root);
+ kfree(quota_root);
+ }
+out:
return ret;
}
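
The qgroup change above converts a leaky single-label exit into the usual goto-unwind ladder: each failure jumps to the label that releases exactly what the earlier steps acquired, in reverse order, so no path leaks and no path double-frees. In miniature (userspace sketch, hypothetical names):

	#include <errno.h>
	#include <stdlib.h>

	/* The goto-unwind ladder from the qgroup fix, in miniature. */
	static int setup_pair(char **pa, char **pb)
	{
		char *a, *b;

		a = malloc(64);
		if (!a)
			return -ENOMEM;
		b = malloc(64);
		if (!b)
			goto out_free_a;
		*pa = a;
		*pb = b;
		return 0;
	out_free_a:
		free(a);
		return -ENOMEM;
	}
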
void *ctx);
/*
- * Helper function to iterate the entries in ONE btrfs_inode_ref.
+ * Helper function to iterate the entries in ONE btrfs_inode_ref or
+ * btrfs_inode_extref.
 * The iterate callback may return a non-zero value to stop iteration. This can
* be a negative value for error codes or 1 to simply stop it.
*
- * path must point to the INODE_REF when called.
+ * path must point to the INODE_REF or INODE_EXTREF when called.
*/
static int iterate_inode_ref(struct send_ctx *sctx,
struct btrfs_root *root, struct btrfs_path *path,
struct btrfs_key *found_key, int resolve,
iterate_inode_ref_t iterate, void *ctx)
{
- struct extent_buffer *eb;
+ struct extent_buffer *eb = path->nodes[0];
struct btrfs_item *item;
struct btrfs_inode_ref *iref;
+ struct btrfs_inode_extref *extref;
struct btrfs_path *tmp_path;
struct fs_path *p;
- u32 cur;
- u32 len;
+ u32 cur = 0;
u32 total;
- int slot;
+ int slot = path->slots[0];
u32 name_len;
char *start;
int ret = 0;
- int num;
+ int num = 0;
int index;
+ u64 dir;
+ unsigned long name_off;
+ unsigned long elem_size;
+ unsigned long ptr;
p = fs_path_alloc_reversed(sctx);
if (!p)
return -ENOMEM;
}
- eb = path->nodes[0];
- slot = path->slots[0];
- item = btrfs_item_nr(eb, slot);
- iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
- cur = 0;
- len = 0;
- total = btrfs_item_size(eb, item);
- num = 0;
+ if (found_key->type == BTRFS_INODE_REF_KEY) {
+ ptr = (unsigned long)btrfs_item_ptr(eb, slot,
+ struct btrfs_inode_ref);
+ item = btrfs_item_nr(eb, slot);
+ total = btrfs_item_size(eb, item);
+ elem_size = sizeof(*iref);
+ } else {
+ ptr = btrfs_item_ptr_offset(eb, slot);
+ total = btrfs_item_size_nr(eb, slot);
+ elem_size = sizeof(*extref);
+ }
+
while (cur < total) {
fs_path_reset(p);
- name_len = btrfs_inode_ref_name_len(eb, iref);
- index = btrfs_inode_ref_index(eb, iref);
+ if (found_key->type == BTRFS_INODE_REF_KEY) {
+ iref = (struct btrfs_inode_ref *)(ptr + cur);
+ name_len = btrfs_inode_ref_name_len(eb, iref);
+ name_off = (unsigned long)(iref + 1);
+ index = btrfs_inode_ref_index(eb, iref);
+ dir = found_key->offset;
+ } else {
+ extref = (struct btrfs_inode_extref *)(ptr + cur);
+ name_len = btrfs_inode_extref_name_len(eb, extref);
+ name_off = (unsigned long)&extref->name;
+ index = btrfs_inode_extref_index(eb, extref);
+ dir = btrfs_inode_extref_parent(eb, extref);
+ }
+
if (resolve) {
- start = btrfs_iref_to_path(root, tmp_path, iref, eb,
- found_key->offset, p->buf,
- p->buf_len);
+ start = btrfs_ref_to_path(root, tmp_path, name_len,
+ name_off, eb, dir,
+ p->buf, p->buf_len);
if (IS_ERR(start)) {
ret = PTR_ERR(start);
goto out;
p->buf_len + p->buf - start);
if (ret < 0)
goto out;
- start = btrfs_iref_to_path(root, tmp_path, iref,
- eb, found_key->offset, p->buf,
- p->buf_len);
+ start = btrfs_ref_to_path(root, tmp_path,
+ name_len, name_off,
+ eb, dir,
+ p->buf, p->buf_len);
if (IS_ERR(start)) {
ret = PTR_ERR(start);
goto out;
}
p->start = start;
} else {
- ret = fs_path_add_from_extent_buffer(p, eb,
- (unsigned long)(iref + 1), name_len);
+ ret = fs_path_add_from_extent_buffer(p, eb, name_off,
+ name_len);
if (ret < 0)
goto out;
}
-
- len = sizeof(*iref) + name_len;
- iref = (struct btrfs_inode_ref *)((char *)iref + len);
- cur += len;
-
- ret = iterate(num, found_key->offset, index, p, ctx);
+ cur += elem_size + name_len;
+ ret = iterate(num, dir, index, p, ctx);
if (ret)
goto out;
-
num++;
}
}
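
Both item formats iterated above pack variable-length records back to back: a fixed header (`elem_size`) followed by `name_len` name bytes, so the cursor advances by `elem_size + name_len` on each pass. A self-contained sketch of walking such a packed buffer (hypothetical record layout, not the on-disk btrfs format):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Each record is a fixed header followed by name_len name bytes. */
	struct rec {
		uint16_t name_len;
		/* name bytes follow immediately */
	};

	static void walk(const unsigned char *buf, size_t total)
	{
		size_t cur = 0;

		while (cur + sizeof(struct rec) <= total) {
			struct rec r;

			memcpy(&r, buf + cur, sizeof(r));
			if (cur + sizeof(r) + r.name_len > total)
				break;		/* truncated record */
			printf("%.*s\n", r.name_len, buf + cur + sizeof(r));
			cur += sizeof(r) + r.name_len;
		}
	}
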
btrfs_item_key_to_cpu(p->nodes[0], &found_key, p->slots[0]);
if (found_key.objectid != ino ||
- found_key.type != BTRFS_INODE_REF_KEY) {
+ (found_key.type != BTRFS_INODE_REF_KEY &&
+ found_key.type != BTRFS_INODE_EXTREF_KEY)) {
ret = -ENOENT;
goto out;
}
struct btrfs_key key;
struct btrfs_key found_key;
struct btrfs_path *path;
- struct btrfs_inode_ref *iref;
int len;
+ u64 parent_dir;
path = alloc_path_for_send();
if (!path)
if (!ret)
btrfs_item_key_to_cpu(path->nodes[0], &found_key,
path->slots[0]);
- if (ret || found_key.objectid != key.objectid ||
- found_key.type != key.type) {
+ if (ret || found_key.objectid != ino ||
+ (found_key.type != BTRFS_INODE_REF_KEY &&
+ found_key.type != BTRFS_INODE_EXTREF_KEY)) {
ret = -ENOENT;
goto out;
}
- iref = btrfs_item_ptr(path->nodes[0], path->slots[0],
- struct btrfs_inode_ref);
- len = btrfs_inode_ref_name_len(path->nodes[0], iref);
- ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
- (unsigned long)(iref + 1), len);
+ if (key.type == BTRFS_INODE_REF_KEY) {
+ struct btrfs_inode_ref *iref;
+ iref = btrfs_item_ptr(path->nodes[0], path->slots[0],
+ struct btrfs_inode_ref);
+ len = btrfs_inode_ref_name_len(path->nodes[0], iref);
+ ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
+ (unsigned long)(iref + 1),
+ len);
+ parent_dir = found_key.offset;
+ } else {
+ struct btrfs_inode_extref *extref;
+ extref = btrfs_item_ptr(path->nodes[0], path->slots[0],
+ struct btrfs_inode_extref);
+ len = btrfs_inode_extref_name_len(path->nodes[0], extref);
+ ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
+ (unsigned long)&extref->name, len);
+ parent_dir = btrfs_inode_extref_parent(path->nodes[0], extref);
+ }
if (ret < 0)
goto out;
btrfs_release_path(path);
- ret = get_inode_info(root, found_key.offset, NULL, dir_gen, NULL, NULL,
+ ret = get_inode_info(root, parent_dir, NULL, dir_gen, NULL, NULL,
NULL, NULL);
if (ret < 0)
goto out;
- *dir = found_key.offset;
+ *dir = parent_dir;
out:
btrfs_free_path(path);
TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, p);
} else if (S_ISCHR(mode) || S_ISBLK(mode) ||
S_ISFIFO(mode) || S_ISSOCK(mode)) {
- TLV_PUT_U64(sctx, BTRFS_SEND_A_RDEV, rdev);
+ TLV_PUT_U64(sctx, BTRFS_SEND_A_RDEV, new_encode_dev(rdev));
+ TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode);
}
ret = send_cmd(sctx);
btrfs_item_key_to_cpu(eb, &found_key, slot);
if (found_key.objectid != key.objectid ||
- found_key.type != key.type)
+ (found_key.type != BTRFS_INODE_REF_KEY &&
+ found_key.type != BTRFS_INODE_EXTREF_KEY))
break;
ret = iterate_inode_ref(sctx, root, path, &found_key, 0, cb,
if (sctx->cur_ino == 0)
goto out;
if (!at_end && sctx->cur_ino == sctx->cmp_key->objectid &&
- sctx->cmp_key->type <= BTRFS_INODE_REF_KEY)
+ sctx->cmp_key->type <= BTRFS_INODE_EXTREF_KEY)
goto out;
if (list_empty(&sctx->new_refs) && list_empty(&sctx->deleted_refs))
goto out;
if (ret < 0)
goto out;
- if (!S_ISLNK(sctx->cur_inode_mode)) {
- if (!sctx->parent_root || sctx->cur_inode_new) {
+ if (!sctx->parent_root || sctx->cur_inode_new) {
+ need_chown = 1;
+ if (!S_ISLNK(sctx->cur_inode_mode))
need_chmod = 1;
- need_chown = 1;
- } else {
- ret = get_inode_info(sctx->parent_root, sctx->cur_ino,
- NULL, NULL, &right_mode, &right_uid,
- &right_gid, NULL);
- if (ret < 0)
- goto out;
+ } else {
+ ret = get_inode_info(sctx->parent_root, sctx->cur_ino,
+ NULL, NULL, &right_mode, &right_uid,
+ &right_gid, NULL);
+ if (ret < 0)
+ goto out;
- if (left_uid != right_uid || left_gid != right_gid)
- need_chown = 1;
- if (left_mode != right_mode)
- need_chmod = 1;
- }
+ if (left_uid != right_uid || left_gid != right_gid)
+ need_chown = 1;
+ if (!S_ISLNK(sctx->cur_inode_mode) && left_mode != right_mode)
+ need_chmod = 1;
}
if (S_ISREG(sctx->cur_inode_mode)) {
if (key->type == BTRFS_INODE_ITEM_KEY)
ret = changed_inode(sctx, result);
- else if (key->type == BTRFS_INODE_REF_KEY)
+ else if (key->type == BTRFS_INODE_REF_KEY ||
+ key->type == BTRFS_INODE_EXTREF_KEY)
ret = changed_ref(sctx, result);
else if (key->type == BTRFS_XATTR_ITEM_KEY)
ret = changed_xattr(sctx, result);
btrfs_i_size_write(parent_inode, parent_inode->i_size +
dentry->d_name.len * 2);
parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
- ret = btrfs_update_inode(trans, parent_root, parent_inode);
+ ret = btrfs_update_inode_fallback(trans, parent_root, parent_inode);
if (ret)
btrfs_abort_transaction(trans, root, ret);
fail:
"Failed to relocate sys chunks after "
"device initialization. This can be fixed "
"using the \"btrfs balance\" command.");
+ trans = btrfs_attach_transaction(root);
+ if (IS_ERR(trans)) {
+ if (PTR_ERR(trans) == -ENOENT)
+ return 0;
+ return PTR_ERR(trans);
+ }
+ ret = btrfs_commit_transaction(trans, root);
}
return ret;
*max_len = handle_length;
type = 255;
}
+ if (dentry)
+ dput(dentry);
return type;
}
*/
int cdev_add(struct cdev *p, dev_t dev, unsigned count)
{
+ int error;
+
p->dev = dev;
p->count = count;
- return kobj_map(cdev_map, dev, count, NULL, exact_match, exact_lock, p);
+
+ error = kobj_map(cdev_map, dev, count, NULL,
+ exact_match, exact_lock, p);
+ if (error)
+ return error;
+
+ kobject_get(p->kobj.parent);
+
+ return 0;
}
static void cdev_unmap(dev_t dev, unsigned count)
static void cdev_default_release(struct kobject *kobj)
{
struct cdev *p = container_of(kobj, struct cdev, kobj);
+ struct kobject *parent = kobj->parent;
+
cdev_purge(p);
+ kobject_put(parent);
}
static void cdev_dynamic_release(struct kobject *kobj)
{
struct cdev *p = container_of(kobj, struct cdev, kobj);
+ struct kobject *parent = kobj->parent;
+
cdev_purge(p);
kfree(p);
+ kobject_put(parent);
}
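
The cdev hunks pin the parent kobject for the lifetime of the child: a reference is taken when cdev_add() publishes the device and dropped only from the release callbacks, so the parent cannot be freed while the cdev is still live. A compact userspace model of the same pin/unpin discipline (hypothetical types):

	#include <stdatomic.h>
	#include <stdlib.h>

	/* Parent-pinning: publishing a child takes a reference on its
	 * parent; the child's release drops it. (Miniature of kobjects.) */
	struct obj {
		atomic_int refs;
		struct obj *parent;
	};

	static void obj_get(struct obj *o)
	{
		if (o)
			atomic_fetch_add(&o->refs, 1);
	}

	static void obj_put(struct obj *o)
	{
		while (o && atomic_fetch_sub(&o->refs, 1) == 1) {
			struct obj *parent = o->parent;	/* pin from publish */

			free(o);		/* the "release" callback */
			o = parent;		/* now drop the parent pin */
		}
	}

	static void publish_child(struct obj *child)
	{
		obj_get(child->parent);	/* mirrors kobject_get(p->kobj.parent) */
	}
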
static struct kobj_type ktype_cdev_default = {
}
static void
+cifs_copy_sid(struct cifs_sid *dst, const struct cifs_sid *src)
+{
+ memcpy(dst, src, sizeof(*dst));
+ dst->num_subauth = min_t(u8, src->num_subauth, NUM_SUBAUTHS);
+}
+
+static void
id_rb_insert(struct rb_root *root, struct cifs_sid *sidptr,
struct cifs_sid_id **psidid, char *typestr)
{
}
}
- memcpy(&(*psidid)->sid, sidptr, sizeof(struct cifs_sid));
+ cifs_copy_sid(&(*psidid)->sid, sidptr);
(*psidid)->time = jiffies - (SID_MAP_RETRY + 1);
(*psidid)->refcount = 0;
* any fields of the node after a reference is put .
*/
if (test_bit(SID_ID_MAPPED, &psidid->state)) {
- memcpy(ssid, &psidid->sid, sizeof(struct cifs_sid));
+ cifs_copy_sid(ssid, &psidid->sid);
psidid->time = jiffies; /* update ts for accessing */
goto id_sid_out;
}
if (IS_ERR(sidkey)) {
rc = -EINVAL;
		cFYI(1, "%s: Can't map an id to a SID", __func__);
+ } else if (sidkey->datalen < sizeof(struct cifs_sid)) {
+ rc = -EIO;
+ cFYI(1, "%s: Downcall contained malformed key "
+ "(datalen=%hu)", __func__, sidkey->datalen);
} else {
lsid = (struct cifs_sid *)sidkey->payload.data;
- memcpy(&psidid->sid, lsid,
- sidkey->datalen < sizeof(struct cifs_sid) ?
- sidkey->datalen : sizeof(struct cifs_sid));
- memcpy(ssid, &psidid->sid,
- sidkey->datalen < sizeof(struct cifs_sid) ?
- sidkey->datalen : sizeof(struct cifs_sid));
+ cifs_copy_sid(&psidid->sid, lsid);
+ cifs_copy_sid(ssid, &psidid->sid);
set_bit(SID_ID_MAPPED, &psidid->state);
key_put(sidkey);
kfree(psidid->sidstr);
return rc;
}
if (test_bit(SID_ID_MAPPED, &psidid->state))
- memcpy(ssid, &psidid->sid, sizeof(struct cifs_sid));
+ cifs_copy_sid(ssid, &psidid->sid);
else
rc = -EINVAL;
}
static void copy_sec_desc(const struct cifs_ntsd *pntsd,
struct cifs_ntsd *pnntsd, __u32 sidsoffset)
{
- int i;
-
struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
struct cifs_sid *nowner_sid_ptr, *ngroup_sid_ptr;
owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
le32_to_cpu(pntsd->osidoffset));
nowner_sid_ptr = (struct cifs_sid *)((char *)pnntsd + sidsoffset);
-
- nowner_sid_ptr->revision = owner_sid_ptr->revision;
- nowner_sid_ptr->num_subauth = owner_sid_ptr->num_subauth;
- for (i = 0; i < 6; i++)
- nowner_sid_ptr->authority[i] = owner_sid_ptr->authority[i];
- for (i = 0; i < 5; i++)
- nowner_sid_ptr->sub_auth[i] = owner_sid_ptr->sub_auth[i];
+ cifs_copy_sid(nowner_sid_ptr, owner_sid_ptr);
/* copy group sid */
group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
le32_to_cpu(pntsd->gsidoffset));
ngroup_sid_ptr = (struct cifs_sid *)((char *)pnntsd + sidsoffset +
sizeof(struct cifs_sid));
-
- ngroup_sid_ptr->revision = group_sid_ptr->revision;
- ngroup_sid_ptr->num_subauth = group_sid_ptr->num_subauth;
- for (i = 0; i < 6; i++)
- ngroup_sid_ptr->authority[i] = group_sid_ptr->authority[i];
- for (i = 0; i < 5; i++)
- ngroup_sid_ptr->sub_auth[i] = group_sid_ptr->sub_auth[i];
+ cifs_copy_sid(ngroup_sid_ptr, group_sid_ptr);
return;
}
kfree(nowner_sid_ptr);
return rc;
}
- memcpy(owner_sid_ptr, nowner_sid_ptr,
- sizeof(struct cifs_sid));
+ cifs_copy_sid(owner_sid_ptr, nowner_sid_ptr);
kfree(nowner_sid_ptr);
*aclflag = CIFS_ACL_OWNER;
}
kfree(ngroup_sid_ptr);
return rc;
}
- memcpy(group_sid_ptr, ngroup_sid_ptr,
- sizeof(struct cifs_sid));
+ cifs_copy_sid(group_sid_ptr, ngroup_sid_ptr);
kfree(ngroup_sid_ptr);
*aclflag = CIFS_ACL_GROUP;
}
* in network traffic in the other paths.
*/
if (!(oflags & O_CREAT)) {
- struct dentry *res = cifs_lookup(inode, direntry, 0);
+ struct dentry *res;
+
+ /*
+ * Check for hashed negative dentry. We have already revalidated
+ * the dentry and it is fine. No need to perform another lookup.
+ */
+ if (!d_unhashed(direntry))
+ return -ENOENT;
+
+ res = cifs_lookup(inode, direntry, 0);
if (IS_ERR(res))
return PTR_ERR(res);
err = get_user(palp, &up->palette);
err |= get_user(length, &up->length);
+ if (err)
+ return -EFAULT;
up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
err = put_user(compat_ptr(palp), &up_native->palette);
/* Tells if the epoll_ctl(2) operation needs an event copy from userspace */
static inline int ep_op_has_event(int op)
{
- return op == EPOLL_CTL_ADD || op == EPOLL_CTL_MOD;
+ return op != EPOLL_CTL_DEL;
}
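
With EPOLL_CTL_DISABLE removed, EPOLL_CTL_DEL is the only operation that carries no user event, so the negated test is equivalent and simpler. From userspace the contract looks like this (NULL has been accepted for EPOLL_CTL_DEL since Linux 2.6.9):

	#include <sys/epoll.h>

	static int watch_fd(int epfd, int fd)
	{
		struct epoll_event ev = { .events = EPOLLIN, .data.fd = fd };

		return epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev);
	}

	static int unwatch_fd(int epfd, int fd)
	{
		return epoll_ctl(epfd, EPOLL_CTL_DEL, fd, NULL);
	}
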
/* Initialize the poll safe wake up structure */
return 0;
}
-/*
- * Disables a "struct epitem" in the eventpoll set. Returns -EBUSY if the item
- * had no event flags set, indicating that another thread may be currently
- * handling that item's events (in the case that EPOLLONESHOT was being
- * used). Otherwise a zero result indicates that the item has been disabled
- * from receiving events. A disabled item may be re-enabled via
- * EPOLL_CTL_MOD. Must be called with "mtx" held.
- */
-static int ep_disable(struct eventpoll *ep, struct epitem *epi)
-{
- int result = 0;
- unsigned long flags;
-
- spin_lock_irqsave(&ep->lock, flags);
- if (epi->event.events & ~EP_PRIVATE_BITS) {
- if (ep_is_linked(&epi->rdllink))
- list_del_init(&epi->rdllink);
- /* Ensure ep_poll_callback will not add epi back onto ready
- list: */
- epi->event.events &= EP_PRIVATE_BITS;
- }
- else
- result = -EBUSY;
- spin_unlock_irqrestore(&ep->lock, flags);
-
- return result;
-}
-
static void ep_free(struct eventpoll *ep)
{
struct rb_node *rbp;
rb_insert_color(&epi->rbn, &ep->rbr);
}
+
#define PATH_ARR_SIZE 5
/*
* These are the number paths of length 1 to 5, that we are allowing to emanate
} else
error = -ENOENT;
break;
- case EPOLL_CTL_DISABLE:
- if (epi)
- error = ep_disable(ep, epi);
- else
- error = -ENOENT;
- break;
}
mutex_unlock(&ep->mtx);
bprm->mm = NULL; /* We're using it now */
set_fs(USER_DS);
- current->flags &= ~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD);
+ current->flags &=
+ ~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD | PF_NOFREEZE);
flush_thread();
current->personality &= ~bprm->per_clear;
end = start + (range->len >> sb->s_blocksize_bits) - 1;
minlen = range->minlen >> sb->s_blocksize_bits;
- if (unlikely(minlen > EXT3_BLOCKS_PER_GROUP(sb)) ||
- unlikely(start >= max_blks))
+ if (minlen > EXT3_BLOCKS_PER_GROUP(sb) ||
+ start >= max_blks ||
+ range->len < sb->s_blocksize)
return -EINVAL;
if (end >= max_blks)
end = max_blks - 1;
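
The tightened validation (here and in the matching ext4, gfs2, and jfs hunks below) rejects a trim range shorter than one filesystem block up front instead of silently trimming nothing. A caller can observe the new behaviour like so (sketch; "/mnt" is a placeholder mount point):

	#include <fcntl.h>
	#include <linux/fs.h>		/* FITRIM, struct fstrim_range */
	#include <stdio.h>
	#include <sys/ioctl.h>

	int main(void)
	{
		struct fstrim_range r = { .start = 0, .len = 512, .minlen = 0 };
		int fd = open("/mnt", O_RDONLY);

		if (fd < 0 || ioctl(fd, FITRIM, &r) < 0)
			perror("FITRIM");  /* expect EINVAL if 512 < block size */
		return 0;
	}
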
ext4_free_inodes_set(sb, gdp, 0);
ext4_itable_unused_set(sb, gdp, 0);
memset(bh->b_data, 0xff, sb->s_blocksize);
- ext4_block_bitmap_csum_set(sb, block_group, gdp, bh,
- EXT4_BLOCKS_PER_GROUP(sb) / 8);
+ ext4_block_bitmap_csum_set(sb, block_group, gdp, bh);
return;
}
memset(bh->b_data, 0, sb->s_blocksize);
*/
ext4_mark_bitmap_end(num_clusters_in_group(sb, block_group),
sb->s_blocksize * 8, bh->b_data);
- ext4_block_bitmap_csum_set(sb, block_group, gdp, bh,
- EXT4_BLOCKS_PER_GROUP(sb) / 8);
+ ext4_block_bitmap_csum_set(sb, block_group, gdp, bh);
ext4_group_desc_csum_set(sb, block_group, gdp);
}
return;
}
if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group,
- desc, bh, EXT4_BLOCKS_PER_GROUP(sb) / 8))) {
+ desc, bh))) {
ext4_unlock_group(sb, block_group);
ext4_error(sb, "bg %u: bad block bitmap checksum", block_group);
return;
int ext4_block_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
struct ext4_group_desc *gdp,
- struct buffer_head *bh, int sz)
+ struct buffer_head *bh)
{
__u32 hi;
__u32 provided, calculated;
struct ext4_sb_info *sbi = EXT4_SB(sb);
+ int sz = EXT4_CLUSTERS_PER_GROUP(sb) / 8;
if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
void ext4_block_bitmap_csum_set(struct super_block *sb, ext4_group_t group,
struct ext4_group_desc *gdp,
- struct buffer_head *bh, int sz)
+ struct buffer_head *bh)
{
+ int sz = EXT4_CLUSTERS_PER_GROUP(sb) / 8;
__u32 csum;
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct buffer_head *bh, int sz);
void ext4_block_bitmap_csum_set(struct super_block *sb, ext4_group_t group,
struct ext4_group_desc *gdp,
- struct buffer_head *bh, int sz);
+ struct buffer_head *bh);
int ext4_block_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
struct ext4_group_desc *gdp,
- struct buffer_head *bh, int sz);
+ struct buffer_head *bh);
/* balloc.c */
extern void ext4_validate_block_bitmap(struct super_block *sb,
extern int ext4_calculate_overhead(struct super_block *sb);
extern int ext4_superblock_csum_verify(struct super_block *sb,
struct ext4_super_block *es);
-extern void ext4_superblock_csum_set(struct super_block *sb,
- struct ext4_super_block *es);
+extern void ext4_superblock_csum_set(struct super_block *sb);
extern void *ext4_kvmalloc(size_t size, gfp_t flags);
extern void *ext4_kvzalloc(size_t size, gfp_t flags);
extern void ext4_kvfree(void *ptr);
struct buffer_head *bh = EXT4_SB(sb)->s_sbh;
int err = 0;
+ ext4_superblock_csum_set(sb);
if (ext4_handle_valid(handle)) {
- ext4_superblock_csum_set(sb,
- (struct ext4_super_block *)bh->b_data);
err = jbd2_journal_dirty_metadata(handle, bh);
if (err)
ext4_journal_abort_handle(where, line, __func__,
bh, handle, err);
- } else {
- ext4_superblock_csum_set(sb,
- (struct ext4_super_block *)bh->b_data);
+ } else
mark_buffer_dirty(bh);
- }
return err;
}
#define EXT4_EXT_MARK_UNINIT1 0x2 /* mark first half uninitialized */
#define EXT4_EXT_MARK_UNINIT2 0x4 /* mark second half uninitialized */
+#define EXT4_EXT_DATA_VALID1 0x8 /* first half contains valid data */
+#define EXT4_EXT_DATA_VALID2 0x10 /* second half contains valid data */
+
static __le32 ext4_extent_block_csum(struct inode *inode,
struct ext4_extent_header *eh)
{
unsigned int ee_len, depth;
int err = 0;
+ BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) ==
+ (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2));
+
ext_debug("ext4_split_extents_at: inode %lu, logical"
"block %llu\n", inode->i_ino, (unsigned long long)split);
err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
- err = ext4_ext_zeroout(inode, &orig_ex);
+ if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
+ if (split_flag & EXT4_EXT_DATA_VALID1)
+ err = ext4_ext_zeroout(inode, ex2);
+ else
+ err = ext4_ext_zeroout(inode, ex);
+ } else
+ err = ext4_ext_zeroout(inode, &orig_ex);
+
if (err)
goto fix_extent_len;
/* update the extent length and mark as initialized */
uninitialized = ext4_ext_is_uninitialized(ex);
if (map->m_lblk + map->m_len < ee_block + ee_len) {
- split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ?
- EXT4_EXT_MAY_ZEROOUT : 0;
+ split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT;
flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
if (uninitialized)
split_flag1 |= EXT4_EXT_MARK_UNINIT1 |
EXT4_EXT_MARK_UNINIT2;
+ if (split_flag & EXT4_EXT_DATA_VALID2)
+ split_flag1 |= EXT4_EXT_DATA_VALID1;
err = ext4_split_extent_at(handle, inode, path,
map->m_lblk + map->m_len, split_flag1, flags1);
if (err)
return PTR_ERR(path);
if (map->m_lblk >= ee_block) {
- split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ?
- EXT4_EXT_MAY_ZEROOUT : 0;
+ split_flag1 = split_flag & (EXT4_EXT_MAY_ZEROOUT |
+ EXT4_EXT_DATA_VALID2);
if (uninitialized)
split_flag1 |= EXT4_EXT_MARK_UNINIT1;
if (split_flag & EXT4_EXT_MARK_UNINIT2)
split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
split_flag |= EXT4_EXT_MARK_UNINIT2;
-
+ if (flags & EXT4_GET_BLOCKS_CONVERT)
+ split_flag |= EXT4_EXT_DATA_VALID2;
flags |= EXT4_GET_BLOCKS_PRE_IO;
return ext4_split_extent(handle, inode, path, map, split_flag, flags);
}
static int ext4_convert_unwritten_extents_endio(handle_t *handle,
- struct inode *inode,
- struct ext4_ext_path *path)
+ struct inode *inode,
+ struct ext4_map_blocks *map,
+ struct ext4_ext_path *path)
{
struct ext4_extent *ex;
+ ext4_lblk_t ee_block;
+ unsigned int ee_len;
int depth;
int err = 0;
depth = ext_depth(inode);
ex = path[depth].p_ext;
+ ee_block = le32_to_cpu(ex->ee_block);
+ ee_len = ext4_ext_get_actual_len(ex);
ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical"
"block %llu, max_blocks %u\n", inode->i_ino,
- (unsigned long long)le32_to_cpu(ex->ee_block),
- ext4_ext_get_actual_len(ex));
+ (unsigned long long)ee_block, ee_len);
+
+ /* If extent is larger than requested then split is required */
+ if (ee_block != map->m_lblk || ee_len > map->m_len) {
+ err = ext4_split_unwritten_extents(handle, inode, map, path,
+ EXT4_GET_BLOCKS_CONVERT);
+ if (err < 0)
+ goto out;
+ ext4_ext_drop_refs(path);
+ path = ext4_ext_find_extent(inode, map->m_lblk, path);
+ if (IS_ERR(path)) {
+ err = PTR_ERR(path);
+ goto out;
+ }
+ depth = ext_depth(inode);
+ ex = path[depth].p_ext;
+ }
err = ext4_ext_get_access(handle, inode, path + depth);
if (err)
}
/* IO end_io complete, convert the filled extent to written */
if ((flags & EXT4_GET_BLOCKS_CONVERT)) {
- ret = ext4_convert_unwritten_extents_endio(handle, inode,
+ ret = ext4_convert_unwritten_extents_endio(handle, inode, map,
path);
if (ret >= 0) {
ext4_update_inode_fsync_trans(handle, inode, 1);
*/
if (len <= EXT_UNINIT_MAX_LEN << blkbits)
flags |= EXT4_GET_BLOCKS_NO_NORMALIZE;
+
+	/* Prevent races with pending unwritten extent conversion */
+ ext4_flush_unwritten_io(inode);
retry:
while (ret >= 0 && ret < max_blocks) {
map.m_lblk = map.m_lblk + ret;
"inode=%lu", ino + 1);
continue;
}
+ BUFFER_TRACE(inode_bitmap_bh, "get_write_access");
+ err = ext4_journal_get_write_access(handle, inode_bitmap_bh);
+ if (err)
+ goto fail;
ext4_lock_group(sb, group);
ret2 = ext4_test_and_set_bit(ino, inode_bitmap_bh->b_data);
ext4_unlock_group(sb, group);
goto out;
got:
+ BUFFER_TRACE(inode_bitmap_bh, "call ext4_handle_dirty_metadata");
+ err = ext4_handle_dirty_metadata(handle, NULL, inode_bitmap_bh);
+ if (err)
+ goto fail;
+
/* We may have to initialize the block bitmap if it isn't already */
if (ext4_has_group_desc_csum(sb) &&
gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
ext4_free_group_clusters_set(sb, gdp,
ext4_free_clusters_after_init(sb, group, gdp));
ext4_block_bitmap_csum_set(sb, group, gdp,
- block_bitmap_bh,
- EXT4_BLOCKS_PER_GROUP(sb) /
- 8);
+ block_bitmap_bh);
ext4_group_desc_csum_set(sb, group, gdp);
}
ext4_unlock_group(sb, group);
goto fail;
}
- BUFFER_TRACE(inode_bitmap_bh, "get_write_access");
- err = ext4_journal_get_write_access(handle, inode_bitmap_bh);
- if (err)
- goto fail;
-
BUFFER_TRACE(group_desc_bh, "get_write_access");
err = ext4_journal_get_write_access(handle, group_desc_bh);
if (err)
}
ext4_unlock_group(sb, group);
- BUFFER_TRACE(inode_bitmap_bh, "call ext4_handle_dirty_metadata");
- err = ext4_handle_dirty_metadata(handle, NULL, inode_bitmap_bh);
- if (err)
- goto fail;
-
BUFFER_TRACE(group_desc_bh, "call ext4_handle_dirty_metadata");
err = ext4_handle_dirty_metadata(handle, NULL, group_desc_bh);
if (err)
}
len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len;
ext4_free_group_clusters_set(sb, gdp, len);
- ext4_block_bitmap_csum_set(sb, ac->ac_b_ex.fe_group, gdp, bitmap_bh,
- EXT4_BLOCKS_PER_GROUP(sb) / 8);
+ ext4_block_bitmap_csum_set(sb, ac->ac_b_ex.fe_group, gdp, bitmap_bh);
ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp);
ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
ret = ext4_free_group_clusters(sb, gdp) + count_clusters;
ext4_free_group_clusters_set(sb, gdp, ret);
- ext4_block_bitmap_csum_set(sb, block_group, gdp, bitmap_bh,
- EXT4_BLOCKS_PER_GROUP(sb) / 8);
+ ext4_block_bitmap_csum_set(sb, block_group, gdp, bitmap_bh);
ext4_group_desc_csum_set(sb, block_group, gdp);
ext4_unlock_group(sb, block_group);
percpu_counter_add(&sbi->s_freeclusters_counter, count_clusters);
mb_free_blocks(NULL, &e4b, bit, count);
blk_free_count = blocks_freed + ext4_free_group_clusters(sb, desc);
ext4_free_group_clusters_set(sb, desc, blk_free_count);
- ext4_block_bitmap_csum_set(sb, block_group, desc, bitmap_bh,
- EXT4_BLOCKS_PER_GROUP(sb) / 8);
+ ext4_block_bitmap_csum_set(sb, block_group, desc, bitmap_bh);
ext4_group_desc_csum_set(sb, block_group, desc);
ext4_unlock_group(sb, block_group);
percpu_counter_add(&sbi->s_freeclusters_counter,
minlen = EXT4_NUM_B2C(EXT4_SB(sb),
range->minlen >> sb->s_blocksize_bits);
- if (unlikely(minlen > EXT4_CLUSTERS_PER_GROUP(sb)) ||
- unlikely(start >= max_blks))
+ if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) ||
+ start >= max_blks ||
+ range->len < sb->s_blocksize)
return -EINVAL;
if (end >= max_blks)
end = max_blks - 1;
bh = ext4_get_bitmap(sb, group_data->block_bitmap);
if (!bh)
return -EIO;
- ext4_block_bitmap_csum_set(sb, group, gdp, bh,
- EXT4_BLOCKS_PER_GROUP(sb) / 8);
+ ext4_block_bitmap_csum_set(sb, group, gdp, bh);
brelse(bh);
return 0;
return es->s_checksum == ext4_superblock_csum(sb, es);
}
-void ext4_superblock_csum_set(struct super_block *sb,
- struct ext4_super_block *es)
+void ext4_superblock_csum_set(struct super_block *sb)
{
+ struct ext4_super_block *es = EXT4_SB(sb)->s_es;
+
if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
return;
sbi->s_log_groups_per_flex = 0;
return 1;
}
- groups_per_flex = 1 << sbi->s_log_groups_per_flex;
+ groups_per_flex = 1U << sbi->s_log_groups_per_flex;
err = ext4_alloc_flex_bg_array(sb, sbi->s_groups_count);
if (err)
cpu_to_le32(percpu_counter_sum_positive(
&EXT4_SB(sb)->s_freeinodes_counter));
BUFFER_TRACE(sbh, "marking dirty");
- ext4_superblock_csum_set(sb, es);
+ ext4_superblock_csum_set(sb);
mark_buffer_dirty(sbh);
if (sync) {
error = sync_dirty_buffer(sbh);
struct fdtable *fdt;
/* exec unshares first */
- BUG_ON(atomic_read(&files->count) != 1);
spin_lock(&files->file_lock);
for (i = 0; ; i++) {
unsigned long set;
return __close_fd(files, fd);
if (fd >= rlimit(RLIMIT_NOFILE))
- return -EMFILE;
+ return -EBADF;
spin_lock(&files->file_lock);
err = expand_files(files, fd);
return -EINVAL;
if (newfd >= rlimit(RLIMIT_NOFILE))
- return -EMFILE;
+ return -EBADF;
spin_lock(&files->file_lock);
err = expand_files(files, newfd);
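
POSIX requires dup2() to fail with EBADF, not EMFILE, when the target descriptor is at or beyond the per-process limit; EMFILE is reserved for actually running out of descriptors. The change above aligns both call sites. A quick demonstration:

	#include <errno.h>
	#include <stdio.h>
	#include <sys/resource.h>
	#include <unistd.h>

	int main(void)
	{
		struct rlimit rl;

		getrlimit(RLIMIT_NOFILE, &rl);
		if (dup2(0, (int)rl.rlim_cur) < 0)
			printf("dup2: %s\n", errno == EBADF
					     ? "EBADF (correct)"
					     : "unexpected errno");
		return 0;
	}
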
static void inode_sync_complete(struct inode *inode)
{
inode->i_state &= ~I_SYNC;
+	/* If inode is clean and unused, put it into LRU now... */
+ inode_add_lru(inode);
/* Waiters must see I_SYNC cleared before being woken up */
smp_mb();
wake_up_bit(&inode->i_state, __I_SYNC);
struct gfs2_holder i_gh;
int error;
- gfs2_holder_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
- error = gfs2_glock_nq(&i_gh);
- if (error == 0) {
- file_accessed(file);
- gfs2_glock_dq(&i_gh);
- }
- gfs2_holder_uninit(&i_gh);
+ error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
+ &i_gh);
if (error)
return error;
+ /* grab lock to update inode */
+ gfs2_glock_dq_uninit(&i_gh);
+ file_accessed(file);
}
vma->vm_ops = &gfs2_vm_ops;
size_t writesize = iov_length(iov, nr_segs);
struct dentry *dentry = file->f_dentry;
struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
- struct gfs2_sbd *sdp;
int ret;
- sdp = GFS2_SB(file->f_mapping->host);
ret = gfs2_rs_alloc(ip);
if (ret)
return ret;
struct gfs2_meta_header *mh;
struct gfs2_trans *tr;
- lock_buffer(bd->bd_bh);
- gfs2_log_lock(sdp);
tr = current->journal_info;
tr->tr_touched = 1;
if (!list_empty(&bd->bd_list))
- goto out;
+ return;
set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
mh = (struct gfs2_meta_header *)bd->bd_bh->b_data;
sdp->sd_log_num_buf++;
list_add(&bd->bd_list, &sdp->sd_log_le_buf);
tr->tr_num_buf_new++;
-out:
- gfs2_log_unlock(sdp);
- unlock_buffer(bd->bd_bh);
}
static void gfs2_check_magic(struct buffer_head *bh)
static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
{
- struct gfs2_log_descriptor *ld;
struct gfs2_meta_header *mh;
unsigned int offset;
struct list_head *head = &sdp->sd_log_le_revoke;
length = gfs2_struct2blk(sdp, sdp->sd_log_num_revoke, sizeof(u64));
page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE, length, sdp->sd_log_num_revoke);
- ld = page_address(page);
offset = sizeof(struct gfs2_log_descriptor);
list_for_each_entry(bd, head, bd_list) {
struct address_space *mapping = bd->bd_bh->b_page->mapping;
struct gfs2_inode *ip = GFS2_I(mapping->host);
- lock_buffer(bd->bd_bh);
- gfs2_log_lock(sdp);
if (tr)
tr->tr_touched = 1;
if (!list_empty(&bd->bd_list))
- goto out;
+ return;
set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
if (gfs2_is_jdata(ip)) {
} else {
list_add_tail(&bd->bd_list, &sdp->sd_log_le_ordered);
}
-out:
- gfs2_log_unlock(sdp);
- unlock_buffer(bd->bd_bh);
}
/**
struct gfs2_quota_data **qd;
int error;
- if (ip->i_res == NULL)
- gfs2_rs_alloc(ip);
+ if (ip->i_res == NULL) {
+ error = gfs2_rs_alloc(ip);
+ if (error)
+ return error;
+ }
qd = ip->i_res->rs_qa_qd;
*/
int gfs2_rs_alloc(struct gfs2_inode *ip)
{
- int error = 0;
struct gfs2_blkreserv *res;
if (ip->i_res)
res = kmem_cache_zalloc(gfs2_rsrv_cachep, GFP_NOFS);
if (!res)
- error = -ENOMEM;
+ return -ENOMEM;
RB_CLEAR_NODE(&res->rs_node);
else
ip->i_res = res;
up_write(&ip->i_rw_mutex);
- return error;
+ return 0;
}
static void dump_rs(struct seq_file *seq, const struct gfs2_blkreserv *rs)
int ret = 0;
u64 amt;
u64 trimmed = 0;
+ u64 start, end, minlen;
unsigned int x;
+ unsigned bs_shift = sdp->sd_sb.sb_bsize_shift;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (!blk_queue_discard(q))
return -EOPNOTSUPP;
- if (argp == NULL) {
- r.start = 0;
- r.len = ULLONG_MAX;
- r.minlen = 0;
- } else if (copy_from_user(&r, argp, sizeof(r)))
+ if (copy_from_user(&r, argp, sizeof(r)))
return -EFAULT;
ret = gfs2_rindex_update(sdp);
if (ret)
return ret;
- rgd = gfs2_blk2rgrpd(sdp, r.start, 0);
- rgd_end = gfs2_blk2rgrpd(sdp, r.start + r.len, 0);
+ start = r.start >> bs_shift;
+ end = start + (r.len >> bs_shift);
+ minlen = max_t(u64, r.minlen,
+ q->limits.discard_granularity) >> bs_shift;
+
+ rgd = gfs2_blk2rgrpd(sdp, start, 0);
+ rgd_end = gfs2_blk2rgrpd(sdp, end - 1, 0);
+
+ if (end <= start ||
+ minlen > sdp->sd_max_rg_data ||
+ start > rgd_end->rd_data0 + rgd_end->rd_data)
+ return -EINVAL;
while (1) {
/* Trim each bitmap in the rgrp */
for (x = 0; x < rgd->rd_length; x++) {
struct gfs2_bitmap *bi = rgd->rd_bits + x;
- ret = gfs2_rgrp_send_discards(sdp, rgd->rd_data0, NULL, bi, r.minlen, &amt);
+ ret = gfs2_rgrp_send_discards(sdp,
+ rgd->rd_data0, NULL, bi, minlen,
+ &amt);
if (ret) {
gfs2_glock_dq_uninit(&gh);
goto out;
out:
r.len = trimmed << 9;
- if (argp && copy_to_user(argp, &r, sizeof(r)))
+ if (copy_to_user(argp, &r, sizeof(r)))
return -EFAULT;
return ret;
return;
}
need_unlock = 1;
- }
+ } else if (WARN_ON_ONCE(ip->i_gl->gl_state != LM_ST_EXCLUSIVE))
+ return;
if (current->journal_info == NULL) {
ret = gfs2_trans_begin(sdp, RES_DINODE, 0);
struct gfs2_sbd *sdp = gl->gl_sbd;
struct gfs2_bufdata *bd;
+ lock_buffer(bh);
+ gfs2_log_lock(sdp);
bd = bh->b_private;
if (bd)
gfs2_assert(sdp, bd->bd_gl == gl);
else {
+ gfs2_log_unlock(sdp);
+ unlock_buffer(bh);
gfs2_attach_bufdata(gl, bh, meta);
bd = bh->b_private;
+ lock_buffer(bh);
+ gfs2_log_lock(sdp);
}
lops_add(sdp, bd);
+ gfs2_log_unlock(sdp);
+ unlock_buffer(bh);
}
void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
spin_unlock(&inode->i_sb->s_inode_lru_lock);
}
+/*
+ * Add inode to LRU if needed (inode is unused and clean).
+ *
+ * Needs inode->i_lock held.
+ */
+void inode_add_lru(struct inode *inode)
+{
+ if (!(inode->i_state & (I_DIRTY | I_SYNC | I_FREEING | I_WILL_FREE)) &&
+ !atomic_read(&inode->i_count) && inode->i_sb->s_flags & MS_ACTIVE)
+ inode_lru_list_add(inode);
+}
+
static void inode_lru_list_del(struct inode *inode)
{
spin_lock(&inode->i_sb->s_inode_lru_lock);
if (!drop && (sb->s_flags & MS_ACTIVE)) {
inode->i_state |= I_REFERENCED;
- if (!(inode->i_state & (I_DIRTY|I_SYNC)))
- inode_lru_list_add(inode);
+ inode_add_lru(inode);
spin_unlock(&inode->i_lock);
return;
}
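
inode_add_lru() gives the "clean and unused" eligibility test a single home, so the writeback-completion path and iput_final can no longer drift apart on the condition. The shape of the refactor, reduced to a sketch (hypothetical flags; the real test also honours MS_ACTIVE):

	#include <stdbool.h>

	#define S_DIRTY		0x1
	#define S_SYNC		0x2
	#define S_FREEING	0x4

	struct node {
		unsigned int state;
		int users;
		bool on_lru;
	};

	/* One home for the eligibility test, as inode_add_lru() provides. */
	static void node_add_lru(struct node *n)
	{
		if (!(n->state & (S_DIRTY | S_SYNC | S_FREEING)) &&
		    n->users == 0)
			n->on_lru = true;
	}
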
* inode.c
*/
extern spinlock_t inode_sb_list_lock;
+extern void inode_add_lru(struct inode *inode);
/*
* fs-writeback.c
spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh);
spin_unlock(&journal->j_state_lock);
+ unlock_buffer(bh);
log_wait_commit(journal, tid);
+ lock_buffer(bh);
goto retry;
}
/*
struct page *pg;
struct inode *inode = mapping->host;
struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
+ struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
+ struct jffs2_raw_inode ri;
+ uint32_t alloc_len = 0;
pgoff_t index = pos >> PAGE_CACHE_SHIFT;
uint32_t pageofs = index << PAGE_CACHE_SHIFT;
int ret = 0;
+ jffs2_dbg(1, "%s()\n", __func__);
+
+ if (pageofs > inode->i_size) {
+ ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len,
+ ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
+ if (ret)
+ return ret;
+ }
+
+ mutex_lock(&f->sem);
pg = grab_cache_page_write_begin(mapping, index, flags);
- if (!pg)
+ if (!pg) {
+ if (alloc_len)
+ jffs2_complete_reservation(c);
+ mutex_unlock(&f->sem);
return -ENOMEM;
+ }
*pagep = pg;
- jffs2_dbg(1, "%s()\n", __func__);
-
- if (pageofs > inode->i_size) {
+ if (alloc_len) {
/* Make new hole frag from old EOF to new page */
- struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
- struct jffs2_raw_inode ri;
struct jffs2_full_dnode *fn;
- uint32_t alloc_len;
jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new page\n",
(unsigned int)inode->i_size, pageofs);
- ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len,
- ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
- if (ret)
- goto out_page;
-
- mutex_lock(&f->sem);
memset(&ri, 0, sizeof(ri));
ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
if (IS_ERR(fn)) {
ret = PTR_ERR(fn);
jffs2_complete_reservation(c);
- mutex_unlock(&f->sem);
goto out_page;
}
ret = jffs2_add_full_dnode_to_inode(c, f, fn);
jffs2_mark_node_obsolete(c, fn->raw);
jffs2_free_full_dnode(fn);
jffs2_complete_reservation(c);
- mutex_unlock(&f->sem);
goto out_page;
}
jffs2_complete_reservation(c);
inode->i_size = pageofs;
- mutex_unlock(&f->sem);
}
/*
* case of a short-copy.
*/
if (!PageUptodate(pg)) {
- mutex_lock(&f->sem);
ret = jffs2_do_readpage_nolock(inode, pg);
- mutex_unlock(&f->sem);
if (ret)
goto out_page;
}
+ mutex_unlock(&f->sem);
jffs2_dbg(1, "end write_begin(). pg->flags %lx\n", pg->flags);
return ret;
out_page:
unlock_page(pg);
page_cache_release(pg);
+ mutex_unlock(&f->sem);
return ret;
}
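
The jffs2_write_begin rework establishes a single lock order, taking f->sem before the page lock, the same order used by paths that already hold f->sem when touching pages, which closes a potential AB-BA deadlock; the space reservation likewise moves ahead of the page lock because it can block. The ordering rule, reduced to a sketch:

	#include <pthread.h>

	/* Every path must take the inode mutex before the page lock,
	 * or two paths taking them in opposite orders can deadlock. */
	static pthread_mutex_t inode_sem = PTHREAD_MUTEX_INITIALIZER;
	static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;

	static void write_path(void)
	{
		pthread_mutex_lock(&inode_sem);	/* 1st: inode */
		pthread_mutex_lock(&page_lock);	/* 2nd: page  */
		/* ... modify page ... */
		pthread_mutex_unlock(&page_lock);
		pthread_mutex_unlock(&inode_sem);
	}
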
struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap;
struct super_block *sb = ipbmap->i_sb;
int agno, agno_end;
- s64 start, end, minlen;
+ u64 start, end, minlen;
u64 trimmed = 0;
/**
* minlen: minimum extent length in Bytes
*/
start = range->start >> sb->s_blocksize_bits;
- if (start < 0)
- start = 0;
end = start + (range->len >> sb->s_blocksize_bits) - 1;
- if (end >= bmp->db_mapsize)
- end = bmp->db_mapsize - 1;
minlen = range->minlen >> sb->s_blocksize_bits;
- if (minlen <= 0)
+ if (minlen == 0)
minlen = 1;
+ if (minlen > bmp->db_agsize ||
+ start >= bmp->db_mapsize ||
+ range->len < sb->s_blocksize)
+ return -EINVAL;
+
+ if (end >= bmp->db_mapsize)
+ end = bmp->db_mapsize - 1;
+
/**
* we trim all ag's within the range
*/
return rpc_create(&args);
}
+static struct rpc_clnt *nsm_client_set(struct lockd_net *ln,
+ struct rpc_clnt *clnt)
+{
+ spin_lock(&ln->nsm_clnt_lock);
+ if (ln->nsm_users == 0) {
+ if (clnt == NULL)
+ goto out;
+ ln->nsm_clnt = clnt;
+ }
+ clnt = ln->nsm_clnt;
+ ln->nsm_users++;
+out:
+ spin_unlock(&ln->nsm_clnt_lock);
+ return clnt;
+}
+
static struct rpc_clnt *nsm_client_get(struct net *net)
{
- static DEFINE_MUTEX(nsm_create_mutex);
- struct rpc_clnt *clnt;
+ struct rpc_clnt *clnt, *new;
struct lockd_net *ln = net_generic(net, lockd_net_id);
- spin_lock(&ln->nsm_clnt_lock);
- if (ln->nsm_users) {
- ln->nsm_users++;
- clnt = ln->nsm_clnt;
- spin_unlock(&ln->nsm_clnt_lock);
+ clnt = nsm_client_set(ln, NULL);
+ if (clnt != NULL)
goto out;
- }
- spin_unlock(&ln->nsm_clnt_lock);
- mutex_lock(&nsm_create_mutex);
- clnt = nsm_create(net);
- if (!IS_ERR(clnt)) {
- ln->nsm_clnt = clnt;
- smp_wmb();
- ln->nsm_users = 1;
- }
- mutex_unlock(&nsm_create_mutex);
+ clnt = new = nsm_create(net);
+ if (IS_ERR(clnt))
+ goto out;
+
+ clnt = nsm_client_set(ln, new);
+ if (clnt != new)
+ rpc_shutdown_client(new);
out:
return clnt;
}
static void nsm_client_put(struct net *net)
{
struct lockd_net *ln = net_generic(net, lockd_net_id);
- struct rpc_clnt *clnt = ln->nsm_clnt;
- int shutdown = 0;
+ struct rpc_clnt *clnt = NULL;
spin_lock(&ln->nsm_clnt_lock);
- if (ln->nsm_users) {
- if (--ln->nsm_users)
- ln->nsm_clnt = NULL;
- shutdown = !ln->nsm_users;
+ ln->nsm_users--;
+ if (ln->nsm_users == 0) {
+ clnt = ln->nsm_clnt;
+ ln->nsm_clnt = NULL;
}
spin_unlock(&ln->nsm_clnt_lock);
-
- if (shutdown)
+ if (clnt != NULL)
rpc_shutdown_client(clnt);
}
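
The nsm_client rework replaces the creation mutex with the "create unlocked, install under the lock, discard the loser" pattern: nsm_create() may sleep, so it runs without the spinlock, and nsm_client_set() arbitrates which instance wins. A userspace sketch of the same shape (a mutex stands in for the spinlock; hypothetical names):

	#include <pthread.h>
	#include <stdlib.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static void *shared;
	static unsigned int users;

	static void *get_shared(void *(*create)(void))
	{
		void *new, *cur;

		pthread_mutex_lock(&lock);
		cur = users ? shared : NULL;
		if (cur)
			users++;
		pthread_mutex_unlock(&lock);
		if (cur)
			return cur;

		new = create();			/* may sleep; no lock held */
		if (!new)
			return NULL;

		pthread_mutex_lock(&lock);
		if (users == 0) {
			shared = new;
			cur = new;
		} else {
			cur = shared;		/* lost the race */
		}
		users++;
		pthread_mutex_unlock(&lock);
		if (cur != new)
			free(new);		/* discard the loser */
		return cur;
	}
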
path_put(link);
}
-int sysctl_protected_symlinks __read_mostly = 1;
-int sysctl_protected_hardlinks __read_mostly = 1;
+int sysctl_protected_symlinks __read_mostly = 0;
+int sysctl_protected_hardlinks __read_mostly = 0;
/**
* may_follow_link - Check symlink following for unsafe situations
svc_exit_thread(cb_info->rqst);
cb_info->rqst = NULL;
cb_info->task = NULL;
- return PTR_ERR(cb_info->task);
+ return ret;
}
dprintk("nfs_callback_up: service started\n");
return 0;
{
char buf1[NFS_DNS_HOSTNAME_MAXLEN+1];
struct nfs_dns_ent key, *item;
- unsigned long ttl;
+ unsigned int ttl;
ssize_t len;
int ret = -EINVAL;
key.namelen = len;
memset(&key.h, 0, sizeof(key.h));
- ttl = get_expiry(&buf);
+ if (get_uint(&buf, &ttl) < 0)
+ goto out;
if (ttl == 0)
goto out;
key.h.expiry_time = ttl + seconds_since_boot();
if (ctx->cred != NULL)
put_rpccred(ctx->cred);
dput(ctx->dentry);
- nfs_sb_deactive(sb);
+ if (is_sync)
+ nfs_sb_deactive(sb);
+ else
+ nfs_sb_deactive_async(sb);
kfree(ctx->mdsthreshold);
kfree(ctx);
}
extern void __exit unregister_nfs_fs(void);
extern void nfs_sb_active(struct super_block *sb);
extern void nfs_sb_deactive(struct super_block *sb);
+extern void nfs_sb_deactive_async(struct super_block *sb);
/* namespace.c */
+#define NFS_PATH_CANONICAL 1
extern char *nfs_path(char **p, struct dentry *dentry,
- char *buffer, ssize_t buflen);
+ char *buffer, ssize_t buflen, unsigned flags);
extern struct vfsmount *nfs_d_automount(struct path *path);
struct vfsmount *nfs_submount(struct nfs_server *, struct dentry *,
struct nfs_fh *, struct nfs_fattr *);
char *buffer, ssize_t buflen)
{
char *dummy;
- return nfs_path(&dummy, dentry, buffer, buflen);
+ return nfs_path(&dummy, dentry, buffer, buflen, NFS_PATH_CANONICAL);
}
/*
else
msg.rpc_proc = &mnt_clnt->cl_procinfo[MOUNTPROC_MNT];
- status = rpc_call_sync(mnt_clnt, &msg, 0);
+ status = rpc_call_sync(mnt_clnt, &msg, RPC_TASK_SOFT|RPC_TASK_TIMEOUT);
rpc_shutdown_client(mnt_clnt);
if (status < 0)
* @dentry - pointer to dentry
* @buffer - result buffer
* @buflen - length of buffer
+ * @flags - options (see below)
*
* Helper function for constructing the server pathname
* by arbitrary hashed dentry.
* This is mainly for use in figuring out the path on the
* server side when automounting on top of an existing partition
* and in generating /proc/mounts and friends.
+ *
+ * Supported flags:
+ * NFS_PATH_CANONICAL: ensure there is exactly one slash after
+ * the original device (export) name
+ * (if unset, the original name is returned verbatim)
*/
-char *nfs_path(char **p, struct dentry *dentry, char *buffer, ssize_t buflen)
+char *nfs_path(char **p, struct dentry *dentry, char *buffer, ssize_t buflen,
+ unsigned flags)
{
char *end;
int namelen;
rcu_read_unlock();
goto rename_retry;
}
- if (*end != '/') {
+ if ((flags & NFS_PATH_CANONICAL) && *end != '/') {
if (--buflen < 0) {
spin_unlock(&dentry->d_lock);
rcu_read_unlock();
return end;
}
namelen = strlen(base);
- /* Strip off excess slashes in base string */
- while (namelen > 0 && base[namelen - 1] == '/')
- namelen--;
+ if (flags & NFS_PATH_CANONICAL) {
+ /* Strip off excess slashes in base string */
+ while (namelen > 0 && base[namelen - 1] == '/')
+ namelen--;
+ }
buflen -= namelen;
if (buflen < 0) {
spin_unlock(&dentry->d_lock);
}
}
+static void filelayout_fenceme(struct inode *inode, struct pnfs_layout_hdr *lo)
+{
+ if (!test_and_clear_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
+ return;
+ clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(inode)->flags);
+ pnfs_return_layout(inode);
+}
+
static int filelayout_async_handle_error(struct rpc_task *task,
struct nfs4_state *state,
struct nfs_client *clp,
struct pnfs_layout_segment *lseg)
{
- struct inode *inode = lseg->pls_layout->plh_inode;
+ struct pnfs_layout_hdr *lo = lseg->pls_layout;
+ struct inode *inode = lo->plh_inode;
struct nfs_server *mds_server = NFS_SERVER(inode);
struct nfs4_deviceid_node *devid = FILELAYOUT_DEVID_NODE(lseg);
struct nfs_client *mds_client = mds_server->nfs_client;
dprintk("%s DS connection error %d\n", __func__,
task->tk_status);
nfs4_mark_deviceid_unavailable(devid);
- clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(inode)->flags);
- _pnfs_return_layout(inode);
+ set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags);
rpc_wake_up(&tbl->slot_tbl_waitq);
- nfs4_ds_disconnect(clp);
/* fall through */
default:
reset:
static void filelayout_read_release(void *data)
{
struct nfs_read_data *rdata = data;
+ struct pnfs_layout_hdr *lo = rdata->header->lseg->pls_layout;
+ filelayout_fenceme(lo->plh_inode, lo);
nfs_put_client(rdata->ds_clp);
rdata->header->mds_ops->rpc_release(data);
}
static void filelayout_write_release(void *data)
{
struct nfs_write_data *wdata = data;
+ struct pnfs_layout_hdr *lo = wdata->header->lseg->pls_layout;
+ filelayout_fenceme(lo->plh_inode, lo);
nfs_put_client(wdata->ds_clp);
wdata->header->mds_ops->rpc_release(data);
}
goto out_err;
if (fl->num_fh > 0) {
- fl->fh_array = kzalloc(fl->num_fh * sizeof(struct nfs_fh *),
+ fl->fh_array = kcalloc(fl->num_fh, sizeof(fl->fh_array[0]),
gfp_flags);
if (!fl->fh_array)
goto out_err;
extern void nfs4_fl_free_deviceid(struct nfs4_file_layout_dsaddr *dsaddr);
struct nfs4_file_layout_dsaddr *
filelayout_get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id, gfp_t gfp_flags);
-void nfs4_ds_disconnect(struct nfs_client *clp);
#endif /* FS_NFS_NFS4FILELAYOUT_H */
}
/*
- * Lookup DS by nfs_client pointer. Zero data server client pointer
- */
-void nfs4_ds_disconnect(struct nfs_client *clp)
-{
- struct nfs4_pnfs_ds *ds;
- struct nfs_client *found = NULL;
-
- dprintk("%s clp %p\n", __func__, clp);
- spin_lock(&nfs4_ds_cache_lock);
- list_for_each_entry(ds, &nfs4_data_server_cache, ds_node)
- if (ds->ds_clp && ds->ds_clp == clp) {
- found = ds->ds_clp;
- ds->ds_clp = NULL;
- }
- spin_unlock(&nfs4_ds_cache_lock);
- if (found) {
- set_bit(NFS_CS_STOP_RENEW, &clp->cl_res_state);
- nfs_put_client(clp);
- }
-}
-
-/*
* Create an rpc connection to the nfs4_pnfs_ds data server
* Currently only supports IPv4 and IPv6 addresses
*/
#include <linux/nfs_fs.h>
#include "nfs4_fs.h"
+#include "internal.h"
#define NFSDBG_FACILITY NFSDBG_CLIENT
static char *nfs4_path(struct dentry *dentry, char *buffer, ssize_t buflen)
{
char *limit;
- char *path = nfs_path(&limit, dentry, buffer, buflen);
+ char *path = nfs_path(&limit, dentry, buffer, buflen,
+ NFS_PATH_CANONICAL);
if (!IS_ERR(path)) {
char *path_component = nfs_path_component(path, limit);
if (path_component)
dprintk("%s ERROR: %d Reset session\n", __func__,
errorcode);
nfs4_schedule_session_recovery(clp->cl_session, errorcode);
- exception->retry = 1;
- break;
+ goto wait_on_recovery;
#endif /* defined(CONFIG_NFS_V4_1) */
case -NFS4ERR_FILE_OPEN:
if (exception->timeout > HZ) {
data->timestamp = jiffies;
if (nfs4_setup_sequence(data->o_arg.server,
&data->o_arg.seq_args,
- &data->o_res.seq_res, task))
- return;
- rpc_call_start(task);
+ &data->o_res.seq_res,
+ task) != 0)
+ nfs_release_seqid(data->o_arg.seqid);
+ else
+ rpc_call_start(task);
return;
unlock_no_action:
rcu_read_unlock();
/* even though OPEN succeeded, access is denied. Close the file */
nfs4_close_state(state, fmode);
- return -NFS4ERR_ACCESS;
+ return -EACCES;
}
/*
nfs4_put_open_state(calldata->state);
nfs_free_seqid(calldata->arg.seqid);
nfs4_put_state_owner(sp);
- nfs_sb_deactive(sb);
+ nfs_sb_deactive_async(sb);
kfree(calldata);
}
if (nfs4_setup_sequence(NFS_SERVER(inode),
&calldata->arg.seq_args,
&calldata->res.seq_res,
- task))
- goto out;
- rpc_call_start(task);
+ task) != 0)
+ nfs_release_seqid(calldata->arg.seqid);
+ else
+ rpc_call_start(task);
out:
dprintk("%s: done!\n", __func__);
}
if (nfs4_async_handle_error(task, calldata->server, NULL) == -EAGAIN)
rpc_restart_call_prepare(task);
}
+ nfs_release_seqid(calldata->arg.seqid);
}
static void nfs4_locku_prepare(struct rpc_task *task, void *data)
calldata->timestamp = jiffies;
if (nfs4_setup_sequence(calldata->server,
&calldata->arg.seq_args,
- &calldata->res.seq_res, task))
- return;
- rpc_call_start(task);
+ &calldata->res.seq_res,
+ task) != 0)
+ nfs_release_seqid(calldata->arg.seqid);
+ else
+ rpc_call_start(task);
}
static const struct rpc_call_ops nfs4_locku_ops = {
/* Do we need to do an open_to_lock_owner? */
if (!(data->arg.lock_seqid->sequence->flags & NFS_SEQID_CONFIRMED)) {
if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0)
- return;
+ goto out_release_lock_seqid;
data->arg.open_stateid = &state->stateid;
data->arg.new_lock_owner = 1;
data->res.open_seqid = data->arg.open_seqid;
data->timestamp = jiffies;
if (nfs4_setup_sequence(data->server,
&data->arg.seq_args,
- &data->res.seq_res, task))
+ &data->res.seq_res,
+ task) == 0) {
+ rpc_call_start(task);
return;
- rpc_call_start(task);
- dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status);
+ }
+ nfs_release_seqid(data->arg.open_seqid);
+out_release_lock_seqid:
+ nfs_release_seqid(data->arg.lock_seqid);
+ dprintk("%s: done!, ret = %d\n", __func__, task->tk_status);
}
static void nfs4_recover_lock_prepare(struct rpc_task *task, void *calldata)
tbl->slots = new;
tbl->max_slots = max_slots;
}
- tbl->highest_used_slotid = -1; /* no slot is currently used */
+ tbl->highest_used_slotid = NFS4_NO_SLOT;
for (i = 0; i < tbl->max_slots; i++)
tbl->slots[i].seq_nr = ivalue;
spin_unlock(&tbl->slot_tbl_lock);
kfree(objios);
}
-enum pnfs_osd_errno osd_pri_2_pnfs_err(enum osd_err_priority oep)
+static enum pnfs_osd_errno osd_pri_2_pnfs_err(enum osd_err_priority oep)
{
switch (oep) {
case OSD_ERR_PRI_NO_ERROR:
(unsigned long)pgio->pg_layout_private;
}
-void objio_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
+static void objio_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
pnfs_generic_pg_init_read(pgio, req);
if (unlikely(pgio->pg_lseg == NULL))
return false;
}
-void objio_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
+static void objio_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
unsigned long stripe_end = 0;
u64 wb_size;
if (likely(nfsi->layout == NULL)) { /* Won the race? */
nfsi->layout = new;
return new;
- }
- pnfs_free_layout_hdr(new);
+ } else if (new != NULL)
+ pnfs_free_layout_hdr(new);
out_existing:
pnfs_get_layout_hdr(nfsi->layout);
return nfsi->layout;
NFS_LAYOUT_RW_FAILED, /* get rw layout failed stop trying */
NFS_LAYOUT_BULK_RECALL, /* bulk recall affecting layout */
NFS_LAYOUT_ROC, /* some lseg had roc bit set */
+ NFS_LAYOUT_RETURN, /* Return this layout ASAP */
};
enum layoutdriver_policy_flags {
#include <linux/parser.h>
#include <linux/nsproxy.h>
#include <linux/rcupdate.h>
+#include <linux/kthread.h>
#include <asm/uaccess.h>
}
EXPORT_SYMBOL_GPL(nfs_sb_deactive);
+static int nfs_deactivate_super_async_work(void *ptr)
+{
+ struct super_block *sb = ptr;
+
+ deactivate_super(sb);
+ module_put_and_exit(0);
+ return 0;
+}
+
+/*
+ * same effect as deactivate_super, but will do final unmount in kthread
+ * context
+ */
+static void nfs_deactivate_super_async(struct super_block *sb)
+{
+ struct task_struct *task;
+ char buf[INET6_ADDRSTRLEN + 1];
+ struct nfs_server *server = NFS_SB(sb);
+ struct nfs_client *clp = server->nfs_client;
+
+ if (!atomic_add_unless(&sb->s_active, -1, 1)) {
+ rcu_read_lock();
+ snprintf(buf, sizeof(buf),
+ rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR));
+ rcu_read_unlock();
+
+ __module_get(THIS_MODULE);
+ task = kthread_run(nfs_deactivate_super_async_work, sb,
+ "%s-deactivate-super", buf);
+ if (IS_ERR(task)) {
+ pr_err("%s: kthread_run: %ld\n",
+ __func__, PTR_ERR(task));
+ /* make synchronous call and hope for the best */
+ deactivate_super(sb);
+ module_put(THIS_MODULE);
+ }
+ }
+}
+
+void nfs_sb_deactive_async(struct super_block *sb)
+{
+ struct nfs_server *server = NFS_SB(sb);
+
+ if (atomic_dec_and_test(&server->active))
+ nfs_deactivate_super_async(sb);
+}
+EXPORT_SYMBOL_GPL(nfs_sb_deactive_async);
+
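The heart of nfs_deactivate_super_async() is atomic_add_unless(&sb->s_active, -1, 1): decrement unless this would drop the last reference; when it would, the caller keeps ownership of the final put and defers the blocking teardown to a kthread. The primitive, spelled out with C11 atomics (sketch):

	#include <stdatomic.h>
	#include <stdbool.h>

	/* C11 spelling of atomic_add_unless(&count, -1, 1): decrement
	 * unless the count is exactly 1; returning false hands the
	 * caller the final reference to retire (here, in a kthread). */
	static bool dec_unless_last(atomic_int *count)
	{
		int old = atomic_load(count);

		while (old != 1) {
			if (atomic_compare_exchange_weak(count, &old, old - 1))
				return true;	/* decremented; others remain */
		}
		return false;		/* count == 1: caller owns teardown */
	}
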
/*
* Deliver file system statistics to userspace
*/
int err = 0;
if (!page)
return -ENOMEM;
- devname = nfs_path(&dummy, root, page, PAGE_SIZE);
+ devname = nfs_path(&dummy, root, page, PAGE_SIZE, 0);
if (IS_ERR(devname))
err = PTR_ERR(devname);
else
nfs_dec_sillycount(data->dir);
nfs_free_unlinkdata(data);
- nfs_sb_deactive(sb);
+ nfs_sb_deactive_async(sb);
}
static void nfs_unlink_prepare(struct rpc_task *task, void *calldata)
if ((old->path.mnt == new->path.mnt) &&
(old->path.dentry == new->path.dentry))
return true;
+ break;
case (FSNOTIFY_EVENT_NONE):
return true;
default:
if (ret)
goto out_close_fd;
- fd_install(fd, f);
+ if (fd != FAN_NOFD)
+ fd_install(fd, f);
return fanotify_event_metadata.event_len;
out_close_fd:
.release = mem_release,
};
+static ssize_t oom_adj_read(struct file *file, char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
+ char buffer[PROC_NUMBUF];
+ int oom_adj = OOM_ADJUST_MIN;
+ size_t len;
+ unsigned long flags;
+
+ if (!task)
+ return -ESRCH;
+ if (lock_task_sighand(task, &flags)) {
+ if (task->signal->oom_score_adj == OOM_SCORE_ADJ_MAX)
+ oom_adj = OOM_ADJUST_MAX;
+ else
+ oom_adj = (task->signal->oom_score_adj * -OOM_DISABLE) /
+ OOM_SCORE_ADJ_MAX;
+ unlock_task_sighand(task, &flags);
+ }
+ put_task_struct(task);
+ len = snprintf(buffer, sizeof(buffer), "%d\n", oom_adj);
+ return simple_read_from_buffer(buf, count, ppos, buffer, len);
+}
+
+static ssize_t oom_adj_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct task_struct *task;
+ char buffer[PROC_NUMBUF];
+ int oom_adj;
+ unsigned long flags;
+ int err;
+
+ memset(buffer, 0, sizeof(buffer));
+ if (count > sizeof(buffer) - 1)
+ count = sizeof(buffer) - 1;
+ if (copy_from_user(buffer, buf, count)) {
+ err = -EFAULT;
+ goto out;
+ }
+
+ err = kstrtoint(strstrip(buffer), 0, &oom_adj);
+ if (err)
+ goto out;
+ if ((oom_adj < OOM_ADJUST_MIN || oom_adj > OOM_ADJUST_MAX) &&
+ oom_adj != OOM_DISABLE) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ task = get_proc_task(file->f_path.dentry->d_inode);
+ if (!task) {
+ err = -ESRCH;
+ goto out;
+ }
+
+ task_lock(task);
+ if (!task->mm) {
+ err = -EINVAL;
+ goto err_task_lock;
+ }
+
+ if (!lock_task_sighand(task, &flags)) {
+ err = -ESRCH;
+ goto err_task_lock;
+ }
+
+ /*
+ * Scale /proc/pid/oom_score_adj appropriately, ensuring that a maximum
+ * value is always attainable.
+ */
+ if (oom_adj == OOM_ADJUST_MAX)
+ oom_adj = OOM_SCORE_ADJ_MAX;
+ else
+ oom_adj = (oom_adj * OOM_SCORE_ADJ_MAX) / -OOM_DISABLE;
+
+ if (oom_adj < task->signal->oom_score_adj &&
+ !capable(CAP_SYS_RESOURCE)) {
+ err = -EACCES;
+ goto err_sighand;
+ }
+
+ /*
+ * /proc/pid/oom_adj is provided for legacy purposes, ask users to use
+ * /proc/pid/oom_score_adj instead.
+ */
+ printk_once(KERN_WARNING "%s (%d): /proc/%d/oom_adj is deprecated, please use /proc/%d/oom_score_adj instead.\n",
+ current->comm, task_pid_nr(current), task_pid_nr(task),
+ task_pid_nr(task));
+
+ task->signal->oom_score_adj = oom_adj;
+ trace_oom_score_adj_update(task);
+err_sighand:
+ unlock_task_sighand(task, &flags);
+err_task_lock:
+ task_unlock(task);
+ put_task_struct(task);
+out:
+ return err < 0 ? err : count;
+}
+
+static const struct file_operations proc_oom_adj_operations = {
+ .read = oom_adj_read,
+ .write = oom_adj_write,
+ .llseek = generic_file_llseek,
+};
+
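For reference, the scaling above maps the legacy [-17, 15] oom_adj range onto the [-1000, 1000] oom_score_adj range and back. A minimal sketch of the round trip, using only the OOM_* constants from the uapi header (the helper names are illustrative, not kernel API):

static int oom_adj_to_score_adj(int oom_adj)
{
	/* OOM_ADJUST_MAX (15) maps straight to OOM_SCORE_ADJ_MAX (1000) so
	 * the maximum stays attainable despite integer truncation. */
	if (oom_adj == OOM_ADJUST_MAX)
		return OOM_SCORE_ADJ_MAX;
	return (oom_adj * OOM_SCORE_ADJ_MAX) / -OOM_DISABLE;	/* e.g. 8 -> 470 */
}

static int score_adj_to_oom_adj(int score_adj)
{
	if (score_adj == OOM_SCORE_ADJ_MAX)
		return OOM_ADJUST_MAX;
	return (score_adj * -OOM_DISABLE) / OOM_SCORE_ADJ_MAX;	/* 470 -> 7 */
}

The round trip 8 -> 470 -> 7 is lossy, which is one more reason the file exists only for legacy users and warns on write.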
static ssize_t oom_score_adj_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
if (!vma)
goto out_no_vma;
- result = proc_map_files_instantiate(dir, dentry, task,
- (void *)(unsigned long)vma->vm_file->f_mode);
+ if (vma->vm_file)
+ result = proc_map_files_instantiate(dir, dentry, task,
+ (void *)(unsigned long)vma->vm_file->f_mode);
out_no_vma:
up_read(&mm->mmap_sem);
REG("cgroup", S_IRUGO, proc_cgroup_operations),
#endif
INF("oom_score", S_IRUGO, proc_oom_score),
+ REG("oom_adj", S_IRUGO|S_IWUSR, proc_oom_adj_operations),
REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations),
#ifdef CONFIG_AUDITSYSCALL
REG("loginuid", S_IWUSR|S_IRUGO, proc_loginuid_operations),
REG("cgroup", S_IRUGO, proc_cgroup_operations),
#endif
INF("oom_score", S_IRUGO, proc_oom_score),
+ REG("oom_adj", S_IRUGO|S_IWUSR, proc_oom_adj_operations),
REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations),
#ifdef CONFIG_AUDITSYSCALL
REG("loginuid", S_IWUSR|S_IRUGO, proc_loginuid_operations),
static u64 get_idle_time(int cpu)
{
- u64 idle, idle_time = get_cpu_idle_time_us(cpu, NULL);
+ u64 idle, idle_time = -1ULL;
+
+ if (cpu_online(cpu))
+ idle_time = get_cpu_idle_time_us(cpu, NULL);
if (idle_time == -1ULL)
- /* !NO_HZ so we can rely on cpustat.idle */
+ /* !NO_HZ or cpu offline so we can rely on cpustat.idle */
idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE];
else
idle = usecs_to_cputime64(idle_time);
static u64 get_iowait_time(int cpu)
{
- u64 iowait, iowait_time = get_cpu_iowait_time_us(cpu, NULL);
+ u64 iowait, iowait_time = -1ULL;
+
+ if (cpu_online(cpu))
+ iowait_time = get_cpu_iowait_time_us(cpu, NULL);
if (iowait_time == -1ULL)
- /* !NO_HZ so we can rely on cpustat.iowait */
+ /* !NO_HZ or cpu offline so we can rely on cpustat.iowait */
iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT];
else
iowait = usecs_to_cputime64(iowait_time);
while (s < e) {
unsigned long flags;
+ u64 id;
if (c > psinfo->bufsize)
c = psinfo->bufsize;
spin_lock_irqsave(&psinfo->buf_lock, flags);
}
memcpy(psinfo->buf, s, c);
- psinfo->write(PSTORE_TYPE_CONSOLE, 0, NULL, 0, c, psinfo);
+ psinfo->write(PSTORE_TYPE_CONSOLE, 0, &id, 0, c, psinfo);
spin_unlock_irqrestore(&psinfo->buf_lock, flags);
s += c;
c = e - s;
BUG_ON(!th->t_trans_id);
- dquot_initialize(inode);
+ reiserfs_write_unlock(inode->i_sb);
err = dquot_alloc_inode(inode);
+ reiserfs_write_lock(inode->i_sb);
if (err)
goto out_end_trans;
if (!dir->i_nlink) {
out_end_trans:
journal_end(th, th->t_super, th->t_blocks_allocated);
+ reiserfs_write_unlock(inode->i_sb);
/* Drop can be outside and it needs more credits so it's better to have it outside */
dquot_drop(inode);
+ reiserfs_write_lock(inode->i_sb);
inode->i_flags |= S_NOQUOTA;
make_bad_inode(inode);
/* must be turned off for recursive notify_change calls */
ia_valid = attr->ia_valid &= ~(ATTR_KILL_SUID|ATTR_KILL_SGID);
- depth = reiserfs_write_lock_once(inode->i_sb);
if (is_quota_modification(inode, attr))
dquot_initialize(inode);
-
+ depth = reiserfs_write_lock_once(inode->i_sb);
if (attr->ia_valid & ATTR_SIZE) {
/* version 2 items will be caught by the s_maxbytes check
** done for us in vmtruncate
error = journal_begin(&th, inode->i_sb, jbegin_count);
if (error)
goto out;
+ reiserfs_write_unlock_once(inode->i_sb, depth);
error = dquot_transfer(inode, attr);
+ depth = reiserfs_write_lock_once(inode->i_sb);
if (error) {
journal_end(&th, inode->i_sb, jbegin_count);
goto out;
key2type(&(key->on_disk_key)));
#endif
+ reiserfs_write_unlock(inode->i_sb);
retval = dquot_alloc_space_nodirty(inode, pasted_size);
+ reiserfs_write_lock(inode->i_sb);
if (retval) {
pathrelse(search_path);
return retval;
"reiserquota insert_item(): allocating %u id=%u type=%c",
quota_bytes, inode->i_uid, head2type(ih));
#endif
+ reiserfs_write_unlock(inode->i_sb);
/* We can't dirty inode here. It would be immediately written but
* appropriate stat item isn't inserted yet... */
retval = dquot_alloc_space_nodirty(inode, quota_bytes);
+ reiserfs_write_lock(inode->i_sb);
if (retval) {
pathrelse(path);
return retval;
retval = remove_save_link_only(s, &save_link_key, 0);
continue;
}
+ reiserfs_write_unlock(s);
dquot_initialize(inode);
+ reiserfs_write_lock(s);
if (truncate && S_ISDIR(inode->i_mode)) {
/* We got a truncate request for a dir which is impossible.
kfree(qf_names[i]);
#endif
err = -EINVAL;
- goto out_err;
+ goto out_unlock;
}
#ifdef CONFIG_QUOTA
handle_quota_files(s, qf_names, &qfmt);
if (blocks) {
err = reiserfs_resize(s, blocks);
if (err != 0)
- goto out_err;
+ goto out_unlock;
}
if (*mount_flags & MS_RDONLY) {
/* it is read-only already */
goto out_ok;
+ /*
+ * Drop write lock. Quota will retake it when needed and lock
+ * ordering requires calling dquot_suspend() without it.
+ */
+ reiserfs_write_unlock(s);
err = dquot_suspend(s, -1);
if (err < 0)
goto out_err;
+ reiserfs_write_lock(s);
/* try to remount file system with read-only permissions */
if (sb_umount_state(rs) == REISERFS_VALID_FS
err = journal_begin(&th, s, 10);
if (err)
- goto out_err;
+ goto out_unlock;
/* Mounting a rw partition read-only. */
reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
if (reiserfs_is_journal_aborted(journal)) {
err = journal->j_errno;
- goto out_err;
+ goto out_unlock;
}
handle_data_mode(s, mount_options);
s->s_flags &= ~MS_RDONLY; /* now it is safe to call journal_begin */
err = journal_begin(&th, s, 10);
if (err)
- goto out_err;
+ goto out_unlock;
/* Mount a partition which is read-only, read-write */
reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
SB_JOURNAL(s)->j_must_wait = 1;
err = journal_end(&th, s, 10);
if (err)
- goto out_err;
+ goto out_unlock;
if (!(*mount_flags & MS_RDONLY)) {
+ /*
+ * Drop write lock. Quota will retake it when needed and lock
+ * ordering requires calling dquot_resume() without it.
+ */
+ reiserfs_write_unlock(s);
dquot_resume(s, -1);
+ reiserfs_write_lock(s);
finish_unfinished(s);
reiserfs_xattr_init(s, *mount_flags);
}
reiserfs_write_unlock(s);
return 0;
+out_unlock:
+ reiserfs_write_unlock(s);
out_err:
kfree(new_opts);
- reiserfs_write_unlock(s);
return err;
}
REISERFS_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
if (ret)
goto out;
+ reiserfs_write_unlock(dquot->dq_sb);
ret = dquot_commit(dquot);
+ reiserfs_write_lock(dquot->dq_sb);
err =
journal_end(&th, dquot->dq_sb,
REISERFS_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
if (!ret && err)
ret = err;
- out:
+out:
reiserfs_write_unlock(dquot->dq_sb);
return ret;
}
REISERFS_QUOTA_INIT_BLOCKS(dquot->dq_sb));
if (ret)
goto out;
+ reiserfs_write_unlock(dquot->dq_sb);
ret = dquot_acquire(dquot);
+ reiserfs_write_lock(dquot->dq_sb);
err =
journal_end(&th, dquot->dq_sb,
REISERFS_QUOTA_INIT_BLOCKS(dquot->dq_sb));
if (!ret && err)
ret = err;
- out:
+out:
reiserfs_write_unlock(dquot->dq_sb);
return ret;
}
ret =
journal_begin(&th, dquot->dq_sb,
REISERFS_QUOTA_DEL_BLOCKS(dquot->dq_sb));
+ reiserfs_write_unlock(dquot->dq_sb);
if (ret) {
/* Release dquot anyway to avoid endless cycle in dqput() */
dquot_release(dquot);
goto out;
}
ret = dquot_release(dquot);
+ reiserfs_write_lock(dquot->dq_sb);
err =
journal_end(&th, dquot->dq_sb,
REISERFS_QUOTA_DEL_BLOCKS(dquot->dq_sb));
if (!ret && err)
ret = err;
- out:
reiserfs_write_unlock(dquot->dq_sb);
+out:
return ret;
}
ret = journal_begin(&th, sb, 2);
if (ret)
goto out;
+ reiserfs_write_unlock(sb);
ret = dquot_commit_info(sb, type);
+ reiserfs_write_lock(sb);
err = journal_end(&th, sb, 2);
if (!ret && err)
ret = err;
- out:
+out:
reiserfs_write_unlock(sb);
return ret;
}
struct reiserfs_transaction_handle th;
int opt = type == USRQUOTA ? REISERFS_USRQUOTA : REISERFS_GRPQUOTA;
- if (!(REISERFS_SB(sb)->s_mount_opt & (1 << opt)))
- return -EINVAL;
+ reiserfs_write_lock(sb);
+ if (!(REISERFS_SB(sb)->s_mount_opt & (1 << opt))) {
+ err = -EINVAL;
+ goto out;
+ }
/* Quotafile not on the same filesystem? */
if (path->dentry->d_sb != sb) {
if (err)
goto out;
}
- err = dquot_quota_on(sb, type, format_id, path);
+ reiserfs_write_unlock(sb);
+ return dquot_quota_on(sb, type, format_id, path);
out:
+ reiserfs_write_unlock(sb);
return err;
}
tocopy = sb->s_blocksize - offset < towrite ?
sb->s_blocksize - offset : towrite;
tmp_bh.b_state = 0;
+ reiserfs_write_lock(sb);
err = reiserfs_get_block(inode, blk, &tmp_bh, GET_BLOCK_CREATE);
+ reiserfs_write_unlock(sb);
if (err)
goto out;
if (offset || tocopy != sb->s_blocksize)
flush_dcache_page(bh->b_page);
set_buffer_uptodate(bh);
unlock_buffer(bh);
+ reiserfs_write_lock(sb);
reiserfs_prepare_for_journal(sb, bh, 1);
journal_mark_dirty(current->journal_info, sb, bh);
if (!journal_quota)
reiserfs_add_ordered_list(inode, bh);
+ reiserfs_write_unlock(sb);
brelse(bh);
offset = 0;
towrite -= tocopy;
/**
* sysfs_pathname - return full path to sysfs dirent
* @sd: sysfs_dirent whose path we want
- * @path: caller allocated buffer
+ * @path: caller allocated buffer of size PATH_MAX
*
* Gives the name "/" to the sysfs_root entry; any path returned
* is relative to wherever sysfs is mounted.
- *
- * XXX: does no error checking on @path size
*/
static char *sysfs_pathname(struct sysfs_dirent *sd, char *path)
{
if (sd->s_parent) {
sysfs_pathname(sd->s_parent, path);
- strcat(path, "/");
+ strlcat(path, "/", PATH_MAX);
}
- strcat(path, sd->s_name);
+ strlcat(path, sd->s_name, PATH_MAX);
return path;
}
char *path = kzalloc(PATH_MAX, GFP_KERNEL);
WARN(1, KERN_WARNING
"sysfs: cannot create duplicate filename '%s'\n",
- (path == NULL) ? sd->s_name :
- strcat(strcat(sysfs_pathname(acxt->parent_sd, path), "/"),
- sd->s_name));
+ (path == NULL) ? sd->s_name
+ : (sysfs_pathname(acxt->parent_sd, path),
+ strlcat(path, "/", PATH_MAX),
+ strlcat(path, sd->s_name, PATH_MAX),
+ path));
kfree(path);
}
if (!lprops) {
lprops = ubifs_fast_find_freeable(c);
if (!lprops) {
- ubifs_assert(c->freeable_cnt == 0);
- if (c->lst.empty_lebs - c->lst.taken_empty_lebs > 0) {
+ /*
+ * The first condition means the following: go scan the
+ * LPT if there are uncategorized lprops, which means
+ * there may be freeable LEBs there (UBIFS does not
+ * store the information about freeable LEBs in the
+ * master node).
+ */
+ if (c->in_a_category_cnt != c->main_lebs ||
+ c->lst.empty_lebs - c->lst.taken_empty_lebs > 0) {
+ ubifs_assert(c->freeable_cnt == 0);
lprops = scan_for_leb_for_idx(c);
if (IS_ERR(lprops)) {
err = PTR_ERR(lprops);
default:
ubifs_assert(0);
}
+
lprops->flags &= ~LPROPS_CAT_MASK;
lprops->flags |= cat;
+ c->in_a_category_cnt += 1;
+ ubifs_assert(c->in_a_category_cnt <= c->main_lebs);
}
/**
default:
ubifs_assert(0);
}
+
+ c->in_a_category_cnt -= 1;
+ ubifs_assert(c->in_a_category_cnt >= 0);
}
/**
* @freeable_list: list of freeable non-index LEBs (free + dirty == @leb_size)
* @frdi_idx_list: list of freeable index LEBs (free + dirty == @leb_size)
* @freeable_cnt: number of freeable LEBs in @freeable_list
+ * @in_a_category_cnt: count of lprops which are in a certain category, which
+ * basically means that they were loaded from the flash
*
* @ltab_lnum: LEB number of LPT's own lprops table
* @ltab_offs: offset of LPT's own lprops table
struct list_head freeable_list;
struct list_head frdi_idx_list;
int freeable_cnt;
+ int in_a_category_cnt;
int ltab_lnum;
int ltab_offs;
/*
* Initialize the args structure.
*/
+ memset(&targs, 0, sizeof(targs));
targs.tp = tp;
targs.mp = mp;
targs.agbp = agbp;
* group or loop over the allocation groups to find the result.
*/
int /* error */
-__xfs_alloc_vextent(
+xfs_alloc_vextent(
xfs_alloc_arg_t *args) /* allocation argument structure */
{
xfs_agblock_t agsize; /* allocation group size */
return error;
}
-static void
-xfs_alloc_vextent_worker(
- struct work_struct *work)
-{
- struct xfs_alloc_arg *args = container_of(work,
- struct xfs_alloc_arg, work);
- unsigned long pflags;
-
- /* we are in a transaction context here */
- current_set_flags_nested(&pflags, PF_FSTRANS);
-
- args->result = __xfs_alloc_vextent(args);
- complete(args->done);
-
- current_restore_flags_nested(&pflags, PF_FSTRANS);
-}
-
-/*
- * Data allocation requests often come in with little stack to work on. Push
- * them off to a worker thread so there is lots of stack to use. Metadata
- * requests, OTOH, are generally from low stack usage paths, so avoid the
- * context switch overhead here.
- */
-int
-xfs_alloc_vextent(
- struct xfs_alloc_arg *args)
-{
- DECLARE_COMPLETION_ONSTACK(done);
-
- if (!args->userdata)
- return __xfs_alloc_vextent(args);
-
-
- args->done = &done;
- INIT_WORK_ONSTACK(&args->work, xfs_alloc_vextent_worker);
- queue_work(xfs_alloc_wq, &args->work);
- wait_for_completion(&done);
- return args->result;
-}
-
/*
* Free an extent.
* Just break up the extent address and hand off to xfs_free_ag_extent
char isfl; /* set if is freelist blocks - !acctg */
char userdata; /* set if this is user data */
xfs_fsblock_t firstblock; /* io first block allocated */
- struct completion *done;
- struct work_struct work;
- int result;
} xfs_alloc_arg_t;
/*
xfs_extent_busy_insert(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1,
XFS_EXTENT_BUSY_SKIP_DISCARD);
xfs_trans_agbtree_delta(cur->bc_tp, -1);
+
+ xfs_trans_binval(cur->bc_tp, bp);
return 0;
}
*
* The fix is two passes across the ioend list - one to start writeback on the
* buffer_heads, and then submit them for I/O on the second pass.
+ *
+ * If @fail is non-zero, it means that we have a situation where some part of
+ * the submission process has failed after we have marked pages for writeback
+ * and unlocked them. In this situation, we need to fail the ioend chain rather
+ * than submit it to IO. This typically only happens on a filesystem shutdown.
*/
STATIC void
xfs_submit_ioend(
struct writeback_control *wbc,
- xfs_ioend_t *ioend)
+ xfs_ioend_t *ioend,
+ int fail)
{
xfs_ioend_t *head = ioend;
xfs_ioend_t *next;
next = ioend->io_list;
bio = NULL;
+ /*
+ * If we are failing the IO now, just mark the ioend with an
+ * error and finish it. This will run IO completion immediately
+ * as there is only one reference to the ioend at this point in
+ * time.
+ */
+ if (fail) {
+ ioend->io_error = -fail;
+ xfs_finish_ioend(ioend);
+ continue;
+ }
+
for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
if (!bio) {
xfs_start_page_writeback(page, 1, count);
- if (ioend && imap_valid) {
+ /* if there is no IO to be submitted for this page, we are done */
+ if (!ioend)
+ return 0;
+
+ ASSERT(iohead);
+
+ /*
+ * Any errors from this point onwards need to be reported through the IO
+ * completion path as we have marked the initial page as under writeback
+ * and unlocked it.
+ */
+ if (imap_valid) {
xfs_off_t end_index;
end_index = imap.br_startoff + imap.br_blockcount;
wbc, end_index);
}
- if (iohead) {
- /*
- * Reserve log space if we might write beyond the on-disk
- * inode size.
- */
- if (ioend->io_type != XFS_IO_UNWRITTEN &&
- xfs_ioend_is_append(ioend)) {
- err = xfs_setfilesize_trans_alloc(ioend);
- if (err)
- goto error;
- }
- xfs_submit_ioend(wbc, iohead);
- }
+ /*
+ * Reserve log space if we might write beyond the on-disk inode size.
+ */
+ err = 0;
+ if (ioend->io_type != XFS_IO_UNWRITTEN && xfs_ioend_is_append(ioend))
+ err = xfs_setfilesize_trans_alloc(ioend);
+
+ xfs_submit_ioend(wbc, iohead, err);
return 0;
leaf2 = blk2->bp->b_addr;
ASSERT(leaf1->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
ASSERT(leaf2->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
+ ASSERT(leaf2->hdr.count == 0);
args = state->args;
trace_xfs_attr_leaf_rebalance(args);
* I assert that since all callers pass in an empty
* second buffer, this code should never execute.
*/
+ ASSERT(0);
/*
* Figure the total bytes to be added to the destination leaf.
args->index2 = 0;
args->blkno2 = blk2->blkno;
} else {
+ /*
+ * On a double leaf split, the original attr location
+ * is already stored in blkno2/index2, so don't
+ * overwrite it, otherwise we corrupt the tree.
+ */
blk2->index = blk1->index
- be16_to_cpu(leaf1->hdr.count);
- args->index = args->index2 = blk2->index;
- args->blkno = args->blkno2 = blk2->blkno;
+ args->index = blk2->index;
+ args->blkno = blk2->blkno;
+ if (!state->extravalid) {
+ /*
+ * set the new attr location to match the old
+ * one and let the higher level split code
+ * decide where in the leaf to place it.
+ */
+ args->index2 = blk2->index;
+ args->blkno2 = blk2->blkno;
+ }
}
} else {
ASSERT(state->inleaf == 1);
* Normal allocation, done through xfs_alloc_vextent.
*/
tryagain = isaligned = 0;
+ memset(&args, 0, sizeof(args));
args.tp = ap->tp;
args.mp = mp;
args.fsbno = ap->blkno;
* Convert to a btree with two levels, one record in root.
*/
XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_BTREE);
+ memset(&args, 0, sizeof(args));
args.tp = tp;
args.mp = mp;
args.firstblock = *firstblock;
xfs_buf_t *bp; /* buffer for extent block */
xfs_bmbt_rec_host_t *ep;/* extent record pointer */
+ memset(&args, 0, sizeof(args));
args.tp = tp;
args.mp = ip->i_mount;
args.firstblock = *firstblock;
STATIC int
-xfs_bmapi_allocate(
- struct xfs_bmalloca *bma,
- int flags)
+__xfs_bmapi_allocate(
+ struct xfs_bmalloca *bma)
{
struct xfs_mount *mp = bma->ip->i_mount;
- int whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
+ int whichfork = (bma->flags & XFS_BMAPI_ATTRFORK) ?
XFS_ATTR_FORK : XFS_DATA_FORK;
struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork);
int tmp_logflags = 0;
* Indicate if this is the first user data in the file, or just any
* user data.
*/
- if (!(flags & XFS_BMAPI_METADATA)) {
+ if (!(bma->flags & XFS_BMAPI_METADATA)) {
bma->userdata = (bma->offset == 0) ?
XFS_ALLOC_INITIAL_USER_DATA : XFS_ALLOC_USERDATA;
}
- bma->minlen = (flags & XFS_BMAPI_CONTIG) ? bma->length : 1;
+ bma->minlen = (bma->flags & XFS_BMAPI_CONTIG) ? bma->length : 1;
/*
* Only want to do the alignment at the eof if it is userdata and
* allocation length is larger than a stripe unit.
*/
if (mp->m_dalign && bma->length >= mp->m_dalign &&
- !(flags & XFS_BMAPI_METADATA) && whichfork == XFS_DATA_FORK) {
+ !(bma->flags & XFS_BMAPI_METADATA) && whichfork == XFS_DATA_FORK) {
error = xfs_bmap_isaeof(bma, whichfork);
if (error)
return error;
}
+ if (bma->flags & XFS_BMAPI_STACK_SWITCH)
+ bma->stack_switch = 1;
+
error = xfs_bmap_alloc(bma);
if (error)
return error;
* A wasdelay extent has been initialized, so shouldn't be flagged
* as unwritten.
*/
- if (!bma->wasdel && (flags & XFS_BMAPI_PREALLOC) &&
+ if (!bma->wasdel && (bma->flags & XFS_BMAPI_PREALLOC) &&
xfs_sb_version_hasextflgbit(&mp->m_sb))
bma->got.br_state = XFS_EXT_UNWRITTEN;
return 0;
}
+static void
+xfs_bmapi_allocate_worker(
+ struct work_struct *work)
+{
+ struct xfs_bmalloca *args = container_of(work,
+ struct xfs_bmalloca, work);
+ unsigned long pflags;
+
+ /* we are in a transaction context here */
+ current_set_flags_nested(&pflags, PF_FSTRANS);
+
+ args->result = __xfs_bmapi_allocate(args);
+ complete(args->done);
+
+ current_restore_flags_nested(&pflags, PF_FSTRANS);
+}
+
+/*
+ * Some allocation requests come in with little stack to work on. Push
+ * them off to a worker thread so there is lots of stack to use. Otherwise just
+ * call directly to avoid the context switch overhead here.
+ */
+int
+xfs_bmapi_allocate(
+ struct xfs_bmalloca *args)
+{
+ DECLARE_COMPLETION_ONSTACK(done);
+
+ if (!args->stack_switch)
+ return __xfs_bmapi_allocate(args);
+
+
+ args->done = &done;
+ INIT_WORK_ONSTACK(&args->work, xfs_bmapi_allocate_worker);
+ queue_work(xfs_alloc_wq, &args->work);
+ wait_for_completion(&done);
+ return args->result;
+}
+
STATIC int
xfs_bmapi_convert_unwritten(
struct xfs_bmalloca *bma,
bma.conv = !!(flags & XFS_BMAPI_CONVERT);
bma.wasdel = wasdelay;
bma.offset = bno;
+ bma.flags = flags;
/*
* There's a 32/64 bit type mismatch between the
ASSERT(len > 0);
ASSERT(bma.length > 0);
- error = xfs_bmapi_allocate(&bma, flags);
+ error = xfs_bmapi_allocate(&bma);
if (error)
goto error0;
if (bma.blkno == NULLFSBLOCK)
* from written to unwritten, otherwise convert from unwritten to written.
*/
#define XFS_BMAPI_CONVERT 0x040
+#define XFS_BMAPI_STACK_SWITCH 0x080
#define XFS_BMAPI_FLAGS \
{ XFS_BMAPI_ENTIRE, "ENTIRE" }, \
{ XFS_BMAPI_PREALLOC, "PREALLOC" }, \
{ XFS_BMAPI_IGSTATE, "IGSTATE" }, \
{ XFS_BMAPI_CONTIG, "CONTIG" }, \
- { XFS_BMAPI_CONVERT, "CONVERT" }
+ { XFS_BMAPI_CONVERT, "CONVERT" }, \
+ { XFS_BMAPI_STACK_SWITCH, "STACK_SWITCH" }
static inline int xfs_bmapi_aflag(int w)
char userdata;/* set if is user data */
char aeof; /* allocated space at eof */
char conv; /* overwriting unwritten extents */
+ char stack_switch;
+ int flags;
+ struct completion *done;
+ struct work_struct work;
+ int result;
} xfs_bmalloca_t;
/*
{
xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private;
- xfs_buf_ioerror(bp, -error);
+ /*
+ * don't overwrite existing errors - otherwise we can lose errors on
+ * buffers that require multiple bios to complete.
+ */
+ if (!bp->b_error)
+ xfs_buf_ioerror(bp, -error);
- if (!error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
+ if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
_xfs_buf_ioend(bp, 1);
if (size)
goto next_chunk;
} else {
+ /*
+ * This is guaranteed not to be the last io reference count
+ * because the caller (xfs_buf_iorequest) holds a count itself.
+ */
+ atomic_dec(&bp->b_io_remaining);
xfs_buf_ioerror(bp, EIO);
bio_put(bio);
}
}
xfs_buf_relse(bp);
} else if (freed && remove) {
+ /*
+ * There are currently two references to the buffer - the active
+ * LRU reference and the buf log item. What we are about to do
+ * here - simulate a failed IO completion - requires 3
+ * references.
+ *
+ * The LRU reference is removed by the xfs_buf_stale() call. The
+ * buf item reference is removed by the xfs_buf_iodone()
+ * callback that is run by xfs_buf_do_callbacks() during ioend
+ * processing (via the bp->b_iodone callback), and then finally
+ * the ioend processing will drop the IO reference if the buffer
+ * is marked XBF_ASYNC.
+ *
+ * Hence we need to take an additional reference here so that IO
+ * completion processing doesn't free the buffer prematurely.
+ */
xfs_buf_lock(bp);
+ xfs_buf_hold(bp);
+ bp->b_flags |= XBF_ASYNC;
xfs_buf_ioerror(bp, EIO);
XFS_BUF_UNDONE(bp);
xfs_buf_stale(bp);
/* update secondary superblocks. */
for (agno = 1; agno < nagcount; agno++) {
- error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
+ error = 0;
+ /*
+ * new secondary superblocks need to be zeroed, not read from
+ * disk, as the contents of the new area we are growing into are
+ * completely unknown.
+ */
+ if (agno < oagcount) {
+ error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
XFS_AGB_TO_DADDR(mp, agno, XFS_SB_BLOCK(mp)),
XFS_FSS_TO_BB(mp, 1), 0, &bp);
+ } else {
+ bp = xfs_trans_get_buf(NULL, mp->m_ddev_targp,
+ XFS_AGB_TO_DADDR(mp, agno, XFS_SB_BLOCK(mp)),
+ XFS_FSS_TO_BB(mp, 1), 0);
+ if (bp)
+ xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
+ else
+ error = ENOMEM;
+ }
+
if (error) {
xfs_warn(mp,
"error %d reading secondary superblock for ag %d",
break; /* no point in continuing */
}
}
- return 0;
+ return error;
error0:
xfs_trans_cancel(tp, XFS_TRANS_ABORT);
/* boundary */
struct xfs_perag *pag;
+ memset(&args, 0, sizeof(args));
args.tp = tp;
args.mp = tp->t_mountp;
* to mark all the active inodes on the buffer stale.
*/
bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
- mp->m_bsize * blks_per_cluster, 0);
+ mp->m_bsize * blks_per_cluster,
+ XBF_UNMAPPED);
if (!bp)
return ENOMEM;
int hsize;
xfs_handle_t handle;
struct inode *inode;
- struct fd f;
+ struct fd f = {0};
struct path path;
int error;
struct xfs_inode *ip;
* pointer that the caller gave to us.
*/
error = xfs_bmapi_write(tp, ip, map_start_fsb,
- count_fsb, 0, &first_block, 1,
+ count_fsb,
+ XFS_BMAPI_STACK_SWITCH,
+ &first_block, 1,
imap, &nimaps, &free_list);
if (error)
goto trans_cancel;
/*
- * update the last_sync_lsn before we drop the
+ * Completion of an iclog IO does not imply that
+ * a transaction has completed, as transactions
+ * can be large enough to span many iclogs. We
+ * cannot change the tail of the log half way
+ * through a transaction as this may be the only
+ * transaction in the log and moving the tail to
+ * point to the middle of it will prevent
+ * recovery from finding the start of the
+ * transaction. Hence we should only update the
+ * last_sync_lsn if this iclog contains
+ * transaction completion callbacks on it.
+ *
+ * We have to do this before we drop the
* icloglock to ensure we are the only one that
* can update it.
*/
ASSERT(XFS_LSN_CMP(atomic64_read(&log->l_last_sync_lsn),
be64_to_cpu(iclog->ic_header.h_lsn)) <= 0);
- atomic64_set(&log->l_last_sync_lsn,
- be64_to_cpu(iclog->ic_header.h_lsn));
+ if (iclog->ic_callback)
+ atomic64_set(&log->l_last_sync_lsn,
+ be64_to_cpu(iclog->ic_header.h_lsn));
} else
ioerrors++;
* - order is important.
*/
error = xlog_bread_offset(log, 0,
- bblks - split_bblks, hbp,
+ bblks - split_bblks, dbp,
offset + BBTOB(split_bblks));
if (error)
goto bread_err2;
{0x1002, 0x6788, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
{0x1002, 0x678A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6790, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6791, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6792, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6798, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6799, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
{0x1002, 0x679A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x679B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
{0x1002, 0x679E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
{0x1002, 0x679F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6810, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6811, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6816, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6817, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6818, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
#define BUILD_BUG_ON_NOT_POWER_OF_2(n)
#define BUILD_BUG_ON_ZERO(e) (0)
#define BUILD_BUG_ON_NULL(e) ((void*)0)
+#define BUILD_BUG_ON_INVALID(e) (0)
#define BUILD_BUG_ON(condition)
#define BUILD_BUG() (0)
#else /* __CHECKER__ */
struct clk_hw *__clk_get_hw(struct clk *clk);
u8 __clk_get_num_parents(struct clk *clk);
struct clk *__clk_get_parent(struct clk *clk);
-inline int __clk_get_enable_count(struct clk *clk);
-inline int __clk_get_prepare_count(struct clk *clk);
+int __clk_get_enable_count(struct clk *clk);
+int __clk_get_prepare_count(struct clk *clk);
unsigned long __clk_get_rate(struct clk *clk);
unsigned long __clk_get_flags(struct clk *clk);
int __clk_is_enabled(struct clk *clk);
const char *fmt, ...);
#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \
- static struct _ddebug __used __aligned(8) \
+ static struct _ddebug __aligned(8) \
__attribute__((section("__verbose"))) name = { \
.modname = KBUILD_MODNAME, \
.function = __func__, \
EXTCON_VIDEO_OUT,
EXTCON_MECHANICAL,
};
-extern const char *extcon_cable_name[];
+extern const char extcon_cable_name[][CABLE_NAME_MAX + 1];
struct extcon_cable;
#define ___GFP_HARDWALL 0x20000u
#define ___GFP_THISNODE 0x40000u
#define ___GFP_RECLAIMABLE 0x80000u
-#define ___GFP_NOTRACK 0x200000u
-#define ___GFP_OTHER_NODE 0x800000u
-#define ___GFP_WRITE 0x1000000u
+#define ___GFP_NOTRACK 0x100000u
+#define ___GFP_OTHER_NODE 0x200000u
+#define ___GFP_WRITE 0x400000u
/*
* GFP bitmasks..
*/
#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
-#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
+#define __GFP_BITS_SHIFT 23 /* Room for N __GFP_FOO bits */
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
/* This equals 0, but use constants in case they ever change */
--- /dev/null
+/*
+ * Statically sized hash table implementation
+ * (C) 2012 Sasha Levin <levinsasha928@gmail.com>
+ */
+
+#ifndef _LINUX_HASHTABLE_H
+#define _LINUX_HASHTABLE_H
+
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/hash.h>
+#include <linux/rculist.h>
+
+#define DEFINE_HASHTABLE(name, bits) \
+ struct hlist_head name[1 << (bits)] = \
+ { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT }
+
+#define DECLARE_HASHTABLE(name, bits) \
+ struct hlist_head name[1 << (bits)]
+
+#define HASH_SIZE(name) (ARRAY_SIZE(name))
+#define HASH_BITS(name) ilog2(HASH_SIZE(name))
+
+/* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels. */
+#define hash_min(val, bits) \
+ (sizeof(val) <= 4 ? hash_32(val, bits) : hash_long(val, bits))
+
+static inline void __hash_init(struct hlist_head *ht, unsigned int sz)
+{
+ unsigned int i;
+
+ for (i = 0; i < sz; i++)
+ INIT_HLIST_HEAD(&ht[i]);
+}
+
+/**
+ * hash_init - initialize a hash table
+ * @hashtable: hashtable to be initialized
+ *
+ * Calculates the size of the hashtable from the given parameter and
+ * initializes all of its buckets.
+ *
+ * This has to be a macro since HASH_BITS() will not work on pointers; it
+ * computes the size from the array type at compile time.
+ */
+#define hash_init(hashtable) __hash_init(hashtable, HASH_SIZE(hashtable))
+
+/**
+ * hash_add - add an object to a hashtable
+ * @hashtable: hashtable to add to
+ * @node: the &struct hlist_node of the object to be added
+ * @key: the key of the object to be added
+ */
+#define hash_add(hashtable, node, key) \
+ hlist_add_head(node, &hashtable[hash_min(key, HASH_BITS(hashtable))])
+
+/**
+ * hash_add_rcu - add an object to a rcu enabled hashtable
+ * @hashtable: hashtable to add to
+ * @node: the &struct hlist_node of the object to be added
+ * @key: the key of the object to be added
+ */
+#define hash_add_rcu(hashtable, node, key) \
+ hlist_add_head_rcu(node, &hashtable[hash_min(key, HASH_BITS(hashtable))])
+
+/**
+ * hash_hashed - check whether an object is in any hashtable
+ * @node: the &struct hlist_node of the object to be checked
+ */
+static inline bool hash_hashed(struct hlist_node *node)
+{
+ return !hlist_unhashed(node);
+}
+
+static inline bool __hash_empty(struct hlist_head *ht, unsigned int sz)
+{
+ unsigned int i;
+
+ for (i = 0; i < sz; i++)
+ if (!hlist_empty(&ht[i]))
+ return false;
+
+ return true;
+}
+
+/**
+ * hash_empty - check whether a hashtable is empty
+ * @hashtable: hashtable to check
+ *
+ * This has to be a macro since HASH_BITS() will not work on pointers; it
+ * computes the size from the array type at compile time.
+ */
+#define hash_empty(hashtable) __hash_empty(hashtable, HASH_SIZE(hashtable))
+
+/**
+ * hash_del - remove an object from a hashtable
+ * @node: &struct hlist_node of the object to remove
+ */
+static inline void hash_del(struct hlist_node *node)
+{
+ hlist_del_init(node);
+}
+
+/**
+ * hash_del_rcu - remove an object from a rcu enabled hashtable
+ * @node: &struct hlist_node of the object to remove
+ */
+static inline void hash_del_rcu(struct hlist_node *node)
+{
+ hlist_del_init_rcu(node);
+}
+
+/**
+ * hash_for_each - iterate over a hashtable
+ * @name: hashtable to iterate
+ * @bkt: integer to use as bucket loop cursor
+ * @node: the &struct list_head to use as a loop cursor for each entry
+ * @obj: the type * to use as a loop cursor for each entry
+ * @member: the name of the hlist_node within the struct
+ */
+#define hash_for_each(name, bkt, node, obj, member) \
+ for ((bkt) = 0, node = NULL; node == NULL && (bkt) < HASH_SIZE(name); (bkt)++)\
+ hlist_for_each_entry(obj, node, &name[bkt], member)
+
+/**
+ * hash_for_each_rcu - iterate over a rcu enabled hashtable
+ * @name: hashtable to iterate
+ * @bkt: integer to use as bucket loop cursor
+ * @node: the &struct list_head to use as a loop cursor for each entry
+ * @obj: the type * to use as a loop cursor for each entry
+ * @member: the name of the hlist_node within the struct
+ */
+#define hash_for_each_rcu(name, bkt, node, obj, member) \
+ for ((bkt) = 0, node = NULL; node == NULL && (bkt) < HASH_SIZE(name); (bkt)++)\
+ hlist_for_each_entry_rcu(obj, node, &name[bkt], member)
+
+/**
+ * hash_for_each_safe - iterate over a hashtable safe against removal of
+ * hash entry
+ * @name: hashtable to iterate
+ * @bkt: integer to use as bucket loop cursor
+ * @node: the &struct list_head to use as a loop cursor for each entry
+ * @tmp: a &struct used for temporary storage
+ * @obj: the type * to use as a loop cursor for each entry
+ * @member: the name of the hlist_node within the struct
+ */
+#define hash_for_each_safe(name, bkt, node, tmp, obj, member) \
+ for ((bkt) = 0, node = NULL; node == NULL && (bkt) < HASH_SIZE(name); (bkt)++)\
+ hlist_for_each_entry_safe(obj, node, tmp, &name[bkt], member)
+
+/**
+ * hash_for_each_possible - iterate over all possible objects hashing to the
+ * same bucket
+ * @name: hashtable to iterate
+ * @obj: the type * to use as a loop cursor for each entry
+ * @node: the &struct list_head to use as a loop cursor for each entry
+ * @member: the name of the hlist_node within the struct
+ * @key: the key of the objects to iterate over
+ */
+#define hash_for_each_possible(name, obj, node, member, key) \
+ hlist_for_each_entry(obj, node, &name[hash_min(key, HASH_BITS(name))], member)
+
+/**
+ * hash_for_each_possible_rcu - iterate over all possible objects hashing to the
+ * same bucket in an rcu enabled hashtable
+ * @name: hashtable to iterate
+ * @obj: the type * to use as a loop cursor for each entry
+ * @node: the &struct list_head to use as a loop cursor for each entry
+ * @member: the name of the hlist_node within the struct
+ * @key: the key of the objects to iterate over
+ */
+#define hash_for_each_possible_rcu(name, obj, node, member, key) \
+ hlist_for_each_entry_rcu(obj, node, &name[hash_min(key, HASH_BITS(name))], member)
+
+/**
+ * hash_for_each_possible_safe - iterate over all possible objects hashing to the
+ * same bucket safe against removals
+ * @name: hashtable to iterate
+ * @obj: the type * to use as a loop cursor for each entry
+ * @node: the &struct list_head to use as a loop cursor for each entry
+ * @tmp: a &struct used for temporary storage
+ * @member: the name of the hlist_node within the struct
+ * @key: the key of the objects to iterate over
+ */
+#define hash_for_each_possible_safe(name, obj, node, tmp, member, key) \
+ hlist_for_each_entry_safe(obj, node, tmp, \
+ &name[hash_min(key, HASH_BITS(name))], member)
+
+
+#endif
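For orientation, a minimal usage sketch of the API above (the struct and helpers are invented for illustration; note that, as in the iteration macros above, this version still threads an explicit struct hlist_node cursor through the loops):

struct object {
	int id;
	struct hlist_node node;	/* links the object into a bucket */
};

static DEFINE_HASHTABLE(obj_table, 3);	/* 1 << 3 = 8 buckets */

static void insert_object(struct object *obj)
{
	hash_add(obj_table, &obj->node, obj->id);
}

static struct object *lookup_object(int id)
{
	struct object *obj;
	struct hlist_node *pos;

	/* Walks only the bucket that 'id' hashes to, not the whole table. */
	hash_for_each_possible(obj_table, obj, pos, node, id)
		if (obj->id == id)
			return obj;
	return NULL;
}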
u32 clkrate;
u32 rev;
u32 flags;
+ void (*set_mpu_wkup_lat)(struct device *dev, long set);
};
#endif
};
#endif
+/**
+ * IIO_DEGREE_TO_RAD() - Convert degrees to radians
+ * @deg: A value in degrees
+ *
+ * Returns the given value converted from degrees to radians
+ */
+#define IIO_DEGREE_TO_RAD(deg) (((deg) * 314159ULL + 9000000ULL) / 18000000ULL)
+
+/**
+ * IIO_G_TO_M_S_2() - Convert g to meter / second**2
+ * @g: A value in g
+ *
+ * Returns the given value converted from g to meter / second**2
+ */
+#define IIO_G_TO_M_S_2(g) ((g) * 980665ULL / 100000ULL)
+
#endif /* _INDUSTRIAL_IO_H_ */
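Both helpers are plain integer arithmetic, so callers typically pass micro-unit values to keep precision (an assumed but common convention; the numbers below simply evaluate the macro definitions):

/* Worked examples, evaluating the definitions above:
 *
 *   IIO_DEGREE_TO_RAD(35000)   (0.035 deg passed as micro-degrees)
 *     = (35000 * 314159 + 9000000) / 18000000 = 611 micro-radians
 *
 *   IIO_G_TO_M_S_2(1000000)    (1 g passed as micro-g)
 *     = 1000000 * 980665 / 100000 = 9806650, i.e. 9.80665 m/s^2
 */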
*/
#define KVM_MEMSLOT_INVALID (1UL << 16)
-/*
- * If we support unaligned MMIO, at most one fragment will be split into two:
- */
-#ifdef KVM_UNALIGNED_MMIO
-# define KVM_EXTRA_MMIO_FRAGMENTS 1
-#else
-# define KVM_EXTRA_MMIO_FRAGMENTS 0
-#endif
-
-#define KVM_USER_MMIO_SIZE 8
-
-#define KVM_MAX_MMIO_FRAGMENTS \
- (KVM_MMIO_SIZE / KVM_USER_MMIO_SIZE + KVM_EXTRA_MMIO_FRAGMENTS)
+/* Two fragments for cross MMIO pages. */
+#define KVM_MAX_MMIO_FRAGMENTS 2
/*
* For the normal pfn, the highest 12 bits should be zero,
int memblock_remove(phys_addr_t base, phys_addr_t size);
int memblock_free(phys_addr_t base, phys_addr_t size);
int memblock_reserve(phys_addr_t base, phys_addr_t size);
+void memblock_trim_memory(phys_addr_t align);
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
#ifndef __LINUX_MFD_MAX77693_H
#define __LINUX_MFD_MAX77693_H
+struct max77693_reg_data {
+ u8 addr;
+ u8 data;
+};
+
+struct max77693_muic_platform_data {
+ struct max77693_reg_data *init_data;
+ int num_init_data;
+};
+
struct max77693_platform_data {
int wakeup;
+
+ /* muic data */
+ struct max77693_muic_platform_data *muic_data;
};
#endif /* __LINUX_MFD_MAX77693_H */
static inline bool page_is_guard(struct page *page) { return false; }
#endif /* CONFIG_DEBUG_PAGEALLOC */
-extern void reset_zone_present_pages(void);
-extern void fixup_zone_present_pages(int nid, unsigned long start_pfn,
- unsigned long end_pfn);
-
#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */
dma_addr_t sg_dma;
void *sg_cpu;
- struct dw_mci_dma_ops *dma_ops;
+ const struct dw_mci_dma_ops *dma_ops;
#ifdef CONFIG_MMC_DW_IDMAC
unsigned int ring_size;
#else
u16 data_offset;
struct device *dev;
struct dw_mci_board *pdata;
- struct dw_mci_drv_data *drv_data;
+ const struct dw_mci_drv_data *drv_data;
void *priv;
struct clk *biu_clk;
struct clk *ciu_clk;
struct regulator *vmmc; /* Power regulator */
unsigned long irq_flags; /* IRQ flags */
- unsigned int irq;
+ int irq;
};
/* DMA ops for Internal/External DMAC interface */
unsigned int quirks2; /* More deviations from spec. */
#define SDHCI_QUIRK2_HOST_OFF_CARD_ON (1<<0)
+#define SDHCI_QUIRK2_HOST_NO_CMD23 (1<<1)
int irq; /* Device IRQ */
void __iomem *ioaddr; /* Mapped address */
unsigned long size,
enum memmap_context context);
-extern void lruvec_init(struct lruvec *lruvec, struct zone *zone);
+extern void lruvec_init(struct lruvec *lruvec);
static inline struct zone *lruvec_zone(struct lruvec *lruvec)
{
#endif
#else /* CONFIG_OF_ADDRESS */
+#ifndef of_address_to_resource
static inline int of_address_to_resource(struct device_node *dev, int index,
struct resource *r)
{
return -EINVAL;
}
+#endif
static inline struct device_node *of_find_matching_node_by_address(
struct device_node *from,
const struct of_device_id *matches,
{
return NULL;
}
+#ifndef of_iomap
static inline void __iomem *of_iomap(struct device_node *device, int index)
{
return NULL;
}
+#endif
static inline const __be32 *of_get_address(struct device_node *dev, int index,
u64 *size, unsigned int *flags)
{
struct mutex mtx;
};
+#define light_mb() barrier()
+#define heavy_mb() synchronize_sched_expedited()
+
static inline void percpu_down_read(struct percpu_rw_semaphore *p)
{
- rcu_read_lock();
+ rcu_read_lock_sched();
if (unlikely(p->locked)) {
- rcu_read_unlock();
+ rcu_read_unlock_sched();
mutex_lock(&p->mtx);
this_cpu_inc(*p->counters);
mutex_unlock(&p->mtx);
return;
}
this_cpu_inc(*p->counters);
- rcu_read_unlock();
+ rcu_read_unlock_sched();
+ light_mb(); /* A, between read of p->locked and read of data, paired with D */
}
static inline void percpu_up_read(struct percpu_rw_semaphore *p)
{
- /*
- * On X86, write operation in this_cpu_dec serves as a memory unlock
- * barrier (i.e. memory accesses may be moved before the write, but
- * no memory accesses are moved past the write).
- * On other architectures this may not be the case, so we need smp_mb()
- * there.
- */
-#if defined(CONFIG_X86) && (!defined(CONFIG_X86_PPRO_FENCE) && !defined(CONFIG_X86_OOSTORE))
- barrier();
-#else
- smp_mb();
-#endif
+ light_mb(); /* B, between read of the data and write to p->counter, paired with C */
this_cpu_dec(*p->counters);
}
{
mutex_lock(&p->mtx);
p->locked = true;
- synchronize_rcu();
+ synchronize_sched_expedited(); /* make sure that all readers exit the rcu_read_lock_sched region */
while (__percpu_count(p->counters))
msleep(1);
- smp_rmb(); /* paired with smp_mb() in percpu_sem_up_read() */
+ heavy_mb(); /* C, between read of p->counter and write to data, paired with B */
}
static inline void percpu_up_write(struct percpu_rw_semaphore *p)
{
+ heavy_mb(); /* D, between write to data and write to p->locked, paired with A */
p->locked = false;
mutex_unlock(&p->mtx);
}
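For context, a hedged sketch of how the two sides pair up (the instance and the guarded variable are invented; percpu_init_rwsem() is assumed to have set up the counters and mutex):

static struct percpu_rw_semaphore state_sem;	/* hypothetical instance */
static int shared_state;

static int read_state(void)
{
	int v;

	percpu_down_read(&state_sem);	/* fast path: per-cpu increment only */
	v = shared_state;		/* ordered against p->locked by A/B */
	percpu_up_read(&state_sem);
	return v;
}

static void write_state(int v)
{
	percpu_down_write(&state_sem);	/* drains all readers first */
	shared_state = v;		/* ordered by C/D */
	percpu_up_write(&state_sem);
}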
do { \
static struct notifier_block fn##_nb __cpuinitdata = \
{ .notifier_call = fn, .priority = CPU_PRI_PERF }; \
+ unsigned long cpu = smp_processor_id(); \
+ unsigned long flags; \
fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE, \
- (void *)(unsigned long)smp_processor_id()); \
+ (void *)(unsigned long)cpu); \
+ local_irq_save(flags); \
fn(&fn##_nb, (unsigned long)CPU_STARTING, \
- (void *)(unsigned long)smp_processor_id()); \
+ (void *)(unsigned long)cpu); \
+ local_irq_restore(flags); \
fn(&fn##_nb, (unsigned long)CPU_ONLINE, \
- (void *)(unsigned long)smp_processor_id()); \
+ (void *)(unsigned long)cpu); \
register_cpu_notifier(&fn##_nb); \
} while (0)
--- /dev/null
+/*
+ * omap_ocp2scp.h -- ocp2scp header file
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DRIVERS_OMAP_OCP2SCP_H
+#define __DRIVERS_OMAP_OCP2SCP_H
+
+struct omap_ocp2scp_dev {
+ const char *drv_name;
+ struct resource *res;
+};
+
+struct omap_ocp2scp_platform_data {
+ int dev_cnt;
+ struct omap_ocp2scp_dev **devices;
+};
+#endif /* __DRIVERS_OMAP_OCP2SCP_H */
* clock operations
*
* @adjfreq: Adjusts the frequency of the hardware clock.
- * parameter delta: Desired period change in parts per billion.
+ * parameter delta: Desired frequency offset from nominal frequency
+ * in parts per billion
*
* @adjtime: Shifts the time of the hardware clock.
* parameter delta: Desired change in nanoseconds.
-header-y += md_p.h
-header-y += md_u.h
(for example /usr/src/linux/COPYING); if not, write to the Free
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-
#ifndef _MD_U_H
#define _MD_U_H
-/*
- * Different major versions are not compatible.
- * Different minor versions are only downward compatible.
- * Different patchlevel versions are downward and upward compatible.
- */
-#define MD_MAJOR_VERSION 0
-#define MD_MINOR_VERSION 90
-/*
- * MD_PATCHLEVEL_VERSION indicates kernel functionality.
- * >=1 means different superblock formats are selectable using SET_ARRAY_INFO
- * and major_version/minor_version accordingly
- * >=2 means that Internal bitmaps are supported by setting MD_SB_BITMAP_PRESENT
- * in the super status byte
- * >=3 means that bitmap superblock version 4 is supported, which uses
- * little-ending representation rather than host-endian
- */
-#define MD_PATCHLEVEL_VERSION 3
-
-/* ioctls */
-
-/* status */
-#define RAID_VERSION _IOR (MD_MAJOR, 0x10, mdu_version_t)
-#define GET_ARRAY_INFO _IOR (MD_MAJOR, 0x11, mdu_array_info_t)
-#define GET_DISK_INFO _IOR (MD_MAJOR, 0x12, mdu_disk_info_t)
-#define PRINT_RAID_DEBUG _IO (MD_MAJOR, 0x13)
-#define RAID_AUTORUN _IO (MD_MAJOR, 0x14)
-#define GET_BITMAP_FILE _IOR (MD_MAJOR, 0x15, mdu_bitmap_file_t)
-
-/* configuration */
-#define CLEAR_ARRAY _IO (MD_MAJOR, 0x20)
-#define ADD_NEW_DISK _IOW (MD_MAJOR, 0x21, mdu_disk_info_t)
-#define HOT_REMOVE_DISK _IO (MD_MAJOR, 0x22)
-#define SET_ARRAY_INFO _IOW (MD_MAJOR, 0x23, mdu_array_info_t)
-#define SET_DISK_INFO _IO (MD_MAJOR, 0x24)
-#define WRITE_RAID_INFO _IO (MD_MAJOR, 0x25)
-#define UNPROTECT_ARRAY _IO (MD_MAJOR, 0x26)
-#define PROTECT_ARRAY _IO (MD_MAJOR, 0x27)
-#define HOT_ADD_DISK _IO (MD_MAJOR, 0x28)
-#define SET_DISK_FAULTY _IO (MD_MAJOR, 0x29)
-#define HOT_GENERATE_ERROR _IO (MD_MAJOR, 0x2a)
-#define SET_BITMAP_FILE _IOW (MD_MAJOR, 0x2b, int)
+#include <uapi/linux/raid/md_u.h>
-/* usage */
-#define RUN_ARRAY _IOW (MD_MAJOR, 0x30, mdu_param_t)
-/* 0x31 was START_ARRAY */
-#define STOP_ARRAY _IO (MD_MAJOR, 0x32)
-#define STOP_ARRAY_RO _IO (MD_MAJOR, 0x33)
-#define RESTART_ARRAY_RW _IO (MD_MAJOR, 0x34)
-
-/* 63 partitions with the alternate major number (mdp) */
-#define MdpMinorShift 6
-#ifdef __KERNEL__
extern int mdp_major;
-#endif
-
-typedef struct mdu_version_s {
- int major;
- int minor;
- int patchlevel;
-} mdu_version_t;
-
-typedef struct mdu_array_info_s {
- /*
- * Generic constant information
- */
- int major_version;
- int minor_version;
- int patch_version;
- int ctime;
- int level;
- int size;
- int nr_disks;
- int raid_disks;
- int md_minor;
- int not_persistent;
-
- /*
- * Generic state information
- */
- int utime; /* 0 Superblock update time */
- int state; /* 1 State bits (clean, ...) */
- int active_disks; /* 2 Number of currently active disks */
- int working_disks; /* 3 Number of working disks */
- int failed_disks; /* 4 Number of failed disks */
- int spare_disks; /* 5 Number of spare disks */
-
- /*
- * Personality information
- */
- int layout; /* 0 the array's physical layout */
- int chunk_size; /* 1 chunk size in bytes */
-
-} mdu_array_info_t;
-
-/* non-obvious values for 'level' */
-#define LEVEL_MULTIPATH (-4)
-#define LEVEL_LINEAR (-1)
-#define LEVEL_FAULTY (-5)
-
-/* we need a value for 'no level specified' and 0
- * means 'raid0', so we need something else. This is
- * for internal use only
- */
-#define LEVEL_NONE (-1000000)
-
-typedef struct mdu_disk_info_s {
- /*
- * configuration/status of one particular disk
- */
- int number;
- int major;
- int minor;
- int raid_disk;
- int state;
-
-} mdu_disk_info_t;
-
-typedef struct mdu_start_info_s {
- /*
- * configuration/status of one particular disk
- */
- int major;
- int minor;
- int raid_disk;
- int state;
-
-} mdu_start_info_t;
-
-typedef struct mdu_bitmap_file_s
-{
- char pathname[4096];
-} mdu_bitmap_file_t;
-
-typedef struct mdu_param_s
-{
- int personality; /* 1,2,3,4 */
- int chunk_size; /* in bytes */
- int max_fault; /* unused for now */
-} mdu_param_t;
-
#endif
-
#ifndef _LINUX_RBTREE_AUGMENTED_H
#define _LINUX_RBTREE_AUGMENTED_H
+#include <linux/compiler.h>
#include <linux/rbtree.h>
/*
* struct rio_net - RIO network info
* @node: Node in global list of RIO networks
* @devices: List of devices in this network
+ * @switches: List of switches in this network
* @mports: List of master ports accessing this network
* @hport: Default port for accessing this network
* @id: RIO network ID
+ * @destid_table: destID allocation table
*/
struct rio_net {
struct list_head node; /* node in list of networks */
u16 debounce_rep; /* additional consecutive good readings
* required after the first two */
int gpio_pendown; /* the GPIO used to decide the pendown
- * state if get_pendown_state == NULL
- */
+ * state if get_pendown_state == NULL */
+ int gpio_pendown_debounce; /* platform specific debounce time for
+ * the gpio_pendown */
int (*get_pendown_state)(void);
int (*filter_init) (const struct ads7846_platform_data *pdata,
void **filter_data);
*
* Copyright (C) 2009-2010 Nokia Corporation
*
- * Contact: Aaro Koskinen <aaro.koskinen@nokia.com>
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
u8 do_early_retrans:1,/* Enable RFC5827 early-retransmit */
early_retrans_delayed:1, /* Delayed ER timer installed */
syn_data:1, /* SYN includes data */
- syn_fastopen:1; /* SYN includes Fast Open option */
+ syn_fastopen:1, /* SYN includes Fast Open option */
+ syn_data_acked:1;/* data in SYN is acked by SYN-ACK */
/* RTT measurement */
u32 srtt; /* smoothed round trip time << 3 */
# include <asm/uprobes.h>
#endif
-/* flags that denote/change uprobes behaviour */
-
-/* Have a copy of original instruction */
-#define UPROBE_COPY_INSN 0x1
-
-/* Dont run handlers when first register/ last unregister in progress*/
-#define UPROBE_RUN_HANDLER 0x2
-/* Can skip singlestep */
-#define UPROBE_SKIP_SSTEP 0x4
-
struct uprobe_consumer {
int (*handler)(struct uprobe_consumer *self, struct pt_regs *regs);
/*
#ifdef CONFIG_UPROBES
enum uprobe_task_state {
UTASK_RUNNING,
- UTASK_BP_HIT,
UTASK_SSTEP,
UTASK_SSTEP_ACK,
UTASK_SSTEP_TRAPPED,
ADV7604_OP_CH_SEL_RBG = 5,
};
-/* Primary mode (IO register 0x01, [3:0]) */
-enum adv7604_prim_mode {
- ADV7604_PRIM_MODE_COMP = 1,
- ADV7604_PRIM_MODE_RGB = 2,
- ADV7604_PRIM_MODE_HDMI_COMP = 5,
- ADV7604_PRIM_MODE_HDMI_GR = 6,
-};
-
/* Input Color Space (IO register 0x02, [7:4]) */
enum adv7604_inp_color_space {
ADV7604_INP_COLOR_SPACE_LIM_RGB = 0,
/* Bus rotation and reordering */
enum adv7604_op_ch_sel op_ch_sel;
- /* Primary mode */
- enum adv7604_prim_mode prim_mode;
-
/* Select output format */
enum adv7604_op_format_sel op_format_sel;
u8 i2c_vdp;
};
+/*
+ * Mode of operation.
+ * This is used as the input argument of the s_routing video op.
+ */
+enum adv7604_mode {
+ ADV7604_MODE_COMP,
+ ADV7604_MODE_GR,
+ ADV7604_MODE_HDMI,
+};
+
#define V4L2_CID_ADV_RX_ANALOG_SAMPLING_PHASE (V4L2_CID_DV_CLASS_BASE + 0x1000)
#define V4L2_CID_ADV_RX_FREE_RUN_COLOR_MANUAL (V4L2_CID_DV_CLASS_BASE + 0x1001)
#define V4L2_CID_ADV_RX_FREE_RUN_COLOR (V4L2_CID_DV_CLASS_BASE + 0x1002)
const u8 *ie;
size_t ie_len;
u16 reason_code;
+ bool local_state_change;
};
/**
unsigned int __attribute_const__ ieee80211_hdrlen(__le16 fc);
/**
+ * ieee80211_get_mesh_hdrlen - get mesh extension header length
+ * @meshhdr: the mesh extension header, only the flags field
+ * (first byte) will be accessed
+ * Returns the length of the extension header, which is always at
+ * least 6 bytes and at most 18 if addresses 5 and 6 are present.
+ */
+unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr);
+
+/**
* DOC: Data path helpers
*
* In addition to generic utilities, cfg80211 also offers
};
extern void xfrm_init(void);
-extern void xfrm4_init(int rt_hash_size);
+extern void xfrm4_init(void);
extern int xfrm_state_init(struct net *net);
extern void xfrm_state_fini(struct net *net);
extern void xfrm4_state_init(void);
* because we did a bus reset. */
unsigned use_10_for_rw:1; /* first try 10-byte read / write */
unsigned use_10_for_ms:1; /* first try 10-byte mode sense/select */
+ unsigned no_report_opcodes:1; /* no REPORT SUPPORTED OPERATION CODES */
+ unsigned no_write_same:1; /* no WRITE SAME command */
unsigned skip_ms_page_8:1; /* do not use MODE SENSE page 0x08 */
unsigned skip_ms_page_3f:1; /* do not use MODE SENSE page 0x3f */
unsigned skip_vpd_pages:1; /* do not read VPD pages */
int retries, struct scsi_sense_hdr *sshdr);
extern int scsi_get_vpd_page(struct scsi_device *, u8 page, unsigned char *buf,
int buf_len);
+extern int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer,
+ unsigned int len, unsigned char opcode);
extern int scsi_device_set_state(struct scsi_device *sdev,
enum scsi_device_state state);
extern struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
int shutdown; /* this card is going down */
int free_on_last_close; /* free in context of file_release */
wait_queue_head_t shutdown_sleep;
+ atomic_t refcount; /* refcount for disconnection */
struct device *dev; /* device assigned to this card */
struct device *card_dev; /* cardX object for sysfs */
const struct file_operations *f_ops; /* file operations */
void *private_data; /* private data for f_ops->open */
struct device *dev; /* device for sysfs */
+ struct snd_card *card_ptr; /* assigned card instance */
};
/* return a device pointer linked to each sound device as a parent */
int snd_component_add(struct snd_card *card, const char *component);
int snd_card_file_add(struct snd_card *card, struct file *file);
int snd_card_file_remove(struct snd_card *card, struct file *file);
+void snd_card_unref(struct snd_card *card);
#define snd_card_set_dev(card, devptr) ((card)->dev = (devptr))
DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_pin);
DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_unpin);
+TRACE_EVENT(xen_mmu_flush_tlb_all,
+ TP_PROTO(int x),
+ TP_ARGS(x),
+ TP_STRUCT__entry(__array(char, x, 0)),
+ TP_fast_assign((void)x),
+ TP_printk("%s", "")
+ );
+
TRACE_EVENT(xen_mmu_flush_tlb,
TP_PROTO(int x),
TP_ARGS(x),
#define EPOLL_CTL_ADD 1
#define EPOLL_CTL_DEL 2
#define EPOLL_CTL_MOD 3
-#define EPOLL_CTL_DISABLE 4
/*
* Request the handling of system wakeup events so as to prevent system suspends
#define OOM_SCORE_ADJ_MIN (-1000)
#define OOM_SCORE_ADJ_MAX 1000
+/*
+ * Setting /proc/<pid>/oom_adj to -17 protects the task from the oom
+ * killer; retained for legacy purposes.
+ */
+#define OOM_DISABLE (-17)
+/* inclusive */
+#define OOM_ADJUST_MIN (-16)
+#define OOM_ADJUST_MAX 15
+
#endif /* _UAPI__INCLUDE_LINUX_OOM_H */
# UAPI Header export list
+header-y += md_p.h
+header-y += md_u.h
--- /dev/null
+/*
+ md_u.h : user <=> kernel API between Linux raidtools and RAID drivers
+ Copyright (C) 1998 Ingo Molnar
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ You should have received a copy of the GNU General Public License
+ (for example /usr/src/linux/COPYING); if not, write to the Free
+ Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef _UAPI_MD_U_H
+#define _UAPI_MD_U_H
+
+/*
+ * Different major versions are not compatible.
+ * Different minor versions are only downward compatible.
+ * Different patchlevel versions are downward and upward compatible.
+ */
+#define MD_MAJOR_VERSION 0
+#define MD_MINOR_VERSION 90
+/*
+ * MD_PATCHLEVEL_VERSION indicates kernel functionality.
+ * >=1 means different superblock formats are selectable using SET_ARRAY_INFO
+ * and major_version/minor_version accordingly
+ * >=2 means that Internal bitmaps are supported by setting MD_SB_BITMAP_PRESENT
+ * in the super status byte
+ * >=3 means that bitmap superblock version 4 is supported, which uses
+ * little-endian representation rather than host-endian
+ */
+#define MD_PATCHLEVEL_VERSION 3
+
+/* ioctls */
+
+/* status */
+#define RAID_VERSION _IOR (MD_MAJOR, 0x10, mdu_version_t)
+#define GET_ARRAY_INFO _IOR (MD_MAJOR, 0x11, mdu_array_info_t)
+#define GET_DISK_INFO _IOR (MD_MAJOR, 0x12, mdu_disk_info_t)
+#define PRINT_RAID_DEBUG _IO (MD_MAJOR, 0x13)
+#define RAID_AUTORUN _IO (MD_MAJOR, 0x14)
+#define GET_BITMAP_FILE _IOR (MD_MAJOR, 0x15, mdu_bitmap_file_t)
+
+/* configuration */
+#define CLEAR_ARRAY _IO (MD_MAJOR, 0x20)
+#define ADD_NEW_DISK _IOW (MD_MAJOR, 0x21, mdu_disk_info_t)
+#define HOT_REMOVE_DISK _IO (MD_MAJOR, 0x22)
+#define SET_ARRAY_INFO _IOW (MD_MAJOR, 0x23, mdu_array_info_t)
+#define SET_DISK_INFO _IO (MD_MAJOR, 0x24)
+#define WRITE_RAID_INFO _IO (MD_MAJOR, 0x25)
+#define UNPROTECT_ARRAY _IO (MD_MAJOR, 0x26)
+#define PROTECT_ARRAY _IO (MD_MAJOR, 0x27)
+#define HOT_ADD_DISK _IO (MD_MAJOR, 0x28)
+#define SET_DISK_FAULTY _IO (MD_MAJOR, 0x29)
+#define HOT_GENERATE_ERROR _IO (MD_MAJOR, 0x2a)
+#define SET_BITMAP_FILE _IOW (MD_MAJOR, 0x2b, int)
+
+/* usage */
+#define RUN_ARRAY _IOW (MD_MAJOR, 0x30, mdu_param_t)
+/* 0x31 was START_ARRAY */
+#define STOP_ARRAY _IO (MD_MAJOR, 0x32)
+#define STOP_ARRAY_RO _IO (MD_MAJOR, 0x33)
+#define RESTART_ARRAY_RW _IO (MD_MAJOR, 0x34)
+
+/* 63 partitions with the alternate major number (mdp) */
+#define MdpMinorShift 6
+
+typedef struct mdu_version_s {
+ int major;
+ int minor;
+ int patchlevel;
+} mdu_version_t;
+
+typedef struct mdu_array_info_s {
+ /*
+ * Generic constant information
+ */
+ int major_version;
+ int minor_version;
+ int patch_version;
+ int ctime;
+ int level;
+ int size;
+ int nr_disks;
+ int raid_disks;
+ int md_minor;
+ int not_persistent;
+
+ /*
+ * Generic state information
+ */
+ int utime; /* 0 Superblock update time */
+ int state; /* 1 State bits (clean, ...) */
+ int active_disks; /* 2 Number of currently active disks */
+ int working_disks; /* 3 Number of working disks */
+ int failed_disks; /* 4 Number of failed disks */
+ int spare_disks; /* 5 Number of spare disks */
+
+ /*
+ * Personality information
+ */
+ int layout; /* 0 the array's physical layout */
+ int chunk_size; /* 1 chunk size in bytes */
+
+} mdu_array_info_t;
+
+/* non-obvious values for 'level' */
+#define LEVEL_MULTIPATH (-4)
+#define LEVEL_LINEAR (-1)
+#define LEVEL_FAULTY (-5)
+
+/* we need a value for 'no level specified' and 0
+ * means 'raid0', so we need something else. This is
+ * for internal use only
+ */
+#define LEVEL_NONE (-1000000)
+
+typedef struct mdu_disk_info_s {
+ /*
+ * configuration/status of one particular disk
+ */
+ int number;
+ int major;
+ int minor;
+ int raid_disk;
+ int state;
+
+} mdu_disk_info_t;
+
+typedef struct mdu_start_info_s {
+ /*
+ * configuration/status of one particular disk
+ */
+ int major;
+ int minor;
+ int raid_disk;
+ int state;
+
+} mdu_start_info_t;
+
+typedef struct mdu_bitmap_file_s
+{
+ char pathname[4096];
+} mdu_bitmap_file_t;
+
+typedef struct mdu_param_s
+{
+ int personality; /* 1,2,3,4 */
+ int chunk_size; /* in bytes */
+ int max_fault; /* unused for now */
+} mdu_param_t;
+
+#endif /* _UAPI_MD_U_H */
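Note: a minimal userspace sketch of how the status ioctls above are typically
driven; the device path and the lack of error handling are illustrative
assumptions, not part of this header:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/major.h>	/* MD_MAJOR */
	#include <linux/raid/md_u.h>

	int main(void)
	{
		mdu_version_t ver;
		mdu_array_info_t info;
		int fd = open("/dev/md0", O_RDONLY);	/* illustrative device */

		if (fd < 0)
			return 1;
		if (ioctl(fd, RAID_VERSION, &ver) == 0)
			printf("md driver %d.%d.%d\n",
			       ver.major, ver.minor, ver.patchlevel);
		if (ioctl(fd, GET_ARRAY_INFO, &info) == 0)
			printf("level %d, %d raid disks\n",
			       info.level, info.raid_disks);
		return 0;
	}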
#define TCPI_OPT_WSCALE 4
#define TCPI_OPT_ECN 8 /* ECN was negotiated at TCP session init */
#define TCPI_OPT_ECN_SEEN 16 /* we received at least one packet with ECT */
+#define TCPI_OPT_SYN_DATA 32 /* SYN-ACK acked data in SYN sent or rcvd */
enum tcp_ca_state {
TCP_CA_Open = 0,
unmap->dev_bus_addr = 0;
}
-int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes,
+int arch_gnttab_map_shared(xen_pfn_t *frames, unsigned long nr_gframes,
unsigned long max_nr_gframes,
void **__shared);
int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes,
#include <xen/interface/hvm/params.h>
#include <asm/xen/hypercall.h>
+static const char *param_name(int op)
+{
+#define PARAM(x) [HVM_PARAM_##x] = #x
+ static const char *const names[] = {
+ PARAM(CALLBACK_IRQ),
+ PARAM(STORE_PFN),
+ PARAM(STORE_EVTCHN),
+ PARAM(PAE_ENABLED),
+ PARAM(IOREQ_PFN),
+ PARAM(BUFIOREQ_PFN),
+ PARAM(TIMER_MODE),
+ PARAM(HPET_ENABLED),
+ PARAM(IDENT_PT),
+ PARAM(DM_DOMAIN),
+ PARAM(ACPI_S_STATE),
+ PARAM(VM86_TSS),
+ PARAM(VPT_ALIGN),
+ PARAM(CONSOLE_PFN),
+ PARAM(CONSOLE_EVTCHN),
+ };
+#undef PARAM
+
+ if (op >= ARRAY_SIZE(names))
+ return "unknown";
+
+ if (!names[op])
+ return "reserved";
+
+ return names[op];
+}
static inline int hvm_get_parameter(int idx, uint64_t *value)
{
struct xen_hvm_param xhv;
xhv.index = idx;
r = HYPERVISOR_hvm_op(HVMOP_get_param, &xhv);
if (r < 0) {
- printk(KERN_ERR "Cannot get hvm parameter %d: %d!\n",
- idx, r);
+ printk(KERN_ERR "Cannot get hvm parameter %s (%d): %d!\n",
+ param_name(idx), idx, r);
return r;
}
*value = xhv.value;
uint32_t nr_frames;
/* OUT parameters. */
int16_t status; /* GNTST_* */
- GUEST_HANDLE(ulong) frame_list;
+ GUEST_HANDLE(xen_pfn_t) frame_list;
};
DEFINE_GUEST_HANDLE_STRUCT(gnttab_setup_table);
};
DEFINE_GUEST_HANDLE_STRUCT(xen_add_to_physmap);
-/*
- * Translates a list of domain-specific GPFNs into MFNs. Returns a -ve error
- * code on failure. This call only works for auto-translated guests.
- */
-#define XENMEM_translate_gpfn_list 8
-struct xen_translate_gpfn_list {
- /* Which domain to translate for? */
- domid_t domid;
-
- /* Length of list. */
- xen_ulong_t nr_gpfns;
-
- /* List of GPFNs to translate. */
- GUEST_HANDLE(ulong) gpfn_list;
-
- /*
- * Output list to contain MFN translations. May be the same as the input
- * list (in which case each input GPFN is overwritten with the output MFN).
- */
- GUEST_HANDLE(ulong) mfn_list;
-};
-DEFINE_GUEST_HANDLE_STRUCT(xen_translate_gpfn_list);
+/*** REMOVED ***/
+/*#define XENMEM_translate_gpfn_list 8*/
/*
* Returns the pseudo-physical memory map as it was when the domain
{
}
+# if THREAD_SIZE >= PAGE_SIZE
void __init __weak thread_info_cache_init(void)
{
}
+#endif
/*
* Set up kernel memory allocators
@echo "###"
@echo "### If this takes a long time, you might wish to run rngd in the"
@echo "### background to keep the supply of entropy topped up. It"
- @echo "### needs to be run as root, and should use a hardware random"
- @echo "### number generator if one is available, eg:"
- @echo "###"
- @echo "### rngd -r /dev/hwrandom"
+ @echo "### needs to be run as root, and uses a hardware random"
+ @echo "### number generator if one is available."
@echo "###"
openssl req -new -nodes -utf8 $(sign_key_with_hash) -days 36500 -batch \
-x509 -config x509.genkey \
* trading it for newcg is protected by cgroup_mutex, we're safe to drop
* it here; it will be freed under RCU.
*/
- put_css_set(oldcg);
-
set_bit(CGRP_RELEASABLE, &oldcgrp->flags);
+ put_css_set(oldcg);
}
/**
*
* A pointer to the shared css_set was automatically copied in
* fork.c by dup_task_struct(). However, we ignore that copy, since
- * it was not made under the protection of RCU, cgroup_mutex or
- * threadgroup_change_begin(), so it might no longer be a valid
- * cgroup pointer. cgroup_attach_task() might have already changed
- * current->cgroups, allowing the previously referenced cgroup
- * group to be removed and freed.
- *
- * Outside the pointer validity we also need to process the css_set
- * inheritance between threadgoup_change_begin() and
- * threadgoup_change_end(), this way there is no leak in any process
- * wide migration performed by cgroup_attach_proc() that could otherwise
- * miss a thread because it is too early or too late in the fork stage.
+ * it was not made under the protection of RCU or cgroup_mutex, so
+ * might no longer be a valid cgroup pointer. cgroup_attach_task() might
+ * have already changed current->cgroups, allowing the previously
+ * referenced cgroup group to be removed and freed.
*
* At the point that cgroup_fork() is called, 'current' is the parent
* task, and the passed argument 'child' points to the child task.
*/
void cgroup_fork(struct task_struct *child)
{
- /*
- * We don't need to task_lock() current because current->cgroups
- * can't be changed concurrently here. The parent obviously hasn't
- * exited and called cgroup_exit(), and we are synchronized against
- * cgroup migration through threadgroup_change_begin().
- */
+ task_lock(current);
child->cgroups = current->cgroups;
get_css_set(child->cgroups);
+ task_unlock(current);
INIT_LIST_HEAD(&child->cg_list);
}
*/
if (use_task_css_set_links) {
write_lock(&css_set_lock);
- if (list_empty(&child->cg_list)) {
- /*
- * It's safe to use child->cgroups without task_lock()
- * here because we are protected through
- * threadgroup_change_begin() against concurrent
- * css_set change in cgroup_task_migrate(). Also
- * the task can't exit at that point until
- * wake_up_new_task() is called, so we are protected
- * against cgroup_exit() setting child->cgroup to
- * init_css_set.
- */
+ task_lock(child);
+ if (list_empty(&child->cg_list))
list_add(&child->cg_list, &child->cgroups->tasks);
- }
+ task_unlock(child);
write_unlock(&css_set_lock);
}
}
*/
static atomic_t uprobe_events = ATOMIC_INIT(0);
+/* Have a copy of the original instruction */
+#define UPROBE_COPY_INSN 0
+/* Don't run handlers while the first register / last unregister is in progress */
+#define UPROBE_RUN_HANDLER 1
+/* Can skip singlestep */
+#define UPROBE_SKIP_SSTEP 2
+
struct uprobe {
struct rb_node rb_node; /* node in the rb tree */
atomic_t ref;
struct rw_semaphore consumer_rwsem;
+ struct mutex copy_mutex; /* TODO: kill me and UPROBE_COPY_INSN */
struct list_head pending_list;
struct uprobe_consumer *consumers;
struct inode *inode; /* Also hold a ref to inode */
loff_t offset;
- int flags;
+ unsigned long flags;
struct arch_uprobe arch;
};
*/
static bool valid_vma(struct vm_area_struct *vma, bool is_register)
{
- if (!vma->vm_file)
- return false;
-
- if (!is_register)
- return true;
+ vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_SHARED;
- if ((vma->vm_flags & (VM_HUGETLB|VM_READ|VM_WRITE|VM_EXEC|VM_SHARED))
- == (VM_READ|VM_EXEC))
- return true;
+ if (is_register)
+ flags |= VM_WRITE;
- return false;
+ return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC;
}
static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset)
return *insn == UPROBE_SWBP_INSN;
}
+static void copy_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *opcode)
+{
+ void *kaddr = kmap_atomic(page);
+ memcpy(opcode, kaddr + (vaddr & ~PAGE_MASK), UPROBE_SWBP_INSN_SIZE);
+ kunmap_atomic(kaddr);
+}
+
+static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode)
+{
+ uprobe_opcode_t old_opcode;
+ bool is_swbp;
+
+ copy_opcode(page, vaddr, &old_opcode);
+ is_swbp = is_swbp_insn(&old_opcode);
+
+ if (is_swbp_insn(new_opcode)) {
+ if (is_swbp) /* register: already installed? */
+ return 0;
+ } else {
+ if (!is_swbp) /* unregister: was it changed by us? */
+ return 0;
+ }
+
+ return 1;
+}
+
/*
* NOTE:
* Expect the breakpoint instruction to be the smallest size instruction for
* the architecture. If an arch has variable length instruction and the
* breakpoint instruction is not of the smallest length instruction
- * supported by that architecture then we need to modify read_opcode /
+ * supported by that architecture then we need to modify is_swbp_at_addr and
* write_opcode accordingly. This would never be a problem for archs that
* have fixed length instructions.
*/
/*
* write_opcode - write the opcode at a given virtual address.
- * @auprobe: arch breakpointing information.
* @mm: the probed process address space.
* @vaddr: the virtual address to store the opcode.
* @opcode: opcode to be written at @vaddr.
* For mm @mm, write the opcode at @vaddr.
* Return 0 (success) or a negative errno.
*/
-static int write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
- unsigned long vaddr, uprobe_opcode_t opcode)
+static int write_opcode(struct mm_struct *mm, unsigned long vaddr,
+ uprobe_opcode_t opcode)
{
struct page *old_page, *new_page;
void *vaddr_old, *vaddr_new;
retry:
/* Read the page with vaddr into memory */
- ret = get_user_pages(NULL, mm, vaddr, 1, 0, 0, &old_page, &vma);
+ ret = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &old_page, &vma);
if (ret <= 0)
return ret;
+ ret = verify_opcode(old_page, vaddr, &opcode);
+ if (ret <= 0)
+ goto put_old;
+
ret = -ENOMEM;
new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
if (!new_page)
}
/**
- * read_opcode - read the opcode at a given virtual address.
- * @mm: the probed process address space.
- * @vaddr: the virtual address to read the opcode.
- * @opcode: location to store the read opcode.
- *
- * Called with mm->mmap_sem held (for read and with a reference to
- * mm.
- *
- * For mm @mm, read the opcode at @vaddr and store it in @opcode.
- * Return 0 (success) or a negative errno.
- */
-static int read_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t *opcode)
-{
- struct page *page;
- void *vaddr_new;
- int ret;
-
- ret = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &page, NULL);
- if (ret <= 0)
- return ret;
-
- vaddr_new = kmap_atomic(page);
- vaddr &= ~PAGE_MASK;
- memcpy(opcode, vaddr_new + vaddr, UPROBE_SWBP_INSN_SIZE);
- kunmap_atomic(vaddr_new);
-
- put_page(page);
-
- return 0;
-}
-
-static int is_swbp_at_addr(struct mm_struct *mm, unsigned long vaddr)
-{
- uprobe_opcode_t opcode;
- int result;
-
- if (current->mm == mm) {
- pagefault_disable();
- result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
- sizeof(opcode));
- pagefault_enable();
-
- if (likely(result == 0))
- goto out;
- }
-
- result = read_opcode(mm, vaddr, &opcode);
- if (result)
- return result;
-out:
- if (is_swbp_insn(&opcode))
- return 1;
-
- return 0;
-}
-
-/**
* set_swbp - store breakpoint at a given address.
* @auprobe: arch specific probepoint information.
* @mm: the probed process address space.
*/
int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
- int result;
- /*
- * See the comment near uprobes_hash().
- */
- result = is_swbp_at_addr(mm, vaddr);
- if (result == 1)
- return 0;
-
- if (result)
- return result;
-
- return write_opcode(auprobe, mm, vaddr, UPROBE_SWBP_INSN);
+ return write_opcode(mm, vaddr, UPROBE_SWBP_INSN);
}
/**
int __weak
set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
- int result;
-
- result = is_swbp_at_addr(mm, vaddr);
- if (!result)
- return -EINVAL;
-
- if (result != 1)
- return result;
-
- return write_opcode(auprobe, mm, vaddr, *(uprobe_opcode_t *)auprobe->insn);
+ return write_opcode(mm, vaddr, *(uprobe_opcode_t *)auprobe->insn);
}
static int match_uprobe(struct uprobe *l, struct uprobe *r)
spin_unlock(&uprobes_treelock);
/* For now assume that the instruction need not be single-stepped */
- uprobe->flags |= UPROBE_SKIP_SSTEP;
+ __set_bit(UPROBE_SKIP_SSTEP, &uprobe->flags);
return u;
}
uprobe->inode = igrab(inode);
uprobe->offset = offset;
init_rwsem(&uprobe->consumer_rwsem);
+ mutex_init(&uprobe->copy_mutex);
/* add to uprobes_tree, sorted on inode:offset */
cur_uprobe = insert_uprobe(uprobe);
{
struct uprobe_consumer *uc;
- if (!(uprobe->flags & UPROBE_RUN_HANDLER))
+ if (!test_bit(UPROBE_RUN_HANDLER, &uprobe->flags))
return;
down_read(&uprobe->consumer_rwsem);
return __copy_insn(mapping, filp, uprobe->arch.insn, bytes, uprobe->offset);
}
-/*
- * How mm->uprobes_state.count gets updated
- * uprobe_mmap() increments the count if
- * - it successfully adds a breakpoint.
- * - it cannot add a breakpoint, but sees that there is a underlying
- * breakpoint (via a is_swbp_at_addr()).
- *
- * uprobe_munmap() decrements the count if
- * - it sees a underlying breakpoint, (via is_swbp_at_addr)
- * (Subsequent uprobe_unregister wouldnt find the breakpoint
- * unless a uprobe_mmap kicks in, since the old vma would be
- * dropped just after uprobe_munmap.)
- *
- * uprobe_register increments the count if:
- * - it successfully adds a breakpoint.
- *
- * uprobe_unregister decrements the count if:
- * - it sees a underlying breakpoint and removes successfully.
- * (via is_swbp_at_addr)
- * (Subsequent uprobe_munmap wouldnt find the breakpoint
- * since there is no underlying breakpoint after the
- * breakpoint removal.)
- */
+static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
+ struct mm_struct *mm, unsigned long vaddr)
+{
+ int ret = 0;
+
+ if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
+ return ret;
+
+ mutex_lock(&uprobe->copy_mutex);
+ if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
+ goto out;
+
+ ret = copy_insn(uprobe, file);
+ if (ret)
+ goto out;
+
+ ret = -ENOTSUPP;
+ if (is_swbp_insn((uprobe_opcode_t *)uprobe->arch.insn))
+ goto out;
+
+ ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
+ if (ret)
+ goto out;
+
+ /* write_opcode() assumes we don't cross page boundary */
+ BUG_ON((uprobe->offset & ~PAGE_MASK) +
+ UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);
+
+	smp_wmb(); /* pairs with the rmb() in handle_swbp() */
+ set_bit(UPROBE_COPY_INSN, &uprobe->flags);
+
+ out:
+ mutex_unlock(&uprobe->copy_mutex);
+
+ return ret;
+}
+
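Note: prepare_uprobe() above is the classic double-checked pattern: a
lock-free test_bit() fast path, a re-check under copy_mutex, and an smp_wmb()
ordering the copied instruction before the UPROBE_COPY_INSN store so that
lock-free readers observe both. A generic sketch of the idiom;
setup_data()/use_data() are placeholders, and it assumes set_bit()/test_bit()
alone impose no ordering:

	/* writer */
	mutex_lock(&lock);
	if (!test_bit(READY, &flags)) {
		setup_data();		/* publish the payload first */
		smp_wmb();		/* order payload before the flag */
		set_bit(READY, &flags);
	}
	mutex_unlock(&lock);

	/* lock-free reader */
	if (test_bit(READY, &flags)) {
		smp_rmb();		/* pairs with the wmb above */
		use_data();
	}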
static int
install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
struct vm_area_struct *vma, unsigned long vaddr)
if (!uprobe->consumers)
return 0;
- if (!(uprobe->flags & UPROBE_COPY_INSN)) {
- ret = copy_insn(uprobe, vma->vm_file);
- if (ret)
- return ret;
-
- if (is_swbp_insn((uprobe_opcode_t *)uprobe->arch.insn))
- return -ENOTSUPP;
-
- ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
- if (ret)
- return ret;
-
- /* write_opcode() assumes we don't cross page boundary */
- BUG_ON((uprobe->offset & ~PAGE_MASK) +
- UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);
-
- uprobe->flags |= UPROBE_COPY_INSN;
- }
+ ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr);
+ if (ret)
+ return ret;
/*
* set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier(),
return ret;
}
-static void
+static int
remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
{
/* can happen if uprobe_register() fails */
if (!test_bit(MMF_HAS_UPROBES, &mm->flags))
- return;
+ return 0;
set_bit(MMF_RECALC_UPROBES, &mm->flags);
- set_orig_insn(&uprobe->arch, mm, vaddr);
+ return set_orig_insn(&uprobe->arch, mm, vaddr);
}
/*
struct mm_struct *mm = info->mm;
struct vm_area_struct *vma;
- if (err)
+ if (err && is_register)
goto free;
down_write(&mm->mmap_sem);
if (is_register)
err = install_breakpoint(uprobe, mm, vma, info->vaddr);
else
- remove_breakpoint(uprobe, mm, info->vaddr);
+ err |= remove_breakpoint(uprobe, mm, info->vaddr);
unlock:
up_write(&mm->mmap_sem);
mutex_lock(uprobes_hash(inode));
uprobe = alloc_uprobe(inode, offset);
- if (uprobe && !consumer_add(uprobe, uc)) {
+ if (!uprobe) {
+ ret = -ENOMEM;
+ } else if (!consumer_add(uprobe, uc)) {
ret = __uprobe_register(uprobe);
if (ret) {
uprobe->consumers = NULL;
__uprobe_unregister(uprobe);
} else {
- uprobe->flags |= UPROBE_RUN_HANDLER;
+ set_bit(UPROBE_RUN_HANDLER, &uprobe->flags);
}
}
if (consumer_del(uprobe, uc)) {
if (!uprobe->consumers) {
__uprobe_unregister(uprobe);
- uprobe->flags &= ~UPROBE_RUN_HANDLER;
+ clear_bit(UPROBE_RUN_HANDLER, &uprobe->flags);
}
}
*/
static bool can_skip_sstep(struct uprobe *uprobe, struct pt_regs *regs)
{
- if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
- return true;
-
- uprobe->flags &= ~UPROBE_SKIP_SSTEP;
+ if (test_bit(UPROBE_SKIP_SSTEP, &uprobe->flags)) {
+ if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
+ return true;
+ clear_bit(UPROBE_SKIP_SSTEP, &uprobe->flags);
+ }
return false;
}
clear_bit(MMF_HAS_UPROBES, &mm->flags);
}
+static int is_swbp_at_addr(struct mm_struct *mm, unsigned long vaddr)
+{
+ struct page *page;
+ uprobe_opcode_t opcode;
+ int result;
+
+ pagefault_disable();
+ result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
+ sizeof(opcode));
+ pagefault_enable();
+
+ if (likely(result == 0))
+ goto out;
+
+ result = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &page, NULL);
+ if (result < 0)
+ return result;
+
+ copy_opcode(page, vaddr, &opcode);
+ put_page(page);
+ out:
+ return is_swbp_insn(&opcode);
+}
+
static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
{
struct mm_struct *mm = current->mm;
}
return;
}
+ /*
+ * TODO: move copy_insn/etc into _register and remove this hack.
+ * After we hit the bp, _unregister + _register can install the
+ * new and not-yet-analyzed uprobe at the same address, restart.
+ */
+	smp_rmb(); /* pairs with the wmb() in prepare_uprobe() */
+ if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags)))
+ goto restart;
utask = current->utask;
if (!utask) {
utask = add_utask();
/* Cannot allocate; re-execute the instruction. */
if (!utask)
- goto cleanup_ret;
+ goto restart;
}
- utask->active_uprobe = uprobe;
+
handler_chain(uprobe, regs);
- if (uprobe->flags & UPROBE_SKIP_SSTEP && can_skip_sstep(uprobe, regs))
- goto cleanup_ret;
+ if (can_skip_sstep(uprobe, regs))
+ goto out;
- utask->state = UTASK_SSTEP;
if (!pre_ssout(uprobe, regs, bp_vaddr)) {
arch_uprobe_enable_step(&uprobe->arch);
+ utask->active_uprobe = uprobe;
+ utask->state = UTASK_SSTEP;
return;
}
-cleanup_ret:
- if (utask) {
- utask->active_uprobe = NULL;
- utask->state = UTASK_RUNNING;
- }
- if (!(uprobe->flags & UPROBE_SKIP_SSTEP))
-
- /*
- * cannot singlestep; cannot skip instruction;
- * re-execute the instruction.
- */
- instruction_pointer_set(regs, bp_vaddr);
-
+restart:
+ /*
+ * cannot singlestep; cannot skip instruction;
+ * re-execute the instruction.
+ */
+ instruction_pointer_set(regs, bp_vaddr);
+out:
put_uprobe(uprobe);
}
}
/*
- * On breakpoint hit, breakpoint notifier sets the TIF_UPROBE flag. (and on
- * subsequent probe hits on the thread sets the state to UTASK_BP_HIT) and
- * allows the thread to return from interrupt.
+ * On breakpoint hit, breakpoint notifier sets the TIF_UPROBE flag and
+ * allows the thread to return from interrupt. After that handle_swbp()
+ * sets utask->active_uprobe.
*
- * On singlestep exception, singlestep notifier sets the TIF_UPROBE flag and
- * also sets the state to UTASK_SSTEP_ACK and allows the thread to return from
- * interrupt.
+ * On singlestep exception, singlestep notifier sets the TIF_UPROBE flag
+ * and allows the thread to return from interrupt.
*
* While returning to userspace, thread notices the TIF_UPROBE flag and calls
* uprobe_notify_resume().
{
struct uprobe_task *utask;
+ clear_thread_flag(TIF_UPROBE);
+
utask = current->utask;
- if (!utask || utask->state == UTASK_BP_HIT)
- handle_swbp(regs);
- else
+ if (utask && utask->active_uprobe)
handle_singlestep(utask, regs);
+ else
+ handle_swbp(regs);
}
/*
*/
int uprobe_pre_sstep_notifier(struct pt_regs *regs)
{
- struct uprobe_task *utask;
-
if (!current->mm || !test_bit(MMF_HAS_UPROBES, ¤t->mm->flags))
return 0;
- utask = current->utask;
- if (utask)
- utask->state = UTASK_BP_HIT;
-
set_thread_flag(TIF_UPROBE);
-
return 1;
}
struct futex_pi_state **ps,
struct task_struct *task, int set_waiters)
{
- int lock_taken, ret, ownerdied = 0;
+ int lock_taken, ret, force_take = 0;
u32 uval, newval, curval, vpid = task_pid_vnr(task);
retry:
newval = curval | FUTEX_WAITERS;
/*
- * There are two cases, where a futex might have no owner (the
- * owner TID is 0): OWNER_DIED. We take over the futex in this
- * case. We also do an unconditional take over, when the owner
- * of the futex died.
- *
- * This is safe as we are protected by the hash bucket lock !
+ * Should we force take the futex? See below.
*/
- if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) {
- /* Keep the OWNER_DIED bit */
+ if (unlikely(force_take)) {
+ /*
+ * Keep the OWNER_DIED and the WAITERS bit and set the
+ * new TID value.
+ */
newval = (curval & ~FUTEX_TID_MASK) | vpid;
- ownerdied = 0;
+ force_take = 0;
lock_taken = 1;
}
goto retry;
/*
- * We took the lock due to owner died take over.
+	 * We took the lock due to a forced takeover.
*/
if (unlikely(lock_taken))
return 1;
switch (ret) {
case -ESRCH:
/*
- * No owner found for this futex. Check if the
- * OWNER_DIED bit is set to figure out whether
- * this is a robust futex or not.
+ * We failed to find an owner for this
+ * futex. So we have no pi_state to block
+ * on. This can happen in two cases:
+ *
+ * 1) The owner died
+ * 2) A stale FUTEX_WAITERS bit
+ *
+ * Re-read the futex value.
*/
if (get_futex_value_locked(&curval, uaddr))
return -EFAULT;
/*
- * We simply start over in case of a robust
- * futex. The code above will take the futex
- * and return happy.
+				 * If the owner died or we have a stale
+				 * WAITERS bit, the owner TID in the user space
+				 * futex is 0.
*/
- if (curval & FUTEX_OWNER_DIED) {
- ownerdied = 1;
+ if (!(curval & FUTEX_TID_MASK)) {
+ force_take = 1;
goto retry;
}
default:
{
struct task_struct *p = q->task;
+ if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))
+ return;
+
/*
* We set q->lock_ptr = NULL _before_ we wake up the task. If
* a non-futex wake up happens on another CPU then the task
plist_for_each_entry_safe(this, next, head, list) {
if (match_futex (&this->key, &key1)) {
+ if (this->pi_state || this->rt_waiter) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
wake_futex(this);
if (++ret >= nr_wake)
break;
op_ret = 0;
plist_for_each_entry_safe(this, next, head, list) {
if (match_futex (&this->key, &key2)) {
+ if (this->pi_state || this->rt_waiter) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
wake_futex(this);
if (++op_ret >= nr_wake2)
break;
ret += op_ret;
}
+out_unlock:
double_unlock_hb(hb1, hb2);
out_put_keys:
put_futex_key(&key2);
/*
* FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always
* be paired with each other and no other futex ops.
+ *
+ * We should never be requeueing a futex_q with a pi_state,
+ * which is awaiting a futex_unlock_pi().
*/
if ((requeue_pi && !this->rt_waiter) ||
- (!requeue_pi && this->rt_waiter)) {
+ (!requeue_pi && this->rt_waiter) ||
+ this->pi_state) {
ret = -EINVAL;
break;
}
src = (void *)info->hdr + symsect->sh_offset;
nsrc = symsect->sh_size / sizeof(*src);
+ /* strtab always starts with a nul, so offset 0 is the empty string. */
+ strtab_size = 1;
+
/* Compute total space required for the core symbols' strtab. */
- for (ndst = i = strtab_size = 1; i < nsrc; ++i, ++src)
- if (is_core_symbol(src, info->sechdrs, info->hdr->e_shnum)) {
- strtab_size += strlen(&info->strtab[src->st_name]) + 1;
+ for (ndst = i = 0; i < nsrc; i++) {
+ if (i == 0 ||
+ is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) {
+ strtab_size += strlen(&info->strtab[src[i].st_name])+1;
ndst++;
}
+ }
/* Append room for core symbols at end of core part. */
info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
mod->core_symtab = dst = mod->module_core + info->symoffs;
mod->core_strtab = s = mod->module_core + info->stroffs;
src = mod->symtab;
- *dst = *src;
*s++ = 0;
- for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
- if (!is_core_symbol(src, info->sechdrs, info->hdr->e_shnum))
- continue;
-
- dst[ndst] = *src;
- dst[ndst++].st_name = s - mod->core_strtab;
- s += strlcpy(s, &mod->strtab[src->st_name], KSYM_NAME_LEN) + 1;
+ for (ndst = i = 0; i < mod->num_symtab; i++) {
+ if (i == 0 ||
+ is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) {
+ dst[ndst] = src[i];
+ dst[ndst++].st_name = s - mod->core_strtab;
+ s += strlcpy(s, &mod->strtab[src[i].st_name],
+ KSYM_NAME_LEN) + 1;
+ }
}
mod->core_num_syms = ndst;
}
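Note: to make the layout these loops build concrete (the symbol names are
illustrative): packing "init_foo" and "bar" after the leading nul gives the
core strtab bytes below, with each st_name holding the offset of its string:

	/* offset:  0    1..9          10..13             */
	/* bytes:  '\0'  "init_foo\0"  "bar\0"            */
	dst[0].st_name = 0;	/* the empty string at offset 0 */
	dst[1].st_name = 1;	/* "init_foo" */
	dst[2].st_name = 10;	/* "bar" */
	/* strtab_size = 1 + 9 + 4 = 14 bytes */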
size_t modlen = *_modlen, sig_len;
int ret;
- pr_devel("==>%s(,%lu)\n", __func__, modlen);
+ pr_devel("==>%s(,%zu)\n", __func__, modlen);
if (modlen <= sizeof(ms))
return -EBADMSG;
return NULL;
}
+/* MAX_PID_NS_LEVEL is needed for limiting the size of 'struct pid' */
+#define MAX_PID_NS_LEVEL 32
+
static struct pid_namespace *create_pid_namespace(struct pid_namespace *parent_pid_ns)
{
struct pid_namespace *ns;
unsigned int level = parent_pid_ns->level + 1;
- int i, err = -ENOMEM;
+ int i;
+ int err;
+
+ if (level > MAX_PID_NS_LEVEL) {
+ err = -EINVAL;
+ goto out;
+ }
+ err = -ENOMEM;
ns = kmem_cache_zalloc(pid_ns_cachep, GFP_KERNEL);
if (ns == NULL)
goto out;
put_online_cpus();
} else {
+		/* Make sure this CPU has been initialized */
+ if (!cpumask_test_cpu(cpu_id, buffer->cpumask))
+ goto out;
+
cpu_buffer = buffer->buffers[cpu_id];
if (nr_pages == cpu_buffer->nr_pages)
return cpu_clock(this_cpu) >> 30LL; /* 2^30 ~= 10^9 */
}
-static unsigned long get_sample_period(void)
+static u64 get_sample_period(void)
{
/*
* convert watchdog_thresh from seconds to ns
* and hard thresholds) to increment before the
* hardlockup detector generates a warning
*/
- return get_softlockup_thresh() * (NSEC_PER_SEC / 5);
+ return get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
}
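Note, worked through: the sample period is thresh * NSEC_PER_SEC / 5, i.e.
five samples per lockup window. Without the (u64) cast the multiplication
happens in unsigned long, which is 32 bits on many machines; any returned
thresh of 22 seconds or more then wraps:

	/* 32-bit unsigned long: ULONG_MAX = 4,294,967,295            */
	/* thresh = 21:  21 * 200000000 = 4,200,000,000  (just fits)  */
	/* thresh = 22:  22 * 200000000 = 4,400,000,000  (wraps)      */
	/* the (u64) promotion computes the product in 64 bits instead */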
/* Commands for resetting the watchdog */
set_work_cpu_and_clear_pending(&dwork->work, work_cpu(&dwork->work));
local_irq_restore(flags);
- return true;
+ return ret;
}
EXPORT_SYMBOL(cancel_delayed_work);
struct gen_pool_chunk *chunk;
int nbits = size >> pool->min_alloc_order;
int nbytes = sizeof(struct gen_pool_chunk) +
- (nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE;
+ BITS_TO_LONGS(nbits) * sizeof(long);
chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid);
if (unlikely(chunk == NULL))
************** MIPS *****************
***************************************/
#if defined(__mips__) && W_TYPE_SIZE == 32
-#if __GNUC__ > 2 || __GNUC_MINOR__ >= 7
+#if __GNUC__ >= 4 && __GNUC_MINOR__ >= 4
+#define umul_ppmm(w1, w0, u, v) \
+do { \
+ UDItype __ll = (UDItype)(u) * (v); \
+ w1 = __ll >> 32; \
+ w0 = __ll; \
+} while (0)
+#elif __GNUC__ > 2 || __GNUC_MINOR__ >= 7
#define umul_ppmm(w1, w0, u, v) \
__asm__ ("multu %2,%3" \
: "=l" ((USItype)(w0)), \
************** MIPS/64 **************
***************************************/
#if (defined(__mips) && __mips >= 3) && W_TYPE_SIZE == 64
-#if __GNUC__ > 2 || __GNUC_MINOR__ >= 7
+#if __GNUC__ >= 4 && __GNUC_MINOR__ >= 4
+#define umul_ppmm(w1, w0, u, v) \
+do { \
+ typedef unsigned int __ll_UTItype __attribute__((mode(TI))); \
+ __ll_UTItype __ll = (__ll_UTItype)(u) * (v); \
+ w1 = __ll >> 64; \
+ w0 = __ll; \
+} while (0)
+#elif __GNUC__ > 2 || __GNUC_MINOR__ >= 7
#define umul_ppmm(w1, w0, u, v) \
__asm__ ("dmultu %2,%3" \
: "=l" ((UDItype)(w0)), \
int order = ilog2(BITS_PER_LONG);
__free_pages_bootmem(pfn_to_page(start), order);
- fixup_zone_present_pages(page_to_nid(pfn_to_page(start)),
- start, start + BITS_PER_LONG);
count += BITS_PER_LONG;
start += BITS_PER_LONG;
} else {
if (vec & 1) {
page = pfn_to_page(start + off);
__free_pages_bootmem(page, 0);
- fixup_zone_present_pages(
- page_to_nid(page),
- start + off, start + off + 1);
count++;
}
vec >>= 1;
pages = bdata->node_low_pfn - bdata->node_min_pfn;
pages = bootmem_bootmap_pages(pages);
count += pages;
- while (pages--) {
- fixup_zone_present_pages(page_to_nid(page),
- page_to_pfn(page), page_to_pfn(page) + 1);
+ while (pages--)
__free_pages_bootmem(page++, 0);
- }
bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count);
{
unsigned long addr = (unsigned long)vaddr;
- if (addr >= PKMAP_ADDR(0) && addr <= PKMAP_ADDR(LAST_PKMAP)) {
+ if (addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP)) {
int i = (addr - PKMAP_ADDR(0)) >> PAGE_SHIFT;
return pte_page(pkmap_page_table[i]);
}
return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
}
+void __init_memblock memblock_trim_memory(phys_addr_t align)
+{
+ int i;
+ phys_addr_t start, end, orig_start, orig_end;
+ struct memblock_type *mem = &memblock.memory;
+
+ for (i = 0; i < mem->cnt; i++) {
+ orig_start = mem->regions[i].base;
+ orig_end = mem->regions[i].base + mem->regions[i].size;
+ start = round_up(orig_start, align);
+ end = round_down(orig_end, align);
+
+ if (start == orig_start && end == orig_end)
+ continue;
+
+ if (start < end) {
+ mem->regions[i].base = start;
+ mem->regions[i].size = end - start;
+ } else {
+ memblock_remove_region(mem, i);
+ i--;
+ }
+ }
+}
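Note: a hedged usage sketch; an architecture's early boot code might call
this right after registering the firmware memory map, so that partially
usable pages never reach the buddy allocator (the call site below is an
assumption, not part of this hunk):

	/* arch setup, after memblock_add()ing all firmware ranges */
	memblock_trim_memory(PAGE_SIZE);	/* drop sub-page head/tail */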
void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
struct mem_cgroup *memcg)
{
struct mem_cgroup_per_zone *mz;
+ struct lruvec *lruvec;
- if (mem_cgroup_disabled())
- return &zone->lruvec;
+ if (mem_cgroup_disabled()) {
+ lruvec = &zone->lruvec;
+ goto out;
+ }
mz = mem_cgroup_zoneinfo(memcg, zone_to_nid(zone), zone_idx(zone));
- return &mz->lruvec;
+ lruvec = &mz->lruvec;
+out:
+ /*
+ * Since a node can be onlined after the mem_cgroup was created,
+ * we have to be prepared to initialize lruvec->zone here;
+ * and if offlined then reonlined, we need to reinitialize it.
+ */
+ if (unlikely(lruvec->zone != zone))
+ lruvec->zone = zone;
+ return lruvec;
}
/*
struct mem_cgroup_per_zone *mz;
struct mem_cgroup *memcg;
struct page_cgroup *pc;
+ struct lruvec *lruvec;
- if (mem_cgroup_disabled())
- return &zone->lruvec;
+ if (mem_cgroup_disabled()) {
+ lruvec = &zone->lruvec;
+ goto out;
+ }
pc = lookup_page_cgroup(page);
memcg = pc->mem_cgroup;
pc->mem_cgroup = memcg = root_mem_cgroup;
mz = page_cgroup_zoneinfo(memcg, page);
- return &mz->lruvec;
+ lruvec = &mz->lruvec;
+out:
+ /*
+ * Since a node can be onlined after the mem_cgroup was created,
+ * we have to be prepared to initialize lruvec->zone here;
+ * and if offlined then reonlined, we need to reinitialize it.
+ */
+ if (unlikely(lruvec->zone != zone))
+ lruvec->zone = zone;
+ return lruvec;
}
/**
static u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
{
u64 limit;
- u64 memsw;
limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
- limit += total_swap_pages << PAGE_SHIFT;
- memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
/*
- * If memsw is finite and limits the amount of swap space available
- * to this memcg, return that limit.
+ * Do not consider swap space if we cannot swap due to swappiness
*/
- return min(limit, memsw);
+ if (mem_cgroup_swappiness(memcg)) {
+ u64 memsw;
+
+ limit += total_swap_pages << PAGE_SHIFT;
+ memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
+
+ /*
+ * If memsw is finite and limits the amount of swap space
+ * available to this memcg, return that limit.
+ */
+ limit = min(limit, memsw);
+ }
+
+ return limit;
}
void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
static bool mem_cgroup_force_empty_list(struct mem_cgroup *memcg,
int node, int zid, enum lru_list lru)
{
- struct mem_cgroup_per_zone *mz;
+ struct lruvec *lruvec;
unsigned long flags, loop;
struct list_head *list;
struct page *busy;
struct zone *zone;
zone = &NODE_DATA(node)->node_zones[zid];
- mz = mem_cgroup_zoneinfo(memcg, node, zid);
- list = &mz->lruvec.lists[lru];
+ lruvec = mem_cgroup_zone_lruvec(zone, memcg);
+ list = &lruvec->lists[lru];
- loop = mz->lru_size[lru];
+ loop = mem_cgroup_get_lru_size(lruvec, lru);
/* give some margin against EBUSY etc...*/
loop += 256;
busy = NULL;
for (zone = 0; zone < MAX_NR_ZONES; zone++) {
mz = &pn->zoneinfo[zone];
- lruvec_init(&mz->lruvec, &NODE_DATA(node)->node_zones[zone]);
+ lruvec_init(&mz->lruvec);
mz->usage_in_excess = 0;
mz->on_tree = false;
mz->memcg = memcg;
{
int ret;
unsigned long pfn = page_to_pfn(page);
+ struct page *hpage = compound_trans_head(page);
if (PageHuge(page))
return soft_offline_huge_page(page, flags);
+ if (PageTransHuge(hpage)) {
+ if (PageAnon(hpage) && unlikely(split_huge_page(hpage))) {
+ pr_info("soft offline: %#lx: failed to split THP\n",
+ pfn);
+ return -EBUSY;
+ }
+ }
ret = get_any_page(page, pfn, flags);
if (ret < 0)
int ret = 0;
int page_mkwrite = 0;
struct page *dirty_page = NULL;
- unsigned long mmun_start; /* For mmu_notifiers */
- unsigned long mmun_end; /* For mmu_notifiers */
- bool mmun_called = false; /* For mmu_notifiers */
+ unsigned long mmun_start = 0; /* For mmu_notifiers */
+ unsigned long mmun_end = 0; /* For mmu_notifiers */
old_page = vm_normal_page(vma, address, orig_pte);
if (!old_page) {
goto oom_free_new;
mmun_start = address & PAGE_MASK;
- mmun_end = (address & PAGE_MASK) + PAGE_SIZE;
- mmun_called = true;
+ mmun_end = mmun_start + PAGE_SIZE;
mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
/*
page_cache_release(new_page);
unlock:
pte_unmap_unlock(page_table, ptl);
- if (mmun_called)
+ if (mmun_end > mmun_start)
mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
if (old_page) {
/*
void __ref put_page_bootmem(struct page *page)
{
unsigned long type;
- struct zone *zone;
type = (unsigned long) page->lru.next;
BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
set_page_private(page, 0);
INIT_LIST_HEAD(&page->lru);
__free_pages_bootmem(page, 0);
-
- zone = page_zone(page);
- zone_span_writelock(zone);
- zone->present_pages++;
- zone_span_writeunlock(zone);
- totalram_pages++;
}
}
struct vm_area_struct *vma = mm->mmap;
while (vma) {
struct anon_vma_chain *avc;
+ vma_lock_anon_vma(vma);
list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
anon_vma_interval_tree_verify(avc);
+ vma_unlock_anon_vma(vma);
vma = vma->vm_next;
i++;
}
BUG_ON(atomic_read(&mm->mm_users) <= 0);
/*
- * Verify that mmu_notifier_init() already run and the global srcu is
- * initialized.
- */
+ * Verify that mmu_notifier_init() already run and the global srcu is
+ * initialized.
+ */
BUG_ON(!srcu.per_cpu_ref);
+ ret = -ENOMEM;
+ mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
+ if (unlikely(!mmu_notifier_mm))
+ goto out;
+
if (take_mmap_sem)
down_write(&mm->mmap_sem);
ret = mm_take_all_locks(mm);
if (unlikely(ret))
- goto out;
+ goto out_clean;
if (!mm_has_notifiers(mm)) {
- mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm),
- GFP_KERNEL);
- if (unlikely(!mmu_notifier_mm)) {
- ret = -ENOMEM;
- goto out_of_mem;
- }
INIT_HLIST_HEAD(&mmu_notifier_mm->list);
spin_lock_init(&mmu_notifier_mm->lock);
mm->mmu_notifier_mm = mmu_notifier_mm;
+ mmu_notifier_mm = NULL;
}
atomic_inc(&mm->mm_count);
hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
spin_unlock(&mm->mmu_notifier_mm->lock);
-out_of_mem:
mm_drop_all_locks(mm);
-out:
+out_clean:
if (take_mmap_sem)
up_write(&mm->mmap_sem);
-
+ kfree(mmu_notifier_mm);
+out:
BUG_ON(atomic_read(&mm->mm_users) <= 0);
return ret;
}
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
-void lruvec_init(struct lruvec *lruvec, struct zone *zone)
+void lruvec_init(struct lruvec *lruvec)
{
enum lru_list lru;
for_each_lru(lru)
INIT_LIST_HEAD(&lruvec->lists[lru]);
-
-#ifdef CONFIG_MEMCG
- lruvec->zone = zone;
-#endif
}
return 0;
__free_pages_memory(start_pfn, end_pfn);
- fixup_zone_present_pages(pfn_to_nid(start >> PAGE_SHIFT),
- start_pfn, end_pfn);
return end_pfn - start_pfn;
}
phys_addr_t start, end, size;
u64 i;
- reset_zone_present_pages();
for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL)
count += __free_memory_core(start, end);
mt = get_pageblock_migratetype(page);
if (unlikely(mt != MIGRATE_ISOLATE))
- __mod_zone_freepage_state(zone, -(1UL << order), mt);
+ __mod_zone_freepage_state(zone, -(1UL << alloc_order), mt);
if (alloc_order != order)
expand(zone, page, alloc_order, order,
}
}
- return 1UL << order;
+ return 1UL << alloc_order;
}
/*
int i;
for_each_online_node(i)
- if (node_distance(nid, i) <= RECLAIM_DISTANCE) {
+ if (node_distance(nid, i) <= RECLAIM_DISTANCE)
node_set(i, NODE_DATA(nid)->reclaim_nodes);
+ else
zone_reclaim_mode = 1;
- }
}
#else /* CONFIG_NUMA */
return !!(gfp_to_alloc_flags(gfp_mask) & ALLOC_NO_WATERMARKS);
}
+/* Returns true if the allocation is likely for THP */
+static bool is_thp_alloc(gfp_t gfp_mask, unsigned int order)
+{
+ if (order == pageblock_order &&
+ (gfp_mask & (__GFP_MOVABLE|__GFP_REPEAT)) == __GFP_MOVABLE)
+ return true;
+ return false;
+}
+
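Note: assuming the usual GFP_TRANSHUGE composition (built on
GFP_HIGHUSER_MOVABLE, without __GFP_REPEAT) and a THP order equal to
pageblock_order, a transparent-huge-page fault satisfies both tests:

	/* illustrative: a huge-page fault allocation */
	struct page *page = alloc_pages(GFP_TRANSHUGE, pageblock_order);
	/* order == pageblock_order and the mask carries __GFP_MOVABLE
	 * but not __GFP_REPEAT, so is_thp_alloc() returns true here
	 */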
static inline struct page *
__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
struct zonelist *zonelist, enum zone_type high_zoneidx,
goto nopage;
restart:
- wake_all_kswapd(order, zonelist, high_zoneidx,
+ /* The decision whether to wake kswapd for THP is made later */
+ if (!is_thp_alloc(gfp_mask, order))
+ wake_all_kswapd(order, zonelist, high_zoneidx,
zone_idx(preferred_zone));
/*
goto got_pg;
sync_migration = true;
- /*
- * If compaction is deferred for high-order allocations, it is because
- * sync compaction recently failed. In this is the case and the caller
- * requested a movable allocation that does not heavily disrupt the
- * system then fail the allocation instead of entering direct reclaim.
- */
- if ((deferred_compaction || contended_compaction) &&
- (gfp_mask & (__GFP_MOVABLE|__GFP_REPEAT)) == __GFP_MOVABLE)
- goto nopage;
+ if (is_thp_alloc(gfp_mask, order)) {
+ /*
+ * If compaction is deferred for high-order allocations, it is
+ * because sync compaction recently failed. If this is the case
+ * and the caller requested a movable allocation that does not
+ * heavily disrupt the system then fail the allocation instead
+ * of entering direct reclaim.
+ */
+ if (deferred_compaction || contended_compaction)
+ goto nopage;
+
+ /* If process is willing to reclaim/compact then wake kswapd */
+ wake_all_kswapd(order, zonelist, high_zoneidx,
+ zone_idx(preferred_zone));
+ }
/* Try direct reclaim and then allocating */
page = __alloc_pages_direct_reclaim(gfp_mask, order,
zone->zone_pgdat = pgdat;
zone_pcp_init(zone);
- lruvec_init(&zone->lruvec, zone);
+ lruvec_init(&zone->lruvec);
if (!size)
continue;
ret = start_isolate_page_range(pfn_max_align_down(start),
pfn_max_align_up(end), migratetype);
if (ret)
- goto done;
+ return ret;
ret = __alloc_contig_migrate_range(&cc, start, end);
if (ret)
dump_page_flags(page->flags);
mem_cgroup_print_bad_page(page);
}
-
-/* reset zone->present_pages */
-void reset_zone_present_pages(void)
-{
- struct zone *z;
- int i, nid;
-
- for_each_node_state(nid, N_HIGH_MEMORY) {
- for (i = 0; i < MAX_NR_ZONES; i++) {
- z = NODE_DATA(nid)->node_zones + i;
- z->present_pages = 0;
- }
- }
-}
-
-/* calculate zone's present pages in buddy system */
-void fixup_zone_present_pages(int nid, unsigned long start_pfn,
- unsigned long end_pfn)
-{
- struct zone *z;
- unsigned long zone_start_pfn, zone_end_pfn;
- int i;
-
- for (i = 0; i < MAX_NR_ZONES; i++) {
- z = NODE_DATA(nid)->node_zones + i;
- zone_start_pfn = z->zone_start_pfn;
- zone_end_pfn = zone_start_pfn + z->spanned_pages;
-
- /* if the two regions intersect */
- if (!(zone_start_pfn >= end_pfn || zone_end_pfn <= start_pfn))
- z->present_pages += min(end_pfn, zone_end_pfn) -
- max(start_pfn, zone_start_pfn);
- }
-}
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
+#include <linux/backing-dev.h>
#include <asm/tlbflush.h>
if (page_mapped(page)) {
struct address_space *mapping = page_mapping(page);
- if (mapping) {
+ if (mapping)
ret = page_mkclean_file(mapping, page);
- if (page_test_and_clear_dirty(page_to_pfn(page), 1))
- ret = 1;
- }
}
return ret;
*/
void page_remove_rmap(struct page *page)
{
+ struct address_space *mapping = page_mapping(page);
bool anon = PageAnon(page);
bool locked;
unsigned long flags;
* this if the page is anon, so about to be freed; but perhaps
* not if it's in swapcache - there might be another pte slot
* containing the swap entry, but page not yet written to swap.
+ *
+ * And we can skip it on file pages, so long as the filesystem
+ * participates in dirty tracking; but need to catch shm and tmpfs
+ * and ramfs pages which have been modified since creation by read
+ * fault.
+ *
+ * Note that mapping must be decided above, before decrementing
+ * mapcount (which luckily provides a barrier): once page is unmapped,
+ * it could be truncated and page->mapping reset to NULL at any moment.
+ * Note also that we are relying on page_mapping(page) to set mapping
+ * to &swapper_space when PageSwapCache(page).
*/
- if ((!anon || PageSwapCache(page)) &&
+ if (mapping && !mapping_cap_account_dirty(mapping) &&
page_test_and_clear_dirty(page_to_pfn(page), 1))
set_page_dirty(page);
/*
kfree(info->symlink);
simple_xattrs_free(&info->xattrs);
- BUG_ON(inode->i_blocks);
+ WARN_ON(inode->i_blocks);
shmem_free_inode(inode->i_sb);
clear_inode(inode);
}
if (!error) {
error = shmem_add_to_page_cache(page, mapping, index,
gfp, swp_to_radix_entry(swap));
- /* We already confirmed swap, and make no allocation */
- VM_BUG_ON(error);
+ /*
+ * We already confirmed swap under page lock, and make
+ * no memory allocation here, so usually no possibility
+ * of error; but free_swap_and_cache() only trylocks a
+ * page, so it is just possible that the entry has been
+ * truncated or holepunched since swap was confirmed.
+ * shmem_undo_range() will have done some of the
+ * unaccounting, now delete_from_swap_cache() will do
+ * the rest (including mem_cgroup_uncharge_swapcache).
+ * Reset swap.val? No, leave it so "failed" goes back to
+ * "repeat": reading a hole and writing should succeed.
+ */
+ if (error)
+ delete_from_swap_cache(page);
}
if (error)
goto failed;
{
return; /* XXX: Not implemented yet */
}
-static void free_map_bootmem(struct page *page, unsigned long nr_pages)
+static void free_map_bootmem(struct page *memmap, unsigned long nr_pages)
{
}
#else
get_order(sizeof(struct page) * nr_pages));
}
-static void free_map_bootmem(struct page *page, unsigned long nr_pages)
+static void free_map_bootmem(struct page *memmap, unsigned long nr_pages)
{
unsigned long maps_section_nr, removing_section_nr, i;
unsigned long magic;
+ struct page *page = virt_to_page(memmap);
for (i = 0; i < nr_pages; i++, page++) {
magic = (unsigned long) page->lru.next;
*/
if (memmap) {
- struct page *memmap_page;
- memmap_page = virt_to_page(memmap);
-
nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
>> PAGE_SHIFT;
- free_map_bootmem(memmap_page, nr_pages);
+ free_map_bootmem(memmap, nr_pages);
}
}
BUG_ON(!current->mm);
pathname = getname(specialfile);
- err = PTR_ERR(pathname);
if (IS_ERR(pathname))
- goto out;
+ return PTR_ERR(pathname);
victim = file_open_name(pathname, O_RDWR|O_LARGEFILE, 0);
err = PTR_ERR(victim);
out_dput:
filp_close(victim, NULL);
out:
+ putname(pathname);
return err;
}
return false;
}
-#ifdef CONFIG_COMPACTION
-/*
- * If compaction is deferred for sc->order then scale the number of pages
- * reclaimed based on the number of consecutive allocation failures
- */
-static unsigned long scale_for_compaction(unsigned long pages_for_compaction,
- struct lruvec *lruvec, struct scan_control *sc)
-{
- struct zone *zone = lruvec_zone(lruvec);
-
- if (zone->compact_order_failed <= sc->order)
- pages_for_compaction <<= zone->compact_defer_shift;
- return pages_for_compaction;
-}
-#else
-static unsigned long scale_for_compaction(unsigned long pages_for_compaction,
- struct lruvec *lruvec, struct scan_control *sc)
-{
- return pages_for_compaction;
-}
-#endif
-
/*
* Reclaim/compaction is used for high-order allocation requests. It reclaims
* order-0 pages before compacting the zone. should_continue_reclaim() returns
* inactive lists are large enough, continue reclaiming
*/
pages_for_compaction = (2UL << sc->order);
-
- pages_for_compaction = scale_for_compaction(pages_for_compaction,
- lruvec, sc);
inactive_lru_pages = get_lru_size(lruvec, LRU_INACTIVE_FILE);
if (nr_swap_pages > 0)
inactive_lru_pages += get_lru_size(lruvec, LRU_INACTIVE_ANON);
* Throttle direct reclaimers if backing storage is backed by the network
* and the PFMEMALLOC reserve for the preferred node is getting dangerously
* depleted. kswapd will continue to make progress and wake the processes
- * when the low watermark is reached
+ * when the low watermark is reached.
+ *
+ * Returns true if a fatal signal was delivered during throttling. If this
+ * happens, the page allocator should not consider triggering the OOM killer.
*/
-static void throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
+static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
nodemask_t *nodemask)
{
struct zone *zone;
* processes to block on log_wait_commit().
*/
if (current->flags & PF_KTHREAD)
- return;
+ goto out;
+
+ /*
+ * If a fatal signal is pending, this process should not throttle.
+ * It should return quickly so it can exit and free its memory
+ */
+ if (fatal_signal_pending(current))
+ goto out;
/* Check if the pfmemalloc reserves are ok */
first_zones_zonelist(zonelist, high_zoneidx, NULL, &zone);
pgdat = zone->zone_pgdat;
if (pfmemalloc_watermark_ok(pgdat))
- return;
+ goto out;
/* Account for the throttling */
count_vm_event(PGSCAN_DIRECT_THROTTLE);
if (!(gfp_mask & __GFP_FS)) {
wait_event_interruptible_timeout(pgdat->pfmemalloc_wait,
pfmemalloc_watermark_ok(pgdat), HZ);
- return;
+
+ goto check_pending;
}
/* Throttle until kswapd wakes the process */
wait_event_killable(zone->zone_pgdat->pfmemalloc_wait,
pfmemalloc_watermark_ok(pgdat));
+
+check_pending:
+ if (fatal_signal_pending(current))
+ return true;
+
+out:
+ return false;
}
unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
.gfp_mask = sc.gfp_mask,
};
- throttle_direct_reclaim(gfp_mask, zonelist, nodemask);
-
/*
- * Do not enter reclaim if fatal signal is pending. 1 is returned so
- * that the page allocator does not consider triggering OOM
+ * Do not enter reclaim if fatal signal was delivered while throttled.
+ * 1 is returned so that the page allocator does not OOM kill at this
+ * point.
*/
- if (fatal_signal_pending(current))
+ if (throttle_direct_reclaim(gfp_mask, zonelist, nodemask))
return 1;
trace_mm_vmscan_direct_reclaim_begin(order,
} while (memcg);
}
+static bool zone_balanced(struct zone *zone, int order,
+ unsigned long balance_gap, int classzone_idx)
+{
+ if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone) +
+ balance_gap, classzone_idx, 0))
+ return false;
+
+ if (COMPACTION_BUILD && order && !compaction_suitable(zone, order))
+ return false;
+
+ return true;
+}
+
/*
* pgdat_balanced is used when checking if a node is balanced for high-order
* allocations. Only zones that meet watermarks and are in a zone allowed
continue;
}
- if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone),
- i, 0))
+ if (!zone_balanced(zone, order, 0, i))
all_zones_ok = false;
else
balanced += zone->present_pages;
break;
}
- if (!zone_watermark_ok_safe(zone, order,
- high_wmark_pages(zone), 0, 0)) {
+ if (!zone_balanced(zone, order, 0, 0)) {
end_zone = i;
break;
} else {
testorder = 0;
if ((buffer_heads_over_limit && is_highmem_idx(i)) ||
- !zone_watermark_ok_safe(zone, testorder,
- high_wmark_pages(zone) + balance_gap,
- end_zone, 0)) {
+ !zone_balanced(zone, testorder,
+ balance_gap, end_zone)) {
shrink_zone(zone, &sc);
reclaim_state->reclaimed_slab = 0;
continue;
}
- if (!zone_watermark_ok_safe(zone, testorder,
- high_wmark_pages(zone), end_zone, 0)) {
+ if (!zone_balanced(zone, testorder, 0, end_zone)) {
all_zones_ok = 0;
/*
* We are still under min water mark. This
&balanced_classzone_idx);
}
}
+
+ current->reclaim_state = NULL;
return 0;
}
case NETDEV_PRE_TYPE_CHANGE:
/* Forbid underlaying device to change its type. */
- return NOTIFY_BAD;
+ if (vlan_uses_dev(dev))
+ return NOTIFY_BAD;
+ break;
case NETDEV_NOTIFY_PEERS:
case NETDEV_BONDING_FAILOVER:
uint16_t crc;
unsigned long entrytime;
+ spin_lock_init(&bat_priv->bla.bcast_duplist_lock);
+
batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hash registering\n");
/* setting claim destination address */
/**
* batadv_bla_check_bcast_duplist
* @bat_priv: the bat priv with all the soft interface information
- * @bcast_packet: originator mac address
- * @hdr_size: maximum length of the frame
+ * @bcast_packet: encapsulated broadcast frame plus batman header
+ * @bcast_packet_len: length of encapsulated broadcast frame plus batman header
*
* check if it is on our broadcast list. Another gateway might
* have sent the same packet because it is connected to the same backbone,
*/
int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
struct batadv_bcast_packet *bcast_packet,
- int hdr_size)
+ int bcast_packet_len)
{
- int i, length, curr;
+ int i, length, curr, ret = 0;
uint8_t *content;
uint16_t crc;
struct batadv_bcast_duplist_entry *entry;
- length = hdr_size - sizeof(*bcast_packet);
+ length = bcast_packet_len - sizeof(*bcast_packet);
content = (uint8_t *)bcast_packet;
content += sizeof(*bcast_packet);
/* calculate the crc ... */
crc = crc16(0, content, length);
+ spin_lock_bh(&bat_priv->bla.bcast_duplist_lock);
+
for (i = 0; i < BATADV_DUPLIST_SIZE; i++) {
curr = (bat_priv->bla.bcast_duplist_curr + i);
curr %= BATADV_DUPLIST_SIZE;
/* this entry seems to match: same crc, not too old,
* and from another gw. therefore return 1 to forbid it.
*/
- return 1;
+ ret = 1;
+ goto out;
}
- /* not found, add a new entry (overwrite the oldest entry) */
+ /* not found, add a new entry (overwrite the oldest entry)
+	 * and allow it, it's the first occurrence.
+ */
curr = (bat_priv->bla.bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1);
curr %= BATADV_DUPLIST_SIZE;
entry = &bat_priv->bla.bcast_duplist[curr];
memcpy(entry->orig, bcast_packet->orig, ETH_ALEN);
bat_priv->bla.bcast_duplist_curr = curr;
- /* allow it, its the first occurence. */
- return 0;
+out:
+ spin_unlock_bh(&bat_priv->bla.bcast_duplist_lock);
+
+ return ret;
}
spin_unlock_bh(&orig_node->bcast_seqno_lock);
+ /* keep skb linear for crc calculation */
+ if (skb_linearize(skb) < 0)
+ goto out;
+
+ bcast_packet = (struct batadv_bcast_packet *)skb->data;
+
/* check whether this has been sent by another originator before */
- if (batadv_bla_check_bcast_duplist(bat_priv, bcast_packet, hdr_size))
+ if (batadv_bla_check_bcast_duplist(bat_priv, bcast_packet, skb->len))
goto out;
/* rebroadcast packet */
soft_iface->last_rx = jiffies;
+	/* Let the bridge loop avoidance check the packet. If it will
+	 * not handle it, we can safely push it up.
+ */
+ if (batadv_bla_rx(bat_priv, skb, vid, is_bcast))
+ goto out;
+
if (orig_node)
batadv_tt_add_temporary_global_entry(bat_priv, orig_node,
ethhdr->h_source);
if (batadv_is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest))
goto dropped;
- /* Let the bridge loop avoidance check the packet. If will
- * not handle it, we can safely push it up.
- */
- if (batadv_bla_rx(bat_priv, skb, vid, is_bcast))
- goto out;
-
netif_rx(skb);
goto out;
*/
tt_global_entry->common.flags &= ~BATADV_TT_CLIENT_TEMP;
+ /* the change can carry possible "attribute" flags like the
+ * TT_CLIENT_WIFI, therefore they have to be copied in the
+ * client entry
+ */
+ tt_global_entry->common.flags |= flags;
+
/* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only
* one originator left in the list and we previously received a
* delete + roaming change for this originator.
memcpy(tt_change->addr, tt_common_entry->addr,
ETH_ALEN);
- tt_change->flags = BATADV_NO_FLAGS;
+ tt_change->flags = tt_common_entry->flags;
tt_count++;
tt_change++;
{
bool ret = false;
+	/* if the originator is a backbone node (meaning it belongs to the same
+	 * LAN as this node) the temporary client must not be added, because to
+	 * reach such a destination the node must use the LAN instead of the mesh
+	 */
+ if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig))
+ goto out;
+
if (!batadv_tt_global_add(bat_priv, orig_node, addr,
BATADV_TT_CLIENT_TEMP,
atomic_read(&orig_node->last_ttvn)))
struct batadv_hashtable *backbone_hash;
struct batadv_bcast_duplist_entry bcast_duplist[BATADV_DUPLIST_SIZE];
int bcast_duplist_curr;
+ /* protects bcast_duplist and bcast_duplist_curr */
+ spinlock_t bcast_duplist_lock;
struct batadv_bla_claim_dst claim_dest;
struct delayed_work work;
};
if (hdev->dev_type != HCI_AMP)
set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
- schedule_work(&hdev->power_on);
-
hci_notify(hdev, HCI_DEV_REG);
hci_dev_hold(hdev);
+ schedule_work(&hdev->power_on);
+
return id;
err_wqueue:
struct hci_dev *d;
size_t rp_len;
u16 count;
- int i, err;
+ int err;
BT_DBG("sock %p", sk);
return -ENOMEM;
}
- rp->num_controllers = cpu_to_le16(count);
-
- i = 0;
+ count = 0;
list_for_each_entry(d, &hci_dev_list, list) {
if (test_bit(HCI_SETUP, &d->dev_flags))
continue;
if (!mgmt_valid_hdev(d))
continue;
- rp->index[i++] = cpu_to_le16(d->id);
+ rp->index[count++] = cpu_to_le16(d->id);
BT_DBG("Added hci%u", d->id);
}
+ rp->num_controllers = cpu_to_le16(count);
+ rp_len = sizeof(*rp) + (2 * count);
+
read_unlock(&hci_dev_list_lock);
err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
continue;
list_del(&match->list);
+ kfree(match);
found++;
}
#define SMP_TIMEOUT msecs_to_jiffies(30000)
+#define AUTH_REQ_MASK 0x07
+
static inline void swap128(u8 src[16], u8 dst[16])
{
int i;
req->max_key_size = SMP_MAX_ENC_KEY_SIZE;
req->init_key_dist = 0;
req->resp_key_dist = dist_keys;
- req->auth_req = authreq;
+ req->auth_req = (authreq & AUTH_REQ_MASK);
return;
}
rsp->max_key_size = SMP_MAX_ENC_KEY_SIZE;
rsp->init_key_dist = 0;
rsp->resp_key_dist = req->resp_key_dist & dist_keys;
- rsp->auth_req = authreq;
+ rsp->auth_req = (authreq & AUTH_REQ_MASK);
}
static u8 check_enc_key_size(struct l2cap_conn *conn, __u8 max_key_size)
clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->hcon->flags);
mgmt_auth_failed(conn->hcon->hdev, conn->dst, hcon->type,
- hcon->dst_type, reason);
+ hcon->dst_type, HCI_ERROR_AUTH_FAILURE);
cancel_delayed_work_sync(&conn->security_timer);
op->sk = sk;
op->ifindex = ifindex;
+ /* ifindex for timeout events w/o previous frame reception */
+ op->rx_ifindex = ifindex;
+
/* initialize uninitialized (kzalloc) structure */
hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
op->timer.function = bcm_rx_timeout_handler;
mutex_unlock(&con->mutex);
return;
} else {
- con->ops->put(con);
dout("con_work %p FAILED to back off %lu\n", con,
con->delay);
+ set_bit(CON_FLAG_BACKOFF, &con->flags);
}
+ goto done;
}
if (con->state == CON_STATE_STANDBY) {
msg = con->ops->alloc_msg(con, hdr, skip);
mutex_lock(&con->mutex);
if (con->state != CON_STATE_OPEN) {
- ceph_msg_put(msg);
+ if (msg)
+ ceph_msg_put(msg);
return -EAGAIN;
}
con->in_msg = msg;
static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
{
- if (ptype->af_packet_priv == NULL)
+ if (!ptype->af_packet_priv || !skb->sk)
return false;
if (ptype->id_match)
if (unlikely(tcpu != next_cpu) &&
(tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
((int)(per_cpu(softnet_data, tcpu).input_queue_head -
- rflow->last_qtail)) >= 0))
+ rflow->last_qtail)) >= 0)) {
+ tcpu = next_cpu;
rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
+ }
if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
*rflowp = rflow;
*/
ha = list_first_entry(&dev->dev_addrs.list,
struct netdev_hw_addr, list);
- if (ha->addr == dev->dev_addr && ha->refcount == 1)
+ if (!memcmp(ha->addr, addr, dev->addr_len) &&
+ ha->type == addr_type && ha->refcount == 1)
return -ENOENT;
err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len,
.name = "statistics",
.attrs = netstat_attrs,
};
+
+#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
+static struct attribute *wireless_attrs[] = {
+ NULL
+};
+
+static struct attribute_group wireless_group = {
+ .name = "wireless",
+ .attrs = wireless_attrs,
+};
+#endif
#endif /* CONFIG_SYSFS */
#ifdef CONFIG_RPS
groups++;
*groups++ = &netstat_group;
+
+#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
+ if (net->ieee80211_ptr)
+ *groups++ = &wireless_group;
+#if IS_ENABLED(CONFIG_WIRELESS_EXT)
+ else if (net->wireless_handlers)
+ *groups++ = &wireless_group;
+#endif
+#endif
#endif /* CONFIG_SYSFS */
error = device_add(dev);
goto skip;
err = nlmsg_populate_fdb_fill(skb, dev, ha->addr,
- portid, seq, 0, NTF_SELF);
+ portid, seq,
+ RTM_NEWNEIGH, NTF_SELF);
if (err < 0)
return err;
skip:
void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
{
- if (head_stolen)
+ if (head_stolen) {
+ skb_release_head_state(skb);
kmem_cache_free(skbuff_head_cache, skb);
- else
+ } else {
__kfree_skb(skb);
+ }
}
EXPORT_SYMBOL(kfree_skb_partial);
struct inet_peer *peer = inet_getpeer_v4(net->ipv4.peers, fl4->daddr, 1);
rc = inet_peer_xrlim_allow(peer,
net->ipv4.sysctl_icmp_ratelimit);
- inet_putpeer(peer);
+ if (peer)
+ inet_putpeer(peer);
}
out:
return rc;
struct inet_diag_req_v2 *r, struct nlattr *bc)
{
const struct inet_diag_handler *handler;
+ int err = 0;
handler = inet_diag_lock_handler(r->sdiag_protocol);
if (!IS_ERR(handler))
handler->dump(skb, cb, r, bc);
+ else
+ err = PTR_ERR(handler);
inet_diag_unlock_handler(handler);
- return skb->len;
+ return err ? : skb->len;
}
static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
struct inet_sock *inet = inet_sk(sk);
int val = 0, err;
- if (((1<<optname) & ((1<<IP_PKTINFO) | (1<<IP_RECVTTL) |
- (1<<IP_RECVOPTS) | (1<<IP_RECVTOS) |
- (1<<IP_RETOPTS) | (1<<IP_TOS) |
- (1<<IP_TTL) | (1<<IP_HDRINCL) |
- (1<<IP_MTU_DISCOVER) | (1<<IP_RECVERR) |
- (1<<IP_ROUTER_ALERT) | (1<<IP_FREEBIND) |
- (1<<IP_PASSSEC) | (1<<IP_TRANSPARENT) |
- (1<<IP_MINTTL) | (1<<IP_NODEFRAG))) ||
- optname == IP_UNICAST_IF ||
- optname == IP_MULTICAST_TTL ||
- optname == IP_MULTICAST_ALL ||
- optname == IP_MULTICAST_LOOP ||
- optname == IP_RECVORIGDSTADDR) {
+ switch (optname) {
+ case IP_PKTINFO:
+ case IP_RECVTTL:
+ case IP_RECVOPTS:
+ case IP_RECVTOS:
+ case IP_RETOPTS:
+ case IP_TOS:
+ case IP_TTL:
+ case IP_HDRINCL:
+ case IP_MTU_DISCOVER:
+ case IP_RECVERR:
+ case IP_ROUTER_ALERT:
+ case IP_FREEBIND:
+ case IP_PASSSEC:
+ case IP_TRANSPARENT:
+ case IP_MINTTL:
+ case IP_NODEFRAG:
+ case IP_UNICAST_IF:
+ case IP_MULTICAST_TTL:
+ case IP_MULTICAST_ALL:
+ case IP_MULTICAST_LOOP:
+ case IP_RECVORIGDSTADDR:
if (optlen >= sizeof(int)) {
if (get_user(val, (int __user *) optval))
return -EFAULT;
if (tunnel != NULL) {
struct pcpu_tstats *tstats;
+ if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
+ return -1;
+
tstats = this_cpu_ptr(tunnel->dev->tstats);
u64_stats_update_begin(&tstats->syncp);
tstats->rx_packets++;
tstats->rx_bytes += skb->len;
u64_stats_update_end(&tstats->syncp);
+ skb->mark = 0;
+ secpath_reset(skb);
skb->dev = tunnel->dev;
return 1;
}
if (get_user(v, (u32 __user *)optval))
return -EFAULT;
+ /* "pimreg%u" should not exceed 16 bytes (IFNAMSIZ) */
+ if (v != RT_TABLE_DEFAULT && v >= 1000000000)
+ return -EINVAL;
+
rtnl_lock();
ret = 0;
if (sk == rtnl_dereference(mrt->mroute_sk)) {
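The bound enforced above works out as follows: "pimreg" is 6 characters, and rejecting v >= 1000000000 caps the decimal suffix at 9 digits, so the longest generated name is 15 characters plus the terminating NUL, which fits IFNAMSIZ (16). A quick userspace sketch of that arithmetic:

    #include <assert.h>
    #include <stdio.h>

    #define IFNAMSIZ 16    /* same value the kernel uses */

    int main(void)
    {
        char name[32];
        /* 999999999 is the largest value the new check still accepts */
        int n = snprintf(name, sizeof(name), "pimreg%u", 999999999u);

        /* "pimreg" (6 chars) + 9 digits = 15, leaving room for the NUL */
        assert(n == 15 && n + 1 <= IFNAMSIZ);
        return 0;
    }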
if ((ct->tuplehash[dir].tuple.src.u3.ip !=
ct->tuplehash[!dir].tuple.dst.u3.ip) ||
- (ct->tuplehash[dir].tuple.src.u.all !=
+ (ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMP &&
+ ct->tuplehash[dir].tuple.src.u.all !=
ct->tuplehash[!dir].tuple.dst.u.all))
if (nf_xfrm_me_harder(skb, AF_INET) < 0)
ret = NF_DROP;
}
#ifdef CONFIG_XFRM
else if (!(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) &&
+ ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMP &&
ct->tuplehash[dir].tuple.dst.u.all !=
ct->tuplehash[!dir].tuple.src.u.all)
if (nf_xfrm_me_harder(skb, AF_INET) < 0)
spin_lock_bh(&fnhe_lock);
if (daddr == fnhe->fnhe_daddr) {
- struct rtable *orig;
-
+ struct rtable *orig = rcu_dereference(fnhe->fnhe_rth);
+ if (orig && rt_is_expired(orig)) {
+ fnhe->fnhe_gw = 0;
+ fnhe->fnhe_pmtu = 0;
+ fnhe->fnhe_expires = 0;
+ }
if (fnhe->fnhe_pmtu) {
unsigned long expires = fnhe->fnhe_expires;
unsigned long diff = expires - jiffies;
} else if (!rt->rt_gateway)
rt->rt_gateway = daddr;
- orig = rcu_dereference(fnhe->fnhe_rth);
rcu_assign_pointer(fnhe->fnhe_rth, rt);
if (orig)
rt_free(orig);
if (dev_out->flags & IFF_LOOPBACK)
flags |= RTCF_LOCAL;
+ do_cache = true;
if (type == RTN_BROADCAST) {
flags |= RTCF_BROADCAST | RTCF_LOCAL;
fi = NULL;
if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
fl4->flowi4_proto))
flags &= ~RTCF_LOCAL;
+ else
+ do_cache = false;
/* If multicast route do not exist use
* default one, but do not gateway in this case.
* Yes, it is hack.
}
fnhe = NULL;
- do_cache = fi != NULL;
- if (fi) {
+ do_cache &= fi != NULL;
+ if (do_cache) {
struct rtable __rcu **prth;
struct fib_nh *nh = &FIB_RES_NH(*res);
pr_err("Unable to create route proc files\n");
#ifdef CONFIG_XFRM
xfrm_init();
- xfrm4_init(ip_rt_max_size);
+ xfrm4_init();
#endif
rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL, NULL);
!tp->urg_data ||
before(tp->urg_seq, tp->copied_seq) ||
!before(tp->urg_seq, tp->rcv_nxt)) {
- struct sk_buff *skb;
answ = tp->rcv_nxt - tp->copied_seq;
- /* Subtract 1, if FIN is in queue. */
- skb = skb_peek_tail(&sk->sk_receive_queue);
- if (answ && skb)
- answ -= tcp_hdr(skb)->fin;
+ /* Subtract 1, if FIN was received */
+ if (answ && sock_flag(sk, SOCK_DONE))
+ answ--;
} else
answ = tp->urg_seq - tp->copied_seq;
release_sock(sk);
wait_for_sndbuf:
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
- if (copied && likely(!tp->repair))
+ if (copied)
tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
}
out:
- if (copied && likely(!tp->repair))
+ if (copied)
tcp_push(sk, flags, mss_now, tp->nonagle);
release_sock(sk);
return copied + copied_syn;
info->tcpi_options |= TCPI_OPT_ECN;
if (tp->ecn_flags & TCP_ECN_SEEN)
info->tcpi_options |= TCPI_OPT_ECN_SEEN;
+ if (tp->syn_data_acked)
+ info->tcpi_options |= TCPI_OPT_SYN_DATA;
info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
.tcpv_rttcnt = ca->cnt_rtt,
.tcpv_minrtt = ca->base_rtt,
};
- u64 t = ca->sum_rtt;
- do_div(t, ca->cnt_rtt);
- info.tcpv_rtt = t;
+ if (info.tcpv_rttcnt > 0) {
+ u64 t = ca->sum_rtt;
+ do_div(t, info.tcpv_rttcnt);
+ info.tcpv_rtt = t;
+ }
nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info);
}
}
struct tcphdr *th;
bool fragstolen;
+ if (size == 0)
+ return 0;
+
skb = alloc_skb(size + sizeof(*th), sk->sk_allocation);
if (!skb)
goto err;
goto discard;
}
- /* ts_recent update must be made after we are sure that the packet
- * is in window.
- */
- tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
-
/* step 3: check security and precedence [ignored] */
/* step 4: Check for a SYN
if (th->ack && tcp_ack(sk, skb, FLAG_SLOWPATH) < 0)
goto discard;
+ /* ts_recent update must be made after we are sure that the packet
+ * is in window.
+ */
+ tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
+
tcp_rcv_rtt_measure_ts(sk, skb);
/* Process urgent data. */
tcp_rearm_rto(sk);
return true;
}
+ tp->syn_data_acked = tp->syn_data;
return false;
}
req = tp->fastopen_rsk;
if (req != NULL) {
- BUG_ON(sk->sk_state != TCP_SYN_RECV &&
+ WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
sk->sk_state != TCP_FIN_WAIT1);
if (tcp_check_req(sk, skb, req, NULL, true) == NULL)
* ACK we have received, this would have acknowledged
* our SYNACK so stop the SYNACK timer.
*/
- if (acceptable && req != NULL) {
+ if (req != NULL) {
+ /* Return RST if ack_seq is invalid.
+ * Note that RFC793 only says to generate a
+ * DUPACK for it but for TCP Fast Open it seems
+ * better to treat this case like TCP_SYN_RECV
+ * above.
+ */
+ if (!acceptable)
+ return 1;
/* We no longer need the request sock. */
reqsk_fastopen_remove(sk, req, false);
tcp_rearm_rto(sk);
} else
goto discard;
+ /* ts_recent update must be made after we are sure that the packet
+ * is in window.
+ */
+ tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
+
/* step 6: check the URG bit */
tcp_urg(sk, skb, th);
skb_set_owner_r(skb, child);
__skb_queue_tail(&child->sk_receive_queue, skb);
tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
+ tp->syn_data_acked = 1;
}
sk->sk_data_ready(sk, 0);
bh_unlock_sock(child);
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
-#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/hash.h>
#include <linux/tcp_metrics.h>
+#include <linux/vmalloc.h>
#include <net/inet_connection_sock.h>
#include <net/net_namespace.h>
}
a = info->attrs[TCP_METRICS_ATTR_ADDR_IPV6];
if (a) {
- if (nla_len(a) != sizeof(sizeof(struct in6_addr)))
+ if (nla_len(a) != sizeof(struct in6_addr))
return -EINVAL;
addr->family = AF_INET6;
memcpy(addr->addr.a6, nla_data(a), sizeof(addr->addr.a6));
net->ipv4.tcp_metrics_hash_log = order_base_2(slots);
size = sizeof(struct tcpm_hash_bucket) << net->ipv4.tcp_metrics_hash_log;
- net->ipv4.tcp_metrics_hash = kzalloc(size, GFP_KERNEL);
+ net->ipv4.tcp_metrics_hash = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
+ if (!net->ipv4.tcp_metrics_hash)
+ net->ipv4.tcp_metrics_hash = vzalloc(size);
+
if (!net->ipv4.tcp_metrics_hash)
return -ENOMEM;
tm = next;
}
}
- kfree(net->ipv4.tcp_metrics_hash);
+ if (is_vmalloc_addr(net->ipv4.tcp_metrics_hash))
+ vfree(net->ipv4.tcp_metrics_hash);
+ else
+ kfree(net->ipv4.tcp_metrics_hash);
}
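The tcp_metrics allocation and teardown hunks above are an instance of the common "try kmalloc first, fall back to vmalloc" pattern for a table whose size depends on a runtime-computed order. A condensed sketch of the pattern in isolation (alloc_table/free_table are illustrative names, not functions from the patch):

    #include <linux/slab.h>
    #include <linux/vmalloc.h>
    #include <linux/mm.h>      /* is_vmalloc_addr() */

    static void *alloc_table(size_t size)
    {
        /* __GFP_NOWARN: a failed high-order kmalloc is expected and
         * handled here, so suppress the allocation-failure warning
         */
        void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

        if (!p)
            p = vzalloc(size);
        return p;
    }

    static void free_table(void *p)
    {
        /* the free path must match however the memory was obtained */
        if (is_vmalloc_addr(p))
            vfree(p);
        else
            kfree(p);
    }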
static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
newtp->rx_opt.mss_clamp = req->mss;
TCP_ECN_openreq_child(newtp, req);
newtp->fastopen_rsk = NULL;
+ newtp->syn_data_acked = 0;
TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_PASSIVEOPENS);
}
tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
BUG_ON(!tso_segs);
+ if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE)
+ goto repair; /* Skip network transmission */
+
cwnd_quota = tcp_cwnd_test(tp, skb);
if (!cwnd_quota)
break;
if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
break;
+repair:
/* Advance the send_head. This one is sent out.
* This call will increment packets_out.
*/
return;
}
if (tp->fastopen_rsk) {
- BUG_ON(sk->sk_state != TCP_SYN_RECV &&
- sk->sk_state != TCP_FIN_WAIT1);
+ WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
+ sk->sk_state != TCP_FIN_WAIT1);
tcp_fastopen_synack_timer(sk);
/* Before we receive ACK to our SYN-ACK don't retransmit
* anything else (e.g., data or FIN segments).
xfrm_policy_unregister_afinfo(&xfrm4_policy_afinfo);
}
-void __init xfrm4_init(int rt_max_size)
+void __init xfrm4_init(void)
{
- /*
- * Select a default value for the gc_thresh based on the main route
- * table hash size. It seems to me the worst case scenario is when
- * we have ipsec operating in transport mode, in which we create a
- * dst_entry per socket. The xfrm gc algorithm starts trying to remove
- * entries at gc_thresh, and prevents new allocations as 2*gc_thresh
- * so lets set an initial xfrm gc_thresh value at the rt_max_size/2.
- * That will let us store an ipsec connection per route table entry,
- * and start cleaning when were 1/2 full
- */
- xfrm4_dst_ops.gc_thresh = rt_max_size/2;
dst_entries_init(&xfrm4_dst_ops);
xfrm4_state_init();
return NULL;
dst->ops->update_pmtu(dst, sk, NULL, mtu);
- return inet6_csk_route_socket(sk, &fl6);
+ dst = inet6_csk_route_socket(sk, &fl6);
+ return IS_ERR(dst) ? NULL : dst;
}
EXPORT_SYMBOL_GPL(inet6_csk_update_pmtu);
/* IFLA_GRE_OKEY */
nla_total_size(4) +
/* IFLA_GRE_LOCAL */
- nla_total_size(4) +
+ nla_total_size(sizeof(struct in6_addr)) +
/* IFLA_GRE_REMOTE */
- nla_total_size(4) +
+ nla_total_size(sizeof(struct in6_addr)) +
/* IFLA_GRE_TTL */
nla_total_size(1) +
/* IFLA_GRE_TOS */
nla_put_be16(skb, IFLA_GRE_OFLAGS, p->o_flags) ||
nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
- nla_put(skb, IFLA_GRE_LOCAL, sizeof(struct in6_addr), &p->raddr) ||
- nla_put(skb, IFLA_GRE_REMOTE, sizeof(struct in6_addr), &p->laddr) ||
+ nla_put(skb, IFLA_GRE_LOCAL, sizeof(struct in6_addr), &p->laddr) ||
+ nla_put(skb, IFLA_GRE_REMOTE, sizeof(struct in6_addr), &p->raddr) ||
nla_put_u8(skb, IFLA_GRE_TTL, p->hop_limit) ||
/*nla_put_u8(skb, IFLA_GRE_TOS, t->priority) ||*/
nla_put_u8(skb, IFLA_GRE_ENCAP_LIMIT, p->encap_limit) ||
if (val < 0 || val > 255)
goto e_inval;
np->min_hopcount = val;
+ retv = 0;
break;
case IPV6_DONTFRAG:
np->dontfrag = valbool;
{
struct inet6_dev *idev;
struct inet6_ifaddr *ifa;
- struct in6_addr mcaddr;
+ struct in6_addr mcaddr = IN6ADDR_LINKLOCAL_ALLNODES_INIT;
idev = in6_dev_get(dev);
if (!idev)
read_lock_bh(&idev->lock);
list_for_each_entry(ifa, &idev->addr_list, if_list) {
- addrconf_addr_solict_mult(&ifa->addr, &mcaddr);
ndisc_send_na(dev, NULL, &mcaddr, &ifa->addr,
/*router=*/ !!idev->cnf.forwarding,
/*solicited=*/ false, /*override=*/ true,
if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3,
&ct->tuplehash[!dir].tuple.dst.u3) ||
- (ct->tuplehash[dir].tuple.src.u.all !=
+ (ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMPV6 &&
+ ct->tuplehash[dir].tuple.src.u.all !=
ct->tuplehash[!dir].tuple.dst.u.all))
if (nf_xfrm_me_harder(skb, AF_INET6) < 0)
ret = NF_DROP;
}
#ifdef CONFIG_XFRM
else if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
+ ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMPV6 &&
ct->tuplehash[dir].tuple.dst.u.all !=
ct->tuplehash[!dir].tuple.src.u.all)
if (nf_xfrm_me_harder(skb, AF_INET6))
{ }
};
-static int __net_init nf_ct_frag6_sysctl_register(struct net *net)
+static int nf_ct_frag6_sysctl_register(struct net *net)
{
struct ctl_table *table;
struct ctl_table_header *hdr;
}
#else
-static int __net_init nf_ct_frag6_sysctl_register(struct net *net)
+static int nf_ct_frag6_sysctl_register(struct net *net)
{
return 0;
}
};
static const u32 ip6_template_metrics[RTAX_MAX] = {
- [RTAX_HOPLIMIT - 1] = 255,
+ [RTAX_HOPLIMIT - 1] = 0,
};
static const struct rt6_info ip6_null_entry_template = {
rt->rt6i_dst.addr = fl6->daddr;
rt->rt6i_dst.plen = 128;
rt->rt6i_idev = idev;
- dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255);
+ dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);
spin_lock_bh(&icmp6_dst_lock);
rt->dst.next = icmp6_dst_gc_list;
lsap = irlmp_open_lsap(stsap_sel, &ttp_notify, 0);
if (lsap == NULL) {
IRDA_DEBUG(0, "%s: unable to allocate LSAP!!\n", __func__);
+ __irttp_close_tsap(self);
return NULL;
}
out_del_dev:
free_netdev(dev);
+ spriv->dev = NULL;
out_del_session:
l2tp_session_delete(session);
out:
else
local->probe_req_reg--;
+ if (!local->open_count)
+ break;
+
ieee80211_queue_work(&local->hw, &local->reconfig_filter);
break;
default:
sdata->u.ibss.state = IEEE80211_IBSS_MLME_SEARCH;
sdata->u.ibss.ibss_join_req = jiffies;
- memcpy(sdata->u.ibss.ssid, params->ssid, IEEE80211_MAX_SSID_LEN);
+ memcpy(sdata->u.ibss.ssid, params->ssid, params->ssid_len);
sdata->u.ibss.ssid_len = params->ssid_len;
mutex_unlock(&sdata->u.ibss.mtx);
mutex_lock(&sdata->u.ibss.mtx);
- sdata->u.ibss.state = IEEE80211_IBSS_MLME_SEARCH;
- memset(sdata->u.ibss.bssid, 0, ETH_ALEN);
- sdata->u.ibss.ssid_len = 0;
-
active_ibss = ieee80211_sta_active_ibss(sdata);
if (!active_ibss && !is_zero_ether_addr(ifibss->bssid)) {
}
}
+ ifibss->state = IEEE80211_IBSS_MLME_SEARCH;
+ memset(ifibss->bssid, 0, ETH_ALEN);
+ ifibss->ssid_len = 0;
+
sta_info_flush(sdata->local, sdata);
spin_lock_bh(&ifibss->incomplete_lock);
struct net_device *dev);
netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
struct net_device *dev);
+void ieee80211_purge_tx_queue(struct ieee80211_hw *hw,
+ struct sk_buff_head *skbs);
/* HT */
void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
if (info->control.vif == &sdata->vif) {
__skb_unlink(skb, &local->pending[i]);
- dev_kfree_skb_irq(skb);
+ ieee80211_free_txskb(&local->hw, skb);
}
}
}
local->hw.wiphy->cipher_suites,
sizeof(u32) * local->hw.wiphy->n_cipher_suites,
GFP_KERNEL);
- if (!suites)
- return -ENOMEM;
+ if (!suites) {
+ result = -ENOMEM;
+ goto fail_wiphy_register;
+ }
for (r = 0; r < local->hw.wiphy->n_cipher_suites; r++) {
u32 suite = local->hw.wiphy->cipher_suites[r];
if (suite == WLAN_CIPHER_SUITE_WEP40 ||
ht_cfreq, ht_oper->primary_chan,
cbss->channel->band);
ht_oper = NULL;
+ } else {
+ channel_type = NL80211_CHAN_HT20;
}
}
- if (ht_oper) {
- channel_type = NL80211_CHAN_HT20;
+ if (ht_oper && sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) {
+ /*
+ * cfg80211 already verified that the channel itself can
+ * be used, but it didn't check that we can do the right
+ * HT type, so do that here as well. If HT40 isn't allowed
+ * on this channel, disable 40 MHz operation.
+ */
- if (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) {
- switch (ht_oper->ht_param &
- IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
- case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+ switch (ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
+ case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+ if (cbss->channel->flags & IEEE80211_CHAN_NO_HT40PLUS)
+ ifmgd->flags |= IEEE80211_STA_DISABLE_40MHZ;
+ else
channel_type = NL80211_CHAN_HT40PLUS;
- break;
- case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+ break;
+ case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+ if (cbss->channel->flags & IEEE80211_CHAN_NO_HT40MINUS)
+ ifmgd->flags |= IEEE80211_STA_DISABLE_40MHZ;
+ else
channel_type = NL80211_CHAN_HT40MINUS;
- break;
- }
+ break;
}
}
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
+ bool tx = !req->local_state_change;
mutex_lock(&ifmgd->mtx);
if (ifmgd->associated &&
ether_addr_equal(ifmgd->associated->bssid, req->bssid)) {
ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
- req->reason_code, true, frame_buf);
+ req->reason_code, tx, frame_buf);
} else {
drv_mgd_prepare_tx(sdata->local, sdata);
ieee80211_send_deauth_disassoc(sdata, req->bssid,
IEEE80211_STYPE_DEAUTH,
- req->reason_code, true,
+ req->reason_code, tx,
frame_buf);
}
if (ieee80211_is_action(hdr->frame_control)) {
u8 category;
+
+ /* make sure category field is present */
+ if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE)
+ return RX_DROP_MONITOR;
+
mgmt = (struct ieee80211_mgmt *)hdr;
category = mgmt->u.action.category;
if (category != WLAN_CATEGORY_MESH_ACTION &&
*/
if (rx->sta && rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
ieee80211_is_data_present(hdr->frame_control)) {
- u16 ethertype;
- u8 *payload;
-
- payload = rx->skb->data +
- ieee80211_hdrlen(hdr->frame_control);
- ethertype = (payload[6] << 8) | payload[7];
- if (cpu_to_be16(ethertype) ==
- rx->sdata->control_port_protocol)
+ unsigned int hdrlen;
+ __be16 ethertype;
+
+ hdrlen = ieee80211_hdrlen(hdr->frame_control);
+
+ if (rx->skb->len < hdrlen + 8)
+ return RX_DROP_MONITOR;
+
+ skb_copy_bits(rx->skb, hdrlen + 6, &ethertype, 2);
+ if (ethertype == rx->sdata->control_port_protocol)
return RX_CONTINUE;
}
hdr = (struct ieee80211_hdr *)rx->skb->data;
fc = hdr->frame_control;
+
+ if (ieee80211_is_ctl(fc))
+ return RX_CONTINUE;
+
sc = le16_to_cpu(hdr->seq_ctrl);
frag = sc & IEEE80211_SCTL_FRAG;
if (likely((!ieee80211_has_morefrags(fc) && frag == 0) ||
- (rx->skb)->len < 24 ||
is_multicast_ether_addr(hdr->addr1))) {
/* not fragmented */
goto out;
hdr = (struct ieee80211_hdr *) skb->data;
hdrlen = ieee80211_hdrlen(hdr->frame_control);
+
+ /* make sure fixed part of mesh header is there, also checks skb len */
+ if (!pskb_may_pull(rx->skb, hdrlen + 6))
+ return RX_DROP_MONITOR;
+
+ mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
+
+ /* make sure full mesh header is there, also checks skb len */
+ if (!pskb_may_pull(rx->skb,
+ hdrlen + ieee80211_get_mesh_hdrlen(mesh_hdr)))
+ return RX_DROP_MONITOR;
+
+ /* reload pointers */
+ hdr = (struct ieee80211_hdr *) skb->data;
mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
/* frame is in RMC, don't forward */
mesh_rmc_check(hdr->addr3, mesh_hdr, rx->sdata))
return RX_DROP_MONITOR;
- if (!ieee80211_is_data(hdr->frame_control))
+ if (!ieee80211_is_data(hdr->frame_control) ||
+ !(status->rx_flags & IEEE80211_RX_RA_MATCH))
return RX_CONTINUE;
if (!mesh_hdr->ttl)
if (is_multicast_ether_addr(hdr->addr1)) {
mpp_addr = hdr->addr3;
proxied_addr = mesh_hdr->eaddr1;
- } else {
+ } else if (mesh_hdr->flags & MESH_FLAGS_AE_A5_A6) {
+ /* has_a4 already checked in ieee80211_rx_mesh_check */
mpp_addr = hdr->addr4;
proxied_addr = mesh_hdr->eaddr2;
+ } else {
+ return RX_DROP_MONITOR;
}
rcu_read_lock();
}
skb_set_queue_mapping(skb, q);
- if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
- goto out;
-
if (!--mesh_hdr->ttl) {
IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_ttl);
- return RX_DROP_MONITOR;
+ goto out;
}
if (!ifmsh->mshcfg.dot11MeshForwarding)
}
break;
case WLAN_CATEGORY_SELF_PROTECTED:
+ if (len < (IEEE80211_MIN_ACTION_SIZE +
+ sizeof(mgmt->u.action.u.self_prot.action_code)))
+ break;
+
switch (mgmt->u.action.u.self_prot.action_code) {
case WLAN_SP_MESH_PEERING_OPEN:
case WLAN_SP_MESH_PEERING_CLOSE:
}
break;
case WLAN_CATEGORY_MESH_ACTION:
+ if (len < (IEEE80211_MIN_ACTION_SIZE +
+ sizeof(mgmt->u.action.u.mesh_action.action_code)))
+ break;
+
if (!ieee80211_vif_is_mesh(&sdata->vif))
break;
if (mesh_action_is_path_sel(mgmt) &&
if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc))
local->dot11ReceivedFragmentCount++;
- if (ieee80211_is_mgmt(fc))
- err = skb_linearize(skb);
- else
+ if (ieee80211_is_mgmt(fc)) {
+ /* drop frame if too short for header */
+ if (skb->len < ieee80211_hdrlen(fc))
+ err = -ENOBUFS;
+ else
+ err = skb_linearize(skb);
+ } else {
err = !pskb_may_pull(skb, ieee80211_hdrlen(fc));
+ }
if (err) {
dev_kfree_skb(skb);
struct cfg80211_sched_scan_request *req)
{
struct ieee80211_local *local = sdata->local;
- struct ieee80211_sched_scan_ies sched_scan_ies;
+ struct ieee80211_sched_scan_ies sched_scan_ies = {};
int ret, i;
mutex_lock(&local->mtx);
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
local->total_ps_buffered -= skb_queue_len(&sta->ps_tx_buf[ac]);
- __skb_queue_purge(&sta->ps_tx_buf[ac]);
- __skb_queue_purge(&sta->tx_filtered[ac]);
+ ieee80211_purge_tx_queue(&local->hw, &sta->ps_tx_buf[ac]);
+ ieee80211_purge_tx_queue(&local->hw, &sta->tx_filtered[ac]);
}
#ifdef CONFIG_MAC80211_MESH
tid_tx = rcu_dereference_raw(sta->ampdu_mlme.tid_tx[i]);
if (!tid_tx)
continue;
- __skb_queue_purge(&tid_tx->pending);
+ ieee80211_purge_tx_queue(&local->hw, &tid_tx->pending);
kfree(tid_tx);
}
*/
if (!skb)
break;
- dev_kfree_skb(skb);
+ ieee80211_free_txskb(&local->hw, skb);
}
/*
local->total_ps_buffered--;
ps_dbg(sta->sdata, "Buffered frame expired (STA %pM)\n",
sta->sta.addr);
- dev_kfree_skb(skb);
+ ieee80211_free_txskb(&local->hw, skb);
}
/*
struct ieee80211_local *local = sdata->local;
struct sk_buff_head pending;
int filtered = 0, buffered = 0, ac;
+ unsigned long flags;
clear_sta_flag(sta, WLAN_STA_SP);
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
int count = skb_queue_len(&pending), tmp;
+ spin_lock_irqsave(&sta->tx_filtered[ac].lock, flags);
skb_queue_splice_tail_init(&sta->tx_filtered[ac], &pending);
+ spin_unlock_irqrestore(&sta->tx_filtered[ac].lock, flags);
tmp = skb_queue_len(&pending);
filtered += tmp - count;
count = tmp;
+ spin_lock_irqsave(&sta->ps_tx_buf[ac].lock, flags);
skb_queue_splice_tail_init(&sta->ps_tx_buf[ac], &pending);
+ spin_unlock_irqrestore(&sta->ps_tx_buf[ac].lock, flags);
tmp = skb_queue_len(&pending);
buffered += tmp - count;
}
dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ieee80211_free_txskb);
+
+void ieee80211_purge_tx_queue(struct ieee80211_hw *hw,
+ struct sk_buff_head *skbs)
+{
+ struct sk_buff *skb;
+
+ while ((skb = __skb_dequeue(skbs)))
+ ieee80211_free_txskb(hw, skb);
+}
if (tx->skb)
ieee80211_free_txskb(&tx->local->hw, tx->skb);
else
- __skb_queue_purge(&tx->skbs);
+ ieee80211_purge_tx_queue(&tx->local->hw, &tx->skbs);
return -1;
} else if (unlikely(res == TX_QUEUED)) {
I802_DEBUG_INC(tx->local->tx_handlers_queued);
*/
void ieee80211_clear_tx_pending(struct ieee80211_local *local)
{
+ struct sk_buff *skb;
int i;
- for (i = 0; i < local->hw.queues; i++)
- skb_queue_purge(&local->pending[i]);
+ for (i = 0; i < local->hw.queues; i++) {
+ while ((skb = skb_dequeue(&local->pending[i])) != NULL)
+ ieee80211_free_txskb(&local->hw, skb);
+ }
}
/*
int queue = info->hw_queue;
if (WARN_ON(!info->control.vif)) {
- kfree_skb(skb);
+ ieee80211_free_txskb(&local->hw, skb);
return;
}
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
if (WARN_ON(!info->control.vif)) {
- kfree_skb(skb);
+ ieee80211_free_txskb(&local->hw, skb);
continue;
}
break;
}
- if (id != WLAN_EID_VENDOR_SPECIFIC &&
- id != WLAN_EID_QUIET &&
- test_bit(id, seen_elems)) {
- elems->parse_error = true;
- left -= elen;
- pos += elen;
- continue;
+ switch (id) {
+ case WLAN_EID_SSID:
+ case WLAN_EID_SUPP_RATES:
+ case WLAN_EID_FH_PARAMS:
+ case WLAN_EID_DS_PARAMS:
+ case WLAN_EID_CF_PARAMS:
+ case WLAN_EID_TIM:
+ case WLAN_EID_IBSS_PARAMS:
+ case WLAN_EID_CHALLENGE:
+ case WLAN_EID_RSN:
+ case WLAN_EID_ERP_INFO:
+ case WLAN_EID_EXT_SUPP_RATES:
+ case WLAN_EID_HT_CAPABILITY:
+ case WLAN_EID_HT_OPERATION:
+ case WLAN_EID_VHT_CAPABILITY:
+ case WLAN_EID_VHT_OPERATION:
+ case WLAN_EID_MESH_ID:
+ case WLAN_EID_MESH_CONFIG:
+ case WLAN_EID_PEER_MGMT:
+ case WLAN_EID_PREQ:
+ case WLAN_EID_PREP:
+ case WLAN_EID_PERR:
+ case WLAN_EID_RANN:
+ case WLAN_EID_CHANNEL_SWITCH:
+ case WLAN_EID_EXT_CHANSWITCH_ANN:
+ case WLAN_EID_COUNTRY:
+ case WLAN_EID_PWR_CONSTRAINT:
+ case WLAN_EID_TIMEOUT_INTERVAL:
+ if (test_bit(id, seen_elems)) {
+ elems->parse_error = true;
+ left -= elen;
+ pos += elen;
+ continue;
+ }
+ break;
}
if (calc_crc && id < 64 && (filter & (1ULL << id)))
list_for_each_entry(sdata, &local->interfaces, list) {
if (sdata->vif.type != NL80211_IFTYPE_STATION)
continue;
+ if (!sdata->u.mgd.associated)
+ continue;
ieee80211_send_nullfunc(local, sdata, 0);
}
if (status->flag & RX_FLAG_MMIC_ERROR)
goto mic_fail;
- if (!(status->flag & RX_FLAG_IV_STRIPPED) && rx->key)
+ if (!(status->flag & RX_FLAG_IV_STRIPPED) && rx->key &&
+ rx->key->conf.cipher == WLAN_CIPHER_SUITE_TKIP)
goto update_iv;
return RX_CONTINUE;
static void bip_aad(struct sk_buff *skb, u8 *aad)
{
+ __le16 mask_fc;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+
/* BIP AAD: FC(masked) || A1 || A2 || A3 */
/* FC type/subtype */
- aad[0] = skb->data[0];
/* Mask FC Retry, PwrMgt, MoreData flags to zero */
- aad[1] = skb->data[1] & ~(BIT(4) | BIT(5) | BIT(6));
+ mask_fc = hdr->frame_control;
+ mask_fc &= ~cpu_to_le16(IEEE80211_FCTL_RETRY | IEEE80211_FCTL_PM |
+ IEEE80211_FCTL_MOREDATA);
+ put_unaligned(mask_fc, (__le16 *) &aad[0]);
/* A1 || A2 || A3 */
- memcpy(aad + 2, skb->data + 4, 3 * ETH_ALEN);
+ memcpy(aad + 2, &hdr->addr1, 3 * ETH_ALEN);
}
return adtfn(set, &nip, timeout, flags);
}
+ ip_to = ip;
if (tb[IPSET_ATTR_IP_TO]) {
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
if (ret)
if (!cidr || cidr > 32)
return -IPSET_ERR_INVALID_CIDR;
ip_set_mask_from_to(ip, ip_to, cidr);
- } else
- ip_to = ip;
+ }
hosts = h->netmask == 32 ? 1 : 2 << (32 - h->netmask - 1);
const struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipport4_elem data = { };
- u32 ip, ip_to = 0, p = 0, port, port_to;
+ u32 ip, ip_to, p = 0, port, port_to;
u32 timeout = h->timeout;
bool with_ports = false;
int ret;
return ip_set_eexist(ret, flags) ? 0 : ret;
}
- ip = ntohl(data.ip);
+ ip_to = ip = ntohl(data.ip);
if (tb[IPSET_ATTR_IP_TO]) {
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
if (ret)
if (!cidr || cidr > 32)
return -IPSET_ERR_INVALID_CIDR;
ip_set_mask_from_to(ip, ip_to, cidr);
- } else
- ip_to = ip;
+ }
port_to = port = ntohs(data.port);
if (with_ports && tb[IPSET_ATTR_PORT_TO]) {
const struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipportip4_elem data = { };
- u32 ip, ip_to = 0, p = 0, port, port_to;
+ u32 ip, ip_to, p = 0, port, port_to;
u32 timeout = h->timeout;
bool with_ports = false;
int ret;
return ip_set_eexist(ret, flags) ? 0 : ret;
}
- ip = ntohl(data.ip);
+ ip_to = ip = ntohl(data.ip);
if (tb[IPSET_ATTR_IP_TO]) {
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
if (ret)
if (!cidr || cidr > 32)
return -IPSET_ERR_INVALID_CIDR;
ip_set_mask_from_to(ip, ip_to, cidr);
- } else
- ip_to = ip;
+ }
port_to = port = ntohs(data.port);
if (with_ports && tb[IPSET_ATTR_PORT_TO]) {
const struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipportnet4_elem data = { .cidr = HOST_MASK - 1 };
- u32 ip, ip_to = 0, p = 0, port, port_to;
- u32 ip2_from = 0, ip2_to, ip2_last, ip2;
+ u32 ip, ip_to, p = 0, port, port_to;
+ u32 ip2_from, ip2_to, ip2_last, ip2;
u32 timeout = h->timeout;
bool with_ports = false;
u8 cidr;
return ip_set_eexist(ret, flags) ? 0 : ret;
}
+ ip_to = ip;
if (tb[IPSET_ATTR_IP_TO]) {
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
if (ret)
if (port > port_to)
swap(port, port_to);
}
+
+ ip2_to = ip2_from;
if (tb[IPSET_ATTR_IP2_TO]) {
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2_TO], &ip2_to);
if (ret)
[IPSET_ATTR_IP] = { .type = NLA_NESTED },
[IPSET_ATTR_IP_TO] = { .type = NLA_NESTED },
[IPSET_ATTR_IFACE] = { .type = NLA_NUL_STRING,
- .len = IPSET_MAXNAMELEN - 1 },
+ .len = IFNAMSIZ - 1 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
[IPSET_ATTR_CIDR] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
struct ip_vs_proto_data *pd;
#endif
+ memset(u, 0, sizeof (*u));
+
#ifdef CONFIG_IP_VS_PROTO_TCP
pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
u->tcp_timeout = pd->timeout_table[IP_VS_TCP_S_ESTABLISHED] / HZ;
{
struct ip_vs_timeout_user t;
- memset(&t, 0, sizeof(t));
__ip_vs_get_timeouts(net, &t);
if (copy_to_user(user, &t, sizeof(t)) != 0)
ret = -EFAULT;
flowi4_to_flowi(&fl1), false)) {
if (!afinfo->route(&init_net, (struct dst_entry **)&rt2,
flowi4_to_flowi(&fl2), false)) {
- if (rt1->rt_gateway == rt2->rt_gateway &&
+ if (rt_nexthop(rt1, fl1.daddr) ==
+ rt_nexthop(rt2, fl2.daddr) &&
rt1->dst.dev == rt2->dst.dev)
ret = 1;
dst_release(&rt2->dst);
static LIST_HEAD(cttimeout_list);
static const struct nla_policy cttimeout_nla_policy[CTA_TIMEOUT_MAX+1] = {
- [CTA_TIMEOUT_NAME] = { .type = NLA_NUL_STRING },
+ [CTA_TIMEOUT_NAME] = { .type = NLA_NUL_STRING,
+ .len = CTNL_TIMEOUT_NAME_MAX - 1},
[CTA_TIMEOUT_L3PROTO] = { .type = NLA_U16 },
[CTA_TIMEOUT_L4PROTO] = { .type = NLA_U8 },
[CTA_TIMEOUT_DATA] = { .type = NLA_NESTED },
typeof(nf_ct_timeout_find_get_hook) timeout_find_get;
struct ctnl_timeout *timeout;
struct nf_conn_timeout *timeout_ext;
- const struct ipt_entry *e = par->entryinfo;
struct nf_conntrack_l4proto *l4proto;
int ret = 0;
+ u8 proto;
rcu_read_lock();
timeout_find_get = rcu_dereference(nf_ct_timeout_find_get_hook);
goto out;
}
- if (e->ip.invflags & IPT_INV_PROTO) {
+ proto = xt_ct_find_proto(par);
+ if (!proto) {
ret = -EINVAL;
- pr_info("You cannot use inversion on L4 protocol\n");
+ pr_info("You must specify a L4 protocol, and not use "
+ "inversions on it.\n");
goto out;
}
/* Make sure the timeout policy matches any existing protocol tracker,
* otherwise default to generic.
*/
- l4proto = __nf_ct_l4proto_find(par->family, e->ip.proto);
+ l4proto = __nf_ct_l4proto_find(par->family, proto);
if (timeout->l4proto->l4proto != l4proto->l4proto) {
ret = -EINVAL;
pr_info("Timeout policy `%s' can only be used by L4 protocol "
fl4.daddr = info->gw.ip;
fl4.flowi4_tos = RT_TOS(iph->tos);
fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
+ fl4.flowi4_flags = FLOWI_FLAG_KNOWN_NH;
rt = ip_route_output_key(net, &fl4);
if (IS_ERR(rt))
return false;
.family = NFPROTO_IPV4,
.table = "nat",
.hooks = (1 << NF_INET_POST_ROUTING) |
- (1 << NF_INET_LOCAL_OUT),
+ (1 << NF_INET_LOCAL_IN),
.me = THIS_MODULE,
},
{
.family = NFPROTO_IPV4,
.table = "nat",
.hooks = (1 << NF_INET_PRE_ROUTING) |
- (1 << NF_INET_LOCAL_IN),
+ (1 << NF_INET_LOCAL_OUT),
.me = THIS_MODULE,
},
{
.targetsize = sizeof(struct nf_nat_range),
.table = "nat",
.hooks = (1 << NF_INET_POST_ROUTING) |
- (1 << NF_INET_LOCAL_OUT),
+ (1 << NF_INET_LOCAL_IN),
.me = THIS_MODULE,
},
{
.targetsize = sizeof(struct nf_nat_range),
.table = "nat",
.hooks = (1 << NF_INET_PRE_ROUTING) |
- (1 << NF_INET_LOCAL_IN),
+ (1 << NF_INET_LOCAL_OUT),
.me = THIS_MODULE,
},
};
static DEFINE_RWLOCK(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);
+#define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock))
+
static ATOMIC_NOTIFIER_HEAD(netlink_chain);
static inline u32 netlink_group_mask(u32 group)
struct hlist_node *node;
unsigned long mask;
unsigned int i;
+ struct listeners *listeners;
+
+ listeners = nl_deref_protected(tbl->listeners);
+ if (!listeners)
+ return;
for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
mask = 0;
if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
mask |= nlk_sk(sk)->groups[i];
}
- tbl->listeners->masks[i] = mask;
+ listeners->masks[i] = mask;
}
/* this function is only called with the netlink table "grabbed", which
* makes sure updates are visible before bind or setsockopt return. */
if (netlink_is_kernel(sk)) {
BUG_ON(nl_table[sk->sk_protocol].registered == 0);
if (--nl_table[sk->sk_protocol].registered == 0) {
- kfree(nl_table[sk->sk_protocol].listeners);
+ struct listeners *old;
+
+ old = nl_deref_protected(nl_table[sk->sk_protocol].listeners);
+ RCU_INIT_POINTER(nl_table[sk->sk_protocol].listeners, NULL);
+ kfree_rcu(old, rcu);
nl_table[sk->sk_protocol].module = NULL;
nl_table[sk->sk_protocol].bind = NULL;
nl_table[sk->sk_protocol].flags = 0;
rcu_read_lock();
listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);
- if (group - 1 < nl_table[sk->sk_protocol].groups)
+ if (listeners && group - 1 < nl_table[sk->sk_protocol].groups)
res = test_bit(group - 1, listeners->masks);
rcu_read_unlock();
new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
if (!new)
return -ENOMEM;
- old = rcu_dereference_protected(tbl->listeners, 1);
+ old = nl_deref_protected(tbl->listeners);
memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
rcu_assign_pointer(tbl->listeners, new);
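Taken together, the netlink hunks above convert a bare kfree() of the per-protocol listeners array into the standard RCU retire sequence: dereference under the writer lock, unpublish the pointer, then free only after a grace period, while readers are taught to tolerate NULL. A minimal sketch of that sequence with illustrative names (the struct layout is assumed, not copied from af_netlink.c):

    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct listeners {
        struct rcu_head rcu;            /* required by kfree_rcu() */
        unsigned long masks[];
    };

    struct table {
        struct listeners __rcu *listeners;
    };

    static DEFINE_SPINLOCK(table_lock); /* writer-side lock */

    static void retire_listeners(struct table *tbl)
    {
        struct listeners *old;

        /* the writer holds table_lock, so a _protected deref is safe */
        old = rcu_dereference_protected(tbl->listeners,
                                        lockdep_is_held(&table_lock));
        if (!old)
            return;                     /* nothing published */

        RCU_INIT_POINTER(tbl->listeners, NULL); /* unpublish first */
        kfree_rcu(old, rcu);    /* free after current readers drop out */
    }

Readers do the matching half with rcu_dereference() inside an RCU read-side section and must check for NULL, exactly as the netlink_has_listeners() hunk above now does.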
local->remote_miu = LLCP_DEFAULT_MIU;
local->remote_lto = LLCP_DEFAULT_LTO;
- list_add(&llcp_devices, &local->list);
+ list_add(&local->list, &llcp_devices);
return 0;
}
/* We only match on the lower 8 bits of the opcode. */
if (ntohs(arp->ar_op) <= 0xff)
key->ip.proto = ntohs(arp->ar_op);
-
- if (key->ip.proto == ARPOP_REQUEST
- || key->ip.proto == ARPOP_REPLY) {
- memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src));
- memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst));
- memcpy(key->ipv4.arp.sha, arp->ar_sha, ETH_ALEN);
- memcpy(key->ipv4.arp.tha, arp->ar_tha, ETH_ALEN);
- key_len = SW_FLOW_KEY_OFFSET(ipv4.arp);
- }
+ memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src));
+ memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst));
+ memcpy(key->ipv4.arp.sha, arp->ar_sha, ETH_ALEN);
+ memcpy(key->ipv4.arp.tha, arp->ar_tha, ETH_ALEN);
+ key_len = SW_FLOW_KEY_OFFSET(ipv4.arp);
}
} else if (key->eth.type == htons(ETH_P_IPV6)) {
int nh_len; /* IPv6 Header + Extensions */
if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) {
net_warn_ratelimited("%s: dropped over-mtu packet: %d > %d\n",
- ovs_dp_name(vport->dp),
+ netdev_vport->dev->name,
packet_length(skb), mtu);
goto error;
}
* grp->index is the index of the group; and grp->slot_shift
* is the shift for the corresponding (scaled) sigma_i.
*/
-#define QFQ_MAX_INDEX 19
-#define QFQ_MAX_WSHIFT 16
+#define QFQ_MAX_INDEX 24
+#define QFQ_MAX_WSHIFT 12
#define QFQ_MAX_WEIGHT (1<<QFQ_MAX_WSHIFT)
-#define QFQ_MAX_WSUM (2*QFQ_MAX_WEIGHT)
+#define QFQ_MAX_WSUM (16*QFQ_MAX_WEIGHT)
#define FRAC_BITS 30 /* fixed point arithmetic */
#define ONE_FP (1UL << FRAC_BITS)
#define IWSUM (ONE_FP/QFQ_MAX_WSUM)
-#define QFQ_MTU_SHIFT 11
+#define QFQ_MTU_SHIFT 16 /* to support TSO/GSO */
#define QFQ_MIN_SLOT_SHIFT (FRAC_BITS + QFQ_MTU_SHIFT - QFQ_MAX_INDEX)
+#define QFQ_MIN_LMAX 256 /* min possible lmax for a class */
/*
* Possible group states. These values are used as indexes for the bitmaps
q->wsum += delta_w;
}
+static void qfq_update_reactivate_class(struct qfq_sched *q,
+ struct qfq_class *cl,
+ u32 inv_w, u32 lmax, int delta_w)
+{
+ bool need_reactivation = false;
+ int i = qfq_calc_index(inv_w, lmax);
+
+ if (&q->groups[i] != cl->grp && cl->qdisc->q.qlen > 0) {
+ /*
+ * shift cl->F back, to not charge the
+ * class for the not-yet-served head
+ * packet
+ */
+ cl->F = cl->S;
+ /* remove class from its slot in the old group */
+ qfq_deactivate_class(q, cl);
+ need_reactivation = true;
+ }
+
+ qfq_update_class_params(q, cl, lmax, inv_w, delta_w);
+
+ if (need_reactivation) /* activate in new group */
+ qfq_activate_class(q, cl, qdisc_peek_len(cl->qdisc));
+}
+
+
static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
struct nlattr **tca, unsigned long *arg)
{
struct qfq_class *cl = (struct qfq_class *)*arg;
struct nlattr *tb[TCA_QFQ_MAX + 1];
u32 weight, lmax, inv_w;
- int i, err;
+ int err;
int delta_w;
if (tca[TCA_OPTIONS] == NULL) {
if (tb[TCA_QFQ_LMAX]) {
lmax = nla_get_u32(tb[TCA_QFQ_LMAX]);
- if (!lmax || lmax > (1UL << QFQ_MTU_SHIFT)) {
+ if (lmax < QFQ_MIN_LMAX || lmax > (1UL << QFQ_MTU_SHIFT)) {
pr_notice("qfq: invalid max length %u\n", lmax);
return -EINVAL;
}
} else
- lmax = 1UL << QFQ_MTU_SHIFT;
+ lmax = psched_mtu(qdisc_dev(sch));
if (cl != NULL) {
- bool need_reactivation = false;
-
if (tca[TCA_RATE]) {
err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
qdisc_root_sleeping_lock(sch),
if (lmax == cl->lmax && inv_w == cl->inv_w)
return 0; /* nothing to update */
- i = qfq_calc_index(inv_w, lmax);
sch_tree_lock(sch);
- if (&q->groups[i] != cl->grp && cl->qdisc->q.qlen > 0) {
- /*
- * shift cl->F back, to not charge the
- * class for the not-yet-served head
- * packet
- */
- cl->F = cl->S;
- /* remove class from its slot in the old group */
- qfq_deactivate_class(q, cl);
- need_reactivation = true;
- }
-
- qfq_update_class_params(q, cl, lmax, inv_w, delta_w);
-
- if (need_reactivation) /* activate in new group */
- qfq_activate_class(q, cl, qdisc_peek_len(cl->qdisc));
+ qfq_update_reactivate_class(q, cl, inv_w, lmax, delta_w);
sch_tree_unlock(sch);
return 0;
/*
- * XXX we should make sure that slot becomes less than 32.
- * This is guaranteed by the input values.
- * roundedS is always cl->S rounded on grp->slot_shift bits.
+ * If the weight and lmax (max_pkt_size) of the classes do not change,
+ * then QFQ guarantees that the slot index is never higher than
+ * 2 + ((1<<QFQ_MTU_SHIFT)/QFQ_MIN_LMAX) * (QFQ_MAX_WEIGHT/QFQ_MAX_WSUM).
+ *
+ * With the current values of the above constants, the index is
+ * then guaranteed to never be higher than 2 + 256 * (1 / 16) = 18.
+ *
+ * When the weight of a class is increased or the lmax of the class is
+ * decreased, a new class with smaller slot size may happen to be
+ * activated. The activation of this class should be properly delayed
+ * to when the service of the class has finished in the ideal system
+ * tracked by QFQ. If the activation of the class is not delayed to
+ * this reference time instant, then this class may be unjustly served
+ * before other classes waiting for service. This may
+ * (infrequently) cause the above bound on the slot index to be
+ * violated for some of these unlucky classes.
+ *
+ * Instead of delaying the activation of the new class, which is quite
+ * complex, the following inaccurate but simple solution is used: if
+ * the slot index is higher than QFQ_MAX_SLOTS-2, then the timestamps
+ * of the class are shifted backward so as to let the slot index
+ * become equal to QFQ_MAX_SLOTS-2. This threshold is used because, if
+ * the slot index is above it, then the data structure implementing
+ * the bucket list either gets immediately corrupted or may get
+ * corrupted on a possible next packet arrival that causes the start
+ * time of the group to be shifted backward.
*/
static void qfq_slot_insert(struct qfq_group *grp, struct qfq_class *cl,
u64 roundedS)
{
u64 slot = (roundedS - grp->S) >> grp->slot_shift;
- unsigned int i = (grp->front + slot) % QFQ_MAX_SLOTS;
+ unsigned int i; /* slot index in the bucket list */
+
+ if (unlikely(slot > QFQ_MAX_SLOTS - 2)) {
+ u64 deltaS = roundedS - grp->S -
+ ((u64)(QFQ_MAX_SLOTS - 2)<<grp->slot_shift);
+ cl->S -= deltaS;
+ cl->F -= deltaS;
+ slot = QFQ_MAX_SLOTS - 2;
+ }
+
+ i = (grp->front + slot) % QFQ_MAX_SLOTS;
hlist_add_head(&cl->next, &grp->slots[i]);
__set_bit(slot, &grp->full_slots);
}
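As a sanity check on the figures quoted in the comment above qfq_slot_insert(), the bound can be recomputed from the new constants. A standalone sketch (QFQ_MAX_SLOTS is the pre-existing value in sch_qfq.c, repeated here because the hunks do not show it):

    #include <assert.h>

    #define QFQ_MAX_WSHIFT  12
    #define QFQ_MAX_WEIGHT  (1 << QFQ_MAX_WSHIFT)
    #define QFQ_MAX_WSUM    (16 * QFQ_MAX_WEIGHT)
    #define QFQ_MTU_SHIFT   16
    #define QFQ_MIN_LMAX    256
    #define QFQ_MAX_SLOTS   32

    int main(void)
    {
        /* 2 + ((1<<QFQ_MTU_SHIFT)/QFQ_MIN_LMAX) * (QFQ_MAX_WEIGHT/QFQ_MAX_WSUM),
         * with the final division done last so the 1/16 factor is not
         * truncated to zero by integer arithmetic
         */
        int bound = 2 + (1 << QFQ_MTU_SHIFT) / QFQ_MIN_LMAX
                        * QFQ_MAX_WEIGHT / QFQ_MAX_WSUM;

        assert(bound == 18);                /* the "2 + 256 * (1/16)" figure */
        assert(bound <= QFQ_MAX_SLOTS - 2); /* so the clamp is a rare safety net */
        return 0;
    }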
pr_debug("qfq_enqueue: cl = %x\n", cl->common.classid);
+ if (unlikely(cl->lmax < qdisc_pkt_len(skb))) {
+ pr_debug("qfq: increasing maxpkt from %u to %u for class %u",
+ cl->lmax, qdisc_pkt_len(skb), cl->common.classid);
+ qfq_update_reactivate_class(q, cl, cl->inv_w,
+ qdisc_pkt_len(skb), 0);
+ }
+
err = qdisc_enqueue(skb, cl->qdisc);
if (unlikely(err != NET_XMIT_SUCCESS)) {
pr_debug("qfq_enqueue: enqueue failed %d\n", err);
msg = sctp_datamsg_new(GFP_KERNEL);
if (!msg)
- return NULL;
+ return ERR_PTR(-ENOMEM);
/* Note: Calculate this outside of the loop, so that all fragments
* have the same expiration.
chunk = sctp_make_datafrag_empty(asoc, sinfo, len, frag, 0);
- if (!chunk)
+ if (!chunk) {
+ err = -ENOMEM;
goto errout;
+ }
+
err = sctp_user_addto_chunk(chunk, offset, len, msgh->msg_iov);
if (err < 0)
- goto errout;
+ goto errout_chunk_free;
offset += len;
chunk = sctp_make_datafrag_empty(asoc, sinfo, over, frag, 0);
- if (!chunk)
+ if (!chunk) {
+ err = -ENOMEM;
goto errout;
+ }
err = sctp_user_addto_chunk(chunk, offset, over,msgh->msg_iov);
__skb_pull(chunk->skb, (__u8 *)chunk->chunk_hdr
- (__u8 *)chunk->skb->data);
if (err < 0)
- goto errout;
+ goto errout_chunk_free;
sctp_datamsg_assign(msg, chunk);
list_add_tail(&chunk->frag_list, &msg->chunks);
return msg;
+errout_chunk_free:
+ sctp_chunk_free(chunk);
+
errout:
list_for_each_safe(pos, temp, &msg->chunks) {
list_del_init(pos);
sctp_chunk_free(chunk);
}
sctp_datamsg_put(msg);
- return NULL;
+ return ERR_PTR(err);
}
/* Check whether this message has expired. */
.open = sctp_snmp_seq_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = single_release,
+ .release = single_release_net,
};
/* Set up the proc fs entry for 'snmp' object. */
.open = sctp_eps_seq_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = seq_release,
+ .release = seq_release_net,
};
/* Set up the proc fs entry for 'eps' object. */
.open = sctp_assocs_seq_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = seq_release,
+ .release = seq_release_net,
};
/* Set up the proc fs entry for 'assocs' object. */
.open = sctp_remaddr_seq_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = seq_release,
+ .release = seq_release_net,
};
int __net_init sctp_remaddr_proc_init(struct net *net)
void *addr_buf;
struct sctp_af *af;
- SCTP_DEBUG_PRINTK("sctp_setsocktopt_bindx: sk %p addrs %p"
+ SCTP_DEBUG_PRINTK("sctp_setsockopt_bindx: sk %p addrs %p"
" addrs_size %d opt %d\n", sk, addrs, addrs_size, op);
if (unlikely(addrs_size <= 0))
/* Break the message into multiple chunks of maximum size. */
datamsg = sctp_datamsg_from_user(asoc, sinfo, msg, msg_len);
- if (!datamsg) {
- err = -ENOMEM;
+ if (IS_ERR(datamsg)) {
+ err = PTR_ERR(datamsg);
goto out_free;
}
* 1/8, rto_alpha would be expressed as 3.
*/
tp->rttvar = tp->rttvar - (tp->rttvar >> net->sctp.rto_beta)
- + ((abs(tp->srtt - rtt)) >> net->sctp.rto_beta);
+ + (((__u32)abs64((__s64)tp->srtt - (__s64)rtt)) >> net->sctp.rto_beta);
tp->srtt = tp->srtt - (tp->srtt >> net->sctp.rto_alpha)
+ (rtt >> net->sctp.rto_alpha);
} else {
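The abs64() change above matters because tp->srtt and rtt are unsigned 32-bit values: their difference wraps around before the kernel's abs(), which evaluates in a signed long, ever sees a negative number, so on 64-bit builds the bogus wrapped value survives untouched. A userspace sketch of the failure mode (kabs() is a stand-in for the kernel abs() semantics on a 64-bit build):

    #include <assert.h>
    #include <stdint.h>

    /* stand-in for the kernel's abs(), which evaluates in a signed long */
    #define kabs(x) ({ long __r = (x); __r < 0 ? -__r : __r; })

    int main(void)
    {
        uint32_t srtt = 100, rtt = 300;

        /* the u32 difference wraps to 4294967096; as a 64-bit long it
         * is already positive, so abs() never corrects it
         */
        assert(kabs(srtt - rtt) == 4294967096L);

        /* widening to signed 64-bit first, as the patched
         * abs64((__s64)tp->srtt - (__s64)rtt) does, recovers 200
         */
        assert(kabs((int64_t)srtt - (int64_t)rtt) == 200);
        return 0;
    }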
xprt_free_allocation(req);
dprintk("RPC: setup backchannel transport failed\n");
- return -1;
+ return -ENOMEM;
}
EXPORT_SYMBOL_GPL(xprt_setup_backchannel);
void (*old_data_ready)(struct sock *, int);
void (*old_state_change)(struct sock *);
void (*old_write_space)(struct sock *);
- void (*old_error_report)(struct sock *);
};
/*
dprintk("RPC: sendmsg returned unrecognized error %d\n",
-status);
case -ECONNRESET:
- case -EPIPE:
xs_tcp_shutdown(xprt);
case -ECONNREFUSED:
case -ENOTCONN:
+ case -EPIPE:
clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
}
transport->old_data_ready = sk->sk_data_ready;
transport->old_state_change = sk->sk_state_change;
transport->old_write_space = sk->sk_write_space;
- transport->old_error_report = sk->sk_error_report;
}
static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *sk)
sk->sk_data_ready = transport->old_data_ready;
sk->sk_state_change = transport->old_state_change;
sk->sk_write_space = transport->old_write_space;
- sk->sk_error_report = transport->old_error_report;
}
static void xs_reset_transport(struct sock_xprt *transport)
xprt_clear_connecting(xprt);
}
-static void xs_sock_mark_closed(struct rpc_xprt *xprt)
+static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt)
{
smp_mb__before_clear_bit();
clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
clear_bit(XPRT_CLOSING, &xprt->state);
smp_mb__after_clear_bit();
+}
+
+static void xs_sock_mark_closed(struct rpc_xprt *xprt)
+{
+ xs_sock_reset_connection_flags(xprt);
/* Mark transport as closed and wake up all pending tasks */
xprt_disconnect_done(xprt);
}
case TCP_CLOSE_WAIT:
/* The server initiated a shutdown of the socket */
xprt->connect_cookie++;
+ clear_bit(XPRT_CONNECTED, &xprt->state);
xs_tcp_force_close(xprt);
case TCP_CLOSING:
/*
read_unlock_bh(&sk->sk_callback_lock);
}
-/**
- * xs_error_report - callback mainly for catching socket errors
- * @sk: socket
- */
-static void xs_error_report(struct sock *sk)
-{
- struct rpc_xprt *xprt;
-
- read_lock_bh(&sk->sk_callback_lock);
- if (!(xprt = xprt_from_sock(sk)))
- goto out;
- dprintk("RPC: %s client %p...\n"
- "RPC: error %d\n",
- __func__, xprt, sk->sk_err);
- xprt_wake_pending_tasks(xprt, -EAGAIN);
-out:
- read_unlock_bh(&sk->sk_callback_lock);
-}
-
static void xs_write_space(struct sock *sk)
{
struct socket *sock;
sk->sk_user_data = xprt;
sk->sk_data_ready = xs_local_data_ready;
sk->sk_write_space = xs_udp_write_space;
- sk->sk_error_report = xs_error_report;
sk->sk_allocation = GFP_ATOMIC;
xprt_clear_connected(xprt);
sk->sk_user_data = xprt;
sk->sk_data_ready = xs_udp_data_ready;
sk->sk_write_space = xs_udp_write_space;
- sk->sk_error_report = xs_error_report;
sk->sk_no_check = UDP_CSUM_NORCV;
sk->sk_allocation = GFP_ATOMIC;
any.sa_family = AF_UNSPEC;
result = kernel_connect(transport->sock, &any, sizeof(any), 0);
if (!result)
- xs_sock_mark_closed(&transport->xprt);
- else
- dprintk("RPC: AF_UNSPEC connect return code %d\n",
- result);
+ xs_sock_reset_connection_flags(&transport->xprt);
+ dprintk("RPC: AF_UNSPEC connect return code %d\n", result);
}
static void xs_tcp_reuse_connection(struct sock_xprt *transport)
sk->sk_data_ready = xs_tcp_data_ready;
sk->sk_state_change = xs_tcp_state_change;
sk->sk_write_space = xs_tcp_write_space;
- sk->sk_error_report = xs_error_report;
sk->sk_allocation = GFP_ATOMIC;
/* socket options */
return;
handler_enabled = 0;
- tasklet_disable(&tipc_tasklet);
tasklet_kill(&tipc_tasklet);
spin_lock_bh(&qitem_lock);
for (i = 0; i < sband->n_channels; i++) {
sband->channels[i].orig_flags =
sband->channels[i].flags;
- sband->channels[i].orig_mag =
- sband->channels[i].max_antenna_gain;
+ sband->channels[i].orig_mag = INT_MAX;
sband->channels[i].orig_mpwr =
sband->channels[i].max_power;
sband->channels[i].band = band;
.reason_code = reason,
.ie = ie,
.ie_len = ie_len,
+ .local_state_change = local_state_change,
};
ASSERT_WDEV_LOCK(wdev);
- if (local_state_change) {
- if (wdev->current_bss &&
- ether_addr_equal(wdev->current_bss->pub.bssid, bssid)) {
- cfg80211_unhold_bss(wdev->current_bss);
- cfg80211_put_bss(&wdev->current_bss->pub);
- wdev->current_bss = NULL;
- }
-
+ if (local_state_change && (!wdev->current_bss ||
+ !ether_addr_equal(wdev->current_bss->pub.bssid, bssid)))
return 0;
- }
return rdev->ops->deauth(&rdev->wiphy, dev, &req);
}
.reg_rules = {
/* IEEE 802.11b/g, channels 1..11 */
REG_RULE(2412-10, 2462+10, 40, 6, 20, 0),
- /* IEEE 802.11b/g, channels 12..13. No HT40
- * channel fits here. */
- REG_RULE(2467-10, 2472+10, 20, 6, 20,
+ /* IEEE 802.11b/g, channels 12..13. */
+ REG_RULE(2467-10, 2472+10, 40, 6, 20,
NL80211_RRF_PASSIVE_SCAN |
NL80211_RRF_NO_IBSS),
/* IEEE 802.11 channel 14 - Only JP enables
map_regdom_flags(reg_rule->flags) | bw_flags;
chan->max_antenna_gain = chan->orig_mag =
(int) MBI_TO_DBI(power_rule->max_antenna_gain);
- chan->max_power = chan->orig_mpwr =
+ chan->max_reg_power = chan->max_power = chan->orig_mpwr =
(int) MBM_TO_DBM(power_rule->max_eirp);
return;
}
chan->flags |= map_regdom_flags(reg_rule->flags) | bw_flags;
chan->max_antenna_gain = (int) MBI_TO_DBI(power_rule->max_antenna_gain);
- chan->max_power = (int) MBM_TO_DBM(power_rule->max_eirp);
+ chan->max_reg_power = chan->max_power =
+ (int) MBM_TO_DBM(power_rule->max_eirp);
}
static void handle_band_custom(struct wiphy *wiphy, enum ieee80211_band band,
}
EXPORT_SYMBOL(ieee80211_get_hdrlen_from_skb);
-static int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr)
+unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr)
{
int ae = meshhdr->flags & MESH_FLAGS_AE;
- /* 7.1.3.5a.2 */
+ /* 802.11-2012, 8.2.4.7.3 */
switch (ae) {
+ default:
case 0:
return 6;
case MESH_FLAGS_AE_A4:
return 12;
case MESH_FLAGS_AE_A5_A6:
return 18;
- case (MESH_FLAGS_AE_A4 | MESH_FLAGS_AE_A5_A6):
- return 24;
- default:
- return 6;
}
}
+EXPORT_SYMBOL(ieee80211_get_mesh_hdrlen);
int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
enum nl80211_iftype iftype)
/* make sure meshdr->flags is on the linear part */
if (!pskb_may_pull(skb, hdrlen + 1))
return -1;
+ if (meshdr->flags & MESH_FLAGS_AE_A4)
+ return -1;
if (meshdr->flags & MESH_FLAGS_AE_A5_A6) {
skb_copy_bits(skb, hdrlen +
offsetof(struct ieee80211s_hdr, eaddr1),
/* make sure meshdr->flags is on the linear part */
if (!pskb_may_pull(skb, hdrlen + 1))
return -1;
+ if (meshdr->flags & MESH_FLAGS_AE_A5_A6)
+ return -1;
if (meshdr->flags & MESH_FLAGS_AE_A4)
skb_copy_bits(skb, hdrlen +
offsetof(struct ieee80211s_hdr, eaddr1),
__modinst: $(modules)
@:
+# Don't stop modules_install if we can't sign external modules.
quiet_cmd_modules_install = INSTALL $@
- cmd_modules_install = mkdir -p $(2); cp $@ $(2) ; $(mod_strip_cmd) $(2)/$(notdir $@) ; $(mod_sign_cmd) $(2)/$(notdir $@)
+ cmd_modules_install = mkdir -p $(2); cp $@ $(2) ; $(mod_strip_cmd) $(2)/$(notdir $@) ; $(mod_sign_cmd) $(2)/$(notdir $@) $(patsubst %,|| true,$(KBUILD_EXTMOD))
# Modules built outside the kernel source tree go into extra by default
INSTALL_MOD_DIR ?= extra
}
if ($realfile =~ m@^(drivers/net/|net/)@ &&
- $rawline !~ m@^\+[ \t]*(\/\*|\*\/)@ &&
- $rawline =~ m@^\+[ \t]*.+\*\/[ \t]*$@) {
+ $rawline !~ m@^\+[ \t]*\*/[ \t]*$@ && #trailing */
+ $rawline !~ m@^\+.*/\*.*\*/[ \t]*$@ && #inline /*...*/
+ $rawline !~ m@^\+.*\*{2,}/[ \t]*$@ && #trailing **/
+ $rawline =~ m@^\+[ \t]*.+\*\/[ \t]*$@) { #non blank */
WARN("NETWORKING_BLOCK_COMMENT_STYLE",
"networking block comments put the trailing */ on a separate line\n" . $herecurr);
}
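The tightened test now exempts a bare trailing */, a one-line /* ... */ comment, and a **/ terminator; the only remaining trigger is a closing */ that shares its line with comment text. Illustrative snippets of what passes and what still warns:

    /* a one-line comment like this is fine */

    /* the preferred multi-line networking style puts the
     * closing marker alone on its own line
     */

    /* this last form still triggers the warning, because text
     * precedes the closing marker on the same line */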
$line =~ s/(^|\s)(inline)\b/$1__$2__/g;
$line =~ s/(^|\s)(asm)\b(\s|[(]|$)/$1__$2__$3/g;
$line =~ s/(^|\s|[(])(volatile)\b(\s|[(]|$)/$1__$2__$3/g;
+ $line =~ s/#ifndef _UAPI/#ifndef /;
+ $line =~ s/#define _UAPI/#define /;
+ $line =~ s!#endif /[*] _UAPI!#endif /* !;
printf {$out} "%s", $line;
}
close $out;
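The three added substitutions strip the _UAPI prefix from exported header guards, so the installed copy of a header no longer shares its guard symbol with the in-kernel version. For a hypothetical uapi header the transformation looks like this:

    /* in-tree source, include/uapi/linux/foo.h (hypothetical) */
    #ifndef _UAPI_LINUX_FOO_H
    #define _UAPI_LINUX_FOO_H
    /* ... */
    #endif /* _UAPI_LINUX_FOO_H */

    /* what "make headers_install" emits after the change */
    #ifndef _LINUX_FOO_H
    #define _LINUX_FOO_H
    /* ... */
    #endif /* _LINUX_FOO_H */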
#include <assert.h>
#include <stdio.h>
-#include <sys/queue.h>
+#include "list.h"
#ifndef __cplusplus
#include <stdbool.h>
#endif
#define MENU_ROOT 0x0002
struct jump_key {
- CIRCLEQ_ENTRY(jump_key) entries;
+ struct list_head entries;
size_t offset;
struct menu *target;
int index;
};
-CIRCLEQ_HEAD(jk_head, jump_key);
#define JUMP_NB 9
--- /dev/null
+#ifndef LIST_H
+#define LIST_H
+
+/*
+ * Copied from include/linux/...
+ */
+
+#undef offsetof
+#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
+
+/**
+ * container_of - cast a member of a structure out to the containing structure
+ * @ptr: the pointer to the member.
+ * @type: the type of the container struct this is embedded in.
+ * @member: the name of the member within the struct.
+ *
+ */
+#define container_of(ptr, type, member) ({ \
+ const typeof( ((type *)0)->member ) *__mptr = (ptr); \
+ (type *)( (char *)__mptr - offsetof(type,member) );})
+
+
+struct list_head {
+ struct list_head *next, *prev;
+};
+
+
+#define LIST_HEAD_INIT(name) { &(name), &(name) }
+
+#define LIST_HEAD(name) \
+ struct list_head name = LIST_HEAD_INIT(name)
+
+/**
+ * list_entry - get the struct for this entry
+ * @ptr: the &struct list_head pointer.
+ * @type: the type of the struct this is embedded in.
+ * @member: the name of the list_struct within the struct.
+ */
+#define list_entry(ptr, type, member) \
+ container_of(ptr, type, member)
+
+/**
+ * list_for_each_entry - iterate over list of given type
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the list_struct within the struct.
+ */
+#define list_for_each_entry(pos, head, member) \
+ for (pos = list_entry((head)->next, typeof(*pos), member); \
+ &pos->member != (head); \
+ pos = list_entry(pos->member.next, typeof(*pos), member))
+
+/**
+ * list_empty - tests whether a list is empty
+ * @head: the list to test.
+ */
+static inline int list_empty(const struct list_head *head)
+{
+ return head->next == head;
+}
+
+/*
+ * Insert a new entry between two known consecutive entries.
+ *
+ * This is only for internal list manipulation where we know
+ * the prev/next entries already!
+ */
+static inline void __list_add(struct list_head *_new,
+ struct list_head *prev,
+ struct list_head *next)
+{
+ next->prev = _new;
+ _new->next = next;
+ _new->prev = prev;
+ prev->next = _new;
+}
+
+/**
+ * list_add_tail - add a new entry
+ * @new: new entry to be added
+ * @head: list head to add it before
+ *
+ * Insert a new entry before the specified head.
+ * This is useful for implementing queues.
+ */
+static inline void list_add_tail(struct list_head *_new, struct list_head *head)
+{
+ __list_add(_new, head->prev, head);
+}
+
+#endif
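Since the new list.h is a trimmed copy of the kernel API, the kconfig changes that follow all reduce to the same few calls. A minimal standalone usage sketch (the item type is illustrative):

    #include <stdio.h>
    #include "list.h"

    struct item {
        int value;
        struct list_head entries;
    };

    int main(void)
    {
        LIST_HEAD(head);
        struct item a = { .value = 1 }, b = { .value = 2 };
        struct item *pos;

        list_add_tail(&a.entries, &head);   /* head -> a */
        list_add_tail(&b.entries, &head);   /* head -> a -> b */

        list_for_each_entry(pos, &head, entries)
            printf("%d\n", pos->value);     /* prints 1 then 2 */

        return list_empty(&head);           /* 0: the list is not empty */
    }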
P(menu_get_parent_menu,struct menu *,(struct menu *menu));
P(menu_has_help,bool,(struct menu *menu));
P(menu_get_help,const char *,(struct menu *menu));
-P(get_symbol_str, void, (struct gstr *r, struct symbol *sym, struct jk_head
+P(get_symbol_str, void, (struct gstr *r, struct symbol *sym, struct list_head
*head));
-P(get_relations_str, struct gstr, (struct symbol **sym_arr, struct jk_head
+P(get_relations_str, struct gstr, (struct symbol **sym_arr, struct list_head
*head));
P(menu_get_ext_help,void,(struct menu *menu, struct gstr *help));
struct search_data {
- struct jk_head *head;
+ struct list_head *head;
struct menu **targets;
int *keys;
};
struct jump_key *pos;
int k = 0;
- CIRCLEQ_FOREACH(pos, data->head, entries) {
+ list_for_each_entry(pos, data->head, entries) {
if (pos->offset >= start && pos->offset < end) {
char header[4];
sym_arr = sym_re_search(dialog_input);
do {
- struct jk_head head = CIRCLEQ_HEAD_INITIALIZER(head);
+ LIST_HEAD(head);
struct menu *targets[JUMP_NB];
int keys[JUMP_NB + 1], i;
struct search_data data = {
}
static void get_prompt_str(struct gstr *r, struct property *prop,
- struct jk_head *head)
+ struct list_head *head)
{
int i, j;
struct menu *submenu[8], *menu, *location = NULL;
} else
jump->target = location;
- if (CIRCLEQ_EMPTY(head))
+ if (list_empty(head))
jump->index = 0;
else
- jump->index = CIRCLEQ_LAST(head)->index + 1;
+ jump->index = list_entry(head->prev, struct jump_key,
+ entries)->index + 1;
- CIRCLEQ_INSERT_TAIL(head, jump, entries);
+ list_add_tail(&jump->entries, head);
}
if (i > 0) {
/*
* head is optional and may be NULL
*/
-void get_symbol_str(struct gstr *r, struct symbol *sym, struct jk_head *head)
+void get_symbol_str(struct gstr *r, struct symbol *sym,
+ struct list_head *head)
{
bool hit;
struct property *prop;
str_append(r, "\n\n");
}
-struct gstr get_relations_str(struct symbol **sym_arr, struct jk_head *head)
+struct gstr get_relations_str(struct symbol **sym_arr, struct list_head *head)
{
struct symbol *sym;
struct gstr res = str_new();
if ($l == 0x1) {
$len = unpack("C", substr(${$cursor->[2]}, $cursor->[0], 1));
- } elsif ($l = 0x2) {
+ } elsif ($l == 0x2) {
$len = unpack("n", substr(${$cursor->[2]}, $cursor->[0], 2));
- } elsif ($l = 0x3) {
+ } elsif ($l == 0x3) {
$len = unpack("C", substr(${$cursor->[2]}, $cursor->[0], 1)) << 16;
$len = unpack("n", substr(${$cursor->[2]}, $cursor->[0] + 1, 2));
- } elsif ($l = 0x4) {
+ } elsif ($l == 0x4) {
$len = unpack("N", substr(${$cursor->[2]}, $cursor->[0], 4));
} else {
die $x509, ": ", $cursor->[0], ": ASN.1 element too long (", $l, ")\n";
*/
static void free_profile(struct aa_profile *profile)
{
+ struct aa_profile *p;
+
AA_DEBUG("%s(%p)\n", __func__, profile);
if (!profile)
aa_put_dfa(profile->xmatch);
aa_put_dfa(profile->policy.dfa);
- aa_put_profile(profile->replacedby);
+ /* put the profile reference for replacedby, but not via
+ * put_profile(kref_put).
+ * replacedby can form a long chain that can result in cascading
+ * frees that blow the stack because kref_put makes a nested fn
+ * call (it looks like recursion, with free_profile calling
+ * free_profile) for each profile in the chain (lp#1056078).
+ */
+ for (p = profile->replacedby; p; ) {
+ if (atomic_dec_and_test(&p->base.count.refcount)) {
+ /* no more refs on p, grab its replacedby */
+ struct aa_profile *next = p->replacedby;
+ /* break the chain */
+ p->replacedby = NULL;
+ /* now free p, chain is broken */
+ free_profile(p);
+
+ /* follow up with next profile in the chain */
+ p = next;
+ } else
+ break;
+ }
kzfree(profile);
}
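
The loop above is the general cure for this class of bug: convert the recursive put into iteration so stack use stays constant. A stripped-down user-space sketch of the same idea (hypothetical node type; plain free() stands in for kref_put/free_profile):

#include <stdlib.h>

struct node {
	int refcount;
	struct node *replacedby;
};

/* Iterative release: one stack frame regardless of chain length,
 * where a recursive free would consume a frame per node. */
static void put_chain(struct node *p)
{
	while (p && --p->refcount == 0) {
		struct node *next = p->replacedby;
		p->replacedby = NULL;	/* break the chain before freeing */
		free(p);
		p = next;		/* follow the chain iteratively */
	}
}

int main(void)
{
	struct node *a = calloc(1, sizeof(*a));
	struct node *b = calloc(1, sizeof(*b));
	a->refcount = b->refcount = 1;
	a->replacedby = b;
	put_chain(a);	/* frees a, then b, with no recursion */
	return 0;
}
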
struct dev_cgroup {
struct cgroup_subsys_state css;
struct list_head exceptions;
- bool deny_all;
+ enum {
+ DEVCG_DEFAULT_ALLOW,
+ DEVCG_DEFAULT_DENY,
+ } behavior;
};
static inline struct dev_cgroup *css_to_devcgroup(struct cgroup_subsys_state *s)
struct dev_exception_item *ex, *tmp;
list_for_each_entry_safe(ex, tmp, &dev_cgroup->exceptions, list) {
- list_del(&ex->list);
- kfree(ex);
+ list_del_rcu(&ex->list);
+ kfree_rcu(ex, rcu);
}
}
parent_cgroup = cgroup->parent;
if (parent_cgroup == NULL)
- dev_cgroup->deny_all = false;
+ dev_cgroup->behavior = DEVCG_DEFAULT_ALLOW;
else {
parent_dev_cgroup = cgroup_to_devcgroup(parent_cgroup);
mutex_lock(&devcgroup_mutex);
ret = dev_exceptions_copy(&dev_cgroup->exceptions,
&parent_dev_cgroup->exceptions);
- dev_cgroup->deny_all = parent_dev_cgroup->deny_all;
+ dev_cgroup->behavior = parent_dev_cgroup->behavior;
mutex_unlock(&devcgroup_mutex);
if (ret) {
kfree(dev_cgroup);
* - List the exceptions in case the default policy is to deny
* This way, the file remains as a "whitelist of devices"
*/
- if (devcgroup->deny_all == false) {
+ if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) {
set_access(acc, ACC_MASK);
set_majmin(maj, ~0);
set_majmin(min, ~0);
struct dev_exception_item *ex;
bool match = false;
- list_for_each_entry(ex, &dev_cgroup->exceptions, list) {
+ list_for_each_entry_rcu(ex, &dev_cgroup->exceptions, list) {
if ((refex->type & DEV_BLOCK) && !(ex->type & DEV_BLOCK))
continue;
if ((refex->type & DEV_CHAR) && !(ex->type & DEV_CHAR))
* In two cases we'll consider this new exception valid:
* - the dev cgroup has its default policy to allow + exception list:
* the new exception should *not* match any of the exceptions
- * (!deny_all, !match)
+ * (behavior == DEVCG_DEFAULT_ALLOW, !match)
* - the dev cgroup has its default policy to deny + exception list:
* the new exception *should* match the exceptions
- * (deny_all, match)
+ * (behavior == DEVCG_DEFAULT_DENY, match)
*/
- if (dev_cgroup->deny_all == match)
+ if ((dev_cgroup->behavior == DEVCG_DEFAULT_DENY) == match)
return 1;
return 0;
}
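
The comment's two valid cases collapse into that single comparison because "behavior is deny-default" and "the new exception matches" must agree. A throwaway snippet (not part of the patch) enumerating the truth table:

#include <stdio.h>

int main(void)
{
	/* valid == 1 exactly for (allow, no match) and (deny, match) */
	for (int deny = 0; deny <= 1; deny++)
		for (int match = 0; match <= 1; match++)
			printf("deny=%d match=%d valid=%d\n",
			       deny, match, deny == match);
	return 0;
}
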
return may_access(parent, ex);
}
+/**
+ * may_allow_all - checks if it's possible to change the behavior to
+ * allow based on parent's rules.
+ * @parent: device cgroup's parent
+ * returns: != 0 in case it's allowed, 0 otherwise
+ */
+static inline int may_allow_all(struct dev_cgroup *parent)
+{
+ if (!parent)
+ return 1;
+ return parent->behavior == DEVCG_DEFAULT_ALLOW;
+}
+
/*
* Modify the exception list using allow/deny rules.
* CAP_SYS_ADMIN is needed for this. It's at least separate from CAP_MKNOD
int filetype, const char *buffer)
{
const char *b;
- char *endp;
- int count;
+ char temp[12]; /* 11 + 1 characters needed for a u32 */
+ int count, rc;
struct dev_exception_item ex;
+ struct cgroup *p = devcgroup->css.cgroup;
+ struct dev_cgroup *parent = NULL;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
+ if (p->parent)
+ parent = cgroup_to_devcgroup(p->parent);
+
memset(&ex, 0, sizeof(ex));
b = buffer;
case 'a':
switch (filetype) {
case DEVCG_ALLOW:
- if (!parent_has_perm(devcgroup, &ex))
+ if (!may_allow_all(parent))
return -EPERM;
dev_exception_clean(devcgroup);
- devcgroup->deny_all = false;
+ devcgroup->behavior = DEVCG_DEFAULT_ALLOW;
+ if (!parent)
+ break;
+
+ rc = dev_exceptions_copy(&devcgroup->exceptions,
+ &parent->exceptions);
+ if (rc)
+ return rc;
break;
case DEVCG_DENY:
dev_exception_clean(devcgroup);
- devcgroup->deny_all = true;
+ devcgroup->behavior = DEVCG_DEFAULT_DENY;
break;
default:
return -EINVAL;
ex.major = ~0;
b++;
} else if (isdigit(*b)) {
- ex.major = simple_strtoul(b, &endp, 10);
- b = endp;
+ memset(temp, 0, sizeof(temp));
+ for (count = 0; count < sizeof(temp) - 1; count++) {
+ temp[count] = *b;
+ b++;
+ if (!isdigit(*b))
+ break;
+ }
+ rc = kstrtou32(temp, 10, &ex.major);
+ if (rc)
+ return -EINVAL;
} else {
return -EINVAL;
}
ex.minor = ~0;
b++;
} else if (isdigit(*b)) {
- ex.minor = simple_strtoul(b, &endp, 10);
- b = endp;
+ memset(temp, 0, sizeof(temp));
+ for (count = 0; count < sizeof(temp) - 1; count++) {
+ temp[count] = *b;
+ b++;
+ if (!isdigit(*b))
+ break;
+ }
+ rc = kstrtou32(temp, 10, &ex.minor);
+ if (rc)
+ return -EINVAL;
} else {
return -EINVAL;
}
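
Both hunks above replace simple_strtoul() with a bounded copy into temp[] followed by kstrtou32(), so a number longer than a u32 can hold is rejected instead of silently wrapping. A user-space analogue of the idiom (strtoul stands in for kstrtou32; parse_u32 and the sample input are invented for the demo):

#include <ctype.h>
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Copy at most 11 digits into a NUL-terminated buffer, then convert.
 * Returns 0 and advances *bp past the digits on success. */
static int parse_u32(const char **bp, uint32_t *out)
{
	char temp[12];
	const char *b = *bp;
	unsigned long val;
	char *end;
	size_t count;

	if (!isdigit((unsigned char)*b))
		return -1;
	memset(temp, 0, sizeof(temp));
	for (count = 0; count < sizeof(temp) - 1; count++) {
		temp[count] = *b++;
		if (!isdigit((unsigned char)*b))
			break;
	}
	errno = 0;
	val = strtoul(temp, &end, 10);
	if (errno || *end || val > UINT32_MAX)
		return -1;	/* overlong or non-numeric: reject */
	*out = (uint32_t)val;
	*bp = b;
	return 0;
}

int main(void)
{
	const char *s = "254:1 rwm";
	uint32_t major;

	if (parse_u32(&s, &major) == 0)
		printf("major=%u rest=\"%s\"\n", major, s); /* major=254 rest=":1 rwm" */
	return 0;
}
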
* a matching exception instead. And be silent about it: we
* don't want to break compatibility
*/
- if (devcgroup->deny_all == false) {
+ if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) {
dev_exception_rm(devcgroup, &ex);
return 0;
}
* a matching exception instead. And be silent about it: we
* don't want to break compatibility
*/
- if (devcgroup->deny_all == true) {
+ if (devcgroup->behavior == DEVCG_DEFAULT_DENY) {
dev_exception_rm(devcgroup, &ex);
return 0;
}
*
* returns 0 on success, -EPERM in case the operation is not permitted
*/
-static int __devcgroup_check_permission(struct dev_cgroup *dev_cgroup,
- short type, u32 major, u32 minor,
+static int __devcgroup_check_permission(short type, u32 major, u32 minor,
short access)
{
+ struct dev_cgroup *dev_cgroup;
struct dev_exception_item ex;
int rc;
ex.access = access;
rcu_read_lock();
+ dev_cgroup = task_devcgroup(current);
rc = may_access(dev_cgroup, &ex);
rcu_read_unlock();
int __devcgroup_inode_permission(struct inode *inode, int mask)
{
- struct dev_cgroup *dev_cgroup = task_devcgroup(current);
short type, access = 0;
if (S_ISBLK(inode->i_mode))
if (mask & MAY_READ)
access |= ACC_READ;
- return __devcgroup_check_permission(dev_cgroup, type, imajor(inode),
- iminor(inode), access);
+ return __devcgroup_check_permission(type, imajor(inode), iminor(inode),
+ access);
}
int devcgroup_inode_mknod(int mode, dev_t dev)
{
- struct dev_cgroup *dev_cgroup = task_devcgroup(current);
short type;
if (!S_ISBLK(mode) && !S_ISCHR(mode))
else
type = DEV_CHAR;
- return __devcgroup_check_permission(dev_cgroup, type, MAJOR(dev),
- MINOR(dev), ACC_MKNOD);
+ return __devcgroup_check_permission(type, MAJOR(dev), MINOR(dev),
+ ACC_MKNOD);
}
if (sel_netnode_hash[idx].size == SEL_NETNODE_HASH_BKT_LIMIT) {
struct sel_netnode *tail;
tail = list_entry(
- rcu_dereference(sel_netnode_hash[idx].list.prev),
+ rcu_dereference_protected(sel_netnode_hash[idx].list.prev,
+ lockdep_is_held(&sel_netnode_lock)),
struct sel_netnode, list);
list_del_rcu(&tail->list);
kfree_rcu(tail, rcu);
if (dirn != compr->direction) {
pr_err("this device doesn't support this direction\n");
+ snd_card_unref(compr->card);
return -EINVAL;
}
data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
+ if (!data) {
+ snd_card_unref(compr->card);
return -ENOMEM;
+ }
data->stream.ops = compr->ops;
data->stream.direction = dirn;
data->stream.private_data = compr->private_data;
runtime = kzalloc(sizeof(*runtime), GFP_KERNEL);
if (!runtime) {
kfree(data);
+ snd_card_unref(compr->card);
return -ENOMEM;
}
runtime->state = SNDRV_PCM_STATE_OPEN;
kfree(runtime);
kfree(data);
}
- return ret;
+ snd_card_unref(compr->card);
+ return 0;
}
static int snd_compr_free(struct inode *inode, struct file *f)
write_lock_irqsave(&card->ctl_files_rwlock, flags);
list_add_tail(&ctl->list, &card->ctl_files);
write_unlock_irqrestore(&card->ctl_files_rwlock, flags);
+ snd_card_unref(card);
return 0;
__error:
__error2:
snd_card_file_remove(card, file);
__error1:
+ if (card)
+ snd_card_unref(card);
return err;
}
spin_unlock_irq(&ctl->read_lock);
schedule();
remove_wait_queue(&ctl->change_sleep, &wait);
+ if (ctl->card->shutdown)
+ return -ENODEV;
if (signal_pending(current))
return -ERESTARTSYS;
spin_lock_irq(&ctl->read_lock);
if (hw == NULL)
return -ENODEV;
- if (!try_module_get(hw->card->module))
+ if (!try_module_get(hw->card->module)) {
+ snd_card_unref(hw->card);
return -EFAULT;
+ }
init_waitqueue_entry(&wait, current);
add_wait_queue(&hw->open_wait, &wait);
mutex_unlock(&hw->open_mutex);
schedule();
mutex_lock(&hw->open_mutex);
+ if (hw->card->shutdown) {
+ err = -ENODEV;
+ break;
+ }
if (signal_pending(current)) {
err = -ERESTARTSYS;
break;
mutex_unlock(&hw->open_mutex);
if (err < 0)
module_put(hw->card->module);
+ snd_card_unref(hw->card);
return err;
}
mutex_unlock(&register_mutex);
return -EINVAL;
}
+ mutex_lock(&hwdep->open_mutex);
+ wake_up(&hwdep->open_wait);
#ifdef CONFIG_SND_OSSEMUL
if (hwdep->ossreg)
snd_unregister_oss_device(hwdep->oss_type, hwdep->card, hwdep->device);
#endif
snd_unregister_device(SNDRV_DEVICE_TYPE_HWDEP, hwdep->card, hwdep->device);
list_del_init(&hwdep->list);
+ mutex_unlock(&hwdep->open_mutex);
mutex_unlock(&register_mutex);
return 0;
}
spin_lock_init(&card->files_lock);
INIT_LIST_HEAD(&card->files_list);
init_waitqueue_head(&card->shutdown_sleep);
+ atomic_set(&card->refcount, 0);
#ifdef CONFIG_PM
mutex_init(&card->power_lock);
init_waitqueue_head(&card->power_sleep);
return 0;
}
+/**
+ * snd_card_unref - release the reference counter
+ * @card: the card instance
+ *
+ * Decrements the reference counter. When it reaches zero, wake up
+ * the sleeper and call the destructor if needed.
+ */
+void snd_card_unref(struct snd_card *card)
+{
+ if (atomic_dec_and_test(&card->refcount)) {
+ wake_up(&card->shutdown_sleep);
+ if (card->free_on_last_close)
+ snd_card_do_free(card);
+ }
+}
+EXPORT_SYMBOL(snd_card_unref);
+
int snd_card_free_when_closed(struct snd_card *card)
{
- int free_now = 0;
- int ret = snd_card_disconnect(card);
- if (ret)
- return ret;
+ int ret;
- spin_lock(&card->files_lock);
- if (list_empty(&card->files_list))
- free_now = 1;
- else
- card->free_on_last_close = 1;
- spin_unlock(&card->files_lock);
+ atomic_inc(&card->refcount);
+ ret = snd_card_disconnect(card);
+ if (ret) {
+ atomic_dec(&card->refcount);
+ return ret;
+ }
- if (free_now)
+ card->free_on_last_close = 1;
+ if (atomic_dec_and_test(&card->refcount))
snd_card_do_free(card);
return 0;
}
return ret;
/* wait, until all devices are ready for the free operation */
- wait_event(card->shutdown_sleep, list_empty(&card->files_list));
+ wait_event(card->shutdown_sleep, !atomic_read(&card->refcount));
snd_card_do_free(card);
return 0;
}
return -ENODEV;
}
list_add(&mfile->list, &card->files_list);
+ atomic_inc(&card->refcount);
spin_unlock(&card->files_lock);
return 0;
}
int snd_card_file_remove(struct snd_card *card, struct file *file)
{
struct snd_monitor_file *mfile, *found = NULL;
- int last_close = 0;
spin_lock(&card->files_lock);
list_for_each_entry(mfile, &card->files_list, list) {
break;
}
}
- if (list_empty(&card->files_list))
- last_close = 1;
spin_unlock(&card->files_lock);
- if (last_close) {
- wake_up(&card->shutdown_sleep);
- if (card->free_on_last_close)
- snd_card_do_free(card);
- }
if (!found) {
snd_printk(KERN_ERR "ALSA card file remove problem (%p)\n", file);
return -ENOENT;
}
kfree(found);
+ snd_card_unref(card);
return 0;
}
SNDRV_OSS_DEVICE_TYPE_MIXER);
if (card == NULL)
return -ENODEV;
- if (card->mixer_oss == NULL)
+ if (card->mixer_oss == NULL) {
+ snd_card_unref(card);
return -ENODEV;
+ }
err = snd_card_file_add(card, file);
- if (err < 0)
+ if (err < 0) {
+ snd_card_unref(card);
return err;
+ }
fmixer = kzalloc(sizeof(*fmixer), GFP_KERNEL);
if (fmixer == NULL) {
snd_card_file_remove(card, file);
+ snd_card_unref(card);
return -ENOMEM;
}
fmixer->card = card;
if (!try_module_get(card->module)) {
kfree(fmixer);
snd_card_file_remove(card, file);
+ snd_card_unref(card);
return -EFAULT;
}
+ snd_card_unref(card);
return 0;
}
mutex_unlock(&pcm->open_mutex);
schedule();
mutex_lock(&pcm->open_mutex);
+ if (pcm->card->shutdown) {
+ err = -ENODEV;
+ break;
+ }
if (signal_pending(current)) {
err = -ERESTARTSYS;
break;
mutex_unlock(&pcm->open_mutex);
if (err < 0)
goto __error;
+ snd_card_unref(pcm->card);
return err;
__error:
__error2:
snd_card_file_remove(pcm->card, file);
__error1:
+ if (pcm)
+ snd_card_unref(pcm->card);
return err;
}
if (list_empty(&pcm->list))
goto unlock;
+ mutex_lock(&pcm->open_mutex);
+ wake_up(&pcm->open_wait);
list_del_init(&pcm->list);
for (cidx = 0; cidx < 2; cidx++)
- for (substream = pcm->streams[cidx].substream; substream; substream = substream->next)
- if (substream->runtime)
+ for (substream = pcm->streams[cidx].substream; substream; substream = substream->next) {
+ snd_pcm_stream_lock_irq(substream);
+ if (substream->runtime) {
substream->runtime->status->state = SNDRV_PCM_STATE_DISCONNECTED;
+ wake_up(&substream->runtime->sleep);
+ wake_up(&substream->runtime->tsleep);
+ }
+ snd_pcm_stream_unlock_irq(substream);
+ }
list_for_each_entry(notify, &snd_pcm_notify_list, list) {
notify->n_disconnect(pcm);
}
pcm->streams[cidx].chmap_kctl = NULL;
}
}
+ mutex_unlock(&pcm->open_mutex);
unlock:
mutex_unlock(&register_mutex);
return 0;
return usecs;
}
+static void snd_pcm_set_state(struct snd_pcm_substream *substream, int state)
+{
+ snd_pcm_stream_lock_irq(substream);
+ if (substream->runtime->status->state != SNDRV_PCM_STATE_DISCONNECTED)
+ substream->runtime->status->state = state;
+ snd_pcm_stream_unlock_irq(substream);
+}
+
static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
runtime->boundary *= 2;
snd_pcm_timer_resolution_change(substream);
- runtime->status->state = SNDRV_PCM_STATE_SETUP;
+ snd_pcm_set_state(substream, SNDRV_PCM_STATE_SETUP);
if (pm_qos_request_active(&substream->latency_pm_qos_req))
pm_qos_remove_request(&substream->latency_pm_qos_req);
/* hardware might be unusable from this time,
so we force application to retry to set
the correct hardware parameter settings */
- runtime->status->state = SNDRV_PCM_STATE_OPEN;
+ snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
if (substream->ops->hw_free != NULL)
substream->ops->hw_free(substream);
return err;
return -EBADFD;
if (substream->ops->hw_free)
result = substream->ops->hw_free(substream);
- runtime->status->state = SNDRV_PCM_STATE_OPEN;
+ snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
pm_qos_remove_request(&substream->latency_pm_qos_req);
return result;
}
{
struct snd_pcm_runtime *runtime = substream->runtime;
runtime->control->appl_ptr = runtime->status->hw_ptr;
- runtime->status->state = SNDRV_PCM_STATE_PREPARED;
+ snd_pcm_set_state(substream, SNDRV_PCM_STATE_PREPARED);
}
static struct action_ops snd_pcm_action_prepare = {
down_read(&snd_pcm_link_rwsem);
snd_pcm_stream_lock_irq(substream);
remove_wait_queue(&to_check->sleep, &wait);
+ if (card->shutdown) {
+ result = -ENODEV;
+ break;
+ }
if (tout == 0) {
if (substream->runtime->status->state == SNDRV_PCM_STATE_SUSPENDED)
result = -ESTRPIPE;
write_unlock_irq(&snd_pcm_link_rwlock);
up_write(&snd_pcm_link_rwsem);
_nolock:
+ snd_card_unref(substream1->pcm->card);
fput_light(file, fput_needed);
if (res < 0)
kfree(group);
return err;
pcm = snd_lookup_minor_data(iminor(inode),
SNDRV_DEVICE_TYPE_PCM_PLAYBACK);
- return snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_PLAYBACK);
+ err = snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_PLAYBACK);
+ if (pcm)
+ snd_card_unref(pcm->card);
+ return err;
}
static int snd_pcm_capture_open(struct inode *inode, struct file *file)
return err;
pcm = snd_lookup_minor_data(iminor(inode),
SNDRV_DEVICE_TYPE_PCM_CAPTURE);
- return snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_CAPTURE);
+ err = snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_CAPTURE);
+ if (pcm)
+ snd_card_unref(pcm->card);
+ return err;
}
static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream)
mutex_unlock(&pcm->open_mutex);
schedule();
mutex_lock(&pcm->open_mutex);
+ if (pcm->card->shutdown) {
+ err = -ENODEV;
+ break;
+ }
if (signal_pending(current)) {
err = -ERESTARTSYS;
break;
if (rmidi == NULL)
return -ENODEV;
- if (!try_module_get(rmidi->card->module))
+ if (!try_module_get(rmidi->card->module)) {
+ snd_card_unref(rmidi->card);
return -ENXIO;
+ }
mutex_lock(&rmidi->open_mutex);
card = rmidi->card;
mutex_unlock(&rmidi->open_mutex);
schedule();
mutex_lock(&rmidi->open_mutex);
+ if (rmidi->card->shutdown) {
+ err = -ENODEV;
+ break;
+ }
if (signal_pending(current)) {
err = -ERESTARTSYS;
break;
#endif
file->private_data = rawmidi_file;
mutex_unlock(&rmidi->open_mutex);
+ snd_card_unref(rmidi->card);
return 0;
__error:
__error_card:
mutex_unlock(&rmidi->open_mutex);
module_put(rmidi->card->module);
+ snd_card_unref(rmidi->card);
return err;
}
spin_unlock_irq(&runtime->lock);
schedule();
remove_wait_queue(&runtime->sleep, &wait);
+ if (rfile->rmidi->card->shutdown)
+ return -ENODEV;
if (signal_pending(current))
return result > 0 ? result : -ERESTARTSYS;
if (!runtime->avail)
spin_unlock_irq(&runtime->lock);
timeout = schedule_timeout(30 * HZ);
remove_wait_queue(&runtime->sleep, &wait);
+ if (rfile->rmidi->card->shutdown)
+ return -ENODEV;
if (signal_pending(current))
return result > 0 ? result : -ERESTARTSYS;
if (!runtime->avail && !timeout)
static int snd_rawmidi_dev_disconnect(struct snd_device *device)
{
struct snd_rawmidi *rmidi = device->device_data;
+ int dir;
mutex_lock(&register_mutex);
+ mutex_lock(&rmidi->open_mutex);
+ wake_up(&rmidi->open_wait);
list_del_init(&rmidi->list);
+ for (dir = 0; dir < 2; dir++) {
+ struct snd_rawmidi_substream *s;
+ list_for_each_entry(s, &rmidi->streams[dir].substreams, list) {
+ if (s->runtime)
+ wake_up(&s->runtime->sleep);
+ }
+ }
+
#ifdef CONFIG_SND_OSSEMUL
if (rmidi->ossreg) {
if ((int)rmidi->device == midi_map[rmidi->card->number]) {
}
#endif /* CONFIG_SND_OSSEMUL */
snd_unregister_device(SNDRV_DEVICE_TYPE_RAWMIDI, rmidi->card, rmidi->device);
+ mutex_unlock(&rmidi->open_mutex);
mutex_unlock(&register_mutex);
return 0;
}
*
* Checks that a minor device with the specified type is registered, and returns
* its user data pointer.
+ *
+ * This function increments the reference counter of the card instance
+ * if an associated instance with the given minor number and type is found.
+ * The caller must call snd_card_unref() appropriately later.
*/
void *snd_lookup_minor_data(unsigned int minor, int type)
{
return NULL;
mutex_lock(&sound_mutex);
mreg = snd_minors[minor];
- if (mreg && mreg->type == type)
+ if (mreg && mreg->type == type) {
private_data = mreg->private_data;
- else
+ if (private_data && mreg->card_ptr)
+ atomic_inc(&mreg->card_ptr->refcount);
+ } else
private_data = NULL;
mutex_unlock(&sound_mutex);
return private_data;
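
The pairing the comment above demands looks like this at a typical call site (a sketch only, modeled on the snd_pcm open paths later in this patch; do_open() is a hypothetical stand-in and error handling is trimmed):

static int example_open(struct inode *inode, struct file *file)
{
	struct snd_pcm *pcm;
	int err;

	/* the lookup takes a reference on the underlying card ... */
	pcm = snd_lookup_minor_data(iminor(inode),
				    SNDRV_DEVICE_TYPE_PCM_PLAYBACK);
	if (!pcm)
		return -ENODEV;

	err = do_open(file, pcm);	/* hypothetical helper */

	/* ... and the caller drops it on every path, success or failure */
	snd_card_unref(pcm->card);
	return err;
}
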
preg->device = dev;
preg->f_ops = f_ops;
preg->private_data = private_data;
+ preg->card_ptr = card;
mutex_lock(&sound_mutex);
#ifdef CONFIG_SND_DYNAMIC_MINORS
minor = snd_find_free_minor(type);
static struct snd_minor *snd_oss_minors[SNDRV_OSS_MINORS];
static DEFINE_MUTEX(sound_oss_mutex);
+/* NOTE: This function increments the refcount of the associated card like
+ * snd_lookup_minor_data(); the caller must call snd_card_unref() appropriately
+ */
void *snd_lookup_oss_minor_data(unsigned int minor, int type)
{
struct snd_minor *mreg;
return NULL;
mutex_lock(&sound_oss_mutex);
mreg = snd_oss_minors[minor];
- if (mreg && mreg->type == type)
+ if (mreg && mreg->type == type) {
private_data = mreg->private_data;
- else
+ if (private_data && mreg->card_ptr)
+ atomic_inc(&mreg->card_ptr->refcount);
+ } else
private_data = NULL;
mutex_unlock(&sound_oss_mutex);
return private_data;
preg->device = dev;
preg->f_ops = f_ops;
preg->private_data = private_data;
+ preg->card_ptr = card;
mutex_lock(&sound_oss_mutex);
snd_oss_minors[minor] = preg;
minor_unit = SNDRV_MINOR_OSS_DEVICE(minor);
},
{
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
- .name = "IEC958 Preample Capture Default",
+ .name = "IEC958 Preamble Capture Default",
.access = SNDRV_CTL_ELEM_ACCESS_READ |
SNDRV_CTL_ELEM_ACCESS_VOLATILE,
.info = snd_ak4113_spdif_pinfo,
},
{
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
- .name = "IEC958 Preample Capture Default",
+ .name = "IEC958 Preamble Capture Default",
.access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
.info = snd_ak4114_spdif_pinfo,
.get = snd_ak4114_spdif_pget,
},
{
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
- .name = "IEC958 Preample Capture Default",
+ .name = "IEC958 Preamble Capture Default",
.access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
.info = snd_ak4117_spdif_pinfo,
.get = snd_ak4117_spdif_pget,
error = snd_card_miro_aci_detect(card, miro);
if (error < 0) {
- snd_card_free(card);
snd_printk(KERN_ERR "unable to detect aci chip\n");
return -ENODEV;
}
struct snd_als300_substream_data *data = kzalloc(sizeof(*data),
GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
snd_als300_dbgcallenter();
chip->playback_substream = substream;
runtime->hw = snd_als300_playback_hw;
struct snd_als300_substream_data *data = kzalloc(sizeof(*data),
GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
snd_als300_dbgcallenter();
chip->capture_substream = substream;
runtime->hw = snd_als300_capture_hw;
struct es1968 *chip = tea->private_data;
unsigned long io = chip->io_port + GPIO_DATA;
u16 val = inw(io);
+ u8 ret;
- return (val & STR_DATA) ? TEA575X_DATA : 0 |
- (val & STR_MOST) ? TEA575X_MOST : 0;
+ ret = 0;
+ if (val & STR_DATA)
+ ret |= TEA575X_DATA;
+ if (val & STR_MOST)
+ ret |= TEA575X_MOST;
+ return ret;
}
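
The one-liner removed above was a precedence bug, not a style fix: | binds tighter than ?:, and ?: associates to the right, so the DATA and MOST flags could never be reported together. A standalone demo (flag values invented) showing the mis-grouping:

#include <stdio.h>

#define DATA 0x1
#define MOST 0x2

int main(void)
{
	int has_data = 1, has_most = 1;

	/* parsed as: has_data ? DATA : ((0 | has_most) ? MOST : 0) */
	int old = has_data ? DATA : 0 | has_most ? MOST : 0;

	/* the fixed form builds the mask one flag at a time */
	int fixed = 0;
	if (has_data)
		fixed |= DATA;
	if (has_most)
		fixed |= MOST;

	printf("old=%d fixed=%d\n", old, fixed);	/* old=1 fixed=3 */
	return 0;
}
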
static void snd_es1968_tea575x_set_direction(struct snd_tea575x *tea, bool output)
{ TYPE_MAESTRO2E, 0x1179 },
{ TYPE_MAESTRO2E, 0x14c0 }, /* HP omnibook 4150 */
{ TYPE_MAESTRO2E, 0x1558 },
+ { TYPE_MAESTRO2E, 0x125d }, /* a PCI card, e.g. Terratec DMX */
+ { TYPE_MAESTRO2, 0x125d }, /* a PCI card, e.g. SF64-PCE2 */
};
static struct ess_device_list mpu_blacklist[] __devinitdata = {
struct fm801 *chip = tea->private_data;
unsigned short reg = inw(FM801_REG(chip, GPIO_CTRL));
struct snd_fm801_tea575x_gpio gpio = *get_tea575x_gpio(chip);
-
- return (reg & FM801_GPIO_GP(gpio.data)) ? TEA575X_DATA : 0 |
- (reg & FM801_GPIO_GP(gpio.most)) ? TEA575X_MOST : 0;
+ u8 ret;
+
+ ret = 0;
+ if (reg & FM801_GPIO_GP(gpio.data))
+ ret |= TEA575X_DATA;
+ if (reg & FM801_GPIO_GP(gpio.most))
+ ret |= TEA575X_MOST;
+ return ret;
}
static void snd_fm801_tea575x_set_direction(struct snd_tea575x *tea, bool output)
EXPORT_SYMBOL_HDA(snd_hda_delete_codec_preset);
#ifdef CONFIG_PM
+#define codec_in_pm(codec) ((codec)->in_pm)
static void hda_power_work(struct work_struct *work);
static void hda_keep_power_on(struct hda_codec *codec);
#define hda_codec_is_power_on(codec) ((codec)->power_on)
bus->ops.pm_notify(bus, power_up);
}
#else
+#define codec_in_pm(codec) 0
static inline void hda_keep_power_on(struct hda_codec *codec) {}
#define hda_codec_is_power_on(codec) 1
#define hda_call_pm_notify(bus, state) {}
}
mutex_unlock(&bus->cmd_mutex);
snd_hda_power_down(codec);
- if (res && *res == -1 && bus->rirb_error) {
+ if (!codec_in_pm(codec) && res && *res == -1 && bus->rirb_error) {
if (bus->response_reset) {
snd_printd("hda_codec: resetting BUS due to "
"fatal communication error\n");
goto again;
}
/* clear reset-flag when the communication gets recovered */
- if (!err)
+ if (!err || codec_in_pm(codec))
bus->response_reset = 0;
return err;
}
{
unsigned int state;
+ codec->in_pm = 1;
+
if (codec->patch_ops.suspend)
codec->patch_ops.suspend(codec);
hda_cleanup_all_streams(codec);
codec->power_transition = 0;
codec->power_jiffies = jiffies;
spin_unlock(&codec->power_lock);
+ codec->in_pm = 0;
return state;
}
*/
static void hda_call_codec_resume(struct hda_codec *codec)
{
+ codec->in_pm = 1;
+
/* set as if powered on for avoiding re-entering the resume
* in the resume / power-save sequence
*/
snd_hda_codec_resume_cache(codec);
}
snd_hda_jack_report_sync(codec);
+
+ codec->in_pm = 0;
snd_hda_power_down(codec); /* flag down before returning */
}
#endif /* CONFIG_PM */
unsigned int power_on :1; /* current (global) power-state */
unsigned int d3_stop_clk:1; /* support D3 operation without BCLK */
unsigned int pm_down_notified:1; /* PM notified to controller */
+ unsigned int in_pm:1; /* suspend/resume being performed */
int power_transition; /* power-state in transition */
int power_count; /* current (global) power refcount */
struct delayed_work power_work; /* delayed task for powerdown */
#define AZX_DCAPS_ALIGN_BUFSIZE (1 << 22) /* buffer size alignment */
#define AZX_DCAPS_4K_BDLE_BOUNDARY (1 << 23) /* BDLE in 4k boundary */
#define AZX_DCAPS_COUNT_LPIB_DELAY (1 << 25) /* Take LPIB as delay */
+#define AZX_DCAPS_PM_RUNTIME (1 << 26) /* runtime PM support */
+
+/* quirks for Intel PCH */
+#define AZX_DCAPS_INTEL_PCH \
+ (AZX_DCAPS_SCH_SNOOP | AZX_DCAPS_BUFSIZE | \
+ AZX_DCAPS_COUNT_LPIB_DELAY | AZX_DCAPS_PM_RUNTIME)
/* quirks for ATI SB / AMD Hudson */
#define AZX_DCAPS_PRESET_ATI_SB \
{
struct azx *chip = bus->private_data;
+ if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
+ return;
+
if (power_up)
pm_runtime_get_sync(&chip->pci->dev);
else
struct snd_card *card = dev_get_drvdata(dev);
struct azx *chip = card->private_data;
- if (!power_save_controller)
+ if (!power_save_controller ||
+ !(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
return -EAGAIN;
azx_stop_chip(chip);
static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
/* CPT */
{ PCI_DEVICE(0x8086, 0x1c20),
- .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP |
- AZX_DCAPS_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY },
+ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
/* PBG */
{ PCI_DEVICE(0x8086, 0x1d20),
- .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP |
- AZX_DCAPS_BUFSIZE},
+ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
/* Panther Point */
{ PCI_DEVICE(0x8086, 0x1e20),
- .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP |
- AZX_DCAPS_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY },
+ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
/* Lynx Point */
{ PCI_DEVICE(0x8086, 0x8c20),
- .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP |
- AZX_DCAPS_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY },
+ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
/* Lynx Point-LP */
{ PCI_DEVICE(0x8086, 0x9c20),
- .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP |
- AZX_DCAPS_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY },
+ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
/* Lynx Point-LP */
{ PCI_DEVICE(0x8086, 0x9c21),
- .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP |
- AZX_DCAPS_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY },
+ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
/* Haswell */
{ PCI_DEVICE(0x8086, 0x0c0c),
- .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP |
- AZX_DCAPS_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY },
+ .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH },
{ PCI_DEVICE(0x8086, 0x0d0c),
- .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP |
- AZX_DCAPS_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY },
+ .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH },
/* 5 Series/3400 */
{ PCI_DEVICE(0x8086, 0x3b56),
- .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP |
- AZX_DCAPS_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY },
+ .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH },
/* SCH */
{ PCI_DEVICE(0x8086, 0x811b),
.driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP |
/* Teradici */
{ PCI_DEVICE(0x6549, 0x1200),
.driver_data = AZX_DRIVER_TERA | AZX_DCAPS_NO_64BIT },
+ { PCI_DEVICE(0x6549, 0x2200),
+ .driver_data = AZX_DRIVER_TERA | AZX_DCAPS_NO_64BIT },
/* Creative X-Fi (CA0110-IBG) */
/* CTHDA chips */
{ PCI_DEVICE(0x1102, 0x0010),
if (spec->multiout.dig_out_nid) {
info++;
codec->num_pcms++;
+ codec->spdif_status_reset = 1;
info->name = "AD198x Digital";
info->pcm_type = HDA_PCM_TYPE_SPDIF;
info->stream[SNDRV_PCM_STREAM_PLAYBACK] = ad198x_pcm_digital_playback;
#define CS420X_VENDOR_NID 0x11
#define CS_DIG_OUT1_PIN_NID 0x10
#define CS_DIG_OUT2_PIN_NID 0x15
-#define CS_DMIC1_PIN_NID 0x12
-#define CS_DMIC2_PIN_NID 0x0e
+#define CS_DMIC1_PIN_NID 0x0e
+#define CS_DMIC2_PIN_NID 0x12
/* coef indices */
#define IDX_SPDIF_STAT 0x0000
memcpy(cfg->speaker_pins, cfg->line_out_pins,
sizeof(cfg->speaker_pins));
cfg->line_outs = 0;
+ memset(cfg->line_out_pins, 0, sizeof(cfg->line_out_pins));
}
return 0;
cs_automic(codec, NULL);
coef = 0x000a; /* ADC1/2 - Digital and Analog Soft Ramp */
+ cs_vendor_coef_set(codec, IDX_ADC_CFG, coef);
+
+ coef = cs_vendor_coef_get(codec, IDX_BEEP_CFG);
if (is_active_pin(codec, CS_DMIC2_PIN_NID))
- coef |= 0x0500; /* DMIC2 2 chan on, GPIO1 off */
+ coef |= 1 << 4; /* DMIC2 2 chan on, GPIO1 off */
if (is_active_pin(codec, CS_DMIC1_PIN_NID))
- coef |= 0x1800; /* DMIC1 2 chan on, GPIO0 off
+ coef |= 1 << 3; /* DMIC1 2 chan on, GPIO0 off
* No effect if SPDIF_OUT2 is
* selected in IDX_SPDIF_CTL.
*/
- cs_vendor_coef_set(codec, IDX_ADC_CFG, coef);
+
+ cs_vendor_coef_set(codec, IDX_BEEP_CFG, coef);
} else {
if (spec->mic_detect)
cs_automic(codec, NULL);
| 0x0400 /* Disable Coefficient Auto increment */
)},
/* Beep */
- {0x11, AC_VERB_SET_COEF_INDEX, IDX_DAC_CFG},
+ {0x11, AC_VERB_SET_COEF_INDEX, IDX_BEEP_CFG},
{0x11, AC_VERB_SET_PROC_COEF, 0x0007}, /* Enable Beep thru DAC1/2/3 */
{} /* terminator */
}
-static struct snd_kcontrol_new cs421x_capture_source = {
-
+static const struct snd_kcontrol_new cs421x_capture_source = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Capture Source",
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
}
#endif
-static struct hda_codec_ops cs421x_patch_ops = {
+static const struct hda_codec_ops cs421x_patch_ops = {
.build_controls = cs421x_build_controls,
.build_pcms = cs_build_pcms,
.init = cs421x_init,
SND_PCI_QUIRK(0x106b, 0x4000, "MacbookPro 5,1", ALC889_FIXUP_IMAC91_VREF),
SND_PCI_QUIRK(0x106b, 0x4100, "Macmini 3,1", ALC889_FIXUP_IMAC91_VREF),
SND_PCI_QUIRK(0x106b, 0x4200, "Mac Pro 5,1", ALC885_FIXUP_MACPRO_GPIO),
+ SND_PCI_QUIRK(0x106b, 0x4300, "iMac 9,1", ALC889_FIXUP_IMAC91_VREF),
SND_PCI_QUIRK(0x106b, 0x4600, "MacbookPro 5,2", ALC889_FIXUP_IMAC91_VREF),
SND_PCI_QUIRK(0x106b, 0x4900, "iMac 9,1 Aluminum", ALC889_FIXUP_IMAC91_VREF),
SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_IMAC91_VREF),
enum {
ALC268_FIXUP_INV_DMIC,
+ ALC268_FIXUP_HP_EAPD,
};
static const struct alc_fixup alc268_fixups[] = {
.type = ALC_FIXUP_FUNC,
.v.func = alc_fixup_inv_dmic_0x12,
},
+ [ALC268_FIXUP_HP_EAPD] = {
+ .type = ALC_FIXUP_VERBS,
+ .v.verbs = (const struct hda_verb[]) {
+ {0x15, AC_VERB_SET_EAPD_BTLENABLE, 0},
+ {}
+ }
+ },
};
static const struct alc_model_fixup alc268_fixup_models[] = {
{.id = ALC268_FIXUP_INV_DMIC, .name = "inv-dmic"},
+ {.id = ALC268_FIXUP_HP_EAPD, .name = "hp-eapd"},
+ {}
+};
+
+static const struct snd_pci_quirk alc268_fixup_tbl[] = {
+ /* below is the codec SSID, since multiple Toshiba laptops have the
+ * same PCI SSID 1179:ff00
+ */
+ SND_PCI_QUIRK(0x1179, 0xff06, "Toshiba P200", ALC268_FIXUP_HP_EAPD),
{}
};
spec = codec->spec;
- alc_pick_fixup(codec, alc268_fixup_models, NULL, alc268_fixups);
+ alc_pick_fixup(codec, alc268_fixup_models, alc268_fixup_tbl, alc268_fixups);
alc_apply_fixup(codec, ALC_FIXUP_ACT_PRE_PROBE);
/* automatic parse from the BIOS config */
return alc_parse_auto_config(codec, alc269_ignore, ssids);
}
-static void alc269_toggle_power_output(struct hda_codec *codec, int power_up)
+static void alc269vb_toggle_power_output(struct hda_codec *codec, int power_up)
{
int val = alc_read_coef_idx(codec, 0x04);
if (power_up)
if (spec->codec_variant != ALC269_TYPE_ALC269VB)
return;
- if ((alc_get_coef0(codec) & 0x00ff) == 0x017)
- alc269_toggle_power_output(codec, 0);
- if ((alc_get_coef0(codec) & 0x00ff) == 0x018) {
- alc269_toggle_power_output(codec, 0);
+ if (spec->codec_variant == ALC269_TYPE_ALC269VB)
+ alc269vb_toggle_power_output(codec, 0);
+ if (spec->codec_variant == ALC269_TYPE_ALC269VB &&
+ (alc_get_coef0(codec) & 0x00ff) == 0x018) {
msleep(150);
}
}
{
struct alc_spec *spec = codec->spec;
- if (spec->codec_variant == ALC269_TYPE_ALC269VB ||
+ if (spec->codec_variant == ALC269_TYPE_ALC269VB)
+ alc269vb_toggle_power_output(codec, 0);
+ if (spec->codec_variant == ALC269_TYPE_ALC269VB &&
(alc_get_coef0(codec) & 0x00ff) == 0x018) {
- alc269_toggle_power_output(codec, 0);
msleep(150);
}
codec->patch_ops.init(codec);
- if (spec->codec_variant == ALC269_TYPE_ALC269VB ||
+ if (spec->codec_variant == ALC269_TYPE_ALC269VB)
+ alc269vb_toggle_power_output(codec, 1);
+ if (spec->codec_variant == ALC269_TYPE_ALC269VB &&
(alc_get_coef0(codec) & 0x00ff) == 0x017) {
- alc269_toggle_power_output(codec, 1);
msleep(200);
}
- if (spec->codec_variant == ALC269_TYPE_ALC269VB ||
- (alc_get_coef0(codec) & 0x00ff) == 0x018)
- alc269_toggle_power_output(codec, 1);
-
snd_hda_codec_resume_amp(codec);
snd_hda_codec_resume_cache(codec);
hda_call_check_power_status(codec, 0x01);
SND_PCI_QUIRK(0x17aa, 0x21e9, "Thinkpad Edge 15", ALC269_FIXUP_SKU_IGNORE),
SND_PCI_QUIRK(0x17aa, 0x21f6, "Thinkpad T530", ALC269_FIXUP_LENOVO_DOCK),
SND_PCI_QUIRK(0x17aa, 0x21fa, "Thinkpad X230", ALC269_FIXUP_LENOVO_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x21f3, "Thinkpad T430", ALC269_FIXUP_LENOVO_DOCK),
SND_PCI_QUIRK(0x17aa, 0x21fb, "Thinkpad T430s", ALC269_FIXUP_LENOVO_DOCK),
SND_PCI_QUIRK(0x17aa, 0x2203, "Thinkpad X230 Tablet", ALC269_FIXUP_LENOVO_DOCK),
SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
{ .id = 0x10ec0282, .name = "ALC282", .patch = patch_alc269 },
{ .id = 0x10ec0283, .name = "ALC283", .patch = patch_alc269 },
{ .id = 0x10ec0290, .name = "ALC290", .patch = patch_alc269 },
+ { .id = 0x10ec0292, .name = "ALC292", .patch = patch_alc269 },
{ .id = 0x10ec0861, .rev = 0x100340, .name = "ALC660",
.patch = patch_alc861 },
{ .id = 0x10ec0660, .name = "ALC660-VD", .patch = patch_alc861vd },
.patch = patch_alc662 },
{ .id = 0x10ec0663, .name = "ALC663", .patch = patch_alc662 },
{ .id = 0x10ec0665, .name = "ALC665", .patch = patch_alc662 },
+ { .id = 0x10ec0668, .name = "ALC668", .patch = patch_alc662 },
{ .id = 0x10ec0670, .name = "ALC670", .patch = patch_alc662 },
{ .id = 0x10ec0680, .name = "ALC680", .patch = patch_alc680 },
{ .id = 0x10ec0880, .name = "ALC880", .patch = patch_alc880 },
{ .id = 0x10ec0889, .name = "ALC889", .patch = patch_alc882 },
{ .id = 0x10ec0892, .name = "ALC892", .patch = patch_alc662 },
{ .id = 0x10ec0899, .name = "ALC898", .patch = patch_alc882 },
+ { .id = 0x10ec0900, .name = "ALC1150", .patch = patch_alc882 },
{} /* terminator */
};
"HP", STAC_HP_ZEPHYR),
SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3660,
"HP Mini", STAC_92HD83XXX_HP_LED),
+ SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x144E,
+ "HP Pavilion dv5", STAC_92HD83XXX_HP_INV_LED),
{} /* terminator */
};
{
struct via_spec *spec = codec->spec;
const struct auto_pin_cfg *cfg = &spec->autocfg;
- int i, dac_num;
+ int i;
hda_nid_t nid;
+ spec->multiout.num_dacs = 0;
spec->multiout.dac_nids = spec->private_dac_nids;
- dac_num = 0;
for (i = 0; i < cfg->line_outs; i++) {
hda_nid_t dac = 0;
nid = cfg->line_out_pins[i];
if (!i && parse_output_path(codec, nid, dac, 1,
&spec->out_mix_path))
dac = spec->out_mix_path.path[0];
- if (dac) {
- spec->private_dac_nids[i] = dac;
- dac_num++;
- }
+ if (dac)
+ spec->private_dac_nids[spec->multiout.num_dacs++] = dac;
}
if (!spec->out_path[0].depth && spec->out_mix_path.depth) {
spec->out_path[0] = spec->out_mix_path;
spec->out_mix_path.depth = 0;
}
- spec->multiout.num_dacs = dac_num;
return 0;
}
*/
enum {
VIA_FIXUP_INTMIC_BOOST,
+ VIA_FIXUP_ASUS_G75,
};
static void via_fixup_intmic_boost(struct hda_codec *codec,
.type = HDA_FIXUP_FUNC,
.v.func = via_fixup_intmic_boost,
},
+ [VIA_FIXUP_ASUS_G75] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ /* set 0x24 and 0x33 as speakers */
+ { 0x24, 0x991301f0 },
+ { 0x33, 0x991301f1 }, /* subwoofer */
+ { }
+ }
+ },
};
static const struct snd_pci_quirk vt2002p_fixups[] = {
+ SND_PCI_QUIRK(0x1043, 0x1487, "Asus G75", VIA_FIXUP_ASUS_G75),
SND_PCI_QUIRK(0x1043, 0x8532, "Asus X202E", VIA_FIXUP_INTMIC_BOOST),
{}
};
+/* NIDs 0x24 and 0x33 on VT1802 have connections to non-existing NID 0x3e
+ * Replace this with mixer NID 0x1c
+ */
+static void fix_vt1802_connections(struct hda_codec *codec)
+{
+ static hda_nid_t conn_24[] = { 0x14, 0x1c };
+ static hda_nid_t conn_33[] = { 0x1c };
+
+ snd_hda_override_conn_list(codec, 0x24, ARRAY_SIZE(conn_24), conn_24);
+ snd_hda_override_conn_list(codec, 0x33, ARRAY_SIZE(conn_33), conn_33);
+}
+
/* patch for vt2002P */
static int patch_vt2002P(struct hda_codec *codec)
{
spec->aa_mix_nid = 0x21;
override_mic_boost(codec, 0x2b, 0, 3, 40);
override_mic_boost(codec, 0x29, 0, 3, 40);
+ if (spec->codec_type == VT1802)
+ fix_vt1802_connections(codec);
add_secret_dac_path(codec);
snd_hda_pick_fixup(codec, NULL, vt2002p_fixups, via_fixups);
ice->set_spdif_clock(ice, 0);
} else {
/* internal on-card clock */
- snd_vt1724_set_pro_rate(ice, ice->pro_rate_default, 1);
+ int rate;
+ if (ice->cur_rate)
+ rate = ice->cur_rate;
+ else
+ rate = ice->pro_rate_default;
+ snd_vt1724_set_pro_rate(ice, rate, 1);
}
update_spdif_bits(ice, ice->pm_saved_spdif_ctrl);
static int hdspm_update_simple_mixer_controls(struct hdspm *hdspm);
static int hdspm_autosync_ref(struct hdspm *hdspm);
static int snd_hdspm_set_defaults(struct hdspm *hdspm);
+static int hdspm_system_clock_mode(struct hdspm *hdspm);
static void hdspm_set_sgbuf(struct hdspm *hdspm,
struct snd_pcm_substream *substream,
unsigned int reg, int channels);
rate = hdspm_calc_dds_value(hdspm, period);
if (rate > 207000) {
- /* Unreasonable high sample rate as seen on PCI MADI cards.
- * Use the cached value instead.
- */
- rate = hdspm->system_sample_rate;
+ /* Unreasonably high sample rate, as seen on PCI MADI cards. */
+ if (0 == hdspm_system_clock_mode(hdspm)) {
+ /* master mode, return internal sample rate */
+ rate = hdspm->system_sample_rate;
+ } else {
+ /* slave mode, return external sample rate */
+ rate = hdspm_external_sample_rate(hdspm);
+ }
}
return rate;
#define HDSPM_SYSTEM_SAMPLE_RATE(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
- .name = xname, \
- .index = xindex, \
- .access = SNDRV_CTL_ELEM_ACCESS_READ, \
- .info = snd_hdspm_info_system_sample_rate, \
- .get = snd_hdspm_get_system_sample_rate \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
+ .name = xname, \
+ .index = xindex, \
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |\
+ SNDRV_CTL_ELEM_ACCESS_VOLATILE, \
+ .info = snd_hdspm_info_system_sample_rate, \
+ .put = snd_hdspm_put_system_sample_rate, \
+ .get = snd_hdspm_get_system_sample_rate \
}
static int snd_hdspm_info_system_sample_rate(struct snd_kcontrol *kcontrol,
return 0;
}
+static int snd_hdspm_put_system_sample_rate(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *
+ ucontrol)
+{
+ struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
+
+ hdspm_set_dds_value(hdspm, ucontrol->value.enumerated.item[0]);
+ return 0;
+}
+
/**
* Returns the WordClock sample rate class for the given card.
hdspm_get_s1_sample_rate(hdspm,
kcontrol->private_value-1);
}
+ break;
case AIO:
switch (kcontrol->private_value) {
hdspm_get_s1_sample_rate(hdspm,
ucontrol->id.index-1);
}
+ break;
case AES32:
hdspm_get_s1_sample_rate(hdspm,
kcontrol->private_value-1);
break;
+ }
+ break;
+ case MADI:
+ case MADIface:
+ {
+ int rate = hdspm_external_sample_rate(hdspm);
+ int i, selected_rate = 0;
+ for (i = 1; i < 10; i++)
+ if (HDSPM_bit2freq(i) == rate) {
+ selected_rate = i;
+ break;
+ }
+ ucontrol->value.enumerated.item[0] = selected_rate;
}
+ break;
+
default:
break;
}
#define HDSPM_PREF_SYNC_REF(xname, xindex) \
-{.iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
.name = xname, \
.index = xindex, \
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE |\
#define HDSPM_AUTOSYNC_REF(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
- .name = xname, \
- .index = xindex, \
- .access = SNDRV_CTL_ELEM_ACCESS_READ, \
- .info = snd_hdspm_info_autosync_ref, \
- .get = snd_hdspm_get_autosync_ref, \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
+ .name = xname, \
+ .index = xindex, \
+ .access = SNDRV_CTL_ELEM_ACCESS_READ, \
+ .info = snd_hdspm_info_autosync_ref, \
+ .get = snd_hdspm_get_autosync_ref, \
}
static int hdspm_autosync_ref(struct hdspm *hdspm)
#define HDSPM_LINE_OUT(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
- .name = xname, \
- .index = xindex, \
- .info = snd_hdspm_info_line_out, \
- .get = snd_hdspm_get_line_out, \
- .put = snd_hdspm_put_line_out \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
+ .name = xname, \
+ .index = xindex, \
+ .info = snd_hdspm_info_line_out, \
+ .get = snd_hdspm_get_line_out, \
+ .put = snd_hdspm_put_line_out \
}
static int hdspm_line_out(struct hdspm * hdspm)
#define HDSPM_TX_64(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
- .name = xname, \
- .index = xindex, \
- .info = snd_hdspm_info_tx_64, \
- .get = snd_hdspm_get_tx_64, \
- .put = snd_hdspm_put_tx_64 \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
+ .name = xname, \
+ .index = xindex, \
+ .info = snd_hdspm_info_tx_64, \
+ .get = snd_hdspm_get_tx_64, \
+ .put = snd_hdspm_put_tx_64 \
}
static int hdspm_tx_64(struct hdspm * hdspm)
#define HDSPM_C_TMS(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
- .name = xname, \
- .index = xindex, \
- .info = snd_hdspm_info_c_tms, \
- .get = snd_hdspm_get_c_tms, \
- .put = snd_hdspm_put_c_tms \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
+ .name = xname, \
+ .index = xindex, \
+ .info = snd_hdspm_info_c_tms, \
+ .get = snd_hdspm_get_c_tms, \
+ .put = snd_hdspm_put_c_tms \
}
static int hdspm_c_tms(struct hdspm * hdspm)
#define HDSPM_SAFE_MODE(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
- .name = xname, \
- .index = xindex, \
- .info = snd_hdspm_info_safe_mode, \
- .get = snd_hdspm_get_safe_mode, \
- .put = snd_hdspm_put_safe_mode \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
+ .name = xname, \
+ .index = xindex, \
+ .info = snd_hdspm_info_safe_mode, \
+ .get = snd_hdspm_get_safe_mode, \
+ .put = snd_hdspm_put_safe_mode \
}
static int hdspm_safe_mode(struct hdspm * hdspm)
#define HDSPM_EMPHASIS(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
- .name = xname, \
- .index = xindex, \
- .info = snd_hdspm_info_emphasis, \
- .get = snd_hdspm_get_emphasis, \
- .put = snd_hdspm_put_emphasis \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
+ .name = xname, \
+ .index = xindex, \
+ .info = snd_hdspm_info_emphasis, \
+ .get = snd_hdspm_get_emphasis, \
+ .put = snd_hdspm_put_emphasis \
}
static int hdspm_emphasis(struct hdspm * hdspm)
#define HDSPM_DOLBY(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
- .name = xname, \
- .index = xindex, \
- .info = snd_hdspm_info_dolby, \
- .get = snd_hdspm_get_dolby, \
- .put = snd_hdspm_put_dolby \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
+ .name = xname, \
+ .index = xindex, \
+ .info = snd_hdspm_info_dolby, \
+ .get = snd_hdspm_get_dolby, \
+ .put = snd_hdspm_put_dolby \
}
static int hdspm_dolby(struct hdspm * hdspm)
#define HDSPM_PROFESSIONAL(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
- .name = xname, \
- .index = xindex, \
- .info = snd_hdspm_info_professional, \
- .get = snd_hdspm_get_professional, \
- .put = snd_hdspm_put_professional \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
+ .name = xname, \
+ .index = xindex, \
+ .info = snd_hdspm_info_professional, \
+ .get = snd_hdspm_get_professional, \
+ .put = snd_hdspm_put_professional \
}
static int hdspm_professional(struct hdspm * hdspm)
}
#define HDSPM_INPUT_SELECT(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
- .name = xname, \
- .index = xindex, \
- .info = snd_hdspm_info_input_select, \
- .get = snd_hdspm_get_input_select, \
- .put = snd_hdspm_put_input_select \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
+ .name = xname, \
+ .index = xindex, \
+ .info = snd_hdspm_info_input_select, \
+ .get = snd_hdspm_get_input_select, \
+ .put = snd_hdspm_put_input_select \
}
static int hdspm_input_select(struct hdspm * hdspm)
#define HDSPM_DS_WIRE(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
- .name = xname, \
- .index = xindex, \
- .info = snd_hdspm_info_ds_wire, \
- .get = snd_hdspm_get_ds_wire, \
- .put = snd_hdspm_put_ds_wire \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
+ .name = xname, \
+ .index = xindex, \
+ .info = snd_hdspm_info_ds_wire, \
+ .get = snd_hdspm_get_ds_wire, \
+ .put = snd_hdspm_put_ds_wire \
}
static int hdspm_ds_wire(struct hdspm * hdspm)
#define HDSPM_QS_WIRE(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
- .name = xname, \
- .index = xindex, \
- .info = snd_hdspm_info_qs_wire, \
- .get = snd_hdspm_get_qs_wire, \
- .put = snd_hdspm_put_qs_wire \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
+ .name = xname, \
+ .index = xindex, \
+ .info = snd_hdspm_info_qs_wire, \
+ .get = snd_hdspm_get_qs_wire, \
+ .put = snd_hdspm_put_qs_wire \
}
static int hdspm_qs_wire(struct hdspm * hdspm)
}
#define HDSPM_MIXER(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \
- .name = xname, \
- .index = xindex, \
- .device = 0, \
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | \
- SNDRV_CTL_ELEM_ACCESS_VOLATILE, \
- .info = snd_hdspm_info_mixer, \
- .get = snd_hdspm_get_mixer, \
- .put = snd_hdspm_put_mixer \
+{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \
+ .name = xname, \
+ .index = xindex, \
+ .device = 0, \
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | \
+ SNDRV_CTL_ELEM_ACCESS_VOLATILE, \
+ .info = snd_hdspm_info_mixer, \
+ .get = snd_hdspm_get_mixer, \
+ .put = snd_hdspm_put_mixer \
}
static int snd_hdspm_info_mixer(struct snd_kcontrol *kcontrol,
*/
#define HDSPM_PLAYBACK_MIXER \
-{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
- .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_WRITE | \
- SNDRV_CTL_ELEM_ACCESS_VOLATILE, \
- .info = snd_hdspm_info_playback_mixer, \
- .get = snd_hdspm_get_playback_mixer, \
- .put = snd_hdspm_put_playback_mixer \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
+ .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_WRITE | \
+ SNDRV_CTL_ELEM_ACCESS_VOLATILE, \
+ .info = snd_hdspm_info_playback_mixer, \
+ .get = snd_hdspm_get_playback_mixer, \
+ .put = snd_hdspm_put_playback_mixer \
}
static int snd_hdspm_info_playback_mixer(struct snd_kcontrol *kcontrol,
break;
case MADI:
- case AES32:
- status = hdspm_read(hdspm, HDSPM_statusRegister2);
+ status = hdspm_read(hdspm, HDSPM_statusRegister);
lock = (status & HDSPM_syncInLock) ? 1 : 0;
sync = (status & HDSPM_syncInSync) ? 1 : 0;
break;
+ case AES32:
+ status = hdspm_read(hdspm, HDSPM_statusRegister2);
+ lock = (status & 0x100000) ? 1 : 0;
+ sync = (status & 0x200000) ? 1 : 0;
+ break;
+
case MADIface:
break;
}
case 8: /* SYNC IN */
val = hdspm_sync_in_sync_check(hdspm); break;
default:
- val = hdspm_s1_sync_check(hdspm, ucontrol->id.index-1);
+ val = hdspm_s1_sync_check(hdspm,
+ kcontrol->private_value-1);
}
+ break;
case AIO:
switch (kcontrol->private_value) {
default:
val = hdspm_s1_sync_check(hdspm, ucontrol->id.index-1);
}
+ break;
case MADI:
switch (kcontrol->private_value) {
case 3: /* SYNC_IN */
val = hdspm_sync_in_sync_check(hdspm); break;
}
+ break;
case MADIface:
val = hdspm_madi_sync_check(hdspm); /* MADI */
val = hdspm_aes_sync_check(hdspm,
kcontrol->private_value-1);
}
+ break;
}
HDSPM_PREF_SYNC_REF("Preferred Sync Reference", 0),
HDSPM_AUTOSYNC_REF("AutoSync Reference", 0),
HDSPM_SYSTEM_SAMPLE_RATE("System Sample Rate", 0),
+ HDSPM_AUTOSYNC_SAMPLE_RATE("External Rate", 0),
HDSPM_SYNC_CHECK("WC SyncCheck", 0),
HDSPM_SYNC_CHECK("MADI SyncCheck", 1),
- HDSPM_SYNC_CHECK("TCO SyncCHeck", 2),
+ HDSPM_SYNC_CHECK("TCO SyncCheck", 2),
HDSPM_SYNC_CHECK("SYNC IN SyncCheck", 3),
HDSPM_LINE_OUT("Line Out", 0),
HDSPM_TX_64("TX 64 channels mode", 0),
insel = "Coaxial";
break;
default:
- insel = "Unkown";
+ insel = "Unknown";
}
snd_iprintf(buffer,
static unsigned int arizona_sysclk_48k_rates[] = {
6144000,
12288000,
- 22579200,
+ 24576000,
49152000,
73728000,
98304000,
static unsigned int arizona_sysclk_44k1_rates[] = {
5644800,
11289600,
- 24576000,
+ 22579200,
45158400,
67737600,
90316800,
gpio_nreset = cs4271plat->gpio_nreset;
if (gpio_nreset >= 0)
- if (gpio_request(gpio_nreset, "CS4271 Reset"))
+ if (devm_gpio_request(codec->dev, gpio_nreset, "CS4271 Reset"))
gpio_nreset = -EINVAL;
if (gpio_nreset >= 0) {
/* Reset codec */
static int cs4271_remove(struct snd_soc_codec *codec)
{
struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec);
- int gpio_nreset;
- gpio_nreset = cs4271->gpio_nreset;
-
- if (gpio_is_valid(gpio_nreset)) {
+ if (gpio_is_valid(cs4271->gpio_nreset))
/* Set codec to the reset state */
- gpio_set_value(gpio_nreset, 0);
- gpio_free(gpio_nreset);
- }
+ gpio_set_value(cs4271->gpio_nreset, 0);
return 0;
};
if ((freq >= CS42L52_MIN_CLK) && (freq <= CS42L52_MAX_CLK)) {
cs42l52->sysclk = freq;
} else {
- dev_err(codec->dev, "Invalid freq paramter\n");
+ dev_err(codec->dev, "Invalid freq parameter\n");
return -EINVAL;
}
return 0;
{
struct snd_soc_codec *codec = codec_dai->codec;
struct cs42l52_private *cs42l52 = snd_soc_codec_get_drvdata(codec);
- int ret = 0;
u8 iface = 0;
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_NB_IF:
break;
default:
- ret = -EINVAL;
+ return -EINVAL;
}
cs42l52->config.format = iface;
snd_soc_write(codec, CS42L52_IFACE_CTL1, cs42l52->config.format);
static DECLARE_TLV_DB_SCALE(digital_tlv, -6400, 50, 0);
static DECLARE_TLV_DB_SCALE(noise_tlv, 0, 600, 0);
+static const struct reg_default wm5102_sysclk_reva_patch[] = {
+ { 0x3000, 0x2225 },
+ { 0x3001, 0x3a03 },
+ { 0x3002, 0x0225 },
+ { 0x3003, 0x0801 },
+ { 0x3004, 0x6249 },
+ { 0x3005, 0x0c04 },
+ { 0x3006, 0x0225 },
+ { 0x3007, 0x5901 },
+ { 0x3008, 0xe249 },
+ { 0x3009, 0x030d },
+ { 0x300a, 0x0249 },
+ { 0x300b, 0x2c01 },
+ { 0x300c, 0xe249 },
+ { 0x300d, 0x4342 },
+ { 0x300e, 0xe249 },
+ { 0x300f, 0x73c0 },
+ { 0x3010, 0x4249 },
+ { 0x3011, 0x0c00 },
+ { 0x3012, 0x0225 },
+ { 0x3013, 0x1f01 },
+ { 0x3014, 0x0225 },
+ { 0x3015, 0x1e01 },
+ { 0x3016, 0x0225 },
+ { 0x3017, 0xfa00 },
+ { 0x3018, 0x0000 },
+ { 0x3019, 0xf000 },
+ { 0x301a, 0x0000 },
+ { 0x301b, 0xf000 },
+ { 0x301c, 0x0000 },
+ { 0x301d, 0xf000 },
+ { 0x301e, 0x0000 },
+ { 0x301f, 0xf000 },
+ { 0x3020, 0x0000 },
+ { 0x3021, 0xf000 },
+ { 0x3022, 0x0000 },
+ { 0x3023, 0xf000 },
+ { 0x3024, 0x0000 },
+ { 0x3025, 0xf000 },
+ { 0x3026, 0x0000 },
+ { 0x3027, 0xf000 },
+ { 0x3028, 0x0000 },
+ { 0x3029, 0xf000 },
+ { 0x302a, 0x0000 },
+ { 0x302b, 0xf000 },
+ { 0x302c, 0x0000 },
+ { 0x302d, 0xf000 },
+ { 0x302e, 0x0000 },
+ { 0x302f, 0xf000 },
+ { 0x3030, 0x0225 },
+ { 0x3031, 0x1a01 },
+ { 0x3032, 0x0225 },
+ { 0x3033, 0x1e00 },
+ { 0x3034, 0x0225 },
+ { 0x3035, 0x1f00 },
+ { 0x3036, 0x6225 },
+ { 0x3037, 0xf800 },
+ { 0x3038, 0x0000 },
+ { 0x3039, 0xf000 },
+ { 0x303a, 0x0000 },
+ { 0x303b, 0xf000 },
+ { 0x303c, 0x0000 },
+ { 0x303d, 0xf000 },
+ { 0x303e, 0x0000 },
+ { 0x303f, 0xf000 },
+ { 0x3040, 0x2226 },
+ { 0x3041, 0x3a03 },
+ { 0x3042, 0x0226 },
+ { 0x3043, 0x0801 },
+ { 0x3044, 0x6249 },
+ { 0x3045, 0x0c06 },
+ { 0x3046, 0x0226 },
+ { 0x3047, 0x5901 },
+ { 0x3048, 0xe249 },
+ { 0x3049, 0x030d },
+ { 0x304a, 0x0249 },
+ { 0x304b, 0x2c01 },
+ { 0x304c, 0xe249 },
+ { 0x304d, 0x4342 },
+ { 0x304e, 0xe249 },
+ { 0x304f, 0x73c0 },
+ { 0x3050, 0x4249 },
+ { 0x3051, 0x0c00 },
+ { 0x3052, 0x0226 },
+ { 0x3053, 0x1f01 },
+ { 0x3054, 0x0226 },
+ { 0x3055, 0x1e01 },
+ { 0x3056, 0x0226 },
+ { 0x3057, 0xfa00 },
+ { 0x3058, 0x0000 },
+ { 0x3059, 0xf000 },
+ { 0x305a, 0x0000 },
+ { 0x305b, 0xf000 },
+ { 0x305c, 0x0000 },
+ { 0x305d, 0xf000 },
+ { 0x305e, 0x0000 },
+ { 0x305f, 0xf000 },
+ { 0x3060, 0x0000 },
+ { 0x3061, 0xf000 },
+ { 0x3062, 0x0000 },
+ { 0x3063, 0xf000 },
+ { 0x3064, 0x0000 },
+ { 0x3065, 0xf000 },
+ { 0x3066, 0x0000 },
+ { 0x3067, 0xf000 },
+ { 0x3068, 0x0000 },
+ { 0x3069, 0xf000 },
+ { 0x306a, 0x0000 },
+ { 0x306b, 0xf000 },
+ { 0x306c, 0x0000 },
+ { 0x306d, 0xf000 },
+ { 0x306e, 0x0000 },
+ { 0x306f, 0xf000 },
+ { 0x3070, 0x0226 },
+ { 0x3071, 0x1a01 },
+ { 0x3072, 0x0226 },
+ { 0x3073, 0x1e00 },
+ { 0x3074, 0x0226 },
+ { 0x3075, 0x1f00 },
+ { 0x3076, 0x6226 },
+ { 0x3077, 0xf800 },
+ { 0x3078, 0x0000 },
+ { 0x3079, 0xf000 },
+ { 0x307a, 0x0000 },
+ { 0x307b, 0xf000 },
+ { 0x307c, 0x0000 },
+ { 0x307d, 0xf000 },
+ { 0x307e, 0x0000 },
+ { 0x307f, 0xf000 },
+ { 0x3080, 0x2227 },
+ { 0x3081, 0x3a03 },
+ { 0x3082, 0x0227 },
+ { 0x3083, 0x0801 },
+ { 0x3084, 0x6255 },
+ { 0x3085, 0x0c04 },
+ { 0x3086, 0x0227 },
+ { 0x3087, 0x5901 },
+ { 0x3088, 0xe255 },
+ { 0x3089, 0x030d },
+ { 0x308a, 0x0255 },
+ { 0x308b, 0x2c01 },
+ { 0x308c, 0xe255 },
+ { 0x308d, 0x4342 },
+ { 0x308e, 0xe255 },
+ { 0x308f, 0x73c0 },
+ { 0x3090, 0x4255 },
+ { 0x3091, 0x0c00 },
+ { 0x3092, 0x0227 },
+ { 0x3093, 0x1f01 },
+ { 0x3094, 0x0227 },
+ { 0x3095, 0x1e01 },
+ { 0x3096, 0x0227 },
+ { 0x3097, 0xfa00 },
+ { 0x3098, 0x0000 },
+ { 0x3099, 0xf000 },
+ { 0x309a, 0x0000 },
+ { 0x309b, 0xf000 },
+ { 0x309c, 0x0000 },
+ { 0x309d, 0xf000 },
+ { 0x309e, 0x0000 },
+ { 0x309f, 0xf000 },
+ { 0x30a0, 0x0000 },
+ { 0x30a1, 0xf000 },
+ { 0x30a2, 0x0000 },
+ { 0x30a3, 0xf000 },
+ { 0x30a4, 0x0000 },
+ { 0x30a5, 0xf000 },
+ { 0x30a6, 0x0000 },
+ { 0x30a7, 0xf000 },
+ { 0x30a8, 0x0000 },
+ { 0x30a9, 0xf000 },
+ { 0x30aa, 0x0000 },
+ { 0x30ab, 0xf000 },
+ { 0x30ac, 0x0000 },
+ { 0x30ad, 0xf000 },
+ { 0x30ae, 0x0000 },
+ { 0x30af, 0xf000 },
+ { 0x30b0, 0x0227 },
+ { 0x30b1, 0x1a01 },
+ { 0x30b2, 0x0227 },
+ { 0x30b3, 0x1e00 },
+ { 0x30b4, 0x0227 },
+ { 0x30b5, 0x1f00 },
+ { 0x30b6, 0x6227 },
+ { 0x30b7, 0xf800 },
+ { 0x30b8, 0x0000 },
+ { 0x30b9, 0xf000 },
+ { 0x30ba, 0x0000 },
+ { 0x30bb, 0xf000 },
+ { 0x30bc, 0x0000 },
+ { 0x30bd, 0xf000 },
+ { 0x30be, 0x0000 },
+ { 0x30bf, 0xf000 },
+ { 0x30c0, 0x2228 },
+ { 0x30c1, 0x3a03 },
+ { 0x30c2, 0x0228 },
+ { 0x30c3, 0x0801 },
+ { 0x30c4, 0x6255 },
+ { 0x30c5, 0x0c06 },
+ { 0x30c6, 0x0228 },
+ { 0x30c7, 0x5901 },
+ { 0x30c8, 0xe255 },
+ { 0x30c9, 0x030d },
+ { 0x30ca, 0x0255 },
+ { 0x30cb, 0x2c01 },
+ { 0x30cc, 0xe255 },
+ { 0x30cd, 0x4342 },
+ { 0x30ce, 0xe255 },
+ { 0x30cf, 0x73c0 },
+ { 0x30d0, 0x4255 },
+ { 0x30d1, 0x0c00 },
+ { 0x30d2, 0x0228 },
+ { 0x30d3, 0x1f01 },
+ { 0x30d4, 0x0228 },
+ { 0x30d5, 0x1e01 },
+ { 0x30d6, 0x0228 },
+ { 0x30d7, 0xfa00 },
+ { 0x30d8, 0x0000 },
+ { 0x30d9, 0xf000 },
+ { 0x30da, 0x0000 },
+ { 0x30db, 0xf000 },
+ { 0x30dc, 0x0000 },
+ { 0x30dd, 0xf000 },
+ { 0x30de, 0x0000 },
+ { 0x30df, 0xf000 },
+ { 0x30e0, 0x0000 },
+ { 0x30e1, 0xf000 },
+ { 0x30e2, 0x0000 },
+ { 0x30e3, 0xf000 },
+ { 0x30e4, 0x0000 },
+ { 0x30e5, 0xf000 },
+ { 0x30e6, 0x0000 },
+ { 0x30e7, 0xf000 },
+ { 0x30e8, 0x0000 },
+ { 0x30e9, 0xf000 },
+ { 0x30ea, 0x0000 },
+ { 0x30eb, 0xf000 },
+ { 0x30ec, 0x0000 },
+ { 0x30ed, 0xf000 },
+ { 0x30ee, 0x0000 },
+ { 0x30ef, 0xf000 },
+ { 0x30f0, 0x0228 },
+ { 0x30f1, 0x1a01 },
+ { 0x30f2, 0x0228 },
+ { 0x30f3, 0x1e00 },
+ { 0x30f4, 0x0228 },
+ { 0x30f5, 0x1f00 },
+ { 0x30f6, 0x6228 },
+ { 0x30f7, 0xf800 },
+ { 0x30f8, 0x0000 },
+ { 0x30f9, 0xf000 },
+ { 0x30fa, 0x0000 },
+ { 0x30fb, 0xf000 },
+ { 0x30fc, 0x0000 },
+ { 0x30fd, 0xf000 },
+ { 0x30fe, 0x0000 },
+ { 0x30ff, 0xf000 },
+ { 0x3100, 0x222b },
+ { 0x3101, 0x3a03 },
+ { 0x3102, 0x222b },
+ { 0x3103, 0x5803 },
+ { 0x3104, 0xe26f },
+ { 0x3105, 0x030d },
+ { 0x3106, 0x626f },
+ { 0x3107, 0x2c01 },
+ { 0x3108, 0xe26f },
+ { 0x3109, 0x4342 },
+ { 0x310a, 0xe26f },
+ { 0x310b, 0x73c0 },
+ { 0x310c, 0x026f },
+ { 0x310d, 0x0c00 },
+ { 0x310e, 0x022b },
+ { 0x310f, 0x1f01 },
+ { 0x3110, 0x022b },
+ { 0x3111, 0x1e01 },
+ { 0x3112, 0x022b },
+ { 0x3113, 0xfa00 },
+ { 0x3114, 0x0000 },
+ { 0x3115, 0xf000 },
+ { 0x3116, 0x0000 },
+ { 0x3117, 0xf000 },
+ { 0x3118, 0x0000 },
+ { 0x3119, 0xf000 },
+ { 0x311a, 0x0000 },
+ { 0x311b, 0xf000 },
+ { 0x311c, 0x0000 },
+ { 0x311d, 0xf000 },
+ { 0x311e, 0x0000 },
+ { 0x311f, 0xf000 },
+ { 0x3120, 0x022b },
+ { 0x3121, 0x0a01 },
+ { 0x3122, 0x022b },
+ { 0x3123, 0x1e00 },
+ { 0x3124, 0x022b },
+ { 0x3125, 0x1f00 },
+ { 0x3126, 0x622b },
+ { 0x3127, 0xf800 },
+ { 0x3128, 0x0000 },
+ { 0x3129, 0xf000 },
+ { 0x312a, 0x0000 },
+ { 0x312b, 0xf000 },
+ { 0x312c, 0x0000 },
+ { 0x312d, 0xf000 },
+ { 0x312e, 0x0000 },
+ { 0x312f, 0xf000 },
+ { 0x3130, 0x0000 },
+ { 0x3131, 0xf000 },
+ { 0x3132, 0x0000 },
+ { 0x3133, 0xf000 },
+ { 0x3134, 0x0000 },
+ { 0x3135, 0xf000 },
+ { 0x3136, 0x0000 },
+ { 0x3137, 0xf000 },
+ { 0x3138, 0x0000 },
+ { 0x3139, 0xf000 },
+ { 0x313a, 0x0000 },
+ { 0x313b, 0xf000 },
+ { 0x313c, 0x0000 },
+ { 0x313d, 0xf000 },
+ { 0x313e, 0x0000 },
+ { 0x313f, 0xf000 },
+ { 0x3140, 0x0000 },
+ { 0x3141, 0xf000 },
+ { 0x3142, 0x0000 },
+ { 0x3143, 0xf000 },
+ { 0x3144, 0x0000 },
+ { 0x3145, 0xf000 },
+ { 0x3146, 0x0000 },
+ { 0x3147, 0xf000 },
+ { 0x3148, 0x0000 },
+ { 0x3149, 0xf000 },
+ { 0x314a, 0x0000 },
+ { 0x314b, 0xf000 },
+ { 0x314c, 0x0000 },
+ { 0x314d, 0xf000 },
+ { 0x314e, 0x0000 },
+ { 0x314f, 0xf000 },
+ { 0x3150, 0x0000 },
+ { 0x3151, 0xf000 },
+ { 0x3152, 0x0000 },
+ { 0x3153, 0xf000 },
+ { 0x3154, 0x0000 },
+ { 0x3155, 0xf000 },
+ { 0x3156, 0x0000 },
+ { 0x3157, 0xf000 },
+ { 0x3158, 0x0000 },
+ { 0x3159, 0xf000 },
+ { 0x315a, 0x0000 },
+ { 0x315b, 0xf000 },
+ { 0x315c, 0x0000 },
+ { 0x315d, 0xf000 },
+ { 0x315e, 0x0000 },
+ { 0x315f, 0xf000 },
+ { 0x3160, 0x0000 },
+ { 0x3161, 0xf000 },
+ { 0x3162, 0x0000 },
+ { 0x3163, 0xf000 },
+ { 0x3164, 0x0000 },
+ { 0x3165, 0xf000 },
+ { 0x3166, 0x0000 },
+ { 0x3167, 0xf000 },
+ { 0x3168, 0x0000 },
+ { 0x3169, 0xf000 },
+ { 0x316a, 0x0000 },
+ { 0x316b, 0xf000 },
+ { 0x316c, 0x0000 },
+ { 0x316d, 0xf000 },
+ { 0x316e, 0x0000 },
+ { 0x316f, 0xf000 },
+ { 0x3170, 0x0000 },
+ { 0x3171, 0xf000 },
+ { 0x3172, 0x0000 },
+ { 0x3173, 0xf000 },
+ { 0x3174, 0x0000 },
+ { 0x3175, 0xf000 },
+ { 0x3176, 0x0000 },
+ { 0x3177, 0xf000 },
+ { 0x3178, 0x0000 },
+ { 0x3179, 0xf000 },
+ { 0x317a, 0x0000 },
+ { 0x317b, 0xf000 },
+ { 0x317c, 0x0000 },
+ { 0x317d, 0xf000 },
+ { 0x317e, 0x0000 },
+ { 0x317f, 0xf000 },
+ { 0x3180, 0x2001 },
+ { 0x3181, 0xf101 },
+ { 0x3182, 0x0000 },
+ { 0x3183, 0xf000 },
+ { 0x3184, 0x0000 },
+ { 0x3185, 0xf000 },
+ { 0x3186, 0x0000 },
+ { 0x3187, 0xf000 },
+ { 0x3188, 0x0000 },
+ { 0x3189, 0xf000 },
+ { 0x318a, 0x0000 },
+ { 0x318b, 0xf000 },
+ { 0x318c, 0x0000 },
+ { 0x318d, 0xf000 },
+ { 0x318e, 0x0000 },
+ { 0x318f, 0xf000 },
+ { 0x3190, 0x0000 },
+ { 0x3191, 0xf000 },
+ { 0x3192, 0x0000 },
+ { 0x3193, 0xf000 },
+ { 0x3194, 0x0000 },
+ { 0x3195, 0xf000 },
+ { 0x3196, 0x0000 },
+ { 0x3197, 0xf000 },
+ { 0x3198, 0x0000 },
+ { 0x3199, 0xf000 },
+ { 0x319a, 0x0000 },
+ { 0x319b, 0xf000 },
+ { 0x319c, 0x0000 },
+ { 0x319d, 0xf000 },
+ { 0x319e, 0x0000 },
+ { 0x319f, 0xf000 },
+ { 0x31a0, 0x0000 },
+ { 0x31a1, 0xf000 },
+ { 0x31a2, 0x0000 },
+ { 0x31a3, 0xf000 },
+ { 0x31a4, 0x0000 },
+ { 0x31a5, 0xf000 },
+ { 0x31a6, 0x0000 },
+ { 0x31a7, 0xf000 },
+ { 0x31a8, 0x0000 },
+ { 0x31a9, 0xf000 },
+ { 0x31aa, 0x0000 },
+ { 0x31ab, 0xf000 },
+ { 0x31ac, 0x0000 },
+ { 0x31ad, 0xf000 },
+ { 0x31ae, 0x0000 },
+ { 0x31af, 0xf000 },
+ { 0x31b0, 0x0000 },
+ { 0x31b1, 0xf000 },
+ { 0x31b2, 0x0000 },
+ { 0x31b3, 0xf000 },
+ { 0x31b4, 0x0000 },
+ { 0x31b5, 0xf000 },
+ { 0x31b6, 0x0000 },
+ { 0x31b7, 0xf000 },
+ { 0x31b8, 0x0000 },
+ { 0x31b9, 0xf000 },
+ { 0x31ba, 0x0000 },
+ { 0x31bb, 0xf000 },
+ { 0x31bc, 0x0000 },
+ { 0x31bd, 0xf000 },
+ { 0x31be, 0x0000 },
+ { 0x31bf, 0xf000 },
+ { 0x31c0, 0x0000 },
+ { 0x31c1, 0xf000 },
+ { 0x31c2, 0x0000 },
+ { 0x31c3, 0xf000 },
+ { 0x31c4, 0x0000 },
+ { 0x31c5, 0xf000 },
+ { 0x31c6, 0x0000 },
+ { 0x31c7, 0xf000 },
+ { 0x31c8, 0x0000 },
+ { 0x31c9, 0xf000 },
+ { 0x31ca, 0x0000 },
+ { 0x31cb, 0xf000 },
+ { 0x31cc, 0x0000 },
+ { 0x31cd, 0xf000 },
+ { 0x31ce, 0x0000 },
+ { 0x31cf, 0xf000 },
+ { 0x31d0, 0x0000 },
+ { 0x31d1, 0xf000 },
+ { 0x31d2, 0x0000 },
+ { 0x31d3, 0xf000 },
+ { 0x31d4, 0x0000 },
+ { 0x31d5, 0xf000 },
+ { 0x31d6, 0x0000 },
+ { 0x31d7, 0xf000 },
+ { 0x31d8, 0x0000 },
+ { 0x31d9, 0xf000 },
+ { 0x31da, 0x0000 },
+ { 0x31db, 0xf000 },
+ { 0x31dc, 0x0000 },
+ { 0x31dd, 0xf000 },
+ { 0x31de, 0x0000 },
+ { 0x31df, 0xf000 },
+ { 0x31e0, 0x0000 },
+ { 0x31e1, 0xf000 },
+ { 0x31e2, 0x0000 },
+ { 0x31e3, 0xf000 },
+ { 0x31e4, 0x0000 },
+ { 0x31e5, 0xf000 },
+ { 0x31e6, 0x0000 },
+ { 0x31e7, 0xf000 },
+ { 0x31e8, 0x0000 },
+ { 0x31e9, 0xf000 },
+ { 0x31ea, 0x0000 },
+ { 0x31eb, 0xf000 },
+ { 0x31ec, 0x0000 },
+ { 0x31ed, 0xf000 },
+ { 0x31ee, 0x0000 },
+ { 0x31ef, 0xf000 },
+ { 0x31f0, 0x0000 },
+ { 0x31f1, 0xf000 },
+ { 0x31f2, 0x0000 },
+ { 0x31f3, 0xf000 },
+ { 0x31f4, 0x0000 },
+ { 0x31f5, 0xf000 },
+ { 0x31f6, 0x0000 },
+ { 0x31f7, 0xf000 },
+ { 0x31f8, 0x0000 },
+ { 0x31f9, 0xf000 },
+ { 0x31fa, 0x0000 },
+ { 0x31fb, 0xf000 },
+ { 0x31fc, 0x0000 },
+ { 0x31fd, 0xf000 },
+ { 0x31fe, 0x0000 },
+ { 0x31ff, 0xf000 },
+ { 0x024d, 0xff50 },
+ { 0x0252, 0xff50 },
+ { 0x0259, 0x0112 },
+ { 0x025e, 0x0112 },
+};
+
+static int wm5102_sysclk_ev(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = w->codec;
+ struct arizona *arizona = dev_get_drvdata(codec->dev);
+ struct regmap *regmap = codec->control_data;
+ const struct reg_default *patch = NULL;
+ int i, patch_size;
+
+ switch (arizona->rev) {
+ case 0:
+ patch = wm5102_sysclk_reva_patch;
+ patch_size = ARRAY_SIZE(wm5102_sysclk_reva_patch);
+ break;
+ }
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ if (patch)
+ for (i = 0; i < patch_size; i++)
+ regmap_write(regmap, patch[i].reg,
+ patch[i].def);
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
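The wm5102_sysclk_ev() handler above replays the rev-A register patch each time
SYSCLK powers up. As a minimal sketch of the same pattern, with hypothetical
register/value pairs and assuming only struct reg_default and regmap_write()
from <linux/regmap.h>:

	/*
	 * Sketch: replay a register patch over regmap after a supply comes
	 * up, mirroring the loop in wm5102_sysclk_ev(). Registers are made up.
	 */
	#include <linux/kernel.h>
	#include <linux/regmap.h>

	static const struct reg_default example_patch[] = {
		{ 0x0010, 0x0001 },
		{ 0x0011, 0x00ff },
	};

	static void example_apply_patch(struct regmap *regmap)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(example_patch); i++)
			regmap_write(regmap, example_patch[i].reg,
				     example_patch[i].def);
	}
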
static const struct snd_kcontrol_new wm5102_snd_controls[] = {
SOC_SINGLE("IN1 High Performance Switch", ARIZONA_IN1L_CONTROL,
ARIZONA_IN1_OSR_SHIFT, 1, 0),
static const struct snd_soc_dapm_widget wm5102_dapm_widgets[] = {
SND_SOC_DAPM_SUPPLY("SYSCLK", ARIZONA_SYSTEM_CLOCK_1, ARIZONA_SYSCLK_ENA_SHIFT,
- 0, NULL, 0),
+ 0, wm5102_sysclk_ev, SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_SUPPLY("ASYNCCLK", ARIZONA_ASYNC_CLOCK_1,
ARIZONA_ASYNC_CLK_ENA_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY("OPCLK", ARIZONA_OUTPUT_SYSTEM_CLOCK,
wm8978->mclk_idx = -1;
f_sel = wm8978->f_mclk;
} else {
- if (!wm8978->f_pllout) {
+ if (!wm8978->f_opclk) {
/* We only enter here, if OPCLK is not used */
int ret = wm8978_configure_pll(codec);
if (ret < 0)
struct snd_kcontrol *kcontrol, int event)
{
struct snd_soc_codec *codec = w->codec;
+ struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
struct wm8994 *control = codec->control_data;
int mask = WM8994_AIF1DAC1L_ENA | WM8994_AIF1DAC1R_ENA;
int i;
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
+ /* Don't enable timeslot 2 if not in use */
+ if (wm8994->channels[0] <= 2)
+ mask &= ~(WM8994_AIF1DAC2L_ENA | WM8994_AIF1DAC2R_ENA);
+
val = snd_soc_read(codec, WM8994_AIF1_CONTROL_1);
if ((val & WM8994_AIF1ADCL_SRC) &&
(val & WM8994_AIF1ADCR_SRC))
return -EINVAL;
}
- bclk_rate = params_rate(params) * 4;
+ bclk_rate = params_rate(params);
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
bclk_rate *= 16;
return -EINVAL;
}
+ wm8994->channels[id] = params_channels(params);
+ switch (params_channels(params)) {
+ case 1:
+ case 2:
+ bclk_rate *= 2;
+ break;
+ default:
+ bclk_rate *= 4;
+ break;
+ }
+
/* Try to find an appropriate sample rate; look for an exact match. */
for (i = 0; i < ARRAY_SIZE(srs); i++)
if (srs[i].rate == params_rate(params))
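The two hunks above record the channel count per AIF and derive BCLK from it:
the sample rate is first scaled by the sample width, then by two slots for
mono/stereo or four for larger TDM configurations. A standalone sketch of the
arithmetic, assuming 16-bit samples (values are illustrative):

	/*
	 * Sketch of the BCLK derivation above for SNDRV_PCM_FORMAT_S16_LE:
	 * example_bclk(48000, 2) == 1536000, example_bclk(48000, 4) == 3072000.
	 */
	static unsigned int example_bclk(unsigned int rate, unsigned int channels)
	{
		unsigned int bclk = rate * 16;		/* bits per sample */

		return bclk * (channels <= 2 ? 2 : 4);	/* slots on the bus */
	}
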
} while (count--);
if (count == 0)
- dev_warn(codec->dev, "No impedence range reported for jack\n");
+ dev_warn(codec->dev, "No impedance range reported for jack\n");
#ifndef CONFIG_SND_SOC_WM8994_MODULE
trace_snd_soc_jack_irq(dev_name(codec->dev));
int sysclk_rate[2];
int mclk[2];
int aifclk[2];
+ int channels[2];
struct wm8994_fll_config fll[2], fll_suspend[2];
struct completion fll_locked[2];
bool fll_locked_irq;
printk(KERN_WARNING "%s: got err interrupt 0x%lx\n",
__func__, cause);
writel(cause, priv->io + KIRKWOOD_ERR_CAUSE);
- return IRQ_HANDLED;
}
/* we've enabled only bytes interrupts ... */
}
dram = mv_mbus_dram_info();
- addr = virt_to_phys(substream->dma_buffer.area);
+ addr = substream->dma_buffer.addr;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
prdata->play_stream = substream;
kirkwood_dma_conf_mbus_windows(priv->io,
do {
cpu_relax();
value = readl(io + KIRKWOOD_DCO_SPCR_STATUS);
- value &= KIRKWOOD_DCO_SPCR_STATUS;
+ value &= KIRKWOOD_DCO_SPCR_STATUS_DCO_LOCK;
} while (value == 0);
}
int cmd, struct snd_soc_dai *dai)
{
struct kirkwood_dma_data *priv = snd_soc_dai_get_drvdata(dai);
- unsigned long value;
-
- /*
- * specs says KIRKWOOD_PLAYCTL must be read 2 times before
- * changing it. So read 1 time here and 1 later.
- */
- value = readl(priv->io + KIRKWOOD_PLAYCTL);
+ uint32_t ctl, value;
+
+ ctl = readl(priv->io + KIRKWOOD_PLAYCTL);
+ if (ctl & KIRKWOOD_PLAYCTL_PAUSE) {
+ unsigned timeout = 5000;
+ /*
+ * The Armada510 spec says that if we enter pause mode, the
+ * busy bit must be read back as clear _twice_. Make sure
+		 * we respect that, otherwise we get DMA underruns.
+ */
+ do {
+ value = ctl;
+ ctl = readl(priv->io + KIRKWOOD_PLAYCTL);
+ if (!((ctl | value) & KIRKWOOD_PLAYCTL_PLAY_BUSY))
+ break;
+ udelay(1);
+ } while (timeout--);
+
+ if ((ctl | value) & KIRKWOOD_PLAYCTL_PLAY_BUSY)
+ dev_notice(dai->dev, "timed out waiting for busy to deassert: %08x\n",
+ ctl);
+ }
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
- /* stop audio, enable interrupts */
- value = readl(priv->io + KIRKWOOD_PLAYCTL);
- value |= KIRKWOOD_PLAYCTL_PAUSE;
- writel(value, priv->io + KIRKWOOD_PLAYCTL);
-
value = readl(priv->io + KIRKWOOD_INT_MASK);
value |= KIRKWOOD_INT_CAUSE_PLAY_BYTES;
writel(value, priv->io + KIRKWOOD_INT_MASK);
/* configure audio & enable i2s playback */
- value = readl(priv->io + KIRKWOOD_PLAYCTL);
- value &= ~KIRKWOOD_PLAYCTL_BURST_MASK;
- value &= ~(KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE
+ ctl &= ~KIRKWOOD_PLAYCTL_BURST_MASK;
+ ctl &= ~(KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE
| KIRKWOOD_PLAYCTL_SPDIF_EN);
if (priv->burst == 32)
- value |= KIRKWOOD_PLAYCTL_BURST_32;
+ ctl |= KIRKWOOD_PLAYCTL_BURST_32;
else
- value |= KIRKWOOD_PLAYCTL_BURST_128;
- value |= KIRKWOOD_PLAYCTL_I2S_EN;
- writel(value, priv->io + KIRKWOOD_PLAYCTL);
+ ctl |= KIRKWOOD_PLAYCTL_BURST_128;
+ ctl |= KIRKWOOD_PLAYCTL_I2S_EN;
+ writel(ctl, priv->io + KIRKWOOD_PLAYCTL);
break;
case SNDRV_PCM_TRIGGER_STOP:
/* stop audio, disable interrupts */
- value = readl(priv->io + KIRKWOOD_PLAYCTL);
- value |= KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE;
- writel(value, priv->io + KIRKWOOD_PLAYCTL);
+ ctl |= KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE;
+ writel(ctl, priv->io + KIRKWOOD_PLAYCTL);
value = readl(priv->io + KIRKWOOD_INT_MASK);
value &= ~KIRKWOOD_INT_CAUSE_PLAY_BYTES;
writel(value, priv->io + KIRKWOOD_INT_MASK);
/* disable all playbacks */
- value = readl(priv->io + KIRKWOOD_PLAYCTL);
- value &= ~(KIRKWOOD_PLAYCTL_I2S_EN | KIRKWOOD_PLAYCTL_SPDIF_EN);
- writel(value, priv->io + KIRKWOOD_PLAYCTL);
+ ctl &= ~(KIRKWOOD_PLAYCTL_I2S_EN | KIRKWOOD_PLAYCTL_SPDIF_EN);
+ writel(ctl, priv->io + KIRKWOOD_PLAYCTL);
break;
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
case SNDRV_PCM_TRIGGER_SUSPEND:
- value = readl(priv->io + KIRKWOOD_PLAYCTL);
- value |= KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE;
- writel(value, priv->io + KIRKWOOD_PLAYCTL);
+ ctl |= KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE;
+ writel(ctl, priv->io + KIRKWOOD_PLAYCTL);
break;
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- value = readl(priv->io + KIRKWOOD_PLAYCTL);
- value &= ~(KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE);
- writel(value, priv->io + KIRKWOOD_PLAYCTL);
+ ctl &= ~(KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE);
+ writel(ctl, priv->io + KIRKWOOD_PLAYCTL);
break;
default:
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
- /* stop audio, enable interrupts */
- value = readl(priv->io + KIRKWOOD_RECCTL);
- value |= KIRKWOOD_RECCTL_PAUSE;
- writel(value, priv->io + KIRKWOOD_RECCTL);
-
value = readl(priv->io + KIRKWOOD_INT_MASK);
value |= KIRKWOOD_INT_CAUSE_REC_BYTES;
writel(value, priv->io + KIRKWOOD_INT_MASK);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
/*
- * write a data to saif data register to trigger
- * the transfer
+ * write data to saif data register to trigger
+ * the transfer.
+ * For 24-bit format the 32-bit FIFO register stores
+ * only one channel, so we need to write twice.
+		 * This is also safe for the other non-24-bit formats.
*/
__raw_writel(0, saif->base + SAIF_DATA);
+ __raw_writel(0, saif->base + SAIF_DATA);
} else {
/*
- * read a data from saif data register to trigger
- * the receive
+ * read data from saif data register to trigger
+ * the receive.
+ * For 24-bit format the 32-bit FIFO register stores
+ * only one channel, so we need to read twice.
+		 * This is also safe for the other non-24-bit formats.
*/
__raw_readl(saif->base + SAIF_DATA);
+ __raw_readl(saif->base + SAIF_DATA);
}
master_saif->ongoing = 1;
MODULE_AUTHOR("Freescale Semiconductor, Inc.");
MODULE_DESCRIPTION("MXS ASoC SAIF driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mxs-saif");
mutex_init(&dmic->mutex);
- dmic->fclk = clk_get(dmic->dev, "dmic_fck");
+ dmic->fclk = clk_get(dmic->dev, "fck");
if (IS_ERR(dmic->fclk)) {
- dev_err(dmic->dev, "cant get dmic_fck\n");
+		dev_err(dmic->dev, "can't get fck\n");
return -ENODEV;
}
#include <linux/clk.h>
#include <linux/platform_device.h>
+#include <linux/gpio.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
#include <asm/mach-types.h>
-#include <mach/hardware.h>
-#include <mach/gpio.h>
-#include <mach/board-zoom.h>
#include <linux/platform_data/asoc-ti-mcbsp.h>
+#include <linux/platform_data/gpio-omap.h>
/* Register descriptions for twl4030 codec part */
#include <linux/mfd/twl4030-audio.h>
select SND_SOC_WM5102
select SND_SOC_WM5110
select SND_SOC_WM9081
+ select SND_SOC_WM0010
+ select SND_SOC_WM1250_EV1
config SND_SOC_LOWLAND
tristate "Audio support for Wolfson Lowland"
{
.name = "Sub",
.stream_name = "Sub",
- .cpu_dai_name = "wm5110-aif3",
+ .cpu_dai_name = "wm5102-aif3",
.codec_dai_name = "wm9081-hifi",
.codec_name = "wm9081.1-006c",
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
val = (ucontrol->value.integer.value[0] + min) & mask;
val = val << shift;
- if (snd_soc_update_bits_locked(codec, reg, val_mask, val))
- return err;
+ err = snd_soc_update_bits_locked(codec, reg, val_mask, val);
+ if (err < 0)
+ return err;
if (snd_soc_volsw_is_stereo(mc)) {
val_mask = mask << rshift;
{
struct snd_soc_codec *codec;
- list_for_each_entry(codec, &card->codec_dev_list, list) {
+ list_for_each_entry(codec, &card->codec_dev_list, card_list) {
soc_dapm_shutdown_codec(&codec->dapm);
if (codec->dapm.bias_level == SND_SOC_BIAS_STANDBY)
snd_soc_dapm_set_bias_level(&codec->dapm,
.num_links = ARRAY_SIZE(mop500_dai_links),
};
+static void mop500_of_node_put(void)
+{
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ if (mop500_dai_links[i].cpu_of_node)
+ of_node_put((struct device_node *)
+ mop500_dai_links[i].cpu_of_node);
+ if (mop500_dai_links[i].codec_of_node)
+ of_node_put((struct device_node *)
+ mop500_dai_links[i].codec_of_node);
+ }
+}
+
static int __devinit mop500_of_probe(struct platform_device *pdev,
struct device_node *np)
{
if (!(msp_np[0] && msp_np[1] && codec_np)) {
dev_err(&pdev->dev, "Phandle missing or invalid\n");
+ mop500_of_node_put();
return -EINVAL;
}
return 0;
}
+
static int __devinit mop500_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
snd_soc_unregister_card(mop500_card);
mop500_ab8500_remove(mop500_card);
+ mop500_of_node_put();
return 0;
}
#include <linux/pinctrl/consumer.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <mach/hardware.h>
platform_data = devm_kzalloc(&pdev->dev,
sizeof(struct msp_i2s_platform_data), GFP_KERNEL);
if (!platform_data)
- ret = -ENOMEM;
+ return -ENOMEM;
}
} else
if (!platform_data)
- ret = -EINVAL;
-
- if (ret)
- goto err_res;
+ return -EINVAL;
dev_dbg(&pdev->dev, "%s: Enter (name: %s, id: %d).\n", __func__,
pdev->name, platform_data->id);
}
mutex_init(&chip->mutex);
- mutex_init(&chip->shutdown_mutex);
+ init_rwsem(&chip->shutdown_rwsem);
chip->index = idx;
chip->dev = dev;
chip->card = card;
return;
card = chip->card;
- mutex_lock(®ister_mutex);
- mutex_lock(&chip->shutdown_mutex);
+ down_write(&chip->shutdown_rwsem);
chip->shutdown = 1;
+ up_write(&chip->shutdown_rwsem);
+
+ mutex_lock(®ister_mutex);
chip->num_interfaces--;
if (chip->num_interfaces <= 0) {
snd_card_disconnect(card);
snd_usb_mixer_disconnect(p);
}
usb_chip[chip->index] = NULL;
- mutex_unlock(&chip->shutdown_mutex);
mutex_unlock(®ister_mutex);
snd_card_free_when_closed(card);
} else {
- mutex_unlock(&chip->shutdown_mutex);
mutex_unlock(®ister_mutex);
}
}
{
int err = -ENODEV;
+ down_read(&chip->shutdown_rwsem);
if (!chip->shutdown && !chip->probing)
err = usb_autopm_get_interface(chip->pm_intf);
+ up_read(&chip->shutdown_rwsem);
return err;
}
void snd_usb_autosuspend(struct snd_usb_audio *chip)
{
+ down_read(&chip->shutdown_rwsem);
if (!chip->shutdown && !chip->probing)
usb_autopm_put_interface(chip->pm_intf);
+ up_read(&chip->shutdown_rwsem);
}
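The hunks above convert the USB-audio shutdown protection from a mutex to an
rw_semaphore: disconnect takes it for write while setting chip->shutdown, and
the operational paths (autoresume, mixer and PCM I/O) take it for read around
their device accesses, so they can run concurrently with each other but never
with disconnect. A minimal sketch of that discipline, with hypothetical names
and init_rwsem() assumed to have run in probe:

	/*
	 * Sketch of the reader/writer shutdown discipline used above.
	 * Names are made up.
	 */
	#include <linux/errno.h>
	#include <linux/rwsem.h>

	struct example_chip {
		struct rw_semaphore shutdown_rwsem;
		unsigned int shutdown:1;
	};

	static void example_disconnect(struct example_chip *chip)
	{
		down_write(&chip->shutdown_rwsem);	/* exclude all readers */
		chip->shutdown = 1;
		up_write(&chip->shutdown_rwsem);
	}

	static int example_io(struct example_chip *chip)
	{
		int err = -ENODEV;

		down_read(&chip->shutdown_rwsem);	/* readers may run concurrently */
		if (!chip->shutdown)
			err = 0;			/* device access is safe here */
		up_read(&chip->shutdown_rwsem);
		return err;
	}
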
static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message)
struct snd_usb_endpoint *sync_endpoint;
unsigned long flags;
bool need_setup_ep; /* (re)configure EP at prepare? */
+ unsigned int speed; /* USB_SPEED_XXX */
u64 formats; /* format bitmasks (all or'ed) */
unsigned int num_formats; /* number of supported audio formats (list) */
#define EP_FLAG_ACTIVATED 0
#define EP_FLAG_RUNNING 1
+#define EP_FLAG_STOPPING 2
/*
* snd_usb_endpoint is a model that abstracts everything related to an
if (alive)
snd_printk(KERN_ERR "timeout: still %d active urbs on EP #%x\n",
alive, ep->ep_num);
+ clear_bit(EP_FLAG_STOPPING, &ep->flags);
return 0;
}
+/* sync the pending stop operation;
+ * this function itself doesn't trigger the stop operation
+ */
+void snd_usb_endpoint_sync_pending_stop(struct snd_usb_endpoint *ep)
+{
+ if (ep && test_bit(EP_FLAG_STOPPING, &ep->flags))
+ wait_clear_urbs(ep);
+}
+
/*
* unlink active urbs.
*/
if (wait)
wait_clear_urbs(ep);
+ else
+ set_bit(EP_FLAG_STOPPING, &ep->flags);
}
}
int snd_usb_endpoint_start(struct snd_usb_endpoint *ep, int can_sleep);
void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep,
int force, int can_sleep, int wait);
+void snd_usb_endpoint_sync_pending_stop(struct snd_usb_endpoint *ep);
int snd_usb_endpoint_activate(struct snd_usb_endpoint *ep);
int snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep);
void snd_usb_endpoint_free(struct list_head *head);
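The EP_FLAG_STOPPING hunks above let a non-blocking stop defer the URB wait:
the stop path only sets the flag, and the next prepare calls
snd_usb_endpoint_sync_pending_stop() to reap outstanding URBs before the
endpoint is reused. A standalone sketch of the flag protocol, using only
generic bitops; the ex_* names are hypothetical and ex_wait_clear_urbs()
stands in for the driver's wait_clear_urbs():

	/*
	 * Sketch of the deferred-stop handshake added above.
	 */
	#include <linux/bitops.h>

	#define EX_FLAG_STOPPING	2

	static void ex_wait_clear_urbs(void)
	{
		/* stand-in: wait for all in-flight URBs to complete */
	}

	static void ex_stop_async(unsigned long *flags)
	{
		set_bit(EX_FLAG_STOPPING, flags);	/* stop now, wait later */
	}

	static void ex_sync_pending_stop(unsigned long *flags)
	{
		if (test_bit(EX_FLAG_STOPPING, flags)) {
			ex_wait_clear_urbs();
			clear_bit(EX_FLAG_STOPPING, flags);
		}
	}
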
struct snd_usb_midi_out_endpoint* ep;
struct snd_rawmidi_substream *substream;
int active;
+ bool autopm_reference;
uint8_t cable; /* cable number << 4 */
uint8_t state;
#define STATE_UNKNOWN 0
return -ENXIO;
}
err = usb_autopm_get_interface(umidi->iface);
- if (err < 0)
+ port->autopm_reference = err >= 0;
+ if (err < 0 && err != -EACCES)
return -EIO;
substream->runtime->private_data = port;
port->state = STATE_UNKNOWN;
static int snd_usbmidi_output_close(struct snd_rawmidi_substream *substream)
{
struct snd_usb_midi* umidi = substream->rmidi->private_data;
+ struct usbmidi_out_port *port = substream->runtime->private_data;
substream_open(substream, 0);
- usb_autopm_put_interface(umidi->iface);
+ if (port->autopm_reference)
+ usb_autopm_put_interface(umidi->iface);
return 0;
}
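The MIDI hunks above balance runtime-PM references: open records whether
usb_autopm_get_interface() actually succeeded, tolerating -EACCES (which just
means autosuspend support is absent), and close only drops a reference that
open took. A minimal sketch of that bookkeeping, with made-up names and only
the usb_autopm_*() calls assumed:

	/*
	 * Sketch of the balanced autopm get/put above. example_port is
	 * hypothetical.
	 */
	#include <linux/errno.h>
	#include <linux/types.h>
	#include <linux/usb.h>

	struct example_port {
		bool autopm_reference;
	};

	static int example_open(struct usb_interface *iface,
				struct example_port *port)
	{
		int err = usb_autopm_get_interface(iface);

		port->autopm_reference = err >= 0;	/* remember what we took */
		if (err < 0 && err != -EACCES)
			return -EIO;			/* real failure, not "no PM" */
		return 0;
	}

	static void example_close(struct usb_interface *iface,
				  struct example_port *port)
	{
		if (port->autopm_reference)
			usb_autopm_put_interface(iface);
	}
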
unsigned char buf[2];
int val_len = cval->val_type >= USB_MIXER_S16 ? 2 : 1;
int timeout = 10;
- int err;
+ int idx = 0, err;
err = snd_usb_autoresume(cval->mixer->chip);
if (err < 0)
return -EIO;
+ down_read(&chip->shutdown_rwsem);
while (timeout-- > 0) {
+ if (chip->shutdown)
+ break;
+ idx = snd_usb_ctrl_intf(chip) | (cval->id << 8);
if (snd_usb_ctl_msg(chip->dev, usb_rcvctrlpipe(chip->dev, 0), request,
USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN,
- validx, snd_usb_ctrl_intf(chip) | (cval->id << 8),
- buf, val_len) >= val_len) {
+ validx, idx, buf, val_len) >= val_len) {
*value_ret = convert_signed_value(cval, snd_usb_combine_bytes(buf, val_len));
- snd_usb_autosuspend(cval->mixer->chip);
- return 0;
+ err = 0;
+ goto out;
}
}
- snd_usb_autosuspend(cval->mixer->chip);
snd_printdd(KERN_ERR "cannot get ctl value: req = %#x, wValue = %#x, wIndex = %#x, type = %d\n",
- request, validx, snd_usb_ctrl_intf(chip) | (cval->id << 8), cval->val_type);
- return -EINVAL;
+ request, validx, idx, cval->val_type);
+ err = -EINVAL;
+
+ out:
+ up_read(&chip->shutdown_rwsem);
+ snd_usb_autosuspend(cval->mixer->chip);
+ return err;
}
static int get_ctl_value_v2(struct usb_mixer_elem_info *cval, int request, int validx, int *value_ret)
struct snd_usb_audio *chip = cval->mixer->chip;
unsigned char buf[2 + 3*sizeof(__u16)]; /* enough space for one range */
unsigned char *val;
- int ret, size;
+ int idx = 0, ret, size;
__u8 bRequest;
if (request == UAC_GET_CUR) {
if (ret)
goto error;
- ret = snd_usb_ctl_msg(chip->dev, usb_rcvctrlpipe(chip->dev, 0), bRequest,
+ down_read(&chip->shutdown_rwsem);
+ if (chip->shutdown)
+ ret = -ENODEV;
+ else {
+ idx = snd_usb_ctrl_intf(chip) | (cval->id << 8);
+ ret = snd_usb_ctl_msg(chip->dev, usb_rcvctrlpipe(chip->dev, 0), bRequest,
USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN,
- validx, snd_usb_ctrl_intf(chip) | (cval->id << 8),
- buf, size);
+ validx, idx, buf, size);
+ }
+ up_read(&chip->shutdown_rwsem);
snd_usb_autosuspend(chip);
if (ret < 0) {
error:
snd_printk(KERN_ERR "cannot get ctl value: req = %#x, wValue = %#x, wIndex = %#x, type = %d\n",
- request, validx, snd_usb_ctrl_intf(chip) | (cval->id << 8), cval->val_type);
+ request, validx, idx, cval->val_type);
return ret;
}
{
struct snd_usb_audio *chip = cval->mixer->chip;
unsigned char buf[2];
- int val_len, err, timeout = 10;
+ int idx = 0, val_len, err, timeout = 10;
if (cval->mixer->protocol == UAC_VERSION_1) {
val_len = cval->val_type >= USB_MIXER_S16 ? 2 : 1;
err = snd_usb_autoresume(chip);
if (err < 0)
return -EIO;
- while (timeout-- > 0)
+ down_read(&chip->shutdown_rwsem);
+ while (timeout-- > 0) {
+ if (chip->shutdown)
+ break;
+ idx = snd_usb_ctrl_intf(chip) | (cval->id << 8);
if (snd_usb_ctl_msg(chip->dev,
usb_sndctrlpipe(chip->dev, 0), request,
USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_OUT,
- validx, snd_usb_ctrl_intf(chip) | (cval->id << 8),
- buf, val_len) >= 0) {
- snd_usb_autosuspend(chip);
- return 0;
+ validx, idx, buf, val_len) >= 0) {
+ err = 0;
+ goto out;
}
- snd_usb_autosuspend(chip);
+ }
snd_printdd(KERN_ERR "cannot set ctl value: req = %#x, wValue = %#x, wIndex = %#x, type = %d, data = %#x/%#x\n",
- request, validx, snd_usb_ctrl_intf(chip) | (cval->id << 8), cval->val_type, buf[0], buf[1]);
- return -EINVAL;
+ request, validx, idx, cval->val_type, buf[0], buf[1]);
+ err = -EINVAL;
+
+ out:
+ up_read(&chip->shutdown_rwsem);
+ snd_usb_autosuspend(chip);
+ return err;
}
static int set_cur_ctl_value(struct usb_mixer_elem_info *cval, int validx, int value)
if (value > 1)
return -EINVAL;
changed = value != mixer->audigy2nx_leds[index];
+ down_read(&mixer->chip->shutdown_rwsem);
+ if (mixer->chip->shutdown) {
+ err = -ENODEV;
+ goto out;
+ }
if (mixer->chip->usb_id == USB_ID(0x041e, 0x3042))
err = snd_usb_ctl_msg(mixer->chip->dev,
usb_sndctrlpipe(mixer->chip->dev, 0), 0x24,
usb_sndctrlpipe(mixer->chip->dev, 0), 0x24,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
value, index + 2, NULL, 0);
+ out:
+ up_read(&mixer->chip->shutdown_rwsem);
if (err < 0)
return err;
mixer->audigy2nx_leds[index] = value;
for (i = 0; jacks[i].name; ++i) {
snd_iprintf(buffer, "%s: ", jacks[i].name);
- err = snd_usb_ctl_msg(mixer->chip->dev,
+ down_read(&mixer->chip->shutdown_rwsem);
+ if (mixer->chip->shutdown)
+ err = 0;
+ else
+ err = snd_usb_ctl_msg(mixer->chip->dev,
usb_rcvctrlpipe(mixer->chip->dev, 0),
UAC_GET_MEM, USB_DIR_IN | USB_TYPE_CLASS |
USB_RECIP_INTERFACE, 0,
jacks[i].unitid << 8, buf, 3);
+ up_read(&mixer->chip->shutdown_rwsem);
if (err == 3 && (buf[0] == 3 || buf[0] == 6))
snd_iprintf(buffer, "%02x %02x\n", buf[1], buf[2]);
else
else
new_status = old_status & ~0x02;
changed = new_status != old_status;
- err = snd_usb_ctl_msg(mixer->chip->dev,
+ down_read(&mixer->chip->shutdown_rwsem);
+ if (mixer->chip->shutdown)
+ err = -ENODEV;
+ else
+ err = snd_usb_ctl_msg(mixer->chip->dev,
usb_sndctrlpipe(mixer->chip->dev, 0), 0x08,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
50, 0, &new_status, 1);
+ up_read(&mixer->chip->shutdown_rwsem);
if (err < 0)
return err;
mixer->xonar_u1_status = new_status;
u8 bRequest = (kcontrol->private_value >> 16) & 0xff;
u16 wIndex = kcontrol->private_value & 0xffff;
u8 tmp;
+ int ret;
- int ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), bRequest,
+ down_read(&mixer->chip->shutdown_rwsem);
+ if (mixer->chip->shutdown)
+ ret = -ENODEV;
+ else
+ ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), bRequest,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
0, cpu_to_le16(wIndex),
&tmp, sizeof(tmp), 1000);
+ up_read(&mixer->chip->shutdown_rwsem);
if (ret < 0) {
snd_printk(KERN_ERR
u8 bRequest = (kcontrol->private_value >> 16) & 0xff;
u16 wIndex = kcontrol->private_value & 0xffff;
u16 wValue = ucontrol->value.integer.value[0];
+ int ret;
- int ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), bRequest,
+ down_read(&mixer->chip->shutdown_rwsem);
+ if (mixer->chip->shutdown)
+ ret = -ENODEV;
+ else
+ ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), bRequest,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
cpu_to_le16(wValue), cpu_to_le16(wIndex),
NULL, 0, 1000);
+ up_read(&mixer->chip->shutdown_rwsem);
if (ret < 0) {
snd_printk(KERN_ERR
return -EINVAL;
- err = snd_usb_ctl_msg(chip->dev,
+ down_read(&mixer->chip->shutdown_rwsem);
+ if (mixer->chip->shutdown)
+ err = -ENODEV;
+ else
+ err = snd_usb_ctl_msg(chip->dev,
usb_rcvctrlpipe(chip->dev, 0), UAC_GET_CUR,
USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN,
validx << 8, snd_usb_ctrl_intf(chip) | (id << 8),
value, val_len);
+ up_read(&mixer->chip->shutdown_rwsem);
if (err < 0)
return err;
if (!pval->is_cached) {
/* Read current value */
- err = snd_usb_ctl_msg(chip->dev,
+ down_read(&mixer->chip->shutdown_rwsem);
+ if (mixer->chip->shutdown)
+ err = -ENODEV;
+ else
+ err = snd_usb_ctl_msg(chip->dev,
usb_rcvctrlpipe(chip->dev, 0), UAC_GET_CUR,
USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN,
validx << 8, snd_usb_ctrl_intf(chip) | (id << 8),
value, val_len);
+ up_read(&mixer->chip->shutdown_rwsem);
if (err < 0)
return err;
if (cur_val != new_val) {
value[0] = new_val;
value[1] = 0;
- err = snd_usb_ctl_msg(chip->dev,
+ down_read(&mixer->chip->shutdown_rwsem);
+ if (mixer->chip->shutdown)
+ err = -ENODEV;
+ else
+ err = snd_usb_ctl_msg(chip->dev,
usb_sndctrlpipe(chip->dev, 0), UAC_SET_CUR,
USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_OUT,
validx << 8, snd_usb_ctrl_intf(chip) | (id << 8),
value, val_len);
+ up_read(&mixer->chip->shutdown_rwsem);
if (err < 0)
return err;
unsigned int hwptr_done;
subs = (struct snd_usb_substream *)substream->runtime->private_data;
+ if (subs->stream->chip->shutdown)
+ return SNDRV_PCM_POS_XRUN;
spin_lock(&subs->lock);
hwptr_done = subs->hwptr_done;
substream->runtime->delay = snd_usb_pcm_delay(subs,
{
int ret;
- mutex_lock(&subs->stream->chip->shutdown_mutex);
/* format changed */
stop_endpoints(subs, 0, 0, 0);
ret = snd_usb_endpoint_set_params(subs->data_endpoint,
subs->cur_audiofmt,
subs->sync_endpoint);
if (ret < 0)
- goto unlock;
+ return ret;
if (subs->sync_endpoint)
- ret = snd_usb_endpoint_set_params(subs->data_endpoint,
+ ret = snd_usb_endpoint_set_params(subs->sync_endpoint,
subs->pcm_format,
subs->channels,
subs->period_bytes,
subs->cur_rate,
subs->cur_audiofmt,
NULL);
-
-unlock:
- mutex_unlock(&subs->stream->chip->shutdown_mutex);
return ret;
}
return -EINVAL;
}
- if ((ret = set_format(subs, fmt)) < 0)
+ down_read(&subs->stream->chip->shutdown_rwsem);
+ if (subs->stream->chip->shutdown)
+ ret = -ENODEV;
+ else
+ ret = set_format(subs, fmt);
+ up_read(&subs->stream->chip->shutdown_rwsem);
+ if (ret < 0)
return ret;
subs->interface = fmt->iface;
subs->cur_audiofmt = NULL;
subs->cur_rate = 0;
subs->period_bytes = 0;
- mutex_lock(&subs->stream->chip->shutdown_mutex);
- stop_endpoints(subs, 0, 1, 1);
- deactivate_endpoints(subs);
- mutex_unlock(&subs->stream->chip->shutdown_mutex);
+ down_read(&subs->stream->chip->shutdown_rwsem);
+ if (!subs->stream->chip->shutdown) {
+ stop_endpoints(subs, 0, 1, 1);
+ deactivate_endpoints(subs);
+ }
+ up_read(&subs->stream->chip->shutdown_rwsem);
return snd_pcm_lib_free_vmalloc_buffer(substream);
}
return -ENXIO;
}
- if (snd_BUG_ON(!subs->data_endpoint))
- return -EIO;
+ down_read(&subs->stream->chip->shutdown_rwsem);
+ if (subs->stream->chip->shutdown) {
+ ret = -ENODEV;
+ goto unlock;
+ }
+ if (snd_BUG_ON(!subs->data_endpoint)) {
+ ret = -EIO;
+ goto unlock;
+ }
+
+ snd_usb_endpoint_sync_pending_stop(subs->sync_endpoint);
+ snd_usb_endpoint_sync_pending_stop(subs->data_endpoint);
ret = set_format(subs, subs->cur_audiofmt);
if (ret < 0)
- return ret;
+ goto unlock;
iface = usb_ifnum_to_if(subs->dev, subs->cur_audiofmt->iface);
alts = &iface->altsetting[subs->cur_audiofmt->altset_idx];
subs->cur_audiofmt,
subs->cur_rate);
if (ret < 0)
- return ret;
+ goto unlock;
if (subs->need_setup_ep) {
ret = configure_endpoint(subs);
if (ret < 0)
- return ret;
+ goto unlock;
subs->need_setup_ep = false;
}
/* for playback, submit the URBs now; otherwise, the first hwptr_done
* updates for all URBs would happen at the same time when starting */
if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK)
- return start_endpoints(subs, 1);
+ ret = start_endpoints(subs, 1);
- return 0;
+ unlock:
+ up_read(&subs->stream->chip->shutdown_rwsem);
+ return ret;
}
static struct snd_pcm_hardware snd_usb_hardware =
return 0;
}
/* check whether the period time is >= the data packet interval */
- if (snd_usb_get_speed(subs->dev) != USB_SPEED_FULL) {
+ if (subs->speed != USB_SPEED_FULL) {
ptime = 125 * (1 << fp->datainterval);
if (ptime > pt->max || (ptime == pt->max && pt->openmax)) {
hwc_debug(" > check: ptime %u > max %u\n", ptime, pt->max);
return err;
param_period_time_if_needed = SNDRV_PCM_HW_PARAM_PERIOD_TIME;
- if (snd_usb_get_speed(subs->dev) == USB_SPEED_FULL)
+ if (subs->speed == USB_SPEED_FULL)
/* full speed devices have fixed data packet interval */
ptmin = 1000;
if (ptmin == 1000)
}
snd_iprintf(buffer, "\n");
}
- if (snd_usb_get_speed(subs->dev) != USB_SPEED_FULL)
+ if (subs->speed != USB_SPEED_FULL)
snd_iprintf(buffer, " Data packet interval: %d us\n",
125 * (1 << fp->datainterval));
// snd_iprintf(buffer, " Max Packet Size = %d\n", fp->maxpacksize);
return;
snd_iprintf(buffer, " Packet Size = %d\n", ep->curpacksize);
snd_iprintf(buffer, " Momentary freq = %u Hz (%#x.%04x)\n",
- snd_usb_get_speed(subs->dev) == USB_SPEED_FULL
+ subs->speed == USB_SPEED_FULL
? get_full_speed_hz(ep->freqm)
: get_high_speed_hz(ep->freqm),
ep->freqm >> 16, ep->freqm & 0xffff);
subs->direction = stream;
subs->dev = as->chip->dev;
subs->txfr_quirk = as->chip->txfr_quirk;
+ subs->speed = snd_usb_get_speed(subs->dev);
snd_usb_set_pcm_ops(as->pcm, stream);
struct usb_interface *pm_intf;
u32 usb_id;
struct mutex mutex;
- struct mutex shutdown_mutex;
+ struct rw_semaphore shutdown_rwsem;
unsigned int shutdown:1;
unsigned int probing:1;
unsigned int autosuspended:1;
int cmd_help(int argc, const char **argv, const char *prefix __maybe_unused)
{
bool show_all = false;
- enum help_format help_format = HELP_FORMAT_NONE;
+ enum help_format help_format = HELP_FORMAT_MAN;
struct option builtin_help_options[] = {
OPT_BOOLEAN('a', "all", &show_all, "print all available commands"),
OPT_SET_UINT('m', "man", &help_format, "show man page", HELP_FORMAT_MAN),
{
char tp_name[128];
struct syscall *sc;
+ const char *name = audit_syscall_to_name(id, trace->audit_machine);
+
+ if (name == NULL)
+ return -1;
if (id > trace->syscalls.max) {
struct syscall *nsyscalls = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc));
}
sc = trace->syscalls.table + id;
- sc->name = audit_syscall_to_name(id, trace->audit_machine);
- if (sc->name == NULL)
- return -1;
-
- sc->fmt = syscall_fmt__find(sc->name);
+ sc->name = name;
+ sc->fmt = syscall_fmt__find(sc->name);
snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
sc->tp_format = event_format__new("syscalls", tp_name);
if (evlist->threads->map[0] == -1 || evlist->threads->nr > 1)
printf("%d ", sample.tid);
+ if (sample.raw_data == NULL) {
+ printf("%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
+ perf_evsel__name(evsel), sample.tid,
+ sample.cpu, sample.raw_size);
+ continue;
+ }
+
handler = evsel->handler.func;
handler(trace, evsel, &sample);
}
TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
- TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
+ /* use of precise requires exclude_guest */
+ TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 2);
TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
- TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
+ /* use of precise requires exclude_guest */
+ TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 3);
TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
- TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
+ /* use of precise requires exclude_guest */
+ TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 1);
TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
- TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
+ /* use of precise requires exclude_guest */
+ TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 2);
TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
err = self->comm == NULL ? -ENOMEM : 0;
if (!err) {
self->comm_set = true;
- map_groups__flush(&self->mg);
}
return err;
}
retval = pread(fd, msr, sizeof *msr, offset);
close(fd);
- if (retval != sizeof *msr)
+ if (retval != sizeof *msr) {
+ fprintf(stderr, "%s offset 0x%zx read failed\n", pathname, offset);
return -1;
+ }
return 0;
}
restart:
retval = for_all_cpus(get_counters, EVEN_COUNTERS);
- if (retval) {
+ if (retval < -1) {
+ exit(retval);
+ } else if (retval == -1) {
re_initialize();
goto restart;
}
}
sleep(interval_sec);
retval = for_all_cpus(get_counters, ODD_COUNTERS);
- if (retval) {
+ if (retval < -1) {
+ exit(retval);
+ } else if (retval == -1) {
re_initialize();
goto restart;
}
flush_stdout();
sleep(interval_sec);
retval = for_all_cpus(get_counters, EVEN_COUNTERS);
- if (retval) {
+ if (retval < -1) {
+ exit(retval);
+ } else if (retval == -1) {
re_initialize();
goto restart;
}
int fork_it(char **argv)
{
pid_t child_pid;
+ int status;
- for_all_cpus(get_counters, EVEN_COUNTERS);
+ status = for_all_cpus(get_counters, EVEN_COUNTERS);
+ if (status)
+ exit(status);
/* clear affinity side-effect of get_counters() */
sched_setaffinity(0, cpu_present_setsize, cpu_present_set);
gettimeofday(&tv_even, (struct timezone *)NULL);
/* child */
execvp(argv[0], argv);
} else {
- int status;
/* parent */
if (child_pid == -1) {
signal(SIGQUIT, SIG_IGN);
if (waitpid(child_pid, &status, 0) == -1) {
perror("wait");
- exit(1);
+ exit(status);
}
}
/*
fprintf(stderr, "%.6f sec\n", tv_delta.tv_sec + tv_delta.tv_usec/1000000.0);
- return 0;
+ return status;
}
void cmdline(int argc, char **argv)
progname = argv[0];
- while ((opt = getopt(argc, argv, "+pPSvisc:sC:m:M:")) != -1) {
+ while ((opt = getopt(argc, argv, "+pPSvi:sc:sC:m:M:")) != -1) {
switch (opt) {
case 'p':
show_core_only++;
open(IN, "$output_config") or dodie("Can't read config file");
while (<IN>) {
if (/CONFIG_MODULES(=y)?/) {
- $install_mods = 1 if (defined($1));
- last;
+ if (defined($1)) {
+ $install_mods = 1;
+ last;
+ }
}
}
close(IN);
-TARGETS = breakpoints kcmp mqueue vm cpu-hotplug memory-hotplug epoll
+TARGETS = breakpoints kcmp mqueue vm cpu-hotplug memory-hotplug
all:
for TARGET in $(TARGETS); do \
+++ /dev/null
-# Makefile for epoll selftests
-
-all: test_epoll
-%: %.c
- gcc -pthread -g -o $@ $^
-
-run_tests: all
- ./test_epoll
-
-clean:
- $(RM) test_epoll
+++ /dev/null
-/*
- * tools/testing/selftests/epoll/test_epoll.c
- *
- * Copyright 2012 Adobe Systems Incorporated
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Paton J. Lewis <palewis@adobe.com>
- *
- */
-
-#include <errno.h>
-#include <fcntl.h>
-#include <pthread.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <sys/epoll.h>
-#include <sys/socket.h>
-
-/*
- * A pointer to an epoll_item_private structure will be stored in the epoll
- * item's event structure so that we can get access to the epoll_item_private
- * data after calling epoll_wait:
- */
-struct epoll_item_private {
- int index; /* Position of this struct within the epoll_items array. */
- int fd;
- uint32_t events;
- pthread_mutex_t mutex; /* Guards the following variables... */
- int stop;
- int status; /* Stores any error encountered while handling item. */
- /* The following variable allows us to test whether we have encountered
- a problem while attempting to cancel and delete the associated
- event. When the test program exits, 'deleted' should be exactly
- one. If it is greater than one, then the failed test reflects a real
- world situation where we would have tried to access the epoll item's
- private data after deleting it: */
- int deleted;
-};
-
-struct epoll_item_private *epoll_items;
-
-/*
- * Delete the specified item from the epoll set. In a real-world secneario this
- * is where we would free the associated data structure, but in this testing
- * environment we retain the structure so that we can test for double-deletion:
- */
-void delete_item(int index)
-{
- __sync_fetch_and_add(&epoll_items[index].deleted, 1);
-}
-
-/*
- * A pointer to a read_thread_data structure will be passed as the argument to
- * each read thread:
- */
-struct read_thread_data {
- int stop;
- int status; /* Indicates any error encountered by the read thread. */
- int epoll_set;
-};
-
-/*
- * The function executed by the read threads:
- */
-void *read_thread_function(void *function_data)
-{
- struct read_thread_data *thread_data =
- (struct read_thread_data *)function_data;
- struct epoll_event event_data;
- struct epoll_item_private *item_data;
- char socket_data;
-
- /* Handle events until we encounter an error or this thread's 'stop'
- condition is set: */
- while (1) {
- int result = epoll_wait(thread_data->epoll_set,
- &event_data,
- 1, /* Number of desired events */
- 1000); /* Timeout in ms */
- if (result < 0) {
- /* Breakpoints signal all threads. Ignore that while
- debugging: */
- if (errno == EINTR)
- continue;
- thread_data->status = errno;
- return 0;
- } else if (thread_data->stop)
- return 0;
- else if (result == 0) /* Timeout */
- continue;
-
- /* We need the mutex here because checking for the stop
- condition and re-enabling the epoll item need to be done
- together as one atomic operation when EPOLL_CTL_DISABLE is
- available: */
- item_data = (struct epoll_item_private *)event_data.data.ptr;
- pthread_mutex_lock(&item_data->mutex);
-
- /* Remove the item from the epoll set if we want to stop
- handling that event: */
- if (item_data->stop)
- delete_item(item_data->index);
- else {
- /* Clear the data that was written to the other end of
- our non-blocking socket: */
- do {
- if (read(item_data->fd, &socket_data, 1) < 1) {
- if ((errno == EAGAIN) ||
- (errno == EWOULDBLOCK))
- break;
- else
- goto error_unlock;
- }
- } while (item_data->events & EPOLLET);
-
- /* The item was one-shot, so re-enable it: */
- event_data.events = item_data->events;
- if (epoll_ctl(thread_data->epoll_set,
- EPOLL_CTL_MOD,
- item_data->fd,
- &event_data) < 0)
- goto error_unlock;
- }
-
- pthread_mutex_unlock(&item_data->mutex);
- }
-
-error_unlock:
- thread_data->status = item_data->status = errno;
- pthread_mutex_unlock(&item_data->mutex);
- return 0;
-}
-
-/*
- * A pointer to a write_thread_data structure will be passed as the argument to
- * the write thread:
- */
-struct write_thread_data {
- int stop;
- int status; /* Indicates any error encountered by the write thread. */
- int n_fds;
- int *fds;
-};
-
-/*
- * The function executed by the write thread. It writes a single byte to each
- * socket in turn until the stop condition for this thread is set. If writing to
- * a socket would block (i.e. errno was EAGAIN), we leave that socket alone for
- * the moment and just move on to the next socket in the list. We don't care
- * about the order in which we deliver events to the epoll set. In fact we don't
- * care about the data we're writing to the pipes at all; we just want to
- * trigger epoll events:
- */
-void *write_thread_function(void *function_data)
-{
- const char data = 'X';
- int index;
- struct write_thread_data *thread_data =
- (struct write_thread_data *)function_data;
- while (!write_thread_data->stop)
- for (index = 0;
- !thread_data->stop && (index < thread_data->n_fds);
- ++index)
- if ((write(thread_data->fds[index], &data, 1) < 1) &&
- (errno != EAGAIN) &&
- (errno != EWOULDBLOCK)) {
- write_thread_data->status = errno;
- return;
- }
-}
-
-/*
- * Arguments are currently ignored:
- */
-int main(int argc, char **argv)
-{
- const int n_read_threads = 100;
- const int n_epoll_items = 500;
- int index;
- int epoll_set = epoll_create1(0);
- struct write_thread_data write_thread_data = {
- 0, 0, n_epoll_items, malloc(n_epoll_items * sizeof(int))
- };
- struct read_thread_data *read_thread_data =
- malloc(n_read_threads * sizeof(struct read_thread_data));
- pthread_t *read_threads = malloc(n_read_threads * sizeof(pthread_t));
- pthread_t write_thread;
-
- printf("-----------------\n");
- printf("Runing test_epoll\n");
- printf("-----------------\n");
-
- epoll_items = malloc(n_epoll_items * sizeof(struct epoll_item_private));
-
- if (epoll_set < 0 || epoll_items == 0 || write_thread_data.fds == 0 ||
- read_thread_data == 0 || read_threads == 0)
- goto error;
-
- if (sysconf(_SC_NPROCESSORS_ONLN) < 2) {
- printf("Error: please run this test on a multi-core system.\n");
- goto error;
- }
-
- /* Create the socket pairs and epoll items: */
- for (index = 0; index < n_epoll_items; ++index) {
- int socket_pair[2];
- struct epoll_event event_data;
- if (socketpair(AF_UNIX,
- SOCK_STREAM | SOCK_NONBLOCK,
- 0,
- socket_pair) < 0)
- goto error;
- write_thread_data.fds[index] = socket_pair[0];
- epoll_items[index].index = index;
- epoll_items[index].fd = socket_pair[1];
- if (pthread_mutex_init(&epoll_items[index].mutex, NULL) != 0)
- goto error;
- /* We always use EPOLLONESHOT because this test is currently
- structured to demonstrate the need for EPOLL_CTL_DISABLE,
- which only produces useful information in the EPOLLONESHOT
- case (without EPOLLONESHOT, calling epoll_ctl with
- EPOLL_CTL_DISABLE will never return EBUSY). If support for
- testing events without EPOLLONESHOT is desired, it should
- probably be implemented in a separate unit test. */
- epoll_items[index].events = EPOLLIN | EPOLLONESHOT;
- if (index < n_epoll_items / 2)
- epoll_items[index].events |= EPOLLET;
- epoll_items[index].stop = 0;
- epoll_items[index].status = 0;
- epoll_items[index].deleted = 0;
- event_data.events = epoll_items[index].events;
- event_data.data.ptr = &epoll_items[index];
- if (epoll_ctl(epoll_set,
- EPOLL_CTL_ADD,
- epoll_items[index].fd,
- &event_data) < 0)
- goto error;
- }
-
- /* Create and start the read threads: */
- for (index = 0; index < n_read_threads; ++index) {
- read_thread_data[index].stop = 0;
- read_thread_data[index].status = 0;
- read_thread_data[index].epoll_set = epoll_set;
- if (pthread_create(&read_threads[index],
- NULL,
- read_thread_function,
- &read_thread_data[index]) != 0)
- goto error;
- }
-
- if (pthread_create(&write_thread,
- NULL,
- write_thread_function,
- &write_thread_data) != 0)
- goto error;
-
- /* Cancel all event pollers: */
-#ifdef EPOLL_CTL_DISABLE
- for (index = 0; index < n_epoll_items; ++index) {
- pthread_mutex_lock(&epoll_items[index].mutex);
- ++epoll_items[index].stop;
- if (epoll_ctl(epoll_set,
- EPOLL_CTL_DISABLE,
- epoll_items[index].fd,
- NULL) == 0)
- delete_item(index);
- else if (errno != EBUSY) {
- pthread_mutex_unlock(&epoll_items[index].mutex);
- goto error;
- }
- /* EBUSY means events were being handled; allow the other thread
- to delete the item. */
- pthread_mutex_unlock(&epoll_items[index].mutex);
- }
-#else
- for (index = 0; index < n_epoll_items; ++index) {
- pthread_mutex_lock(&epoll_items[index].mutex);
- ++epoll_items[index].stop;
- pthread_mutex_unlock(&epoll_items[index].mutex);
- /* Wait in case a thread running read_thread_function is
- currently executing code between epoll_wait and
- pthread_mutex_lock with this item. Note that a longer delay
- would make double-deletion less likely (at the expense of
- performance), but there is no guarantee that any delay would
- ever be sufficient. Note also that we delete all event
- pollers at once for testing purposes, but in a real-world
- environment we are likely to want to be able to cancel event
- pollers at arbitrary times. Therefore we can't improve this
- situation by just splitting this loop into two loops
- (i.e. signal 'stop' for all items, sleep, and then delete all
- items). We also can't fix the problem via EPOLL_CTL_DEL
- because that command can't prevent the case where some other
- thread is executing read_thread_function within the region
- mentioned above: */
- usleep(1);
- pthread_mutex_lock(&epoll_items[index].mutex);
- if (!epoll_items[index].deleted)
- delete_item(index);
- pthread_mutex_unlock(&epoll_items[index].mutex);
- }
-#endif
-
- /* Shut down the read threads: */
- for (index = 0; index < n_read_threads; ++index)
- __sync_fetch_and_add(&read_thread_data[index].stop, 1);
- for (index = 0; index < n_read_threads; ++index) {
- if (pthread_join(read_threads[index], NULL) != 0)
- goto error;
- if (read_thread_data[index].status)
- goto error;
- }
-
- /* Shut down the write thread: */
- __sync_fetch_and_add(&write_thread_data.stop, 1);
- if ((pthread_join(write_thread, NULL) != 0) || write_thread_data.status)
- goto error;
-
- /* Check for final error conditions: */
- for (index = 0; index < n_epoll_items; ++index) {
- if (epoll_items[index].status != 0)
- goto error;
- if (pthread_mutex_destroy(&epoll_items[index].mutex) < 0)
- goto error;
- }
- for (index = 0; index < n_epoll_items; ++index)
- if (epoll_items[index].deleted != 1) {
- printf("Error: item data deleted %1d times.\n",
- epoll_items[index].deleted);
- goto error;
- }
-
- printf("[PASS]\n");
- return 0;
-
- error:
- printf("[FAIL]\n");
- return errno;
-}
#include <sys/mount.h>
#include <sys/statfs.h>
#include "../../include/uapi/linux/magic.h"
-#include "../../include/linux/kernel-page-flags.h"
+#include "../../include/uapi/linux/kernel-page-flags.h"
#ifndef MAX_PATH
int retval;
int rc = -1;
int namesize;
- int i;
+ unsigned int i;
mode |= S_IFREG;
static char *cpio_replace_env(char *new_location)
{
- char expanded[PATH_MAX + 1];
- char env_var[PATH_MAX + 1];
- char *start;
- char *end;
-
- for (start = NULL; (start = strstr(new_location, "${")); ) {
- end = strchr(start, '}');
- if (start < end) {
- *env_var = *expanded = '\0';
- strncat(env_var, start + 2, end - start - 2);
- strncat(expanded, new_location, start - new_location);
- strncat(expanded, getenv(env_var), PATH_MAX);
- strncat(expanded, end + 1, PATH_MAX);
- strncpy(new_location, expanded, PATH_MAX);
- } else
- break;
- }
-
- return new_location;
+ char expanded[PATH_MAX + 1];
+ char env_var[PATH_MAX + 1];
+ char *start;
+ char *end;
+
+ for (start = NULL; (start = strstr(new_location, "${")); ) {
+ end = strchr(start, '}');
+ if (start < end) {
+ *env_var = *expanded = '\0';
+ strncat(env_var, start + 2, end - start - 2);
+ strncat(expanded, new_location, start - new_location);
+ strncat(expanded, getenv(env_var),
+ PATH_MAX - strlen(expanded));
+ strncat(expanded, end + 1,
+ PATH_MAX - strlen(expanded));
+ strncpy(new_location, expanded, PATH_MAX);
+ new_location[PATH_MAX] = 0;
+ } else
+ break;
+ }
+
+ return new_location;
}
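The rewrite above bounds every strncat() by the space remaining in the buffer
and NUL-terminates after strncpy(), so repeated ${VAR} expansions can no
longer overrun expanded[]. For illustration, a hypothetical test driver; it
would have to live in the same file, since cpio_replace_env() is static, and
the variable name and path below are made up:

	/*
	 * Hypothetical driver for cpio_replace_env(), for reading alongside
	 * the function above; not part of gen_init_cpio.
	 */
	#include <limits.h>
	#include <stdio.h>
	#include <stdlib.h>

	int example_expand(void)
	{
		char loc[PATH_MAX + 1] = "${OUTDIR}/initramfs_data.cpio";

		setenv("OUTDIR", "/tmp/build", 1);
		/* prints "/tmp/build/initramfs_data.cpio" */
		printf("%s\n", cpio_replace_env(loc));
		return 0;
	}
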
void kvm_release_pfn_clean(pfn_t pfn)
{
- WARN_ON(is_error_pfn(pfn));
-
- if (!kvm_is_mmio_pfn(pfn))
+ if (!is_error_pfn(pfn) && !kvm_is_mmio_pfn(pfn))
put_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);