- 'css_for_each_descendant_pre'
- 'device_for_each_child_node'
- 'dma_fence_chain_for_each'
+ - 'do_for_each_ftrace_op'
- 'drm_atomic_crtc_for_each_plane'
- 'drm_atomic_crtc_state_for_each_plane'
- 'drm_atomic_crtc_state_for_each_plane_state'
- 'for_each_active_dev_scope'
- 'for_each_active_drhd_unit'
- 'for_each_active_iommu'
+ - 'for_each_aggr_pgid'
- 'for_each_available_child_of_node'
- 'for_each_bio'
- 'for_each_board_func_rsrc'
- 'for_each_node_state'
- 'for_each_node_with_cpus'
- 'for_each_node_with_property'
+ - 'for_each_nonreserved_multicast_dest_pgid'
- 'for_each_of_allnodes'
- 'for_each_of_allnodes_from'
- 'for_each_of_cpu_node'
- 'for_each_pci_dev'
- 'for_each_pci_msi_entry'
- 'for_each_pcm_streams'
+ - 'for_each_physmem_range'
- 'for_each_populated_zone'
- 'for_each_possible_cpu'
- 'for_each_present_cpu'
- 'for_each_process_thread'
- 'for_each_property_of_node'
- 'for_each_registered_fb'
+ - 'for_each_requested_gpio'
+ - 'for_each_requested_gpio_in_range'
- 'for_each_reserved_mem_region'
- 'for_each_rtd_codec_dais'
- 'for_each_rtd_codec_dais_rollback'
- 'for_each_sg'
- 'for_each_sg_dma_page'
- 'for_each_sg_page'
+ - 'for_each_sgtable_dma_page'
+ - 'for_each_sgtable_dma_sg'
+ - 'for_each_sgtable_page'
+ - 'for_each_sgtable_sg'
- 'for_each_sibling_event'
- 'for_each_subelement'
- 'for_each_subelement_extid'
- 'for_each_subelement_id'
- '__for_each_thread'
- 'for_each_thread'
+ - 'for_each_unicast_dest_pgid'
- 'for_each_wakeup_source'
- 'for_each_zone'
- 'for_each_zone_zonelist'
- 'v4l2_m2m_for_each_src_buf'
- 'v4l2_m2m_for_each_src_buf_safe'
- 'virtio_device_for_each_vq'
+ - 'while_for_each_ftrace_op'
- 'xa_for_each'
- 'xa_for_each_marked'
- 'xa_for_each_range'
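
Most of these iterator names expand to ordinary for loops, which is why the
formatter needs to treat them as control-flow statements. As a hedged sketch
of one of them, xa_for_each() (the "my_xa" XArray and the dump_entries()
helper are illustrative, not from the list above):

    #include <linux/xarray.h>

    /* Walk every entry stored in an already-populated XArray. */
    static void dump_entries(struct xarray *my_xa)
    {
            unsigned long index;
            void *entry;

            xa_for_each(my_xa, index, entry)
                    pr_info("index %lu -> entry %p\n", index, entry);
    }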
Aaron Durbin <adurbin@google.com>
Adam Oldham <oldhamca@gmail.com>
Adam Radford <aradford@gmail.com>
-Adrian Bunk <bunk@stusta.de>
Adriana Reus <adi.reus@gmail.com> <adriana.reus@intel.com>
+Adrian Bunk <bunk@stusta.de>
Alan Cox <alan@lxorguk.ukuu.org.uk>
Alan Cox <root@hraefn.swansea.linux.org.uk>
-Aleksey Gorelov <aleksey_gorelov@phoenix.com>
Aleksandar Markovic <aleksandar.markovic@mips.com> <aleksandar.markovic@imgtec.com>
-Alex Shi <alex.shi@linux.alibaba.com> <alex.shi@intel.com>
-Alex Shi <alex.shi@linux.alibaba.com> <alex.shi@linaro.org>
+Aleksey Gorelov <aleksey_gorelov@phoenix.com>
Alexander Lobakin <alobakin@pm.me> <alobakin@dlink.ru>
Alexander Lobakin <alobakin@pm.me> <alobakin@marvell.com>
Alexander Lobakin <alobakin@pm.me> <bloodyreaper@yandex.ru>
Alexandre Belloni <alexandre.belloni@bootlin.com> <alexandre.belloni@free-electrons.com>
-Alexei Starovoitov <ast@kernel.org> <ast@plumgrid.com>
Alexei Starovoitov <ast@kernel.org> <alexei.starovoitov@gmail.com>
Alexei Starovoitov <ast@kernel.org> <ast@fb.com>
+Alexei Starovoitov <ast@kernel.org> <ast@plumgrid.com>
+Alex Shi <alex.shi@linux.alibaba.com> <alex.shi@intel.com>
+Alex Shi <alex.shi@linux.alibaba.com> <alex.shi@linaro.org>
Al Viro <viro@ftp.linux.org.uk>
Al Viro <viro@zenIV.linux.org.uk>
+Andi Kleen <ak@linux.intel.com> <ak@suse.de>
Andi Shyti <andi@etezian.org> <andi.shyti@samsung.com>
Andreas Herrmann <aherrman@de.ibm.com>
-Andrey Ryabinin <ryabinin.a.a@gmail.com> <a.ryabinin@samsung.com>
Andrew Morton <akpm@linux-foundation.org>
-Andrew Murray <amurray@thegoodpenguin.co.uk> <andrew.murray@arm.com>
Andrew Murray <amurray@thegoodpenguin.co.uk> <amurray@embedded-bits.co.uk>
+Andrew Murray <amurray@thegoodpenguin.co.uk> <andrew.murray@arm.com>
Andrew Vasquez <andrew.vasquez@qlogic.com>
+Andrey Ryabinin <ryabinin.a.a@gmail.com> <a.ryabinin@samsung.com>
Andy Adamson <andros@citi.umich.edu>
Antoine Tenart <antoine.tenart@free-electrons.com>
Antonio Ospite <ao2@ao2.it> <ao2@amarulasolutions.com>
Arnd Bergmann <arnd@arndb.de>
Axel Dyks <xl@xlsigned.net>
Axel Lin <axel.lin@gmail.com>
-Bart Van Assche <bvanassche@acm.org> <bart.vanassche@wdc.com>
Bart Van Assche <bvanassche@acm.org> <bart.vanassche@sandisk.com>
+Bart Van Assche <bvanassche@acm.org> <bart.vanassche@wdc.com>
Ben Gardner <bgardner@wabtec.com>
Ben M Cahill <ben.m.cahill@intel.com>
Björn Steinbrink <B.Steinbrink@gmx.de>
-Boris Brezillon <bbrezillon@kernel.org> <boris.brezillon@bootlin.com>
-Boris Brezillon <bbrezillon@kernel.org> <boris.brezillon@free-electrons.com>
Boris Brezillon <bbrezillon@kernel.org> <b.brezillon.dev@gmail.com>
Boris Brezillon <bbrezillon@kernel.org> <b.brezillon@overkiz.com>
+Boris Brezillon <bbrezillon@kernel.org> <boris.brezillon@bootlin.com>
+Boris Brezillon <bbrezillon@kernel.org> <boris.brezillon@free-electrons.com>
Brian Avery <b.avery@hp.com>
Brian King <brking@us.ibm.com>
+Changbin Du <changbin.du@intel.com> <changbin.du@gmail.com>
+Changbin Du <changbin.du@intel.com> <changbin.du@intel.com>
Chao Yu <chao@kernel.org> <chao2.yu@samsung.com>
Chao Yu <chao@kernel.org> <yuchao0@huawei.com>
-Christoph Hellwig <hch@lst.de>
Christophe Ricard <christophe.ricard@gmail.com>
+Christoph Hellwig <hch@lst.de>
Corey Minyard <minyard@acm.org>
Damian Hobson-Garcia <dhobsong@igel.co.jp>
-Daniel Borkmann <daniel@iogearbox.net> <dborkman@redhat.com>
-Daniel Borkmann <daniel@iogearbox.net> <dborkmann@redhat.com>
+Daniel Borkmann <daniel@iogearbox.net> <danborkmann@googlemail.com>
Daniel Borkmann <daniel@iogearbox.net> <danborkmann@iogearbox.net>
Daniel Borkmann <daniel@iogearbox.net> <daniel.borkmann@tik.ee.ethz.ch>
-Daniel Borkmann <daniel@iogearbox.net> <danborkmann@googlemail.com>
+Daniel Borkmann <daniel@iogearbox.net> <dborkmann@redhat.com>
+Daniel Borkmann <daniel@iogearbox.net> <dborkman@redhat.com>
Daniel Borkmann <daniel@iogearbox.net> <dxchgb@gmail.com>
David Brownell <david-b@pacbell.net>
David Woodhouse <dwmw2@shinybook.infradead.org>
-Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@mips.com>
-Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@imgtec.com>
Dengcheng Zhu <dzhu@wavecomp.com> <dczhu@mips.com>
Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@gmail.com>
+Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@imgtec.com>
+Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@mips.com>
<dev.kurt@vandijck-laurijssen.be> <kurt.van.dijck@eia.be>
Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
-Dmitry Safonov <0x7f454c46@gmail.com> <dsafonov@virtuozzo.com>
-Dmitry Safonov <0x7f454c46@gmail.com> <d.safonov@partner.samsung.com>
Dmitry Safonov <0x7f454c46@gmail.com> <dima@arista.com>
+Dmitry Safonov <0x7f454c46@gmail.com> <d.safonov@partner.samsung.com>
+Dmitry Safonov <0x7f454c46@gmail.com> <dsafonov@virtuozzo.com>
Domen Puncer <domen@coderock.org>
Douglas Gilbert <dougg@torque.net>
Ed L. Cashin <ecashin@coraid.com>
Felix Moeller <felix@derklecks.de>
Filipe Lautert <filipe@icewall.org>
Franck Bui-Huu <vagabon.xyz@gmail.com>
-Frank Rowand <frowand.list@gmail.com> <frowand@mvista.com>
Frank Rowand <frowand.list@gmail.com> <frank.rowand@am.sony.com>
Frank Rowand <frowand.list@gmail.com> <frank.rowand@sonymobile.com>
+Frank Rowand <frowand.list@gmail.com> <frowand@mvista.com>
Frank Zago <fzago@systemfabricworks.com>
Gao Xiang <xiang@kernel.org> <gaoxiang25@huawei.com>
Gao Xiang <xiang@kernel.org> <hsiangkao@aol.com>
-Gerald Schaefer <gerald.schaefer@linux.ibm.com> <gerald.schaefer@de.ibm.com>
Gerald Schaefer <gerald.schaefer@linux.ibm.com> <geraldsc@de.ibm.com>
+Gerald Schaefer <gerald.schaefer@linux.ibm.com> <gerald.schaefer@de.ibm.com>
Gerald Schaefer <gerald.schaefer@linux.ibm.com> <geraldsc@linux.vnet.ibm.com>
Greg Kroah-Hartman <greg@echidna.(none)>
Greg Kroah-Hartman <gregkh@suse.de>
Greg Kroah-Hartman <greg@kroah.com>
Greg Kurz <groug@kaod.org> <gkurz@linux.vnet.ibm.com>
Gregory CLEMENT <gregory.clement@bootlin.com> <gregory.clement@free-electrons.com>
+Gustavo Padovan <gustavo@las.ic.unicamp.br>
+Gustavo Padovan <padovan@profusion.mobi>
Hanjun Guo <guohanjun@huawei.com> <hanjun.guo@linaro.org>
Heiko Carstens <hca@linux.ibm.com> <h.carstens@de.ibm.com>
Heiko Carstens <hca@linux.ibm.com> <heiko.carstens@de.ibm.com>
Herbert Xu <herbert@gondor.apana.org.au>
Jacob Shin <Jacob.Shin@amd.com>
Jaegeuk Kim <jaegeuk@kernel.org> <jaegeuk@google.com>
-Jaegeuk Kim <jaegeuk@kernel.org> <jaegeuk@motorola.com>
Jaegeuk Kim <jaegeuk@kernel.org> <jaegeuk.kim@samsung.com>
+Jaegeuk Kim <jaegeuk@kernel.org> <jaegeuk@motorola.com>
Jakub Kicinski <kuba@kernel.org> <jakub.kicinski@netronome.com>
James Bottomley <jejb@mulgrave.(none)>
James Bottomley <jejb@titanic.il.steeleye.com>
James E Wilson <wilson@specifix.com>
-James Hogan <jhogan@kernel.org> <james.hogan@imgtec.com>
James Hogan <jhogan@kernel.org> <james@albanarts.com>
+James Hogan <jhogan@kernel.org> <james.hogan@imgtec.com>
James Ketrenos <jketreno@io.(none)>
Jan Glauber <jan.glauber@gmail.com> <jang@de.ibm.com>
Jan Glauber <jan.glauber@gmail.com> <jang@linux.vnet.ibm.com>
Jan Glauber <jan.glauber@gmail.com> <jglauber@cavium.com>
Jason Gunthorpe <jgg@ziepe.ca> <jgg@mellanox.com>
+Jason Gunthorpe <jgg@ziepe.ca> <jgg@nvidia.com>
Jason Gunthorpe <jgg@ziepe.ca> <jgunthorpe@obsidianresearch.com>
-Javi Merino <javi.merino@kernel.org> <javi.merino@arm.com>
<javier@osg.samsung.com> <javier.martinez@collabora.co.uk>
+Javi Merino <javi.merino@kernel.org> <javi.merino@arm.com>
Jayachandran C <c.jayachandran@gmail.com> <jayachandranc@netlogicmicro.com>
Jayachandran C <c.jayachandran@gmail.com> <jchandra@broadcom.com>
Jayachandran C <c.jayachandran@gmail.com> <jchandra@digeo.com>
Jayachandran C <c.jayachandran@gmail.com> <jnair@caviumnetworks.com>
-Jean Tourrilhes <jt@hpl.hp.com>
<jean-philippe@linaro.org> <jean-philippe.brucker@arm.com>
+Jean Tourrilhes <jt@hpl.hp.com>
Jeff Garzik <jgarzik@pretzel.yyz.us>
-Jeff Layton <jlayton@kernel.org> <jlayton@redhat.com>
Jeff Layton <jlayton@kernel.org> <jlayton@poochiereds.net>
Jeff Layton <jlayton@kernel.org> <jlayton@primarydata.com>
+Jeff Layton <jlayton@kernel.org> <jlayton@redhat.com>
Jens Axboe <axboe@suse.de>
Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
Jiri Slaby <jirislaby@kernel.org> <jirislaby@gmail.com>
Kamil Konieczny <k.konieczny@samsung.com> <k.konieczny@partner.samsung.com>
Kay Sievers <kay.sievers@vrfy.org>
Kenneth W Chen <kenneth.w.chen@intel.com>
-Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com>
Konstantin Khlebnikov <koct9i@gmail.com> <khlebnikov@yandex-team.ru>
+Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com>
Koushik <raghavendra.koushik@neterion.com>
-Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski@samsung.com>
Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski.k@gmail.com>
+Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski@samsung.com>
Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
-Leon Romanovsky <leon@kernel.org> <leon@leon.nu>
-Leon Romanovsky <leon@kernel.org> <leonro@mellanox.com>
Leonardo Bras <leobras.c@gmail.com> <leonardo@linux.ibm.com>
Leonid I Ananiev <leonid.i.ananiev@intel.com>
+Leon Romanovsky <leon@kernel.org> <leon@leon.nu>
+Leon Romanovsky <leon@kernel.org> <leonro@mellanox.com>
+Leon Romanovsky <leon@kernel.org> <leonro@nvidia.com>
Linas Vepstas <linas@austin.ibm.com>
-Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@web.de>
Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@ascom.ch>
-Li Yang <leoyang.li@nxp.com> <leo@zh-kernel.org>
+Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@web.de>
Li Yang <leoyang.li@nxp.com> <leoli@freescale.com>
+Li Yang <leoyang.li@nxp.com> <leo@zh-kernel.org>
Lukasz Luba <lukasz.luba@arm.com> <l.luba@partner.samsung.com>
Maciej W. Rozycki <macro@mips.com> <macro@imgtec.com>
-Marc Zyngier <maz@kernel.org> <marc.zyngier@arm.com>
Marcin Nowakowski <marcin.nowakowski@mips.com> <marcin.nowakowski@imgtec.com>
+Marc Zyngier <maz@kernel.org> <marc.zyngier@arm.com>
Mark Brown <broonie@sirena.org.uk>
Mark Yao <markyao0591@gmail.com> <mark.yao@rock-chips.com>
-Martin Kepplinger <martink@posteo.de> <martin.kepplinger@theobroma-systems.com>
Martin Kepplinger <martink@posteo.de> <martin.kepplinger@ginzinger.com>
Martin Kepplinger <martink@posteo.de> <martin.kepplinger@puri.sm>
+Martin Kepplinger <martink@posteo.de> <martin.kepplinger@theobroma-systems.com>
Mathieu Othacehe <m.othacehe@gmail.com>
Matthew Wilcox <willy@infradead.org> <matthew.r.wilcox@intel.com>
Matthew Wilcox <willy@infradead.org> <matthew@wil.cx>
Matthew Wilcox <willy@infradead.org> <willy@linux.intel.com>
Matthew Wilcox <willy@infradead.org> <willy@parisc-linux.org>
Matthieu CASTET <castet.matthieu@free.fr>
-Mauro Carvalho Chehab <mchehab@kernel.org> <mchehab@brturbo.com.br>
+Matt Ranostay <matt.ranostay@konsulko.com> <matt@ranostay.consulting>
+Matt Ranostay <mranostay@gmail.com> Matthew Ranostay <mranostay@embeddedalley.com>
+Matt Ranostay <mranostay@gmail.com> <matt.ranostay@intel.com>
+Matt Redfearn <matt.redfearn@mips.com> <matt.redfearn@imgtec.com>
Mauro Carvalho Chehab <mchehab@kernel.org> <maurochehab@gmail.com>
+Mauro Carvalho Chehab <mchehab@kernel.org> <mchehab@brturbo.com.br>
Mauro Carvalho Chehab <mchehab@kernel.org> <mchehab@infradead.org>
+Mauro Carvalho Chehab <mchehab@kernel.org> <mchehab@osg.samsung.com>
Mauro Carvalho Chehab <mchehab@kernel.org> <mchehab@redhat.com>
Mauro Carvalho Chehab <mchehab@kernel.org> <m.chehab@samsung.com>
-Mauro Carvalho Chehab <mchehab@kernel.org> <mchehab@osg.samsung.com>
Mauro Carvalho Chehab <mchehab@kernel.org> <mchehab@s-opensource.com>
-Matt Ranostay <mranostay@gmail.com> Matthew Ranostay <mranostay@embeddedalley.com>
-Matt Ranostay <mranostay@gmail.com> <matt.ranostay@intel.com>
-Matt Ranostay <matt.ranostay@konsulko.com> <matt@ranostay.consulting>
-Matt Redfearn <matt.redfearn@mips.com> <matt.redfearn@imgtec.com>
Maxime Ripard <mripard@kernel.org> <maxime.ripard@bootlin.com>
Maxime Ripard <mripard@kernel.org> <maxime.ripard@free-electrons.com>
Mayuresh Janorkar <mayur@ti.com>
Patrick Mochel <mochel@digitalimplant.org>
Paul Burton <paulburton@kernel.org> <paul.burton@imgtec.com>
Paul Burton <paulburton@kernel.org> <paul.burton@mips.com>
+Paul E. McKenney <paulmck@kernel.org> <paul.mckenney@linaro.org>
Paul E. McKenney <paulmck@kernel.org> <paulmck@linux.ibm.com>
Paul E. McKenney <paulmck@kernel.org> <paulmck@linux.vnet.ibm.com>
-Paul E. McKenney <paulmck@kernel.org> <paul.mckenney@linaro.org>
Paul E. McKenney <paulmck@kernel.org> <paulmck@us.ibm.com>
Peter A Jonsson <pj@ludd.ltu.se>
-Peter Oruba <peter@oruba.de>
Peter Oruba <peter.oruba@amd.com>
+Peter Oruba <peter@oruba.de>
Pratyush Anand <pratyush.anand@gmail.com> <pratyush.anand@st.com>
Praveen BP <praveenbp@ti.com>
Punit Agrawal <punitagrawal@gmail.com> <punit.agrawal@arm.com>
Ralf Wildenhues <Ralf.Wildenhues@gmx.de>
Randy Dunlap <rdunlap@infradead.org> <rdunlap@xenotime.net>
Rémi Denis-Courmont <rdenis@simphalempin.com>
-Ricardo Ribalda <ribalda@kernel.org> <ricardo.ribalda@gmail.com>
Ricardo Ribalda <ribalda@kernel.org> <ricardo@ribalda.com>
Ricardo Ribalda <ribalda@kernel.org> Ricardo Ribalda Delgado <ribalda@kernel.org>
+Ricardo Ribalda <ribalda@kernel.org> <ricardo.ribalda@gmail.com>
Ross Zwisler <zwisler@kernel.org> <ross.zwisler@linux.intel.com>
Rudolf Marek <R.Marek@sh.cvut.cz>
Rui Saraiva <rmps@joel.ist.utl.pt>
Sachin P Sant <ssant@in.ibm.com>
-Sarangdhar Joshi <spjoshi@codeaurora.org>
+Sakari Ailus <sakari.ailus@linux.intel.com> <sakari.ailus@iki.fi>
Sam Ravnborg <sam@mars.ravnborg.org>
-Santosh Shilimkar <ssantosh@kernel.org>
Santosh Shilimkar <santosh.shilimkar@oracle.org>
+Santosh Shilimkar <ssantosh@kernel.org>
+Sarangdhar Joshi <spjoshi@codeaurora.org>
Sascha Hauer <s.hauer@pengutronix.de>
S.Çağlar Onur <caglar@pardus.org.tr>
-Sakari Ailus <sakari.ailus@linux.intel.com> <sakari.ailus@iki.fi>
Sean Nyekjaer <sean@geanix.com> <sean.nyekjaer@prevas.dk>
-Sebastian Reichel <sre@kernel.org> <sre@debian.org>
Sebastian Reichel <sre@kernel.org> <sebastian.reichel@collabora.co.uk>
+Sebastian Reichel <sre@kernel.org> <sre@debian.org>
Sedat Dilek <sedat.dilek@gmail.com> <sedat.dilek@credativ.de>
Shiraz Hashim <shiraz.linux.kernel@gmail.com> <shiraz.hashim@st.com>
Shuah Khan <shuah@kernel.org> <shuahkhan@gmail.com>
Simon Kelley <simon@thekelleys.org.uk>
Stéphane Witzmann <stephane.witzmann@ubpmes.univ-bpclermont.fr>
Stephen Hemminger <shemminger@osdl.org>
+Steve Wise <larrystevenwise@gmail.com> <swise@chelsio.com>
+Steve Wise <larrystevenwise@gmail.com> <swise@opengridcomputing.com>
Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
Subhash Jadavani <subhashj@codeaurora.org>
Sudeep Holla <sudeep.holla@arm.com> Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
Sumit Semwal <sumit.semwal@ti.com>
+Takashi YOSHII <takashi.yoshii.zj@renesas.com>
Tejun Heo <htejun@gmail.com>
Thomas Graf <tgraf@suug.ch>
Thomas Pedersen <twp@codeaurora.org>
Tiezhu Yang <yangtiezhu@loongson.cn> <kernelpatch@126.com>
Todor Tomov <todor.too@gmail.com> <todor.tomov@linaro.org>
Tony Luck <tony.luck@intel.com>
-TripleX Chung <xxx.phy@gmail.com> <zhongyu@18mail.cn>
TripleX Chung <xxx.phy@gmail.com> <triplex@zh-kernel.org>
+TripleX Chung <xxx.phy@gmail.com> <zhongyu@18mail.cn>
Tsuneo Yoshioka <Tsuneo.Yoshioka@f-secure.com>
Uwe Kleine-König <ukleinek@informatik.uni-freiburg.de>
Uwe Kleine-König <ukl@pengutronix.de>
Vinod Koul <vkoul@kernel.org> <vinod.koul@intel.com>
Vinod Koul <vkoul@kernel.org> <vinod.koul@linux.intel.com>
Vinod Koul <vkoul@kernel.org> <vkoul@infradead.org>
+Viresh Kumar <vireshk@kernel.org> <viresh.kumar2@arm.com>
Viresh Kumar <vireshk@kernel.org> <viresh.kumar@st.com>
Viresh Kumar <vireshk@kernel.org> <viresh.linux@gmail.com>
-Viresh Kumar <vireshk@kernel.org> <viresh.kumar2@arm.com>
Vivien Didelot <vivien.didelot@gmail.com> <vivien.didelot@savoirfairelinux.com>
Vlad Dogaru <ddvlad@gmail.com> <vlad.dogaru@intel.com>
-Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@virtuozzo.com>
Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@parallels.com>
-Takashi YOSHII <takashi.yoshii.zj@renesas.com>
+Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@virtuozzo.com>
+WeiXiong Liao <gmpy.liaowx@gmail.com> <liaoweixiong@allwinnertech.com>
Will Deacon <will@kernel.org> <will.deacon@arm.com>
-Wolfram Sang <wsa@kernel.org> <wsa@the-dreams.de>
Wolfram Sang <wsa@kernel.org> <w.sang@pengutronix.de>
+Wolfram Sang <wsa@kernel.org> <wsa@the-dreams.de>
Yakir Yang <kuankuan.y@gmail.com> <ykk@rock-chips.com>
Yusuke Goda <goda.yusuke@renesas.com>
-Gustavo Padovan <gustavo@las.ic.unicamp.br>
-Gustavo Padovan <padovan@profusion.mobi>
-Changbin Du <changbin.du@intel.com> <changbin.du@intel.com>
-Changbin Du <changbin.du@intel.com> <changbin.du@gmail.com>
-Steve Wise <larrystevenwise@gmail.com> <swise@chelsio.com>
-Steve Wise <larrystevenwise@gmail.com> <swise@opengridcomputing.com>
This sysfs interface exposes the number of cores per chip
present in the system.
-What: /sys/devices/hv_24x7/interface/cpumask
+What: /sys/devices/hv_24x7/cpumask
Date: July 2020
Contact: Linux on PowerPC Developer List <linuxppc-dev@lists.ozlabs.org>
Description: read only
is invoked by both RCU-sched readers and updaters.
srcu_dereference_check(p, ssp, c):
Use explicit check expression "c" along with
- srcu_read_lock_held()(). This is useful in code that
+ srcu_read_lock_held(). This is useful in code that
is invoked by both SRCU readers and updaters.
rcu_dereference_raw(p):
Don't check. (Use sparingly, if at all.)
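
For example, a minimal sketch of the SRCU variant above ("gp", "my_srcu",
"my_lock" and "struct foo" are illustrative names; "gp" is assumed to be an
__rcu pointer updated under "my_lock")::

    struct foo *p;

    /* Legal in SRCU read-side critical sections *and* with my_lock held. */
    p = srcu_dereference_check(gp, &my_srcu,
                               lockdep_is_held(&my_lock));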
98 block User-mode virtual block device
0 = /dev/ubda First user-mode block device
- 16 = /dev/udbb Second user-mode block device
+ 16 = /dev/ubdb Second user-mode block device
...
Partitions are handled in the same way as for IDE
multiple of this tuning parameter if the stripe size is not set in the
ext4 superblock
+ mb_max_inode_prealloc
+ The maximum length of the per-inode ext4_prealloc_space list.
+
mb_max_to_scan
The maximum number of extents the multiblock allocator will search to
find the best extent.
Ioctls
======
-There is some Ext4 specific functionality which can be accessed by applications
-through the system call interfaces. The list of all Ext4 specific ioctls are
-shown in the table below.
+Ext4 implements various ioctls which can be used by applications to access
+ext4-specific functionality. An incomplete list of these ioctls is shown in the
+table below. This list includes truly ext4-specific ioctls (``EXT4_IOC_*``) as
+well as ioctls that may have been ext4-specific originally but are now supported
+by some other filesystem(s) too (``FS_IOC_*``).
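
As a hedged userspace sketch of the flags pair described below (the helper
name and flag choice are illustrative; setting FS_APPEND_FL additionally
requires CAP_LINUX_IMMUTABLE)::

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/fs.h>

    /* Turn on the append-only attribute of a file. */
    int make_append_only(const char *path)
    {
            int fd = open(path, O_RDONLY);
            int flags, ret;

            if (fd < 0)
                    return -1;
            ret = ioctl(fd, FS_IOC_GETFLAGS, &flags);
            if (ret == 0) {
                    flags |= FS_APPEND_FL;
                    ret = ioctl(fd, FS_IOC_SETFLAGS, &flags);
            }
            close(fd);
            return ret;
    }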
-Table of Ext4 specific ioctls
+Table of Ext4 ioctls
- EXT4_IOC_GETFLAGS
+ FS_IOC_GETFLAGS
Get additional attributes associated with inode. The ioctl argument is
- an integer bitfield, with bit values described in ext4.h. This ioctl is
- an alias for FS_IOC_GETFLAGS.
+ an integer bitfield, with bit values described in ext4.h.
- EXT4_IOC_SETFLAGS
+ FS_IOC_SETFLAGS
Set additional attributes associated with inode. The ioctl argument is
- an integer bitfield, with bit values described in ext4.h. This ioctl is
- an alias for FS_IOC_SETFLAGS.
+ an integer bitfield, with bit values described in ext4.h.
EXT4_IOC_GETVERSION, EXT4_IOC_GETVERSION_OLD
Get the inode i_generation number stored for each inode. The
efi= [EFI]
Format: { "debug", "disable_early_pci_dma",
"nochunk", "noruntime", "nosoftreserve",
- "novamap", "no_disable_early_pci_dma",
- "old_map" }
+ "novamap", "no_disable_early_pci_dma" }
debug: enable misc debug output.
disable_early_pci_dma: disable the busmaster bit on all
PCI bridges while in the EFI boot stub.
novamap: do not call SetVirtualAddressMap().
no_disable_early_pci_dma: Leave the busmaster bit set
on all PCI bridges while in the EFI boot stub
- old_map [X86-64]: switch to the old ioremap-based EFI
- runtime services mapping. [Needs CONFIG_X86_UV=y]
efi_no_storage_paranoia [EFI; X86]
Using this parameter you can use more than 50% of
DYTC Lapmode sensor
-------------------
+-------------------
sysfs: dytc_lapmode
internal P-state selection logic is expected to focus entirely on performance.
This will override the EPP/EPB setting coming from the ``sysfs`` interface
-(see `Energy vs Performance Hints`_ below).
+(see `Energy vs Performance Hints`_ below). Moreover, any attempts to change
+the EPP/EPB to a value different from 0 ("performance") via ``sysfs`` in this
+configuration will be rejected.
Also, in this configuration the range of P-states available to the processor's
internal P-state selection logic is always restricted to the upper boundary
Energy-Performance Bias (EPB) knob. It is also possible to write a positive
integer value between 0 and 255, if the EPP feature is present. If the EPP
feature is not present, writing integer value to this attribute is not
-supported. In this case, user can use
- "/sys/devices/system/cpu/cpu*/power/energy_perf_bias" interface.
+supported. In this case, user can use the
+"/sys/devices/system/cpu/cpu*/power/energy_perf_bias" interface.
[Note that tasks may be migrated from one CPU to another by the scheduler's
load-balancing algorithm and if different energy vs performance hints are
bpf_devel_QA
+Helper functions
+================
+
+* `bpf-helpers(7)`_ maintains a list of helpers available to eBPF programs.
+
+
Program types
=============
.. _networking-filter: ../networking/filter.rst
.. _man-pages: https://www.kernel.org/doc/man-pages/
.. _bpf(2): https://man7.org/linux/man-pages/man2/bpf.2.html
+.. _bpf-helpers(7): https://man7.org/linux/man-pages/man7/bpf-helpers.7.html
.. _BPF and XDP Reference Guide: https://docs.cilium.io/en/latest/bpf/
title: Clock bindings for Freescale i.MX23
maintainers:
- - Shawn Guo <shawn.guo@linaro.org>
+ - Shawn Guo <shawnguo@kernel.org>
description: |
The clock consumer should specify the desired clock by having the clock
title: Clock bindings for Freescale i.MX28
maintainers:
- - Shawn Guo <shawn.guo@linaro.org>
+ - Shawn Guo <shawnguo@kernel.org>
description: |
The clock consumer should specify the desired clock by having the clock
title: Freescale MXS GPIO controller
maintainers:
- - Shawn Guo <shawn.guo@linaro.org>
+ - Shawn Guo <shawnguo@kernel.org>
- Anson Huang <Anson.Huang@nxp.com>
description: |
title: Freescale MXS Inter IC (I2C) Controller
maintainers:
- - Shawn Guo <shawn.guo@linaro.org>
+ - Shawn Guo <shawnguo@kernel.org>
properties:
compatible:
+++ /dev/null
-Texas Instruments K3 Interrupt Aggregator
-=========================================
-
-The Interrupt Aggregator (INTA) provides a centralized machine
-which handles the termination of system events to that they can
-be coherently processed by the host(s) in the system. A maximum
-of 64 events can be mapped to a single interrupt.
-
-
- Interrupt Aggregator
- +-----------------------------------------+
- | Intmap VINT |
- | +--------------+ +------------+ |
- m ------>| | vint | bit | | 0 |.....|63| vint0 |
- . | +--------------+ +------------+ | +------+
- . | . . | | HOST |
-Globalevents ------>| . . |------>| IRQ |
- . | . . | | CTRL |
- . | . . | +------+
- n ------>| +--------------+ +------------+ |
- | | vint | bit | | 0 |.....|63| vintx |
- | +--------------+ +------------+ |
- | |
- +-----------------------------------------+
-
-Configuration of these Intmap registers that maps global events to vint is done
-by a system controller (like the Device Memory and Security Controller on K3
-AM654 SoC). Driver should request the system controller to get the range
-of global events and vints assigned to the requesting host. Management
-of these requested resources should be handled by driver and requests
-system controller to map specific global event to vint, bit pair.
-
-Communication between the host processor running an OS and the system
-controller happens through a protocol called TI System Control Interface
-(TISCI protocol). For more details refer:
-Documentation/devicetree/bindings/arm/keystone/ti,sci.txt
-
-TISCI Interrupt Aggregator Node:
--------------------------------
-- compatible: Must be "ti,sci-inta".
-- reg: Should contain registers location and length.
-- interrupt-controller: Identifies the node as an interrupt controller
-- msi-controller: Identifies the node as an MSI controller.
-- interrupt-parent: phandle of irq parent.
-- ti,sci: Phandle to TI-SCI compatible System controller node.
-- ti,sci-dev-id: TISCI device ID of the Interrupt Aggregator.
-- ti,sci-rm-range-vint: Array of TISCI subtype ids representing vints(inta
- outputs) range within this INTA, assigned to the
- requesting host context.
-- ti,sci-rm-range-global-event: Array of TISCI subtype ids representing the
- global events range reaching this IA and are assigned
- to the requesting host context.
-
-Example:
---------
-main_udmass_inta: interrupt-controller@33d00000 {
- compatible = "ti,sci-inta";
- reg = <0x0 0x33d00000 0x0 0x100000>;
- interrupt-controller;
- msi-controller;
- interrupt-parent = <&main_navss_intr>;
- ti,sci = <&dmsc>;
- ti,sci-dev-id = <179>;
- ti,sci-rm-range-vint = <0x0>;
- ti,sci-rm-range-global-event = <0x1>;
-};
--- /dev/null
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/ti,sci-inta.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Texas Instruments K3 Interrupt Aggregator
+
+maintainers:
+ - Lokesh Vutla <lokeshvutla@ti.com>
+
+allOf:
+ - $ref: /schemas/arm/keystone/ti,k3-sci-common.yaml#
+
+description: |
+ The Interrupt Aggregator (INTA) provides a centralized machine
+ which handles the termination of system events to that they can
+ be coherently processed by the host(s) in the system. A maximum
+ of 64 events can be mapped to a single interrupt.
+
+ Interrupt Aggregator
+ +-----------------------------------------+
+ | Intmap VINT |
+ | +--------------+ +------------+ |
+ m ------>| | vint | bit | | 0 |.....|63| vint0 |
+ . | +--------------+ +------------+ | +------+
+ . | . . | | HOST |
+ Globalevents ------>| . . |----->| IRQ |
+ . | . . | | CTRL |
+ . | . . | +------+
+ n ------>| +--------------+ +------------+ |
+ | | vint | bit | | 0 |.....|63| vintx |
+ | +--------------+ +------------+ |
+ | |
+ +-----------------------------------------+
+
+ Configuration of these Intmap registers that map global events to vint is
+ done by a system controller (like the Device Memory and Security Controller
+ on the AM654 SoC). The driver should request the system controller to get the
+ range of global events and vints assigned to the requesting host. Management
+ of these requested resources should be handled by the driver, which requests
+ the system controller to map a specific global event to a vint, bit pair.
+
+ Communication between the host processor running an OS and the system
+ controller happens through a protocol called TI System Control Interface
+ (TISCI protocol).
+
+properties:
+ compatible:
+ const: ti,sci-inta
+
+ reg:
+ maxItems: 1
+
+ interrupt-controller: true
+
+ msi-controller: true
+
+ ti,interrupt-ranges:
+ $ref: /schemas/types.yaml#/definitions/uint32-matrix
+ description: |
+ Interrupt ranges that convert the INTA output hw irq numbers
+ to the parent's input interrupt numbers.
+ items:
+ items:
+ - description: |
+ "output_irq" specifies the base for inta output irq
+ - description: |
+ "parent's input irq" specifies the base for parent irq
+ - description: |
+ "limit" specifies the limit for translation
+
+required:
+ - compatible
+ - reg
+ - interrupt-controller
+ - msi-controller
+ - ti,sci
+ - ti,sci-dev-id
+ - ti,interrupt-ranges
+
+examples:
+ - |
+ bus {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ main_udmass_inta: msi-controller@33d00000 {
+ compatible = "ti,sci-inta";
+ reg = <0x0 0x33d00000 0x0 0x100000>;
+ interrupt-controller;
+ msi-controller;
+ interrupt-parent = <&main_navss_intr>;
+ ti,sci = <&dmsc>;
+ ti,sci-dev-id = <179>;
+ ti,interrupt-ranges = <0 0 256>;
+ };
+ };
+++ /dev/null
-Texas Instruments K3 Interrupt Router
-=====================================
-
-The Interrupt Router (INTR) module provides a mechanism to mux M
-interrupt inputs to N interrupt outputs, where all M inputs are selectable
-to be driven per N output. An Interrupt Router can either handle edge triggered
-or level triggered interrupts and that is fixed in hardware.
-
- Interrupt Router
- +----------------------+
- | Inputs Outputs |
- +-------+ | +------+ +-----+ |
- | GPIO |----------->| | irq0 | | 0 | | Host IRQ
- +-------+ | +------+ +-----+ | controller
- | . . | +-------+
- +-------+ | . . |----->| IRQ |
- | INTA |----------->| . . | +-------+
- +-------+ | . +-----+ |
- | +------+ | N | |
- | | irqM | +-----+ |
- | +------+ |
- | |
- +----------------------+
-
-There is one register per output (MUXCNTL_N) that controls the selection.
-Configuration of these MUXCNTL_N registers is done by a system controller
-(like the Device Memory and Security Controller on K3 AM654 SoC). System
-controller will keep track of the used and unused registers within the Router.
-Driver should request the system controller to get the range of GIC IRQs
-assigned to the requesting hosts. It is the drivers responsibility to keep
-track of Host IRQs.
-
-Communication between the host processor running an OS and the system
-controller happens through a protocol called TI System Control Interface
-(TISCI protocol). For more details refer:
-Documentation/devicetree/bindings/arm/keystone/ti,sci.txt
-
-TISCI Interrupt Router Node:
-----------------------------
-Required Properties:
-- compatible: Must be "ti,sci-intr".
-- ti,intr-trigger-type: Should be one of the following:
- 1: If intr supports edge triggered interrupts.
- 4: If intr supports level triggered interrupts.
-- interrupt-controller: Identifies the node as an interrupt controller
-- #interrupt-cells: Specifies the number of cells needed to encode an
- interrupt source. The value should be 2.
- First cell should contain the TISCI device ID of source
- Second cell should contain the interrupt source offset
- within the device.
-- ti,sci: Phandle to TI-SCI compatible System controller node.
-- ti,sci-dst-id: TISCI device ID of the destination IRQ controller.
-- ti,sci-rm-range-girq: Array of TISCI subtype ids representing the host irqs
- assigned to this interrupt router. Each subtype id
- corresponds to a range of host irqs.
-
-For more details on TISCI IRQ resource management refer:
-https://downloads.ti.com/tisci/esd/latest/2_tisci_msgs/rm/rm_irq.html
-
-Example:
---------
-The following example demonstrates both interrupt router node and the consumer
-node(main gpio) on the AM654 SoC:
-
-main_intr: interrupt-controller0 {
- compatible = "ti,sci-intr";
- ti,intr-trigger-type = <1>;
- interrupt-controller;
- interrupt-parent = <&gic500>;
- #interrupt-cells = <2>;
- ti,sci = <&dmsc>;
- ti,sci-dst-id = <56>;
- ti,sci-rm-range-girq = <0x1>;
-};
-
-main_gpio0: gpio@600000 {
- ...
- interrupt-parent = <&main_intr>;
- interrupts = <57 256>, <57 257>, <57 258>,
- <57 259>, <57 260>, <57 261>;
- ...
-};
--- /dev/null
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/ti,sci-intr.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Texas Instruments K3 Interrupt Router
+
+maintainers:
+ - Lokesh Vutla <lokeshvutla@ti.com>
+
+allOf:
+ - $ref: /schemas/arm/keystone/ti,k3-sci-common.yaml#
+
+description: |
+ The Interrupt Router (INTR) module provides a mechanism to mux M
+ interrupt inputs to N interrupt outputs, where all M inputs are selectable
+ to be driven per N output. An Interrupt Router can handle either edge
+ triggered or level triggered interrupts, and that choice is fixed in hardware.
+
+ Interrupt Router
+ +----------------------+
+ | Inputs Outputs |
+ +-------+ | +------+ +-----+ |
+ | GPIO |----------->| | irq0 | | 0 | | Host IRQ
+ +-------+ | +------+ +-----+ | controller
+ | . . | +-------+
+ +-------+ | . . |----->| IRQ |
+ | INTA |----------->| . . | +-------+
+ +-------+ | . +-----+ |
+ | +------+ | N | |
+ | | irqM | +-----+ |
+ | +------+ |
+ | |
+ +----------------------+
+
+ There is one register per output (MUXCNTL_N) that controls the selection.
+ Configuration of these MUXCNTL_N registers is done by a system controller
+ (like the Device Memory and Security Controller on the K3 AM654 SoC). The
+ system controller will keep track of the used and unused registers within the
+ Router. The driver should request the system controller to get the range of
+ GIC IRQs assigned to the requesting hosts. It is the driver's responsibility
+ to keep track of Host IRQs.
+
+ Communication between the host processor running an OS and the system
+ controller happens through a protocol called TI System Control Interface
+ (TISCI protocol).
+
+properties:
+ compatible:
+ const: ti,sci-intr
+
+ ti,intr-trigger-type:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [1, 4]
+ description: |
+ Should be one of the following.
+ 1 = If intr supports edge triggered interrupts.
+ 4 = If intr supports level triggered interrupts.
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ const: 1
+ description: |
+ The 1st cell should contain the interrupt router input hw number.
+
+ ti,interrupt-ranges:
+ $ref: /schemas/types.yaml#/definitions/uint32-matrix
+ description: |
+ Interrupt ranges that convert the INTR output hw irq numbers
+ to the parent's input interrupt numbers.
+ items:
+ items:
+ - description: |
+ "output_irq" specifies the base for intr output irq
+ - description: |
+ "parent's input irq" specifies the base for parent irq
+ - description: |
+ "limit" specifies the limit for translation
+
+required:
+ - compatible
+ - ti,intr-trigger-type
+ - interrupt-controller
+ - '#interrupt-cells'
+ - ti,sci
+ - ti,sci-dev-id
+ - ti,interrupt-ranges
+
+examples:
+ - |
+ main_gpio_intr: interrupt-controller0 {
+ compatible = "ti,sci-intr";
+ ti,intr-trigger-type = <1>;
+ interrupt-controller;
+ interrupt-parent = <&gic500>;
+ #interrupt-cells = <1>;
+ ti,sci = <&dmsc>;
+ ti,sci-dev-id = <131>;
+ ti,interrupt-ranges = <0 360 32>;
+ };
then:
properties:
clock-output-names:
- items:
- - const: clk_out_sd0
- - const: clk_in_sd0
+ oneOf:
+ - items:
+ - const: clk_out_sd0
+ - const: clk_in_sd0
+ - items:
+ - const: clk_out_sd1
+ - const: clk_in_sd1
properties:
compatible:
title: Freescale Enhanced Secure Digital Host Controller (eSDHC) for i.MX
maintainers:
- - Shawn Guo <shawn.guo@linaro.org>
+ - Shawn Guo <shawnguo@kernel.org>
allOf:
- $ref: "mmc-controller.yaml"
error caused by stop clock (fifo full)
Valid range = [0:0x7]. If not present, default value is 0.
applied to compatible "mediatek,mt2701-mmc".
+- resets: Phandle and reset specifier pair to softreset line of MSDC IP.
+- reset-names: Should be "hrst".
Examples:
mmc0: mmc@11230000 {
title: Freescale MXS MMC controller
maintainers:
- - Shawn Guo <shawn.guo@linaro.org>
+ - Shawn Guo <shawnguo@kernel.org>
description: |
The Freescale MXS Synchronous Serial Ports (SSP) can act as a MMC controller
- "nvidia,tegra210-sdhci": for Tegra210
- "nvidia,tegra186-sdhci": for Tegra186
- "nvidia,tegra194-sdhci": for Tegra194
-- clocks : Must contain one entry, for the module clock.
- See ../clocks/clock-bindings.txt for details.
+- clocks: For Tegra210, Tegra186 and Tegra194 must contain two entries.
+ One for the module clock and one for the timeout clock.
+ For all other Tegra devices, must contain a single entry for
+ the module clock. See ../clocks/clock-bindings.txt for details.
+- clock-names: For Tegra210, Tegra186 and Tegra194 must contain the
+ strings 'sdhci' and 'tmclk' to represent the module and
+ the timeout clocks, respectively.
+ For all other Tegra devices must contain the string 'sdhci'
+ to represent the module clock.
- resets : Must contain an entry for each entry in reset-names.
See ../reset/reset.txt for details.
- reset-names : Must include the following entries:
Example:
sdhci@700b0000 {
- compatible = "nvidia,tegra210-sdhci", "nvidia,tegra124-sdhci";
+ compatible = "nvidia,tegra124-sdhci";
reg = <0x0 0x700b0000 0x0 0x200>;
interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&tegra_car TEGRA210_CLK_SDMMC1>;
nvidia,pad-autocal-pull-down-offset-1v8 = <0x7b>;
status = "disabled";
};
+
+sdhci@700b0000 {
+ compatible = "nvidia,tegra210-sdhci";
+ reg = <0x0 0x700b0000 0x0 0x200>;
+ interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&tegra_car TEGRA210_CLK_SDMMC1>,
+ <&tegra_car TEGRA210_CLK_SDMMC_LEGACY>;
+ clock-names = "sdhci", "tmclk";
+ resets = <&tegra_car 14>;
+ reset-names = "sdhci";
+ pinctrl-names = "sdmmc-3v3", "sdmmc-1v8";
+ pinctrl-0 = <&sdmmc1_3v3>;
+ pinctrl-1 = <&sdmmc1_1v8>;
+ nvidia,pad-autocal-pull-up-offset-3v3 = <0x00>;
+ nvidia,pad-autocal-pull-down-offset-3v3 = <0x7d>;
+ nvidia,pad-autocal-pull-up-offset-1v8 = <0x7b>;
+ nvidia,pad-autocal-pull-down-offset-1v8 = <0x7b>;
+ status = "disabled";
+};
Distributed Switch Architecture Device Tree Bindings
----------------------------------------------------
-See Documentation/devicetree/bindings/net/dsa/dsa.yaml for the documenation.
+See Documentation/devicetree/bindings/net/dsa/dsa.yaml for the documentation.
phy-connection-type:
description:
- Operation mode of the PHY interface
+ Specifies interface type between the Ethernet device and a physical
+ layer (PHY) device.
enum:
# There is not a standard bus between the MAC and the PHY,
# something proprietary is being used to embed the PHY in the
clocks:
maxItems: 1
- pinctrl-0: true
+ power-domains:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
- pinctrl-names: true
+ phy-mode: true
+
+ phy-handle: true
renesas,no-ether-link:
type: boolean
specify when the Ether LINK signal is active-low instead of normal
active-high
+patternProperties:
+ "^ethernet-phy@[0-9a-f]$":
+ type: object
+ $ref: ethernet-phy.yaml#
+
required:
- compatible
- reg
- '#address-cells'
- '#size-cells'
- clocks
- - pinctrl-0
+
+additionalProperties: false
examples:
# Lager board
clocks = <&mstp8_clks R8A7790_CLK_ETHER>;
phy-mode = "rmii";
phy-handle = <&phy1>;
- pinctrl-0 = <ðer_pins>;
- pinctrl-names = "default";
renesas,ether-link-active-low;
#address-cells = <1>;
#size-cells = <0>;
reg = <1>;
interrupt-parent = <&irqc0>;
interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
- pinctrl-0 = <&phy1_pins>;
- pinctrl-names = "default";
};
};
maintainers:
- Dilip Kota <eswara.kota@linux.intel.com>
+select:
+ properties:
+ compatible:
+ contains:
+ const: intel,lgm-pcie
+ required:
+ - compatible
+
properties:
compatible:
items:
title: Freescale MXS PWM controller
maintainers:
- - Shawn Guo <shawn.guo@linaro.org>
+ - Shawn Guo <shawnguo@kernel.org>
- Anson Huang <anson.huang@nxp.com>
properties:
title: Freescale (Enhanced) Configurable Serial Peripheral Interface (CSPI/eCSPI) for i.MX
maintainers:
- - Shawn Guo <shawn.guo@linaro.org>
+ - Shawn Guo <shawnguo@kernel.org>
allOf:
- $ref: "/schemas/spi/spi-controller.yaml#"
spi common code does not support use of CS signals discontinuously.
i.MX8DXL-EVK board only uses CS1 without using CS0. Therefore, add
this property to re-config the chipselect value in the LPSPI driver.
+ type: boolean
required:
- compatible
title: NXP i.MX Thermal Binding
maintainers:
- - Shawn Guo <shawn.guo@linaro.org>
+ - Shawn Guo <shawnguo@kernel.org>
- Anson Huang <Anson.Huang@nxp.com>
properties:
--- /dev/null
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/timer/sifive,clint.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: SiFive Core Local Interruptor
+
+maintainers:
+ - Palmer Dabbelt <palmer@dabbelt.com>
+ - Anup Patel <anup.patel@wdc.com>
+
+description:
+ SiFive (and other RISC-V) SOCs include an implementation of the SiFive
+ Core Local Interruptor (CLINT) for M-mode timer and M-mode inter-processor
+ interrupts. It directly connects to the timer and inter-processor interrupt
+ lines of various HARTs (or CPUs), so the RISC-V per-HART (or per-CPU) local
+ interrupt controller is the parent interrupt controller for the CLINT device.
+ The clock frequency of CLINT is specified via "timebase-frequency" DT
+ property of "/cpus" DT node. The "timebase-frequency" DT property is
+ described in Documentation/devicetree/bindings/riscv/cpus.yaml
+
+properties:
+ compatible:
+ items:
+ - const: sifive,fu540-c000-clint
+ - const: sifive,clint0
+
+ description:
+ Should be "sifive,<chip>-clint" and "sifive,clint<version>".
+ Supported compatible strings are -
+ "sifive,fu540-c000-clint" for the SiFive CLINT v0 as integrated
+ onto the SiFive FU540 chip, and "sifive,clint0" for the SiFive
+ CLINT v0 IP block with no chip integration tweaks.
+ Please refer to sifive-blocks-ip-versioning.txt for details
+
+ reg:
+ maxItems: 1
+
+ interrupts-extended:
+ minItems: 1
+
+additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupts-extended
+
+examples:
+ - |
+ timer@2000000 {
+ compatible = "sifive,fu540-c000-clint", "sifive,clint0";
+ interrupts-extended = <&cpu1intc 3 &cpu1intc 7
+ &cpu2intc 3 &cpu2intc 7
+ &cpu3intc 3 &cpu3intc 7
+ &cpu4intc 3 &cpu4intc 7>;
+ reg = <0x2000000 0x10000>;
+ };
+...
"^sst,.*":
description: Silicon Storage Technology, Inc.
"^sstar,.*":
- description: Xiamen Xingchen(SigmaStar) Technology Co., Ltd.
+ description: Xiamen Xingchen(SigmaStar) Technology Co., Ltd.
(formerly part of MStar Semiconductor, Inc.)
"^st,.*":
description: STMicroelectronics
Devicetree bindings are written using json-schema vocabulary. Schema files are
written in a JSON compatible subset of YAML. YAML is used instead of JSON as it
-considered more human readable and has some advantages such as allowing
+is considered more human readable and has some advantages such as allowing
comments (Prefixed with '#').
Schema Contents
A json-schema unique identifier string. The string must be a valid
URI typically containing the binding's filename and path. For DT schema, it must
begin with "http://devicetree.org/schemas/". The URL is used in constructing
- references to other files specified in schema "$ref" properties. A $ref values
+ references to other files specified in schema "$ref" properties. A $ref value
with a leading '/' will have the hostname prepended. A $ref value with only a
relative path or filename will be prepended with the hostname and path components
of the current schema file's '$id' value. A URL is used even for local files,
* struct :c:type:`fpga_bridge` — The FPGA Bridge structure
* struct :c:type:`fpga_bridge_ops` — Low level Bridge driver ops
-* :c:func:`devm_fpga_bridge_create()` — Allocate and init a bridge struct
-* :c:func:`fpga_bridge_register()` — Register a bridge
-* :c:func:`fpga_bridge_unregister()` — Unregister a bridge
+* devm_fpga_bridge_create() — Allocate and init a bridge struct
+* fpga_bridge_register() — Register a bridge
+* fpga_bridge_unregister() — Unregister a bridge
.. kernel-doc:: include/linux/fpga/fpga-bridge.h
:functions: fpga_bridge
* ``fpga_mgr_states`` — Values for :c:member:`fpga_manager->state`.
* struct :c:type:`fpga_manager` — the FPGA manager struct
* struct :c:type:`fpga_manager_ops` — Low level FPGA manager driver ops
-* :c:func:`devm_fpga_mgr_create` — Allocate and init a manager struct
-* :c:func:`fpga_mgr_register` — Register an FPGA manager
-* :c:func:`fpga_mgr_unregister` — Unregister an FPGA manager
+* devm_fpga_mgr_create() — Allocate and init a manager struct
+* fpga_mgr_register() — Register an FPGA manager
+* fpga_mgr_unregister() — Unregister an FPGA manager
.. kernel-doc:: include/linux/fpga/fpga-mgr.h
:functions: fpga_mgr_states
The in-kernel API for FPGA programming is a combination of APIs from
FPGA manager, bridge, and regions. The actual function used to
-trigger FPGA programming is :c:func:`fpga_region_program_fpga()`.
+trigger FPGA programming is fpga_region_program_fpga().
-:c:func:`fpga_region_program_fpga()` uses functionality supplied by
+fpga_region_program_fpga() uses functionality supplied by
the FPGA manager and bridges. It will:
* lock the region's mutex
* release the locks
The struct fpga_image_info specifies what FPGA image to program. It is
-allocated/freed by :c:func:`fpga_image_info_alloc()` and freed with
-:c:func:`fpga_image_info_free()`
+allocated with fpga_image_info_alloc() and freed with
+fpga_image_info_free().
How to program an FPGA using a region
-------------------------------------
API for programming an FPGA
---------------------------
-* :c:func:`fpga_region_program_fpga` — Program an FPGA
-* :c:type:`fpga_image_info` — Specifies what FPGA image to program
-* :c:func:`fpga_image_info_alloc()` — Allocate an FPGA image info struct
-* :c:func:`fpga_image_info_free()` — Free an FPGA image info struct
+* fpga_region_program_fpga() — Program an FPGA
+* struct :c:type:`fpga_image_info` — Specifies what FPGA image to program
+* fpga_image_info_alloc() — Allocate an FPGA image info struct
+* fpga_image_info_free() — Free an FPGA image info struct
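
A minimal sketch of that sequence, assuming the caller already has the
region's parent device and a struct fpga_region (the helper name and the
firmware file name are illustrative)::

    static int program_region(struct device *dev, struct fpga_region *region)
    {
            struct fpga_image_info *info;
            int ret;

            info = fpga_image_info_alloc(dev);
            if (!info)
                    return -ENOMEM;

            /* Program from a firmware file; buffer and scatter-gather
             * image sources are selected via other info fields. */
            info->firmware_name = devm_kstrdup(dev, "image.bit", GFP_KERNEL);
            region->info = info;

            ret = fpga_region_program_fpga(region);

            /* Deallocate the image info once done with it. */
            region->info = NULL;
            fpga_image_info_free(info);
            return ret;
    }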
.. kernel-doc:: drivers/fpga/fpga-region.c
:functions: fpga_region_program_fpga
----------------------------
* struct :c:type:`fpga_region` — The FPGA region struct
-* :c:func:`devm_fpga_region_create` — Allocate and init a region struct
-* :c:func:`fpga_region_register` — Register an FPGA region
-* :c:func:`fpga_region_unregister` — Unregister an FPGA region
+* devm_fpga_region_create() — Allocate and init a region struct
+* fpga_region_register() — Register an FPGA region
+* fpga_region_unregister() — Unregister an FPGA region
The FPGA region's probe function will need to get a reference to the FPGA
Manager it will be using to do the programming. This usually would happen
during the region's probe function.
-* :c:func:`fpga_mgr_get` — Get a reference to an FPGA manager, raise ref count
-* :c:func:`of_fpga_mgr_get` — Get a reference to an FPGA manager, raise ref count,
+* fpga_mgr_get() — Get a reference to an FPGA manager, raise ref count
+* of_fpga_mgr_get() — Get a reference to an FPGA manager, raise ref count,
given a device node.
-* :c:func:`fpga_mgr_put` — Put an FPGA manager
+* fpga_mgr_put() — Put an FPGA manager
The FPGA region will need to specify which bridges to control while programming
the FPGA. The region driver can build a list of bridges during probe time
(:c:member:`fpga_region->get_bridges`). The FPGA bridge framework supplies the
following APIs to handle building or tearing down that list.
-* :c:func:`fpga_bridge_get_to_list` — Get a ref of an FPGA bridge, add it to a
+* fpga_bridge_get_to_list() — Get a ref of an FPGA bridge, add it to a
list
-* :c:func:`of_fpga_bridge_get_to_list` — Get a ref of an FPGA bridge, add it to a
+* of_fpga_bridge_get_to_list() — Get a ref of an FPGA bridge, add it to a
list, given a device node
-* :c:func:`fpga_bridges_put` — Given a list of bridges, put them
+* fpga_bridges_put() — Given a list of bridges, put them
.. kernel-doc:: include/linux/fpga/fpga-region.h
:functions: fpga_region
----------------------
* struct :c:type:`iio_dev` - industrial I/O device
-* :c:func:`iio_device_alloc()` - allocate an :c:type:`iio_dev` from a driver
-* :c:func:`iio_device_free()` - free an :c:type:`iio_dev` from a driver
-* :c:func:`iio_device_register()` - register a device with the IIO subsystem
-* :c:func:`iio_device_unregister()` - unregister a device from the IIO
+* iio_device_alloc() - allocate an :c:type:`iio_dev` from a driver
+* iio_device_free() - free an :c:type:`iio_dev` from a driver
+* iio_device_register() - register a device with the IIO subsystem
+* iio_device_unregister() - unregister a device from the IIO
subsystem
An IIO device usually corresponds to a single hardware sensor and it
At probe:
-1. Call :c:func:`iio_device_alloc()`, which allocates memory for an IIO device.
+1. Call iio_device_alloc(), which allocates memory for an IIO device.
2. Initialize IIO device fields with driver specific information (e.g.
device name, device channels).
-3. Call :c:func:`iio_device_register()`, this registers the device with the
+3. Call iio_device_register(); this registers the device with the
IIO core. After this call the device is ready to accept requests from user
space applications.
At remove, we free the resources allocated in probe in reverse order:
-1. :c:func:`iio_device_unregister()`, unregister the device from the IIO core.
-2. :c:func:`iio_device_free()`, free the memory allocated for the IIO device.
+1. iio_device_unregister(), unregister the device from the IIO core.
+2. iio_device_free(), free the memory allocated for the IIO device.
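
A hedged sketch of the probe side of this sequence ("my_priv", "my_iio_info"
and "my_channels" are illustrative; note that the exact iio_device_alloc()
signature has varied across kernel versions, the one-argument form is assumed
here)::

    #include <linux/iio/iio.h>
    #include <linux/platform_device.h>

    static int my_probe(struct platform_device *pdev)
    {
            struct iio_dev *indio_dev;
            int ret;

            /* 1. Allocate memory for the IIO device. */
            indio_dev = iio_device_alloc(sizeof(struct my_priv));
            if (!indio_dev)
                    return -ENOMEM;

            /* 2. Fill in driver-specific information. */
            indio_dev->name = "my-sensor";
            indio_dev->info = &my_iio_info;
            indio_dev->channels = my_channels;
            indio_dev->num_channels = ARRAY_SIZE(my_channels);

            /* 3. Register the device with the IIO core. */
            ret = iio_device_register(indio_dev);
            if (ret)
                    iio_device_free(indio_dev);
            return ret;
    }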
IIO device sysfs interface
==========================
Linux's fault injection framework provides a systematic way to support
error injection via debugfs in the /sys/kernel/debug directory. When
enabled, the default NVME_SC_INVALID_OPCODE with no retry will be
-injected into the nvme_end_request. Users can change the default status
+injected into the nvme_try_complete_req. Users can change the default status
code and no retry flag via the debugfs. The list of Generic Command
Status can be found in include/linux/nvme.h
- R maps to r for user, group and others. On directories, R implies x.
- - If both W and D are allowed, w will be set.
+ - W maps to w.
- E maps to x.
- - H and P are always retained and ignored under Linux.
+ - D is ignored.
- - A is always reset when a file is written to.
+ - H, S and P are always retained and ignored under Linux.
+
+ - A is cleared when a file is written to.
User id and group id will be used unless set[gu]id are given as mount
options. Since most of the Amiga file systems are single user systems
The Linux rwxrwxrwx file mode is handled as follows:
- - r permission will set R for user, group and others.
+ - r permission will allow R for user, group and others.
+
+ - w permission will allow W for user, group and others.
- - w permission will set W and D for user, group and others.
+ - x permission of the user will allow E for plain files.
- - x permission of the user will set E for plain files.
+ - D will be allowed for user, group and others.
- All other flags (suid, sgid, ...) are ignored and will
not be retained.
Other References
----------------
-Also see http://www.nongnu.org/ext2-doc/ for quite a collection of
+Also see https://www.nongnu.org/ext2-doc/ for quite a collection of
information about ext2/3. Here's another old reference:
http://wiki.osdev.org/Ext2
number of bytes data per sensor and contents/meaning of those bytes.
Although both this document and the kernel driver have kept the sensor
-terminoligy for the addressing within a bank this is not 100% correct, in
+terminology for the addressing within a bank this is not 100% correct, in
bank 0x24 for example the addressing within the bank selects a PWM output not
a sensor.
turned up which do not hold 0x08 at DATA within 250 reads after writing the
bank address. With these versions this happens quite frequently, using larger
timeouts doesn't help, they just go offline for a second or 2, doing some
-internal callibration or whatever. Your code should be prepared to handle
+internal calibration or whatever. Your code should be prepared to handle
this and in case of no response in this specific case just go to sleep for a
while and then retry.
0-0x30 with the reading code used for the sensor banks (0x20-0x28) and this
resulted in a _permanent_ reprogramming of the voltages, luckily I had the
sensors part configured so that it would shut down my system on any out of spec
-voltages which proprably safed my computer (after a reboot I managed to
+voltages which probably saved my computer (after a reboot I managed to
immediately enter the bios and reload the defaults). This probably means that
the read/write cycle for the non sensor part is different from the sensor part.
Note:
The uGuru is a microcontroller with onboard firmware which programs
it to behave as a hwmon IC. There are many different revisions of the
- firmware and thus effectivly many different revisions of the uGuru.
+ firmware and thus effectively many different revisions of the uGuru.
Below is an incomplete list with which revisions are used for which
Motherboards:
sensortype (Volt or Temp) for bank1 sensors, for revision 1 uGuru's
this does not always work. For these uGuru's the autodetection can
be overridden with the bank1_types module param. For all 3 known
- revison 1 motherboards the correct use of this param is:
+ revision 1 motherboards the correct use of this param is:
bank1_types=1,1,0,0,0,0,0,2,0,0,0,0,2,0,0,1
You may also need to specify the fan_sensors option for these boards
fan_sensors=5
Note:
The uGuru is a microcontroller with onboard firmware which programs
it to behave as a hwmon IC. There are many different revisions of the
- firmware and thus effectivly many different revisions of the uGuru.
+ firmware and thus effectively many different revisions of the uGuru.
Below is an incomplete list with which revisions are used for which
Motherboards:
- uGuru 3.0.0.0 ~ 3.0.x.x (AW8, AL8, AT8, NI8 SLI, AT8 32X, AN8 32X,
AW9D-MAX)
- The abituguru3 driver is only for revison 3.0.x.x motherboards,
+ The abituguru3 driver is only for revision 3.0.x.x motherboards,
this driver will not work on older motherboards. For older
motherboards use the abituguru (without the 3 !) driver.
Clang
-----
-The compiler used can be swapped out via `CC=` command line argument to `make`.
-`CC=` should be set when selecting a config and during a build.
+The compiler used can be swapped out via the ``CC=`` command line argument to ``make``.
+``CC=`` should be set when selecting a config and during a build. ::
make CC=clang defconfig
---------------
A single Clang compiler binary will typically contain all supported backends,
-which can help simplify cross compiling.
+which can help simplify cross compiling. ::
ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- make CC=clang
-`CROSS_COMPILE` is not used to prefix the Clang compiler binary, instead
-`CROSS_COMPILE` is used to set a command line flag: `--target <triple>`. For
-example:
+``CROSS_COMPILE`` is not used to prefix the Clang compiler binary; instead
+``CROSS_COMPILE`` is used to set a command line flag: ``--target <triple>``. For
+example: ::
clang --target aarch64-linux-gnu foo.c
LLVM Utilities
--------------
-LLVM has substitutes for GNU binutils utilities. Kbuild supports `LLVM=1`
-to enable them.
+LLVM has substitutes for GNU binutils utilities. Kbuild supports ``LLVM=1``
+to enable them. ::
make LLVM=1
-They can be enabled individually. The full list of the parameters:
+They can be enabled individually. The full list of the parameters: ::
- make CC=clang LD=ld.lld AR=llvm-ar NM=llvm-nm STRIP=llvm-strip \\
- OBJCOPY=llvm-objcopy OBJDUMP=llvm-objdump OBJSIZE=llvm-size \\
- READELF=llvm-readelf HOSTCC=clang HOSTCXX=clang++ HOSTAR=llvm-ar \\
+ make CC=clang LD=ld.lld AR=llvm-ar NM=llvm-nm STRIP=llvm-strip \
+ OBJCOPY=llvm-objcopy OBJDUMP=llvm-objdump OBJSIZE=llvm-size \
+ READELF=llvm-readelf HOSTCC=clang HOSTCXX=clang++ HOSTAR=llvm-ar \
HOSTLD=ld.lld
Currently, the integrated assembler is disabled by default. You can pass
-`LLVM_IAS=1` to enable it.
+``LLVM_IAS=1`` to enable it.
Getting Help
------------
--- 3.5 Library file goals - lib-y
--- 3.6 Descending down in directories
--- 3.7 Compilation flags
- --- 3.8 Command line dependency
+ --- 3.8 <deleted>
--- 3.9 Dependency tracking
--- 3.10 Special Rules
--- 3.11 $(CC) support functions
=== 7 Architecture Makefiles
--- 7.1 Set variables to tweak the build to the architecture
- --- 7.2 Add prerequisites to archheaders:
- --- 7.3 Add prerequisites to archprepare:
+ --- 7.2 Add prerequisites to archheaders
+ --- 7.3 Add prerequisites to archprepare
--- 7.4 List directories to visit when descending
--- 7.5 Architecture-specific boot images
--- 7.6 Building non-kbuild targets
be used and if both a 'Makefile' and a 'Kbuild' file exists, then the 'Kbuild'
file will be used.
-Section 3.1 "Goal definitions" is a quick intro, further chapters provide
+Section 3.1 "Goal definitions" is a quick intro; further chapters provide
more details, with real examples.
3.1 Goal definitions
KBUILD_LDFLAGS := -m elf_s390
Note: ldflags-y can be used to further customise
- the flags used. See chapter 3.7.
+ the flags used. See section 3.7.
LDFLAGS_vmlinux
Options for $(LD) when linking vmlinux
In this example, the file target maketools will be processed
before descending down in the subdirectories.
- See also chapter XXX-TODO that describe how kbuild supports
+ See also chapter XXX-TODO that describes how kbuild supports
generating offset header files.
always be built.
Assignments to $(targets) are without $(obj)/ prefix.
if_changed may be used in conjunction with custom commands as
- defined in 6.8 "Custom kbuild commands".
+ defined in 7.8 "Custom kbuild commands".
Note: It is a typical mistake to forget the FORCE prerequisite.
Another common pitfall is that whitespace is sometimes
that may be shared between individual architectures.
The recommended approach how to use a generic header file is
to list the file in the Kbuild file.
- See "7.2 generic-y" for further info on syntax etc.
+ See "8.2 generic-y" for further info on syntax etc.
7.11 Post-link pass
-------------------
- Describe how kbuild supports shipped files with _shipped.
- Generating offset header files.
-- Add more variables to section 7?
+- Add more variables to chapters 7 or 9?
On non-PREEMPT_RT kernels local_lock operations map to the preemption and
interrupt disabling and enabling primitives:
- =========================== ======================
- local_lock(&llock) preempt_disable()
- local_unlock(&llock) preempt_enable()
- local_lock_irq(&llock) local_irq_disable()
- local_unlock_irq(&llock) local_irq_enable()
- local_lock_save(&llock) local_irq_save()
- local_lock_restore(&llock) local_irq_save()
- =========================== ======================
+ =============================== ======================
+ local_lock(&llock) preempt_disable()
+ local_unlock(&llock) preempt_enable()
+ local_lock_irq(&llock) local_irq_disable()
+ local_unlock_irq(&llock) local_irq_enable()
+ local_lock_irqsave(&llock) local_irq_save()
+ local_unlock_irqrestore(&llock) local_irq_restore()
+ =============================== ======================
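As a brief sketch (all names below are hypothetical), a per-CPU data
structure embeds the local_lock_t and its fields are only accessed under
the lock::

	#include <linux/local_lock.h>
	#include <linux/percpu.h>

	struct foo_stats {
		local_lock_t lock;
		unsigned long events;
	};

	static DEFINE_PER_CPU(struct foo_stats, foo_stats) = {
		.lock = INIT_LOCAL_LOCK(lock),
	};

	static void foo_count_event(void)
	{
		/* maps to preempt_disable() on non-PREEMPT_RT, per the table above */
		local_lock(&foo_stats.lock);
		this_cpu_inc(foo_stats.events);
		local_unlock(&foo_stats.lock);
	}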
The named scope of local_lock has two advantages over the regular
primitives:
{
local_irq_save(flags); -> local_lock_irqsave(&local_lock_1, flags);
func3();
- local_irq_restore(flags); -> local_lock_irqrestore(&local_lock_1, flags);
+ local_irq_restore(flags); -> local_unlock_irqrestore(&local_lock_1, flags);
}
func2()
{
local_irq_save(flags); -> local_lock_irqsave(&local_lock_2, flags);
func3();
- local_irq_restore(flags); -> local_lock_irqrestore(&local_lock_2, flags);
+ local_irq_restore(flags); -> local_unlock_irqrestore(&local_lock_2, flags);
}
func3()
{
local_irq_save(flags); -> local_lock_irqsave(&local_lock, flags);
func3();
- local_irq_restore(flags); -> local_lock_irqrestore(&local_lock, flags);
+ local_irq_restore(flags); -> local_unlock_irqrestore(&local_lock, flags);
}
func2()
{
local_irq_save(flags); -> local_lock_irqsave(&local_lock, flags);
func3();
- local_irq_restore(flags); -> local_lock_irqrestore(&local_lock, flags);
+ local_irq_restore(flags); -> local_unlock_irqrestore(&local_lock, flags);
}
func3()
../doc-guide/maintainer-profile
../nvdimm/maintainer-entry-profile
+ ../riscv/patch-acceptance
The latest version of this document can be found in the latest kernel
source (named Documentation/networking/bonding.rst).
-Discussions regarding the usage of the bonding driver take place on the
-bonding-devel mailing list, hosted at sourceforge.net. If you have questions or
-problems, post them to the list. The list address is:
-
-bonding-devel@lists.sourceforge.net
-
-The administrative interface (to subscribe or unsubscribe) can
-be found at:
-
-https://lists.sourceforge.net/lists/listinfo/bonding-devel
-
Discussions regarding the development of the bonding driver take place
on the main Linux network mailing list, hosted at vger.kernel.org. The list
address is:
be found at:
http://vger.kernel.org/vger-lists.html#netdev
-
-Donald Becker's Ethernet Drivers and diag programs may be found at :
-
- - http://web.archive.org/web/%2E/http://www.scyld.com/network/
-
-You will also find a lot of information regarding Ethernet, NWay, MII,
-etc. at www.scyld.com.
# bring up the slave interfaces
ip link set lan1 up
- ip link set lan1 up
+ ip link set lan2 up
ip link set lan3 up
# create bridge
Register preservation rules match the ELF ABI calling sequence with the
following differences:
-=========== ============= ========================================
--- For the sc instruction, differences with the ELF ABI ---
+=========== ============= ========================================
r0 Volatile (System call number.)
r3 Volatile (Parameter 1, and return value.)
r4-r8 Volatile (Parameters 2-6.)
cr0 Volatile (cr0.SO is the return error condition.)
cr1, cr5-7 Nonvolatile
lr Nonvolatile
+=========== ============= ========================================
--- For the scv 0 instruction, differences with the ELF ABI ---
+=========== ============= ========================================
r0 Volatile (System call number.)
r3 Volatile (Parameter 1, and return value.)
r4-r8 Volatile (Parameters 2-6.)
(Users of strscpy() still needing NUL-padding should instead
use strscpy_pad().)
-If a caller is using non-NUL-terminated strings, strncpy()() can
+If a caller is using non-NUL-terminated strings, strncpy() can
still be used, but destinations should be marked with the `__nonstring
<https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html>`_
attribute to avoid future compiler warnings.
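As an illustration (a hypothetical structure, not taken from any
driver)::

	/* A fixed-width tag that is deliberately not NUL-terminated. */
	struct log_record {
		char tag[8] __nonstring;
	};

	static void log_set_tag(struct log_record *rec, const char *src)
	{
		/*
		 * strncpy() is acceptable here: tag is marked __nonstring,
		 * so the compiler does not warn that the trailing NUL may
		 * be truncated away.
		 */
		strncpy(rec->tag, src, sizeof(rec->tag));
	}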
US Patents (https://www.uspto.gov/)
-----------------------------------
+-----------------------------------
US 5925841
Digital Sampling Instrument employing cache memory (Jul. 20, 1999)
US Patents (https://www.uspto.gov/)
-----------------------------------
+-----------------------------------
US 5925841
Digital Sampling Instrument employing cache memory (Jul. 20, 1999)
before returning from the ``STATUS`` and ``STATUS_EXT`` ioctl. In most cases
this driver_timestamp will be identical to the regular system tstamp.
-Examples of typestamping with HDaudio:
+Examples of timestamping with HDAudio:
1. DMA timestamp, no compensation for DMA+analog delay
::
strscpy(). (Users of strscpy() who need to extend the
NUL termination must add a call to memset())
-If the caller does not use NUL-terminated strings, then strncpy()()
+If the caller does not use NUL-terminated strings, then strncpy()
can still be used, but destination buffers must be
marked with the `__nonstring <https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html>`_
attribute to avoid warnings during compilation.
ARM/CAVIUM THUNDER NETWORK DRIVER
M: Sunil Goutham <sgoutham@marvell.com>
-M: Robert Richter <rrichter@marvell.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Supported
F: drivers/net/ethernet/cavium/thunder/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
F: block/
F: drivers/block/
+F: include/linux/blk*
F: kernel/trace/blktrace.c
F: lib/sbitmap.c
L: netdev@vger.kernel.org
L: openwrt-devel@lists.openwrt.org (subscribers-only)
S: Supported
+F: Documentation/devicetree/bindings/net/dsa/b53.txt
F: drivers/net/dsa/b53/*
F: include/linux/platform_data/b53.h
S: Maintained
F: drivers/phy/broadcom/phy-brcm-usb*
+BROADCOM ETHERNET PHY DRIVERS
+M: Florian Fainelli <f.fainelli@gmail.com>
+L: bcm-kernel-feedback-list@broadcom.com
+L: netdev@vger.kernel.org
+S: Supported
+F: Documentation/devicetree/bindings/net/broadcom-bcm87xx.txt
+F: drivers/net/phy/bcm*.[ch]
+F: drivers/net/phy/broadcom.c
+F: include/linux/brcmphy.h
+
BROADCOM GENET ETHERNET DRIVER
M: Doug Berger <opendmb@gmail.com>
M: Florian Fainelli <f.fainelli@gmail.com>
L: bcm-kernel-feedback-list@broadcom.com
L: netdev@vger.kernel.org
S: Supported
+F: Documentation/devicetree/bindings/net/brcm,bcmgenet.txt
+F: Documentation/devicetree/bindings/net/brcm,unimac-mdio.txt
F: drivers/net/ethernet/broadcom/genet/
+F: drivers/net/mdio/mdio-bcm-unimac.c
+F: include/linux/platform_data/bcmgenet.h
+F: include/linux/platform_data/mdio-bcm-unimac.h
BROADCOM IPROC ARM ARCHITECTURE
M: Ray Jui <rjui@broadcom.com>
F: drivers/net/wireless/ath/carl9170/
CAVIUM I2C DRIVER
-M: Robert Richter <rrichter@marvell.com>
-S: Supported
+M: Robert Richter <rric@kernel.org>
+S: Odd Fixes
W: http://www.marvell.com
F: drivers/i2c/busses/i2c-octeon*
F: drivers/i2c/busses/i2c-thunderx*
F: drivers/net/ethernet/cavium/liquidio/
CAVIUM MMC DRIVER
-M: Robert Richter <rrichter@marvell.com>
-S: Supported
+M: Robert Richter <rric@kernel.org>
+S: Odd Fixes
W: http://www.marvell.com
F: drivers/mmc/host/cavium*
F: drivers/crypto/cavium/cpt/
CAVIUM THUNDERX2 ARM64 SOC
-M: Robert Richter <rrichter@marvell.com>
+M: Robert Richter <rric@kernel.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S: Maintained
+S: Odd Fixes
F: Documentation/devicetree/bindings/arm/cavium-thunder2.txt
F: arch/arm64/boot/dts/cavium/thunder2-99xx*
F: .clang-format
CLANG/LLVM BUILD SUPPORT
+M: Nathan Chancellor <natechancellor@gmail.com>
+M: Nick Desaulniers <ndesaulniers@google.com>
L: clang-built-linux@googlegroups.com
S: Supported
W: https://clangbuiltlinux.github.io/
F: include/uapi/linux/dm-*.h
DEVLINK
-M: Jiri Pirko <jiri@mellanox.com>
+M: Jiri Pirko <jiri@nvidia.com>
L: netdev@vger.kernel.org
S: Supported
F: Documentation/networking/devlink
M: Jonathan Corbet <corbet@lwn.net>
L: linux-doc@vger.kernel.org
S: Maintained
+P: Documentation/doc-guide/maintainer-profile.rst
T: git git://git.lwn.net/linux.git docs-next
F: Documentation/
F: scripts/documentation-file-ref-check
F: lib/dynamic_debug.c
DYNAMIC INTERRUPT MODERATION
-M: Tal Gilboa <talgi@mellanox.com>
+M: Tal Gilboa <talgi@nvidia.com>
S: Maintained
F: Documentation/networking/net_dim.rst
F: include/linux/dim.h
F: drivers/edac/aspeed_edac.c
EDAC-BLUEFIELD
-M: Shravan Kumar Ramani <sramani@mellanox.com>
+M: Shravan Kumar Ramani <sramani@nvidia.com>
S: Supported
F: drivers/edac/bluefield_edac.c
EDAC-CAVIUM OCTEON
M: Ralf Baechle <ralf@linux-mips.org>
-M: Robert Richter <rrichter@marvell.com>
L: linux-edac@vger.kernel.org
L: linux-mips@vger.kernel.org
S: Supported
F: drivers/edac/octeon_edac*
EDAC-CAVIUM THUNDERX
-M: Robert Richter <rrichter@marvell.com>
+M: Robert Richter <rric@kernel.org>
L: linux-edac@vger.kernel.org
-S: Supported
+S: Odd Fixes
F: drivers/edac/thunderx_edac*
EDAC-CORE
M: Mauro Carvalho Chehab <mchehab@kernel.org>
M: Tony Luck <tony.luck@intel.com>
R: James Morse <james.morse@arm.com>
-R: Robert Richter <rrichter@marvell.com>
+R: Robert Richter <rric@kernel.org>
L: linux-edac@vger.kernel.org
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/ras/ras.git edac-for-next
F: drivers/net/ethernet/agere/
ETHERNET BRIDGE
-M: Roopa Prabhu <roopa@cumulusnetworks.com>
-M: Nikolay Aleksandrov <nikolay@cumulusnetworks.com>
+M: Roopa Prabhu <roopa@nvidia.com>
+M: Nikolay Aleksandrov <nikolay@nvidia.com>
L: bridge@lists.linux-foundation.org (moderated for non-subscribers)
L: netdev@vger.kernel.org
S: Maintained
ETHERNET PHY LIBRARY
M: Andrew Lunn <andrew@lunn.ch>
-M: Florian Fainelli <f.fainelli@gmail.com>
M: Heiner Kallweit <hkallweit1@gmail.com>
R: Russell King <linux@armlinux.org.uk>
L: netdev@vger.kernel.org
EZchip NPS platform support
M: Vineet Gupta <vgupta@synopsys.com>
-M: Ofer Levi <oferle@mellanox.com>
+M: Ofer Levi <oferle@nvidia.com>
S: Supported
F: arch/arc/boot/dts/eznps.dts
F: arch/arc/plat-eznps
M: Tony Luck <tony.luck@intel.com>
M: Fenghua Yu <fenghua.yu@intel.com>
L: linux-ia64@vger.kernel.org
-S: Maintained
+S: Odd Fixes
T: git git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux.git
F: Documentation/ia64/
F: arch/ia64/
INFINIBAND SUBSYSTEM
M: Doug Ledford <dledford@redhat.com>
-M: Jason Gunthorpe <jgg@mellanox.com>
+M: Jason Gunthorpe <jgg@nvidia.com>
L: linux-rdma@vger.kernel.org
S: Supported
W: https://github.com/linux-rdma/rdma-core
ISCSI EXTENSIONS FOR RDMA (ISER) INITIATOR
M: Sagi Grimberg <sagi@grimberg.me>
-M: Max Gurtovoy <maxg@mellanox.com>
+M: Max Gurtovoy <maxg@nvidia.com>
L: linux-rdma@vger.kernel.org
S: Supported
W: http://www.openfabrics.org
F: drivers/input/touchscreen/melfas_mip4.c
MELLANOX ETHERNET DRIVER (mlx4_en)
-M: Tariq Toukan <tariqt@mellanox.com>
+M: Tariq Toukan <tariqt@nvidia.com>
L: netdev@vger.kernel.org
S: Supported
W: http://www.mellanox.com
F: drivers/net/ethernet/mellanox/mlx4/en_*
MELLANOX ETHERNET DRIVER (mlx5e)
-M: Saeed Mahameed <saeedm@mellanox.com>
+M: Saeed Mahameed <saeedm@nvidia.com>
L: netdev@vger.kernel.org
S: Supported
W: http://www.mellanox.com
F: drivers/net/ethernet/mellanox/mlx5/core/en_*
MELLANOX ETHERNET INNOVA DRIVERS
-R: Boris Pismenny <borisp@mellanox.com>
+R: Boris Pismenny <borisp@nvidia.com>
L: netdev@vger.kernel.org
S: Supported
W: http://www.mellanox.com
F: include/linux/mlx5/mlx5_ifc_fpga.h
MELLANOX ETHERNET SWITCH DRIVERS
-M: Jiri Pirko <jiri@mellanox.com>
-M: Ido Schimmel <idosch@mellanox.com>
+M: Jiri Pirko <jiri@nvidia.com>
+M: Ido Schimmel <idosch@nvidia.com>
L: netdev@vger.kernel.org
S: Supported
W: http://www.mellanox.com
F: tools/testing/selftests/drivers/net/mlxsw/
MELLANOX FIRMWARE FLASH LIBRARY (mlxfw)
-M: mlxsw@mellanox.com
+M: mlxsw@nvidia.com
L: netdev@vger.kernel.org
S: Supported
W: http://www.mellanox.com
MELLANOX HARDWARE PLATFORM SUPPORT
M: Andy Shevchenko <andy@infradead.org>
M: Darren Hart <dvhart@infradead.org>
-M: Vadim Pasternak <vadimp@mellanox.com>
+M: Vadim Pasternak <vadimp@nvidia.com>
L: platform-driver-x86@vger.kernel.org
S: Supported
F: Documentation/ABI/testing/sysfs-platform-mellanox-bootctl
F: include/linux/platform_data/mlxreg.h
MELLANOX MLX4 core VPI driver
-M: Tariq Toukan <tariqt@mellanox.com>
+M: Tariq Toukan <tariqt@nvidia.com>
L: netdev@vger.kernel.org
L: linux-rdma@vger.kernel.org
S: Supported
F: include/linux/mlx4/
MELLANOX MLX4 IB driver
-M: Yishai Hadas <yishaih@mellanox.com>
+M: Yishai Hadas <yishaih@nvidia.com>
L: linux-rdma@vger.kernel.org
S: Supported
W: http://www.mellanox.com
F: include/uapi/rdma/mlx4-abi.h
MELLANOX MLX5 core VPI driver
-M: Saeed Mahameed <saeedm@mellanox.com>
-M: Leon Romanovsky <leonro@mellanox.com>
+M: Saeed Mahameed <saeedm@nvidia.com>
+M: Leon Romanovsky <leonro@nvidia.com>
L: netdev@vger.kernel.org
L: linux-rdma@vger.kernel.org
S: Supported
F: include/linux/mlx5/
MELLANOX MLX5 IB driver
-M: Leon Romanovsky <leonro@mellanox.com>
+M: Leon Romanovsky <leonro@nvidia.com>
L: linux-rdma@vger.kernel.org
S: Supported
W: http://www.mellanox.com
F: include/uapi/rdma/mlx5-abi.h
MELLANOX MLXCPLD I2C AND MUX DRIVER
-M: Vadim Pasternak <vadimp@mellanox.com>
-M: Michael Shych <michaelsh@mellanox.com>
+M: Vadim Pasternak <vadimp@nvidia.com>
+M: Michael Shych <michaelsh@nvidia.com>
L: linux-i2c@vger.kernel.org
S: Supported
F: Documentation/i2c/busses/i2c-mlxcpld.rst
F: drivers/i2c/muxes/i2c-mux-mlxcpld.c
MELLANOX MLXCPLD LED DRIVER
-M: Vadim Pasternak <vadimp@mellanox.com>
+M: Vadim Pasternak <vadimp@nvidia.com>
L: linux-leds@vger.kernel.org
S: Supported
F: Documentation/leds/leds-mlxcpld.rst
F: drivers/leds/leds-mlxreg.c
MELLANOX PLATFORM DRIVER
-M: Vadim Pasternak <vadimp@mellanox.com>
+M: Vadim Pasternak <vadimp@nvidia.com>
L: platform-driver-x86@vger.kernel.org
S: Supported
F: drivers/platform/x86/mlx-platform.c
F: net/ipv6/tcp*.c
NETWORKING [TLS]
-M: Boris Pismenny <borisp@mellanox.com>
-M: Aviad Yehezkel <aviadye@mellanox.com>
+M: Boris Pismenny <borisp@nvidia.com>
+M: Aviad Yehezkel <aviadye@nvidia.com>
M: John Fastabend <john.fastabend@gmail.com>
M: Daniel Borkmann <daniel@iogearbox.net>
M: Jakub Kicinski <kuba@kernel.org>
F: drivers/nfc/nxp-nci
OBJAGG
-M: Jiri Pirko <jiri@mellanox.com>
+M: Jiri Pirko <jiri@nvidia.com>
L: netdev@vger.kernel.org
S: Supported
F: include/linux/objagg.h
F: include/linux/hp_sdc.h
PARMAN
-M: Jiri Pirko <jiri@mellanox.com>
+M: Jiri Pirko <jiri@nvidia.com>
L: netdev@vger.kernel.org
S: Supported
F: include/linux/parman.h
F: drivers/pci/controller/dwc/*artpec*
PCIE DRIVER FOR CAVIUM THUNDERX
-M: Robert Richter <rrichter@marvell.com>
+M: Robert Richter <rric@kernel.org>
L: linux-pci@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S: Supported
+S: Odd Fixes
F: drivers/pci/controller/pci-thunder-*
PCIE DRIVER FOR HISILICON
F: tools/lib/perf/
F: tools/perf/
-PERFORMANCE EVENTS SUBSYSTEM ARM64 PMU EVENTS
+PERFORMANCE EVENTS TOOLING ARM64
R: John Garry <john.garry@huawei.com>
R: Will Deacon <will@kernel.org>
+R: Mathieu Poirier <mathieu.poirier@linaro.org>
+R: Leo Yan <leo.yan@linaro.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Supported
+F: tools/build/feature/test-libopencsd.c
+F: tools/perf/arch/arm*/
F: tools/perf/pmu-events/arch/arm64/
+F: tools/perf/util/arm-spe*
+F: tools/perf/util/cs-etm*
PERSONALITY HANDLING
M: Christoph Hellwig <hch@infradead.org>
L: iommu@lists.linux-foundation.org
L: linux-arm-msm@vger.kernel.org
S: Maintained
-F: drivers/iommu/qcom_iommu.c
+F: drivers/iommu/arm/arm-smmu/qcom_iommu.c
QUALCOMM IPCC MAILBOX DRIVER
M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
F: include/uapi/rdma/siw-abi.h
SOFT-ROCE DRIVER (rxe)
-M: Zhu Yanjun <yanjunz@mellanox.com>
+M: Zhu Yanjun <yanjunz@nvidia.com>
L: linux-rdma@vger.kernel.org
S: Supported
F: drivers/infiniband/sw/rxe/
F: Documentation/devicetree/bindings/arm/keystone/ti,k3-sci-common.yaml
F: Documentation/devicetree/bindings/arm/keystone/ti,sci.txt
F: Documentation/devicetree/bindings/clock/ti,sci-clk.txt
-F: Documentation/devicetree/bindings/interrupt-controller/ti,sci-inta.txt
-F: Documentation/devicetree/bindings/interrupt-controller/ti,sci-intr.txt
+F: Documentation/devicetree/bindings/interrupt-controller/ti,sci-inta.yaml
+F: Documentation/devicetree/bindings/interrupt-controller/ti,sci-intr.yaml
F: Documentation/devicetree/bindings/reset/ti,sci-reset.txt
F: Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt
F: drivers/clk/keystone/sci-clk.c
F: drivers/net/thunderbolt.c
THUNDERX GPIO DRIVER
-M: Robert Richter <rrichter@marvell.com>
-S: Maintained
+M: Robert Richter <rric@kernel.org>
+S: Odd Fixes
F: drivers/gpio/gpio-thunderx.c
TI AM437X VPFE DRIVER
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/core
F: arch/x86/platform
+X86 PLATFORM UV HPE SUPERDOME FLEX
+M: Steve Wahl <steve.wahl@hpe.com>
+R: Dimitri Sivanich <dimitri.sivanich@hpe.com>
+R: Russ Anderson <russ.anderson@hpe.com>
+S: Supported
+F: arch/x86/include/asm/uv/
+F: arch/x86/kernel/apic/x2apic_uv_x.c
+F: arch/x86/platform/uv/
+
X86 VDSO
M: Andy Lutomirski <luto@kernel.org>
L: linux-kernel@vger.kernel.org
VERSION = 5
PATCHLEVEL = 9
SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc4
NAME = Kleptomaniac Octopus
# *DOCUMENTATION*
$(version_h) headers headers_% archheaders archscripts \
%asm-generic kernelversion %src-pkg dt_binding_check \
outputmakefile
-no-sync-config-targets := $(no-dot-config-targets) install %install \
- kernelrelease
+no-sync-config-targets := $(no-dot-config-targets) %install kernelrelease
single-targets := %.a %.i %.ko %.lds %.ll %.lst %.mod %.o %.s %.symtypes %/
config-build :=
endif
ifeq ($(KBUILD_EXTMOD),)
- ifneq ($(filter config %config,$(MAKECMDGOALS)),)
+ ifneq ($(filter %config,$(MAKECMDGOALS)),)
config-build := 1
ifneq ($(words $(MAKECMDGOALS)),1)
mixed-build := 1
STO_ALPHA_STD_GPLOAD)
/* Omit the prologue. */
value += 8;
- /* FALLTHRU */
+ fallthrough;
case R_ALPHA_BRADDR:
value -= (u64)location + 4;
if (value & 3)
regs->r0 = EINTR;
break;
}
- /* fallthrough */
+ fallthrough;
case ERESTARTNOINTR:
regs->r0 = r0; /* reset v0 and a3 and replay syscall */
regs->r19 = r19;
case 0x26: /* sts */
fake_reg = s_reg_to_mem(alpha_read_fp_reg(reg));
- /* FALLTHRU */
+ fallthrough;
case 0x2c: /* stl */
__asm__ __volatile__(
case 0x27: /* stt */
fake_reg = alpha_read_fp_reg(reg);
- /* FALLTHRU */
+ fallthrough;
case 0x2d: /* stq */
__asm__ __volatile__(
arcpct: pct {
compatible = "snps,archs-pct";
+ interrupt-parent = <&cpu_intc>;
+ interrupts = <20>;
};
/* TIMER0 with interrupt for clockevent */
reg = <0x8000 0x2000>;
interrupts = <10>;
interrupt-names = "macirq";
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
snps,pbl = <32>;
snps,multicast-filter-bins = <256>;
clocks = <&gmacclk>;
#address-cells = <1>;
#size-cells = <0>;
compatible = "snps,dwmac-mdio";
- phy0: ethernet-phy@0 {
+ phy0: ethernet-phy@0 { /* Micrel KSZ9031 */
reg = <0>;
};
};
* vineetg: April 2010
* -Switched pgtable_t from being struct page * to unsigned long
* =Needed so that Page Table allocator (pte_alloc_one) is not forced to
- * to deal with struct page. Thay way in future we can make it allocate
+ * deal with struct page. That way in future we can make it allocate
* multiple PG Tbls in one Page Frame
* =sweet side effect is avoiding calls to ugly page_address( ) from the
- * pg-tlb allocator sub-sys (pte_alloc_one, ptr_free, pmd_populate
+ * pg-tlb allocator sub-sys (pte_alloc_one, ptr_free, pmd_populate)
*
* Amit Bhor, Sameer Dhavale: Codito Technologies 2004
*/
case op_LDWX_S: /* LDWX_S c, [b, u6] */
state->x = 1;
- /* intentional fall-through */
+ fallthrough;
case op_LDW_S: /* LDW_S c, [b, u6] */
state->zz = 2;
{
struct arc_reg_pct_build pct_bcr;
struct arc_reg_cc_build cc_bcr;
- int i, has_interrupts;
+ int i, has_interrupts, irq;
int counter_size; /* in bits */
union cc_name {
.attr_groups = arc_pmu->attr_groups,
};
- if (has_interrupts) {
- int irq = platform_get_irq(pdev, 0);
-
- if (irq < 0) {
- pr_err("Cannot get IRQ number for the platform\n");
- return -ENODEV;
- }
+ if (has_interrupts && (irq = platform_get_irq(pdev, 0)) >= 0) {
arc_pmu->irq = irq;
this_cpu_ptr(&arc_pmu_cpu));
on_each_cpu(arc_cpu_pmu_irq_init, &irq, 1);
-
- } else
+ } else {
arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
+ }
/*
* perf parser doesn't really like '-' symbol in events name, so let's
regs->r0 = -EINTR;
break;
}
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOINTR:
/*
#define ARC_PATH_MAX 256
-/*
- * Common routine to print scratch regs (r0-r12) or callee regs (r13-r25)
- * -Prints 3 regs per line and a CR.
- * -To continue, callee regs right after scratch, special handling of CR
- */
-static noinline void print_reg_file(long *reg_rev, int start_num)
+static noinline void print_regs_scratch(struct pt_regs *regs)
{
- unsigned int i;
- char buf[512];
- int n = 0, len = sizeof(buf);
-
- for (i = start_num; i < start_num + 13; i++) {
- n += scnprintf(buf + n, len - n, "r%02u: 0x%08lx\t",
- i, (unsigned long)*reg_rev);
-
- if (((i + 1) % 3) == 0)
- n += scnprintf(buf + n, len - n, "\n");
-
- /* because pt_regs has regs reversed: r12..r0, r25..r13 */
- if (is_isa_arcv2() && start_num == 0)
- reg_rev++;
- else
- reg_rev--;
- }
-
- if (start_num != 0)
- n += scnprintf(buf + n, len - n, "\n\n");
+ pr_cont("BTA: 0x%08lx\n SP: 0x%08lx FP: 0x%08lx BLK: %pS\n",
+ regs->bta, regs->sp, regs->fp, (void *)regs->blink);
+ pr_cont("LPS: 0x%08lx\tLPE: 0x%08lx\tLPC: 0x%08lx\n",
+ regs->lp_start, regs->lp_end, regs->lp_count);
- /* To continue printing callee regs on same line as scratch regs */
- if (start_num == 0)
- pr_info("%s", buf);
- else
- pr_cont("%s\n", buf);
+ pr_info("r00: 0x%08lx\tr01: 0x%08lx\tr02: 0x%08lx\n" \
+ "r03: 0x%08lx\tr04: 0x%08lx\tr05: 0x%08lx\n" \
+ "r06: 0x%08lx\tr07: 0x%08lx\tr08: 0x%08lx\n" \
+ "r09: 0x%08lx\tr10: 0x%08lx\tr11: 0x%08lx\n" \
+ "r12: 0x%08lx\t",
+ regs->r0, regs->r1, regs->r2,
+ regs->r3, regs->r4, regs->r5,
+ regs->r6, regs->r7, regs->r8,
+ regs->r9, regs->r10, regs->r11,
+ regs->r12);
}
-static void show_callee_regs(struct callee_regs *cregs)
+static void print_regs_callee(struct callee_regs *regs)
{
- print_reg_file(&(cregs->r13), 13);
+ pr_cont("r13: 0x%08lx\tr14: 0x%08lx\n" \
+ "r15: 0x%08lx\tr16: 0x%08lx\tr17: 0x%08lx\n" \
+ "r18: 0x%08lx\tr19: 0x%08lx\tr20: 0x%08lx\n" \
+ "r21: 0x%08lx\tr22: 0x%08lx\tr23: 0x%08lx\n" \
+ "r24: 0x%08lx\tr25: 0x%08lx\n",
+ regs->r13, regs->r14,
+ regs->r15, regs->r16, regs->r17,
+ regs->r18, regs->r19, regs->r20,
+ regs->r21, regs->r22, regs->r23,
+ regs->r24, regs->r25);
}
static void print_task_path_n_nm(struct task_struct *tsk)
void show_regs(struct pt_regs *regs)
{
struct task_struct *tsk = current;
- struct callee_regs *cregs;
+ struct callee_regs *cregs = (struct callee_regs *)tsk->thread.callee_reg;
/*
* generic code calls us with preemption disabled, but some calls
STS_BIT(regs, A2), STS_BIT(regs, A1),
STS_BIT(regs, E2), STS_BIT(regs, E1));
#else
- pr_cont(" [%2s%2s%2s%2s]",
+ pr_cont(" [%2s%2s%2s%2s] ",
STS_BIT(regs, IE),
(regs->status32 & STATUS_U_MASK) ? "U " : "K ",
STS_BIT(regs, DE), STS_BIT(regs, AE));
#endif
- pr_cont(" BTA: 0x%08lx\n SP: 0x%08lx FP: 0x%08lx BLK: %pS\n",
- regs->bta, regs->sp, regs->fp, (void *)regs->blink);
- pr_info("LPS: 0x%08lx\tLPE: 0x%08lx\tLPC: 0x%08lx\n",
- regs->lp_start, regs->lp_end, regs->lp_count);
-
- /* print regs->r0 thru regs->r12
- * Sequential printing was generating horrible code
- */
- print_reg_file(&(regs->r0), 0);
- /* If Callee regs were saved, display them too */
- cregs = (struct callee_regs *)current->thread.callee_reg;
+ print_regs_scratch(regs);
if (cregs)
- show_callee_regs(cregs);
+ print_regs_callee(cregs);
preempt_disable();
}
#else
BUILD_BUG_ON(sizeof(u32) != sizeof(value));
#endif
- /* Fall through */
+ fallthrough;
case DW_EH_PE_native:
if (end < (const void *)(ptr.pul + 1))
return 0;
case DW_CFA_def_cfa:
state->cfa.reg = get_uleb128(&ptr.p8, end);
unw_debug("cfa_def_cfa: r%lu ", state->cfa.reg);
- /* fall through */
+ fallthrough;
case DW_CFA_def_cfa_offset:
state->cfa.offs = get_uleb128(&ptr.p8, end);
unw_debug("cfa_def_cfa_offset: 0x%lx ",
break;
case DW_CFA_def_cfa_sf:
state->cfa.reg = get_uleb128(&ptr.p8, end);
- /* fall through */
+ fallthrough;
case DW_CFA_def_cfa_offset_sf:
state->cfa.offs = get_sleb128(&ptr.p8, end)
* state->dataAlign;
#ifdef CONFIG_HIGHMEM
static unsigned long min_high_pfn, max_high_pfn;
-static u64 high_mem_start;
-static u64 high_mem_sz;
+static phys_addr_t high_mem_start;
+static phys_addr_t high_mem_sz;
#endif
#ifdef CONFIG_DISCONTIGMEM
high_mem_sz = size;
in_use = 1;
memblock_add_node(base, size, 1);
+ memblock_reserve(base, size);
#endif
}
min_high_pfn = PFN_DOWN(high_mem_start);
max_high_pfn = PFN_DOWN(high_mem_start + high_mem_sz);
- max_zone_pfn[ZONE_HIGHMEM] = max_high_pfn;
+ max_zone_pfn[ZONE_HIGHMEM] = min_low_pfn;
high_memory = (void *)(min_high_pfn << PAGE_SHIFT);
kmap_init();
free_area_init(max_zone_pfn);
}
-/*
- * mem_init - initializes memory
- *
- * Frees up bootmem
- * Calculates and displays memory available/used
- */
-void __init mem_init(void)
+static void __init highmem_init(void)
{
#ifdef CONFIG_HIGHMEM
unsigned long tmp;
- reset_all_zones_managed_pages();
+ memblock_free(high_mem_start, high_mem_sz);
for (tmp = min_high_pfn; tmp < max_high_pfn; tmp++)
free_highmem_page(pfn_to_page(tmp));
#endif
+}
+/*
+ * mem_init - initializes memory
+ *
+ * Frees up bootmem
+ * Calculates and displays memory available/used
+ */
+void __init mem_init(void)
+{
memblock_free_all();
+ highmem_init();
mem_init_print_info(NULL);
}
#define CTOP_AUX_DPC (CTOP_AUX_BASE + 0x02C)
#define CTOP_AUX_LPC (CTOP_AUX_BASE + 0x030)
#define CTOP_AUX_EFLAGS (CTOP_AUX_BASE + 0x080)
-#define CTOP_AUX_IACK (CTOP_AUX_BASE + 0x088)
#define CTOP_AUX_GPA1 (CTOP_AUX_BASE + 0x08C)
#define CTOP_AUX_UDMC (CTOP_AUX_BASE + 0x300)
if ((hw->ctrl.type != ARM_BREAKPOINT_EXECUTE)
&& max_watchpoint_len >= 8)
break;
- /* Else, fall through */
+ fallthrough;
default:
return -EINVAL;
}
/* Allow halfword watchpoints and breakpoints. */
if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2)
break;
- /* Else, fall through */
+ fallthrough;
case 3:
/* Allow single byte watchpoint. */
if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
break;
- /* Else, fall through */
+ fallthrough;
default:
ret = -EINVAL;
goto out;
break;
case ARM_ENTRY_ASYNC_WATCHPOINT:
WARN(1, "Asynchronous watchpoint exception taken. Debugging results may be unreliable\n");
- /* Fall through */
+ fallthrough;
case ARM_ENTRY_SYNC_WATCHPOINT:
watchpoint_handler(addr, fsr, regs);
break;
ARM_DBG_READ(c1, c1, 4, oslsr);
if (oslsr & ARM_OSLSR_OSLM0)
return true;
- /* Else, fall through */
+ fallthrough;
default:
return false;
}
switch (retval) {
case -ERESTART_RESTARTBLOCK:
restart -= 2;
- /* Fall through */
+ fallthrough;
case -ERESTARTNOHAND:
case -ERESTARTSYS:
case -ERESTARTNOINTR:
* FALLTHROUGH: Ensure we don't try to overwrite our newly
* initialised state information on the first fault.
*/
- /* Fall through */
+ fallthrough;
case THREAD_NOTIFY_EXIT:
crunch_task_release(thread);
case POWER_MODE_SYS_SLEEP:
apcr |= MPMU_PCR_PJ_SLPEN; /* set the SLPEN bit */
apcr |= MPMU_PCR_PJ_VCTCXOSD; /* set VCTCXOSD */
- /* fall through */
+ fallthrough;
case POWER_MODE_CHIP_SLEEP:
apcr |= MPMU_PCR_PJ_SLPEN;
- /* fall through */
+ fallthrough;
case POWER_MODE_APPS_SLEEP:
apcr |= MPMU_PCR_PJ_APBSD; /* set APBSD */
- /* fall through */
+ fallthrough;
case POWER_MODE_APPS_IDLE:
apcr |= MPMU_PCR_PJ_AXISD; /* set AXISDD bit */
apcr |= MPMU_PCR_PJ_DDRCORSD; /* set DDRCORSD bit */
idle_cfg |= APMU_PJ_IDLE_CFG_PJ_PWRDWN; /* PJ power down */
apcr |= MPMU_PCR_PJ_SPSD;
- /* fall through */
+ fallthrough;
case POWER_MODE_CORE_EXTIDLE:
idle_cfg |= APMU_PJ_IDLE_CFG_PJ_IDLE; /* set the IDLE bit */
idle_cfg &= ~APMU_PJ_IDLE_CFG_ISO_MODE_CNTRL_MASK;
case POWER_MODE_UDR:
/* only shutdown APB in UDR */
apcr |= MPMU_APCR_STBYEN | MPMU_APCR_APBSD;
- /* fall through */
+ fallthrough;
case POWER_MODE_SYS_SLEEP:
apcr |= MPMU_APCR_SLPEN; /* set the SLPEN bit */
apcr |= MPMU_APCR_VCTCXOSD; /* set VCTCXOSD */
- /* fall through */
+ fallthrough;
case POWER_MODE_APPS_SLEEP:
apcr |= MPMU_APCR_DDRCORSD; /* set DDRCORSD */
- /* fall through */
+ fallthrough;
case POWER_MODE_APPS_IDLE:
apcr |= MPMU_APCR_AXISD; /* set AXISDD bit */
- /* fall through */
+ fallthrough;
case POWER_MODE_CORE_EXTIDLE:
idle_cfg |= APMU_MOH_IDLE_CFG_MOH_IDLE;
idle_cfg |= APMU_MOH_IDLE_CFG_MOH_PWRDWN;
idle_cfg |= APMU_MOH_IDLE_CFG_MOH_PWR_SW(3)
| APMU_MOH_IDLE_CFG_MOH_L2_PWR_SW(3);
- /* fall through */
+ fallthrough;
case POWER_MODE_CORE_INTIDLE:
break;
}
cpu_rev = "3.1";
break;
case 7:
- /* FALLTHROUGH */
default:
/* Use the latest known revision as default */
omap_revision = OMAP3430_REV_ES3_1_2;
cpu_rev = "1.0";
break;
case 1:
- /* FALLTHROUGH */
default:
omap_revision = AM35XX_REV_ES1_1;
cpu_rev = "1.1";
cpu_rev = "1.1";
break;
case 2:
- /* FALLTHROUGH */
default:
omap_revision = OMAP3630_REV_ES1_2;
cpu_rev = "1.2";
cpu_rev = "2.0";
break;
case 3:
- /* FALLTHROUGH */
default:
omap_revision = TI8168_REV_ES2_1;
cpu_rev = "2.1";
cpu_rev = "2.0";
break;
case 2:
- /* FALLTHROUGH */
default:
omap_revision = AM335X_REV_ES2_1;
cpu_rev = "2.1";
cpu_rev = "1.1";
break;
case 2:
- /* FALLTHROUGH */
default:
omap_revision = AM437X_REV_ES1_2;
cpu_rev = "1.2";
case 0xb968:
switch (rev) {
case 0:
- /* FALLTHROUGH */
case 1:
omap_revision = TI8148_REV_ES1_0;
cpu_rev = "1.0";
cpu_rev = "2.0";
break;
case 3:
- /* FALLTHROUGH */
default:
omap_revision = TI8148_REV_ES2_1;
cpu_rev = "2.1";
if (pdev->dev.of_node)
omap_device_build_from_dt(pdev);
omap_auxdata_legacy_init(dev);
- /* fall through */
+ fallthrough;
default:
od = to_omap_device(pdev);
if (od)
if (omap_irq_pending())
return;
- trace_cpu_idle_rcuidle(1, smp_processor_id());
-
omap_sram_idle();
-
- trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}
#ifdef CONFIG_SUSPEND
dns323ab_leds[0].active_low = 1;
gpio_request(DNS323_GPIO_LED_POWER1, "Power Led Enable");
gpio_direction_output(DNS323_GPIO_LED_POWER1, 0);
- /* Fall through */
+ fallthrough;
case DNS323_REV_B1:
i2c_register_board_info(0, dns323ab_i2c_devices,
ARRAY_SIZE(dns323ab_i2c_devices));
switch (tag->u.acorn.vram_pages) {
case 512:
vram_size += PAGE_SIZE * 256;
- /* Fall through - ??? */
+ fallthrough; /* ??? */
case 256:
vram_size += PAGE_SIZE * 256;
default:
switch (err) {
case -ENOSYS:
tegra_cpu_reset_handler_set(reset_address);
- /* fall through */
+ fallthrough;
case 0:
is_enabled = true;
break;
return subset[(L<<1) | ((tinstr & (1<<8)) >> 8)] |
(tinstr & 255); /* register_list */
}
- /* Else, fall through - for illegal instruction case */
+ fallthrough; /* for illegal instruction case */
default:
return BAD_INSTR;
case 0xe8e0:
case 0xe9e0:
poffset->un = (tinst2 & 0xff) << 2;
- /* Fall through */
+ fallthrough;
case 0xe940:
case 0xe9c0:
/* Other ARM CPUs require no workaround */
if (read_cpuid_implementor() == ARM_CPU_IMP_ARM)
break;
- /* fallthrough */
+ fallthrough;
/* Cortex A57/A72 require firmware workaround */
case ARM_CPU_PART_CORTEX_A57:
case ARM_CPU_PART_CORTEX_A72: {
* not supported by current hardware on OMAP1
* w |= (0x03 << 7);
*/
- /* fall through */
+ fallthrough;
case OMAP_DMA_DATA_BURST_16:
if (dma_omap2plus()) {
burst = 0x3;
break;
}
/* OMAP1 don't support burst 16 */
- /* fall through */
+ fallthrough;
default:
BUG();
}
break;
}
/* OMAP1 don't support burst 16 */
- /* fall through */
+ fallthrough;
default:
printk(KERN_ERR "Invalid DMA burst mode\n");
BUG();
case REG_TYPE_NOPCWB:
if (!is_writeback(insn))
break; /* No writeback, so any register is OK */
- /* fall through... */
+ fallthrough;
case REG_TYPE_NOPC:
case REG_TYPE_NOPCX:
/* Reject PC (R15) */
/* A nested probe was hit in FIQ, it is a BUG */
pr_warn("Unrecoverable kprobe detected.\n");
dump_kprobe(p);
- /* fall through */
+ fallthrough;
default:
/* impossible cases */
BUG();
# compiler to generate them and consequently to break the single image contract
# we pass it only to the assembler. This option is utilized only in case of non
# integrated assemblers.
-ifneq ($(CONFIG_AS_HAS_ARMV8_4), y)
-branch-prot-flags-$(CONFIG_AS_HAS_PAC) += -Wa,-march=armv8.3-a
+ifeq ($(CONFIG_AS_HAS_PAC), y)
+asm-arch := armv8.3-a
endif
endif
ifeq ($(CONFIG_AS_HAS_ARMV8_4), y)
# make sure to pass the newest target architecture to -march.
-KBUILD_CFLAGS += -Wa,-march=armv8.4-a
+asm-arch := armv8.4-a
+endif
+
+ifdef asm-arch
+KBUILD_CFLAGS += -Wa,-march=$(asm-arch) \
+ -DARM64_ASM_ARCH='"$(asm-arch)"'
endif
ifeq ($(CONFIG_SHADOW_CALL_STACK), y)
PHONY += vdso_install
vdso_install:
$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso $@
+ $(if $(CONFIG_COMPAT_VDSO), \
+ $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso32 $@)
# We use MRPROPER_FILES and CLEAN_FILES now
archclean:
clocks = <&pericfg CLK_PERI_MSDC30_0_PD>,
<&topckgen CLK_TOP_MSDC50_0_SEL>;
clock-names = "source", "hclk";
+ resets = <&pericfg MT7622_PERI_MSDC0_SW_RST>;
+ reset-names = "hrst";
status = "disabled";
};
compatible = "nvidia,tegra186-sdhci";
reg = <0x0 0x03400000 0x0 0x10000>;
interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&bpmp TEGRA186_CLK_SDMMC1>;
- clock-names = "sdhci";
+ clocks = <&bpmp TEGRA186_CLK_SDMMC1>,
+ <&bpmp TEGRA186_CLK_SDMMC_LEGACY_TM>;
+ clock-names = "sdhci", "tmclk";
resets = <&bpmp TEGRA186_RESET_SDMMC1>;
reset-names = "sdhci";
interconnects = <&mc TEGRA186_MEMORY_CLIENT_SDMMCRA &emc>,
compatible = "nvidia,tegra186-sdhci";
reg = <0x0 0x03420000 0x0 0x10000>;
interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&bpmp TEGRA186_CLK_SDMMC2>;
- clock-names = "sdhci";
+ clocks = <&bpmp TEGRA186_CLK_SDMMC2>,
+ <&bpmp TEGRA186_CLK_SDMMC_LEGACY_TM>;
+ clock-names = "sdhci", "tmclk";
resets = <&bpmp TEGRA186_RESET_SDMMC2>;
reset-names = "sdhci";
interconnects = <&mc TEGRA186_MEMORY_CLIENT_SDMMCRAA &emc>,
compatible = "nvidia,tegra186-sdhci";
reg = <0x0 0x03440000 0x0 0x10000>;
interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&bpmp TEGRA186_CLK_SDMMC3>;
- clock-names = "sdhci";
+ clocks = <&bpmp TEGRA186_CLK_SDMMC3>,
+ <&bpmp TEGRA186_CLK_SDMMC_LEGACY_TM>;
+ clock-names = "sdhci", "tmclk";
resets = <&bpmp TEGRA186_RESET_SDMMC3>;
reset-names = "sdhci";
interconnects = <&mc TEGRA186_MEMORY_CLIENT_SDMMCR &emc>,
compatible = "nvidia,tegra186-sdhci";
reg = <0x0 0x03460000 0x0 0x10000>;
interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&bpmp TEGRA186_CLK_SDMMC4>;
- clock-names = "sdhci";
+ clocks = <&bpmp TEGRA186_CLK_SDMMC4>,
+ <&bpmp TEGRA186_CLK_SDMMC_LEGACY_TM>;
+ clock-names = "sdhci", "tmclk";
assigned-clocks = <&bpmp TEGRA186_CLK_SDMMC4>,
<&bpmp TEGRA186_CLK_PLLC4_VCO>;
assigned-clock-parents = <&bpmp TEGRA186_CLK_PLLC4_VCO>;
compatible = "nvidia,tegra194-sdhci";
reg = <0x03400000 0x10000>;
interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&bpmp TEGRA194_CLK_SDMMC1>;
- clock-names = "sdhci";
+ clocks = <&bpmp TEGRA194_CLK_SDMMC1>,
+ <&bpmp TEGRA194_CLK_SDMMC_LEGACY_TM>;
+ clock-names = "sdhci", "tmclk";
resets = <&bpmp TEGRA194_RESET_SDMMC1>;
reset-names = "sdhci";
interconnects = <&mc TEGRA194_MEMORY_CLIENT_SDMMCRA &emc>,
compatible = "nvidia,tegra194-sdhci";
reg = <0x03440000 0x10000>;
interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&bpmp TEGRA194_CLK_SDMMC3>;
- clock-names = "sdhci";
+ clocks = <&bpmp TEGRA194_CLK_SDMMC3>,
+ <&bpmp TEGRA194_CLK_SDMMC_LEGACY_TM>;
+ clock-names = "sdhci", "tmclk";
resets = <&bpmp TEGRA194_RESET_SDMMC3>;
reset-names = "sdhci";
interconnects = <&mc TEGRA194_MEMORY_CLIENT_SDMMCR &emc>,
compatible = "nvidia,tegra194-sdhci";
reg = <0x03460000 0x10000>;
interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&bpmp TEGRA194_CLK_SDMMC4>;
- clock-names = "sdhci";
+ clocks = <&bpmp TEGRA194_CLK_SDMMC4>,
+ <&bpmp TEGRA194_CLK_SDMMC_LEGACY_TM>;
+ clock-names = "sdhci", "tmclk";
assigned-clocks = <&bpmp TEGRA194_CLK_SDMMC4>,
<&bpmp TEGRA194_CLK_PLLC4>;
assigned-clock-parents =
compatible = "nvidia,tegra210-sdhci";
reg = <0x0 0x700b0000 0x0 0x200>;
interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&tegra_car TEGRA210_CLK_SDMMC1>;
- clock-names = "sdhci";
+ clocks = <&tegra_car TEGRA210_CLK_SDMMC1>,
+ <&tegra_car TEGRA210_CLK_SDMMC_LEGACY>;
+ clock-names = "sdhci", "tmclk";
resets = <&tegra_car 14>;
reset-names = "sdhci";
pinctrl-names = "sdmmc-3v3", "sdmmc-1v8",
compatible = "nvidia,tegra210-sdhci";
reg = <0x0 0x700b0200 0x0 0x200>;
interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&tegra_car TEGRA210_CLK_SDMMC2>;
- clock-names = "sdhci";
+ clocks = <&tegra_car TEGRA210_CLK_SDMMC2>,
+ <&tegra_car TEGRA210_CLK_SDMMC_LEGACY>;
+ clock-names = "sdhci", "tmclk";
resets = <&tegra_car 9>;
reset-names = "sdhci";
pinctrl-names = "sdmmc-1v8-drv";
compatible = "nvidia,tegra210-sdhci";
reg = <0x0 0x700b0400 0x0 0x200>;
interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&tegra_car TEGRA210_CLK_SDMMC3>;
- clock-names = "sdhci";
+ clocks = <&tegra_car TEGRA210_CLK_SDMMC3>,
+ <&tegra_car TEGRA210_CLK_SDMMC_LEGACY>;
+ clock-names = "sdhci", "tmclk";
resets = <&tegra_car 69>;
reset-names = "sdhci";
pinctrl-names = "sdmmc-3v3", "sdmmc-1v8",
compatible = "nvidia,tegra210-sdhci";
reg = <0x0 0x700b0600 0x0 0x200>;
interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&tegra_car TEGRA210_CLK_SDMMC4>;
- clock-names = "sdhci";
+ clocks = <&tegra_car TEGRA210_CLK_SDMMC4>,
+ <&tegra_car TEGRA210_CLK_SDMMC_LEGACY>;
+ clock-names = "sdhci", "tmclk";
resets = <&tegra_car 15>;
reset-names = "sdhci";
pinctrl-names = "sdmmc-3v3-drv", "sdmmc-1v8-drv";
ti,intr-trigger-type = <1>;
interrupt-controller;
interrupt-parent = <&gic500>;
- #interrupt-cells = <2>;
+ #interrupt-cells = <1>;
ti,sci = <&dmsc>;
- ti,sci-dst-id = <56>;
- ti,sci-rm-range-girq = <0x1>;
+ ti,sci-dev-id = <100>;
+ ti,interrupt-ranges = <0 392 32>;
};
main_navss {
ti,intr-trigger-type = <4>;
interrupt-controller;
interrupt-parent = <&gic500>;
- #interrupt-cells = <2>;
+ #interrupt-cells = <1>;
ti,sci = <&dmsc>;
- ti,sci-dst-id = <56>;
- ti,sci-rm-range-girq = <0x0>, <0x2>;
+ ti,sci-dev-id = <182>;
+ ti,interrupt-ranges = <0 64 64>,
+ <64 448 64>;
};
inta_main_udmass: interrupt-controller@33d00000 {
msi-controller;
ti,sci = <&dmsc>;
ti,sci-dev-id = <179>;
- ti,sci-rm-range-vint = <0x0>;
- ti,sci-rm-range-global-event = <0x1>;
+ ti,interrupt-ranges = <0 0 256>;
};
secure_proxy_main: mailbox@32c00000 {
<0x0 0x33000000 0x0 0x40000>;
reg-names = "rt", "fifos", "proxy_gcfg", "proxy_target";
ti,num-rings = <818>;
- ti,sci-rm-range-gp-rings = <0x2>; /* GP ring range */
+ ti,sci-rm-range-gp-rings = <0x1>; /* GP ring range */
ti,dma-ring-reset-quirk;
ti,sci = <&dmsc>;
ti,sci-dev-id = <187>;
ti,sci-dev-id = <188>;
ti,ringacc = <&ringacc>;
- ti,sci-rm-range-tchan = <0x1>, /* TX_HCHAN */
- <0x2>; /* TX_CHAN */
- ti,sci-rm-range-rchan = <0x4>, /* RX_HCHAN */
- <0x5>; /* RX_CHAN */
- ti,sci-rm-range-rflow = <0x6>; /* GP RFLOW */
+ ti,sci-rm-range-tchan = <0xf>, /* TX_HCHAN */
+ <0xd>; /* TX_CHAN */
+ ti,sci-rm-range-rchan = <0xb>, /* RX_HCHAN */
+ <0xa>; /* RX_CHAN */
+ ti,sci-rm-range-rflow = <0x0>; /* GP RFLOW */
};
cpts@310d0000 {
reg-names = "cpts";
clocks = <&main_cpts_mux>;
clock-names = "cpts";
- interrupts-extended = <&intr_main_navss 163 0>;
+ interrupts-extended = <&intr_main_navss 391>;
interrupt-names = "cpts";
ti,cpts-periodic-outputs = <6>;
ti,cpts-ext-ts-inputs = <8>;
gpio-controller;
#gpio-cells = <2>;
interrupt-parent = <&intr_main_gpio>;
- interrupts = <57 256>, <57 257>, <57 258>, <57 259>, <57 260>,
- <57 261>;
+ interrupts = <192>, <193>, <194>, <195>, <196>, <197>;
interrupt-controller;
#interrupt-cells = <2>;
ti,ngpio = <96>;
gpio-controller;
#gpio-cells = <2>;
interrupt-parent = <&intr_main_gpio>;
- interrupts = <58 256>, <58 257>, <58 258>, <58 259>, <58 260>,
- <58 261>;
+ interrupts = <200>, <201>, <202>, <203>, <204>, <205>;
interrupt-controller;
#interrupt-cells = <2>;
ti,ngpio = <90>;
<0x0 0x2a500000 0x0 0x40000>;
reg-names = "rt", "fifos", "proxy_gcfg", "proxy_target";
ti,num-rings = <286>;
- ti,sci-rm-range-gp-rings = <0x2>; /* GP ring range */
+ ti,sci-rm-range-gp-rings = <0x1>; /* GP ring range */
ti,dma-ring-reset-quirk;
ti,sci = <&dmsc>;
ti,sci-dev-id = <195>;
ti,sci-dev-id = <194>;
ti,ringacc = <&mcu_ringacc>;
- ti,sci-rm-range-tchan = <0x1>, /* TX_HCHAN */
- <0x2>; /* TX_CHAN */
- ti,sci-rm-range-rchan = <0x3>, /* RX_HCHAN */
- <0x4>; /* RX_CHAN */
- ti,sci-rm-range-rflow = <0x5>; /* GP RFLOW */
+ ti,sci-rm-range-tchan = <0xf>, /* TX_HCHAN */
+ <0xd>; /* TX_CHAN */
+ ti,sci-rm-range-rchan = <0xb>, /* RX_HCHAN */
+ <0xa>; /* RX_CHAN */
+ ti,sci-rm-range-rflow = <0x0>; /* GP RFLOW */
};
};
ti,intr-trigger-type = <1>;
interrupt-controller;
interrupt-parent = <&gic500>;
- #interrupt-cells = <2>;
+ #interrupt-cells = <1>;
ti,sci = <&dmsc>;
- ti,sci-dst-id = <56>;
- ti,sci-rm-range-girq = <0x4>;
+ ti,sci-dev-id = <156>;
+ ti,interrupt-ranges = <0 712 16>;
};
wkup_gpio0: wkup_gpio0@42110000 {
gpio-controller;
#gpio-cells = <2>;
interrupt-parent = <&intr_wkup_gpio>;
- interrupts = <59 128>, <59 129>, <59 130>, <59 131>;
+ interrupts = <60>, <61>, <62>, <63>;
interrupt-controller;
#interrupt-cells = <2>;
ti,ngpio = <56>;
};
&mailbox0_cluster0 {
- interrupts = <164 0>;
+ interrupts = <436>;
mbox_mcu_r5fss0_core0: mbox-mcu-r5fss0-core0 {
ti,mbox-tx = <1 0 0>;
};
&mailbox0_cluster1 {
- interrupts = <165 0>;
+ interrupts = <432>;
mbox_mcu_r5fss0_core1: mbox-mcu-r5fss0-core1 {
ti,mbox-tx = <1 0 0>;
};
&mailbox0_cluster0 {
- interrupts = <214 0>;
+ interrupts = <436>;
mbox_mcu_r5fss0_core0: mbox-mcu-r5fss0-core0 {
ti,mbox-rx = <0 0 0>;
};
&mailbox0_cluster1 {
- interrupts = <215 0>;
+ interrupts = <432>;
mbox_main_r5fss0_core0: mbox-main-r5fss0-core0 {
ti,mbox-rx = <0 0 0>;
};
&mailbox0_cluster2 {
- interrupts = <216 0>;
+ interrupts = <428>;
mbox_main_r5fss1_core0: mbox-main-r5fss1-core0 {
ti,mbox-rx = <0 0 0>;
};
&mailbox0_cluster3 {
- interrupts = <217 0>;
+ interrupts = <424>;
mbox_c66_0: mbox-c66-0 {
ti,mbox-rx = <0 0 0>;
};
&mailbox0_cluster4 {
- interrupts = <218 0>;
+ interrupts = <420>;
mbox_c71_0: mbox-c71-0 {
ti,mbox-rx = <0 0 0>;
ti,intr-trigger-type = <1>;
interrupt-controller;
interrupt-parent = <&gic500>;
- #interrupt-cells = <2>;
+ #interrupt-cells = <1>;
ti,sci = <&dmsc>;
- ti,sci-dst-id = <14>;
- ti,sci-rm-range-girq = <0x1>;
+ ti,sci-dev-id = <131>;
+ ti,interrupt-ranges = <8 392 56>;
};
main_navss {
ti,intr-trigger-type = <4>;
interrupt-controller;
interrupt-parent = <&gic500>;
- #interrupt-cells = <2>;
+ #interrupt-cells = <1>;
ti,sci = <&dmsc>;
- ti,sci-dst-id = <14>;
- ti,sci-rm-range-girq = <0>, <2>;
+ ti,sci-dev-id = <213>;
+ ti,interrupt-ranges = <0 64 64>,
+ <64 448 64>,
+ <128 672 64>;
};
main_udmass_inta: interrupt-controller@33d00000 {
msi-controller;
ti,sci = <&dmsc>;
ti,sci-dev-id = <209>;
- ti,sci-rm-range-vint = <0xa>;
- ti,sci-rm-range-global-event = <0xd>;
+ ti,interrupt-ranges = <0 0 256>;
};
secure_proxy_main: mailbox@32c00000 {
reg-names = "cpts";
clocks = <&k3_clks 201 1>;
clock-names = "cpts";
- interrupts-extended = <&main_navss_intr 201 0>;
+ interrupts-extended = <&main_navss_intr 391>;
interrupt-names = "cpts";
ti,cpts-periodic-outputs = <6>;
ti,cpts-ext-ts-inputs = <8>;
gpio-controller;
#gpio-cells = <2>;
interrupt-parent = <&main_gpio_intr>;
- interrupts = <105 0>, <105 1>, <105 2>, <105 3>,
- <105 4>, <105 5>, <105 6>, <105 7>;
+ interrupts = <256>, <257>, <258>, <259>,
+ <260>, <261>, <262>, <263>;
interrupt-controller;
#interrupt-cells = <2>;
ti,ngpio = <128>;
gpio-controller;
#gpio-cells = <2>;
interrupt-parent = <&main_gpio_intr>;
- interrupts = <106 0>, <106 1>, <106 2>;
+ interrupts = <288>, <289>, <290>;
interrupt-controller;
#interrupt-cells = <2>;
ti,ngpio = <36>;
gpio-controller;
#gpio-cells = <2>;
interrupt-parent = <&main_gpio_intr>;
- interrupts = <107 0>, <107 1>, <107 2>, <107 3>,
- <107 4>, <107 5>, <107 6>, <107 7>;
+ interrupts = <264>, <265>, <266>, <267>,
+ <268>, <269>, <270>, <271>;
interrupt-controller;
#interrupt-cells = <2>;
ti,ngpio = <128>;
gpio-controller;
#gpio-cells = <2>;
interrupt-parent = <&main_gpio_intr>;
- interrupts = <108 0>, <108 1>, <108 2>;
+ interrupts = <292>, <293>, <294>;
interrupt-controller;
#interrupt-cells = <2>;
ti,ngpio = <36>;
gpio-controller;
#gpio-cells = <2>;
interrupt-parent = <&main_gpio_intr>;
- interrupts = <109 0>, <109 1>, <109 2>, <109 3>,
- <109 4>, <109 5>, <109 6>, <109 7>;
+ interrupts = <272>, <273>, <274>, <275>,
+ <276>, <277>, <278>, <279>;
interrupt-controller;
#interrupt-cells = <2>;
ti,ngpio = <128>;
gpio-controller;
#gpio-cells = <2>;
interrupt-parent = <&main_gpio_intr>;
- interrupts = <110 0>, <110 1>, <110 2>;
+ interrupts = <296>, <297>, <298>;
interrupt-controller;
#interrupt-cells = <2>;
ti,ngpio = <36>;
gpio-controller;
#gpio-cells = <2>;
interrupt-parent = <&main_gpio_intr>;
- interrupts = <111 0>, <111 1>, <111 2>, <111 3>,
- <111 4>, <111 5>, <111 6>, <111 7>;
+ interrupts = <280>, <281>, <282>, <283>,
+ <284>, <285>, <286>, <287>;
interrupt-controller;
#interrupt-cells = <2>;
ti,ngpio = <128>;
gpio-controller;
#gpio-cells = <2>;
interrupt-parent = <&main_gpio_intr>;
- interrupts = <112 0>, <112 1>, <112 2>;
+ interrupts = <300>, <301>, <302>;
interrupt-controller;
#interrupt-cells = <2>;
ti,ngpio = <36>;
ti,intr-trigger-type = <1>;
interrupt-controller;
interrupt-parent = <&gic500>;
- #interrupt-cells = <2>;
+ #interrupt-cells = <1>;
ti,sci = <&dmsc>;
- ti,sci-dst-id = <14>;
- ti,sci-rm-range-girq = <0x5>;
+ ti,sci-dev-id = <137>;
+ ti,interrupt-ranges = <16 960 16>;
};
wkup_gpio0: gpio@42110000 {
gpio-controller;
#gpio-cells = <2>;
interrupt-parent = <&wkup_gpio_intr>;
- interrupts = <113 0>, <113 1>, <113 2>,
- <113 3>, <113 4>, <113 5>;
+ interrupts = <103>, <104>, <105>, <106>, <107>, <108>;
interrupt-controller;
#interrupt-cells = <2>;
ti,ngpio = <84>;
gpio-controller;
#gpio-cells = <2>;
interrupt-parent = <&wkup_gpio_intr>;
- interrupts = <114 0>, <114 1>, <114 2>,
- <114 3>, <114 4>, <114 5>;
+ interrupts = <112>, <113>, <114>, <115>, <116>, <117>;
interrupt-controller;
#interrupt-cells = <2>;
ti,ngpio = <84>;
#ifndef __ASM_COMPILER_H
#define __ASM_COMPILER_H
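+/*
+ * ARM64_ASM_ARCH is defined by arch/arm64/Makefile (via -DARM64_ASM_ARCH)
+ * when the assembler supports an architecture level beyond the compiler
+ * baseline; inline asm that needs such instructions prepends this
+ * ".arch" directive.
+ */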
+#ifdef ARM64_ASM_ARCH
+#define ARM64_ASM_PREAMBLE ".arch " ARM64_ASM_ARCH "\n"
+#else
+#define ARM64_ASM_PREAMBLE
+#endif
+
/*
* The EL0/EL1 pointer bits used by a pointer authentication code.
* This is dependent on TBI0/TBI1 being enabled, or bits 63:56 would also apply.
return res;
}
+static inline int arch_irqs_disabled(void)
+{
+ return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
static inline unsigned long arch_local_irq_save(void)
{
unsigned long flags;
* IMO: Override CPSR.I and enable signaling with VI
* FMO: Override CPSR.F and enable signaling with VF
* SWIO: Turn set/way invalidates into set/way clean+invalidate
+ * PTW: Take a stage2 fault if a stage1 walk steps in device memory
*/
#define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \
HCR_BSU_IS | HCR_FB | HCR_TAC | \
HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \
- HCR_FMO | HCR_IMO)
+ HCR_FMO | HCR_IMO | HCR_PTW )
#define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF)
#define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK)
#define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
*__hyp_this_cpu_ptr(sym); \
})
+#define __KVM_EXTABLE(from, to) \
+ " .pushsection __kvm_ex_table, \"a\"\n" \
+ " .align 3\n" \
+ " .long (" #from " - .), (" #to " - .)\n" \
+ " .popsection\n"
+
+
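+/*
+ * Run an AT (address translation) instruction at EL2. If the AT takes
+ * an unexpected exception, the __kvm_ex_table fixup at label 2 restores
+ * the SPSR_EL2/ELR_EL2 values saved above (the exception entry clobbers
+ * them) and the macro evaluates to -EFAULT instead of 0.
+ */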
+#define __kvm_at(at_op, addr) \
+( { \
+ int __kvm_at_err = 0; \
+ u64 spsr, elr; \
+ asm volatile( \
+ " mrs %1, spsr_el2\n" \
+ " mrs %2, elr_el2\n" \
+ "1: at "at_op", %3\n" \
+ " isb\n" \
+ " b 9f\n" \
+ "2: msr spsr_el2, %1\n" \
+ " msr elr_el2, %2\n" \
+ " mov %w0, %4\n" \
+ "9:\n" \
+ __KVM_EXTABLE(1b, 2b) \
+ : "+r" (__kvm_at_err), "=&r" (spsr), "=&r" (elr) \
+ : "r" (addr), "i" (-EFAULT)); \
+ __kvm_at_err; \
+} )
+
+
#else /* __ASSEMBLY__ */
.macro hyp_adr_this_cpu reg, sym, tmp
ldr \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm
+/*
+ * KVM extable for unexpected exceptions.
+ * In the same format as _asm_extable, but output to a different section so that
+ * it can be mapped to EL2. The KVM version is not sorted. The caller must
+ * ensure:
+ * x18 has the hypervisor value to allow any Shadow-Call-Stack instrumented
+ * code to write to it, and that SPSR_EL2 and ELR_EL2 are restored by the fixup.
+ */
+.macro _kvm_extable, from, to
+ .pushsection __kvm_ex_table, "a"
+ .align 3
+ .long (\from - .), (\to - .)
+ .popsection
+.endm
+
#endif
#endif /* __ARM_KVM_ASM_H__ */
#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva_range(struct kvm *kvm,
- unsigned long start, unsigned long end);
+ unsigned long start, unsigned long end, unsigned flags);
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
* not. The macros handles invoking the asm with or without the
* register argument as appropriate.
*/
-#define __TLBI_0(op, arg) asm ("tlbi " #op "\n" \
+#define __TLBI_0(op, arg) asm (ARM64_ASM_PREAMBLE \
+ "tlbi " #op "\n" \
ALTERNATIVE("nop\n nop", \
"dsb ish\n tlbi " #op, \
ARM64_WORKAROUND_REPEAT_TLBI, \
CONFIG_ARM64_WORKAROUND_REPEAT_TLBI) \
: : )
-#define __TLBI_1(op, arg) asm ("tlbi " #op ", %0\n" \
+#define __TLBI_1(op, arg) asm (ARM64_ASM_PREAMBLE \
+ "tlbi " #op ", %0\n" \
ALTERNATIVE("nop\n nop", \
"dsb ish\n tlbi " #op ", %0", \
ARM64_WORKAROUND_REPEAT_TLBI, \
*/
if (memblock_is_map_memory(phys))
return (void __iomem *)__phys_to_virt(phys);
- /* fall through */
+ fallthrough;
default:
if (region->attribute & EFI_MEMORY_WB)
.desc = "ARM erratum 1418040",
.capability = ARM64_WORKAROUND_1418040,
ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
+ .type = (ARM64_CPUCAP_SCOPE_LOCAL_CPU |
+ ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU),
},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
case FTR_HIGHER_OR_ZERO_SAFE:
if (!cur || !new)
break;
- /* Fallthrough */
+ fallthrough;
case FTR_HIGHER_SAFE:
ret = new > cur ? new : cur;
break;
set_bit(ICACHEF_VPIPT, &__icache_flags);
break;
default:
- /* Fallthrough */
case ICACHE_POLICY_VIPT:
/* Assume aliasing */
set_bit(ICACHEF_ALIASING, &__icache_flags);
stp x28, x29, [sp, #16 * 14]
.if \el == 0
- .if \regsize == 32
- /*
- * If we're returning from a 32-bit task on a system affected by
- * 1418040 then re-enable userspace access to the virtual counter.
- */
-#ifdef CONFIG_ARM64_ERRATUM_1418040
-alternative_if ARM64_WORKAROUND_1418040
- mrs x0, cntkctl_el1
- orr x0, x0, #2 // ARCH_TIMER_USR_VCT_ACCESS_EN
- msr cntkctl_el1, x0
-alternative_else_nop_endif
-#endif
- .endif
clear_gp_regs
mrs x21, sp_el0
ldr_this_cpu tsk, __entry_task, x20
tst x22, #PSR_MODE32_BIT // native task?
b.eq 3f
-#ifdef CONFIG_ARM64_ERRATUM_1418040
-alternative_if ARM64_WORKAROUND_1418040
- mrs x0, cntkctl_el1
- bic x0, x0, #2 // ARCH_TIMER_USR_VCT_ACCESS_EN
- msr cntkctl_el1, x0
-alternative_else_nop_endif
-#endif
-
#ifdef CONFIG_ARM64_ERRATUM_845719
alternative_if ARM64_WORKAROUND_845719
#ifdef CONFIG_PID_IN_CONTEXTIDR
* level.
*/
enable_debug_monitors(dbg_el);
- /* Fall through */
+ fallthrough;
case HW_BREAKPOINT_RESTORE:
/* Setup the address register. */
write_wb_reg(val_reg, i, info->address);
if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2)
break;
- /* Fallthrough */
+ fallthrough;
case 3:
/* Allow single byte watchpoint. */
if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
break;
- /* Fallthrough */
+ fallthrough;
default:
return -EINVAL;
}
KVM_NVHE_ALIAS(gic_pmr_sync);
#endif
+/* EL2 exception handling */
+KVM_NVHE_ALIAS(__start___kvm_ex_table);
+KVM_NVHE_ALIAS(__stop___kvm_ex_table);
+
#endif /* CONFIG_KVM */
#endif /* __ARM64_KERNEL_IMAGE_VARS_H */
mod->arch.core.plt_shndx = i;
else if (!strcmp(secstrings + sechdrs[i].sh_name, ".init.plt"))
mod->arch.init.plt_shndx = i;
- else if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) &&
- !strcmp(secstrings + sechdrs[i].sh_name,
+ else if (!strcmp(secstrings + sechdrs[i].sh_name,
".text.ftrace_trampoline"))
tramp = sechdrs + i;
else if (sechdrs[i].sh_type == SHT_SYMTAB)
/* MOVW instruction relocations. */
case R_AARCH64_MOVW_UABS_G0_NC:
overflow_check = false;
- /* Fall through */
+ fallthrough;
case R_AARCH64_MOVW_UABS_G0:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
AARCH64_INSN_IMM_MOVKZ);
break;
case R_AARCH64_MOVW_UABS_G1_NC:
overflow_check = false;
- /* Fall through */
+ fallthrough;
case R_AARCH64_MOVW_UABS_G1:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
AARCH64_INSN_IMM_MOVKZ);
break;
case R_AARCH64_MOVW_UABS_G2_NC:
overflow_check = false;
- /* Fall through */
+ fallthrough;
case R_AARCH64_MOVW_UABS_G2:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
AARCH64_INSN_IMM_MOVKZ);
break;
case R_AARCH64_ADR_PREL_PG_HI21_NC:
overflow_check = false;
- /* Fall through */
+ fallthrough;
case R_AARCH64_ADR_PREL_PG_HI21:
ovf = reloc_insn_adrp(me, sechdrs, loc, val);
if (ovf && ovf != -ERANGE)
* This should do all the clock switching and wait for interrupt
* tricks
*/
- trace_cpu_idle_rcuidle(1, smp_processor_id());
cpu_do_idle();
local_irq_enable();
- trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}
#ifdef CONFIG_HOTPLUG_CPU
}
/*
+ * ARM erratum 1418040 handling, affecting the 32bit view of CNTVCT.
+ * Assuming the virtual counter is enabled at the beginning of times:
+ *
+ * - disable access when switching from a 64bit task to a 32bit task
+ * - enable access when switching from a 32bit task to a 64bit task
+ */
+static void erratum_1418040_thread_switch(struct task_struct *prev,
+ struct task_struct *next)
+{
+ bool prev32, next32;
+ u64 val;
+
+ if (!(IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040) &&
+ cpus_have_const_cap(ARM64_WORKAROUND_1418040)))
+ return;
+
+ prev32 = is_compat_thread(task_thread_info(prev));
+ next32 = is_compat_thread(task_thread_info(next));
+
+ if (prev32 == next32)
+ return;
+
+ val = read_sysreg(cntkctl_el1);
+
+ if (!next32)
+ val |= ARCH_TIMER_USR_VCT_ACCESS_EN;
+ else
+ val &= ~ARCH_TIMER_USR_VCT_ACCESS_EN;
+
+ write_sysreg(val, cntkctl_el1);
+}
+
+/*
* Thread switching.
*/
__notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
entry_task_switch(next);
uao_thread_switch(next);
ssbs_thread_switch(next);
+ erratum_1418040_thread_switch(prev, next);
/*
* Complete any pending TLB or cache maintenance on this CPU in case
{
return __cpu_logical_map[cpu];
}
-EXPORT_SYMBOL_GPL(cpu_logical_map);
void __init __no_sanitize_address setup_arch(char **cmdline_p)
{
break;
}
pr_crit("CPU%u: may not have shut down cleanly\n", cpu);
- /* Fall through */
+ fallthrough;
case CPU_STUCK_IN_KERNEL:
pr_crit("CPU%u: is stuck in kernel\n", cpu);
if (status & CPU_STUCK_REASON_52_BIT_VA)
cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
# Install commands for the unstripped file
-quiet_cmd_vdso_install = INSTALL $@
+quiet_cmd_vdso_install = INSTALL32 $@
cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/vdso32.so
vdso.so: $(obj)/vdso.so.dbg
jiffies = jiffies_64;
+
+#define HYPERVISOR_EXTABLE \
+ . = ALIGN(SZ_8); \
+ __start___kvm_ex_table = .; \
+ *(__kvm_ex_table) \
+ __stop___kvm_ex_table = .;
+
#define HYPERVISOR_TEXT \
/* \
* Align to 4 KB so that \
__hyp_idmap_text_end = .; \
__hyp_text_start = .; \
*(.hyp.text) \
+ HYPERVISOR_EXTABLE \
__hyp_text_end = .;
#define IDMAP_TEXT \
return -ENODEV;
}
+ if (cpus_have_final_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE))
+ kvm_info("Guests without required CPU erratum workarounds can deadlock system!\n" \
+ "Only trusted guests should be used on this system.\n");
+
for_each_online_cpu(cpu) {
smp_call_function_single(cpu, check_kvm_target_cpu, &ret, 1);
if (ret < 0) {
switch (ESR_ELx_EC(esr)) {
case ESR_ELx_EC_WATCHPT_LOW:
run->debug.arch.far = vcpu->arch.fault.far_el2;
- /* fall through */
+ fallthrough;
case ESR_ELx_EC_SOFTSTP_LOW:
case ESR_ELx_EC_BREAKPT_LOW:
case ESR_ELx_EC_BKPT32:
// This is our single instruction exception window. A pending
// SError is guaranteed to occur at the earliest when we unmask
// it, and at the latest just after the ISB.
- .global abort_guest_exit_start
abort_guest_exit_start:
isb
- .global abort_guest_exit_end
abort_guest_exit_end:
msr daifset, #4 // Mask aborts
+ ret
+
+ _kvm_extable abort_guest_exit_start, 9997f
+ _kvm_extable abort_guest_exit_end, 9997f
+9997:
+ msr daifset, #4 // Mask aborts
+ mov x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
- // If the exception took place, restore the EL1 exception
- // context so that we can report some information.
- // Merge the exception code with the SError pending bit.
- tbz x0, #ARM_EXIT_WITH_SERROR_BIT, 1f
+ // restore the EL1 exception context so that we can report some
+ // information. Merge the exception code with the SError pending bit.
msr elr_el2, x2
msr esr_el2, x3
msr spsr_el2, x4
#include <asm/kvm_mmu.h>
#include <asm/mmu.h>
+.macro save_caller_saved_regs_vect
+ /* x0 and x1 were saved in the vector entry */
+ stp x2, x3, [sp, #-16]!
+ stp x4, x5, [sp, #-16]!
+ stp x6, x7, [sp, #-16]!
+ stp x8, x9, [sp, #-16]!
+ stp x10, x11, [sp, #-16]!
+ stp x12, x13, [sp, #-16]!
+ stp x14, x15, [sp, #-16]!
+ stp x16, x17, [sp, #-16]!
+.endm
+
+.macro restore_caller_saved_regs_vect
+ ldp x16, x17, [sp], #16
+ ldp x14, x15, [sp], #16
+ ldp x12, x13, [sp], #16
+ ldp x10, x11, [sp], #16
+ ldp x8, x9, [sp], #16
+ ldp x6, x7, [sp], #16
+ ldp x4, x5, [sp], #16
+ ldp x2, x3, [sp], #16
+ ldp x0, x1, [sp], #16
+.endm
+
.text
.macro do_el2_call
b __guest_exit
el2_sync:
- /* Check for illegal exception return, otherwise panic */
+ /* Check for illegal exception return */
mrs x0, spsr_el2
+ tbnz x0, #20, 1f
- /* if this was something else, then panic! */
- tst x0, #PSR_IL_BIT
- b.eq __hyp_panic
+ save_caller_saved_regs_vect
+ stp x29, x30, [sp, #-16]!
+ bl kvm_unexpected_el2_exception
+ ldp x29, x30, [sp], #16
+ restore_caller_saved_regs_vect
+ eret
+
+1:
/* Let's attempt a recovery from the illegal exception return */
get_vcpu_ptr x1, x0
mov x0, #ARM_EXCEPTION_IL
el2_error:
- ldp x0, x1, [sp], #16
+ save_caller_saved_regs_vect
+ stp x29, x30, [sp, #-16]!
+
+ bl kvm_unexpected_el2_exception
+
+ ldp x29, x30, [sp], #16
+ restore_caller_saved_regs_vect
- /*
- * Only two possibilities:
- * 1) Either we come from the exit path, having just unmasked
- * PSTATE.A: change the return code to an EL2 fault, and
- * carry on, as we're already in a sane state to handle it.
- * 2) Or we come from anywhere else, and that's a bug: we panic.
- *
- * For (1), x0 contains the original return code and x1 doesn't
- * contain anything meaningful at that stage. We can reuse them
- * as temp registers.
- * For (2), who cares?
- */
- mrs x0, elr_el2
- adr x1, abort_guest_exit_start
- cmp x0, x1
- adr x1, abort_guest_exit_end
- ccmp x0, x1, #4, ne
- b.ne __hyp_panic
- mov x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
eret
sb
#define save_debug(ptr,reg,nr) \
switch (nr) { \
case 15: ptr[15] = read_debug(reg, 15); \
- /* Fall through */ \
+ fallthrough; \
case 14: ptr[14] = read_debug(reg, 14); \
- /* Fall through */ \
+ fallthrough; \
case 13: ptr[13] = read_debug(reg, 13); \
- /* Fall through */ \
+ fallthrough; \
case 12: ptr[12] = read_debug(reg, 12); \
- /* Fall through */ \
+ fallthrough; \
case 11: ptr[11] = read_debug(reg, 11); \
- /* Fall through */ \
+ fallthrough; \
case 10: ptr[10] = read_debug(reg, 10); \
- /* Fall through */ \
+ fallthrough; \
case 9: ptr[9] = read_debug(reg, 9); \
- /* Fall through */ \
+ fallthrough; \
case 8: ptr[8] = read_debug(reg, 8); \
- /* Fall through */ \
+ fallthrough; \
case 7: ptr[7] = read_debug(reg, 7); \
- /* Fall through */ \
+ fallthrough; \
case 6: ptr[6] = read_debug(reg, 6); \
- /* Fall through */ \
+ fallthrough; \
case 5: ptr[5] = read_debug(reg, 5); \
- /* Fall through */ \
+ fallthrough; \
case 4: ptr[4] = read_debug(reg, 4); \
- /* Fall through */ \
+ fallthrough; \
case 3: ptr[3] = read_debug(reg, 3); \
- /* Fall through */ \
+ fallthrough; \
case 2: ptr[2] = read_debug(reg, 2); \
- /* Fall through */ \
+ fallthrough; \
case 1: ptr[1] = read_debug(reg, 1); \
- /* Fall through */ \
+ fallthrough; \
default: ptr[0] = read_debug(reg, 0); \
}
#define restore_debug(ptr,reg,nr) \
switch (nr) { \
case 15: write_debug(ptr[15], reg, 15); \
- /* Fall through */ \
+ fallthrough; \
case 14: write_debug(ptr[14], reg, 14); \
- /* Fall through */ \
+ fallthrough; \
case 13: write_debug(ptr[13], reg, 13); \
- /* Fall through */ \
+ fallthrough; \
case 12: write_debug(ptr[12], reg, 12); \
- /* Fall through */ \
+ fallthrough; \
case 11: write_debug(ptr[11], reg, 11); \
- /* Fall through */ \
+ fallthrough; \
case 10: write_debug(ptr[10], reg, 10); \
- /* Fall through */ \
+ fallthrough; \
case 9: write_debug(ptr[9], reg, 9); \
- /* Fall through */ \
+ fallthrough; \
case 8: write_debug(ptr[8], reg, 8); \
- /* Fall through */ \
+ fallthrough; \
case 7: write_debug(ptr[7], reg, 7); \
- /* Fall through */ \
+ fallthrough; \
case 6: write_debug(ptr[6], reg, 6); \
- /* Fall through */ \
+ fallthrough; \
case 5: write_debug(ptr[5], reg, 5); \
- /* Fall through */ \
+ fallthrough; \
case 4: write_debug(ptr[4], reg, 4); \
- /* Fall through */ \
+ fallthrough; \
case 3: write_debug(ptr[3], reg, 3); \
- /* Fall through */ \
+ fallthrough; \
case 2: write_debug(ptr[2], reg, 2); \
- /* Fall through */ \
+ fallthrough; \
case 1: write_debug(ptr[1], reg, 1); \
- /* Fall through */ \
+ fallthrough; \
default: write_debug(ptr[0], reg, 0); \
}
#include <asm/barrier.h>
#include <asm/cpufeature.h>
+#include <asm/extable.h>
#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
extern const char __hyp_panic_string[];
+extern struct exception_table_entry __start___kvm_ex_table;
+extern struct exception_table_entry __stop___kvm_ex_table;
+
/* Check whether the FP regs were dirtied while in the host-side run loop: */
static inline bool update_fp_enabled(struct kvm_vcpu *vcpu)
{
* saved the guest context yet, and we may return early...
*/
par = read_sysreg(par_el1);
- asm volatile("at s1e1r, %0" : : "r" (far));
- isb();
-
- tmp = read_sysreg(par_el1);
+ if (!__kvm_at("s1e1r", far))
+ tmp = read_sysreg(par_el1);
+ else
+ tmp = SYS_PAR_EL1_F; /* back to the guest */
write_sysreg(par, par_el1);
if (unlikely(tmp & SYS_PAR_EL1_F))
#endif
}
+static inline void __kvm_unexpected_el2_exception(void)
+{
+ unsigned long addr, fixup;
+ struct kvm_cpu_context *host_ctxt;
+ struct exception_table_entry *entry, *end;
+ unsigned long elr_el2 = read_sysreg(elr_el2);
+
+ entry = hyp_symbol_addr(__start___kvm_ex_table);
+ end = hyp_symbol_addr(__stop___kvm_ex_table);
+ host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+
+ while (entry < end) {
+ addr = (unsigned long)&entry->insn + entry->insn;
+ fixup = (unsigned long)&entry->fixup + entry->fixup;
+
+ if (addr != elr_el2) {
+ entry++;
+ continue;
+ }
+
+ write_sysreg(fixup, elr_el2);
+ return;
+ }
+
+ hyp_panic(host_ctxt);
+}
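The `&entry->insn + entry->insn` arithmetic above decodes self-relative offsets; a self-contained sketch of that encoding (a user-space analogue with assumed names, not the kernel's actual types):

	/* Illustration only: self-relative entries make the table position
	 * independent, so it needs no relocation at load time. */
	struct ex_entry {
		int insn;	/* faulting address, relative to &this->insn */
		int fixup;	/* fixup address, relative to &this->fixup */
	};

	static unsigned long ex_insn_addr(const struct ex_entry *e)
	{
		return (unsigned long)&e->insn + e->insn;
	}

	static unsigned long ex_fixup_addr(const struct ex_entry *e)
	{
		return (unsigned long)&e->fixup + e->fixup;
	}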
+
#endif /* __ARM64_KVM_HYP_SWITCH_H__ */
read_sysreg(hpfar_el2), par, vcpu);
unreachable();
}
+
+asmlinkage void kvm_unexpected_el2_exception(void)
+{
+ return __kvm_unexpected_el2_exception();
+}
case 7:
cpu_if->vgic_ap0r[3] = __vgic_v3_read_ap0rn(3);
cpu_if->vgic_ap0r[2] = __vgic_v3_read_ap0rn(2);
- /* Fall through */
+ fallthrough;
case 6:
cpu_if->vgic_ap0r[1] = __vgic_v3_read_ap0rn(1);
- /* Fall through */
+ fallthrough;
default:
cpu_if->vgic_ap0r[0] = __vgic_v3_read_ap0rn(0);
}
case 7:
cpu_if->vgic_ap1r[3] = __vgic_v3_read_ap1rn(3);
cpu_if->vgic_ap1r[2] = __vgic_v3_read_ap1rn(2);
- /* Fall through */
+ fallthrough;
case 6:
cpu_if->vgic_ap1r[1] = __vgic_v3_read_ap1rn(1);
- /* Fall through */
+ fallthrough;
default:
cpu_if->vgic_ap1r[0] = __vgic_v3_read_ap1rn(0);
}
case 7:
__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[3], 3);
__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[2], 2);
- /* Fall through */
+ fallthrough;
case 6:
__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[1], 1);
- /* Fall through */
+ fallthrough;
default:
__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[0], 0);
}
case 7:
__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[3], 3);
__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[2], 2);
- /* Fall through */
+ fallthrough;
case 6:
__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[1], 1);
- /* Fall through */
+ fallthrough;
default:
__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[0], 0);
}
__hyp_call_panic(spsr, elr, par, host_ctxt);
unreachable();
}
+
+asmlinkage void kvm_unexpected_el2_exception(void)
+{
+ return __kvm_unexpected_el2_exception();
+}
* destroying the VM), otherwise another faulting VCPU may come in and mess
* with things behind our backs.
*/
-static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
+static void __unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size,
+ bool may_block)
{
struct kvm *kvm = mmu->kvm;
pgd_t *pgd;
* If the range is too large, release the kvm->mmu_lock
* to prevent starvation and lockup detector warnings.
*/
- if (next != end)
+ if (may_block && next != end)
cond_resched_lock(&kvm->mmu_lock);
} while (pgd++, addr = next, addr != end);
}
+static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
+{
+ __unmap_stage2_range(mmu, start, size, true);
+}
+
static void stage2_flush_ptes(struct kvm_s2_mmu *mmu, pmd_t *pmd,
phys_addr_t addr, phys_addr_t end)
{
static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
{
- unmap_stage2_range(&kvm->arch.mmu, gpa, size);
+ unsigned flags = *(unsigned *)data;
+ bool may_block = flags & MMU_NOTIFIER_RANGE_BLOCKABLE;
+
+ __unmap_stage2_range(&kvm->arch.mmu, gpa, size, may_block);
return 0;
}
int kvm_unmap_hva_range(struct kvm *kvm,
- unsigned long start, unsigned long end)
+ unsigned long start, unsigned long end, unsigned flags)
{
if (!kvm->arch.mmu.pgd)
return 0;
trace_kvm_unmap_hva_range(start, end);
- handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
+ handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, &flags);
return 0;
}
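On the generic side, the new `flags` argument would be fed from the notifier's blockable state; a hedged sketch of that call site (mmu_notifier_range_blockable() is the generic helper, the surrounding caller is assumed):

	/* Illustration only: plumbing the blockable bit down to the handler. */
	unsigned flags = mmu_notifier_range_blockable(range) ?
			 MMU_NOTIFIER_RANGE_BLOCKABLE : 0;

	kvm_unmap_hva_range(kvm, range->start, range->end, flags);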
default:
pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n",
smp_processor_id(), fld);
- /* Fallthrough */
+ fallthrough;
case 0:
asid = 8;
break;
regs->a4 = -EINTR;
break;
}
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOINTR:
do_restart:
regs->a4 = regs->orig_a4;
break;
}
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOINTR:
regs->a4 = regs->orig_a4;
regs->pc -= 4;
regs->a0 = -EINTR;
break;
}
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOINTR:
regs->a0 = regs->orig_a0;
regs->pc -= TRAP0_SIZE;
regs->er0 = -EINTR;
break;
}
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOINTR:
do_restart:
regs->er0 = regs->orig_er0;
}
case R_HEXAGON_HI16:
value = (value>>16) & 0xffff;
- /* fallthrough */
+ fallthrough;
case R_HEXAGON_LO16:
*location &= ~0x00c03fff;
*location |= value & 0x3fff;
regs->r00 = -EINTR;
break;
}
- /* Fall through */
+ fallthrough;
case -ERESTARTNOINTR:
regs->r06 = regs->syscall_nr;
pt_set_elr(regs, pt_elr(regs) - 4);
}
#define pgd_index pgd_index
+/*
+ * In the kernel's mapped region we know everything is in region number 5, so
+ * as an optimisation its PGD already points to the area for that region.
+ * However, this also means that we cannot use pgd_index() and we must
+ * never add the region here.
+ */
+#define pgd_offset_k(addr) \
+ (init_mm.pgd + (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)))
+
/* Look up a pgd entry in the gate area. On IA-64, the gate-area
resides in the kernel-mapped segment, hence we use pgd_offset_k()
here. */
case DIE_INIT_MONARCH_LEAVE:
if (!kdump_freeze_monarch)
break;
- /* fall through */
+ fallthrough;
case DIE_INIT_SLAVE_LEAVE:
case DIE_INIT_MONARCH_ENTER:
case DIE_MCA_RENDZVOUS_LEAVE:
* Architecture-specific kernel symbols
*/
-#ifdef CONFIG_VIRTUAL_MEM_MAP
+#if defined(CONFIG_VIRTUAL_MEM_MAP) || defined(CONFIG_DISCONTIGMEM)
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/memblock.h>
}
} else if (!is_internal(mod, val))
val = get_plt(mod, location, val, &ok);
- /* FALL THROUGH */
+ fallthrough;
default:
val -= bundle(location);
break;
break;
case PFM_CTX_LOADED:
if (CTX_HAS_SMPL(ctx) && fmt->fmt_restart_active) break;
- /* fall through */
+ fallthrough;
case PFM_CTX_UNLOADED:
case PFM_CTX_ZOMBIE:
DPRINT(("invalid state=%d\n", state));
/* note: scr->pt.r10 is already -1 */
break;
}
- /*FALLTHRU*/
+ fallthrough;
case ERESTARTNOINTR:
ia64_decrement_ip(&scr->pt);
restart = 0; /* don't restart twice if handle_signal() fails... */
if (u.insn.x)
/* oops, really a semaphore op (cmpxchg, etc) */
goto failure;
- /*FALLTHRU*/
+ fallthrough;
case LDS_IMM_OP:
case LDSA_IMM_OP:
case LDFS_OP:
if (u.insn.x)
/* oops, really a semaphore op (cmpxchg, etc) */
goto failure;
- /*FALLTHRU*/
+ fallthrough;
case LD_IMM_OP:
case LDA_IMM_OP:
case LDBIAS_IMM_OP:
if (u.insn.x)
/* oops, really a semaphore op (cmpxchg, etc) */
goto failure;
- /*FALLTHRU*/
+ fallthrough;
case ST_IMM_OP:
case STREL_IMM_OP:
ret = emulate_store_int(ifa, u.insn, regs);
return 0;
}
}
- /* fall through */
+ fallthrough;
case UNW_NAT_NONE:
dummy_nat = 0;
nat_addr = &dummy_nat;
self_test_last_rcv = jiffies;
break;
}
- /* FALL THROUGH */
+ fallthrough;
default:
break_flag = scancode & BREAK_MASK;
regs->d0 = -EINTR;
break;
}
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOINTR:
do_restart:
regs->d0 = regs->orig_d0;
*/
platform_device_register_simple("mac_scsi", 1,
mac_scsi_duo_rsrc, ARRAY_SIZE(mac_scsi_duo_rsrc));
- /* fall through */
+ fallthrough;
case MAC_SCSI_OLD:
/* Addresses from Developer Notes for Duo System,
* PowerBook 180 & 160, 140 & 170, Macintosh IIsi
/* Allow NuBus slots 9 through F. */
via2[vDirA] &= 0x80 | ~(1 << irq_idx);
}
- /* fall through */
+ fallthrough;
case MAC_VIA_IICI:
via_irq_enable(irq);
break;
pr_debug("do_page_fault: good_area\n");
switch (error_code & 3) {
default: /* 3: write, present */
- /* fall through */
+ fallthrough;
case 2: /* write, not present */
if (!(vma->vm_flags & VM_WRITE))
goto acc_err;
regs->r3 = -EINTR;
break;
}
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOINTR:
do_restart:
/* offset of 4 bytes to re-execute trap (brki) instruction */
EXPORT_SYMBOL(memory_size);
unsigned long lowmem_size;
+EXPORT_SYMBOL(min_low_pfn);
+EXPORT_SYMBOL(max_low_pfn);
+
#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
return !(flags & 1);
}
+static inline int arch_irqs_disabled(void)
+{
+ return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
#endif /* #ifndef __ASSEMBLY__ */
/*
#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva_range(struct kvm *kvm,
- unsigned long start, unsigned long end);
+ unsigned long start, unsigned long end, unsigned flags);
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
#define cpu_has_counter 1
#define cpu_has_dc_aliases (PAGE_SIZE < 0x4000)
#define cpu_has_divec 0
-#define cpu_has_ejtag 0
#define cpu_has_inclusive_pcaches 1
#define cpu_has_llsc 1
#define cpu_has_mcheck 0
#define cpu_has_veic 0
#define cpu_has_vint 0
#define cpu_has_vtag_icache 0
-#define cpu_has_watch 1
#define cpu_has_wsbh 1
#define cpu_has_ic_fills_f_dc 1
#define cpu_hwrena_impl_bits 0xc0000000
#ifndef __ASM_MACH_LOONGSON64_IRQ_H_
#define __ASM_MACH_LOONGSON64_IRQ_H_
-#include <boot_param.h>
-
/* cpu core interrupt numbers */
#define NR_IRQS_LEGACY 16
#define NR_MIPS_CPU_IRQS 8
#ifndef _ASM_MACH_LOONGSON64_MMZONE_H
#define _ASM_MACH_LOONGSON64_MMZONE_H
-#include <boot_param.h>
#define NODE_ADDRSPACE_SHIFT 44
#define NODE0_ADDRSPACE_OFFSET 0x000000000000UL
#define NODE1_ADDRSPACE_OFFSET 0x100000000000UL
BUILD_BUG_ON(!__builtin_constant_p(times)); \
\
switch (times) { \
- case 32: fn(__VA_ARGS__); /* fall through */ \
- case 31: fn(__VA_ARGS__); /* fall through */ \
- case 30: fn(__VA_ARGS__); /* fall through */ \
- case 29: fn(__VA_ARGS__); /* fall through */ \
- case 28: fn(__VA_ARGS__); /* fall through */ \
- case 27: fn(__VA_ARGS__); /* fall through */ \
- case 26: fn(__VA_ARGS__); /* fall through */ \
- case 25: fn(__VA_ARGS__); /* fall through */ \
- case 24: fn(__VA_ARGS__); /* fall through */ \
- case 23: fn(__VA_ARGS__); /* fall through */ \
- case 22: fn(__VA_ARGS__); /* fall through */ \
- case 21: fn(__VA_ARGS__); /* fall through */ \
- case 20: fn(__VA_ARGS__); /* fall through */ \
- case 19: fn(__VA_ARGS__); /* fall through */ \
- case 18: fn(__VA_ARGS__); /* fall through */ \
- case 17: fn(__VA_ARGS__); /* fall through */ \
- case 16: fn(__VA_ARGS__); /* fall through */ \
- case 15: fn(__VA_ARGS__); /* fall through */ \
- case 14: fn(__VA_ARGS__); /* fall through */ \
- case 13: fn(__VA_ARGS__); /* fall through */ \
- case 12: fn(__VA_ARGS__); /* fall through */ \
- case 11: fn(__VA_ARGS__); /* fall through */ \
- case 10: fn(__VA_ARGS__); /* fall through */ \
- case 9: fn(__VA_ARGS__); /* fall through */ \
- case 8: fn(__VA_ARGS__); /* fall through */ \
- case 7: fn(__VA_ARGS__); /* fall through */ \
- case 6: fn(__VA_ARGS__); /* fall through */ \
- case 5: fn(__VA_ARGS__); /* fall through */ \
- case 4: fn(__VA_ARGS__); /* fall through */ \
- case 3: fn(__VA_ARGS__); /* fall through */ \
- case 2: fn(__VA_ARGS__); /* fall through */ \
- case 1: fn(__VA_ARGS__); /* fall through */ \
+ case 32: fn(__VA_ARGS__); fallthrough; \
+ case 31: fn(__VA_ARGS__); fallthrough; \
+ case 30: fn(__VA_ARGS__); fallthrough; \
+ case 29: fn(__VA_ARGS__); fallthrough; \
+ case 28: fn(__VA_ARGS__); fallthrough; \
+ case 27: fn(__VA_ARGS__); fallthrough; \
+ case 26: fn(__VA_ARGS__); fallthrough; \
+ case 25: fn(__VA_ARGS__); fallthrough; \
+ case 24: fn(__VA_ARGS__); fallthrough; \
+ case 23: fn(__VA_ARGS__); fallthrough; \
+ case 22: fn(__VA_ARGS__); fallthrough; \
+ case 21: fn(__VA_ARGS__); fallthrough; \
+ case 20: fn(__VA_ARGS__); fallthrough; \
+ case 19: fn(__VA_ARGS__); fallthrough; \
+ case 18: fn(__VA_ARGS__); fallthrough; \
+ case 17: fn(__VA_ARGS__); fallthrough; \
+ case 16: fn(__VA_ARGS__); fallthrough; \
+ case 15: fn(__VA_ARGS__); fallthrough; \
+ case 14: fn(__VA_ARGS__); fallthrough; \
+ case 13: fn(__VA_ARGS__); fallthrough; \
+ case 12: fn(__VA_ARGS__); fallthrough; \
+ case 11: fn(__VA_ARGS__); fallthrough; \
+ case 10: fn(__VA_ARGS__); fallthrough; \
+ case 9: fn(__VA_ARGS__); fallthrough; \
+ case 8: fn(__VA_ARGS__); fallthrough; \
+ case 7: fn(__VA_ARGS__); fallthrough; \
+ case 6: fn(__VA_ARGS__); fallthrough; \
+ case 5: fn(__VA_ARGS__); fallthrough; \
+ case 4: fn(__VA_ARGS__); fallthrough; \
+ case 3: fn(__VA_ARGS__); fallthrough; \
+ case 2: fn(__VA_ARGS__); fallthrough; \
+ case 1: fn(__VA_ARGS__); fallthrough; \
case 0: break; \
\
default: \
(base_id >= 64 && base_id < 90) ||
(base_id >= 128 && base_id < 164) ||
(base_id >= 192 && base_id < 200) ||
- (base_id >= 256 && base_id < 274) ||
- (base_id >= 320 && base_id < 358) ||
+ (base_id >= 256 && base_id < 275) ||
+ (base_id >= 320 && base_id < 361) ||
(base_id >= 384 && base_id < 574))
break;
*/
static void bmips_init_secondary(void)
{
+ bmips_cpu_setup();
+
switch (current_cpu_type()) {
case CPU_BMIPS4350:
case CPU_BMIPS4380:
err = own_fpu_inatomic(1);
if (msa && !err) {
enable_msa();
+ /*
+ * With MSA enabled, userspace can see MSACSR and the
+ * MSA regs, but they still hold stale values left by
+ * whichever task used MSA last; restore them from the
+ * saved fp/msa context.
+ */
+ write_msa_csr(current->thread.fpu.msacsr);
+ /*
+ * own_fpu_inatomic(1) only restores the low 64 bits of
+ * each vector register; fix up the high 64 bits.
+ */
+ init_msa_upper();
set_thread_flag(TIF_USEDMSA);
set_thread_flag(TIF_MSA_CTX_LIVE);
}
return 1;
}
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
+ unsigned flags)
{
handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
scache_size >> 10,
way_string[c->scache.ways], c->scache.linesz);
+
+ if (current_cpu_type() == CPU_BMIPS5000)
+ c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}
+
#else
if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
switch (counters) {
#define HANDLE_COUNTER(n) \
- fallthrough; \
case n + 1: \
control = r_c0_perfctrl ## n(); \
counter = r_c0_perfcntr ## n(); \
handled = IRQ_HANDLED; \
}
HANDLE_COUNTER(3)
+ fallthrough;
HANDLE_COUNTER(2)
+ fallthrough;
HANDLE_COUNTER(1)
+ fallthrough;
HANDLE_COUNTER(0)
}
irq_set_chip_and_handler(i, &a20r_irq_type, handle_level_irq);
sni_hwint = a20r_hwint;
change_c0_status(ST0_IM, IE_IRQ0);
- if (request_irq(SNI_A20R_IRQ_BASE + 3, sni_isa_irq_handler, 0, "ISA",
- NULL))
+ if (request_irq(SNI_A20R_IRQ_BASE + 3, sni_isa_irq_handler,
+ IRQF_SHARED, "ISA", sni_isa_irq_handler))
pr_err("Failed to register ISA interrupt\n");
}
{
return !flags;
}
+
+static inline int arch_irqs_disabled(void)
+{
+ return arch_irqs_disabled_flags(arch_local_save_flags());
+}
: /* no output */
: "r" (&tsk->thread.fpu)
: "memory");
- /* fall through */
+ fallthrough;
case SP32_DP16_reg:
asm volatile ("fsdi $fd15, [%0+0x78]\n\t"
"fsdi $fd14, [%0+0x70]\n\t"
: /* no output */
: "r" (&tsk->thread.fpu)
: "memory");
- /* fall through */
+ fallthrough;
case SP16_DP8_reg:
asm volatile ("fsdi $fd7, [%0+0x38]\n\t"
"fsdi $fd6, [%0+0x30]\n\t"
: /* no output */
: "r" (&tsk->thread.fpu)
: "memory");
- /* fall through */
+ fallthrough;
case SP8_DP4_reg:
asm volatile ("fsdi $fd3, [%1+0x18]\n\t"
"fsdi $fd2, [%1+0x10]\n\t"
"fldi $fd16, [%0+0x80]\n\t"
: /* no output */
: "r" (fpregs));
- /* fall through */
+ fallthrough;
case SP32_DP16_reg:
asm volatile ("fldi $fd15, [%0+0x78]\n\t"
"fldi $fd14, [%0+0x70]\n\t"
"fldi $fd8, [%0+0x40]\n\t"
: /* no output */
: "r" (fpregs));
- /* fall through */
+ fallthrough;
case SP16_DP8_reg:
asm volatile ("fldi $fd7, [%0+0x38]\n\t"
"fldi $fd6, [%0+0x30]\n\t"
"fldi $fd4, [%0+0x20]\n\t"
: /* no output */
: "r" (fpregs));
- /* fall through */
+ fallthrough;
case SP8_DP4_reg:
asm volatile ("fldi $fd3, [%1+0x18]\n\t"
"fldi $fd2, [%1+0x10]\n\t"
regs->uregs[0] = -EINTR;
break;
}
- /* Else, fall through */
+ fallthrough;
case -ERESTARTNOINTR:
regs->uregs[0] = regs->orig_r0;
regs->ipc -= 4;
switch (regs->uregs[0]) {
case -ERESTART_RESTARTBLOCK:
regs->uregs[15] = __NR_restart_syscall;
- /* Fall through */
+ fallthrough;
case -ERESTARTNOHAND:
case -ERESTARTSYS:
case -ERESTARTNOINTR:
switch (retval) {
case -ERESTART_RESTARTBLOCK:
restart = -2;
- /* Fall through */
+ fallthrough;
case -ERESTARTNOHAND:
case -ERESTARTSYS:
case -ERESTARTNOINTR:
regs->gr[28] = -EINTR;
break;
}
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOINTR:
check_syscallno_in_delay_branch(regs);
break;
break;
default:
- /* Fall through */
break;
}
case 15:
/* Data TLB miss fault/Data page fault */
- /* Fall through */
+ fallthrough;
case 16:
/* Non-access instruction TLB miss fault */
/* The instruction TLB entry needed for the target address of the FIC
is absent, and hardware can't find it, so we get to cleanup */
- /* Fall through */
+ fallthrough;
case 17:
/* Non-access data TLB miss fault/Non-access data page fault */
/* FIXME:
handle_unaligned(regs);
return;
}
- /* Fall Through */
+ fallthrough;
case 26:
/* PCXL: Data memory access rights trap */
fault_address = regs->ior;
case 19:
/* Data memory break trap */
regs->gr[0] |= PSW_X; /* So we can single-step over the trap */
- /* fall thru */
+ fallthrough;
case 21:
/* Page reference trap */
handle_gdb_break(regs, TRAP_HWBKPT);
}
mmap_read_unlock(current->mm);
}
- /* Fall Through */
+ fallthrough;
case 27:
/* Data memory protection ID trap */
if (code == 27 && !user_mode(regs) &&
case 0x30000000: /* coproc2 */
if (bit22set(inst))
return VM_WRITE;
- /* fall through */
+ fallthrough;
case 0x0: /* indexed/memory management */
if (bit22set(inst)) {
}
/* probably address is outside of mapped file */
- /* fall through */
+ fallthrough;
case 17: /* NA data TLB miss / page fault */
case 18: /* Unaligned access - PCXS only */
signo = SIGBUS;
If unsure, say N here.
+config PPC_PROT_SAO_LPAR
+ bool "Support PROT_SAO mappings in LPARs"
+ depends on PPC_BOOK3S_64
+ help
+ This option adds support for PROT_SAO mappings from userspace
+ inside LPARs on supported CPUs.
+
+ This may cause issues when performing guest migration from
+ a CPU that supports SAO to one that does not.
+
+ If unsure, say N here.
+
config PPC_COPRO_BASE
bool
#define _PAGE_RW (_PAGE_READ | _PAGE_WRITE)
#define _PAGE_RWX (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)
#define _PAGE_PRIVILEGED 0x00008 /* kernel access only */
-
-#define _PAGE_CACHE_CTL 0x00030 /* Bits for the folowing cache modes */
- /* No bits set is normal cacheable memory */
- /* 0x00010 unused, is SAO bit on radix POWER9 */
+#define _PAGE_SAO 0x00010 /* Strong access order */
#define _PAGE_NON_IDEMPOTENT 0x00020 /* non idempotent memory */
#define _PAGE_TOLERANT 0x00030 /* tolerant memory, cache inhibited */
-
#define _PAGE_DIRTY 0x00080 /* C: page changed */
#define _PAGE_ACCESSED 0x00100 /* R: page referenced */
/*
return hash__set_pte_at(mm, addr, ptep, pte, percpu);
}
+#define _PAGE_CACHE_CTL (_PAGE_SAO | _PAGE_NON_IDEMPOTENT | _PAGE_TOLERANT)
+
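For reference, the two-bit field now decodes as follows, restating the pte flag values defined above:

	/* _PAGE_CACHE_CTL (0x30) field values:
	 *   0x00  normal cacheable memory
	 *   0x10  _PAGE_SAO (strong access order)
	 *   0x20  _PAGE_NON_IDEMPOTENT
	 *   0x30  _PAGE_TOLERANT (cache inhibited)
	 */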
#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
#ifndef __ASSEMBLY__
+/*
+ * Pulled in for the __machine_check_early_realmode_* prototypes.
+ */
+#include <asm/mce.h>
+
/* This structure can grow, it's real size is used by head.S code
* via the mkdefs mechanism.
*/
#define CPU_FTR_SPURR LONG_ASM_CONST(0x0000000001000000)
#define CPU_FTR_DSCR LONG_ASM_CONST(0x0000000002000000)
#define CPU_FTR_VSX LONG_ASM_CONST(0x0000000004000000)
-// Free LONG_ASM_CONST(0x0000000008000000)
+#define CPU_FTR_SAO LONG_ASM_CONST(0x0000000008000000)
#define CPU_FTR_CP_USE_DCBTZ LONG_ASM_CONST(0x0000000010000000)
#define CPU_FTR_UNALIGNED_LD_STD LONG_ASM_CONST(0x0000000020000000)
#define CPU_FTR_ASYM_SMT LONG_ASM_CONST(0x0000000040000000)
CPU_FTR_MMCRA | CPU_FTR_SMT | \
CPU_FTR_COHERENT_ICACHE | \
CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
- CPU_FTR_DSCR | CPU_FTR_ASYM_SMT | \
+ CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT | \
CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
CPU_FTR_CFAR | CPU_FTR_HVMODE | \
CPU_FTR_VMX_COPY | CPU_FTR_HAS_PPR | CPU_FTR_DABRX )
CPU_FTR_MMCRA | CPU_FTR_SMT | \
CPU_FTR_COHERENT_ICACHE | \
CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
- CPU_FTR_DSCR | \
+ CPU_FTR_DSCR | CPU_FTR_SAO | \
CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \
CPU_FTR_MMCRA | CPU_FTR_SMT | \
CPU_FTR_COHERENT_ICACHE | \
CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
- CPU_FTR_DSCR | \
+ CPU_FTR_DSCR | CPU_FTR_SAO | \
CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \
CPU_FTR_MMCRA | CPU_FTR_SMT | \
CPU_FTR_COHERENT_ICACHE | \
CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
- CPU_FTR_DSCR | \
+ CPU_FTR_DSCR | CPU_FTR_SAO | \
CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \
FIX_HOLE,
/* reserve the top 128K for early debugging purposes */
FIX_EARLY_DEBUG_TOP = FIX_HOLE,
- FIX_EARLY_DEBUG_BASE = FIX_EARLY_DEBUG_TOP+(ALIGN(SZ_128, PAGE_SIZE)/PAGE_SIZE)-1,
+ FIX_EARLY_DEBUG_BASE = FIX_EARLY_DEBUG_TOP+(ALIGN(SZ_128K, PAGE_SIZE)/PAGE_SIZE)-1,
#ifdef CONFIG_HIGHMEM
FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
#define powerpc_local_irq_pmu_save(flags) \
do { \
raw_local_irq_pmu_save(flags); \
- trace_hardirqs_off(); \
+ if (!raw_irqs_disabled_flags(flags)) \
+ trace_hardirqs_off(); \
} while(0)
#define powerpc_local_irq_pmu_restore(flags) \
do { \
- if (raw_irqs_disabled_flags(flags)) { \
- raw_local_irq_pmu_restore(flags); \
- trace_hardirqs_off(); \
- } else { \
+ if (!raw_irqs_disabled_flags(flags)) \
trace_hardirqs_on(); \
- raw_local_irq_pmu_restore(flags); \
- } \
+ raw_local_irq_pmu_restore(flags); \
} while(0)
#else
#define powerpc_local_irq_pmu_save(flags) \
#ifndef __ASSEMBLY__
#include <asm/page.h>
+#include <linux/sizes.h>
#define KASAN_SHADOW_SCALE_SHIFT 3
+#if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_MODULES) && defined(CONFIG_STRICT_KERNEL_RWX)
+#define KASAN_KERN_START ALIGN_DOWN(PAGE_OFFSET - SZ_256M, SZ_256M)
+#else
+#define KASAN_KERN_START PAGE_OFFSET
+#endif
+
#define KASAN_SHADOW_START (KASAN_SHADOW_OFFSET + \
- (PAGE_OFFSET >> KASAN_SHADOW_SCALE_SHIFT))
+ (KASAN_KERN_START >> KASAN_SHADOW_SCALE_SHIFT))
#define KASAN_SHADOW_OFFSET ASM_CONST(CONFIG_KASAN_SHADOW_OFFSET)
#define KVM_ARCH_WANT_MMU_NOTIFIER
extern int kvm_unmap_hva_range(struct kvm *kvm,
- unsigned long start, unsigned long end);
+ unsigned long start, unsigned long end,
+ unsigned flags);
extern int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
extern int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
#define MCE_EVENT_RELEASE true
#define MCE_EVENT_DONTRELEASE false
+struct pt_regs;
+struct notifier_block;
+
extern void save_mce_event(struct pt_regs *regs, long handled,
struct mce_error_info *mce_err, uint64_t nip,
uint64_t addr, uint64_t phys_addr);
int mce_unregister_notifier(struct notifier_block *nb);
#ifdef CONFIG_PPC_BOOK3S_64
void flush_and_reload_slb(void);
+long __machine_check_early_realmode_p7(struct pt_regs *regs);
+long __machine_check_early_realmode_p8(struct pt_regs *regs);
+long __machine_check_early_realmode_p9(struct pt_regs *regs);
+long __machine_check_early_realmode_p10(struct pt_regs *regs);
#endif /* CONFIG_PPC_BOOK3S_64 */
#endif /* __ASM_PPC64_MCE_H__ */
#include <linux/pkeys.h>
#include <asm/cpu_has_feature.h>
-#ifdef CONFIG_PPC_MEM_KEYS
static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
unsigned long pkey)
{
- return pkey_to_vmflag_bits(pkey);
+#ifdef CONFIG_PPC_MEM_KEYS
+ return (((prot & PROT_SAO) ? VM_SAO : 0) | pkey_to_vmflag_bits(pkey));
+#else
+ return ((prot & PROT_SAO) ? VM_SAO : 0);
+#endif
}
#define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey)
static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
{
- return __pgprot(vmflag_to_pte_pkey_bits(vm_flags));
+#ifdef CONFIG_PPC_MEM_KEYS
+ return (vm_flags & VM_SAO) ?
+ __pgprot(_PAGE_SAO | vmflag_to_pte_pkey_bits(vm_flags)) :
+ __pgprot(0 | vmflag_to_pte_pkey_bits(vm_flags));
+#else
+ return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
+#endif
}
#define arch_vm_get_page_prot(vm_flags) arch_vm_get_page_prot(vm_flags)
-#endif
+
+static inline bool arch_validate_prot(unsigned long prot, unsigned long addr)
+{
+ if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM | PROT_SAO))
+ return false;
+ if (prot & PROT_SAO) {
+ if (!cpu_has_feature(CPU_FTR_SAO))
+ return false;
+ if (firmware_has_feature(FW_FEATURE_LPAR) &&
+ !IS_ENABLED(CONFIG_PPC_PROT_SAO_LPAR))
+ return false;
+ }
+ return true;
+}
+#define arch_validate_prot arch_validate_prot
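A hedged userspace sketch of what arch_validate_prot() now gates; `p` and `len` are assumed to describe an existing anonymous mapping:

	/* Illustration only: requesting Strong Access Ordering on a mapping. */
	#include <stdio.h>
	#include <sys/mman.h>

	if (mprotect(p, len, PROT_READ | PROT_WRITE | PROT_SAO) != 0)
		/* EINVAL unless CPU_FTR_SAO (and PPC_PROT_SAO_LPAR in an LPAR) */
		perror("mprotect(PROT_SAO)");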
#endif /* CONFIG_PPC64 */
#endif /* _ASM_POWERPC_MMAN_H */
*/
#include <asm/nohash/pte-book3e.h>
+#define _PAGE_SAO 0
+
#define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1))
/*
/* To support perf_regs sier update */
extern bool is_sier_available(void);
+/* Mask of the supported extended perf regs, set at PMU init time */
+extern u64 PERF_REG_EXTENDED_MASK;
+#define PERF_REG_EXTENDED_MASK PERF_REG_EXTENDED_MASK
#endif
int *blacklist_ev;
/* BHRB entries in the PMU */
int bhrb_nr;
+ /*
+ * Set PERF_PMU_CAP_EXTENDED_REGS here if the PMU
+ * supports the extended perf regs capability.
+ */
+ int capabilities;
};
/*
#include <asm-generic/mman-common.h>
-#define PROT_SAO 0x10 /* Unsupported since v5.9 */
+#define PROT_SAO 0x10 /* Strong Access Ordering */
#define MAP_RENAME MAP_ANONYMOUS /* In SunOS terminology */
#define MAP_NORESERVE 0x40 /* don't reserve swap pages */
PERF_REG_POWERPC_DSISR,
PERF_REG_POWERPC_SIER,
PERF_REG_POWERPC_MMCRA,
- PERF_REG_POWERPC_MAX,
+ /* Extended registers */
+ PERF_REG_POWERPC_MMCR0,
+ PERF_REG_POWERPC_MMCR1,
+ PERF_REG_POWERPC_MMCR2,
+ PERF_REG_POWERPC_MMCR3,
+ PERF_REG_POWERPC_SIER2,
+ PERF_REG_POWERPC_SIER3,
+ /* Max regs without the extended regs */
+ PERF_REG_POWERPC_MAX = PERF_REG_POWERPC_MMCRA + 1,
};
+
+#define PERF_REG_PMU_MASK ((1ULL << PERF_REG_POWERPC_MAX) - 1)
+
+/* PERF_REG_EXTENDED_MASK value for CPU_FTR_ARCH_300 */
+#define PERF_REG_PMU_MASK_300 (((1ULL << (PERF_REG_POWERPC_MMCR2 + 1)) - 1) - PERF_REG_PMU_MASK)
+/* PERF_REG_EXTENDED_MASK value for CPU_FTR_ARCH_31 */
+#define PERF_REG_PMU_MASK_31 (((1ULL << (PERF_REG_POWERPC_SIER3 + 1)) - 1) - PERF_REG_PMU_MASK)
+
+#define PERF_REG_MAX_ISA_300 (PERF_REG_POWERPC_MMCR2 + 1)
+#define PERF_REG_MAX_ISA_31 (PERF_REG_POWERPC_SIER3 + 1)
#endif /* _UAPI_ASM_POWERPC_PERF_REGS_H */
extern void __restore_cpu_power9(void);
extern void __setup_cpu_power10(unsigned long offset, struct cpu_spec* spec);
extern void __restore_cpu_power10(void);
-extern long __machine_check_early_realmode_p7(struct pt_regs *regs);
-extern long __machine_check_early_realmode_p8(struct pt_regs *regs);
-extern long __machine_check_early_realmode_p9(struct pt_regs *regs);
#endif /* CONFIG_PPC64 */
#if defined(CONFIG_E500)
extern void __setup_cpu_e5500(unsigned long offset, struct cpu_spec* spec);
.machine_check_early = __machine_check_early_realmode_p9,
.platform = "power9",
},
+ { /* Power10 */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x00800000,
+ .cpu_name = "POWER10 (raw)",
+ .cpu_features = CPU_FTRS_POWER10,
+ .cpu_user_features = COMMON_USER_POWER10,
+ .cpu_user_features2 = COMMON_USER2_POWER10,
+ .mmu_features = MMU_FTRS_POWER10,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .num_pmcs = 6,
+ .pmc_type = PPC_PMC_IBM,
+ .oprofile_cpu_type = "ppc64/power10",
+ .oprofile_type = PPC_OPROFILE_INVALID,
+ .cpu_setup = __setup_cpu_power10,
+ .cpu_restore = __restore_cpu_power10,
+ .machine_check_early = __machine_check_early_realmode_p10,
+ .platform = "power10",
+ },
{ /* Cell Broadband Engine */
.pvr_mask = 0xffff0000,
.pvr_value = 0x00700000,
* Set up the base CPU
*/
-extern long __machine_check_early_realmode_p8(struct pt_regs *regs);
-extern long __machine_check_early_realmode_p9(struct pt_regs *regs);
-extern long __machine_check_early_realmode_p10(struct pt_regs *regs);
-
static int hv_mode;
static struct {
{"processor-control-facility-v3", feat_enable_dbell, CPU_FTR_DBELL},
{"processor-utilization-of-resources-register", feat_enable_purr, 0},
{"no-execute", feat_enable, 0},
- /* strong-access-ordering is unused */
+ {"strong-access-ordering", feat_enable, CPU_FTR_SAO},
{"cache-inhibited-large-page", feat_enable_large_ci, 0},
{"coprocessor-icswx", feat_enable, 0},
{"hypervisor-virtualization-interrupt", feat_enable_hvi, 0},
ld r11,exception_marker@toc(r2)
std r11,-16(r10) /* "regshere" marker */
+BEGIN_FTR_SECTION
+ HMT_MEDIUM
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+
/*
* RECONCILE_IRQ_STATE without calling trace_hardirqs_off(), which
* would clobber syscall parameters. Also we always enter with IRQs
* are live for the user thread).
*/
if ((!(msr & MSR_FP)) && should_restore_fp())
- new_msr |= MSR_FP | current->thread.fpexc_mode;
+ new_msr |= MSR_FP;
if ((!(msr & MSR_VEC)) && should_restore_altivec())
new_msr |= MSR_VEC;
}
if (new_msr) {
+ unsigned long fpexc_mode = 0;
+
msr_check_and_set(new_msr);
- if (new_msr & MSR_FP)
+ if (new_msr & MSR_FP) {
do_restore_fp();
+ // This also covers VSX, because VSX implies FP
+ fpexc_mode = current->thread.fpexc_mode;
+ }
+
if (new_msr & MSR_VEC)
do_restore_altivec();
msr_check_and_clear(new_msr);
- regs->msr |= new_msr;
+ regs->msr |= new_msr | fpexc_mode;
}
}
#endif
min = pvr & 0xFF;
break;
case 0x004e: /* POWER9 bits 12-15 give chip type */
+ case 0x0080: /* POWER10 bit 12 gives SMT8/4 */
maj = (pvr >> 8) & 0x0F;
min = pvr & 0xFF;
break;
kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new, change);
}
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
+ unsigned flags)
{
return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
}
return 0;
}
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
+ unsigned flags)
{
/* kvm_unmap_hva flushes everything anyways */
kvm_unmap_hva(kvm, start);
{
if (!IS_ENABLED(CONFIG_MODULES))
return false;
+#ifdef MODULES_VADDR
+ if (addr < ALIGN_DOWN(MODULES_VADDR, SZ_256M))
+ return false;
+ if (addr > ALIGN(MODULES_END, SZ_256M) - 1)
+ return false;
+#else
if (addr < ALIGN_DOWN(VMALLOC_START, SZ_256M))
return false;
- if (addr >= ALIGN(VMALLOC_END, SZ_256M))
+ if (addr > ALIGN(VMALLOC_END, SZ_256M) - 1)
return false;
+#endif
return true;
}
rflags |= HPTE_R_I;
else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_NON_IDEMPOTENT)
rflags |= (HPTE_R_I | HPTE_R_G);
+ else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_SAO)
+ rflags |= (HPTE_R_W | HPTE_R_I | HPTE_R_M);
else
/*
* Add memory coherence if cache inhibited is not set
&& cpu_has_feature(CPU_FTR_HVMODE))
tlbiel_all();
- if (IS_ENABLED(CONFIG_PPC_MEM_KEYS) && mmu_has_feature(MMU_FTR_PKEY))
+#ifdef CONFIG_PPC_MEM_KEYS
+ if (mmu_has_feature(MMU_FTR_PKEY))
mtspr(SPRN_UAMOR, default_uamor);
+#endif
}
#endif /* CONFIG_SMP */
case BPF_JMP | BPF_JSET | BPF_K:
case BPF_JMP | BPF_JSET | BPF_X:
true_cond = COND_NE;
- /* Fall through */
+ fallthrough;
cond_branch:
/* same targets, can avoid doing the test :) */
if (filter[i].jt == filter[i].jf) {
ret = 0;
out:
if (has_branch_stack(event)) {
- power_pmu_bhrb_enable(event);
- cpuhw->bhrb_filter = ppmu->bhrb_filter_map(
- event->attr.branch_sample_type);
+ u64 bhrb_filter = -1;
+
+ if (ppmu->bhrb_filter_map)
+ bhrb_filter = ppmu->bhrb_filter_map(
+ event->attr.branch_sample_type);
+
+ if (bhrb_filter != -1) {
+ cpuhw->bhrb_filter = bhrb_filter;
+ power_pmu_bhrb_enable(event);
+ }
}
perf_pmu_enable(event->pmu);
int n;
int err;
struct cpu_hw_events *cpuhw;
- u64 bhrb_filter;
if (!ppmu)
return -ENOENT;
err = power_check_constraints(cpuhw, events, cflags, n + 1);
if (has_branch_stack(event)) {
- bhrb_filter = ppmu->bhrb_filter_map(
+ u64 bhrb_filter = -1;
+
+ if (ppmu->bhrb_filter_map)
+ bhrb_filter = ppmu->bhrb_filter_map(
event->attr.branch_sample_type);
if (bhrb_filter == -1) {
if (perf_event_overflow(event, &data, regs))
power_pmu_stop(event, 0);
+ } else if (period) {
+ /* Account for interrupt in case of invalid SIAR */
+ if (perf_event_account_interrupt(event))
+ power_pmu_stop(event, 0);
}
}
pmu->name);
power_pmu.attr_groups = ppmu->attr_groups;
+ power_pmu.capabilities |= (ppmu->capabilities & PERF_PMU_CAP_EXTENDED_REGS);
#ifdef MSR_HV
/*
NULL,
};
+static struct attribute *cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL,
+};
+
+static struct attribute_group cpumask_attr_group = {
+ .attrs = cpumask_attrs,
+};
+
static struct attribute *if_attrs[] = {
&dev_attr_catalog_len.attr,
&dev_attr_catalog_version.attr,
&dev_attr_sockets.attr,
&dev_attr_chipspersocket.attr,
&dev_attr_coresperchip.attr,
- &dev_attr_cpumask.attr,
NULL,
};
&event_desc_group,
&event_long_desc_group,
&if_group,
+ &cpumask_attr_group,
NULL,
};
header->misc = 0;
if (cpu_has_feature(CPU_FTR_ARCH_31)) {
- switch (IMC_TRACE_RECORD_VAL_HVPR(mem->val)) {
+ switch (IMC_TRACE_RECORD_VAL_HVPR(be64_to_cpu(READ_ONCE(mem->val)))) {
case 0:/* when MSR HV and PR not set in the trace-record */
header->misc |= PERF_RECORD_MISC_GUEST_KERNEL;
break;
header->misc |= PERF_RECORD_MISC_GUEST_USER;
break;
case 2: /* MSR HV is 1 and PR is 0 */
- header->misc |= PERF_RECORD_MISC_HYPERVISOR;
+ header->misc |= PERF_RECORD_MISC_KERNEL;
break;
case 3: /* MSR HV is 1 and PR is 1 */
header->misc |= PERF_RECORD_MISC_USER;
#include <asm/ptrace.h>
#include <asm/perf_regs.h>
+u64 PERF_REG_EXTENDED_MASK;
+
#define PT_REGS_OFFSET(id, r) [id] = offsetof(struct pt_regs, r)
-#define REG_RESERVED (~((1ULL << PERF_REG_POWERPC_MAX) - 1))
+#define REG_RESERVED (~(PERF_REG_EXTENDED_MASK | PERF_REG_PMU_MASK))
static unsigned int pt_regs_offset[PERF_REG_POWERPC_MAX] = {
PT_REGS_OFFSET(PERF_REG_POWERPC_R0, gpr[0]),
PT_REGS_OFFSET(PERF_REG_POWERPC_MMCRA, dsisr),
};
+/* Return the current value of an extended register */
+static u64 get_ext_regs_value(int idx)
+{
+ switch (idx) {
+ case PERF_REG_POWERPC_MMCR0:
+ return mfspr(SPRN_MMCR0);
+ case PERF_REG_POWERPC_MMCR1:
+ return mfspr(SPRN_MMCR1);
+ case PERF_REG_POWERPC_MMCR2:
+ return mfspr(SPRN_MMCR2);
+#ifdef CONFIG_PPC64
+ case PERF_REG_POWERPC_MMCR3:
+ return mfspr(SPRN_MMCR3);
+ case PERF_REG_POWERPC_SIER2:
+ return mfspr(SPRN_SIER2);
+ case PERF_REG_POWERPC_SIER3:
+ return mfspr(SPRN_SIER3);
+#endif
+ default: return 0;
+ }
+}
+
u64 perf_reg_value(struct pt_regs *regs, int idx)
{
- if (WARN_ON_ONCE(idx >= PERF_REG_POWERPC_MAX))
- return 0;
+ u64 perf_reg_extended_max = PERF_REG_POWERPC_MAX;
+
+ if (cpu_has_feature(CPU_FTR_ARCH_31))
+ perf_reg_extended_max = PERF_REG_MAX_ISA_31;
+ else if (cpu_has_feature(CPU_FTR_ARCH_300))
+ perf_reg_extended_max = PERF_REG_MAX_ISA_300;
if (idx == PERF_REG_POWERPC_SIER &&
(IS_ENABLED(CONFIG_FSL_EMB_PERF_EVENT) ||
IS_ENABLED(CONFIG_PPC32)))
return 0;
+ if (idx >= PERF_REG_POWERPC_MAX && idx < perf_reg_extended_max)
+ return get_ext_regs_value(idx);
+
+ /*
+ * If idx refers to a register beyond the supported
+ * set, warn and return 0.
+ */
+ if (WARN_ON_ONCE(idx >= perf_reg_extended_max))
+ return 0;
+
return regs_get_register(regs, pt_regs_offset[idx]);
}
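A sketch of how a profiler might request the new registers; PERF_SAMPLE_REGS_INTR is existing perf ABI, the masks come from the uapi header earlier in this series, and the event choice is arbitrary:

	/* Illustration only: sample the base GPR set plus the ISA 3.0
	 * extended regs (MMCR0..MMCR2). */
	struct perf_event_attr attr = {
		.type			= PERF_TYPE_HARDWARE,
		.config			= PERF_COUNT_HW_CPU_CYCLES,
		.size			= sizeof(struct perf_event_attr),
		.sample_type		= PERF_SAMPLE_REGS_INTR,
		.sample_regs_intr	= PERF_REG_PMU_MASK | PERF_REG_PMU_MASK_300,
	};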
#define POWER10_MMCRA_IFM3 0x00000000C0000000UL
#define POWER10_MMCRA_BHRB_MASK 0x00000000C0000000UL
+extern u64 PERF_REG_EXTENDED_MASK;
+
/* Table of alternatives, sorted by column 0 */
static const unsigned int power10_event_alternatives[][MAX_ALT] = {
{ PM_RUN_CYC_ALT, PM_RUN_CYC },
.cache_events = &power10_cache_events,
.attr_groups = power10_pmu_attr_groups,
.bhrb_nr = 32,
+ .capabilities = PERF_PMU_CAP_EXTENDED_REGS,
};
int init_power10_pmu(void)
strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power10"))
return -ENODEV;
+ /* Set the PERF_REG_EXTENDED_MASK here */
+ PERF_REG_EXTENDED_MASK = PERF_REG_PMU_MASK_31;
+
rc = register_power_pmu(&power10_pmu);
if (rc)
return rc;
#define POWER9_MMCRA_IFM3 0x00000000C0000000UL
#define POWER9_MMCRA_BHRB_MASK 0x00000000C0000000UL
+extern u64 PERF_REG_EXTENDED_MASK;
+
/* Nasty Power9 specific hack */
#define PVR_POWER9_CUMULUS 0x00002000
.cache_events = &power9_cache_events,
.attr_groups = power9_pmu_attr_groups,
.bhrb_nr = 32,
+ .capabilities = PERF_PMU_CAP_EXTENDED_REGS,
};
int init_power9_pmu(void)
}
}
+ /* Set the PERF_REG_EXTENDED_MASK here */
+ PERF_REG_EXTENDED_MASK = PERF_REG_PMU_MASK_300;
+
rc = register_power_pmu(&power9_pmu);
if (rc)
return rc;
select PPC_HAVE_PMU_SUPPORT
select PPC_HAVE_KUEP
select PPC_HAVE_KUAP
- select HAVE_ARCH_VMAP_STACK
+ select HAVE_ARCH_VMAP_STACK if !ADB_PMU
config PPC_BOOK3S_601
bool "PowerPC 601"
return;
}
- if (pvr_version_is(PVR_POWER9))
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
pnv_power9_idle_init();
for (i = 0; i < nr_pnv_idle_states; i++)
struct iommu_table *tbl = pe->table_group.tables[0];
int64_t rc;
- if (pe->dma_setup_done)
+ if (!pe->dma_setup_done)
return;
rc = pnv_pci_ioda2_unset_window(&pe->table_group, 0);
*/
static void pseries_cpu_die(unsigned int cpu)
{
- int tries;
int cpu_status = 1;
unsigned int pcpu = get_hard_smp_processor_id(cpu);
+ unsigned long timeout = jiffies + msecs_to_jiffies(120000);
- for (tries = 0; tries < 25; tries++) {
+ while (true) {
cpu_status = smp_query_cpu_stopped(pcpu);
if (cpu_status == QCSS_STOPPED ||
cpu_status == QCSS_HARDWARE_ERROR)
break;
- cpu_relax();
+ if (time_after(jiffies, timeout)) {
+ pr_warn("CPU %i (hwid %i) didn't die after 120 seconds\n",
+ cpu, pcpu);
+ timeout = jiffies + msecs_to_jiffies(120000);
+ }
+
+ cond_resched();
}
- if (cpu_status != 0) {
- printk("Querying DEAD? cpu %i (%i) shows %i\n",
- cpu, pcpu, cpu_status);
+ if (cpu_status == QCSS_HARDWARE_ERROR) {
+ pr_warn("CPU %i (hwid %i) reported error while dying\n",
+ cpu, pcpu);
}
/* Isolation and deallocation are definitely done by
case EPOW_SHUTDOWN_ON_UPS:
pr_emerg("Loss of system power detected. System is running on"
" UPS/battery. Check RTAS error log for details\n");
- orderly_poweroff(true);
break;
case EPOW_SHUTDOWN_LOSS_OF_CRITICAL_FUNCTIONS:
select PCI_DOMAINS_GENERIC if PCI
select PCI_MSI if PCI
select RISCV_INTC
- select RISCV_TIMER
+ select RISCV_TIMER if RISCV_SBI
select SPARSEMEM_STATIC if 32BIT
select SPARSE_IRQ
select SYSCTL_EXCEPTION_TRACE
config SOC_VIRT
bool "QEMU Virt Machine"
+ select CLINT_TIMER if RISCV_M_MODE
select POWER_RESET
select POWER_RESET_SYSCON
select POWER_RESET_SYSCON_POWEROFF
config SOC_KENDRYTE
bool "Kendryte K210 SoC"
depends on !MMU
+ select CLINT_TIMER if RISCV_M_MODE
select SERIAL_SIFIVE if TTY
select SERIAL_SIFIVE_CONSOLE if TTY
select SIFIVE_PLIC
CONFIG_SLOB=y
# CONFIG_SLAB_MERGE_DEFAULT is not set
# CONFIG_MMU is not set
+CONFIG_SOC_VIRT=y
CONFIG_MAXPHYSMEM_2GB=y
CONFIG_SMP=y
CONFIG_CMDLINE="root=/dev/vda rw earlycon=uart8250,mmio,0x10000000,115200n8 console=ttyS0"
# CONFIG_SERIO is not set
# CONFIG_LEGACY_PTYS is not set
# CONFIG_LDISC_AUTOLOAD is not set
-# CONFIG_DEVMEM is not set
CONFIG_SERIAL_8250=y
# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_RUNTIME_UARTS=1
CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_HW_RANDOM is not set
+# CONFIG_DEVMEM is not set
# CONFIG_HWMON is not set
-# CONFIG_LCD_CLASS_DEVICE is not set
-# CONFIG_BACKLIGHT_CLASS_DEVICE is not set
# CONFIG_VGA_CONSOLE is not set
# CONFIG_HID is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_VIRTIO_MMIO=y
CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
-CONFIG_SIFIVE_PLIC=y
-# CONFIG_VALIDATE_FS_PARSER is not set
CONFIG_EXT2_FS=y
# CONFIG_DNOTIFY is not set
# CONFIG_INOTIFY_USER is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
CONFIG_BPF_SYSCALL=y
+CONFIG_SOC_SIFIVE=y
CONFIG_SOC_VIRT=y
CONFIG_ARCH_RV32I=y
CONFIG_SMP=y
CONFIG_VIRTIO_CONSOLE=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_VIRTIO=y
+CONFIG_SPI=y
+CONFIG_SPI_SIFIVE=y
# CONFIG_PTP_1588_CLOCK is not set
CONFIG_POWER_RESET=y
CONFIG_DRM=y
CONFIG_USB_OHCI_HCD_PLATFORM=y
CONFIG_USB_STORAGE=y
CONFIG_USB_UAS=y
+CONFIG_MMC=y
+CONFIG_MMC_SPI=y
CONFIG_RTC_CLASS=y
CONFIG_VIRTIO_PCI=y
CONFIG_VIRTIO_BALLOON=y
+++ /dev/null
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_RISCV_CLINT_H
-#define _ASM_RISCV_CLINT_H 1
-
-#include <linux/io.h>
-#include <linux/smp.h>
-
-#ifdef CONFIG_RISCV_M_MODE
-extern u32 __iomem *clint_ipi_base;
-
-void clint_init_boot_cpu(void);
-
-static inline void clint_send_ipi_single(unsigned long hartid)
-{
- writel(1, clint_ipi_base + hartid);
-}
-
-static inline void clint_send_ipi_mask(const struct cpumask *mask)
-{
- int cpu;
-
- for_each_cpu(cpu, mask)
- clint_send_ipi_single(cpuid_to_hartid_map(cpu));
-}
-
-static inline void clint_clear_ipi(unsigned long hartid)
-{
- writel(0, clint_ipi_base + hartid);
-}
-#else /* CONFIG_RISCV_M_MODE */
-#define clint_init_boot_cpu() do { } while (0)
-
-/* stubs to for code is only reachable under IS_ENABLED(CONFIG_RISCV_M_MODE): */
-void clint_send_ipi_single(unsigned long hartid);
-void clint_send_ipi_mask(const struct cpumask *hartid_mask);
-void clint_clear_ipi(unsigned long hartid);
-#endif /* CONFIG_RISCV_M_MODE */
-
-#endif /* _ASM_RISCV_CLINT_H */
struct seq_file;
extern unsigned long boot_cpu_hartid;
+struct riscv_ipi_ops {
+ void (*ipi_inject)(const struct cpumask *target);
+ void (*ipi_clear)(void);
+};
+
#ifdef CONFIG_SMP
/*
* Mapping between linux logical cpu index and hartid.
int riscv_hartid_to_cpuid(int hartid);
void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out);
+/* Set custom IPI operations */
+void riscv_set_ipi_ops(struct riscv_ipi_ops *ops);
+
+/* Clear IPI for current CPU */
+void riscv_clear_ipi(void);
+
/* Secondary hart entry */
asmlinkage void smp_callin(void);
cpumask_set_cpu(boot_cpu_hartid, out);
}
+static inline void riscv_set_ipi_ops(struct riscv_ipi_ops *ops)
+{
+}
+
+static inline void riscv_clear_ipi(void)
+{
+}
+
#endif /* CONFIG_SMP */
#if defined(CONFIG_HOTPLUG_CPU) && (CONFIG_SMP)
#define _ASM_RISCV_TIMEX_H
#include <asm/csr.h>
-#include <asm/mmio.h>
typedef unsigned long cycles_t;
-extern u64 __iomem *riscv_time_val;
-extern u64 __iomem *riscv_time_cmp;
-
-#ifdef CONFIG_64BIT
-#define mmio_get_cycles() readq_relaxed(riscv_time_val)
-#else
-#define mmio_get_cycles() readl_relaxed(riscv_time_val)
-#define mmio_get_cycles_hi() readl_relaxed(((u32 *)riscv_time_val) + 1)
-#endif
-
static inline cycles_t get_cycles(void)
{
- if (IS_ENABLED(CONFIG_RISCV_SBI))
- return csr_read(CSR_TIME);
- return mmio_get_cycles();
+ return csr_read(CSR_TIME);
}
#define get_cycles get_cycles
+static inline u32 get_cycles_hi(void)
+{
+ return csr_read(CSR_TIMEH);
+}
+#define get_cycles_hi get_cycles_hi
+
#ifdef CONFIG_64BIT
static inline u64 get_cycles64(void)
{
return get_cycles();
}
#else /* CONFIG_64BIT */
-static inline u32 get_cycles_hi(void)
-{
- if (IS_ENABLED(CONFIG_RISCV_SBI))
- return csr_read(CSR_TIMEH);
- return mmio_get_cycles_hi();
-}
-
static inline u64 get_cycles64(void)
{
u32 hi, lo;
obj-y += patch.o
obj-$(CONFIG_MMU) += vdso.o vdso/
-obj-$(CONFIG_RISCV_M_MODE) += clint.o traps_misaligned.o
+obj-$(CONFIG_RISCV_M_MODE) += traps_misaligned.o
obj-$(CONFIG_FPU) += fpu.o
obj-$(CONFIG_SMP) += smpboot.o
obj-$(CONFIG_SMP) += smp.o
+++ /dev/null
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (c) 2019 Christoph Hellwig.
- */
-
-#include <linux/io.h>
-#include <linux/of_address.h>
-#include <linux/types.h>
-#include <asm/clint.h>
-#include <asm/csr.h>
-#include <asm/timex.h>
-#include <asm/smp.h>
-
-/*
- * This is the layout used by the SiFive clint, which is also shared by the qemu
- * virt platform, and the Kendryte KD210 at least.
- */
-#define CLINT_IPI_OFF 0
-#define CLINT_TIME_CMP_OFF 0x4000
-#define CLINT_TIME_VAL_OFF 0xbff8
-
-u32 __iomem *clint_ipi_base;
-
-void clint_init_boot_cpu(void)
-{
- struct device_node *np;
- void __iomem *base;
-
- np = of_find_compatible_node(NULL, NULL, "riscv,clint0");
- if (!np) {
- panic("clint not found");
- return;
- }
-
- base = of_iomap(np, 0);
- if (!base)
- panic("could not map CLINT");
-
- clint_ipi_base = base + CLINT_IPI_OFF;
- riscv_time_cmp = base + CLINT_TIME_CMP_OFF;
- riscv_time_val = base + CLINT_TIME_VAL_OFF;
-
- clint_clear_ipi(boot_cpu_hartid);
-}
return __sbi_base_ecall(SBI_EXT_BASE_GET_IMP_VERSION);
}
+static void sbi_send_cpumask_ipi(const struct cpumask *target)
+{
+ struct cpumask hartid_mask;
+
+ riscv_cpuid_to_hartid_mask(target, &hartid_mask);
+
+ sbi_send_ipi(cpumask_bits(&hartid_mask));
+}
+
+static struct riscv_ipi_ops sbi_ipi_ops = {
+ .ipi_inject = sbi_send_cpumask_ipi
+};
int __init sbi_init(void)
{
__sbi_rfence = __sbi_rfence_v01;
}
+ riscv_set_ipi_ops(&sbi_ipi_ops);
+
return 0;
}
#include <linux/swiotlb.h>
#include <linux/smp.h>
-#include <asm/clint.h>
#include <asm/cpu_ops.h>
#include <asm/setup.h>
#include <asm/sections.h>
#else
unflatten_device_tree();
#endif
- clint_init_boot_cpu();
#ifdef CONFIG_SWIOTLB
swiotlb_init(1);
regs->a0 = -EINTR;
break;
}
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOINTR:
regs->a0 = regs->orig_a0;
regs->epc -= 0x4;
#include <linux/delay.h>
#include <linux/irq_work.h>
-#include <asm/clint.h>
#include <asm/sbi.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
wait_for_interrupt();
}
+static struct riscv_ipi_ops *ipi_ops;
+
+void riscv_set_ipi_ops(struct riscv_ipi_ops *ops)
+{
+ ipi_ops = ops;
+}
+EXPORT_SYMBOL_GPL(riscv_set_ipi_ops);
+
+void riscv_clear_ipi(void)
+{
+ if (ipi_ops && ipi_ops->ipi_clear)
+ ipi_ops->ipi_clear();
+
+ csr_clear(CSR_IP, IE_SIE);
+}
+EXPORT_SYMBOL_GPL(riscv_clear_ipi);
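A minimal sketch of a platform registering its own backend through the new hooks; every foo_* name is hypothetical:

	/* Illustration only: wiring a custom IPI backend into the new ops. */
	static void foo_ipi_inject(const struct cpumask *target)
	{
		/* kick the per-hart IPI register for each cpu in @target */
	}

	static void foo_ipi_clear(void)
	{
		/* acknowledge the pending IPI on the local hart */
	}

	static struct riscv_ipi_ops foo_ipi_ops = {
		.ipi_inject	= foo_ipi_inject,
		.ipi_clear	= foo_ipi_clear,
	};

	/* ...then, from the driver's init path: */
	riscv_set_ipi_ops(&foo_ipi_ops);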
+
static void send_ipi_mask(const struct cpumask *mask, enum ipi_message_type op)
{
- struct cpumask hartid_mask;
int cpu;
smp_mb__before_atomic();
set_bit(op, &ipi_data[cpu].bits);
smp_mb__after_atomic();
- riscv_cpuid_to_hartid_mask(mask, &hartid_mask);
- if (IS_ENABLED(CONFIG_RISCV_SBI))
- sbi_send_ipi(cpumask_bits(&hartid_mask));
+ if (ipi_ops && ipi_ops->ipi_inject)
+ ipi_ops->ipi_inject(mask);
else
- clint_send_ipi_mask(mask);
+ pr_warn("SMP: IPI inject method not available\n");
}
static void send_ipi_single(int cpu, enum ipi_message_type op)
{
- int hartid = cpuid_to_hartid_map(cpu);
-
smp_mb__before_atomic();
set_bit(op, &ipi_data[cpu].bits);
smp_mb__after_atomic();
- if (IS_ENABLED(CONFIG_RISCV_SBI))
- sbi_send_ipi(cpumask_bits(cpumask_of(hartid)));
- else
- clint_send_ipi_single(hartid);
-}
-
-static inline void clear_ipi(void)
-{
- if (IS_ENABLED(CONFIG_RISCV_SBI))
- csr_clear(CSR_IP, IE_SIE);
+ if (ipi_ops && ipi_ops->ipi_inject)
+ ipi_ops->ipi_inject(cpumask_of(cpu));
else
- clint_clear_ipi(cpuid_to_hartid_map(smp_processor_id()));
+ pr_warn("SMP: IPI inject method not available\n");
}
#ifdef CONFIG_IRQ_WORK
irq_enter();
- clear_ipi();
+ riscv_clear_ipi();
while (true) {
unsigned long ops;
#include <linux/of.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/mm.h>
-#include <asm/clint.h>
#include <asm/cpu_ops.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
struct mm_struct *mm = &init_mm;
unsigned int curr_cpuid = smp_processor_id();
- if (!IS_ENABLED(CONFIG_RISCV_SBI))
- clint_clear_ipi(cpuid_to_hartid_map(smp_processor_id()));
+ riscv_clear_ipi();
/* All kernel threads share the same mm context. */
mmgrab(mm);
emit_zext64(dst, ctx);
break;
}
- /* Fallthrough. */
+ fallthrough;
case BPF_ALU | BPF_ADD | BPF_X:
case BPF_ALU | BPF_SUB | BPF_X:
case 16:
emit(rv_slli(lo(rd), lo(rd), 16), ctx);
emit(rv_srli(lo(rd), lo(rd), 16), ctx);
- /* Fallthrough. */
+ fallthrough;
case 32:
if (!ctx->prog->aux->verifier_zext)
emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx);
def_bool y
config GENERIC_LOCKBREAK
- def_bool y if PREEMPTTION
+ def_bool y if PREEMPTION
config PGSTE
def_bool y if KVM
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_TMPFS_INODE64=y
CONFIG_HUGETLBFS=y
CONFIG_CONFIGFS_FS=m
CONFIG_ECRYPT_FS=m
CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_DEBUG_CREDENTIALS=y
CONFIG_RCU_TORTURE_TEST=m
+CONFIG_RCU_REF_SCALE_TEST=m
CONFIG_RCU_CPU_STALL_TIMEOUT=300
# CONFIG_RCU_TRACE is not set
CONFIG_LATENCYTOP=y
CONFIG_SCHED_TRACER=y
CONFIG_FTRACE_SYSCALLS=y
CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_BPF_KPROBE_OVERRIDE=y
CONFIG_HIST_TRIGGERS=y
CONFIG_S390_PTDUMP=y
CONFIG_NOTIFIER_ERROR_INJECTION=m
CONFIG_FAIL_IO_TIMEOUT=y
CONFIG_FAIL_FUTEX=y
CONFIG_FAULT_INJECTION_DEBUG_FS=y
+CONFIG_FAIL_FUNCTION=y
CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
CONFIG_LKDTM=m
CONFIG_TEST_LIST_SORT=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_TMPFS_INODE64=y
CONFIG_HUGETLBFS=y
CONFIG_CONFIGFS_FS=m
CONFIG_ECRYPT_FS=m
CONFIG_TEST_LOCKUP=m
CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_RCU_TORTURE_TEST=m
+CONFIG_RCU_REF_SCALE_TEST=m
CONFIG_RCU_CPU_STALL_TIMEOUT=60
CONFIG_LATENCYTOP=y
CONFIG_BOOTTIME_TRACING=y
CONFIG_SCHED_TRACER=y
CONFIG_FTRACE_SYSCALLS=y
CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_BPF_KPROBE_OVERRIDE=y
CONFIG_HIST_TRIGGERS=y
CONFIG_S390_PTDUMP=y
CONFIG_LKDTM=m
CONFIG_PANIC_ON_OOPS=y
# CONFIG_SCHED_DEBUG is not set
CONFIG_RCU_CPU_STALL_TIMEOUT=60
+# CONFIG_RCU_TRACE is not set
# CONFIG_FTRACE is not set
# CONFIG_RUNTIME_TESTING_MENU is not set
typedef typeof(pcp) pcp_op_T__; \
pcp_op_T__ old__, new__, prev__; \
pcp_op_T__ *ptr__; \
- preempt_disable(); \
+ preempt_disable_notrace(); \
ptr__ = raw_cpu_ptr(&(pcp)); \
prev__ = *ptr__; \
do { \
new__ = old__ op (val); \
prev__ = cmpxchg(ptr__, old__, new__); \
} while (prev__ != old__); \
- preempt_enable(); \
+ preempt_enable_notrace(); \
new__; \
})
typedef typeof(pcp) pcp_op_T__; \
pcp_op_T__ val__ = (val); \
pcp_op_T__ old__, *ptr__; \
- preempt_disable(); \
+ preempt_disable_notrace(); \
ptr__ = raw_cpu_ptr(&(pcp)); \
if (__builtin_constant_p(val__) && \
((szcast)val__ > -129) && ((szcast)val__ < 128)) { \
: [val__] "d" (val__) \
: "cc"); \
} \
- preempt_enable(); \
+ preempt_enable_notrace(); \
}
#define this_cpu_add_4(pcp, val) arch_this_cpu_add(pcp, val, "laa", "asi", int)
typedef typeof(pcp) pcp_op_T__; \
pcp_op_T__ val__ = (val); \
pcp_op_T__ old__, *ptr__; \
- preempt_disable(); \
+ preempt_disable_notrace(); \
ptr__ = raw_cpu_ptr(&(pcp)); \
asm volatile( \
op " %[old__],%[val__],%[ptr__]\n" \
: [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \
: [val__] "d" (val__) \
: "cc"); \
- preempt_enable(); \
+ preempt_enable_notrace(); \
old__ + val__; \
})
typedef typeof(pcp) pcp_op_T__; \
pcp_op_T__ val__ = (val); \
pcp_op_T__ old__, *ptr__; \
- preempt_disable(); \
+ preempt_disable_notrace(); \
ptr__ = raw_cpu_ptr(&(pcp)); \
asm volatile( \
op " %[old__],%[val__],%[ptr__]\n" \
: [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \
: [val__] "d" (val__) \
: "cc"); \
- preempt_enable(); \
+ preempt_enable_notrace(); \
}
#define this_cpu_and_4(pcp, val) arch_this_cpu_to_op(pcp, val, "lan")
typedef typeof(pcp) pcp_op_T__; \
pcp_op_T__ ret__; \
pcp_op_T__ *ptr__; \
- preempt_disable(); \
+ preempt_disable_notrace(); \
ptr__ = raw_cpu_ptr(&(pcp)); \
ret__ = cmpxchg(ptr__, oval, nval); \
- preempt_enable(); \
+ preempt_enable_notrace(); \
ret__; \
})
({ \
typeof(pcp) *ptr__; \
typeof(pcp) ret__; \
- preempt_disable(); \
+ preempt_disable_notrace(); \
ptr__ = raw_cpu_ptr(&(pcp)); \
ret__ = xchg(ptr__, nval); \
- preempt_enable(); \
+ preempt_enable_notrace(); \
ret__; \
})
typeof(pcp1) *p1__; \
typeof(pcp2) *p2__; \
int ret__; \
- preempt_disable(); \
+ preempt_disable_notrace(); \
p1__ = raw_cpu_ptr(&(pcp1)); \
p2__ = raw_cpu_ptr(&(pcp2)); \
ret__ = __cmpxchg_double(p1__, p2__, o1__, o2__, n1__, n2__); \
- preempt_enable(); \
+ preempt_enable_notrace(); \
ret__; \
})
PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
clear_cpu_flag(CIF_NOHZ_DELAY);
- trace_cpu_idle_rcuidle(1, smp_processor_id());
local_irq_save(flags);
/* Call the assembler magic in entry.S */
psw_idle(idle, psw_mask);
local_irq_restore(flags);
- trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
/* Account time spent with enabled wait psw loaded as idle time. */
+ /* XXX seqcount has tracepoints that require RCU */
write_seqcount_begin(&idle->seqcount);
idle_time = idle->clock_idle_exit - idle->clock_idle_enter;
idle->clock_idle_enter = idle->clock_idle_exit = 0ULL;
cb->pc == 1 &&
cb->qc == 0 &&
cb->reserved2 == 0 &&
- cb->key == PAGE_DEFAULT_KEY &&
cb->reserved3 == 0 &&
cb->reserved4 == 0 &&
cb->reserved5 == 0 &&
kfree(data);
return -EINVAL;
}
-
+ /*
+ * Override access key in any case, since user space should
+ * not be able to set it, nor should it care about it.
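+ * The key field is only four bits wide, hence the shift out of
+ * storage-key position.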
+ */
+ ri_cb.key = PAGE_DEFAULT_KEY >> 4;
preempt_disable();
if (!target->thread.ri_cb)
target->thread.ri_cb = data;
cb->k = 1;
cb->ps = 1;
cb->pc = 1;
- cb->key = PAGE_DEFAULT_KEY;
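+ /* cb->key is a four-bit access key, not a full storage key */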
+ cb->key = PAGE_DEFAULT_KEY >> 4;
cb->v = 1;
}
pud = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
if (!pud)
goto out;
+ p4d_populate(&init_mm, p4d, pud);
}
ret = modify_pud_table(p4d, addr, next, add, direct);
if (ret)
}
EXPORT_SYMBOL_GPL(zpci_disable_device);
+void zpci_remove_device(struct zpci_dev *zdev)
+{
+ struct zpci_bus *zbus = zdev->zbus;
+ struct pci_dev *pdev;
+
+ pdev = pci_get_slot(zbus->bus, zdev->devfn);
+ if (pdev) {
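+ /* VFs are removed through their parent PF's SR-IOV machinery */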
+ if (pdev->is_virtfn)
+ return zpci_remove_virtfn(pdev, zdev->vfn);
+ pci_stop_and_remove_bus_device_locked(pdev);
+ }
+}
+
int zpci_create_device(struct zpci_dev *zdev)
{
int rc;
{
struct zpci_dev *zdev = container_of(kref, struct zpci_dev, kref);
- if (zdev->zbus->bus) {
- struct pci_dev *pdev;
-
- pdev = pci_get_slot(zdev->zbus->bus, zdev->devfn);
- if (pdev)
- pci_stop_and_remove_bus_device_locked(pdev);
- }
+ if (zdev->zbus->bus)
+ zpci_remove_device(zdev);
switch (zdev->state) {
case ZPCI_FN_STATE_ONLINE:
{
int rc;
- virtfn->physfn = pci_dev_get(pdev);
rc = pci_iov_sysfs_link(pdev, virtfn, vfid);
- if (rc) {
- pci_dev_put(pdev);
- virtfn->physfn = NULL;
+ if (rc)
return rc;
- }
+
+ virtfn->is_virtfn = 1;
+ virtfn->multifunction = 0;
+ virtfn->physfn = pci_dev_get(pdev);
+
return 0;
}
int vfid = vfn - 1; /* Linux vfids start at 0, vfn starts at 1 */
int rc = 0;
- virtfn->is_virtfn = 1;
- virtfn->multifunction = 0;
- WARN_ON(vfid < 0);
+ if (!zbus->multifunction)
+ return 0;
+
/* If the parent PF for the given VF is also configured in the
* instance, it must be on the same zbus.
* We can then identify the parent PF by checking what
zdev = zbus->function[i];
if (zdev && zdev->is_physfn) {
pdev = pci_get_slot(zbus->bus, zdev->devfn);
+ if (!pdev)
+ continue;
cand_devfn = pci_iov_virtfn_devfn(pdev, vfid);
if (cand_devfn == virtfn->devfn) {
rc = zpci_bus_link_virtfn(pdev, virtfn, vfid);
+ /* balance pci_get_slot() */
+ pci_dev_put(pdev);
break;
}
+ /* balance pci_get_slot() */
+ pci_dev_put(pdev);
}
}
return rc;
static inline int zpci_bus_setup_virtfn(struct zpci_bus *zbus,
struct pci_dev *virtfn, int vfn)
{
- virtfn->is_virtfn = 1;
- virtfn->multifunction = 0;
return 0;
}
#endif
+void pcibios_bus_add_device(struct pci_dev *pdev)
+{
+ struct zpci_dev *zdev = to_zpci(pdev);
+
+ /*
+ * With pdev->no_vf_scan the common PCI probing code does not
+ * perform PF/VF linking.
+ */
+ if (zdev->vfn)
+ zpci_bus_setup_virtfn(zdev->zbus, pdev, zdev->vfn);
+
+}
+
static int zpci_bus_add_device(struct zpci_bus *zbus, struct zpci_dev *zdev)
{
struct pci_bus *bus;
}
pdev = pci_scan_single_device(bus, zdev->devfn);
- if (pdev) {
- if (!zdev->is_physfn) {
- rc = zpci_bus_setup_virtfn(zbus, pdev, zdev->vfn);
- if (rc)
- goto failed_with_pdev;
- }
+ if (pdev)
pci_bus_add_device(pdev);
- }
- return 0;
-failed_with_pdev:
- pci_stop_and_remove_bus_device(pdev);
- pci_dev_put(pdev);
- return rc;
+ return 0;
}
static void zpci_bus_add_devices(struct zpci_bus *zbus)
return (devfn >= ZPCI_FUNCTIONS_PER_BUS) ? NULL : zbus->function[devfn];
}
+
+#ifdef CONFIG_PCI_IOV
+static inline void zpci_remove_virtfn(struct pci_dev *pdev, int vfn)
+{
+ pci_lock_rescan_remove();
+ /* Linux vfids start at 0, vfn starts at 1 */
+ pci_iov_remove_virtfn(pdev->physfn, vfn - 1);
+ pci_unlock_rescan_remove();
+}
+#else /* CONFIG_PCI_IOV */
+static inline void zpci_remove_virtfn(struct pci_dev *pdev, int vfn) {}
+#endif /* CONFIG_PCI_IOV */
ret = clp_add_pci_device(ccdf->fid, ccdf->fh, 1);
break;
}
+ /* the configuration request may be stale */
+ if (zdev->state != ZPCI_FN_STATE_STANDBY)
+ break;
zdev->fh = ccdf->fh;
zdev->state = ZPCI_FN_STATE_CONFIGURED;
ret = zpci_enable_device(zdev);
if (!zdev)
break;
if (pdev)
- pci_stop_and_remove_bus_device_locked(pdev);
+ zpci_remove_device(zdev);
ret = zpci_disable_device(zdev);
if (ret)
/* Give the driver a hint that the function is
* already unusable. */
pdev->error_state = pci_channel_io_perm_failure;
- pci_stop_and_remove_bus_device_locked(pdev);
+ zpci_remove_device(zdev);
}
zdev->state = ZPCI_FN_STATE_STANDBY;
case EARLY_PLATFORM_ID_ERROR:
pr_warn("%s: unable to parse %s parameter\n",
class_str, epdrv->pdrv->driver.name);
- /* fall-through */
+ fallthrough;
case EARLY_PLATFORM_ID_UNSET:
match = NULL;
break;
pr_cont("xd%d", rn & ~1);
break;
}
- /* else, fall through */
+ fallthrough;
case D_REG_N:
pr_cont("dr%d", rn);
break;
pr_cont("xd%d", rm & ~1);
break;
}
- /* else, fall through */
+ fallthrough;
case D_REG_M:
pr_cont("dr%d", rm);
break;
ptr = &remcomInBuffer[1];
if (kgdb_hex2long(&ptr, &addr))
linux_regs->pc = addr;
- /* fallthrough */
+ fallthrough;
case 'D':
case 'k':
atomic_set(&kgdb_cpu_doing_single_step, -1);
case -ERESTARTSYS:
if (!(sa->sa_flags & SA_RESTART))
goto no_system_call_restart;
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOINTR:
regs->regs[0] = save_r0;
regs->pc -= instruction_size(__raw_readw(regs->pc - 4));
__auxio_sbus_set_lte(on);
break;
case AUXIO_TYPE_EBUS:
- /* FALL-THROUGH */
default:
break;
}
else
return 5;
}
- /* Fallthrough */
+ fallthrough;
default:
return 4;
}
linux_regs->pc = addr;
linux_regs->npc = addr + 4;
}
- /* fall through */
+ fallthrough;
case 'D':
case 'k':
linux_regs->tpc = addr;
linux_regs->tnpc = addr + 4;
}
- /* fall through */
+ fallthrough;
case 'D':
case 'k':
* counter overflow interrupt so we can't make use of
* their hardware currently.
*/
- /* fallthrough */
+ fallthrough;
default:
err = -ENODEV;
goto out_unregister;
case PROMDEV_TTYB:
skip = 1;
- /* FALLTHRU */
+ fallthrough;
case PROMDEV_TTYA:
type = "serial";
case ERESTARTSYS:
if (!(sa->sa_flags & SA_RESTART))
goto no_system_call_restart;
- /* fallthrough */
+ fallthrough;
case ERESTARTNOINTR:
regs->u_regs[UREG_I0] = orig_i0;
regs->tpc -= 4;
regs->tpc -= 4;
regs->tnpc -= 4;
pt_regs_clear_syscall(regs);
- /* fall through */
+ fallthrough;
case ERESTART_RESTARTBLOCK:
regs->u_regs[UREG_G1] = __NR_restart_syscall;
regs->tpc -= 4;
case ERESTARTSYS:
if (!(sa->sa_flags & SA_RESTART))
goto no_system_call_restart;
- /* fallthrough */
+ fallthrough;
case ERESTARTNOINTR:
regs->u_regs[UREG_I0] = orig_i0;
regs->pc -= 4;
regs->pc -= 4;
regs->npc -= 4;
pt_regs_clear_syscall(regs);
- /* fall through */
+ fallthrough;
case ERESTART_RESTARTBLOCK:
regs->u_regs[UREG_G1] = __NR_restart_syscall;
regs->pc -= 4;
case ERESTARTSYS:
if (!(sa->sa_flags & SA_RESTART))
goto no_system_call_restart;
- /* fallthrough */
+ fallthrough;
case ERESTARTNOINTR:
regs->u_regs[UREG_I0] = orig_i0;
regs->tpc -= 4;
regs->tpc -= 4;
regs->tnpc -= 4;
pt_regs_clear_syscall(regs);
- /* fall through */
+ fallthrough;
case ERESTART_RESTARTBLOCK:
regs->u_regs[UREG_G1] = __NR_restart_syscall;
regs->tpc -= 4;
*pfsr |= (6 << 14);
return 0; /* simulate invalid_fp_register exception */
}
- /* fall through */
+ fallthrough;
case 2:
if (freg & 1) { /* doublewords must have bit 5 zeroed */
*pfsr |= (6 << 14);
*pfsr |= (6 << 14);
return 0; /* simulate invalid_fp_register exception */
}
- /* fall through */
+ fallthrough;
case 2:
if (freg & 1) { /* doublewords must have bit 5 zeroed */
*pfsr |= (6 << 14);
*pfsr |= (6 << 14);
return 0; /* simulate invalid_fp_register exception */
}
- /* fall through */
+ fallthrough;
case 2:
if (freg & 1) { /* doublewords must have bit 5 zeroed */
*pfsr |= (6 << 14);
return 0;
}
- /* fall through */
+ fallthrough;
case 1:
rd = (void *)&fregs[freg];
break;
} else {
emit_loadimm(K, r_A);
}
- /* Fallthrough */
+ fallthrough;
case BPF_RET | BPF_A:
if (seen_or_pass0) {
if (i != flen - 1) {
PT_REGS_SYSCALL_RET(regs) = -EINTR;
break;
}
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOINTR:
PT_REGS_RESTART_SYSCALL(regs);
PT_REGS_ORIG_SYSCALL(regs) = PT_REGS_SYSCALL_NR(regs);
/* else */
state = st_wordcmp;
opptr = option;
- /* fall through */
+ fallthrough;
case st_wordcmp:
if (c == '=' && !*opptr) {
state = st_wordcmp;
opptr = option;
wstart = pos;
- /* fall through */
+ fallthrough;
case st_wordcmp:
if (!*opptr)
}
*size = 0;
}
- /* Fall through */
+ fallthrough;
default:
/*
* If w/o offset, only size specified, memmap=nn[KMG] has the
#define STATIC static
/*
- * Use normal definitions of mem*() from string.c. There are already
- * included header files which expect a definition of memset() and by
- * the time we define memset macro, it is too late.
+ * Provide definitions of memzero and memmove as some of the decompressors will
+ * try to define their own functions if these are not defined as macros.
*/
-#undef memcpy
-#undef memset
#define memzero(s, n) memset((s), 0, (n))
#define memmove memmove
void *memset(void *dst, int c, size_t len);
int memcmp(const void *s1, const void *s2, size_t len);
-/*
- * Access builtin version by default. If one needs to use optimized version,
- * do "undef memcpy" in .c file and link against right string.c
- */
+/* Access builtin version by default. */
#define memcpy(d,s,l) __builtin_memcpy(d,s,l)
#define memset(d,c,l) __builtin_memset(d,c,l)
#define memcmp __builtin_memcmp
* Fetch the per-CPU GSBASE value for this processor and put it in @reg.
* We normally use %gs for accessing per-CPU data, but we are setting up
* %gs here and obviously can not use %gs itself to access per-CPU data.
+ *
+ * Do not use RDPID, because KVM loads guest's TSC_AUX on vm-entry and
+ * may not restore the host's value until the CPU returns to userspace.
+ * Thus the kernel would consume a guest's TSC_AUX if an NMI arrives
+ * while running KVM's run loop.
*/
.macro GET_PERCPU_BASE reg:req
- ALTERNATIVE \
- "LOAD_CPU_AND_NODE_SEG_LIMIT \reg", \
- "RDPID \reg", \
- X86_FEATURE_RDPID
+ LOAD_CPU_AND_NODE_SEG_LIMIT \reg
andq $VDSO_CPUNODE_MASK, \reg
movq __per_cpu_offset(, \reg, 8), \reg
.endm
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static __always_inline unsigned int syscall_32_enter(struct pt_regs *regs)
{
- unsigned int nr = (unsigned int)regs->orig_ax;
-
if (IS_ENABLED(CONFIG_IA32_EMULATION))
current_thread_info()->status |= TS_COMPAT;
- /*
- * Subtlety here: if ptrace pokes something larger than 2^32-1 into
- * orig_ax, the unsigned int return value truncates it. This may
- * or may not be necessary, but it matches the old asm behavior.
- */
- return (unsigned int)syscall_enter_from_user_mode(regs, nr);
+
+ return (unsigned int)regs->orig_ax;
}
/*
{
unsigned int nr = syscall_32_enter(regs);
+ /*
+ * Subtlety here: if ptrace pokes something larger than 2^32-1 into
+ * orig_ax, the unsigned int return value truncates it. This may
+ * or may not be necessary, but it matches the old asm behavior.
+ */
+ nr = (unsigned int)syscall_enter_from_user_mode(regs, nr);
+
do_syscall_32_irqs_on(regs, nr);
syscall_exit_to_user_mode(regs);
}
static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
{
- unsigned int nr = syscall_32_enter(regs);
+ unsigned int nr = syscall_32_enter(regs);
int res;
+ /*
+ * This cannot use syscall_enter_from_user_mode() as it has to
+ * fetch EBP before invoking any of the syscall entry work
+ * functions.
+ */
+ syscall_enter_from_user_mode_prepare(regs);
+
instrumentation_begin();
/* Fetch EBP from where the vDSO stashed it. */
if (IS_ENABLED(CONFIG_X86_64)) {
return false;
}
+ /* The cast truncates any ptrace-induced syscall nr > 2^32 - 1 */
+ nr = (unsigned int)syscall_enter_from_user_mode_work(regs, nr);
+
/* Now this is just like a normal syscall. */
do_syscall_32_irqs_on(regs, nr);
syscall_exit_to_user_mode(regs);
SYM_CODE_END(\name)
.endm
-#ifdef CONFIG_TRACE_IRQFLAGS
- THUNK trace_hardirqs_on_thunk,trace_hardirqs_on_caller,1
- THUNK trace_hardirqs_off_thunk,trace_hardirqs_off_caller,1
-#endif
-
#ifdef CONFIG_PREEMPTION
THUNK preempt_schedule_thunk, preempt_schedule
THUNK preempt_schedule_notrace_thunk, preempt_schedule_notrace
case INTEL_FAM6_CORE2_MEROM:
x86_add_quirk(intel_clovertown_quirk);
- /* fall through */
+ fallthrough;
case INTEL_FAM6_CORE2_MEROM_L:
case INTEL_FAM6_CORE2_PENRYN:
case INTEL_FAM6_SKYLAKE_X:
pmem = true;
- /* fall through */
+ fallthrough;
case INTEL_FAM6_SKYLAKE_L:
case INTEL_FAM6_SKYLAKE:
case INTEL_FAM6_KABYLAKE_L:
case INTEL_FAM6_ICELAKE_X:
case INTEL_FAM6_ICELAKE_D:
pmem = true;
- /* fall through */
+ fallthrough;
case INTEL_FAM6_ICELAKE_L:
case INTEL_FAM6_ICELAKE:
case INTEL_FAM6_TIGERLAKE_L:
ret = X86_BR_ZERO_CALL;
break;
}
- /* fall through */
+ fallthrough;
case 0x9a: /* call far absolute */
ret = X86_BR_CALL;
break;
INTEL_UNCORE_EVENT_DESC(data_writes.scale, "6.103515625e-5"),
INTEL_UNCORE_EVENT_DESC(data_writes.unit, "MiB"),
+ INTEL_UNCORE_EVENT_DESC(gt_requests, "event=0x03"),
+ INTEL_UNCORE_EVENT_DESC(gt_requests.scale, "6.103515625e-5"),
+ INTEL_UNCORE_EVENT_DESC(gt_requests.unit, "MiB"),
+
+ INTEL_UNCORE_EVENT_DESC(ia_requests, "event=0x04"),
+ INTEL_UNCORE_EVENT_DESC(ia_requests.scale, "6.103515625e-5"),
+ INTEL_UNCORE_EVENT_DESC(ia_requests.unit, "MiB"),
+
+ INTEL_UNCORE_EVENT_DESC(io_requests, "event=0x05"),
+ INTEL_UNCORE_EVENT_DESC(io_requests.scale, "6.103515625e-5"),
+ INTEL_UNCORE_EVENT_DESC(io_requests.unit, "MiB"),
+
{ /* end: all zeroes */ },
};
#define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE 0x5054
#define SNB_UNCORE_PCI_IMC_CTR_BASE SNB_UNCORE_PCI_IMC_DATA_READS_BASE
+/* BW break down- legacy counters */
+#define SNB_UNCORE_PCI_IMC_GT_REQUESTS 0x3
+#define SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE 0x5040
+#define SNB_UNCORE_PCI_IMC_IA_REQUESTS 0x4
+#define SNB_UNCORE_PCI_IMC_IA_REQUESTS_BASE 0x5044
+#define SNB_UNCORE_PCI_IMC_IO_REQUESTS 0x5
+#define SNB_UNCORE_PCI_IMC_IO_REQUESTS_BASE 0x5048
+
enum perf_snb_uncore_imc_freerunning_types {
- SNB_PCI_UNCORE_IMC_DATA = 0,
+ SNB_PCI_UNCORE_IMC_DATA_READS = 0,
+ SNB_PCI_UNCORE_IMC_DATA_WRITES,
+ SNB_PCI_UNCORE_IMC_GT_REQUESTS,
+ SNB_PCI_UNCORE_IMC_IA_REQUESTS,
+ SNB_PCI_UNCORE_IMC_IO_REQUESTS,
+
SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX,
};
static struct freerunning_counters snb_uncore_imc_freerunning[] = {
- [SNB_PCI_UNCORE_IMC_DATA] = { SNB_UNCORE_PCI_IMC_DATA_READS_BASE, 0x4, 0x0, 2, 32 },
+ [SNB_PCI_UNCORE_IMC_DATA_READS] = { SNB_UNCORE_PCI_IMC_DATA_READS_BASE,
+ 0x0, 0x0, 1, 32 },
+ [SNB_PCI_UNCORE_IMC_DATA_WRITES] = { SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE,
+ 0x0, 0x0, 1, 32 },
+ [SNB_PCI_UNCORE_IMC_GT_REQUESTS] = { SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE,
+ 0x0, 0x0, 1, 32 },
+ [SNB_PCI_UNCORE_IMC_IA_REQUESTS] = { SNB_UNCORE_PCI_IMC_IA_REQUESTS_BASE,
+ 0x0, 0x0, 1, 32 },
+ [SNB_PCI_UNCORE_IMC_IO_REQUESTS] = { SNB_UNCORE_PCI_IMC_IO_REQUESTS_BASE,
+ 0x0, 0x0, 1, 32 },
};
static struct attribute *snb_uncore_imc_formats_attr[] = {
base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE;
idx = UNCORE_PMC_IDX_FREERUNNING;
break;
+ case SNB_UNCORE_PCI_IMC_GT_REQUESTS:
+ base = SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE;
+ idx = UNCORE_PMC_IDX_FREERUNNING;
+ break;
+ case SNB_UNCORE_PCI_IMC_IA_REQUESTS:
+ base = SNB_UNCORE_PCI_IMC_IA_REQUESTS_BASE;
+ idx = UNCORE_PMC_IDX_FREERUNNING;
+ break;
+ case SNB_UNCORE_PCI_IMC_IO_REQUESTS:
+ base = SNB_UNCORE_PCI_IMC_IO_REQUESTS_BASE;
+ idx = UNCORE_PMC_IDX_FREERUNNING;
+ break;
default:
return -EINVAL;
}
static struct intel_uncore_type snb_uncore_imc = {
.name = "imc",
- .num_counters = 2,
+ .num_counters = 5,
.num_boxes = 1,
.num_freerunning_types = SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX,
.mmio_map_size = SNB_UNCORE_PCI_IMC_MAP_SIZE,
kernel_fpu_end(); \
})
-
#define arch_efi_call_virt(p, f, args...) p->f(args)
-#define efi_ioremap(addr, size, type, attr) ioremap_cache(addr, size)
-
#else /* !CONFIG_X86_32 */
#define EFI_LOADER_SIGNATURE "EL64"
kernel_fpu_end(); \
})
-extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size,
- u32 type, u64 attribute);
-
#ifdef CONFIG_KASAN
/*
* CONFIG_KASAN may redefine memset to __memset. __memset function is present
#endif /* CONFIG_X86_32 */
extern struct efi_scratch efi_scratch;
-extern void __init efi_set_executable(efi_memory_desc_t *md, bool executable);
extern int __init efi_memblock_x86_reserve_range(void);
extern void __init efi_print_memmap(void);
-extern void __init efi_memory_uc(u64 addr, unsigned long size);
extern void __init efi_map_region(efi_memory_desc_t *md);
extern void __init efi_map_region_fixed(efi_memory_desc_t *md);
extern void efi_sync_low_kernel_mappings(void);
extern int __init efi_alloc_page_tables(void);
extern int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages);
-extern void __init old_map_region(efi_memory_desc_t *md);
-extern void __init runtime_code_page_mkexec(void);
extern void __init efi_runtime_update_mappings(void);
extern void __init efi_dump_pagetable(void);
extern void __init efi_apply_memmap_quirks(void);
* state, not the interrupt state as imagined by Xen.
*/
unsigned long flags = native_save_fl();
- WARN_ON_ONCE(flags & (X86_EFLAGS_AC | X86_EFLAGS_DF |
- X86_EFLAGS_NT));
+ unsigned long mask = X86_EFLAGS_DF | X86_EFLAGS_NT;
+
+ /*
+ * For !SMAP hardware we patch out CLAC on entry.
+ */
+ if (boot_cpu_has(X86_FEATURE_SMAP) ||
+ (IS_ENABLED(CONFIG_64BIT) && boot_cpu_has(X86_FEATURE_XENPV)))
+ mask |= X86_EFLAGS_AC;
+
+ WARN_ON_ONCE(flags & mask);
/* We think we came from user mode. Make sure pt_regs agrees. */
WARN_ON_ONCE(!user_mode(regs));
_ASM_EXTABLE(666b, 667b)
#define KVM_ARCH_WANT_MMU_NOTIFIER
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end);
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
+ unsigned flags);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
}
void leave_mm(int cpu);
+#define leave_mm leave_mm
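+/* self-referential define so generic code can detect the arch override via #ifndef leave_mm */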
#endif /* _ASM_X86_MMU_H */
static const unsigned int argument_offs[] = {
#ifdef __i386__
offsetof(struct pt_regs, ax),
- offsetof(struct pt_regs, cx),
offsetof(struct pt_regs, dx),
+ offsetof(struct pt_regs, cx),
#define NR_REG_ARGUMENTS 3
#else
offsetof(struct pt_regs, di),
return;
}
- /* fall through */
+ fallthrough;
default:
#ifdef CONFIG_X86_64
return IOAPIC_POL_HIGH;
case MP_IRQPOL_RESERVED:
pr_warn("IOAPIC: Invalid polarity: 2, defaulting to low\n");
- /* fall through */
+ fallthrough;
case MP_IRQPOL_ACTIVE_LOW:
default: /* Pointless default required due to gcc stupidity */
return IOAPIC_POL_LOW;
return IOAPIC_EDGE;
case MP_IRQTRIG_RESERVED:
pr_warn("IOAPIC: Invalid trigger mode 2 defaulting to level\n");
- /* fall through */
+ fallthrough;
case MP_IRQTRIG_LEVEL:
default: /* Pointless default required due to gcc stupidity */
return IOAPIC_LEVEL;
break;
}
/* P4 and above */
- /* fall through */
+ fallthrough;
case X86_VENDOR_HYGON:
case X86_VENDOR_AMD:
def_to_bigsmp = 1;
apicd->move_in_progress = true;
apicd->prev_vector = apicd->vector;
apicd->prev_cpu = apicd->cpu;
+ WARN_ON_ONCE(apicd->cpu == newcpu);
} else {
irq_matrix_free(vector_matrix, apicd->cpu, apicd->vector,
managed);
__send_cleanup_vector(apicd);
}
-static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
+void irq_complete_move(struct irq_cfg *cfg)
{
struct apic_chip_data *apicd;
if (likely(!apicd->move_in_progress))
return;
- if (vector == apicd->vector && apicd->cpu == smp_processor_id())
+ /*
+ * If the interrupt arrived on the new target CPU, cleanup the
+ * vector on the old target CPU. A vector check is not required
+ * because an interrupt can never move from one vector to another
+ * on the same CPU.
+ */
+ if (apicd->cpu == smp_processor_id())
__send_cleanup_vector(apicd);
}
-void irq_complete_move(struct irq_cfg *cfg)
-{
- __irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
-}
-
/*
* Called from fixup_irqs() with @desc->lock held and interrupts disabled.
*/
switch (leaf) {
case 1:
l1 = &l1i;
- /* fall through */
+ fallthrough;
case 0:
if (!l1->val)
return;
* calling irq_enter, but the necessary
* machinery isn't exported currently.
*/
- /*FALL THROUGH*/
+ fallthrough;
case MCJ_CTX_PROCESS:
raise_exception(m, NULL);
break;
if (!atomic_sub_return(1, &cmci_storm_on_cpus))
pr_notice("CMCI storm subsided: switching to interrupt mode\n");
- /* FALLTHROUGH */
+ fallthrough;
case CMCI_STORM_SUBSIDED:
/*
case 7:
if (size < 0x40)
break;
- /* Else, fall through */
+ fallthrough;
case 6:
case 5:
case 4:
hw->len = X86_BREAKPOINT_LEN_X;
return 0;
}
- /* fall through */
+ fallthrough;
default:
return -EINVAL;
}
ptr = &remcomInBuffer[1];
if (kgdb_hex2long(&ptr, &addr))
linux_regs->ip = addr;
- /* fall through */
+ fallthrough;
case 'D':
case 'k':
/* clear the trace bit */
* a system call which should be ignored
*/
return NOTIFY_DONE;
- /* fall through */
+ fallthrough;
default:
if (user_mode(regs))
return NOTIFY_DONE;
case 2:
if (i == 0 || i == 13)
continue; /* IRQ0 & IRQ13 not connected */
- /* fall through */
+ fallthrough;
default:
if (i == 2)
continue; /* IRQ2 is never connected */
default:
pr_err("???\nUnknown standard configuration %d\n",
mpc_default_type);
- /* fall through */
+ fallthrough;
case 1:
case 5:
memcpy(bus.bustype, "ISA ", 6);
*/
void __cpuidle default_idle(void)
{
- trace_cpu_idle_rcuidle(1, smp_processor_id());
safe_halt();
- trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}
#if defined(CONFIG_APM_MODULE) || defined(CONFIG_HALTPOLL_CPUIDLE_MODULE)
EXPORT_SYMBOL(default_idle);
static __cpuidle void mwait_idle(void)
{
if (!current_set_polling_and_test()) {
- trace_cpu_idle_rcuidle(1, smp_processor_id());
if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
mb(); /* quirk */
clflush((void *)&current_thread_info()->flags);
__sti_mwait(0, 0);
else
local_irq_enable();
- trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
} else {
local_irq_enable();
}
case offsetof(struct user_regs_struct, ss):
if (unlikely(value == 0))
return -EIO;
- /* Else, fall through */
+ fallthrough;
default:
*pt_regs_access(task_pt_regs(task), offset) = value;
case BOOT_CF9_FORCE:
port_cf9_safe = true;
- /* Fall through */
+ fallthrough;
case BOOT_CF9_SAFE:
if (port_cf9_safe) {
regs->ax = -EINTR;
break;
}
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOINTR:
regs->ax = regs->orig_ax;
regs->ip -= 2;
if (ret)
return ret;
- /*
- * Disable the local APIC. Otherwise IPI broadcasts will reach
- * it. It still responds normally to INIT, NMI, SMI, and SIPI
- * messages.
- */
- apic_soft_disable();
cpu_disable_common();
+ /*
+ * Disable the local APIC. Otherwise IPI broadcasts will reach
+ * it. It still responds normally to INIT, NMI, SMI, and SIPI
+ * messages.
+ *
+ * Disabling the APIC must happen after cpu_disable_common()
+ * which invokes fixup_irqs().
+ *
+ * Disabling the APIC preserves already set bits in IRR, but
+ * an interrupt arriving after disabling the local APIC does not
+ * set the corresponding IRR bit.
+ *
+ * fixup_irqs() scans IRR for set bits so it can raise a not
+ * yet handled interrupt on the new destination CPU via an IPI
+ * but obviously it can't do so for IRR bits which are not set.
+ * IOW, interrupts arriving after disabling the local APIC will
+ * be lost.
+ */
+ apic_soft_disable();
+
return 0;
}
#endif
}
-static __always_inline void debug_enter(unsigned long *dr6, unsigned long *dr7)
+static __always_inline unsigned long debug_read_clear_dr6(void)
{
- /*
- * Disable breakpoints during exception handling; recursive exceptions
- * are exceedingly 'fun'.
- *
- * Since this function is NOKPROBE, and that also applies to
- * HW_BREAKPOINT_X, we can't hit a breakpoint before this (XXX except a
- * HW_BREAKPOINT_W on our stack)
- *
- * Entry text is excluded for HW_BP_X and cpu_entry_area, which
- * includes the entry stack is excluded for everything.
- */
- *dr7 = local_db_save();
+ unsigned long dr6;
/*
* The Intel SDM says:
*
* Keep it simple: clear DR6 immediately.
*/
- get_debugreg(*dr6, 6);
+ get_debugreg(dr6, 6);
set_debugreg(0, 6);
/* Filter out all the reserved bits which are preset to 1 */
- *dr6 &= ~DR6_RESERVED;
-}
+ dr6 &= ~DR6_RESERVED;
-static __always_inline void debug_exit(unsigned long dr7)
-{
- local_db_restore(dr7);
+ return dr6;
}
/*
static __always_inline void exc_debug_kernel(struct pt_regs *regs,
unsigned long dr6)
{
+ /*
+ * Disable breakpoints during exception handling; recursive exceptions
+ * are exceedingly 'fun'.
+ *
+ * Since this function is NOKPROBE, and that also applies to
+ * HW_BREAKPOINT_X, we can't hit a breakpoint before this (XXX except a
+ * HW_BREAKPOINT_W on our stack)
+ *
+ * Entry text is excluded for HW_BP_X, and cpu_entry_area, which
+ * includes the entry stack, is excluded for everything.
+ */
+ unsigned long dr7 = local_db_save();
bool irq_state = idtentry_enter_nmi(regs);
instrumentation_begin();
instrumentation_end();
idtentry_exit_nmi(regs, irq_state);
+
+ local_db_restore(dr7);
}
static __always_inline void exc_debug_user(struct pt_regs *regs,
*/
WARN_ON_ONCE(!user_mode(regs));
+ /*
+ * NB: We can't easily clear DR7 here because
+ * idtentry_exit_to_usermode() can invoke ptrace, schedule, access
+ * user memory, etc. This means that a recursive #DB is possible. If
+ * this happens, that #DB will hit exc_debug_kernel() and clear DR7.
+ * Since we're not on the IST stack right now, everything will be
+ * fine.
+ */
+
irqentry_enter_from_user_mode(regs);
instrumentation_begin();
/* IST stack entry */
DEFINE_IDTENTRY_DEBUG(exc_debug)
{
- unsigned long dr6, dr7;
-
- debug_enter(&dr6, &dr7);
- exc_debug_kernel(regs, dr6);
- debug_exit(dr7);
+ exc_debug_kernel(regs, debug_read_clear_dr6());
}
/* User entry, runs on regular task stack */
DEFINE_IDTENTRY_DEBUG_USER(exc_debug)
{
- unsigned long dr6, dr7;
-
- debug_enter(&dr6, &dr7);
- exc_debug_user(regs, dr6);
- debug_exit(dr7);
+ exc_debug_user(regs, debug_read_clear_dr6());
}
#else
/* 32 bit does not have separate entry points. */
DEFINE_IDTENTRY_RAW(exc_debug)
{
- unsigned long dr6, dr7;
-
- debug_enter(&dr6, &dr7);
+ unsigned long dr6 = debug_read_clear_dr6();
if (user_mode(regs))
exc_debug_user(regs, dr6);
else
exc_debug_kernel(regs, dr6);
-
- debug_exit(dr7);
}
#endif
* OPCODE1() of the "short" jmp which checks the same condition.
*/
opc1 = OPCODE2(insn) - 0x10;
- /* fall through */
+ fallthrough;
default:
if (!is_cond_jmp_opcode(opc1))
return -ENOSYS;
fix_ip_or_call = 0;
break;
}
- /* fall through */
+ fallthrough;
default:
riprel_analyze(auprobe, &insn);
}
case 0xa4: /* movsb */
case 0xa5: /* movsd/w */
*reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
- /* fall through */
+ fallthrough;
case 0xaa: /* stosb */
case 0xab: /* stosd/w */
*reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
ret = kvm_hvcall_signal_event(vcpu, fast, ingpa);
if (ret != HV_STATUS_INVALID_PORT_ID)
break;
- /* fall through - maybe userspace knows this conn_id. */
+ fallthrough; /* maybe userspace knows this conn_id */
case HVCALL_POST_MESSAGE:
/* don't bother userspace if it has no way to handle it */
if (unlikely(rep || !vcpu_to_synic(vcpu)->active)) {
switch (ue->u.irqchip.irqchip) {
case KVM_IRQCHIP_PIC_SLAVE:
e->irqchip.pin += PIC_NUM_PINS / 2;
- /* fall through */
+ fallthrough;
case KVM_IRQCHIP_PIC_MASTER:
if (ue->u.irqchip.pin >= PIC_NUM_PINS / 2)
return -EINVAL;
switch (delivery_mode) {
case APIC_DM_LOWEST:
vcpu->arch.apic_arb_prio++;
- /* fall through */
+ fallthrough;
case APIC_DM_FIXED:
if (unlikely(trig_mode && !level))
break;
break;
case APIC_TASKPRI:
report_tpr_access(apic, false);
- /* fall thru */
+ fallthrough;
default:
val = kvm_lapic_get_reg(apic, offset);
break;
case APIC_LVT0:
apic_manage_nmi_watchdog(apic, val);
- /* fall through */
+ fallthrough;
case APIC_LVTTHMR:
case APIC_LVTPC:
case APIC_LVT1:
return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler);
}
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
+ unsigned flags)
{
return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);
}
rsvd_bits(maxphyaddr, 51);
rsvd_check->rsvd_bits_mask[1][4] =
rsvd_check->rsvd_bits_mask[0][4];
- /* fall through */
+ fallthrough;
case PT64_ROOT_4LEVEL:
rsvd_check->rsvd_bits_mask[0][3] = exb_bit_rsvd |
nonleaf_bit8_rsvd | rsvd_bits(7, 7) |
case MSR_IA32_APICBASE:
if (kvm_vcpu_apicv_active(vcpu))
avic_update_vapic_bar(to_svm(vcpu), data);
- /* Fall through */
+ fallthrough;
default:
return kvm_set_msr_common(vcpu, msr);
}
vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
return false;
- /* fall through */
+ fallthrough;
case DB_VECTOR:
return !(vcpu->guest_debug &
(KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP));
}
kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1 | DR6_RTM;
kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
- /* fall through */
+ fallthrough;
case BP_VECTOR:
/*
* Update instruction length as we may reinject #BP from
error_code =
vmcs_read32(IDT_VECTORING_ERROR_CODE);
}
- /* fall through */
+ fallthrough;
case INTR_TYPE_SOFT_EXCEPTION:
kvm_clear_exception_queue(vcpu);
break;
* keeping track of global entries in shadow page tables.
*/
- /* fall-through */
+ fallthrough;
case INVPCID_TYPE_ALL_INCL_GLOBAL:
kvm_mmu_unload(vcpu);
return kvm_skip_emulated_instruction(vcpu);
break;
case INTR_TYPE_SOFT_EXCEPTION:
vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
- /* fall through */
+ fallthrough;
case INTR_TYPE_HARD_EXCEPTION:
if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) {
u32 err = vmcs_read32(error_code_field);
break;
case INTR_TYPE_SOFT_INTR:
vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
- /* fall through */
+ fallthrough;
case INTR_TYPE_EXT_INTR:
kvm_queue_interrupt(vcpu, vector, type == INTR_TYPE_SOFT_INTR);
break;
{
unsigned long old_cr4 = kvm_read_cr4(vcpu);
unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
- X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE;
+ X86_CR4_SMEP;
if (kvm_valid_cr4(vcpu, cr4))
return 1;
vcpu->arch.eff_db[dr] = val;
break;
case 4:
- /* fall through */
case 6:
if (!kvm_dr6_valid(val))
return -1; /* #GP */
vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu);
break;
case 5:
- /* fall through */
default: /* 7 */
if (!kvm_dr7_valid(val))
return -1; /* #GP */
*val = vcpu->arch.db[array_index_nospec(dr, size)];
break;
case 4:
- /* fall through */
case 6:
*val = vcpu->arch.dr6;
break;
case 5:
- /* fall through */
default: /* 7 */
*val = vcpu->arch.dr7;
break;
case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1:
- pr = true; /* fall through */
+ pr = true;
+ fallthrough;
case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1:
if (kvm_pmu_is_valid_msr(vcpu, msr))
case KVM_CAP_HYPERV_SYNIC2:
if (cap->args[0])
return -EINVAL;
- /* fall through */
+ fallthrough;
case KVM_CAP_HYPERV_SYNIC:
if (!irqchip_in_kernel(vcpu->kvm))
vcpu->arch.pv.pv_unhalted = false;
vcpu->arch.mp_state =
KVM_MP_STATE_RUNNABLE;
- /* fall through */
+ fallthrough;
case KVM_MP_STATE_RUNNABLE:
vcpu->arch.apf.halted = false;
break;
void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code)
{
struct x86_exception fault;
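+ /* gva_to_gpa() takes an access mask, not the raw page-fault error code */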
+ u32 access = error_code &
+ (PFERR_WRITE_MASK | PFERR_FETCH_MASK | PFERR_USER_MASK);
if (!(error_code & PFERR_PRESENT_MASK) ||
- vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, error_code, &fault) != UNMAPPED_GVA) {
+ vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, &fault) != UNMAPPED_GVA) {
/*
* If vcpu->arch.walk_mmu->gva_to_gpa succeeded, the page
* tables probably do not match the TLB. Just proceed
CFLAGS_REMOVE_cmdline.o = -pg
endif
-CFLAGS_cmdline.o := -fno-stack-protector
+CFLAGS_cmdline.o := -fno-stack-protector -fno-jump-tables
endif
inat_tables_script = $(srctree)/arch/x86/tools/gen-insn-attr-x86.awk
state = st_wordcmp;
opptr = option;
wstart = pos;
- /* fall through */
+ fallthrough;
case st_wordcmp:
if (!*opptr) {
break;
}
state = st_wordskip;
- /* fall through */
+ fallthrough;
case st_wordskip:
if (!c)
state = st_wordcmp;
opptr = option;
- /* fall through */
+ fallthrough;
case st_wordcmp:
if ((c == '=') && !*opptr) {
break;
}
state = st_wordskip;
- /* fall through */
+ fallthrough;
case st_wordskip:
if (myisspace(c))
if (insn->addr_bytes == 2)
return -EINVAL;
- /* fall through */
+ fallthrough;
case -EDOM:
case offsetof(struct pt_regs, bx):
case INAT_SEG_REG_GS:
return vm86regs->gs;
case INAT_SEG_REG_IGNORE:
- /* fall through */
default:
return -EINVAL;
}
*/
return get_user_gs(regs);
case INAT_SEG_REG_IGNORE:
- /* fall through */
default:
return -EINVAL;
}
*/
return INSN_CODE_SEG_PARAMS(4, 8);
case 3: /* Invalid setting. CS.L=1, CS.D=1 */
- /* fall through */
+ fallthrough;
default:
return -EINVAL;
}
case TAG_Special:
/* Update tagi for the printk below */
tagi = FPU_Special(r);
- /* fall through */
+ fallthrough;
case TAG_Valid:
printk("st(%d) %c .%04lx %04lx %04lx %04lx e%+-6d ", i,
getsign(r) ? '-' : '+',
case TW_Denormal:
if (denormal_operand() < 0)
return;
- /* fall through */
+ fallthrough;
case TAG_Zero:
case TAG_Valid:
setsign(st0_ptr, getsign(st0_ptr) ^ getsign(st1_ptr));
return pmd_k;
}
+/*
+ * Handle a fault on the vmalloc or module mapping area
+ *
+ * This is needed because there is a race condition between the time
+ * when the vmalloc mapping code updates the PMD to the point in time
+ * where it synchronizes this update with the other page-tables in the
+ * system.
+ *
+ * In this race window another thread/CPU can map an area on the same
+ * PMD, find it already present, and not synchronize it with the
+ * rest of the system yet. As a result v[mz]alloc might return areas
+ * which are not mapped in every page-table in the system, causing an
+ * unhandled page-fault when they are accessed.
+ */
+static noinline int vmalloc_fault(unsigned long address)
+{
+ unsigned long pgd_paddr;
+ pmd_t *pmd_k;
+ pte_t *pte_k;
+
+ /* Make sure we are in vmalloc area: */
+ if (!(address >= VMALLOC_START && address < VMALLOC_END))
+ return -1;
+
+ /*
+ * Synchronize this task's top level page-table
+ * with the 'reference' page table.
+ *
+ * Do _not_ use "current" here. We might be inside
+ * an interrupt in the middle of a task switch..
+ */
+ pgd_paddr = read_cr3_pa();
+ pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
+ if (!pmd_k)
+ return -1;
+
+ if (pmd_large(*pmd_k))
+ return 0;
+
+ pte_k = pte_offset_kernel(pmd_k, address);
+ if (!pte_present(*pte_k))
+ return -1;
+
+ return 0;
+}
+NOKPROBE_SYMBOL(vmalloc_fault);
+
void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
{
unsigned long addr;
*/
WARN_ON_ONCE(hw_error_code & X86_PF_PK);
+#ifdef CONFIG_X86_32
+ /*
+ * We can fault-in kernel-space virtual memory on-demand. The
+ * 'reference' page table is init_mm.pgd.
+ *
+ * NOTE! We MUST NOT take any locks for this case. We may
+ * be in an interrupt or a critical region, and should
+ * only copy the information from the master page table,
+ * nothing more.
+ *
+ * Before doing this on-demand faulting, ensure that the
+ * fault is not any of the following:
+ * 1. A fault on a PTE with a reserved bit set.
+ * 2. A fault caused by a user-mode access. (Do not demand-
+ * fault kernel memory due to user-mode accesses).
+ * 3. A fault caused by a page-level protection violation.
+ * (A demand fault would be on a non-present page which
+ * would have X86_PF_PROT==0).
+ *
+ * This is only needed to close a race condition on x86-32 in
+ * the vmalloc mapping/unmapping code. See the comment above
+ * vmalloc_fault() for details. On x86-64 the race does not
+ * exist as the vmalloc mappings don't need to be synchronized
+ * there.
+ */
+ if (!(hw_error_code & (X86_PF_RSVD | X86_PF_USER | X86_PF_PROT))) {
+ if (vmalloc_fault(address) >= 0)
+ return;
+ }
+#endif
+
/* Was the fault spurious, caused by lazy TLB invalidation? */
if (spurious_kernel_fault(hw_error_code, address))
return;
/* For SEV, these areas are encrypted */
if (sev_active())
break;
- /* Fallthrough */
+ fallthrough;
case E820_TYPE_PRAM:
return true;
u64 addr, u64 max_addr, u64 size)
{
return split_nodes_size_interleave_uniform(ei, pi, addr, max_addr, size,
- 0, NULL, NUMA_NO_NODE);
+ 0, NULL, 0);
}
static int __init setup_emu2phys_nid(int *dfl_phys_nid)
this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
load_new_mm_cr3(next->pgd, new_asid, true);
- /*
- * NB: This gets called via leave_mm() in the idle path
- * where RCU functions differently. Tracing normally
- * uses RCU, so we need to use the _rcuidle variant.
- *
- * (There is no good reason for this. The idle code should
- * be rearranged to call this before rcu_idle_enter().)
- */
- trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
+ trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
} else {
/* The new ASID is already up to date. */
load_new_mm_cr3(next->pgd, new_asid, false);
- /* See above wrt _rcuidle. */
- trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);
+ trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, 0);
}
/* Make sure we write CR3 before loaded_mm. */
#include <asm/xen/pci.h>
#include <asm/xen/cpuid.h>
#include <asm/apic.h>
+#include <asm/acpi.h>
#include <asm/i8259.h>
static int xen_pcifront_enable_irq(struct pci_dev *dev)
#include <asm/efi.h>
#include <asm/e820/api.h>
#include <asm/time.h>
-#include <asm/set_memory.h>
#include <asm/tlbflush.h>
#include <asm/x86_init.h>
#include <asm/uv/uv.h>
efi_print_memmap();
}
-#if defined(CONFIG_X86_32)
-
-void __init efi_set_executable(efi_memory_desc_t *md, bool executable)
-{
- u64 addr, npages;
-
- addr = md->virt_addr;
- npages = md->num_pages;
-
- memrange_efi_to_native(&addr, &npages);
-
- if (executable)
- set_memory_x(addr, npages);
- else
- set_memory_nx(addr, npages);
-}
-
-void __init runtime_code_page_mkexec(void)
-{
- efi_memory_desc_t *md;
-
- /* Make EFI runtime service code area executable */
- for_each_efi_memory_desc(md) {
- if (md->type != EFI_RUNTIME_SERVICES_CODE)
- continue;
-
- efi_set_executable(md, true);
- }
-}
-
-void __init efi_memory_uc(u64 addr, unsigned long size)
-{
- unsigned long page_shift = 1UL << EFI_PAGE_SHIFT;
- u64 npages;
-
- npages = round_up(size, page_shift) / page_shift;
- memrange_efi_to_native(&addr, &npages);
- set_memory_uc(addr, npages);
-}
-
-void __init old_map_region(efi_memory_desc_t *md)
-{
- u64 start_pfn, end_pfn, end;
- unsigned long size;
- void *va;
-
- start_pfn = PFN_DOWN(md->phys_addr);
- size = md->num_pages << PAGE_SHIFT;
- end = md->phys_addr + size;
- end_pfn = PFN_UP(end);
-
- if (pfn_range_is_mapped(start_pfn, end_pfn)) {
- va = __va(md->phys_addr);
-
- if (!(md->attribute & EFI_MEMORY_WB))
- efi_memory_uc((u64)(unsigned long)va, size);
- } else
- va = efi_ioremap(md->phys_addr, size,
- md->type, md->attribute);
-
- md->virt_addr = (u64) (unsigned long) va;
- if (!va)
- pr_err("ioremap of 0x%llX failed!\n",
- (unsigned long long)md->phys_addr);
-}
-
-#endif
-
/* Merge contiguous regions of the same type and attribute */
static void __init efi_merge_regions(void)
{
#include <asm/io.h>
#include <asm/desc.h>
#include <asm/page.h>
+#include <asm/set_memory.h>
#include <asm/tlbflush.h>
#include <asm/efi.h>
+void __init efi_map_region(efi_memory_desc_t *md)
+{
+ u64 start_pfn, end_pfn, end;
+ unsigned long size;
+ void *va;
+
+ start_pfn = PFN_DOWN(md->phys_addr);
+ size = md->num_pages << PAGE_SHIFT;
+ end = md->phys_addr + size;
+ end_pfn = PFN_UP(end);
+
+ if (pfn_range_is_mapped(start_pfn, end_pfn)) {
+ va = __va(md->phys_addr);
+
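+ /* EFI and kernel page sizes are both 4K on x86, so num_pages needs no conversion */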
+ if (!(md->attribute & EFI_MEMORY_WB))
+ set_memory_uc((unsigned long)va, md->num_pages);
+ } else {
+ va = ioremap_cache(md->phys_addr, size);
+ }
+
+ md->virt_addr = (unsigned long)va;
+ if (!va)
+ pr_err("ioremap of 0x%llX failed!\n", md->phys_addr);
+}
+
/*
* To make EFI call EFI runtime service in physical addressing mode we need
* prolog/epilog before/after the invocation to claim the EFI runtime service
return 0;
}
-void __init efi_map_region(efi_memory_desc_t *md)
-{
- old_map_region(md);
-}
-
void __init efi_map_region_fixed(efi_memory_desc_t *md) {}
void __init parse_efi_setup(u64 phys_addr, u32 data_len) {}
void __init efi_runtime_update_mappings(void)
{
- if (__supported_pte_mask & _PAGE_NX)
- runtime_code_page_mkexec();
+ if (__supported_pte_mask & _PAGE_NX) {
+ efi_memory_desc_t *md;
+
+ /* Make EFI runtime service code area executable */
+ for_each_efi_memory_desc(md) {
+ if (md->type != EFI_RUNTIME_SERVICES_CODE)
+ continue;
+
+ set_memory_x(md->virt_addr, md->num_pages);
+ }
+ }
}
npages = (__end_rodata - __start_rodata) >> PAGE_SHIFT;
rodata = __pa(__start_rodata);
pfn = rodata >> PAGE_SHIFT;
+
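+ /* rodata is mapped non-executable; _PAGE_ENC keeps it encrypted under SME/SEV */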
+ pf = _PAGE_NX | _PAGE_ENC;
if (kernel_map_pages_in_pgd(pgd, pfn, rodata, npages, pf)) {
pr_err("Failed to map kernel rodata 1:1\n");
return 1;
regs->areg[2] = -EINTR;
break;
}
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOINTR:
regs->areg[2] = regs->syscall;
regs->pc -= 3;
case 3:
if (newline != '\n')
return -EINVAL;
- /* fall through */
+ fallthrough;
case 2:
if (length <= 0)
return -EINVAL;
kfree(bfqg);
}
-void bfqg_and_blkg_get(struct bfq_group *bfqg)
+static void bfqg_and_blkg_get(struct bfq_group *bfqg)
{
/* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
bfqg_get(bfqg);
pr_err("bdi %s: bfq: bad prio class %d\n",
bdi_dev_name(bfqq->bfqd->queue->backing_dev_info),
ioprio_class);
- /* fall through */
+ fallthrough;
case IOPRIO_CLASS_NONE:
/*
* No prio set, inherit CPU scheduling settings.
return &bfqg->async_bfqq[0][ioprio];
case IOPRIO_CLASS_NONE:
ioprio = IOPRIO_NORM;
- /* fall through */
+ fallthrough;
case IOPRIO_CLASS_BE:
return &bfqg->async_bfqq[1][ioprio];
case IOPRIO_CLASS_IDLE:
struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node);
-void bfqg_and_blkg_get(struct bfq_group *bfqg);
void bfqg_and_blkg_put(struct bfq_group *bfqg);
#ifdef CONFIG_BFQ_GROUP_IOSCHED
bfqq->ref++;
bfq_log_bfqq(bfqq->bfqd, bfqq, "get_entity: %p %d",
bfqq, bfqq->ref);
- } else
- bfqg_and_blkg_get(container_of(entity, struct bfq_group,
- entity));
+ }
}
/**
entity->on_st_or_in_serv = false;
st->wsum -= entity->weight;
- if (is_in_service)
- return;
-
- if (bfqq)
+ if (bfqq && !is_in_service)
bfq_put_queue(bfqq);
- else
- bfqg_and_blkg_put(container_of(entity, struct bfq_group,
- entity));
}
/**
struct page *page, unsigned int len, unsigned int off,
bool *same_page)
{
- phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) +
- bv->bv_offset + bv->bv_len - 1;
+ size_t bv_end = bv->bv_offset + bv->bv_len;
+ phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) + bv_end - 1;
phys_addr_t page_addr = page_to_phys(page);
if (vec_end_addr + 1 != page_addr + off)
return false;
*same_page = ((vec_end_addr & PAGE_MASK) == page_addr);
- if (!*same_page && pfn_to_page(PFN_DOWN(vec_end_addr)) + 1 != page)
- return false;
- return true;
+ if (*same_page)
+ return true;
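+ /* spans a page boundary: the struct pages must be contiguous in the memmap */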
+ return (bv->bv_page + bv_end / PAGE_SIZE) == (page + off / PAGE_SIZE);
}
/*
if (preloaded)
radix_tree_preload_end();
- ret = blk_iolatency_init(q);
+ ret = blk_throtl_init(q);
if (ret)
goto err_destroy_all;
- ret = blk_throtl_init(q);
- if (ret)
+ ret = blk_iolatency_init(q);
+ if (ret) {
+ blk_throtl_exit(q);
goto err_destroy_all;
+ }
return 0;
err_destroy_all:
goto fail_stats;
q->backing_dev_info->ra_pages = VM_READAHEAD_PAGES;
+ q->backing_dev_info->io_pages = VM_READAHEAD_PAGES;
q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
q->node = node_id;
{
struct ioc_gq *iocg = pd_to_iocg(pd);
struct ioc *ioc = iocg->ioc;
+ unsigned long flags;
if (ioc) {
- spin_lock(&ioc->lock);
+ spin_lock_irqsave(&ioc->lock, flags);
if (!list_empty(&iocg->active_list)) {
propagate_active_weight(iocg, 0, 0);
list_del_init(&iocg->active_list);
}
- spin_unlock(&ioc->lock);
+ spin_unlock_irqrestore(&ioc->lock, flags);
hrtimer_cancel(&iocg->waitq_timer);
hrtimer_cancel(&iocg->delay_timer);
if (max_sectors > start_offset)
return max_sectors - start_offset;
- return sectors & (lbs - 1);
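+ /* round down to a logical-block boundary */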
+ return sectors & ~(lbs - 1);
}
static inline unsigned get_max_segment_size(const struct request_queue *q,
}
EXPORT_SYMBOL(__blk_rq_map_sg);
+static inline unsigned int blk_rq_get_max_segments(struct request *rq)
+{
+ if (req_op(rq) == REQ_OP_DISCARD)
+ return queue_max_discard_segments(rq->q);
+ return queue_max_segments(rq->q);
+}
+
static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
unsigned int nr_phys_segs)
{
- if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(req->q))
+ if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req))
goto no_merge;
if (blk_integrity_merge_bio(req->q, req, bio) == false)
return 0;
total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
- if (total_phys_segments > queue_max_segments(q))
+ if (total_phys_segments > blk_rq_get_max_segments(req))
return 0;
if (blk_integrity_merge_rq(q, req, next) == false)
return;
clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
+ /*
+ * Order clearing SCHED_RESTART and list_empty_careful(&hctx->dispatch)
+ * in blk_mq_run_hw_queue(). Its pair is the barrier in
+ * blk_mq_dispatch_rq_list(). Without it, the dispatch code could miss
+ * SCHED_RESTART while a request newly added to hctx->dispatch escapes
+ * the check in blk_mq_run_hw_queue().
+ */
+ smp_mb();
+
blk_mq_run_hw_queue(hctx, true);
}
spin_unlock(&hctx->lock);
/*
+ * Order adding requests to hctx->dispatch and checking
+ * SCHED_RESTART flag. The pair of this smp_mb() is the one
+ * in blk_mq_sched_restart(). This keeps the restart path from
+ * missing requests newly added to hctx->dispatch while
+ * SCHED_RESTART is still observed here.
+ */
+ smp_mb();
+
+ /*
* If SCHED_RESTART was set by the caller of this function and
* it is no longer set that means that it was cleared by another
* thread and hence that a queue rerun is needed.
/**
* blk_mq_request_bypass_insert - Insert a request at dispatch list.
* @rq: Pointer to request to be inserted.
+ * @at_head: true if the request should be inserted at the head of the list.
* @run_queue: If we should run the hardware queue after inserting the request.
*
* Should only be used carefully, when the caller knows we want to
if (bypass_insert)
return BLK_STS_RESOURCE;
- blk_mq_request_bypass_insert(rq, false, run_queue);
+ blk_mq_sched_insert_request(rq, false, run_queue, false);
+
return BLK_STS_OK;
}
struct blk_stat_callback *cb)
{
unsigned int bucket;
+ unsigned long flags;
int cpu;
for_each_possible_cpu(cpu) {
blk_rq_stat_init(&cpu_stat[bucket]);
}
- spin_lock(&q->stats->lock);
+ spin_lock_irqsave(&q->stats->lock, flags);
list_add_tail_rcu(&cb->list, &q->stats->callbacks);
blk_queue_flag_set(QUEUE_FLAG_STATS, q);
- spin_unlock(&q->stats->lock);
+ spin_unlock_irqrestore(&q->stats->lock, flags);
}
void blk_stat_remove_callback(struct request_queue *q,
struct blk_stat_callback *cb)
{
- spin_lock(&q->stats->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&q->stats->lock, flags);
list_del_rcu(&cb->list);
if (list_empty(&q->stats->callbacks) && !q->stats->enable_accounting)
blk_queue_flag_clear(QUEUE_FLAG_STATS, q);
- spin_unlock(&q->stats->lock);
+ spin_unlock_irqrestore(&q->stats->lock, flags);
del_timer_sync(&cb->timer);
}
void blk_stat_enable_accounting(struct request_queue *q)
{
- spin_lock(&q->stats->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&q->stats->lock, flags);
q->stats->enable_accounting = true;
blk_queue_flag_set(QUEUE_FLAG_STATS, q);
- spin_unlock(&q->stats->lock);
+ spin_unlock_irqrestore(&q->stats->lock, flags);
}
EXPORT_SYMBOL_GPL(blk_stat_enable_accounting);
if ((bio->bi_opf & (REQ_SYNC | REQ_IDLE)) ==
(REQ_SYNC | REQ_IDLE))
return false;
- /* fallthrough */
+ fallthrough;
case REQ_OP_DISCARD:
return true;
default:
bset->timeout_fn = timeout;
set = &bset->tag_set;
- set->ops = &bsg_mq_ops,
+ set->ops = &bsg_mq_ops;
set->nr_hw_queues = 1;
set->queue_depth = 128;
set->numa_node = NUMA_NO_NODE;
case IOPRIO_CLASS_RT:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- /* fall through */
+ fallthrough;
/* rt has prio field too */
case IOPRIO_CLASS_BE:
if (data >= IOPRIO_BE_NR || data < 0)
{
struct hd_struct *part =
container_of(to_rcu_work(work), struct hd_struct, rcu_work);
+ struct gendisk *disk = part_to_disk(part);
+
+ /*
+ * Release the disk reference acquired in delete_partition here.
+ * We can't release it in hd_struct_free because the final put_device
+ * needs process context and thus can't be run directly from a
+ * percpu_ref ->release handler.
+ */
+ put_device(disk_to_dev(disk));
part->start_sect = 0;
part->nr_sects = 0;
rcu_dereference_protected(disk->part_tbl, 1);
rcu_assign_pointer(ptbl->last_lookup, NULL);
- put_device(disk_to_dev(disk));
INIT_RCU_WORK(&part->rcu_work, hd_struct_free_work);
queue_rcu_work(system_wq, &part->rcu_work);
int bdev_del_partition(struct block_device *bdev, int partno)
{
struct block_device *bdevp;
- struct hd_struct *part;
- int ret = 0;
+ struct hd_struct *part = NULL;
+ int ret;
- part = disk_get_part(bdev->bd_disk, partno);
- if (!part)
- return -ENXIO;
-
- ret = -ENOMEM;
- bdevp = bdget(part_devt(part));
+ bdevp = bdget_disk(bdev->bd_disk, partno);
if (!bdevp)
- goto out_put_part;
+ return -ENOMEM;
mutex_lock(&bdevp->bd_mutex);
+ mutex_lock_nested(&bdev->bd_mutex, 1);
+
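+ /* look up the partition with both bd_mutexes held so a concurrent remove cannot free it */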
+ ret = -ENXIO;
+ part = disk_get_part(bdev->bd_disk, partno);
+ if (!part)
+ goto out_unlock;
ret = -EBUSY;
if (bdevp->bd_openers)
sync_blockdev(bdevp);
invalidate_bdev(bdevp);
- mutex_lock_nested(&bdev->bd_mutex, 1);
delete_partition(bdev->bd_disk, part);
- mutex_unlock(&bdev->bd_mutex);
-
ret = 0;
out_unlock:
+ mutex_unlock(&bdev->bd_mutex);
mutex_unlock(&bdevp->bd_mutex);
bdput(bdevp);
-out_put_part:
- disk_put_part(part);
+ if (part)
+ disk_put_part(part);
return ret;
}
#include <linux/module.h>
#include <linux/net.h>
#include <linux/rwsem.h>
+#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/security.h>
}
lock_sock(sk);
- if (ctx->init && (init || !ctx->more)) {
- err = -EINVAL;
- goto unlock;
+ if (ctx->init && !ctx->more) {
+ if (ctx->used) {
+ err = -EINVAL;
+ goto unlock;
+ }
+
+ pr_info_once(
+ "%s sent an empty control message without MSG_MORE.\n",
+ current->comm);
}
ctx->init = true;
case -EALREADY:
err = 0;
- /* fall through */
+ fallthrough;
default:
drbg->random_ready.func = NULL;
test_hash_speed(alg, sec, generic_hash_speed_template);
break;
}
- /* fall through */
+ fallthrough;
case 301:
test_hash_speed("md4", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
- /* fall through */
+ fallthrough;
case 302:
test_hash_speed("md5", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
- /* fall through */
+ fallthrough;
case 303:
test_hash_speed("sha1", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
- /* fall through */
+ fallthrough;
case 304:
test_hash_speed("sha256", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
- /* fall through */
+ fallthrough;
case 305:
test_hash_speed("sha384", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
- /* fall through */
+ fallthrough;
case 306:
test_hash_speed("sha512", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
- /* fall through */
+ fallthrough;
case 307:
test_hash_speed("wp256", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
- /* fall through */
+ fallthrough;
case 308:
test_hash_speed("wp384", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
- /* fall through */
+ fallthrough;
case 309:
test_hash_speed("wp512", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
- /* fall through */
+ fallthrough;
case 310:
test_hash_speed("tgr128", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
- /* fall through */
+ fallthrough;
case 311:
test_hash_speed("tgr160", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
- /* fall through */
+ fallthrough;
case 312:
test_hash_speed("tgr192", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
- /* fall through */
+ fallthrough;
case 313:
test_hash_speed("sha224", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
- /* fall through */
+ fallthrough;
case 314:
test_hash_speed("rmd128", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
- /* fall through */
+ fallthrough;
case 315:
test_hash_speed("rmd160", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
- /* fall through */
+ fallthrough;
case 316:
test_hash_speed("rmd256", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
- /* fall through */
+ fallthrough;
case 317:
test_hash_speed("rmd320", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
- /* fall through */
+ fallthrough;
case 318:
test_hash_speed("ghash-generic", sec, hash_speed_template_16);
if (mode > 300 && mode < 400) break;
- /* fall through */
+ fallthrough;
case 319:
test_hash_speed("crc32c", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
- /* fall through */
+ fallthrough;
case 320:
test_hash_speed("crct10dif", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
- /* fall through */
+ fallthrough;
case 321:
test_hash_speed("poly1305", sec, poly1305_speed_template);
if (mode > 300 && mode < 400) break;
- /* fall through */
+ fallthrough;
case 322:
test_hash_speed("sha3-224", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
- /* fall through */
+ fallthrough;
case 323:
test_hash_speed("sha3-256", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
- /* fall through */
+ fallthrough;
case 324:
test_hash_speed("sha3-384", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
- /* fall through */
+ fallthrough;
case 325:
test_hash_speed("sha3-512", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
- /* fall through */
+ fallthrough;
case 326:
test_hash_speed("sm3", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
- /* fall through */
+ fallthrough;
case 327:
test_hash_speed("streebog256", sec,
generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
- /* fall through */
+ fallthrough;
case 328:
test_hash_speed("streebog512", sec,
generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
- /* fall through */
+ fallthrough;
case 399:
break;
test_ahash_speed(alg, sec, generic_hash_speed_template);
break;
}
- /* fall through */
+ fallthrough;
case 401:
test_ahash_speed("md4", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
- /* fall through */
+ fallthrough;
case 402:
test_ahash_speed("md5", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
- /* fall through */
+ fallthrough;
case 403:
test_ahash_speed("sha1", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
- /* fall through */
+ fallthrough;
case 404:
test_ahash_speed("sha256", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
- /* fall through */
+ fallthrough;
case 405:
test_ahash_speed("sha384", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
- /* fall through */
+ fallthrough;
case 406:
test_ahash_speed("sha512", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
- /* fall through */
+ fallthrough;
case 407:
test_ahash_speed("wp256", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
- /* fall through */
+ fallthrough;
case 408:
test_ahash_speed("wp384", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
- /* fall through */
+ fallthrough;
case 409:
test_ahash_speed("wp512", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
- /* fall through */
+ fallthrough;
case 410:
test_ahash_speed("tgr128", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
- /* fall through */
+ fallthrough;
case 411:
test_ahash_speed("tgr160", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
- /* fall through */
+ fallthrough;
case 412:
test_ahash_speed("tgr192", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
- /* fall through */
+ fallthrough;
case 413:
test_ahash_speed("sha224", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
- /* fall through */
+ fallthrough;
case 414:
test_ahash_speed("rmd128", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
- /* fall through */
+ fallthrough;
case 415:
test_ahash_speed("rmd160", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
- /* fall through */
+ fallthrough;
case 416:
test_ahash_speed("rmd256", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
- /* fall through */
+ fallthrough;
case 417:
test_ahash_speed("rmd320", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
- /* fall through */
+ fallthrough;
case 418:
test_ahash_speed("sha3-224", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
- /* fall through */
+ fallthrough;
case 419:
test_ahash_speed("sha3-256", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
- /* fall through */
+ fallthrough;
case 420:
test_ahash_speed("sha3-384", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
- /* fall through */
+ fallthrough;
case 421:
test_ahash_speed("sha3-512", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
- /* fall through */
+ fallthrough;
case 422:
test_mb_ahash_speed("sha1", sec, generic_hash_speed_template,
num_mb);
if (mode > 400 && mode < 500) break;
- /* fall through */
+ fallthrough;
case 423:
test_mb_ahash_speed("sha256", sec, generic_hash_speed_template,
num_mb);
if (mode > 400 && mode < 500) break;
- /* fall through */
+ fallthrough;
case 424:
test_mb_ahash_speed("sha512", sec, generic_hash_speed_template,
num_mb);
if (mode > 400 && mode < 500) break;
- /* fall through */
+ fallthrough;
case 425:
test_mb_ahash_speed("sm3", sec, generic_hash_speed_template,
num_mb);
if (mode > 400 && mode < 500) break;
- /* fall through */
+ fallthrough;
case 426:
test_mb_ahash_speed("streebog256", sec,
generic_hash_speed_template, num_mb);
if (mode > 400 && mode < 500) break;
- /* fall through */
+ fallthrough;
case 427:
test_mb_ahash_speed("streebog512", sec,
generic_hash_speed_template, num_mb);
if (mode > 400 && mode < 500) break;
- /* fall through */
+ fallthrough;
case 499:
break;
break;
case '\t':
c = ' ';
- /* Fallthrough */
+ fallthrough;
default:
if (c < 32)
/* Ignore other control sequences */
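All of the conversions above swap free-form /* fall through */ comments for the kernel's fallthrough pseudo-keyword, which the compiler can actually verify. As a rough standalone approximation of the real definition (which lives in include/linux/compiler_attributes.h), assuming a GCC/Clang toolchain:

#ifndef __has_attribute
# define __has_attribute(x) 0
#endif

#if __has_attribute(__fallthrough__)
# define fallthrough	__attribute__((__fallthrough__))
#else
# define fallthrough	do {} while (0)	/* no-op fallback */
#endif

int classify(int mode)
{
	int score = 0;

	switch (mode) {
	case 1:
		score += 10;
		fallthrough;	/* intentional: case 1 also does case 2's work */
	case 2:
		score += 1;
		break;
	default:
		break;
	}
	return score;
}

With -Wimplicit-fallthrough=5, GCC accepts only the attribute form, not comments, which is the motivation for the tree-wide conversion.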
one of the listed synthesizers, you should say n.
if SPEAKUP
+
+config SPEAKUP_SERIALIO
+ def_bool y
+ depends on ISA || COMPILE_TEST
+
config SPEAKUP_SYNTH_ACNTSA
tristate "Accent SA synthesizer support"
help
config SPEAKUP_SYNTH_ACNTPC
tristate "Accent PC synthesizer support"
- depends on ISA || COMPILE_TEST
+ depends on SPEAKUP_SERIALIO
help
This is the Speakup driver for the Accent PC
synthesizer. You can say y to build it into the kernel,
config SPEAKUP_SYNTH_DECPC
depends on m
- depends on ISA || COMPILE_TEST
+ depends on SPEAKUP_SERIALIO
tristate "DECtalk PC (big ISA card) synthesizer support"
help
config SPEAKUP_SYNTH_DTLK
tristate "DoubleTalk PC synthesizer support"
- depends on ISA || COMPILE_TEST
+ depends on SPEAKUP_SERIALIO
help
This is the Speakup driver for the internal DoubleTalk
config SPEAKUP_SYNTH_KEYPC
tristate "Keynote Gold PC synthesizer support"
- depends on ISA || COMPILE_TEST
+ depends on SPEAKUP_SERIALIO
help
This is the Speakup driver for the Keynote Gold
keyhelp.o \
kobjects.o \
selection.o \
- serialio.o \
spk_ttyio.o \
synth.o \
thread.o \
varhandlers.o
+speakup-$(CONFIG_SPEAKUP_SERIALIO) += serialio.o
static unsigned char spk_serial_in(void);
static unsigned char spk_serial_in_nowait(void);
static void spk_serial_flush_buffer(void);
+static int spk_serial_wait_for_xmitr(struct spk_synth *in_synth);
struct spk_io_ops spk_serial_io_ops = {
.synth_out = spk_serial_out,
.synth_in = spk_serial_in,
.synth_in_nowait = spk_serial_in_nowait,
.flush_buffer = spk_serial_flush_buffer,
+ .wait_for_xmitr = spk_serial_wait_for_xmitr,
};
EXPORT_SYMBOL_GPL(spk_serial_io_ops);
}
EXPORT_SYMBOL_GPL(spk_stop_serial_interrupt);
-int spk_wait_for_xmitr(struct spk_synth *in_synth)
+static int spk_serial_wait_for_xmitr(struct spk_synth *in_synth)
{
int tmout = SPK_XMITR_TIMEOUT;
static int spk_serial_out(struct spk_synth *in_synth, const char ch)
{
- if (in_synth->alive && spk_wait_for_xmitr(in_synth)) {
+ if (in_synth->alive && spk_serial_wait_for_xmitr(in_synth)) {
outb_p(ch, speakup_info.port_tts);
return 1;
}
while ((ch = *buff)) {
if (ch == '\n')
ch = synth->procspeech;
- if (spk_wait_for_xmitr(synth))
+ if (spk_serial_wait_for_xmitr(synth))
outb(ch, speakup_info.port_tts);
else
return buff;
const struct old_serial_port *spk_serial_init(int index);
void spk_stop_serial_interrupt(void);
-int spk_wait_for_xmitr(struct spk_synth *in_synth);
void spk_serial_release(void);
void spk_ttyio_release(void);
void spk_ttyio_register_ldisc(void);
static unsigned char spk_ttyio_in(void);
static unsigned char spk_ttyio_in_nowait(void);
static void spk_ttyio_flush_buffer(void);
+static int spk_ttyio_wait_for_xmitr(struct spk_synth *in_synth);
struct spk_io_ops spk_ttyio_ops = {
.synth_out = spk_ttyio_out,
.synth_in = spk_ttyio_in,
.synth_in_nowait = spk_ttyio_in_nowait,
.flush_buffer = spk_ttyio_flush_buffer,
+ .wait_for_xmitr = spk_ttyio_wait_for_xmitr,
};
EXPORT_SYMBOL_GPL(spk_ttyio_ops);
mutex_unlock(&speakup_tty_mutex);
}
+static int spk_ttyio_wait_for_xmitr(struct spk_synth *in_synth)
+{
+ return 1;
+}
+
static unsigned char ttyio_in(int timeout)
{
struct spk_ldisc_data *ldisc_data = speakup_tty->disc_data;
unsigned char (*synth_in)(void);
unsigned char (*synth_in_nowait)(void);
void (*flush_buffer)(void);
+ int (*wait_for_xmitr)(struct spk_synth *synth);
};
struct spk_synth {
{
if (synth->alive)
return 1;
- if (spk_wait_for_xmitr(synth) > 0) {
+ if (synth->io_ops->wait_for_xmitr(synth) > 0) {
/* restart */
synth->alive = 1;
synth_printf("%s", synth->init);
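The net effect of the Speakup changes above is that wait_for_xmitr becomes a per-backend operation: the serial backend keeps its register-polling version (now static), the tty backend supplies a trivial one, and callers dispatch through synth->io_ops instead of a global helper. A minimal sketch of that dispatch shape, with simplified hypothetical names rather than the actual Speakup structures:

struct io_ops {
	int (*wait_for_xmitr)(void *synth);
};

static int serial_wait(void *synth)
{
	return 1;	/* would poll the UART LSR with a timeout here */
}

static int tty_wait(void *synth)
{
	return 1;	/* tty layer buffers writes; nothing to wait on */
}

static const struct io_ops serial_ops = { .wait_for_xmitr = serial_wait };
static const struct io_ops tty_ops    = { .wait_for_xmitr = tty_wait };

static int synth_restartable(const struct io_ops *ops, void *synth)
{
	return ops->wait_for_xmitr(synth) > 0;	/* backend decides */
}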
if (ret < 0)
return -ENOENT;
- acpi_dev_get_property(adev, "is-rv", ACPI_TYPE_INTEGER, &obj);
- clk_data->is_rv = obj->integer.value;
+ if (!acpi_dev_get_property(adev, "is-rv", ACPI_TYPE_INTEGER, &obj))
+ clk_data->is_rv = obj->integer.value;
list_for_each_entry(rentry, &resource_list, node) {
clk_data->base = devm_ioremap(&adev->dev, rentry->res->start,
pg_off = round_down(phys, PAGE_SIZE);
pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
- virt = acpi_map(pg_off, pg_sz);
+ virt = acpi_map(phys, size);
if (!virt) {
mutex_unlock(&acpi_ioremap_lock);
kfree(map);
}
INIT_LIST_HEAD(&map->list);
- map->virt = virt;
+ map->virt = (void __iomem __force *)((unsigned long)virt & PAGE_MASK);
map->phys = pg_off;
map->size = pg_sz;
map->track.refcount = 1;
acpi_status acpi_release_memory(acpi_handle handle, struct resource *res,
u32 level)
{
+ acpi_status status;
+
if (!(res->flags & IORESOURCE_MEM))
return AE_TYPE;
- return acpi_walk_namespace(ACPI_TYPE_REGION, handle, level,
- acpi_deactivate_mem_region, NULL, res, NULL);
+ status = acpi_walk_namespace(ACPI_TYPE_REGION, handle, level,
+ acpi_deactivate_mem_region, NULL,
+ res, NULL);
+ if (ACPI_FAILURE(status))
+ return status;
+
+ /*
+ * Wait for all of the mappings queued up for removal by
+ * acpi_deactivate_mem_region() to actually go away.
+ */
+ synchronize_rcu();
+ rcu_barrier();
+ flush_scheduled_work();
+
+ return AE_OK;
}
EXPORT_SYMBOL_GPL(acpi_release_memory);
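The added synchronize_rcu()/rcu_barrier()/flush_scheduled_work() sequence makes acpi_release_memory() wait until every unmap queued by acpi_deactivate_mem_region() has actually run, instead of returning while teardown is still in flight. A userspace analogy of that "queue the teardown, then drain it before returning" pattern, using pthreads and hypothetical names:

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  drained = PTHREAD_COND_INITIALIZER;
static int pending;

static void queue_one_unmap(void)	/* like acpi_deactivate_mem_region() */
{
	pthread_mutex_lock(&lock);
	pending++;			/* teardown handed off to a worker */
	pthread_mutex_unlock(&lock);
}

static void worker_finished_one(void)
{
	pthread_mutex_lock(&lock);
	if (--pending == 0)
		pthread_cond_broadcast(&drained);
	pthread_mutex_unlock(&lock);
}

static void flush_unmaps(void)		/* like the added RCU/workqueue waits */
{
	pthread_mutex_lock(&lock);
	while (pending)
		pthread_cond_wait(&drained, &lock);
	pthread_mutex_unlock(&lock);
}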
(sstatus & 0xf) != 1)
break;
- ata_link_printk(link, KERN_INFO, "avn bounce port%d\n",
- port);
+ ata_link_info(link, "avn bounce port%d\n", port);
pci_read_config_word(pdev, 0x92, &val);
val &= ~(1 << port);
switch (priv->version) {
case BRCM_SATA_BCM7425:
hpriv->flags |= AHCI_HFLAG_DELAY_ENGINE;
- /* fall through */
+ fallthrough;
case BRCM_SATA_NSP:
hpriv->flags |= AHCI_HFLAG_NO_NCQ;
priv->quirks |= BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE;
node);
break;
}
- /* fall through */
+ fallthrough;
case -ENODEV:
/* continue normally */
hpriv->phys[port] = NULL;
case ATA_LITER_PMP_FIRST:
if (sata_pmp_attached(ap))
return ap->pmp_link;
- /* fall through */
+ fallthrough;
case ATA_LITER_HOST_FIRST:
return &ap->link;
}
case ATA_LITER_HOST_FIRST:
if (sata_pmp_attached(ap))
return ap->pmp_link;
- /* fall through */
+ fallthrough;
case ATA_LITER_PMP_FIRST:
if (unlikely(ap->slave_link))
return ap->slave_link;
- /* fall through */
+ fallthrough;
case ATA_LITER_EDGE:
return NULL;
}
case ATA_12:
if (atapi_passthru16)
return ATAPI_PASS_THRU;
- /* fall thru */
+ fallthrough;
default:
return ATAPI_MISC;
}
switch (class) {
case ATA_DEV_SEMB:
class = ATA_DEV_ATA; /* some hard drives report SEMB sig */
- /* fall through */
+ fallthrough;
case ATA_DEV_ATA:
case ATA_DEV_ZAC:
tf.command = ATA_CMD_ID_ATA;
case -ENODEV:
/* give it just one more chance */
tries[dev->devno] = min(tries[dev->devno], 1);
- /* fall through */
+ fallthrough;
case -EIO:
if (tries[dev->devno] == 1) {
/* This is the last chance, better to slow
case ATA_DNXFER_FORCE_PIO0:
pio_mask &= 1;
- /* fall through */
+ fallthrough;
case ATA_DNXFER_FORCE_PIO:
mwdma_mask = 0;
udma_mask = 0;
/* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
{ "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, },
- /* Some Sandisk SSDs lock up hard with NCQ enabled. Reported on
- SD7SN6S256G and SD8SN8U256G */
- { "SanDisk SD[78]SN*G", NULL, ATA_HORKAGE_NONCQ, },
+ /* Sandisk SD7/8/9s lock up hard on large trims */
+ { "SanDisk SD[789]*", NULL, ATA_HORKAGE_MAX_TRIM_128M, },
/* devices which puke on READ_NATIVE_MAX */
{ "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
qc->tf.feature != SETFEATURES_RA_ON &&
qc->tf.feature != SETFEATURES_RA_OFF)
break;
- /* fall through */
+ fallthrough;
case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
case ATA_CMD_SET_MULTI: /* multi_count changed */
/* revalidate device */
case ATA_DEV_ZAC:
if (stat & ATA_SENSE)
ata_eh_request_sense(qc, qc->scsicmd);
- /* fall through */
+ fallthrough;
case ATA_DEV_ATA:
if (err & ATA_ICRC)
qc->err_mask |= AC_ERR_ATA_BUS;
case -ENODEV:
/* device missing or wrong IDENTIFY data, schedule probing */
ehc->i.probe_mask |= (1 << dev->devno);
- /* fall through */
+ fallthrough;
case -EINVAL:
/* give it just one more chance */
ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
- /* fall through */
+ fallthrough;
case -EIO:
if (ehc->tries[dev->devno] == 1) {
/* This is the last chance, better to slow
static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf)
{
+ struct ata_device *dev = args->dev;
u16 min_io_sectors;
rbuf[1] = 0xb0;
* with the unmap bit set.
*/
if (ata_id_has_trim(args->id)) {
- put_unaligned_be64(65535 * ATA_MAX_TRIM_RNUM, &rbuf[36]);
+ u64 max_blocks = 65535 * ATA_MAX_TRIM_RNUM;
+
+ if (dev->horkage & ATA_HORKAGE_MAX_TRIM_128M)
+ max_blocks = 128 << (20 - SECTOR_SHIFT);
+
+ put_unaligned_be64(max_blocks, &rbuf[36]);
put_unaligned_be32(1, &rbuf[28]);
}
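The new cap is 128 MiB expressed in 512-byte sectors: SECTOR_SHIFT is 9, so 128 << (20 - 9) = 128 * 2048 = 262144 sectors. A one-line standalone check:

#include <assert.h>

#define SECTOR_SHIFT	9	/* 512-byte sectors */

int main(void)
{
	unsigned long long max_blocks = 128ULL << (20 - SECTOR_SHIFT);

	assert(max_blocks == (128ULL * 1024 * 1024) / 512);	/* 262144 sectors */
	return 0;
}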
ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b6);
break;
}
- /* Fallthrough */
+ fallthrough;
default:
ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
break;
* turning this into a no-op.
*/
case SYNCHRONIZE_CACHE:
- /* fall through */
+ fallthrough;
/* no-op's, complete with success */
case REZERO_UNIT:
default:
printk(KERN_WARNING "ATP867X: active %dclk is invalid. "
"Using 12clk.\n", clk);
- /* fall through */
+ fallthrough;
case 9 ... 12:
clocks = 7; /* 12 clk */
break;
default:
printk(KERN_WARNING "ATP867X: recover %dclk is invalid. "
"Using default 12clk.\n", clk);
- /* fall through */
+ fallthrough;
case 12: /* default 12 clk */
clocks = 0;
break;
break;
case PCI_DEVICE_ID_SERVERWORKS_CSB5IDE:
ata_pci_bmdma_clear_simplex(pdev);
- /* fall through */
+ fallthrough;
case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE:
case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2:
rc = serverworks_fixup_csb(pdev);
break;
case ATA_CMD_WRITE_MULTI_FUA_EXT:
tf->flags &= ~ATA_TFLAG_FUA; /* ugh */
- /* fall through */
+ fallthrough;
case ATA_CMD_WRITE_MULTI_EXT:
tf->command = ATA_CMD_PIO_WRITE_EXT;
break;
case ATA_PROT_DMA:
if (tf->command == ATA_CMD_DSM)
return AC_ERR_OK;
- /* fall-thru */
+ fallthrough;
case ATA_PROT_NCQ:
break; /* continue below */
case ATA_PROT_PIO:
switch (qc->tf.protocol) {
case ATAPI_PROT_PIO:
pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
- /* fall through */
+ fallthrough;
case ATAPI_PROT_NODATA:
ap->hsm_task_state = HSM_ST_FIRST;
break;
return AC_ERR_OTHER;
break; /* use bmdma for this */
}
- /* fall thru */
+ fallthrough;
case ATA_PROT_NCQ:
mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);
pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
": attempting PIO w/multiple DRQ: "
"this may fail due to h/w errata\n");
}
- /* fall through */
+ fallthrough;
case ATA_PROT_NODATA:
case ATAPI_PROT_PIO:
case ATAPI_PROT_NODATA:
" and avoid the final two gigabytes on"
" all RocketRAID BIOS initialized drives.\n");
}
- /* fall through */
+ fallthrough;
case chip_6042:
hpriv->ops = &mv6xxx_ops;
hp_flags |= MV_HP_GEN_IIE;
switch (qc->tf.protocol) {
case ATA_PROT_DMA:
pdc_fill_sg(qc);
- /*FALLTHROUGH*/
+ fallthrough;
case ATA_PROT_NODATA:
i = pdc_pkt_header(&qc->tf, qc->ap->bmdma_prd_dma,
qc->dev->devno, pp->pkt);
break;
case ATAPI_PROT_DMA:
pdc_fill_sg(qc);
- /*FALLTHROUGH*/
+ fallthrough;
case ATAPI_PROT_NODATA:
pdc_atapi_pkt(qc);
break;
case ATAPI_PROT_NODATA:
if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
break;
- /*FALLTHROUGH*/
+ fallthrough;
case ATA_PROT_NODATA:
if (qc->tf.flags & ATA_TFLAG_POLLING)
break;
- /*FALLTHROUGH*/
+ fallthrough;
case ATAPI_PROT_DMA:
case ATA_PROT_DMA:
pdc_packet_start(qc);
case ATA_PROT_NODATA:
if (qc->tf.flags & ATA_TFLAG_POLLING)
break;
- /*FALLTHROUGH*/
+ fallthrough;
case ATA_PROT_DMA:
pdc20621_packet_start(qc);
return 0;
switch (STATUS_CODE (qe)) {
case 0x01: /* This is for AAL0 where we put the chip in streaming mode */
- /* Fall through */
+ fallthrough;
case 0x02:
/* Process a real txdone entry. */
tmp = qe->p0;
error = make_rate (pcr, r, &tmc0, NULL);
if (error) {
kfree(tc);
+ kfree(vcc);
return error;
}
}
case FORE200E_STATE_COMPLETE:
kfree(fore200e->stats);
- /* fall through */
+ fallthrough;
case FORE200E_STATE_IRQ:
free_irq(fore200e->irq, fore200e->atm_dev);
- /* fall through */
+ fallthrough;
case FORE200E_STATE_ALLOC_BUF:
fore200e_free_rx_buf(fore200e);
- /* fall through */
+ fallthrough;
case FORE200E_STATE_INIT_BSQ:
fore200e_uninit_bs_queue(fore200e);
- /* fall through */
+ fallthrough;
case FORE200E_STATE_INIT_RXQ:
fore200e_dma_chunk_free(fore200e, &fore200e->host_rxq.status);
fore200e_dma_chunk_free(fore200e, &fore200e->host_rxq.rpd);
- /* fall through */
+ fallthrough;
case FORE200E_STATE_INIT_TXQ:
fore200e_dma_chunk_free(fore200e, &fore200e->host_txq.status);
fore200e_dma_chunk_free(fore200e, &fore200e->host_txq.tpd);
- /* fall through */
+ fallthrough;
case FORE200E_STATE_INIT_CMDQ:
fore200e_dma_chunk_free(fore200e, &fore200e->host_cmdq.status);
- /* fall through */
+ fallthrough;
case FORE200E_STATE_INITIALIZE:
/* nothing to do for that state */
case FORE200E_STATE_MAP:
fore200e->bus->unmap(fore200e);
- /* fall through */
+ fallthrough;
case FORE200E_STATE_CONFIGURE:
/* nothing to do for that state */
switch (type) {
case ITYPE_RBRQ_THRESH:
HPRINTK("rbrq%d threshold\n", group);
- /* fall through */
+ fallthrough;
case ITYPE_RBRQ_TIMER:
if (he_service_rbrq(he_dev, group))
he_service_rbpl(he_dev, group);
break;
case ITYPE_TBRQ_THRESH:
HPRINTK("tbrq%d threshold\n", group);
- /* fall through */
+ fallthrough;
case ITYPE_TPD_COMPLETE:
he_service_tbrq(he_dev, group);
break;
switch (cmd) {
case IDT77105_GETSTATZ:
if (!capable(CAP_NET_ADMIN)) return -EPERM;
- /* fall through */
+ fallthrough;
case IDT77105_GETSTAT:
return fetch_stats(dev, arg, cmd == IDT77105_GETSTATZ);
case ATM_SETLOOP:
switch (*vpip) {
case ATM_VPI_ANY:
*vpip = 0;
- /* FALLTHROUGH */
+ fallthrough;
case 0:
break;
default:
switch (cmd) {
case ZATM_GETPOOLZ:
if (!capable(CAP_NET_ADMIN)) return -EPERM;
- /* fall through */
+ fallthrough;
case ZATM_GETPOOL:
{
struct zatm_pool_info info;
/*
* Driver for the on-board character LCD found on some ARM reference boards
* This is basically a Hitachi HD44780 LCD with a custom IP block to drive it
- * http://en.wikipedia.org/wiki/HD44780_Character_LCD
+ * https://en.wikipedia.org/wiki/HD44780_Character_LCD
* Currently it will just display the text "ARM Linux" and the linux version
*
* Author: Linus Walleij <triad@df.lth.se>
break;
input->rise_timer = 0;
input->state = INPUT_ST_RISING;
- /* fall through */
+ fallthrough;
case INPUT_ST_RISING:
if ((phys_curr & input->mask) != input->value) {
input->state = INPUT_ST_LOW;
}
input->high_timer = 0;
input->state = INPUT_ST_HIGH;
- /* fall through */
+ fallthrough;
case INPUT_ST_HIGH:
if (input_state_high(input))
break;
- /* fall through */
+ fallthrough;
case INPUT_ST_FALLING:
input_state_falling(input);
}
*/
void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
{
- if (fwnode) {
- struct fwnode_handle *fn = dev->fwnode;
+ struct fwnode_handle *fn = dev->fwnode;
+ if (fwnode) {
if (fwnode_is_primary(fn))
fn = fn->secondary;
}
dev->fwnode = fwnode;
} else {
- dev->fwnode = fwnode_is_primary(dev->fwnode) ?
- dev->fwnode->secondary : NULL;
+ if (fwnode_is_primary(fn)) {
+ dev->fwnode = fn->secondary;
+ fn->secondary = NULL;
+ } else {
+ dev->fwnode = NULL;
+ }
}
}
EXPORT_SYMBOL_GPL(set_primary_fwnode);
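The rewritten else-branch above is careful about two things: the device keeps its secondary fwnode when the primary goes away, and the departing primary's ->secondary back-link is cleared so it cannot dangle. A toy model of just that pointer dance, using a hypothetical struct rather than the real fwnode_handle:

#include <stddef.h>

struct fwnode { struct fwnode *secondary; int is_primary; };

static void clear_primary(struct fwnode **devnode)
{
	struct fwnode *fn = *devnode;

	if (fn && fn->is_primary) {
		*devnode = fn->secondary;	/* keep the secondary attached */
		fn->secondary = NULL;		/* drop the stale back-link */
	} else {
		*devnode = NULL;
	}
}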
}
break;
}
- /* fallthrough */
+ fallthrough;
default:
dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading);
- /* fallthrough */
+ fallthrough;
case -1:
fw_load_abort(fw_sysfs);
break;
}
/*
- * If a device configured to wake up the system from sleep states
- * has been suspended at run time and there's a resume request pending
- * for it, this is equivalent to the device signaling wakeup, so the
- * system suspend operation should be aborted.
+ * Wait for possible runtime PM transitions of the device in progress
+ * to complete and if there's a runtime resume request pending for it,
+ * resume it before proceeding with invoking the system-wide suspend
+ * callbacks for it.
+ *
+ * If the system-wide suspend callbacks below change the configuration
+ * of the device, they must disable runtime PM for it or otherwise
+ * ensure that its runtime-resume callbacks will not be confused by that
+ * change in case they are invoked going forward.
*/
- if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
- pm_wakeup_event(dev, 0);
+ pm_runtime_barrier(dev);
if (pm_wakeup_pending()) {
dev->power.direct_complete = false;
break;
}
bvcpy(skb, f->buf->bio, f->iter, n);
- /* fall through */
+ fallthrough;
case ATA_CMD_PIO_WRITE:
case ATA_CMD_PIO_WRITE_EXT:
spin_lock_irq(&d->lock);
/* MSch: invalidate default_params */
default_params[drive].blocks = 0;
set_capacity(floppy->disk, MAX_DISK_SIZE * 2);
- /* Fall through */
+ fallthrough;
case FDFMTEND:
case FDFLUSH:
/* invalidate the buffer track to force a reread */
_drbd_set_state(_NS(device, disk, D_INCONSISTENT), CS_HARD, NULL);
break;
}
- /* fall through - for DRBD_META_IO_ERROR or DRBD_FORCE_DETACH */
+ fallthrough; /* for DRBD_META_IO_ERROR or DRBD_FORCE_DETACH */
case EP_DETACH:
case EP_CALL_HELPER:
/* Remember whether we saw a READ or WRITE error.
thi->t_state = RESTARTING;
drbd_info(resource, "Restarting %s thread (from %s [%d])\n",
thi->name, current->comm, current->pid);
- /* fall through */
+ fallthrough;
case RUNNING:
case RESTARTING:
default:
if (nla_put_u32(skb, T_helper_exit_code,
sib->helper_exit_code))
goto nla_put_failure;
- /* fall through */
+ fallthrough;
case SIB_HELPER_PRE:
if (nla_put_string(skb, T_helper, sib->helper_name))
goto nla_put_failure;
break;
else
drbd_warn(connection, "Allocation of an epoch failed, slowing down\n");
- /* Fall through */
+ fallthrough;
case WO_BDEV_FLUSH:
case WO_DRAIN_IO:
then we would do something smarter here than reading
the block... */
peer_req->flags |= EE_RS_THIN_REQ;
- /* fall through */
+ fallthrough;
case P_RS_DATA_REQUEST:
peer_req->w.cb = w_e_end_rsdata_req;
fault_type = DRBD_FAULT_RS_RD;
rv = 1;
break;
}
- /* Else fall through - to one of the other strategies... */
+ fallthrough; /* to one of the other strategies */
case ASB_DISCARD_OLDER_PRI:
if (self == 0 && peer == 1) {
rv = 1;
/* Else fall through to one of the other strategies... */
drbd_warn(device, "Discard younger/older primary did not find a decision\n"
"Using discard-least-changes instead\n");
- /* fall through */
+ fallthrough;
case ASB_DISCARD_ZERO_CHG:
if (ch_peer == 0 && ch_self == 0) {
rv = test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags)
}
if (after_sb_0p == ASB_DISCARD_ZERO_CHG)
break;
- /* else, fall through */
+ fallthrough;
case ASB_DISCARD_LEAST_CHG:
if (ch_self < ch_peer)
rv = -1;
switch (rr_conflict) {
case ASB_CALL_HELPER:
drbd_khelper(device, "pri-lost");
- /* fall through */
+ fallthrough;
case ASB_DISCONNECT:
drbd_err(device, "I shall become SyncTarget, but I am primary!\n");
return C_MASK;
drbd_set_out_of_sync(device, req->i.sector, req->i.size);
drbd_report_io_error(device, req);
__drbd_chk_io_error(device, DRBD_READ_ERROR);
- /* fall through. */
+ fallthrough;
case READ_AHEAD_COMPLETED_WITH_ERROR:
/* it is legal to fail read-ahead, no __drbd_chk_io_error in that case. */
mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
} /* else: FIXME can this happen? */
break;
}
- /* else, fall through - to BARRIER_ACKED */
+ fallthrough; /* to BARRIER_ACKED */
case BARRIER_ACKED:
/* barrier ack for READ requests does not make sense */
clear_bit(FD_DISK_NEWCHANGE_BIT,
&drive_state[current_drive].flags);
drive_state[current_drive].select_date = jiffies;
- /* fall through */
+ fallthrough;
default:
debugt(__func__, "default");
/* Recalibrate moves the head by at
if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR)
return -EINTR;
process_fd_request();
- /* fall through */
+ fallthrough;
case FDGETDRVSTAT:
outparam = &drive_state[drive];
break;
struct file *file = lo->lo_backing_file;
struct inode *inode = file->f_mapping->host;
struct request_queue *q = lo->lo_queue;
+ u32 granularity, max_discard_sectors;
/*
* If the backing device is a block device, mirror its zeroing
struct request_queue *backingq;
backingq = bdev_get_queue(inode->i_bdev);
- blk_queue_max_discard_sectors(q,
- backingq->limits.max_write_zeroes_sectors);
- blk_queue_max_write_zeroes_sectors(q,
- backingq->limits.max_write_zeroes_sectors);
+ max_discard_sectors = backingq->limits.max_write_zeroes_sectors;
+ granularity = backingq->limits.discard_granularity ?:
+ queue_physical_block_size(backingq);
/*
* We use punch hole to reclaim the free space used by the
* useful information.
*/
} else if (!file->f_op->fallocate || lo->lo_encrypt_key_size) {
- q->limits.discard_granularity = 0;
- q->limits.discard_alignment = 0;
- blk_queue_max_discard_sectors(q, 0);
- blk_queue_max_write_zeroes_sectors(q, 0);
+ max_discard_sectors = 0;
+ granularity = 0;
} else {
- q->limits.discard_granularity = inode->i_sb->s_blocksize;
- q->limits.discard_alignment = 0;
-
- blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
- blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9);
+ max_discard_sectors = UINT_MAX >> 9;
+ granularity = inode->i_sb->s_blocksize;
}
- if (q->limits.max_write_zeroes_sectors)
+ if (max_discard_sectors) {
+ q->limits.discard_granularity = granularity;
+ blk_queue_max_discard_sectors(q, max_discard_sectors);
+ blk_queue_max_write_zeroes_sectors(q, max_discard_sectors);
blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
- else
+ } else {
+ q->limits.discard_granularity = 0;
+ blk_queue_max_discard_sectors(q, 0);
+ blk_queue_max_write_zeroes_sectors(q, 0);
blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
+ }
+ q->limits.discard_alignment = 0;
}
static void loop_unprepare_queue(struct loop_device *lo)
mapping = file->f_mapping;
inode = mapping->host;
- size = get_loop_size(lo, file);
-
if ((config->info.lo_flags & ~LOOP_CONFIGURE_SETTABLE_FLAGS) != 0) {
error = -EINVAL;
goto out_unlock;
loop_update_rotational(lo);
loop_update_dio(lo);
loop_sysfs_init(lo);
+
+ size = get_loop_size(lo, file);
loop_set_size(lo, size);
set_blocksize(bdev, S_ISBLK(inode->i_mode) ?
case LOOP_SET_BLOCK_SIZE:
if (!(mode & FMODE_WRITE) && !capable(CAP_SYS_ADMIN))
return -EPERM;
- /* Fall through */
+ fallthrough;
default:
err = lo_simple_ioctl(lo, cmd, arg);
break;
case LOOP_SET_STATUS64:
case LOOP_CONFIGURE:
arg = (unsigned long) compat_ptr(arg);
- /* fall through */
+ fallthrough;
case LOOP_SET_FD:
case LOOP_CHANGE_FD:
case LOOP_SET_BLOCK_SIZE:
nbd->tag_set.timeout = timeout * HZ;
if (timeout)
blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
+ else
+ blk_queue_rq_timeout(nbd->disk->queue, 30 * HZ);
}
/* Must be called with config_lock held */
len = bvec.bv_len;
err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
op_is_write(req_op(rq)), sector,
- req_op(rq) & REQ_FUA);
+ rq->cmd_flags & REQ_FUA);
if (err) {
spin_unlock_irq(&nullb->lock);
return err;
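The null_blk fix above is subtle: req_op() returns only the low REQ_OP_BITS of cmd_flags, while REQ_FUA is a flag bit above that field, so req_op(rq) & REQ_FUA was always zero and FUA was silently ignored. A standalone check with the same layout (constants mirror the kernel's REQ_OP_BITS of 8; the exact FUA bit position here is illustrative):

#include <assert.h>

#define REQ_OP_BITS	8
#define REQ_OP_MASK	((1u << REQ_OP_BITS) - 1)
#define REQ_FUA		(1u << 24)	/* illustrative: some bit above the op field */

int main(void)
{
	unsigned int cmd_flags = 1u /* a write op */ | REQ_FUA;
	unsigned int op = cmd_flags & REQ_OP_MASK;	/* what req_op() yields */

	assert((op & REQ_FUA) == 0);		/* old test: can never fire */
	assert((cmd_flags & REQ_FUA) != 0);	/* fixed test sees the flag */
	return 0;
}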
pd_claimed = 1;
if (!pi_schedule_claimed(pi_current, run_fsm))
return;
- /* fall through */
+ fallthrough;
case 1:
pd_claimed = 2;
pi_current->proto->connect(pi_current);
if (stop)
return;
}
- /* fall through */
+ fallthrough;
case Hold:
schedule_fsm();
return;
*/
if (pd->refcnt == 1)
pkt_lock_door(pd, 0);
- /* fall through */
+ fallthrough;
/*
* forward selected CDROM ioctls to CD-ROM, for UDF
*/
case __RBD_OBJ_COPYUP_OBJECT_MAPS:
if (!pending_result_dec(&obj_req->pending, result))
return false;
- /* fall through */
+ fallthrough;
case RBD_OBJ_COPYUP_OBJECT_MAPS:
if (*result) {
rbd_warn(rbd_dev, "snap object map update failed: %d",
case __RBD_OBJ_COPYUP_WRITE_OBJECT:
if (!pending_result_dec(&obj_req->pending, result))
return false;
- /* fall through */
+ fallthrough;
case RBD_OBJ_COPYUP_WRITE_OBJECT:
return true;
default:
case __RBD_OBJ_WRITE_COPYUP:
if (!rbd_obj_advance_copyup(obj_req, result))
return false;
- /* fall through */
+ fallthrough;
case RBD_OBJ_WRITE_COPYUP:
if (*result) {
rbd_warn(rbd_dev, "copyup failed: %d", *result);
case __RBD_IMG_OBJECT_REQUESTS:
if (!pending_result_dec(&img_req->pending, result))
return false;
- /* fall through */
+ fallthrough;
case RBD_IMG_OBJECT_REQUESTS:
return true;
default:
/* Generate bio with pages pointing to the rdma buffer */
bio = rnbd_bio_map_kern(data, sess_dev->rnbd_dev->ibd_bio_set, datalen, GFP_KERNEL);
if (IS_ERR(bio)) {
- rnbd_srv_err(sess_dev, "Failed to generate bio, err: %ld\n", PTR_ERR(bio));
+ err = PTR_ERR(bio);
+ rnbd_srv_err(sess_dev, "Failed to generate bio, err: %d\n", err);
goto sess_dev_put;
}
* Fall through so the DMA devices can be attached and
* the user can attempt to pull off their data.
*/
- /* fall through */
+ fallthrough;
case CARD_STATE_GOOD:
st = rsxx_get_card_size8(card, &card->size8);
if (st)
blk_mq_requeue_request(req, true);
break;
}
- /* fall through */
+ fallthrough;
case SKD_CHECK_STATUS_REPORT_ERROR:
default:
if (!range)
return -ENOMEM;
- __rq_for_each_bio(bio, req) {
- u64 sector = bio->bi_iter.bi_sector;
- u32 num_sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT;
-
- range[n].flags = cpu_to_le32(flags);
- range[n].num_sectors = cpu_to_le32(num_sectors);
- range[n].sector = cpu_to_le64(sector);
- n++;
+ /*
+ * Single max discard segment means multi-range discard isn't
+ * supported, and block layer only runs contiguity merge like
+ * normal RW request. So we can't rely on bio for retrieving
+ * each range info.
+ */
+ if (queue_max_discard_segments(req->q) == 1) {
+ range[0].flags = cpu_to_le32(flags);
+ range[0].num_sectors = cpu_to_le32(blk_rq_sectors(req));
+ range[0].sector = cpu_to_le64(blk_rq_pos(req));
+ n = 1;
+ } else {
+ __rq_for_each_bio(bio, req) {
+ u64 sector = bio->bi_iter.bi_sector;
+ u32 num_sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT;
+
+ range[n].flags = cpu_to_le32(flags);
+ range[n].num_sectors = cpu_to_le32(num_sectors);
+ range[n].sector = cpu_to_le64(sector);
+ n++;
+ }
}
+ WARN_ON_ONCE(n != segments);
+
req->special_vec.bv_page = virt_to_page(range);
req->special_vec.bv_offset = offset_in_page(range);
req->special_vec.bv_len = sizeof(*range) * segments;
break;
case BLKIF_OP_WRITE_BARRIER:
drain = true;
- /* fall through */
+ fallthrough;
case BLKIF_OP_FLUSH_DISKCACHE:
ring->st_f_req++;
operation = REQ_OP_WRITE;
xenbus_switch_state(dev, XenbusStateClosed);
if (xenbus_dev_is_online(dev))
break;
- /* fall through */
+ fallthrough;
/* if not online */
case XenbusStateUnknown:
/* implies xen_blkif_disconnect() via xen_blkbk_remove() */
case BLKIF_RSP_EOPNOTSUPP:
return REQ_EOPNOTSUPP;
case BLKIF_RSP_ERROR:
- /* Fallthrough. */
default:
return REQ_ERROR;
}
info->feature_flush = 0;
xlvbd_flush(info);
}
- /* fall through */
+ fallthrough;
case BLKIF_OP_READ:
case BLKIF_OP_WRITE:
if (unlikely(bret->status != BLKIF_RSP_OKAY))
case XenbusStateClosed:
if (dev->state == XenbusStateClosed)
break;
- /* fall through */
+ fallthrough;
case XenbusStateClosing:
if (info)
blkfront_closing(info);
switch (ddata->nr_clocks) {
case 2:
ick = ddata->clocks[SYSC_ICK];
- /* fallthrough */
+ fallthrough;
case 1:
fck = ddata->clocks[SYSC_FCK];
break;
default:
break;
}
- /*FALLTHROUGH*/
+ fallthrough;
default:
bridge->driver = &ali_generic_bridge;
}
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base)) {
pr_err("%s: Failed to map RNG registers\n", __func__);
- ret = PTR_ERR(priv->base);
- goto err_free_rng;
+ return PTR_ERR(priv->base);
}
priv->version = (enum ingenic_rng_version)of_device_get_match_data(&pdev->dev);
ret = hwrng_register(&priv->rng);
if (ret) {
dev_err(&pdev->dev, "Failed to register hwrng\n");
- goto err_free_rng;
+ return ret;
}
platform_set_drvdata(pdev, priv);
dev_info(&pdev->dev, "Ingenic RNG driver registered\n");
return 0;
-
-err_free_rng:
- kfree(priv);
- return ret;
}
static int ingenic_rng_remove(struct platform_device *pdev)
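The Ingenic RNG fix works because priv comes from devm_kzalloc(): device-managed memory is released by the driver core when probe fails or the device goes away, so the old kfree(priv) in the error paths risked a double free. A kernel-style sketch of that rule, with a hypothetical setup_hw() helper:

#include <linux/device.h>
#include <linux/platform_device.h>

struct my_priv { void __iomem *base; };

static int setup_hw(struct my_priv *priv)	/* hypothetical helper */
{
	return priv->base ? 0 : -EIO;
}

static int example_probe(struct platform_device *pdev)
{
	struct my_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	if (setup_hw(priv))
		return -EIO;	/* no kfree(priv): devm frees it on probe failure */

	return 0;
}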
switch (kcs_bmc->phase) {
case KCS_PHASE_WRITE_START:
kcs_bmc->phase = KCS_PHASE_WRITE_DATA;
- /* fall through */
+ fallthrough;
case KCS_PHASE_WRITE_DATA:
if (kcs_bmc->data_in_idx < KCS_MSG_BUFSIZ) {
ret = lp_set_timeout32(minor, (void __user *)arg);
break;
}
- /* fall through - for 64-bit */
+ fallthrough; /* for 64-bit */
case LPSETTIMEOUT_NEW:
ret = lp_set_timeout64(minor, (void __user *)arg);
break;
ret = lp_set_timeout32(minor, (void __user *)arg);
break;
}
- /* fall through - for x32 mode */
+ fallthrough; /* for x32 mode */
case LPSETTIMEOUT_NEW:
ret = lp_set_timeout64(minor, (void __user *)arg);
break;
switch (orig) {
case SEEK_CUR:
offset += file->f_pos;
- /* fall through */
+ fallthrough;
case SEEK_SET:
/* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
if ((unsigned long long)offset >= -MAX_ERRNO) {
#ifdef CONFIG_PPC
case OBSOLETE_PMAC_NVRAM_GET_OFFSET:
pr_warn("nvram: Using obsolete PMAC_NVRAM_GET_OFFSET ioctl\n");
- /* fall through */
+ fallthrough;
case IOC_NVRAM_GET_OFFSET:
ret = -EINVAL;
#ifdef CONFIG_PPC_PMAC
This option enables support for the Andestech ATCPIT100 timers.
config RISCV_TIMER
- bool "Timer for the RISC-V platform"
+ bool "Timer for the RISC-V platform" if COMPILE_TEST
depends on GENERIC_SCHED_CLOCK && RISCV
- default y
select TIMER_PROBE
select TIMER_OF
help
is accessed via both the SBI and the rdcycle instruction. This is
required for all RISC-V systems.
+config CLINT_TIMER
+ bool "CLINT Timer for the RISC-V platform" if COMPILE_TEST
+ depends on GENERIC_SCHED_CLOCK && RISCV
+ select TIMER_PROBE
+ select TIMER_OF
+ help
+ This option enables the CLINT timer for RISC-V systems. The CLINT
+ driver is usually used for NoMMU RISC-V systems.
+
config CSKY_MP_TIMER
bool "SMP Timer for the C-SKY platform" if COMPILE_TEST
depends on CSKY
obj-$(CONFIG_X86_NUMACHIP) += numachip.o
obj-$(CONFIG_ATCPIT100_TIMER) += timer-atcpit100.o
obj-$(CONFIG_RISCV_TIMER) += timer-riscv.o
+obj-$(CONFIG_CLINT_TIMER) += timer-clint.o
obj-$(CONFIG_CSKY_MP_TIMER) += timer-mp-csky.o
obj-$(CONFIG_GX6605S_TIMER) += timer-gx6605s.o
obj-$(CONFIG_HYPERV_TIMER) += hyperv_timer.o
/* restore original register value */
writel_relaxed(ttccs->scale_clk_ctrl_reg_old,
ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET);
- /* fall through */
+ fallthrough;
default:
return NOTIFY_DONE;
}
clockevents_update_freq(&ttcce->ce, ndata->new_rate / PRESCALE);
- /* fall through */
+ fallthrough;
case PRE_RATE_CHANGE:
case ABORT_RATE_CHANGE:
default:
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Western Digital Corporation or its affiliates.
+ *
+ * Most M-mode (i.e. NoMMU) RISC-V systems have a CLINT MMIO timer
+ * device.
+ */
+
+#define pr_fmt(fmt) "clint: " fmt
+#include <linux/bitops.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/cpu.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/sched_clock.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/interrupt.h>
+#include <linux/of_irq.h>
+#include <linux/smp.h>
+
+#define CLINT_IPI_OFF 0
+#define CLINT_TIMER_CMP_OFF 0x4000
+#define CLINT_TIMER_VAL_OFF 0xbff8
+
+/* CLINT manages IPI and Timer for RISC-V M-mode */
+static u32 __iomem *clint_ipi_base;
+static u64 __iomem *clint_timer_cmp;
+static u64 __iomem *clint_timer_val;
+static unsigned long clint_timer_freq;
+static unsigned int clint_timer_irq;
+
+static void clint_send_ipi(const struct cpumask *target)
+{
+ unsigned int cpu;
+
+ for_each_cpu(cpu, target)
+ writel(1, clint_ipi_base + cpuid_to_hartid_map(cpu));
+}
+
+static void clint_clear_ipi(void)
+{
+ writel(0, clint_ipi_base + cpuid_to_hartid_map(smp_processor_id()));
+}
+
+static struct riscv_ipi_ops clint_ipi_ops = {
+ .ipi_inject = clint_send_ipi,
+ .ipi_clear = clint_clear_ipi,
+};
+
+#ifdef CONFIG_64BIT
+#define clint_get_cycles() readq_relaxed(clint_timer_val)
+#else
+#define clint_get_cycles() readl_relaxed(clint_timer_val)
+#define clint_get_cycles_hi() readl_relaxed(((u32 *)clint_timer_val) + 1)
+#endif
+
+#ifdef CONFIG_64BIT
+static u64 notrace clint_get_cycles64(void)
+{
+ return clint_get_cycles();
+}
+#else /* CONFIG_64BIT */
+static u64 notrace clint_get_cycles64(void)
+{
+ u32 hi, lo;
+
+ do {
+ hi = clint_get_cycles_hi();
+ lo = clint_get_cycles();
+ } while (hi != clint_get_cycles_hi());
+
+ return ((u64)hi << 32) | lo;
+}
+#endif /* CONFIG_64BIT */
+
+static u64 clint_rdtime(struct clocksource *cs)
+{
+ return clint_get_cycles64();
+}
+
+static struct clocksource clint_clocksource = {
+ .name = "clint_clocksource",
+ .rating = 300,
+ .mask = CLOCKSOURCE_MASK(64),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ .read = clint_rdtime,
+};
+
+static int clint_clock_next_event(unsigned long delta,
+ struct clock_event_device *ce)
+{
+ void __iomem *r = clint_timer_cmp +
+ cpuid_to_hartid_map(smp_processor_id());
+
+ csr_set(CSR_IE, IE_TIE);
+ writeq_relaxed(clint_get_cycles64() + delta, r);
+ return 0;
+}
+
+static DEFINE_PER_CPU(struct clock_event_device, clint_clock_event) = {
+ .name = "clint_clockevent",
+ .features = CLOCK_EVT_FEAT_ONESHOT,
+ .rating = 100,
+ .set_next_event = clint_clock_next_event,
+};
+
+static int clint_timer_starting_cpu(unsigned int cpu)
+{
+ struct clock_event_device *ce = per_cpu_ptr(&clint_clock_event, cpu);
+
+ ce->cpumask = cpumask_of(cpu);
+ clockevents_config_and_register(ce, clint_timer_freq, 100, 0x7fffffff);
+
+ enable_percpu_irq(clint_timer_irq,
+ irq_get_trigger_type(clint_timer_irq));
+ return 0;
+}
+
+static int clint_timer_dying_cpu(unsigned int cpu)
+{
+ disable_percpu_irq(clint_timer_irq);
+ return 0;
+}
+
+static irqreturn_t clint_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *evdev = this_cpu_ptr(&clint_clock_event);
+
+ csr_clear(CSR_IE, IE_TIE);
+ evdev->event_handler(evdev);
+
+ return IRQ_HANDLED;
+}
+
+static int __init clint_timer_init_dt(struct device_node *np)
+{
+ int rc;
+ u32 i, nr_irqs;
+ void __iomem *base;
+ struct of_phandle_args oirq;
+
+ /*
+ * Ensure that CLINT device interrupts are either RV_IRQ_TIMER or
+ * RV_IRQ_SOFT. If it's anything else then we ignore the device.
+ */
+ nr_irqs = of_irq_count(np);
+ for (i = 0; i < nr_irqs; i++) {
+ if (of_irq_parse_one(np, i, &oirq)) {
+ pr_err("%pOFP: failed to parse irq %d.\n", np, i);
+ continue;
+ }
+
+ if ((oirq.args_count != 1) ||
+ (oirq.args[0] != RV_IRQ_TIMER &&
+ oirq.args[0] != RV_IRQ_SOFT)) {
+ pr_err("%pOFP: invalid irq %d (hwirq %d)\n",
+ np, i, oirq.args[0]);
+ return -ENODEV;
+ }
+
+ /* Find parent irq domain and map timer irq */
+ if (!clint_timer_irq &&
+ oirq.args[0] == RV_IRQ_TIMER &&
+ irq_find_host(oirq.np))
+ clint_timer_irq = irq_of_parse_and_map(np, i);
+ }
+
+ /* If CLINT timer irq not found then fail */
+ if (!clint_timer_irq) {
+ pr_err("%pOFP: timer irq not found\n", np);
+ return -ENODEV;
+ }
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ pr_err("%pOFP: could not map registers\n", np);
+ return -ENODEV;
+ }
+
+ clint_ipi_base = base + CLINT_IPI_OFF;
+ clint_timer_cmp = base + CLINT_TIMER_CMP_OFF;
+ clint_timer_val = base + CLINT_TIMER_VAL_OFF;
+ clint_timer_freq = riscv_timebase;
+
+ pr_info("%pOFP: timer running at %ld Hz\n", np, clint_timer_freq);
+
+ rc = clocksource_register_hz(&clint_clocksource, clint_timer_freq);
+ if (rc) {
+ pr_err("%pOFP: clocksource register failed [%d]\n", np, rc);
+ goto fail_iounmap;
+ }
+
+ sched_clock_register(clint_get_cycles64, 64, clint_timer_freq);
+
+ rc = request_percpu_irq(clint_timer_irq, clint_timer_interrupt,
+ "clint-timer", &clint_clock_event);
+ if (rc) {
+ pr_err("registering percpu irq failed [%d]\n", rc);
+ goto fail_iounmap;
+ }
+
+ rc = cpuhp_setup_state(CPUHP_AP_CLINT_TIMER_STARTING,
+ "clockevents/clint/timer:starting",
+ clint_timer_starting_cpu,
+ clint_timer_dying_cpu);
+ if (rc) {
+ pr_err("%pOFP: cpuhp setup state failed [%d]\n", np, rc);
+ goto fail_free_irq;
+ }
+
+ riscv_set_ipi_ops(&clint_ipi_ops);
+ clint_clear_ipi();
+
+ return 0;
+
+fail_free_irq:
+ free_percpu_irq(clint_timer_irq, &clint_clock_event);
+fail_iounmap:
+ iounmap(base);
+ return rc;
+}
+
+TIMER_OF_DECLARE(clint_timer, "riscv,clint0", clint_timer_init_dt);
+TIMER_OF_DECLARE(clint_timer1, "sifive,clint0", clint_timer_init_dt);
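One detail of the new driver worth calling out is clint_get_cycles64() on 32-bit: the 64-bit MMIO counter must be read as two 32-bit halves, and re-reading the high word detects a low-word wraparound between the two reads. A standalone model of the same retry loop:

#include <stdint.h>

static volatile uint32_t hw_hi, hw_lo;	/* stand-ins for the two MMIO words */

static uint64_t read_counter64(void)
{
	uint32_t hi, lo;

	do {
		hi = hw_hi;
		lo = hw_lo;
	} while (hi != hw_hi);	/* low word wrapped mid-read: retry */

	return ((uint64_t)hi << 32) | lo;
}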
#include <linux/of_irq.h>
#include <asm/smp.h>
#include <asm/sbi.h>
-
-u64 __iomem *riscv_time_cmp;
-u64 __iomem *riscv_time_val;
-
-static inline void mmio_set_timer(u64 val)
-{
- void __iomem *r;
-
- r = riscv_time_cmp + cpuid_to_hartid_map(smp_processor_id());
- writeq_relaxed(val, r);
-}
+#include <asm/timex.h>
static int riscv_clock_next_event(unsigned long delta,
struct clock_event_device *ce)
{
csr_set(CSR_IE, IE_TIE);
- if (IS_ENABLED(CONFIG_RISCV_SBI))
- sbi_set_timer(get_cycles64() + delta);
- else
- mmio_set_timer(get_cycles64() + delta);
+ sbi_set_timer(get_cycles64() + delta);
return 0;
}
freq = arch_freq_get_on_cpu(policy->cpu);
if (freq)
ret = sprintf(buf, "%u\n", freq);
- else if (cpufreq_driver && cpufreq_driver->setpolicy &&
- cpufreq_driver->get)
+ else if (cpufreq_driver->setpolicy && cpufreq_driver->get)
ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
else
ret = sprintf(buf, "%u\n", policy->cur);
* @epp_policy: Last saved policy used to set EPP/EPB
* @epp_default: Power on default HWP energy performance
* preference/bias
- * @epp_saved: Saved EPP/EPB during system suspend or CPU offline
- * operation
* @epp_cached: Cached HWP energy-performance preference value
* @hwp_req_cached: Cached value of the last HWP Request MSR
* @hwp_cap_cached: Cached value of the last HWP Capabilities MSR
* @last_io_update: Last time when IO wake flag was set
* @sched_flags: Store scheduler flags for possible cross CPU update
* @hwp_boost_min: Last HWP boosted min performance
+ * @suspended: Whether or not the driver has been suspended.
*
* This structure stores per CPU instance data for all CPUs.
*/
s16 epp_powersave;
s16 epp_policy;
s16 epp_default;
- s16 epp_saved;
s16 epp_cached;
u64 hwp_req_cached;
u64 hwp_cap_cached;
u64 last_io_update;
unsigned int sched_flags;
u32 hwp_boost_min;
+ bool suspended;
};
static struct cpudata **all_cpu_data;
static int intel_pstate_set_epp(struct cpudata *cpu, u32 epp)
{
+ int ret;
+
/*
* Use the cached HWP Request MSR value, because in the active mode the
* register itself may be updated by intel_pstate_hwp_boost_up() or
* function, so it cannot run in parallel with the update below.
*/
WRITE_ONCE(cpu->hwp_req_cached, value);
- return wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
+ ret = wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
+ if (!ret)
+ cpu->epp_cached = epp;
+
+ return ret;
}
static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
else if (epp == -EINVAL)
epp = epp_values[pref_index - 1];
+ /*
+ * To avoid confusion, refuse to set EPP to any values different
+ * from 0 (performance) if the current policy is "performance",
+ * because those values would be overridden.
+ */
+ if (epp > 0 && cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
+ return -EBUSY;
+
ret = intel_pstate_set_epp(cpu_data, epp);
} else {
if (epp == -EINVAL)
cpufreq_stop_governor(policy);
ret = intel_pstate_set_epp(cpu, epp);
err = cpufreq_start_governor(policy);
- if (!ret) {
- cpu->epp_cached = epp;
+ if (!ret)
ret = err;
- }
}
}
rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
WRITE_ONCE(all_cpu_data[cpu]->hwp_cap_cached, cap);
- if (global.no_turbo)
+ if (global.no_turbo || global.turbo_disabled)
*current_max = HWP_GUARANTEED_PERF(cap);
else
*current_max = HWP_HIGHEST_PERF(cap);
cpu_data->epp_policy = cpu_data->policy;
- if (cpu_data->epp_saved >= 0) {
- epp = cpu_data->epp_saved;
- cpu_data->epp_saved = -EINVAL;
- goto update_epp;
- }
-
if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) {
epp = intel_pstate_get_epp(cpu_data, value);
cpu_data->epp_powersave = epp;
epp = cpu_data->epp_powersave;
}
-update_epp:
if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
value &= ~GENMASK_ULL(31, 24);
value |= (u64)epp << 24;
wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
}
-static void intel_pstate_hwp_force_min_perf(int cpu)
+static void intel_pstate_hwp_offline(struct cpudata *cpu)
{
- u64 value;
+ u64 value = READ_ONCE(cpu->hwp_req_cached);
int min_perf;
- value = all_cpu_data[cpu]->hwp_req_cached;
+ if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
+ /*
+ * In case the EPP has been set to "performance" by the
+ * active mode "performance" scaling algorithm, replace that
+ * temporary value with the cached EPP one.
+ */
+ value &= ~GENMASK_ULL(31, 24);
+ value |= HWP_ENERGY_PERF_PREFERENCE(cpu->epp_cached);
+ WRITE_ONCE(cpu->hwp_req_cached, value);
+ }
+
value &= ~GENMASK_ULL(31, 0);
- min_perf = HWP_LOWEST_PERF(all_cpu_data[cpu]->hwp_cap_cached);
+ min_perf = HWP_LOWEST_PERF(cpu->hwp_cap_cached);
/* Set hwp_max = hwp_min */
value |= HWP_MAX_PERF(min_perf);
if (boot_cpu_has(X86_FEATURE_HWP_EPP))
value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE);
- wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
-}
-
-static int intel_pstate_hwp_save_state(struct cpufreq_policy *policy)
-{
- struct cpudata *cpu_data = all_cpu_data[policy->cpu];
-
- if (!hwp_active)
- return 0;
-
- cpu_data->epp_saved = intel_pstate_get_epp(cpu_data, 0);
-
- return 0;
+ wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
}
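intel_pstate_hwp_offline() above rewrites the EPP field in place: EPP occupies bits 31:24 of the HWP request MSR value, so the code clears GENMASK_ULL(31, 24) and ORs the cached preference back in before clamping min and max. A worked check of that splice, in plain stdint with the same bit positions:

#include <assert.h>
#include <stdint.h>

#define EPP_MASK	(0xffULL << 24)			/* GENMASK_ULL(31, 24) */
#define EPP(v)		(((uint64_t)(v) & 0xff) << 24)	/* the EPP field encoder */

int main(void)
{
	uint64_t req = 0xaa112233ULL;		/* some cached HWP request value */

	req = (req & ~EPP_MASK) | EPP(0x80);	/* splice in EPP 0x80 */
	assert(((req >> 24) & 0xff) == 0x80);	/* EPP byte updated */
	assert((req & 0x00ffffffULL) == 0x112233);	/* low fields untouched */
	return 0;
}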
#define POWER_CTL_EE_ENABLE 1
static void intel_pstate_hwp_enable(struct cpudata *cpudata);
+static void intel_pstate_hwp_reenable(struct cpudata *cpu)
+{
+ intel_pstate_hwp_enable(cpu);
+ wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, READ_ONCE(cpu->hwp_req_cached));
+}
+
+static int intel_pstate_suspend(struct cpufreq_policy *policy)
+{
+ struct cpudata *cpu = all_cpu_data[policy->cpu];
+
+ pr_debug("CPU %d suspending\n", cpu->cpu);
+
+ cpu->suspended = true;
+
+ return 0;
+}
+
static int intel_pstate_resume(struct cpufreq_policy *policy)
{
+ struct cpudata *cpu = all_cpu_data[policy->cpu];
+
+ pr_debug("CPU %d resuming\n", cpu->cpu);
/* Only restore if the system default is changed */
if (power_ctl_ee_state == POWER_CTL_EE_ENABLE)
else if (power_ctl_ee_state == POWER_CTL_EE_DISABLE)
set_power_ctl_ee_state(false);
- if (!hwp_active)
- return 0;
+ if (cpu->suspended && hwp_active) {
+ mutex_lock(&intel_pstate_limits_lock);
- mutex_lock(&intel_pstate_limits_lock);
+ /* Re-enable HWP, because "online" has not done that. */
+ intel_pstate_hwp_reenable(cpu);
- if (policy->cpu == 0)
- intel_pstate_hwp_enable(all_cpu_data[policy->cpu]);
+ mutex_unlock(&intel_pstate_limits_lock);
+ }
- all_cpu_data[policy->cpu]->epp_policy = 0;
- intel_pstate_hwp_set(policy->cpu);
-
- mutex_unlock(&intel_pstate_limits_lock);
+ cpu->suspended = false;
return 0;
}
wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
- cpudata->epp_policy = 0;
if (cpudata->epp_default == -EINVAL)
cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
}
all_cpu_data[cpunum] = cpu;
- cpu->epp_default = -EINVAL;
- cpu->epp_powersave = -EINVAL;
- cpu->epp_saved = -EINVAL;
- }
-
- cpu = all_cpu_data[cpunum];
+ cpu->cpu = cpunum;
- cpu->cpu = cpunum;
+ cpu->epp_default = -EINVAL;
- if (hwp_active) {
- const struct x86_cpu_id *id;
+ if (hwp_active) {
+ const struct x86_cpu_id *id;
- intel_pstate_hwp_enable(cpu);
+ intel_pstate_hwp_enable(cpu);
- id = x86_match_cpu(intel_pstate_hwp_boost_ids);
- if (id && intel_pstate_acpi_pm_profile_server())
- hwp_boost = true;
+ id = x86_match_cpu(intel_pstate_hwp_boost_ids);
+ if (id && intel_pstate_acpi_pm_profile_server())
+ hwp_boost = true;
+ }
+ } else if (hwp_active) {
+ /*
+ * Re-enable HWP in case this happens after a resume from ACPI
+ * S3 if the CPU was offline during the whole system/resume
+ * cycle.
+ */
+ intel_pstate_hwp_reenable(cpu);
}
+ cpu->epp_powersave = -EINVAL;
+ cpu->epp_policy = 0;
+
intel_pstate_get_cpu_pstates(cpu);
pr_debug("controlling: cpu %d\n", cpunum);
return 0;
}
-static void intel_cpufreq_stop_cpu(struct cpufreq_policy *policy)
+static int intel_pstate_cpu_offline(struct cpufreq_policy *policy)
{
+ struct cpudata *cpu = all_cpu_data[policy->cpu];
+
+ pr_debug("CPU %d going offline\n", cpu->cpu);
+
+ if (cpu->suspended)
+ return 0;
+
+ /*
+ * If the CPU is an SMT thread and it goes offline with the performance
+ * settings different from the minimum, it will prevent its sibling
+ * from getting to lower performance levels, so force the minimum
+ * performance on CPU offline to prevent that from happening.
+ */
if (hwp_active)
- intel_pstate_hwp_force_min_perf(policy->cpu);
+ intel_pstate_hwp_offline(cpu);
else
- intel_pstate_set_min_pstate(all_cpu_data[policy->cpu]);
+ intel_pstate_set_min_pstate(cpu);
+
+ intel_pstate_exit_perf_limits(policy);
+
+ return 0;
+}
+
+static int intel_pstate_cpu_online(struct cpufreq_policy *policy)
+{
+ struct cpudata *cpu = all_cpu_data[policy->cpu];
+
+ pr_debug("CPU %d going online\n", cpu->cpu);
+
+ intel_pstate_init_acpi_perf_limits(policy);
+
+ if (hwp_active) {
+ /*
+ * Re-enable HWP and clear the "suspended" flag to let "resume"
+ * know that it need not do that.
+ */
+ intel_pstate_hwp_reenable(cpu);
+ cpu->suspended = false;
+ }
+
+ return 0;
}
static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
{
- pr_debug("CPU %d exiting\n", policy->cpu);
+ pr_debug("CPU %d stopping\n", policy->cpu);
intel_pstate_clear_update_util_hook(policy->cpu);
- if (hwp_active)
- intel_pstate_hwp_save_state(policy);
-
- intel_cpufreq_stop_cpu(policy);
}
static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
{
- intel_pstate_exit_perf_limits(policy);
+ pr_debug("CPU %d exiting\n", policy->cpu);
policy->fast_switch_possible = false;
*/
policy->policy = CPUFREQ_POLICY_POWERSAVE;
+ if (hwp_active) {
+ struct cpudata *cpu = all_cpu_data[policy->cpu];
+
+ cpu->epp_cached = intel_pstate_get_epp(cpu, 0);
+ }
+
return 0;
}
.flags = CPUFREQ_CONST_LOOPS,
.verify = intel_pstate_verify_policy,
.setpolicy = intel_pstate_set_policy,
- .suspend = intel_pstate_hwp_save_state,
+ .suspend = intel_pstate_suspend,
.resume = intel_pstate_resume,
.init = intel_pstate_cpu_init,
.exit = intel_pstate_cpu_exit,
.stop_cpu = intel_pstate_stop_cpu,
+ .offline = intel_pstate_cpu_offline,
+ .online = intel_pstate_cpu_online,
.update_limits = intel_pstate_update_limits,
.name = "intel_pstate",
};
policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY_HWP;
rdmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, &value);
WRITE_ONCE(cpu->hwp_req_cached, value);
- cpu->epp_cached = (value & GENMASK_ULL(31, 24)) >> 24;
+ cpu->epp_cached = intel_pstate_get_epp(cpu, value);
} else {
turbo_max = cpu->pstate.turbo_pstate;
policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY;
.fast_switch = intel_cpufreq_fast_switch,
.init = intel_cpufreq_cpu_init,
.exit = intel_cpufreq_cpu_exit,
- .stop_cpu = intel_cpufreq_stop_cpu,
+ .offline = intel_pstate_cpu_offline,
+ .online = intel_pstate_cpu_online,
+ .suspend = intel_pstate_suspend,
+ .resume = intel_pstate_resume,
.update_limits = intel_pstate_update_limits,
.name = "intel_cpufreq",
};
}
put_online_cpus();
- if (intel_pstate_driver == &intel_pstate)
- intel_pstate_sysfs_hide_hwp_dynamic_boost();
-
intel_pstate_driver = NULL;
}
return 0;
}
-static int intel_pstate_unregister_driver(void)
-{
- cpufreq_unregister_driver(intel_pstate_driver);
- intel_pstate_driver_cleanup();
-
- return 0;
-}
-
static ssize_t intel_pstate_show_status(char *buf)
{
if (!intel_pstate_driver)
static int intel_pstate_update_status(const char *buf, size_t size)
{
- int ret;
+ if (size == 3 && !strncmp(buf, "off", size)) {
+ if (!intel_pstate_driver)
+ return -EINVAL;
+
+ if (hwp_active)
+ return -EBUSY;
- if (size == 3 && !strncmp(buf, "off", size))
- return intel_pstate_driver ?
- intel_pstate_unregister_driver() : -EINVAL;
+ cpufreq_unregister_driver(intel_pstate_driver);
+ intel_pstate_driver_cleanup();
+ }
if (size == 6 && !strncmp(buf, "active", size)) {
if (intel_pstate_driver) {
if (intel_pstate_driver == &intel_pstate)
return 0;
- ret = intel_pstate_unregister_driver();
- if (ret)
- return ret;
+ cpufreq_unregister_driver(intel_pstate_driver);
}
return intel_pstate_register_driver(&intel_pstate);
if (intel_pstate_driver == &intel_cpufreq)
return 0;
- ret = intel_pstate_unregister_driver();
- if (ret)
- return ret;
+ cpufreq_unregister_driver(intel_pstate_driver);
+ intel_pstate_sysfs_hide_hwp_dynamic_boost();
}
return intel_pstate_register_driver(&intel_cpufreq);
return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
case 0x0D: /* Pentium M (Dothan) */
p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
- /* fall through */
+ fallthrough;
case 0x09: /* Pentium M (Banias) */
return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
}
} else
return SPEEDSTEP_CPU_PIII_C;
}
- /* fall through */
+ fallthrough;
default:
return 0;
}
static struct workqueue_struct *read_counters_wq;
-static enum cluster get_cpu_cluster(u8 cpu)
+static void get_cpu_cluster(void *cluster)
{
- return MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
+ u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
+
+ *((u32 *)cluster) = MPIDR_AFFINITY_LEVEL(mpidr, 1);
}
/*
static int tegra194_cpufreq_init(struct cpufreq_policy *policy)
{
struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
- int cl = get_cpu_cluster(policy->cpu);
u32 cpu;
+ u32 cl;
+
+ smp_call_function_single(policy->cpu, get_cpu_cluster, &cl, true);
if (cl >= data->num_clusters)
return -EINVAL;
case DRA76_EFUSE_HAS_PLUS_MPU_OPP:
case DRA76_EFUSE_HAS_ALL_MPU_OPP:
calculated_efuse |= DRA76_EFUSE_PLUS_MPU_OPP;
- /* Fall through */
+ fallthrough;
case DRA7_EFUSE_HAS_ALL_MPU_OPP:
case DRA7_EFUSE_HAS_HIGH_MPU_OPP:
calculated_efuse |= DRA7_EFUSE_HIGH_MPU_OPP;
- /* Fall through */
+ fallthrough;
case DRA7_EFUSE_HAS_OD_MPU_OPP:
calculated_efuse |= DRA7_EFUSE_OD_MPU_OPP;
}
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/tick.h>
+#include <linux/mmu_context.h>
#include <trace/events/power.h>
#include "cpuidle.h"
* executing it contains RCU usage regarded as invalid in the idle
* context, so tell RCU about that.
*/
- RCU_NONIDLE(tick_freeze());
+ tick_freeze();
/*
* The state used here cannot be a "coupled" one, because the "coupled"
* cpuidle mechanism enables interrupts and doing that with timekeeping
* suspended is generally unsafe.
*/
stop_critical_timings();
+ rcu_idle_enter();
drv->states[index].enter_s2idle(dev, drv, index);
- WARN_ON(!irqs_disabled());
+ if (WARN_ON_ONCE(!irqs_disabled()))
+ local_irq_disable();
/*
* timekeeping_resume() that will be called by tick_unfreeze() for the
* first CPU executing it calls functions containing RCU read-side
* critical sections, so tell RCU about that.
*/
- RCU_NONIDLE(tick_unfreeze());
+ rcu_idle_exit();
+ tick_unfreeze();
start_critical_timings();
time_end = ns_to_ktime(local_clock());
broadcast = false;
}
+ if (target_state->flags & CPUIDLE_FLAG_TLB_FLUSHED)
+ leave_mm(dev->cpu);
+
/* Take note of the planned idle state. */
sched_idle_set_state(target_state);
- trace_cpu_idle_rcuidle(index, dev->cpu);
+ trace_cpu_idle(index, dev->cpu);
time_start = ns_to_ktime(local_clock());
stop_critical_timings();
+ rcu_idle_enter();
entered_state = target_state->enter(dev, drv, index);
+ rcu_idle_exit();
start_critical_timings();
sched_clock_idle_wakeup_event();
time_end = ns_to_ktime(local_clock());
- trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
+ trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu);
/* The cpu is no longer idle or about to enter idle. */
sched_idle_set_state(NULL);
select CRYPTO_AES
select CRYPTO_AES_ARM64
select CRYPTO_ALGAPI
+ select CRYPTO_SHA1
+ select CRYPTO_SHA256
+ select CRYPTO_SHA512
select HW_RANDOM
select SG_SPLIT
help
case ARTPEC6_CRYPTO_PREPARE_HASH_NO_START:
ret = 0;
- /* Fallthrough */
+ fallthrough;
default:
artpec6_crypto_common_destroy(&req_ctx->common);
case 3:
sg_ptr->u.s.len2 = cpu_to_be16(list[i * 4 + 2].size);
sg_ptr->ptr2 = cpu_to_be64(list[i * 4 + 2].dma_addr);
- /* Fall through */
+ fallthrough;
case 2:
sg_ptr->u.s.len1 = cpu_to_be16(list[i * 4 + 1].size);
sg_ptr->ptr1 = cpu_to_be64(list[i * 4 + 1].dma_addr);
- /* Fall through */
+ fallthrough;
case 1:
sg_ptr->u.s.len0 = cpu_to_be16(list[i * 4 + 0].size);
sg_ptr->ptr0 = cpu_to_be64(list[i * 4 + 0].dma_addr);
break;
/* update to the next state and also initialize TCB */
tx_info->connection_state = new_state;
- /* FALLTHRU */
+ fallthrough;
case KTLS_CONN_ACT_OPEN_RPL:
/* if we are stuck in this state, it means tcb init might not have
* been received by HW, so try sending it again.
break;
/* update to the next state and check if l2t_state is valid */
tx_info->connection_state = new_state;
- /* FALLTHRU */
+ fallthrough;
case KTLS_CONN_SET_TCB_RPL:
/* Check if l2t state is valid, then move to ready state. */
if (cxgb4_check_l2t_valid(tx_info->l2te)) {
case 3:
sg_ptr->u.s.len2 = cpu_to_be16(list[i * 4 + 2].size);
sg_ptr->ptr2 = cpu_to_be64(list[i * 4 + 2].dma_addr);
- /* Fall through */
+ fallthrough;
case 2:
sg_ptr->u.s.len1 = cpu_to_be16(list[i * 4 + 1].size);
sg_ptr->ptr1 = cpu_to_be64(list[i * 4 + 1].dma_addr);
- /* Fall through */
+ fallthrough;
case 1:
sg_ptr->u.s.len0 = cpu_to_be16(list[i * 4 + 0].size);
sg_ptr->ptr0 = cpu_to_be64(list[i * 4 + 0].dma_addr);
memcpy(admin->virt_addr + offset, in, ADF_ADMINMSG_LEN);
ADF_CSR_WR(mailbox, mb_offset, 1);
- ret = readl_poll_timeout(mailbox + mb_offset, status,
- status == 0, ADF_ADMIN_POLL_DELAY_US,
- ADF_ADMIN_POLL_TIMEOUT_US);
+ ret = read_poll_timeout(ADF_CSR_RD, status, status == 0,
+ ADF_ADMIN_POLL_DELAY_US,
+ ADF_ADMIN_POLL_TIMEOUT_US, true,
+ mailbox, mb_offset);
if (ret < 0) {
/* Response timeout */
dev_err(&GET_DEV(accel_dev),
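The hunk above moves the ADF mailbox poll from readl_poll_timeout() to read_poll_timeout(), so the accessor (ADF_CSR_RD) and its arguments are passed explicitly. A toy userspace model of the polling contract; the real macro lives in include/linux/iopoll.h, and the fake accessor here is purely illustrative:

#include <stdio.h>
#include <errno.h>

/*
 * Toy model of the read_poll_timeout() contract: evaluate op(args...)
 * until cond holds or the timeout budget is spent. Time is modelled
 * as loop iterations; the real macro sleeps between reads.
 */
#define read_poll_timeout(op, val, cond, delay_us, timeout_us,	\
			  delay_before_read, args...)		\
({								\
	long __budget = (timeout_us);				\
	if (delay_before_read)					\
		__budget -= (delay_us);				\
	for (;;) {						\
		(val) = op(args);				\
		if (cond)					\
			break;					\
		__budget -= (delay_us);				\
		if (__budget <= 0)				\
			break;					\
	}							\
	(cond) ? 0 : -ETIMEDOUT;				\
})

static int reads_left = 3;

/* Stand-in for ADF_CSR_RD(): busy until the device clears the mailbox. */
static unsigned int fake_csr_read(void *base, unsigned int offset)
{
	(void)base; (void)offset;
	return reads_left-- > 0 ? 1 : 0;
}

int main(void)
{
	unsigned int status;
	int ret = read_poll_timeout(fake_csr_read, status, status == 0,
				    10, 100, 1, NULL, 0);

	printf("ret=%d status=%u\n", ret, status);
	return 0;
}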
/* VF is newer than PF and decides whether it is compatible */
if (accel_dev->vf.pf_version >= hw_data->min_iov_compat_ver)
break;
- /* fall through */
+ fallthrough;
case ADF_PF2VF_VF_INCOMPATIBLE:
dev_err(&GET_DEV(accel_dev),
"PF (vers %d) and VF (vers %d) are not compatible\n",
case ICP_GPA_ABS:
case ICP_GPB_ABS:
ctx_mask = 0;
- /* fall through */
+ fallthrough;
case ICP_GPA_REL:
case ICP_GPB_REL:
return qat_hal_init_gpr(handle, ae, ctx_mask, reg_type,
case ICP_SR_RD_ABS:
case ICP_DR_RD_ABS:
ctx_mask = 0;
- /* fall through */
+ fallthrough;
case ICP_SR_REL:
case ICP_DR_REL:
case ICP_SR_RD_REL:
case ICP_SR_WR_ABS:
case ICP_DR_WR_ABS:
ctx_mask = 0;
- /* fall through */
+ fallthrough;
case ICP_SR_WR_REL:
case ICP_DR_WR_REL:
return qat_hal_init_wr_xfer(handle, ae, ctx_mask, reg_type,
case CRYP_KEY_SIZE_256:
ctx->key_4_l = readl_relaxed(&src_reg->key_4_l);
ctx->key_4_r = readl_relaxed(&src_reg->key_4_r);
- /* Fall through */
+ fallthrough;
case CRYP_KEY_SIZE_192:
ctx->key_3_l = readl_relaxed(&src_reg->key_3_l);
ctx->key_3_r = readl_relaxed(&src_reg->key_3_r);
- /* Fall through */
+ fallthrough;
case CRYP_KEY_SIZE_128:
ctx->key_2_l = readl_relaxed(&src_reg->key_2_l);
ctx->key_2_r = readl_relaxed(&src_reg->key_2_r);
- /* Fall through */
+ fallthrough;
default:
ctx->key_1_l = readl_relaxed(&src_reg->key_1_l);
case CRYP_KEY_SIZE_256:
writel_relaxed(ctx->key_4_l, &reg->key_4_l);
writel_relaxed(ctx->key_4_r, &reg->key_4_r);
- /* Fall through */
+ fallthrough;
case CRYP_KEY_SIZE_192:
writel_relaxed(ctx->key_3_l, &reg->key_3_l);
writel_relaxed(ctx->key_3_r, &reg->key_3_r);
- /* Fall through */
+ fallthrough;
case CRYP_KEY_SIZE_128:
writel_relaxed(ctx->key_2_l, &reg->key_2_l);
writel_relaxed(ctx->key_2_r, &reg->key_2_r);
- /* Fall through */
+ fallthrough;
default:
writel_relaxed(ctx->key_1_l, &reg->key_1_l);
return -EBUSY;
}
- dev_dax->pgmap.type = MEMORY_DEVICE_DEVDAX;
+ dev_dax->pgmap.type = MEMORY_DEVICE_GENERIC;
addr = devm_memremap_pages(dev, &dev_dax->pgmap);
if (IS_ERR(addr))
return PTR_ERR(addr);
return false;
}
+ if (!dax_dev && !bdev_dax_supported(bdev, blocksize)) {
+ pr_debug("%s: error: dax unsupported by block device\n",
+ bdevname(bdev, buf));
+ return false;
+ }
+
id = dax_read_lock();
len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
len2 = dax_direct_access(dax_dev, pgoff_end, 1, &end_kaddr, &end_pfn);
if (ret < 0) {
dev_warn(&adev->dev,
"error in parsing resource group\n");
- return;
+ break;
}
grp = (struct acpi_csrt_group *)((void *)grp + grp->length);
}
+
+ acpi_put_table((struct acpi_table_header *)csrt);
}
/**
default:
dev_err(&pl08x->adev->dev,
"illegal burst size for memcpy, set to 1\n");
- /* Fall through */
+ fallthrough;
case PL08X_BURST_SZ_1:
cctl |= PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT |
PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT;
default:
dev_err(&pl08x->adev->dev,
"illegal bus width for memcpy, set to 8 bits\n");
- /* Fall through */
+ fallthrough;
case PL08X_BUS_WIDTH_8_BITS:
cctl |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT |
PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT;
default:
dev_err(&pl08x->adev->dev,
"illegal bus width for memcpy, set to 8 bits\n");
- /* Fall through */
+ fallthrough;
case PL08X_BUS_WIDTH_8_BITS:
cctl |= PL080_WIDTH_8BIT << FTDMAC020_LLI_SRC_WIDTH_SHIFT |
PL080_WIDTH_8BIT << FTDMAC020_LLI_DST_WIDTH_SHIFT;
switch (val) {
default:
dev_err(&adev->dev, "illegal burst size for memcpy, set to 1\n");
- /* Fall through */
+ fallthrough;
case 1:
pd->memcpy_burst_size = PL08X_BURST_SZ_1;
break;
switch (val) {
default:
dev_err(&adev->dev, "illegal bus width for memcpy, set to 8 bits\n");
- /* Fall through */
+ fallthrough;
case 8:
pd->memcpy_bus_width = PL08X_BUS_WIDTH_8_BITS;
break;
return NULL;
dmac_pdev = of_find_device_by_node(dma_spec->np);
+ if (!dmac_pdev)
+ return NULL;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
atslave = kmalloc(sizeof(*atslave), GFP_KERNEL);
- if (!atslave)
+ if (!atslave) {
+ put_device(&dmac_pdev->dev);
return NULL;
+ }
atslave->cfg = ATC_DST_H2SEL_HW | ATC_SRC_H2SEL_HW;
/*
atslave->dma_dev = &dmac_pdev->dev;
chan = dma_request_channel(mask, at_dma_filter, atslave);
- if (!chan)
+ if (!chan) {
+ put_device(&dmac_pdev->dev);
+ kfree(atslave);
return NULL;
+ }
atchan = to_at_dma_chan(chan);
atchan->per_if = dma_spec->args[0] & 0xff;
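The error paths added above exist because of_find_device_by_node() takes a device reference that every failure exit must drop. A toy refcount model of the rule, with all names hypothetical:

#include <stdio.h>
#include <stdlib.h>

/*
 * Toy refcounted device: every lookup that takes a reference must be
 * paired with a put on *all* exit paths, including error paths,
 * otherwise the reference leaks.
 */
struct toy_device { int refcount; };

static struct toy_device *toy_get(struct toy_device *d)
{ d->refcount++; return d; }

static void toy_put(struct toy_device *d)
{ d->refcount--; }

static void *request_channel(struct toy_device *dev, int fail_alloc)
{
	struct toy_device *pdev = toy_get(dev); /* like of_find_device_by_node() */
	void *slave = fail_alloc ? NULL : malloc(16);

	if (!slave) {
		toy_put(pdev);	/* drop the lookup reference on error */
		return NULL;
	}
	/* later failures must also toy_put(pdev) and free(slave) */
	return slave;
}

int main(void)
{
	struct toy_device dev = { .refcount = 1 };
	void *ch = request_channel(&dev, 1);

	printf("ch=%p refcount=%d (still 1: no leak)\n", ch, dev.refcount);
	return 0;
}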
return -EINVAL;
}
- ret = platform_get_irq(pdev, 0);
- if (ret < 0)
- return ret;
-
- jzdma->irq = ret;
-
- ret = request_irq(jzdma->irq, jz4780_dma_irq_handler, 0, dev_name(dev),
- jzdma);
- if (ret) {
- dev_err(dev, "failed to request IRQ %u!\n", jzdma->irq);
- return ret;
- }
-
jzdma->clk = devm_clk_get(dev, NULL);
if (IS_ERR(jzdma->clk)) {
dev_err(dev, "failed to get clock\n");
ret = PTR_ERR(jzdma->clk);
- goto err_free_irq;
+ return ret;
}
clk_prepare_enable(jzdma->clk);
jzchan->vchan.desc_free = jz4780_dma_desc_free;
}
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ goto err_disable_clk;
+
+ jzdma->irq = ret;
+
+ ret = request_irq(jzdma->irq, jz4780_dma_irq_handler, 0, dev_name(dev),
+ jzdma);
+ if (ret) {
+ dev_err(dev, "failed to request IRQ %u!\n", jzdma->irq);
+ goto err_disable_clk;
+ }
+
ret = dmaenginem_async_device_register(dd);
if (ret) {
dev_err(dev, "failed to register device\n");
- goto err_disable_clk;
+ goto err_free_irq;
}
/* Register with OF DMA helpers. */
jzdma);
if (ret) {
dev_err(dev, "failed to register OF DMA controller\n");
- goto err_disable_clk;
+ goto err_free_irq;
}
dev_info(dev, "JZ4780 DMA controller initialised\n");
return 0;
-err_disable_clk:
- clk_disable_unprepare(jzdma->clk);
-
err_free_irq:
free_irq(jzdma->irq, jzdma);
+
+err_disable_clk:
+ clk_disable_unprepare(jzdma->clk);
return ret;
}
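The relocated labels above restore the usual unwinding discipline: resources are released in the reverse order of acquisition, so the label list reads as the mirror image of the setup sequence. A self-contained sketch of the pattern, with stub functions:

#include <stdio.h>

static int enable_clock(void)   { puts("clk on");  return 0; }
static void disable_clock(void) { puts("clk off"); }
static int grab_irq(void)       { puts("irq on");  return 0; }
static void release_irq(void)   { puts("irq off"); }
static int register_dev(void)   { puts("register"); return -1; /* force failure */ }

static int probe(void)
{
	int ret;

	ret = enable_clock();
	if (ret)
		return ret;
	ret = grab_irq();
	if (ret)
		goto err_disable_clk;
	ret = register_dev();
	if (ret)
		goto err_free_irq;
	return 0;

err_free_irq:		/* the IRQ was acquired after the clock... */
	release_irq();
err_disable_clk:	/* ...so it is released before the clock */
	disable_clock();
	return ret;
}

int main(void)
{
	probe();	/* prints: clk on, irq on, register, irq off, clk off */
	return 0;
}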
if (xfer->cyclic) {
burst->dar = xfer->xfer.cyclic.paddr;
} else {
- burst->dar = sg_dma_address(sg);
+ burst->dar = dst_addr;
/* Unlike the typical assumption by other
* drivers/IPs the peripheral memory isn't
* a FIFO memory, in this case, it's a
* and destination addresses are increased
* by the same portion (data length)
*/
- src_addr += sg_dma_len(sg);
}
} else {
burst->dar = dst_addr;
if (xfer->cyclic) {
burst->sar = xfer->xfer.cyclic.paddr;
} else {
- burst->sar = sg_dma_address(sg);
+ burst->sar = src_addr;
/* Unlike the typical assumption by other
* drivers/IPs the peripheral memory isn't
* a FIFO memory, in this case, it's a
* and destination addresses are increased
* by the same portion (data length)
*/
- dst_addr += sg_dma_len(sg);
}
}
- if (!xfer->cyclic)
+ if (!xfer->cyclic) {
+ src_addr += sg_dma_len(sg);
+ dst_addr += sg_dma_len(sg);
sg = sg_next(sg);
+ }
}
return vchan_tx_prep(&chan->vc, &desc->vd, xfer->flags);
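The bookkeeping fix above: for a non-cyclic scatter-gather transfer, both the source and destination addresses advance by each segment's length, regardless of direction, because the peripheral side here is ordinary memory rather than a FIFO. A toy model with illustrative addresses and lengths:

#include <stdio.h>

int main(void)
{
	unsigned lens[] = { 256, 512, 128 };
	unsigned long sar = 0x80000000ul, dar = 0x90000000ul;
	unsigned i;

	for (i = 0; i < 3; i++) {
		printf("seg %u: sar=0x%lx dar=0x%lx len=%u\n",
		       i, sar, dar, lens[i]);
		sar += lens[i];		/* advance both sides... */
		dar += lens[i];		/* ...by the same amount */
	}
	return 0;
}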
switch (chan->feature & FSL_DMA_IP_MASK) {
case FSL_DMA_IP_85XX:
chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
- /* Fall through */
+ fallthrough;
case FSL_DMA_IP_83XX:
chan->toggle_ext_start = fsl_chan_toggle_ext_start;
chan->set_src_loop_size = fsl_chan_set_src_loop_size;
#else
static u64 fsl_ioread64(const u64 __iomem *addr)
{
- u32 fsl_addr = lower_32_bits(addr);
- u64 fsl_addr_hi = (u64)in_le32((u32 *)(fsl_addr + 1)) << 32;
+ u32 val_lo = in_le32((u32 __iomem *)addr);
+ u32 val_hi = in_le32((u32 __iomem *)addr + 1);
- return fsl_addr_hi | in_le32((u32 *)fsl_addr);
+ return ((u64)val_hi << 32) + val_lo;
}
static void fsl_iowrite64(u64 val, u64 __iomem *addr)
static u64 fsl_ioread64be(const u64 __iomem *addr)
{
- u32 fsl_addr = lower_32_bits(addr);
- u64 fsl_addr_hi = (u64)in_be32((u32 *)fsl_addr) << 32;
+ u32 val_hi = in_be32((u32 __iomem *)addr);
+ u32 val_lo = in_be32((u32 __iomem *)addr + 1);
- return fsl_addr_hi | in_be32((u32 *)(fsl_addr + 1));
+ return ((u64)val_hi << 32) + val_lo;
}
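The rewritten helpers read the two 32-bit halves through the correctly typed pointer and combine them, instead of truncating the pointer value itself as the old code did. A userspace model of the big-endian variant (the little-endian one swaps which half comes first):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Combine two 32-bit reads into a 64-bit value; in the big-endian
 * layout the high word sits at the lower address.
 */
static uint64_t ioread64_hi_lo(const volatile uint32_t *addr)
{
	uint32_t hi = addr[0];
	uint32_t lo = addr[1];

	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	uint32_t regs[2] = { 0x12345678u, 0x9abcdef0u };

	assert(ioread64_hi_lo(regs) == 0x123456789abcdef0ull);
	printf("0x%016llx\n", (unsigned long long)ioread64_hi_lo(regs));
	return 0;
}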
static void fsl_iowrite64be(u64 val, u64 __iomem *addr)
return 0;
}
+void idxd_device_wqs_clear_state(struct idxd_device *idxd)
+{
+ int i;
+
+ lockdep_assert_held(&idxd->dev_lock);
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = &idxd->wqs[i];
+
+ if (wq->state == IDXD_WQ_ENABLED) {
+ idxd_wq_disable_cleanup(wq);
+ wq->state = IDXD_WQ_DISABLED;
+ }
+ }
+}
+
int idxd_device_disable(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
u32 status;
+ unsigned long flags;
if (!idxd_is_enabled(idxd)) {
dev_dbg(dev, "Device is not enabled\n");
return -ENXIO;
}
+ spin_lock_irqsave(&idxd->dev_lock, flags);
+ idxd_device_wqs_clear_state(idxd);
idxd->state = IDXD_DEV_CONF_READY;
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
return 0;
}
void idxd_device_reset(struct idxd_device *idxd)
{
+ unsigned long flags;
+
idxd_cmd_exec(idxd, IDXD_CMD_RESET_DEVICE, 0, NULL);
+ spin_lock_irqsave(&idxd->dev_lock, flags);
+ idxd_device_wqs_clear_state(idxd);
+ idxd->state = IDXD_DEV_CONF_READY;
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
}
/* Device configuration bits */
#include "idxd.h"
#include "registers.h"
-void idxd_device_wqs_clear_state(struct idxd_device *idxd)
-{
- int i;
-
- lockdep_assert_held(&idxd->dev_lock);
- for (i = 0; i < idxd->max_wqs; i++) {
- struct idxd_wq *wq = &idxd->wqs[i];
-
- wq->state = IDXD_WQ_DISABLED;
- }
-}
-
static void idxd_device_reinit(struct work_struct *work)
{
struct idxd_device *idxd = container_of(work, struct idxd_device, work);
* We fall-through here intentionally, since a 2D transfer is
* similar to MEMCPY just adding the 2D slot configuration.
*/
- /* Fall through */
+ fallthrough;
case IMXDMA_DESC_MEMCPY:
imx_dmav1_writel(imxdma, d->src, DMA_SAR(imxdmac->channel));
imx_dmav1_writel(imxdma, d->dest, DMA_DAR(imxdmac->channel));
}
hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = edcr;
src_cnt = 24;
- /* fall through */
+ fallthrough;
case 17 ... 24:
if (!u_desc_ctrl.field.blk_ctrl) {
hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
}
hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = edcr;
src_cnt = 16;
- /* fall through */
+ fallthrough;
case 9 ... 16:
if (!u_desc_ctrl.field.blk_ctrl)
u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */
}
hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = edcr;
src_cnt = 8;
- /* fall through */
+ fallthrough;
case 2 ... 8:
shift = 1;
for (i = 0; i < src_cnt; i++) {
case 25 ... 32:
u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
- /* fall through */
+ fallthrough;
case 17 ... 24:
if (!u_desc_ctrl.field.blk_ctrl) {
hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
}
hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = 0;
- /* fall through */
+ fallthrough;
case 9 ... 16:
if (!u_desc_ctrl.field.blk_ctrl)
u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */
hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = 0;
- /* fall through */
+ fallthrough;
case 1 ... 8:
if (!u_desc_ctrl.field.blk_ctrl && src_cnt > 4)
u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */
default:
pr_warn("%s(): invalid bus width %u\n", __func__, width);
- /* fall through */
+ fallthrough;
case DMA_SLAVE_BUSWIDTH_1_BYTE:
size = burst;
}
return NULL;
chan = ofdma_target->of_dma_xlate(&dma_spec_target, ofdma_target);
- if (chan) {
- chan->router = ofdma->dma_router;
- chan->route_data = route_data;
- } else {
+ if (IS_ERR_OR_NULL(chan)) {
ofdma->dma_router->route_free(ofdma->dma_router->dev,
route_data);
+ } else {
+ chan->router = ofdma->dma_router;
+ chan->route_data = route_data;
}
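The xlate fix above accounts for translation callbacks that report failure either as NULL or as an ERR_PTR()-encoded errno, hence IS_ERR_OR_NULL(). A minimal model of that convention with local re-definitions:

#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)
{ return (void *)error; }

/* A pointer is "bad" if it is NULL or falls in the top errno window. */
static inline int IS_ERR_OR_NULL(const void *ptr)
{
	return !ptr || (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	int obj = 42;
	void *ok = &obj, *null = NULL, *err = ERR_PTR(-12 /* -ENOMEM */);

	printf("ok:   %d\n", IS_ERR_OR_NULL(ok));   /* 0 */
	printf("null: %d\n", IS_ERR_OR_NULL(null)); /* 1 */
	printf("err:  %d\n", IS_ERR_OR_NULL(err));  /* 1 */
	return 0;
}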
/*
if (_state(thrd) == PL330_STATE_KILLING)
UNTIL(thrd, PL330_STATE_STOPPED)
- /* fall through */
+ fallthrough;
case PL330_STATE_FAULTING:
_stop(thrd);
- /* fall through */
+ fallthrough;
case PL330_STATE_KILLING:
case PL330_STATE_COMPLETING:
UNTIL(thrd, PL330_STATE_STOPPED)
- /* fall through */
+ fallthrough;
case PL330_STATE_STOPPED:
return _trigger(thrd);
switch (direction) {
case DMA_MEM_TO_MEM:
- /* fall through */
case DMA_MEM_TO_DEV:
off += _emit_LD(dry_run, &buf[off], cond);
break;
switch (direction) {
case DMA_MEM_TO_MEM:
- /* fall through */
case DMA_DEV_TO_MEM:
off += _emit_ST(dry_run, &buf[off], cond);
break;
switch (pxs->desc->rqtype) {
case DMA_MEM_TO_DEV:
- /* fall through */
case DMA_DEV_TO_MEM:
off += _ldst_peripheral(pl330, dry_run, &buf[off], pxs, cyc,
cond);
switch (pxs->desc->rqtype) {
case DMA_MEM_TO_DEV:
- /* fall through */
case DMA_DEV_TO_MEM:
off += _emit_MOV(dry_run, &buf[off], CCR, dregs_ccr);
off += _ldst_peripheral(pl330, dry_run, &buf[off], pxs, 1,
while (burst != (1 << desc->rqcfg.brst_size))
desc->rqcfg.brst_size++;
+ desc->rqcfg.brst_len = get_burst_len(desc, len);
/*
* If burst size is smaller than bus width then make sure we only
* transfer one at a time to avoid a burst straddling an MFIFO entry.
if (desc->rqcfg.brst_size * 8 < pl330->pcfg.data_bus_width)
desc->rqcfg.brst_len = 1;
- desc->rqcfg.brst_len = get_burst_len(desc, len);
desc->bytes_requested = len;
desc->txd.flags = flags;
switch (desc->mark) {
case DESC_COMPLETED:
desc->mark = DESC_WAITING;
- /* Fall through */
+ fallthrough;
case DESC_WAITING:
if (head_acked)
async_tx_ack(&desc->async_tx);
return NULL;
}
- cppi5_tr_init(&tr_req[i].flags, CPPI5_TR_TYPE1, false, false,
- CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
- cppi5_tr_csf_set(&tr_req[i].flags, CPPI5_TR_CSF_SUPR_EVT);
+ cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false,
+ false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+ cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT);
tr_req[tr_idx].addr = sg_addr;
tr_req[tr_idx].icnt0 = tr0_cnt0;
.psil_base = 0x1000,
.enable_memcpy_support = true,
.statictr_z_mask = GENMASK(11, 0),
- .rchan_oes_offset = 0x2000,
+ .rchan_oes_offset = 0x200,
};
static struct udma_match_data am654_mcu_data = {
.psil_base = 0x6000,
.enable_memcpy_support = false,
.statictr_z_mask = GENMASK(11, 0),
- .rchan_oes_offset = 0x2000,
+ .rchan_oes_offset = 0x200,
};
static struct udma_match_data j721e_main_data = {
pvt->ops = &family_types[F17_M70H_CPUS].ops;
break;
}
- /* fall through */
+ fallthrough;
case 0x18:
fam_type = &family_types[F17_CPUS];
pvt->ops = &family_types[F17_CPUS].ops;
static bool __read_mostly force_load;
module_param(force_load, bool, 0);
+static bool system_scanned;
+
/* Memory Device - Type 17 of SMBIOS spec */
struct memdev_dmi_entry {
u8 type;
static void ghes_scan_system(void)
{
- static bool scanned;
-
- if (scanned)
+ if (system_scanned)
return;
dmi_walk(enumerate_dimms, &ghes_hw);
- scanned = true;
+ system_scanned = true;
}
void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
mutex_lock(&ghes_reg_mutex);
+ system_scanned = false;
+
if (!refcount_dec_and_test(&ghes_refcount))
goto unlock;
if (uncorrected_error) {
core_err_cnt = 1;
if (ripv)
- tp_event = HW_EVENT_ERR_FATAL;
- else
tp_event = HW_EVENT_ERR_UNCORRECTED;
+ else
+ tp_event = HW_EVENT_ERR_FATAL;
} else {
tp_event = HW_EVENT_ERR_CORRECTED;
}
switch (sz) {
case 8:
ret = _apl_rd_reg(port, off + 4, op, (u32 *)(data + 4));
- /* fall through */
+ fallthrough;
case 4:
ret |= _apl_rd_reg(port, off, op, (u32 *)data);
pnd2_printk(KERN_DEBUG, "%s=%x%08x ret=%d\n", name,
u32 optypenum = GET_BITFIELD(m->status, 4, 6);
int rc;
- tp_event = uc_err ? (ripv ? HW_EVENT_ERR_FATAL : HW_EVENT_ERR_UNCORRECTED) :
+ tp_event = uc_err ? (ripv ? HW_EVENT_ERR_UNCORRECTED : HW_EVENT_ERR_FATAL) :
HW_EVENT_ERR_CORRECTED;
/*
if (uncorrected_error) {
core_err_cnt = 1;
if (ripv) {
- tp_event = HW_EVENT_ERR_FATAL;
- } else {
tp_event = HW_EVENT_ERR_UNCORRECTED;
+ } else {
+ tp_event = HW_EVENT_ERR_FATAL;
}
} else {
tp_event = HW_EVENT_ERR_CORRECTED;
if (uncorrected_error) {
core_err_cnt = 1;
if (ripv) {
- tp_event = HW_EVENT_ERR_FATAL;
- } else {
tp_event = HW_EVENT_ERR_UNCORRECTED;
+ } else {
+ tp_event = HW_EVENT_ERR_FATAL;
}
} else {
tp_event = HW_EVENT_ERR_CORRECTED;
device->bc_implemented = BC_IMPLEMENTED;
break;
}
- /* else, fall through - to case address error */
+ fallthrough; /* to case address error */
case RCODE_ADDRESS_ERROR:
device->bc_implemented = BC_UNIMPLEMENTED;
}
if ((data[0] & bit) == (data[1] & bit))
continue;
- /* fall through - It's a 1394-1995 IRM, retry. */
+ fallthrough; /* It's a 1394-1995 IRM, retry */
default:
if (retry) {
retry--;
switch (port_type) {
case SELFID_PORT_CHILD:
(*child_port_count)++;
- /* fall through */
+ fallthrough;
case SELFID_PORT_PARENT:
case SELFID_PORT_NCONN:
(*total_port_count)++;
rcode = RCODE_ADDRESS_ERROR;
break;
}
- /* else fall through */
+ fallthrough;
case CSR_NODE_IDS:
/*
* per IEEE 1394-2008 8.3.22.3, not IEEE 1394.1-2004 3.2.8
* and 9.6, but interoperable with IEEE 1394.1-2004 bridges
*/
- /* fall through */
+ fallthrough;
case CSR_STATE_CLEAR:
case CSR_STATE_SET:
packet->ack = RCODE_GENERATION;
break;
}
- /* fall through */
+ fallthrough;
default:
packet->ack = RCODE_SEND_ERROR;
case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
control |= IR_CONTEXT_BUFFER_FILL|IR_CONTEXT_MULTI_CHANNEL_MODE;
- /* fall through */
+ fallthrough;
case FW_ISO_CONTEXT_RECEIVE:
index = ctx - ohci->ir_context_list;
match = (tags << 28) | (sync << 8) | ctx->base.channel;
efi_kobj = kobject_create_and_add("efi", firmware_kobj);
if (!efi_kobj) {
pr_err("efi: Firmware registration failed.\n");
+ destroy_workqueue(efi_rts_wq);
return -ENOMEM;
}
generic_ops_unregister();
err_put:
kobject_put(efi_kobj);
+ destroy_workqueue(efi_rts_wq);
return error;
}
*/
efi_status_t efi_parse_options(char const *cmdline)
{
- size_t len = strlen(cmdline) + 1;
+ size_t len;
efi_status_t status;
char *str, *buf;
+ if (!cmdline)
+ return EFI_SUCCESS;
+
+ len = strnlen(cmdline, COMMAND_LINE_SIZE - 1) + 1;
status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, len, (void **)&buf);
if (status != EFI_SUCCESS)
return status;
- str = skip_spaces(memcpy(buf, cmdline, len));
+ memcpy(buf, cmdline, len - 1);
+ buf[len - 1] = '\0';
+ str = skip_spaces(buf);
while (*str) {
char *param, *val;
str = next_arg(str, &param, &val);
+ if (!val && !strcmp(param, "--"))
+ break;
if (!strcmp(param, "nokaslr")) {
efi_nokaslr = true;
};
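efi_parse_options() now bounds the length probe and terminates the copy explicitly, so an unterminated or oversized command line cannot overrun the pool allocation. The same idea in a standalone sketch (buffer size shrunk for the demo):

#include <stdio.h>
#include <string.h>

#define COMMAND_LINE_SIZE 32	/* deliberately small for the demo */

/*
 * Bound the length probe with strnlen() and NUL-terminate the copy
 * explicitly, mirroring the hardened kernel code above.
 */
static void copy_cmdline(char *dst, const char *src)
{
	size_t len = strnlen(src, COMMAND_LINE_SIZE - 1) + 1;

	memcpy(dst, src, len - 1);
	dst[len - 1] = '\0';
}

int main(void)
{
	char buf[COMMAND_LINE_SIZE];

	copy_cmdline(buf, "root=/dev/sda1 quiet splash loglevel=7 extra");
	printf("%s\n", buf);	/* truncated at 31 chars plus NUL */
	return 0;
}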
/**
- * struct ti_sci_rm_type_map - Structure representing TISCI Resource
- * management representation of dev_ids.
- * @dev_id: TISCI device ID
- * @type: Corresponding id as identified by TISCI RM.
- *
- * Note: This is used only as a work around for using RM range apis
- * for AM654 SoC. For future SoCs dev_id will be used as type
- * for RM range APIs. In order to maintain ABI backward compatibility
- * type is not being changed for AM654 SoC.
- */
-struct ti_sci_rm_type_map {
- u32 dev_id;
- u16 type;
-};
-
-/**
* struct ti_sci_desc - Description of SoC integration
* @default_host_id: Host identifier representing the compute entity
* @max_rx_timeout_ms: Timeout for communication with SoC (in Milliseconds)
* @max_msgs: Maximum number of messages that can be pending
* simultaneously in the system
* @max_msg_size: Maximum size of data per message that can be handled.
- * @rm_type_map: RM resource type mapping structure.
*/
struct ti_sci_desc {
u8 default_host_id;
int max_rx_timeout_ms;
int max_msgs;
int max_msg_size;
- struct ti_sci_rm_type_map *rm_type_map;
};
/**
return ret;
}
-static int ti_sci_get_resource_type(struct ti_sci_info *info, u16 dev_id,
- u16 *type)
-{
- struct ti_sci_rm_type_map *rm_type_map = info->desc->rm_type_map;
- bool found = false;
- int i;
-
- /* If map is not provided then assume dev_id is used as type */
- if (!rm_type_map) {
- *type = dev_id;
- return 0;
- }
-
- for (i = 0; rm_type_map[i].dev_id; i++) {
- if (rm_type_map[i].dev_id == dev_id) {
- *type = rm_type_map[i].type;
- found = true;
- break;
- }
- }
-
- if (!found)
- return -EINVAL;
-
- return 0;
-}
-
/**
* ti_sci_get_resource_range - Helper to get a range of resources assigned
* to a host. Resource is uniquely identified by
struct ti_sci_xfer *xfer;
struct ti_sci_info *info;
struct device *dev;
- u16 type;
int ret = 0;
if (IS_ERR(handle))
return ret;
}
- ret = ti_sci_get_resource_type(info, dev_id, &type);
- if (ret) {
- dev_err(dev, "rm type lookup failed for %u\n", dev_id);
- goto fail;
- }
-
req = (struct ti_sci_msg_req_get_resource_range *)xfer->xfer_buf;
req->secondary_host = s_host;
- req->type = type & MSG_RM_RESOURCE_TYPE_MASK;
+ req->type = dev_id & MSG_RM_RESOURCE_TYPE_MASK;
req->subtype = subtype & MSG_RM_RESOURCE_SUBTYPE_MASK;
ret = ti_sci_do_xfer(info, xfer);
EXPORT_SYMBOL_GPL(ti_sci_get_num_resources);
/**
- * devm_ti_sci_get_of_resource() - Get a TISCI resource assigned to a device
+ * devm_ti_sci_get_resource_sets() - Get TISCI resource sets assigned to a device
* @handle: TISCI handle
* @dev: Device pointer to which the resource is assigned
* @dev_id: TISCI device id to which the resource is assigned
- * @of_prop: property name by which the resource are represented
+ * @sub_types: Array of TISCI resource subtypes assigned to the device
+ * @sets: Number of entries in @sub_types
*
* Return: Pointer to ti_sci_resource if all went well else appropriate
* error pointer.
*/
-struct ti_sci_resource *
-devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
- struct device *dev, u32 dev_id, char *of_prop)
+static struct ti_sci_resource *
+devm_ti_sci_get_resource_sets(const struct ti_sci_handle *handle,
+ struct device *dev, u32 dev_id, u32 *sub_types,
+ u32 sets)
{
struct ti_sci_resource *res;
bool valid_set = false;
- u32 resource_subtype;
int i, ret;
res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
if (!res)
return ERR_PTR(-ENOMEM);
- ret = of_property_count_elems_of_size(dev_of_node(dev), of_prop,
- sizeof(u32));
- if (ret < 0) {
- dev_err(dev, "%s resource type ids not available\n", of_prop);
- return ERR_PTR(ret);
- }
- res->sets = ret;
-
+ res->sets = sets;
res->desc = devm_kcalloc(dev, res->sets, sizeof(*res->desc),
GFP_KERNEL);
if (!res->desc)
return ERR_PTR(-ENOMEM);
for (i = 0; i < res->sets; i++) {
- ret = of_property_read_u32_index(dev_of_node(dev), of_prop, i,
- &resource_subtype);
- if (ret)
- return ERR_PTR(-EINVAL);
-
ret = handle->ops.rm_core_ops.get_range(handle, dev_id,
- resource_subtype,
+ sub_types[i],
&res->desc[i].start,
&res->desc[i].num);
if (ret) {
dev_dbg(dev, "dev = %d subtype %d not allocated for this host\n",
- dev_id, resource_subtype);
+ dev_id, sub_types[i]);
res->desc[i].start = 0;
res->desc[i].num = 0;
continue;
}
dev_dbg(dev, "dev = %d, subtype = %d, start = %d, num = %d\n",
- dev_id, resource_subtype, res->desc[i].start,
+ dev_id, sub_types[i], res->desc[i].start,
res->desc[i].num);
valid_set = true;
return ERR_PTR(-EINVAL);
}
+/**
+ * devm_ti_sci_get_of_resource() - Get a TISCI resource assigned to a device
+ * @handle: TISCI handle
+ * @dev: Device pointer to which the resource is assigned
+ * @dev_id: TISCI device id to which the resource is assigned
+ * @of_prop: property name by which the resources are represented
+ *
+ * Return: Pointer to ti_sci_resource if all went well else appropriate
+ * error pointer.
+ */
+struct ti_sci_resource *
+devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
+ struct device *dev, u32 dev_id, char *of_prop)
+{
+ struct ti_sci_resource *res;
+ u32 *sub_types;
+ int sets;
+
+ sets = of_property_count_elems_of_size(dev_of_node(dev), of_prop,
+ sizeof(u32));
+ if (sets < 0) {
+ dev_err(dev, "%s resource type ids not available\n", of_prop);
+ return ERR_PTR(sets);
+ }
+
+ sub_types = kcalloc(sets, sizeof(*sub_types), GFP_KERNEL);
+ if (!sub_types)
+ return ERR_PTR(-ENOMEM);
+
+ of_property_read_u32_array(dev_of_node(dev), of_prop, sub_types, sets);
+ res = devm_ti_sci_get_resource_sets(handle, dev, dev_id, sub_types,
+ sets);
+
+ kfree(sub_types);
+ return res;
+}
+EXPORT_SYMBOL_GPL(devm_ti_sci_get_of_resource);
+
+/**
+ * devm_ti_sci_get_resource() - Get a resource range assigned to the device
+ * @handle: TISCI handle
+ * @dev: Device pointer to which the resource is assigned
+ * @dev_id: TISCI device id to which the resource is assigned
+ * @sub_type: TISCI resource subtype representing the resource.
+ *
+ * Return: Pointer to ti_sci_resource if all went well else appropriate
+ * error pointer.
+ */
+struct ti_sci_resource *
+devm_ti_sci_get_resource(const struct ti_sci_handle *handle, struct device *dev,
+ u32 dev_id, u32 sub_type)
+{
+ return devm_ti_sci_get_resource_sets(handle, dev, dev_id, &sub_type, 1);
+}
+EXPORT_SYMBOL_GPL(devm_ti_sci_get_resource);
+
static int tisci_reboot_handler(struct notifier_block *nb, unsigned long mode,
void *cmd)
{
/* Limited by MBOX_TX_QUEUE_LEN. K2G can handle up to 128 messages! */
.max_msgs = 20,
.max_msg_size = 64,
- .rm_type_map = NULL,
-};
-
-static struct ti_sci_rm_type_map ti_sci_am654_rm_type_map[] = {
- {.dev_id = 56, .type = 0x00b}, /* GIC_IRQ */
- {.dev_id = 179, .type = 0x000}, /* MAIN_NAV_UDMASS_IA0 */
- {.dev_id = 187, .type = 0x009}, /* MAIN_NAV_RA */
- {.dev_id = 188, .type = 0x006}, /* MAIN_NAV_UDMAP */
- {.dev_id = 194, .type = 0x007}, /* MCU_NAV_UDMAP */
- {.dev_id = 195, .type = 0x00a}, /* MCU_NAV_RA */
- {.dev_id = 0, .type = 0x000}, /* end of table */
};
/* Description for AM654 */
/* Limited by MBOX_TX_QUEUE_LEN. K2G can handle up to 128 messages! */
.max_msgs = 20,
.max_msg_size = 60,
- .rm_type_map = ti_sci_am654_rm_type_map,
};
static const struct of_device_id ti_sci_of_match[] = {
switch (type & IRQ_TYPE_SENSE_MASK) {
case IRQ_TYPE_EDGE_BOTH:
type2 |= bit;
- /* fall through */
+ fallthrough;
case IRQ_TYPE_EDGE_RISING:
type0 |= bit;
- /* fall through */
+ fallthrough;
case IRQ_TYPE_EDGE_FALLING:
handler = handle_edge_irq;
break;
case IRQ_TYPE_LEVEL_HIGH:
type0 |= bit;
- /* fall through */
+ fallthrough;
case IRQ_TYPE_LEVEL_LOW:
type1 |= bit;
handler = handle_level_irq;
switch (type & IRQ_TYPE_SENSE_MASK) {
case IRQ_TYPE_EDGE_BOTH:
type2 |= bit;
- /* fall through */
+ fallthrough;
case IRQ_TYPE_EDGE_RISING:
type0 |= bit;
- /* fall through */
+ fallthrough;
case IRQ_TYPE_EDGE_FALLING:
handler = handle_edge_irq;
break;
case IRQ_TYPE_LEVEL_HIGH:
type0 |= bit;
- /* fall through */
+ fallthrough;
case IRQ_TYPE_LEVEL_LOW:
type1 |= bit;
handler = handle_level_irq;
case IRQ_TYPE_LEVEL_HIGH:
polarity |= mask;
- /* fall through */
+ fallthrough;
case IRQ_TYPE_LEVEL_LOW:
type |= mask;
break;
sprd_eic->chip.free = sprd_eic_free;
sprd_eic->chip.set_config = sprd_eic_set_config;
sprd_eic->chip.set = sprd_eic_set;
- /* fall-through */
+ fallthrough;
case SPRD_EIC_ASYNC:
- /* fall-through */
case SPRD_EIC_SYNC:
sprd_eic->chip.get = sprd_eic_get;
break;
case SPRD_EIC_LATCH:
- /* fall-through */
default:
break;
}
if (ret < 0)
return;
edge_det = !!(ret & mask);
- /* fall through */
+ fallthrough;
case STMPE1801:
rise_reg = stmpe->regs[STMPE_IDX_GPRER_LSB + bank];
fall_reg = stmpe->regs[STMPE_IDX_GPFER_LSB + bank];
if (ret < 0)
return;
fall = !!(ret & mask);
- /* fall through */
+ fallthrough;
case STMPE801:
case STMPE1600:
irqen_reg = stmpe->regs[STMPE_IDX_IEGPIOR_LSB + bank];
switch (element->type) {
case ACPI_TYPE_LOCAL_REFERENCE:
element += 3;
- /* Fallthrough */
+ fallthrough;
case ACPI_TYPE_INTEGER:
element++;
count++;
dev_warn(adev->dev,
"Invalid sdma engine id (%d), using engine id 0\n",
engine_id);
- /* fall through */
+ fallthrough;
case 0:
sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
unsigned int engine_id,
unsigned int queue_id)
{
- uint32_t sdma_engine_reg_base[2] = {
- SOC15_REG_OFFSET(SDMA0, 0,
- mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL,
- SOC15_REG_OFFSET(SDMA1, 0,
- mmSDMA1_RLC0_RB_CNTL) - mmSDMA1_RLC0_RB_CNTL
- };
- uint32_t retval = sdma_engine_reg_base[engine_id]
+ uint32_t sdma_engine_reg_base = 0;
+ uint32_t sdma_rlc_reg_offset;
+
+ switch (engine_id) {
+ default:
+ dev_warn(adev->dev,
+ "Invalid sdma engine id (%d), using engine id 0\n",
+ engine_id);
+ fallthrough;
+ case 0:
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
+ mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
+ break;
+ case 1:
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA1, 0,
+ mmSDMA1_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
+ break;
+ }
+
+ sdma_rlc_reg_offset = sdma_engine_reg_base
+ queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);
pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id,
- queue_id, retval);
+ queue_id, sdma_rlc_reg_offset);
- return retval;
+ return sdma_rlc_reg_offset;
}
static inline struct v9_mqd *get_mqd(void *mqd)
case CHIP_VEGA20:
case CHIP_ARCTURUS:
case CHIP_SIENNA_CICHLID:
+ case CHIP_NAVY_FLOUNDER:
/* enable runpm if runpm=1 */
if (amdgpu_runtime_pm > 0)
adev->runpm = true;
* in the bitfields */
if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK)
se_num = 0xffffffff;
+ else if (se_num >= AMDGPU_GFX_MAX_SE)
+ return -EINVAL;
if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
sh_num = 0xffffffff;
+ else if (sh_num >= AMDGPU_GFX_MAX_SH_PER_SE)
+ return -EINVAL;
if (info->read_mmr_reg.count > 128)
return -EINVAL;
* add workaround to bypass it for sriov now.
* TODO: add version check to make it common
*/
- if (amdgpu_sriov_vf(psp->adev) ||
- (psp->adev->asic_type == CHIP_NAVY_FLOUNDER))
+ if (amdgpu_sriov_vf(psp->adev) || !psp->asd_fw)
return 0;
cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
if (!obj || !obj->ent)
return;
- debugfs_remove(obj->ent);
obj->ent = NULL;
put_obj(obj);
}
amdgpu_ras_debugfs_remove(adev, &obj->head);
}
- debugfs_remove_recursive(con->dir);
con->dir = NULL;
}
/* debugfs end */
unsigned int pages;
int i, r;
- *sgt = kmalloc(sizeof(*sg), GFP_KERNEL);
+ *sgt = kmalloc(sizeof(**sgt), GFP_KERNEL);
if (!*sgt)
return -ENOMEM;
def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
- RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
-
- /* only for Vega10 & Raven1 */
- data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK;
+ RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK |
+ RLC_CGTT_MGCG_OVERRIDE__ENABLE_CGTS_LEGACY_MASK);
if (def != data)
WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
dev_warn(adev->dev,
"Unknown chip type (%d) in function gfx_v8_0_tiling_mode_table_init() falling through to CHIP_CARRIZO\n",
adev->asic_type);
- /* fall through */
+ fallthrough;
case CHIP_CARRIZO:
modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_5_ARCT, 0x3ff, 0x135),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_CONFIG, 0xffffffff, 0x011A0000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_FIFO_SIZES, 0xffffffff, 0x00000f00),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_UTCL1_CNTL1, 0x30000000, 0x30000000)
};
static const struct soc15_reg_rlcg rlcg_access_gc_9_0[] = {
{
uint32_t tmp;
+ /* These registers are not accessible to VF-SRIOV.
+ * The PF will program them instead.
+ */
+ if (amdgpu_sriov_vf(adev))
+ return;
+
/* Setup L2 cache */
tmp = RREG32_SOC15(GC, 0, mmGCVM_L2_CNTL);
tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, ENABLE_L2_CACHE, 1);
static void gfxhub_v2_1_disable_identity_aperture(struct amdgpu_device *adev)
{
+ /* These registers are not accessible to VF-SRIOV.
+ * The PF will program them instead.
+ */
+ if (amdgpu_sriov_vf(adev))
+ return;
+
WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
0xFFFFFFFF);
WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
bool value)
{
u32 tmp;
+
+ /* These registers are not accessible to VF-SRIOV.
+ * The PF will program them instead.
+ */
+ if (amdgpu_sriov_vf(adev))
+ return;
+
tmp = RREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL);
tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
case CHIP_VEGA10:
if (amdgpu_sriov_vf(adev))
break;
- /* fall through */
+ fallthrough;
case CHIP_VEGA20:
soc15_program_register_sequence(adev,
golden_settings_mmhub_1_0_0,
{
uint32_t tmp;
+ /* These registers are not accessible to VF-SRIOV.
+ * The PF will program them instead.
+ */
+ if (amdgpu_sriov_vf(adev))
+ return;
+
/* Setup L2 cache */
tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL);
tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, ENABLE_L2_CACHE, 1);
static void mmhub_v2_0_disable_identity_aperture(struct amdgpu_device *adev)
{
+ /* These registers are not accessible to VF-SRIOV.
+ * The PF will program them instead.
+ */
+ if (amdgpu_sriov_vf(adev))
+ return;
+
WREG32_SOC15(MMHUB, 0,
mmMMVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
0xFFFFFFFF);
void mmhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
{
u32 tmp;
+
+ /* These registers are not accessible to VF-SRIOV.
+ * The PF will program them instead.
+ */
+ if (amdgpu_sriov_vf(adev))
+ return;
+
tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL);
tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
switch (adev->asic_type) {
case CHIP_SIENNA_CICHLID:
+ case CHIP_NAVY_FLOUNDER:
return AMD_RESET_METHOD_MODE1;
default:
if (smu_baco_is_support(smu))
}
break;
case CHIP_SIENNA_CICHLID:
+ case CHIP_NAVY_FLOUNDER:
err = psp_init_ta_microcode(&adev->psp, chip_name);
if (err)
return err;
break;
- case CHIP_NAVY_FLOUNDER:
- break;
default:
BUG();
}
si_pi->force_pcie_gen = AMDGPU_PCIE_GEN2;
if (current_link_speed == AMDGPU_PCIE_GEN2)
break;
- /* fall through */
+ fallthrough;
case AMDGPU_PCIE_GEN2:
if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
break;
drm_connector_update_edid_property(connector,
aconnector->edid);
+ drm_add_edid_modes(connector, aconnector->edid);
if (aconnector->dc_link->aux_mode)
drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
&dm_atomic_state_funcs);
r = amdgpu_display_modeset_create_props(adev);
- if (r)
+ if (r) {
+ dc_release_state(state->context);
+ kfree(state);
return r;
+ }
r = amdgpu_dm_audio_init(adev);
- if (r)
+ if (r) {
+ dc_release_state(state->context);
+ kfree(state);
return r;
+ }
return 0;
}
#if defined(CONFIG_ACPI)
struct amdgpu_dm_backlight_caps caps;
+ memset(&caps, 0, sizeof(caps));
+
if (dm->backlight_caps.caps_valid)
return;
return rc ? 0 : 1;
}
-static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
- const uint32_t user_brightness)
+static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
+ unsigned *min, unsigned *max)
{
- u32 min, max, conversion_pace;
- u32 brightness = user_brightness;
-
if (!caps)
- goto out;
+ return 0;
- if (!caps->aux_support) {
- max = caps->max_input_signal;
- min = caps->min_input_signal;
- /*
- * The brightness input is in the range 0-255
- * It needs to be rescaled to be between the
- * requested min and max input signal
- * It also needs to be scaled up by 0x101 to
- * match the DC interface which has a range of
- * 0 to 0xffff
- */
- conversion_pace = 0x101;
- brightness =
- user_brightness
- * conversion_pace
- * (max - min)
- / AMDGPU_MAX_BL_LEVEL
- + min * conversion_pace;
+ if (caps->aux_support) {
+ // Firmware limits are in nits, DC API wants millinits.
+ *max = 1000 * caps->aux_max_input_signal;
+ *min = 1000 * caps->aux_min_input_signal;
} else {
- /* TODO
- * We are doing a linear interpolation here, which is OK but
- * does not provide the optimal result. We probably want
- * something close to the Perceptual Quantizer (PQ) curve.
- */
- max = caps->aux_max_input_signal;
- min = caps->aux_min_input_signal;
-
- brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
- + user_brightness * max;
- // Multiple the value by 1000 since we use millinits
- brightness *= 1000;
- brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
+ // Firmware limits are 8-bit, PWM control is 16-bit.
+ *max = 0x101 * caps->max_input_signal;
+ *min = 0x101 * caps->min_input_signal;
}
+ return 1;
+}
-out:
- return brightness;
+static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
+ uint32_t brightness)
+{
+ unsigned min, max;
+
+ if (!get_brightness_range(caps, &min, &max))
+ return brightness;
+
+ // Rescale 0..255 to min..max
+ return min + DIV_ROUND_CLOSEST((max - min) * brightness,
+ AMDGPU_MAX_BL_LEVEL);
+}
+
+static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
+ uint32_t brightness)
+{
+ unsigned min, max;
+
+ if (!get_brightness_range(caps, &min, &max))
+ return brightness;
+
+ if (brightness < min)
+ return 0;
+ // Rescale min..max to 0..255
+ return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
+ max - min);
}
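The replacement helpers reduce both conversions to one linear rescale between the user range 0..255 and the firmware range min..max. A standalone model with a simplified unsigned DIV_ROUND_CLOSEST, showing that the mapping round-trips:

#include <stdio.h>

#define AMDGPU_MAX_BL_LEVEL 255
/* Simplified unsigned-only rounding divide; the kernel macro is more general. */
#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

static unsigned from_user(unsigned min, unsigned max, unsigned b)
{
	return min + DIV_ROUND_CLOSEST((max - min) * b, AMDGPU_MAX_BL_LEVEL);
}

static unsigned to_user(unsigned min, unsigned max, unsigned b)
{
	if (b < min)
		return 0;
	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (b - min), max - min);
}

int main(void)
{
	/* e.g. 8-bit PWM limits scaled to 16-bit: 0x101 * {12, 255} */
	unsigned min = 0x101 * 12, max = 0x101 * 255;
	unsigned user, dc;

	for (user = 0; user <= 255; user += 51) {
		dc = from_user(min, max, user);
		printf("user %3u -> dc %5u -> user %3u\n",
		       user, dc, to_user(min, max, dc));
	}
	return 0;
}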
static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
link = (struct dc_link *)dm->backlight_link;
- brightness = convert_brightness(&caps, bd->props.brightness);
+ brightness = convert_brightness_from_user(&caps, bd->props.brightness);
// Change brightness based on AUX property
if (caps.aux_support)
return set_backlight_via_aux(link, brightness);
if (ret == DC_ERROR_UNEXPECTED)
return bd->props.brightness;
- return ret;
+ return convert_brightness_to_user(&dm->backlight_caps, ret);
}
static const struct backlight_ops amdgpu_dm_backlight_ops = {
result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload,
&operation_result);
- if (payload.write)
+ if (payload.write && result >= 0)
result = msg->size;
if (result < 0)
action);
}
+static enum bp_result bios_parser_enable_lvtma_control(
+ struct dc_bios *dcb,
+ uint8_t uc_pwr_on)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.enable_lvtma_control)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.enable_lvtma_control(bp, uc_pwr_on);
+}
+
static bool bios_parser_is_accelerated_mode(
struct dc_bios *dcb)
{
.get_board_layout_info = bios_get_board_layout_info,
.pack_data_tables = bios_parser_pack_data_tables,
- .get_atom_dc_golden_table = bios_get_atom_dc_golden_table
+ .get_atom_dc_golden_table = bios_get_atom_dc_golden_table,
+
+ .enable_lvtma_control = bios_parser_enable_lvtma_control
};
static bool bios_parser2_construct(
return 0;
}
+/******************************************************************************
+ ******************************************************************************
+ **
+ ** LVTMA CONTROL
+ **
+ ******************************************************************************
+ *****************************************************************************/
+
+static enum bp_result enable_lvtma_control(
+ struct bios_parser *bp,
+ uint8_t uc_pwr_on);
+
+static void init_enable_lvtma_control(struct bios_parser *bp)
+{
+ /* TODO: add a switch for the table version */
+ bp->cmd_tbl.enable_lvtma_control = enable_lvtma_control;
+}
+
+static enum bp_result enable_lvtma_control(
+ struct bios_parser *bp,
+ uint8_t uc_pwr_on)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ return result;
+}
+
void dal_firmware_parser_init_cmd_tbl(struct bios_parser *bp)
{
init_dig_encoder_control(bp);
init_set_dce_clock(bp);
init_get_smu_clock_info(bp);
+ init_enable_lvtma_control(bp);
}
struct bp_set_dce_clock_parameters *bp_params);
unsigned int (*get_smu_clock_info)(
struct bios_parser *bp, uint8_t id);
-
+ enum bp_result (*enable_lvtma_control)(struct bios_parser *bp,
+ uint8_t uc_pwr_on);
};
void dal_firmware_parser_init_cmd_tbl(struct bios_parser *bp);
return display_count;
}
+void rn_set_low_power_state(struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ rn_vbios_smu_set_dcn_low_power_state(clk_mgr, DCN_PWR_STATE_LOW_POWER);
+ /* update power state */
+ clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
+}
+
void rn_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_state *context,
bool safe_to_lower)
.init_clocks = rn_init_clocks,
.enable_pme_wa = rn_enable_pme_wa,
.are_clock_states_equal = rn_are_clock_states_equal,
+ .set_low_power_state = rn_set_low_power_state,
.notify_wm_ranges = rn_notify_wm_ranges,
.notify_link_rate_change = rn_notify_link_rate_change,
};
sink_caps->signal = dp_passive_dongle_detection(link->ddc,
sink_caps,
audio_support);
+ link->dpcd_caps.dongle_type = sink_caps->dongle_type;
}
return true;
#if defined(CONFIG_DRM_AMD_DC_HDCP)
update_psp_stream_config(pipe_ctx, true);
#endif
-
dc->hwss.blank_stream(pipe_ctx);
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
return false;
- if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_CURRENT_PEAK,
+ if (core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_CURRENT_PEAK,
dpcd_backlight_get.raw,
- sizeof(union dpcd_source_backlight_get)))
+ sizeof(union dpcd_source_backlight_get)) != DC_OK)
return false;
*backlight_millinits_avg =
link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
return false;
- if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
+ if (core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
(uint8_t *) backlight_millinits,
- sizeof(uint32_t)))
+ sizeof(uint32_t)) != DC_OK)
return false;
return true;
enum bp_result (*get_atom_dc_golden_table)(
struct dc_bios *dcb);
+
+ enum bp_result (*enable_lvtma_control)(
+ struct dc_bios *bios,
+ uint8_t uc_pwr_on);
};
struct bios_registers {
union stream_update_flags update_flags;
};
-#define ABM_LEVEL_IMMEDIATE_DISABLE 0xFFFFFFFF
+#define ABM_LEVEL_IMMEDIATE_DISABLE 255
struct dc_stream_update {
struct dc_stream_state *stream;
#define DCN_PANEL_CNTL_REG_LIST()\
DCN_PANEL_CNTL_SR(PWRSEQ_CNTL, LVTMA), \
DCN_PANEL_CNTL_SR(PWRSEQ_STATE, LVTMA), \
- DCE_PANEL_CNTL_SR(PWRSEQ_REF_DIV, LVTMA), \
+ DCN_PANEL_CNTL_SR(PWRSEQ_REF_DIV, LVTMA), \
SR(BL_PWM_CNTL), \
SR(BL_PWM_CNTL2), \
SR(BL_PWM_PERIOD_CNTL), \
cntl.coherent = false;
cntl.lanes_number = LANE_COUNT_FOUR;
cntl.hpd_sel = link->link_enc->hpd_source;
+
+ if (ctx->dc->ctx->dmub_srv &&
+ ctx->dc->debug.dmub_command_table) {
+ if (cntl.action == TRANSMITTER_CONTROL_POWER_ON)
+ bp_result = ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios,
+ LVTMA_CONTROL_POWER_ON);
+ else
+ bp_result = ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios,
+ LVTMA_CONTROL_POWER_OFF);
+ }
+
bp_result = link_transmitter_control(ctx->dc_bios, &cntl);
if (!power_up)
/*edp 1.2*/
if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_ON)
edp_receiver_ready_T7(link);
+
+ if (ctx->dc->ctx->dmub_srv &&
+ ctx->dc->debug.dmub_command_table) {
+ if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_ON)
+ ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios,
+ LVTMA_CONTROL_LCD_BLON);
+ else
+ ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios,
+ LVTMA_CONTROL_LCD_BLOFF);
+ }
+
link_transmitter_control(ctx->dc_bios, &cntl);
+
if (enable && link->dpcd_sink_ext_caps.bits.oled)
msleep(OLED_POST_T7_DELAY);
void dcn10_power_down_on_boot(struct dc *dc)
{
int i = 0;
+ struct dc_link *edp_link;
- if (dc->config.power_down_display_on_boot) {
- struct dc_link *edp_link = get_edp_link(dc);
-
- if (edp_link &&
- edp_link->link_enc->funcs->is_dig_enabled &&
- edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
- dc->hwseq->funcs.edp_backlight_control &&
- dc->hwss.power_down &&
- dc->hwss.edp_power_control) {
- dc->hwseq->funcs.edp_backlight_control(edp_link, false);
- dc->hwss.power_down(dc);
- dc->hwss.edp_power_control(edp_link, false);
- } else {
- for (i = 0; i < dc->link_count; i++) {
- struct dc_link *link = dc->links[i];
-
- if (link->link_enc->funcs->is_dig_enabled &&
- link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
- dc->hwss.power_down) {
- dc->hwss.power_down(dc);
- break;
- }
+ if (!dc->config.power_down_display_on_boot)
+ return;
+
+ edp_link = get_edp_link(dc);
+ if (edp_link &&
+ edp_link->link_enc->funcs->is_dig_enabled &&
+ edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
+ dc->hwseq->funcs.edp_backlight_control &&
+ dc->hwss.power_down &&
+ dc->hwss.edp_power_control) {
+ dc->hwseq->funcs.edp_backlight_control(edp_link, false);
+ dc->hwss.power_down(dc);
+ dc->hwss.edp_power_control(edp_link, false);
+ } else {
+ for (i = 0; i < dc->link_count; i++) {
+ struct dc_link *link = dc->links[i];
+ if (link->link_enc->funcs->is_dig_enabled &&
+ link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
+ dc->hwss.power_down) {
+ dc->hwss.power_down(dc);
+ break;
}
+
}
}
+
+ /*
+ * Call update_clocks with empty context
+ * to send DISPLAY_OFF
+ * Otherwise DISPLAY_OFF may not be asserted
+ */
+ if (dc->clk_mgr->funcs->set_low_power_state)
+ dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr);
}
void dcn10_reset_hw_ctx_wrap(
bool video_large = false;
bool desktop_large = false;
bool dcc_disabled = false;
+ bool mpo_enabled = false;
for (i = 0; i < context->stream_count; i++) {
if (context->stream_status[i].plane_count == 0)
if (context->stream_status[i].plane_count > 2)
return DC_FAIL_UNSUPPORTED_1;
+ if (context->stream_status[i].plane_count > 1)
+ mpo_enabled = true;
+
for (j = 0; j < context->stream_status[i].plane_count; j++) {
struct dc_plane_state *plane =
context->stream_status[i].plane_states[j];
}
}
+ /* Disable MPO in multi-display configurations. */
+ if (context->stream_count > 1 && mpo_enabled)
+ return DC_FAIL_UNSUPPORTED_1;
+
/*
* Workaround: On DCN10 there is UMC issue that causes underflow when
* playing 4k video on 4k desktop with video downscaled and single channel
switch (packet_index) {
case 0:
REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
- AFMT_GENERIC0_FRAME_UPDATE, 1);
+ AFMT_GENERIC0_IMMEDIATE_UPDATE, 1);
break;
case 1:
REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
- AFMT_GENERIC1_FRAME_UPDATE, 1);
+ AFMT_GENERIC1_IMMEDIATE_UPDATE, 1);
break;
case 2:
REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
- AFMT_GENERIC2_FRAME_UPDATE, 1);
+ AFMT_GENERIC2_IMMEDIATE_UPDATE, 1);
break;
case 3:
REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
- AFMT_GENERIC3_FRAME_UPDATE, 1);
+ AFMT_GENERIC3_IMMEDIATE_UPDATE, 1);
break;
case 4:
REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
- AFMT_GENERIC4_FRAME_UPDATE, 1);
+ AFMT_GENERIC4_IMMEDIATE_UPDATE, 1);
break;
case 5:
REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
- AFMT_GENERIC5_FRAME_UPDATE, 1);
+ AFMT_GENERIC5_IMMEDIATE_UPDATE, 1);
break;
case 6:
REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
- AFMT_GENERIC6_FRAME_UPDATE, 1);
+ AFMT_GENERIC6_IMMEDIATE_UPDATE, 1);
break;
case 7:
REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
- AFMT_GENERIC7_FRAME_UPDATE, 1);
+ AFMT_GENERIC7_IMMEDIATE_UPDATE, 1);
break;
default:
break;
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC2_FRAME_UPDATE, mask_sh),\
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC3_FRAME_UPDATE, mask_sh),\
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC4_FRAME_UPDATE, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC0_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC1_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC2_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC3_IMMEDIATE_UPDATE, mask_sh),\
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC4_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC5_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC6_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC7_IMMEDIATE_UPDATE, mask_sh),\
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC5_FRAME_UPDATE, mask_sh),\
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC6_FRAME_UPDATE, mask_sh),\
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC7_FRAME_UPDATE, mask_sh),\
type AFMT_GENERIC2_FRAME_UPDATE;\
type AFMT_GENERIC3_FRAME_UPDATE;\
type AFMT_GENERIC4_FRAME_UPDATE;\
+ type AFMT_GENERIC0_IMMEDIATE_UPDATE;\
+ type AFMT_GENERIC1_IMMEDIATE_UPDATE;\
+ type AFMT_GENERIC2_IMMEDIATE_UPDATE;\
+ type AFMT_GENERIC3_IMMEDIATE_UPDATE;\
type AFMT_GENERIC4_IMMEDIATE_UPDATE;\
+ type AFMT_GENERIC5_IMMEDIATE_UPDATE;\
+ type AFMT_GENERIC6_IMMEDIATE_UPDATE;\
+ type AFMT_GENERIC7_IMMEDIATE_UPDATE;\
type AFMT_GENERIC5_FRAME_UPDATE;\
type AFMT_GENERIC6_FRAME_UPDATE;\
type AFMT_GENERIC7_FRAME_UPDATE;\
/* Any updates are handled in the dc interface; just apply the existing state for plane enable */
if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed ||
- pipe_ctx->update_flags.bits.scaler || pipe_ctx->update_flags.bits.viewport)
- && pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
+ pipe_ctx->update_flags.bits.scaler || viewport_changed == true) &&
+ pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
dc->hwss.set_cursor_position(pipe_ctx);
dc->hwss.set_cursor_attribute(pipe_ctx);
LE_SF(DCIO_SOFT_RESET, UNIPHYB_SOFT_RESET, mask_sh),\
LE_SF(DCIO_SOFT_RESET, UNIPHYC_SOFT_RESET, mask_sh),\
LE_SF(DCIO_SOFT_RESET, UNIPHYD_SOFT_RESET, mask_sh),\
- LE_SF(DCIO_SOFT_RESET, UNIPHYE_SOFT_RESET, mask_sh)
+ LE_SF(DCIO_SOFT_RESET, UNIPHYE_SOFT_RESET, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, mask_sh)
#define LINK_ENCODER_MASK_SH_LIST_DCN20(mask_sh)\
LINK_ENCODER_MASK_SH_LIST_DCN10(mask_sh),\
int vlevel = 0;
int pipe_split_from[MAX_PIPES];
int pipe_cnt = 0;
- display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
+ display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC);
DC_LOGGER_INIT(dc->ctx->logger);
BW_VAL_TRACE_COUNT();
DPCS_DCN2_MASK_SH_LIST(mask_sh),\
LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_DATA_ORDER_INVERT_18_BIT, mask_sh),\
LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_PHY_TX_VBOOST_LVL, mask_sh),\
- LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_TX_CLK_EN, mask_sh)
+ LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_TX_CLK_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, mask_sh)
+
void dcn30_link_encoder_construct(
struct dcn20_link_encoder *enc20,
[id] = {\
LE_DCN3_REG_LIST(id), \
UNIPHY_DCN2_REG_LIST(phyid), \
+ SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \
}
static const struct dce110_aux_registers_shift aux_shift = {
#define BPP_INVALID 0
#define BPP_BLENDED_PIPE 0xffffffff
+#define DCN30_MAX_DSC_IMAGE_WIDTH 5184
static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib);
static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(
} else if (v->PlaneRequiredDISPCLKWithoutODMCombine > v->MaxDispclkRoundedDownToDFSGranularity) {
v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine2To1;
+ } else if (v->DSCEnabled[k] && (v->HActive[k] > DCN30_MAX_DSC_IMAGE_WIDTH)) {
+ v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
+ v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine2To1;
} else {
v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled;
v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithoutODMCombine;
int (*get_dp_ref_clk_frequency)(struct clk_mgr *clk_mgr);
+ void (*set_low_power_state)(struct clk_mgr *clk_mgr);
+
void (*init_clocks)(struct clk_mgr *clk_mgr);
void (*enable_pme_wa) (struct clk_mgr *clk_mgr);
ASIC_PIPE_INIT
};
+enum bp_lvtma_control_action {
+ LVTMA_CONTROL_LCD_BLOFF = 2,
+ LVTMA_CONTROL_LCD_BLON = 3,
+ LVTMA_CONTROL_POWER_ON = 12,
+ LVTMA_CONTROL_POWER_OFF = 13
+};
+
struct bp_encoder_control {
enum bp_encoder_control_action action;
enum engine_id engine_id;
*/
static inline struct fixed31_32 dc_fixpt_pow(struct fixed31_32 arg1, struct fixed31_32 arg2)
{
+ if (arg1.value == 0)
+ return arg2.value == 0 ? dc_fixpt_one : dc_fixpt_zero;
+
return dc_fixpt_exp(
dc_fixpt_mul(
dc_fixpt_log(arg1),
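The new guard matters because the fixed-point pow() is computed as exp(log(x) * y) and log(0) is undefined, so x == 0 must be short-circuited, with 0^0 treated as 1. The same failure mode in plain floating point (link with -lm):

#include <math.h>
#include <stdio.h>

/* pow() built from exp/log needs a zero-base special case. */
static double pow_via_explog(double x, double y)
{
	if (x == 0.0)
		return y == 0.0 ? 1.0 : 0.0;
	return exp(log(x) * y);
}

int main(void)
{
	printf("2^10 = %.1f\n", pow_via_explog(2.0, 10.0));
	printf("0^2  = %.1f\n", pow_via_explog(0.0, 2.0));
	printf("0^0  = %.1f\n", pow_via_explog(0.0, 0.0));
	printf("raw log(0) -> %f\n", log(0.0));	/* -inf: the failure mode */
	return 0;
}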
/* Choose number of frames to insert based on how close it
* can get to the mid point of the variable range.
+ * - Delta for CEIL: delta_from_mid_point_in_us_1
+ * - Delta for FLOOR: delta_from_mid_point_in_us_2
*/
- if ((frame_time_in_us / mid_point_frames_ceil) > in_out_vrr->min_duration_in_us &&
- (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2 ||
- mid_point_frames_floor < 2)) {
+ if ((last_render_time_in_us / mid_point_frames_ceil) < in_out_vrr->min_duration_in_us) {
+ /* Check for out of range.
+ * If using CEIL produces a value that is out of range,
+ * then we are forced to use FLOOR.
+ */
+ frames_to_insert = mid_point_frames_floor;
+ } else if (mid_point_frames_floor < 2) {
+ /* If FLOOR would result in non-LFC, we are forced to
+ * use CEIL.
+ */
+ frames_to_insert = mid_point_frames_ceil;
+ } else if (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2) {
+ /* If CEIL results in a frame duration closer to the
+ * mid point of the range, choose CEIL.
+ */
frames_to_insert = mid_point_frames_ceil;
- delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_2 -
- delta_from_mid_point_in_us_1;
} else {
+ /* If FLOOR results in a frame duration closer to the
+ * mid point of the range, choose FLOOR.
+ */
frames_to_insert = mid_point_frames_floor;
- delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_1 -
- delta_from_mid_point_in_us_2;
}
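The three branches above pick between CEIL and FLOOR of the midpoint divide: stay inside the supported range first, keep LFC active (at least 2 inserted frames) second, and otherwise take whichever lands closer to the midpoint. A compact standalone model, as a sketch with illustrative names and values:

#include <stdio.h>

static unsigned absdiff(unsigned a, unsigned b)
{ return a > b ? a - b : b - a; }

/* Choose how many frames to insert for one rendered frame. */
static unsigned pick_frames(unsigned render_us, unsigned min_dur_us,
			    unsigned mid_us)
{
	unsigned up = (render_us + mid_us - 1) / mid_us;	/* CEIL  */
	unsigned down = render_us / mid_us;			/* FLOOR */

	if (render_us / up < min_dur_us)
		return down;	/* CEIL leaves the supported range */
	if (down < 2)
		return up;	/* FLOOR would fall out of LFC */
	return absdiff(render_us / up, mid_us) <
	       absdiff(render_us / down, mid_us) ? up : down;
}

int main(void)
{
	/* 40 Hz content on a 48..144 Hz panel: midpoint duration ~10417 us */
	printf("frames = %u\n", pick_frames(25000, 6944, 10417));
	return 0;
}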
/* Prefer current frame multiplier when BTR is enabled unless it drifts
* too far from the midpoint
*/
+ if (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2) {
+ delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_2 -
+ delta_from_mid_point_in_us_1;
+ } else {
+ delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_1 -
+ delta_from_mid_point_in_us_2;
+ }
if (in_out_vrr->btr.frames_to_insert != 0 &&
delta_from_mid_point_delta_in_us < BTR_DRIFT_MARGIN) {
if (((last_render_time_in_us / in_out_vrr->btr.frames_to_insert) <
{
int ret = 0;
uint32_t feature_mask[2];
- unsigned long feature_enabled;
+ uint64_t feature_enabled;
+
ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2);
- feature_enabled = (unsigned long)((uint64_t)feature_mask[0] |
- ((uint64_t)feature_mask[1] << 32));
+ if (ret)
+ return false;
+
+ feature_enabled = (uint64_t)feature_mask[1] << 32 | feature_mask[0];
+
return !!(feature_enabled & SMC_DPM_FEATURE);
}
};
static void arcturus_log_thermal_throttling_event(struct smu_context *smu)
{
+ int ret;
int throttler_idx, throtting_events = 0, buf_idx = 0;
struct amdgpu_device *adev = smu->adev;
uint32_t throttler_status;
char log_buf[256];
- arcturus_get_smu_metrics_data(smu,
- METRICS_THROTTLER_STATUS,
- &throttler_status);
+ ret = arcturus_get_smu_metrics_data(smu,
+ METRICS_THROTTLER_STATUS,
+ &throttler_status);
+ if (ret)
+ return;
memset(log_buf, 0, sizeof(log_buf));
for (throttler_idx = 0; throttler_idx < ARRAY_SIZE(logging_label);
{
struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
- if (smu10_data->need_min_deep_sleep_dcefclk &&
- smu10_data->deep_sleep_dcefclk != clock) {
+ if (clock && smu10_data->deep_sleep_dcefclk != clock) {
smu10_data->deep_sleep_dcefclk = clock;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetMinDeepSleepDcefclk,
{
struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
- if (smu10_data->dcf_actual_hard_min_freq &&
- smu10_data->dcf_actual_hard_min_freq != clock) {
+ if (clock && smu10_data->dcf_actual_hard_min_freq != clock) {
smu10_data->dcf_actual_hard_min_freq = clock;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinDcefclkByFreq,
{
struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
- if (smu10_data->f_actual_hard_min_freq &&
- smu10_data->f_actual_hard_min_freq != clock) {
+ if (clock && smu10_data->f_actual_hard_min_freq != clock) {
smu10_data->f_actual_hard_min_freq = clock;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinFclkByFreq,
case AMDGPU_PP_SENSOR_GPU_POWER:
return smu7_get_gpu_power(hwmgr, (uint32_t *)value);
case AMDGPU_PP_SENSOR_VDDGFX:
- if ((data->vr_config & 0xff) == 0x2)
+ if ((data->vr_config & VRCONF_VDDGFX_MASK) ==
+ (VR_SVI2_PLANE_2 << VRCONF_VDDGFX_SHIFT))
val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device,
CGS_IND_REG__SMC, PWR_SVI2_STATUS, PLANE2_VID);
else
static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
struct PP_TemperatureRange *range)
{
+ struct phm_ppt_v2_information *pp_table_info =
+ (struct phm_ppt_v2_information *)(hwmgr->pptable);
+ struct phm_tdp_table *tdp_table = pp_table_info->tdp_table;
struct amdgpu_device *adev = hwmgr->adev;
- int low = VEGA10_THERMAL_MINIMUM_ALERT_TEMP *
- PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
- int high = VEGA10_THERMAL_MAXIMUM_ALERT_TEMP *
- PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ int low = VEGA10_THERMAL_MINIMUM_ALERT_TEMP;
+ int high = VEGA10_THERMAL_MAXIMUM_ALERT_TEMP;
uint32_t val;
- if (low < range->min)
- low = range->min;
- if (high > range->max)
- high = range->max;
+ /* compare them in degrees Celsius */
+ if (low < range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)
+ low = range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+ /*
+ * As a rule, usSoftwareShutdownTemp should be greater than
+ * ThotspotLimit. For any invalid usSoftwareShutdownTemp, fall
+ * back to the maximum possible setting,
+ * VEGA10_THERMAL_MAXIMUM_ALERT_TEMP, to avoid false alarms.
+ */
+ if ((tdp_table->usSoftwareShutdownTemp >
+ range->hotspot_crit_max / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)) {
+ if (high > tdp_table->usSoftwareShutdownTemp)
+ high = tdp_table->usSoftwareShutdownTemp;
+ }
if (low > high)
return -EINVAL;
val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
- val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
- val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
+ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, high);
+ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, low);
val &= (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK) &
(~THM_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK) &
(~THM_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK);
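
The incoming range is expressed in PP_TEMPERATURE_UNITS_PER_CENTIGRADES (centi-degrees) while the DIG_THERM_INTH/INTL register fields take whole degrees, so the patch converts once during clamping and additionally bounds the high trip point by the pptable shutdown temperature. A small standalone sketch of that clamping, with made-up limits:

#include <stdio.h>

#define UNITS_PER_DEGREE 100    /* stand-in for PP_TEMPERATURE_UNITS_PER_CENTIGRADES */

int main(void)
{
        int low = -273, high = 99;      /* ASIC alert limits, degrees C */
        int range_min = 0;              /* incoming limit, centi-degrees C */
        int shutdown_temp = 90;         /* pptable shutdown limit, degrees C */

        /* compare in degrees: convert the incoming limit once */
        if (low < range_min / UNITS_PER_DEGREE)
                low = range_min / UNITS_PER_DEGREE;
        /* never program an alert above the shutdown temperature */
        if (high > shutdown_temp)
                high = shutdown_temp;

        printf("DIG_THERM_INTL=%d DIG_THERM_INTH=%d (degrees C)\n", low, high);
        return low > high;              /* -EINVAL analogue */
}
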
static int vega12_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
struct PP_TemperatureRange *range)
{
+ struct phm_ppt_v3_information *pptable_information =
+ (struct phm_ppt_v3_information *)hwmgr->pptable;
struct amdgpu_device *adev = hwmgr->adev;
- int low = VEGA12_THERMAL_MINIMUM_ALERT_TEMP *
- PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
- int high = VEGA12_THERMAL_MAXIMUM_ALERT_TEMP *
- PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ int low = VEGA12_THERMAL_MINIMUM_ALERT_TEMP;
+ int high = VEGA12_THERMAL_MAXIMUM_ALERT_TEMP;
uint32_t val;
- if (low < range->min)
- low = range->min;
- if (high > range->max)
- high = range->max;
+ /* compare them in degrees Celsius */
+ if (low < range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)
+ low = range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ if (high > pptable_information->us_software_shutdown_temp)
+ high = pptable_information->us_software_shutdown_temp;
if (low > high)
return -EINVAL;
val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
- val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
- val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
+ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, high);
+ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, low);
val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);
{
struct vega20_hwmgr *data =
(struct vega20_hwmgr *)(hwmgr->backend);
- uint64_t features_enabled;
- int i;
- bool enabled;
- int ret = 0;
+ int i, ret = 0;
PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_DisableAllSmuFeatures,
"[DisableAllSMUFeatures] Failed to disable all smu features!",
return ret);
- ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled);
- PP_ASSERT_WITH_CODE(!ret,
- "[DisableAllSMUFeatures] Failed to get enabled smc features!",
- return ret);
-
- for (i = 0; i < GNLD_FEATURES_MAX; i++) {
- enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ?
- true : false;
- data->smu_features[i].enabled = enabled;
- data->smu_features[i].supported = enabled;
- }
+ for (i = 0; i < GNLD_FEATURES_MAX; i++)
+ data->smu_features[i].enabled = 0;
return 0;
}
data->uvd_power_gated = true;
data->vce_power_gated = true;
-
- if (data->smu_features[GNLD_DPM_UVD].enabled)
- data->uvd_power_gated = false;
-
- if (data->smu_features[GNLD_DPM_VCE].enabled)
- data->vce_power_gated = false;
}
static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
static int vega20_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfeature_masks)
{
- uint64_t features_enabled;
- uint64_t features_to_enable;
- uint64_t features_to_disable;
- int ret = 0;
+ struct vega20_hwmgr *data =
+ (struct vega20_hwmgr *)(hwmgr->backend);
+ uint64_t features_enabled, features_to_enable, features_to_disable;
+ int i, ret = 0;
+ bool enabled;
if (new_ppfeature_masks >= (1ULL << GNLD_FEATURES_MAX))
return -EINVAL;
return ret;
}
+ /* Update the cached feature enablement state */
+ ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < GNLD_FEATURES_MAX; i++) {
+ enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ?
+ true : false;
+ data->smu_features[i].enabled = enabled;
+ }
+
return 0;
}
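
After applying the new mask the code re-reads the SMC bitmap and refreshes each cached flag rather than trusting the requested value. A compact sketch of deriving per-feature booleans from a 64-bit bitmap, with hypothetical bit assignments:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define N_FEATURES 4

int main(void)
{
        /* Hypothetical per-feature bits within the 64-bit SMC bitmap. */
        const uint64_t feature_bitmap[N_FEATURES] = {
                1ull << 0, 1ull << 3, 1ull << 7, 1ull << 42,
        };
        uint64_t features_enabled = (1ull << 3) | (1ull << 42);
        bool enabled;
        int i;

        for (i = 0; i < N_FEATURES; i++) {
                enabled = (features_enabled & feature_bitmap[i]) != 0;
                printf("feature %d: %s\n", i, enabled ? "on" : "off");
        }
        return 0;
}
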
static int vega20_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
struct PP_TemperatureRange *range)
{
+ struct phm_ppt_v3_information *pptable_information =
+ (struct phm_ppt_v3_information *)hwmgr->pptable;
struct amdgpu_device *adev = hwmgr->adev;
- int low = VEGA20_THERMAL_MINIMUM_ALERT_TEMP *
- PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
- int high = VEGA20_THERMAL_MAXIMUM_ALERT_TEMP *
- PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ int low = VEGA20_THERMAL_MINIMUM_ALERT_TEMP;
+ int high = VEGA20_THERMAL_MAXIMUM_ALERT_TEMP;
uint32_t val;
- if (low < range->min)
- low = range->min;
- if (high > range->max)
- high = range->max;
+ /* compare them in degrees Celsius */
+ if (low < range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)
+ low = range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ if (high > pptable_information->us_software_shutdown_temp)
+ high = pptable_information->us_software_shutdown_temp;
if (low > high)
return -EINVAL;
val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
- val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
- val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
+ val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, high);
+ val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, low);
val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);
{
int ret = 0;
uint32_t feature_mask[2];
- unsigned long feature_enabled;
+ uint64_t feature_enabled;
+
ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2);
- feature_enabled = (unsigned long)((uint64_t)feature_mask[0] |
- ((uint64_t)feature_mask[1] << 32));
+ if (ret)
+ return false;
+
+ feature_enabled = (uint64_t)feature_mask[1] << 32 | feature_mask[0];
+
return !!(feature_enabled & SMC_DPM_FEATURE);
}
FEATURE_MASK(FEATURE_DPM_LINK_BIT) | \
FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT) | \
FEATURE_MASK(FEATURE_DPM_FCLK_BIT) | \
- FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT))
+ FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT) | \
+ FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT))
#define SMU_11_0_7_GFX_BUSY_THRESHOLD 15
MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 0),
MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 0),
MSG_MAP(UseDefaultPPTable, PPSMC_MSG_UseDefaultPPTable, 0),
+ MSG_MAP(RunDcBtc, PPSMC_MSG_RunDcBtc, 0),
MSG_MAP(EnterBaco, PPSMC_MSG_EnterBaco, 0),
MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 0),
MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 0),
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT)
| FEATURE_MASK(FEATURE_DPM_FCLK_BIT)
+ | FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT)
| FEATURE_MASK(FEATURE_DS_SOCCLK_BIT)
| FEATURE_MASK(FEATURE_DS_DCEFCLK_BIT)
| FEATURE_MASK(FEATURE_DS_FCLK_BIT)
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL);
if (ret)
return ret;
- if (adev->asic_type == CHIP_SIENNA_CICHLID) {
+ if (adev->vcn.num_vcn_inst > 1) {
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn,
0x10000, NULL);
if (ret)
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn, 0, NULL);
if (ret)
return ret;
- if (adev->asic_type == CHIP_SIENNA_CICHLID) {
+ if (adev->vcn.num_vcn_inst > 1) {
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn,
0x10000, NULL);
if (ret)
{
int ret = 0;
uint32_t feature_mask[2];
- unsigned long feature_enabled;
+ uint64_t feature_enabled;
+
ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2);
- feature_enabled = (unsigned long)((uint64_t)feature_mask[0] |
- ((uint64_t)feature_mask[1] << 32));
+ if (ret)
+ return false;
+
+ feature_enabled = (uint64_t)feature_mask[1] << 32 | feature_mask[0];
+
return !!(feature_enabled & SMC_DPM_FEATURE);
}
return ret;
}
+static int sienna_cichlid_run_btc(struct smu_context *smu)
+{
+ return smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL);
+}
+
static bool sienna_cichlid_is_baco_supported(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
.mode1_reset = smu_v11_0_mode1_reset,
.get_dpm_ultimate_freq = sienna_cichlid_get_dpm_ultimate_freq,
.set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
+ .run_btc = sienna_cichlid_run_btc,
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
};
#include "cgs_common.h"
#include "atombios.h"
#include "pppcielanes.h"
+#include "smu7_smumgr.h"
#include "smu/smu_7_0_1_d.h"
#include "smu/smu_7_0_1_sh_mask.h"
.request_smu_load_specific_fw = NULL,
.send_msg_to_smc = ci_send_msg_to_smc,
.send_msg_to_smc_with_parameter = ci_send_msg_to_smc_with_parameter,
+ .get_argument = smu7_get_argument,
.download_pptable_settings = NULL,
.upload_pptable_settings = NULL,
.get_offsetof = ci_get_offsetof,
malidp_hw_write(hwdev, lower_32_bits(addrs[1]), base + MALIDP_MW_P2_PTR_LOW);
malidp_hw_write(hwdev, upper_32_bits(addrs[1]), base + MALIDP_MW_P2_PTR_HIGH);
malidp_hw_write(hwdev, pitches[1], base + MALIDP_MW_P2_STRIDE);
- /* fall through */
+ fallthrough;
case 1:
malidp_hw_write(hwdev, lower_32_bits(addrs[0]), base + MALIDP_MW_P1_PTR_LOW);
malidp_hw_write(hwdev, upper_32_bits(addrs[0]), base + MALIDP_MW_P1_PTR_HIGH);
malidp_hw_write(hwdev, lower_32_bits(addrs[1]), base + MALIDP_MW_P2_PTR_LOW);
malidp_hw_write(hwdev, upper_32_bits(addrs[1]), base + MALIDP_MW_P2_PTR_HIGH);
malidp_hw_write(hwdev, pitches[1], base + MALIDP_MW_P2_STRIDE);
- /* fall through */
+ fallthrough;
case 1:
malidp_hw_write(hwdev, lower_32_bits(addrs[0]), base + MALIDP_MW_P1_PTR_LOW);
malidp_hw_write(hwdev, upper_32_bits(addrs[0]), base + MALIDP_MW_P1_PTR_HIGH);
break;
case MW_RESTART:
drm_writeback_signal_completion(&malidp->mw_connector, 0);
- /* fall through - to a new start */
+ fallthrough; /* to a new start */
case MW_START:
/* writeback started, need to emulate one-shot mode */
hw->disable_memwrite(hwdev);
ast->dp501_fw_addr = NULL;
}
}
- /* fallthrough */
+ fallthrough;
case 0x0c:
ast->tx_chip_type = AST_TX_DP501;
}
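
The repeated conversions from /* fall through */ comments to the fallthrough pseudo-keyword throughout this series let -Wimplicit-fallthrough be enforced by the compiler rather than by comment parsing. A minimal out-of-tree sketch that defines the macro the same way the kernel does (attribute where available, no-op otherwise):

#include <stdio.h>

#if defined(__has_attribute)
# if __has_attribute(__fallthrough__)
#  define fallthrough __attribute__((__fallthrough__))
# endif
#endif
#ifndef fallthrough
# define fallthrough do {} while (0)  /* fallback for older compilers */
#endif

static int decode(int n)
{
        int flags = 0;

        switch (n) {
        case 3:
                flags |= 4;
                fallthrough;    /* deliberate: case 3 implies case 2's work */
        case 2:
                flags |= 2;
                fallthrough;
        case 1:
                flags |= 1;
                break;
        }
        return flags;
}

int main(void)
{
        printf("decode(3) = %d\n", decode(3));  /* prints 7 */
        return 0;
}
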
switch (data_type) {
case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
- fallthrough;
case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
if (xfer->msg->rx_len > 1) {
/* read second byte */
}
fallthrough;
case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
- fallthrough;
case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
if (xfer->msg->rx_len > 0) {
/* read first byte */
switch (hparms->channels) {
case 7 ... 8:
conf0 |= HDMI_AUD_CONF0_I2S_EN3;
- /* Fall-thru */
+ fallthrough;
case 5 ... 6:
conf0 |= HDMI_AUD_CONF0_I2S_EN2;
- /* Fall-thru */
+ fallthrough;
case 3 ... 4:
conf0 |= HDMI_AUD_CONF0_I2S_EN1;
/* Fall-thru */
DRM_DEV_ERROR(pdata->dev,
"Unexpected max rate (%#x); assuming 5.4 GHz\n",
(int)dpcd_val);
- /* fall through */
+ fallthrough;
case DP_LINK_BW_5_4:
rate_valid[7] = 1;
- /* fall through */
+ fallthrough;
case DP_LINK_BW_2_7:
rate_valid[4] = 1;
- /* fall through */
+ fallthrough;
case DP_LINK_BW_1_62:
rate_valid[1] = 1;
break;
#include <drm/drm_bridge.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_self_refresh_helper.h>
if (ret)
DRM_ERROR("Disabling all crtc's during unload failed with %i\n", ret);
- DRM_MODESET_LOCK_ALL_END(ctx, ret);
+ DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
}
EXPORT_SYMBOL(drm_atomic_helper_shutdown);
}
unlock:
- DRM_MODESET_LOCK_ALL_END(ctx, err);
+ DRM_MODESET_LOCK_ALL_END(dev, ctx, err);
if (err)
return ERR_PTR(err);
err = drm_atomic_helper_commit_duplicated_state(state, &ctx);
- DRM_MODESET_LOCK_ALL_END(ctx, err);
+ DRM_MODESET_LOCK_ALL_END(dev, ctx, err);
drm_atomic_state_put(state);
return err;
switch (map->type) {
case _DRM_REGISTERS:
iounmap(map->handle);
- /* FALLTHROUGH */
+ fallthrough;
case _DRM_FRAME_BUFFER:
arch_phys_wc_del(map->mtrr);
break;
crtc->gamma_size, &ctx);
out:
- DRM_MODESET_LOCK_ALL_END(ctx, ret);
+ DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
return ret;
}
if (crtc_req->mode_valid && !drm_lease_held(file_priv, plane->base.id))
return -EACCES;
- mutex_lock(&crtc->dev->mode_config.mutex);
DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx,
DRM_MODESET_ACQUIRE_INTERRUPTIBLE, ret);
fb = NULL;
mode = NULL;
- DRM_MODESET_LOCK_ALL_END(ctx, ret);
- mutex_unlock(&crtc->dev->mode_config.mutex);
+ DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
return ret;
}
case DP_DS_16BPC:
return 16;
}
- /* fall through */
+ fallthrough;
default:
return 0;
}
crtc = conn_state->crtc;
- if (WARN_ON(!crtc))
- return -EINVAL;
+ if (!crtc)
+ continue;
if (!drm_dp_mst_dsc_aux_for_port(pos->port))
continue;
out_unref:
drm_mode_object_put(obj);
out:
- DRM_MODESET_LOCK_ALL_END(ctx, ret);
+ DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
return ret;
}
break;
}
drm_property_change_valid_put(prop, ref);
- DRM_MODESET_LOCK_ALL_END(ctx, ret);
+ DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
return ret;
}
default:
WARN(1, "Invalid aspect ratio (0%x) on mode\n",
in->picture_aspect_ratio);
- /* fall through */
+ fallthrough;
case HDMI_PICTURE_ASPECT_NONE:
out->flags |= DRM_MODE_FLAG_PIC_AR_NONE;
break;
crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h, &ctx);
- DRM_MODESET_LOCK_ALL_END(ctx, ret);
+ DRM_MODESET_LOCK_ALL_END(plane->dev, ctx, ret);
return ret;
}
gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL);
gpu->identity.revision = gpu_read(gpu, VIVS_HI_CHIP_REV);
- gpu->identity.product_id = gpu_read(gpu, VIVS_HI_CHIP_PRODUCT_ID);
gpu->identity.customer_id = gpu_read(gpu, VIVS_HI_CHIP_CUSTOMER_ID);
- gpu->identity.eco_id = gpu_read(gpu, VIVS_HI_CHIP_ECO_ID);
+
+ /*
+ * Reading these two registers on GC600 rev 0x19 results in an
+ * unhandled fault: external abort on non-linefetch
+ */
+ if (!etnaviv_is_model_rev(gpu, GC600, 0x19)) {
+ gpu->identity.product_id = gpu_read(gpu, VIVS_HI_CHIP_PRODUCT_ID);
+ gpu->identity.eco_id = gpu_read(gpu, VIVS_HI_CHIP_ECO_ID);
+ }
/*
* !!!! HACK ALERT !!!!
u32 dma_addr;
int change;
+ /* block scheduler */
+ drm_sched_stop(&gpu->sched, sched_job);
+
/*
* If the GPU managed to complete this job's fence, the timeout is
* spurious. Bail out.
*/
if (dma_fence_is_signaled(submit->out_fence))
- return;
+ goto out_no_timeout;
/*
* If the GPU is still making forward progress on the front-end (which
change = dma_addr - gpu->hangcheck_dma_addr;
if (change < 0 || change > 16) {
gpu->hangcheck_dma_addr = dma_addr;
- return;
+ goto out_no_timeout;
}
- /* block scheduler */
- drm_sched_stop(&gpu->sched, sched_job);
-
if(sched_job)
drm_sched_increase_karma(sched_job);
drm_sched_resubmit_jobs(&gpu->sched);
+out_no_timeout:
/* restart scheduler after GPU is usable again */
drm_sched_start(&gpu->sched, true);
}
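
The reordering above makes the handler stop the scheduler before the spurious-timeout checks, and the early exits now jump to a label that restarts it, so every path leaves the scheduler consistent. A compressed, runnable sketch of that control flow with stand-in types and helpers (all names here are hypothetical):

#include <stdbool.h>
#include <stdio.h>

struct sched { bool stopped; };
struct job {
        struct sched *sched;
        bool fence_signaled;
        bool progressing;
};

static void sched_stop(struct sched *s)  { s->stopped = true;  puts("sched stopped"); }
static void sched_start(struct sched *s) { s->stopped = false; puts("sched restarted"); }

static void timedout_job(struct job *job)
{
        sched_stop(job->sched);          /* block further submission first */

        if (job->fence_signaled)
                goto out_no_timeout;     /* spurious: job finished meanwhile */
        if (job->progressing)
                goto out_no_timeout;     /* front-end is still advancing */

        puts("real hang: bump karma and resubmit");

out_no_timeout:
        sched_start(job->sched);         /* GPU usable again on every path */
}

int main(void)
{
        struct sched s = { false };
        struct job j = { &s, true, false };

        timedout_job(&j);                /* exercises the spurious-timeout path */
        return 0;
}
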
switch (length) {
case 3:
reg |= payload[2] << 16;
- /* Fall through */
+ fallthrough;
case 2:
reg |= payload[1] << 8;
- /* Fall through */
+ fallthrough;
case 1:
reg |= payload[0];
exynos_dsi_write(dsi, DSIM_PAYLOAD_REG, reg);
payload[1] = reg >> 16;
++xfer->rx_done;
}
- /* Fall through */
+ fallthrough;
case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
payload[0] = reg >> 8;
switch (length) {
case 3:
payload[2] = (reg >> 16) & 0xff;
- /* Fall through */
+ fallthrough;
case 2:
payload[1] = (reg >> 8) & 0xff;
- /* Fall through */
+ fallthrough;
case 1:
payload[0] = reg & 0xff;
}
offset = fbi->var.xoffset * fb->format->cpp[0];
offset += fbi->var.yoffset * fb->pitches[0];
- fbi->screen_base = exynos_gem->kvaddr + offset;
+ fbi->screen_buffer = exynos_gem->kvaddr + offset;
fbi->screen_size = size;
fbi->fix.smem_len = size;
unsigned int flags;
unsigned long size;
void *cookie;
- void __iomem *kvaddr;
+ void *kvaddr;
dma_addr_t dma_addr;
unsigned long dma_attrs;
struct sg_table *sgt;
break;
case DRM_FORMAT_ARGB8888:
alpha = DCU_LAYER_AB_WHOLE_FRAME;
- /* fall-through */
+ fallthrough;
case DRM_FORMAT_XRGB8888:
bpp = FSL_DCU_ARGB8888;
break;
case DRM_FORMAT_ARGB4444:
alpha = DCU_LAYER_AB_WHOLE_FRAME;
- /* fall-through */
+ fallthrough;
case DRM_FORMAT_XRGB4444:
bpp = FSL_DCU_ARGB4444;
break;
case DRM_FORMAT_ARGB1555:
alpha = DCU_LAYER_AB_WHOLE_FRAME;
- /* fall-through */
+ fallthrough;
case DRM_FORMAT_XRGB1555:
bpp = FSL_DCU_ARGB1555;
break;
switch (intel_dsi->pixel_format) {
default:
MISSING_CASE(intel_dsi->pixel_format);
- /* fallthrough */
+ fallthrough;
case MIPI_DSI_FMT_RGB565:
tmp |= PIX_FMT_RGB565;
break;
switch (intel_dsi->video_mode_format) {
default:
MISSING_CASE(intel_dsi->video_mode_format);
- /* fallthrough */
+ fallthrough;
case VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS:
tmp |= VIDEO_MODE_SYNC_EVENT;
break;
switch (pipe) {
default:
MISSING_CASE(pipe);
- /* fallthrough */
+ fallthrough;
case PIPE_A:
tmp |= TRANS_DDI_EDP_INPUT_A_ON;
break;
drm_dbg_kms(&dev_priv->drm,
"VBT tp1 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n",
psr_table->tp1_wakeup_time);
- /* fallthrough */
+ fallthrough;
case 2:
dev_priv->vbt.psr.tp1_wakeup_time_us = 2500;
break;
drm_dbg_kms(&dev_priv->drm,
"VBT tp2_tp3 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n",
psr_table->tp2_tp3_wakeup_time);
- /* fallthrough */
+ fallthrough;
case 2:
dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 2500;
break;
switch (child->hdmi_max_data_rate) {
default:
MISSING_CASE(child->hdmi_max_data_rate);
- /* fall through */
+ fallthrough;
case HDMI_MAX_DATA_RATE_PLATFORM:
max_tmds_clock = 0;
break;
default:
drm_err(&dev_priv->drm,
"Unknown pnv display core clock 0x%04x\n", gcfgc);
- /* fall through */
+ fallthrough;
case GC_DISPLAY_CLOCK_133_MHZ_PNV:
cdclk_config->cdclk = 133333;
break;
switch (cdclk) {
default:
MISSING_CASE(cdclk);
- /* fall through */
+ fallthrough;
case 337500:
val |= LCPLL_CLK_FREQ_337_5_BDW;
break;
drm_WARN_ON(&dev_priv->drm,
cdclk != dev_priv->cdclk.hw.bypass);
drm_WARN_ON(&dev_priv->drm, vco != 0);
- /* fall through */
+ fallthrough;
case 308571:
case 337500:
freq_select = CDCLK_FREQ_337_308;
switch (dssm) {
default:
MISSING_CASE(dssm);
- /* fall through */
+ fallthrough;
case ICL_DSSM_CDCLK_PLL_REFCLK_24MHz:
cdclk_config->ref = 24000;
break;
drm_WARN_ON(&dev_priv->drm,
cdclk != dev_priv->cdclk.hw.bypass);
drm_WARN_ON(&dev_priv->drm, vco != 0);
- /* fall through */
+ fallthrough;
case 2:
divider = BXT_CDCLK_CD2X_DIV_SEL_1;
break;
switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) {
default:
MISSING_CASE(val);
- /* fall through */
+ fallthrough;
case VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0:
procmon = &cnl_procmon_values[PROCMON_0_85V_DOT_0];
break;
static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv,
enum phy phy)
{
- bool ret;
+ bool ret = true;
u32 expected_val = 0;
if (!icl_combo_phy_enabled(dev_priv, phy))
DCC_MODE_SELECT_CONTINUOSLY);
}
- ret = cnl_verify_procmon_ref_values(dev_priv, phy);
+ ret &= cnl_verify_procmon_ref_values(dev_priv, phy);
if (phy_is_master(dev_priv, phy)) {
ret &= check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW8(phy),
break;
default:
MISSING_CASE(lane_count);
- /* fall-through */
+ fallthrough;
case 4:
lane_mask = PWR_UP_ALL_LANES;
break;
break;
default:
MISSING_CASE(lane_count);
- /* fall-through */
+ fallthrough;
case 4:
lane_mask = PWR_UP_ALL_LANES;
break;
switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
default:
MISSING_CASE(tmp & TRANS_DDI_EDP_INPUT_MASK);
- /* fallthrough */
+ fallthrough;
case TRANS_DDI_EDP_INPUT_A_ON:
case TRANS_DDI_EDP_INPUT_A_ONOFF:
*pipe_mask = BIT(PIPE_A);
pipe_config->hdmi_scrambling = true;
if (temp & TRANS_DDI_HIGH_TMDS_CHAR_RATE)
pipe_config->hdmi_high_tmds_clock_ratio = true;
- /* fall through */
+ fallthrough;
case TRANS_DDI_MODE_SELECT_DVI:
pipe_config->output_types |= BIT(INTEL_OUTPUT_HDMI);
pipe_config->lane_count = 4;
case I915_FORMAT_MOD_Y_TILED_CCS:
if (is_ccs_plane(fb, color_plane))
return 128;
- /* fall through */
+ fallthrough;
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
if (is_ccs_plane(fb, color_plane))
return 64;
- /* fall through */
+ fallthrough;
case I915_FORMAT_MOD_Y_TILED:
if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
return 128;
case I915_FORMAT_MOD_Yf_TILED_CCS:
if (is_ccs_plane(fb, color_plane))
return 128;
- /* fall through */
+ fallthrough;
case I915_FORMAT_MOD_Yf_TILED:
switch (cpp) {
case 1:
case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
if (is_semiplanar_uv_plane(fb, color_plane))
return intel_tile_row_size(fb, color_plane);
- /* Fall-through */
+ fallthrough;
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
return 16 * 1024;
case I915_FORMAT_MOD_Y_TILED_CCS:
if (INTEL_GEN(dev_priv) >= 12 &&
is_semiplanar_uv_plane(fb, color_plane))
return intel_tile_row_size(fb, color_plane);
- /* Fall-through */
+ fallthrough;
case I915_FORMAT_MOD_Yf_TILED:
return 1 * 1024 * 1024;
default:
case DRM_FORMAT_ARGB16161616F:
if (INTEL_GEN(dev_priv) >= 11)
break;
- /* fall through */
+ fallthrough;
default:
drm_dbg_kms(&dev_priv->drm,
"[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
break;
default:
MISSING_CASE(ddi_pll_sel);
- /* fall through */
+ fallthrough;
case PORT_CLK_SEL_NONE:
return;
}
drm_WARN(dev, 1,
"unknown pipe linked to transcoder %s\n",
transcoder_name(panel_transcoder));
- /* fall through */
+ fallthrough;
case TRANS_DDI_EDP_INPUT_A_ONOFF:
force_thru = true;
- /* fall through */
+ fallthrough;
case TRANS_DDI_EDP_INPUT_A_ON:
trans_pipe = PIPE_A;
break;
case INTEL_OUTPUT_DDI:
if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev))))
break;
- /* else, fall through */
+ fallthrough;
case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_HDMI:
case INTEL_OUTPUT_EDP:
if (any_ms && !check_digital_port_conflicts(state)) {
drm_dbg_kms(&dev_priv->drm,
"rejecting conflicting digital port configuration\n");
- ret = EINVAL;
+ ret = -EINVAL;
goto fail;
}
static int i915_lpsp_capability_show(struct seq_file *m, void *data)
{
struct drm_connector *connector = m->private;
- struct intel_encoder *encoder =
- intel_attached_encoder(to_intel_connector(connector));
struct drm_i915_private *i915 = to_i915(connector->dev);
+ struct intel_encoder *encoder;
+
+ encoder = intel_attached_encoder(to_intel_connector(connector));
+ if (!encoder)
+ return -ENODEV;
if (connector->status != connector_status_connected)
return -ENODEV;
},
},
{
+ .name = "TC cold off",
+ .domains = TGL_TC_COLD_OFF_POWER_DOMAINS,
+ .ops = &tgl_tc_cold_off_ops,
+ .id = DISP_PW_ID_NONE,
+ },
+ {
.name = "AUX A",
.domains = TGL_AUX_A_IO_POWER_DOMAINS,
.ops = &icl_aux_power_well_ops,
.hsw.irq_pipe_mask = BIT(PIPE_D),
},
},
- {
- .name = "TC cold off",
- .domains = TGL_TC_COLD_OFF_POWER_DOMAINS,
- .ops = &tgl_tc_cold_off_ops,
- .id = DISP_PW_ID_NONE,
- },
};
static const struct i915_power_well_desc rkl_power_wells[] = {
};
static const struct buddy_page_mask tgl_buddy_page_masks[] = {
- { .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0xE },
{ .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0xF },
{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C },
{ .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x1F },
+ { .num_channels = 4, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x38 },
{}
};
refclk = dev_priv->dpll.ref_clks.nssc;
break;
}
- /* fall through */
+ fallthrough;
case WRPLL_REF_PCH_SSC:
/*
* We could calculate spread here, but our checking
switch (dev_priv->dpll.ref_clks.nssc) {
default:
MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
- /* fall-through */
+ fallthrough;
case 19200:
*pll_params = tgl_tbt_pll_19_2MHz_values;
break;
switch (dev_priv->dpll.ref_clks.nssc) {
default:
MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
- /* fall-through */
+ fallthrough;
case 19200:
case 38400:
*pll_params = icl_tbt_pll_19_2MHz_values;
switch (div1) {
default:
MISSING_CASE(div1);
- /* fall through */
+ fallthrough;
case 2:
hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
break;
/* Fill up the empty slots in sha_text and write it out */
sha_empty = sizeof(sha_text) - sha_leftovers;
- for (j = 0; j < sha_empty; j++)
- sha_text |= ksv[j] << ((sizeof(sha_text) - j - 1) * 8);
+ for (j = 0; j < sha_empty; j++) {
+ u8 off = ((sizeof(sha_text) - j - 1 - sha_leftovers) * 8);
+ sha_text |= ksv[j] << off;
+ }
ret = intel_write_sha_text(dev_priv, sha_text);
if (ret < 0)
/* Write 32 bits of text */
intel_de_write(dev_priv, HDCP_REP_CTL,
rep_ctl | HDCP_SHA1_TEXT_32);
- sha_text |= bstatus[0] << 24 | bstatus[1] << 16;
+ sha_text |= bstatus[0] << 8 | bstatus[1];
ret = intel_write_sha_text(dev_priv, sha_text);
if (ret < 0)
return ret;
return ret;
sha_idx += sizeof(sha_text);
}
+
+ /*
+ * Terminate the SHA-1 stream by hand. For the other leftover
+ * cases this is appended by the hardware.
+ */
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_TEXT_32);
+ sha_text = DRM_HDCP_SHA1_TERMINATOR << 24;
+ ret = intel_write_sha_text(dev_priv, sha_text);
+ if (ret < 0)
+ return ret;
+ sha_idx += sizeof(sha_text);
} else if (sha_leftovers == 3) {
- /* Write 32 bits of text */
+ /* Write 32 bits of text (filled from LSB) */
intel_de_write(dev_priv, HDCP_REP_CTL,
rep_ctl | HDCP_SHA1_TEXT_32);
- sha_text |= bstatus[0] << 24;
+ sha_text |= bstatus[0];
ret = intel_write_sha_text(dev_priv, sha_text);
if (ret < 0)
return ret;
sha_idx += sizeof(sha_text);
- /* Write 8 bits of text, 24 bits of M0 */
+ /* Write 8 bits of text (filled from LSB), 24 bits of M0 */
intel_de_write(dev_priv, HDCP_REP_CTL,
rep_ctl | HDCP_SHA1_TEXT_8);
ret = intel_write_sha_text(dev_priv, bstatus[1]);
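
The corrected shift accounts for the sha_leftovers bytes already sitting in the top of sha_text, so the fresh KSV bytes fill the word from the low end instead of overwriting them. A worked standalone example of the packing expression with one leftover byte and hypothetical KSV values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t sha_text = 0xaa000000u;  /* one leftover byte already at the top */
        unsigned int sha_leftovers = 1;
        unsigned int sha_empty = sizeof(sha_text) - sha_leftovers;
        uint8_t ksv[3] = { 0x11, 0x22, 0x33 };  /* hypothetical KSV bytes */
        unsigned int j;

        for (j = 0; j < sha_empty; j++) {
                unsigned int off = (sizeof(sha_text) - j - 1 - sha_leftovers) * 8;

                sha_text |= (uint32_t)ksv[j] << off;
        }
        printf("sha_text = 0x%08x\n", sha_text);  /* 0xaa112233 */
        return 0;
}
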
struct intel_hdcp *hdcp = &connector->hdcp;
enum port port = dig_port->base.port;
enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
+ u32 repeater_ctl;
int ret;
drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being disabled...\n",
return -ETIMEDOUT;
}
+ repeater_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder,
+ port);
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ intel_de_read(dev_priv, HDCP_REP_CTL) & ~repeater_ctl);
+
ret = hdcp->shim->toggle_signalling(dig_port, false);
if (ret) {
drm_err(&dev_priv->drm, "Failed to disable HDCP signalling\n");
case DRM_MODE_SCALE_NONE:
WARN_ON(adjusted_mode->crtc_hdisplay != crtc_state->pipe_src_w);
WARN_ON(adjusted_mode->crtc_vdisplay != crtc_state->pipe_src_h);
- /* fall through */
+ fallthrough;
case DRM_MODE_SCALE_FULLSCREEN:
x = y = 0;
width = adjusted_mode->crtc_hdisplay;
default:
drm_WARN(&dev_priv->drm, 1,
"unknown pixel multiplier specified\n");
- /* fall through */
+ fallthrough;
case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break;
case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break;
switch (sdvo->controlled_output) {
case SDVO_OUTPUT_LVDS1:
mask |= SDVO_OUTPUT_LVDS1;
- /* fall through */
+ fallthrough;
case SDVO_OUTPUT_LVDS0:
mask |= SDVO_OUTPUT_LVDS0;
- /* fall through */
+ fallthrough;
case SDVO_OUTPUT_TMDS1:
mask |= SDVO_OUTPUT_TMDS1;
- /* fall through */
+ fallthrough;
case SDVO_OUTPUT_TMDS0:
mask |= SDVO_OUTPUT_TMDS0;
- /* fall through */
+ fallthrough;
case SDVO_OUTPUT_RGB1:
mask |= SDVO_OUTPUT_RGB1;
- /* fall through */
+ fallthrough;
case SDVO_OUTPUT_RGB0:
mask |= SDVO_OUTPUT_RGB0;
break;
case DRM_FORMAT_RGB565:
if (INTEL_GEN(dev_priv) >= 11)
break;
- /* fall through */
+ fallthrough;
case DRM_FORMAT_C8:
case DRM_FORMAT_XRGB16161616F:
case DRM_FORMAT_XBGR16161616F:
if (modifier == DRM_FORMAT_MOD_LINEAR ||
modifier == I915_FORMAT_MOD_X_TILED)
return true;
- /* fall through */
+ fallthrough;
default:
return false;
}
if (modifier == DRM_FORMAT_MOD_LINEAR ||
modifier == I915_FORMAT_MOD_X_TILED)
return true;
- /* fall through */
+ fallthrough;
default:
return false;
}
if (modifier == DRM_FORMAT_MOD_LINEAR ||
modifier == I915_FORMAT_MOD_X_TILED)
return true;
- /* fall through */
+ fallthrough;
default:
return false;
}
case DRM_FORMAT_ABGR8888:
if (is_ccs_modifier(modifier))
return true;
- /* fall through */
+ fallthrough;
case DRM_FORMAT_RGB565:
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_XBGR2101010:
case DRM_FORMAT_XVYU2101010:
if (modifier == I915_FORMAT_MOD_Yf_TILED)
return true;
- /* fall through */
+ fallthrough;
case DRM_FORMAT_C8:
case DRM_FORMAT_XBGR16161616F:
case DRM_FORMAT_ABGR16161616F:
modifier == I915_FORMAT_MOD_X_TILED ||
modifier == I915_FORMAT_MOD_Y_TILED)
return true;
- /* fall through */
+ fallthrough;
default:
return false;
}
case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
if (!gen12_plane_supports_mc_ccs(dev_priv, plane->id))
return false;
- /* fall through */
+ fallthrough;
case DRM_FORMAT_MOD_LINEAR:
case I915_FORMAT_MOD_X_TILED:
case I915_FORMAT_MOD_Y_TILED:
case DRM_FORMAT_ABGR8888:
if (is_ccs_modifier(modifier))
return true;
- /* fall through */
+ fallthrough;
case DRM_FORMAT_YUYV:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_UYVY:
case DRM_FORMAT_P016:
if (modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS)
return true;
- /* fall through */
+ fallthrough;
case DRM_FORMAT_RGB565:
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_XBGR2101010:
modifier == I915_FORMAT_MOD_X_TILED ||
modifier == I915_FORMAT_MOD_Y_TILED)
return true;
- /* fall through */
+ fallthrough;
default:
return false;
}
switch (lane_mask) {
default:
MISSING_CASE(lane_mask);
- /* fall-through */
+ fallthrough;
case 0x1:
case 0x2:
case 0x4:
switch (err) {
default:
WARN_ONCE(err, "unhandled error in %s: %i\n", __func__, err);
- /* fallthrough */
+ fallthrough;
case -EIO: /* shmemfs failure from swap device */
case -EFAULT: /* purged object */
case -ENODEV: /* bad object, how did you get here! */
switch (type) {
default:
MISSING_CASE(type);
- /* fallthrough - to use PAGE_KERNEL anyway */
+ fallthrough; /* to use PAGE_KERNEL anyway */
case I915_MAP_WB:
pgprot = PAGE_KERNEL;
break;
switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
default:
MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
- /* fall through */
+ fallthrough;
case GEN7_STOLEN_RESERVED_1M:
*size = 1024 * 1024;
break;
case 4:
if (!IS_G4X(i915))
break;
- /* fall through */
+ fallthrough;
case 5:
g4x_get_stolen_reserved(i915, uncore,
&reserved_base, &reserved_size);
break;
default:
MISSING_CASE(INTEL_GEN(i915));
- /* fall-through */
+ fallthrough;
case 11:
case 12:
icl_get_stolen_reserved(i915, uncore,
GFP_KERNEL |
__GFP_NORETRY |
__GFP_NOWARN);
- /*
- * Using __get_user_pages_fast() with a read-only
- * access is questionable. A read-only page may be
- * COW-broken, and then this might end up giving
- * the wrong side of the COW..
- *
- * We may or may not care.
- */
if (pvec) {
/* defer to worker if malloc fails */
if (!i915_gem_object_is_readonly(obj))
break;
default:
MISSING_CASE(class);
- /* fall through */
+ fallthrough;
case VIDEO_DECODE_CLASS:
case VIDEO_ENHANCEMENT_CLASS:
case COPY_ENGINE_CLASS:
switch (vma->ggtt_view.type) {
default:
GEM_BUG_ON(vma->ggtt_view.type);
- /* fall through */
+ fallthrough;
case I915_GGTT_VIEW_NORMAL:
vma->pages = vma->obj->mm.pages;
return 0;
*/
default:
GEM_BUG_ON(engine->id);
- /* fallthrough */
+ fallthrough;
case RCS0:
hwsp = RENDER_HWS_PGA_GEN7;
break;
{
u8 *cfg_base = vgpu_cfg_space(vgpu);
u8 mask, new, old;
+ pci_power_t pwr;
int i = 0;
for (; i < bytes && (off + i < sizeof(pci_cfg_space_rw_bmp)); i++) {
/* For other configuration space directly copy as it is. */
if (i < bytes)
memcpy(cfg_base + off + i, src + i, bytes - i);
+
+ if (off == vgpu->cfg_space.pmcsr_off && vgpu->cfg_space.pmcsr_off) {
+ pwr = (pci_power_t __force)(*(u16 *)(&vgpu_cfg_space(vgpu)[off])
+ & PCI_PM_CTRL_STATE_MASK);
+ if (pwr == PCI_D3hot)
+ vgpu->d3_entered = true;
+ gvt_dbg_core("vgpu-%d power status changed to %d\n",
+ vgpu->id, pwr);
+ }
}
/**
struct intel_gvt *gvt = vgpu->gvt;
const struct intel_gvt_device_info *info = &gvt->device_info;
u16 *gmch_ctl;
+ u8 next;
memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
info->cfg_space_size);
pci_resource_len(gvt->gt->i915->drm.pdev, 2);
memset(vgpu_cfg_space(vgpu) + PCI_ROM_ADDRESS, 0, 4);
+
+ /* PM Support */
+ vgpu->cfg_space.pmcsr_off = 0;
+ if (vgpu_cfg_space(vgpu)[PCI_STATUS] & PCI_STATUS_CAP_LIST) {
+ next = vgpu_cfg_space(vgpu)[PCI_CAPABILITY_LIST];
+ do {
+ if (vgpu_cfg_space(vgpu)[next + PCI_CAP_LIST_ID] == PCI_CAP_ID_PM) {
+ vgpu->cfg_space.pmcsr_off = next + PCI_PM_CTRL;
+ break;
+ }
+ next = vgpu_cfg_space(vgpu)[next + PCI_CAP_LIST_NEXT];
+ } while (next);
+ }
}
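
Finding pmcsr_off above is a standard PCI capability-list walk: the byte at PCI_CAPABILITY_LIST heads a linked list in config space, each entry carrying its capability ID and the offset of the next entry. A minimal sketch over a raw config-space buffer, assuming only the standard offsets:

#include <stdint.h>
#include <stdio.h>

#define PCI_STATUS            0x06
#define PCI_STATUS_CAP_LIST   0x10
#define PCI_CAPABILITY_LIST   0x34
#define PCI_CAP_ID_PM         0x01
#define PCI_PM_CTRL           0x04   /* PMCSR offset within the PM capability */

static unsigned int find_pmcsr(const uint8_t *cfg)
{
        uint8_t next;

        if (!(cfg[PCI_STATUS] & PCI_STATUS_CAP_LIST))
                return 0;
        for (next = cfg[PCI_CAPABILITY_LIST]; next; next = cfg[next + 1])
                if (cfg[next] == PCI_CAP_ID_PM)
                        return next + PCI_PM_CTRL;
        return 0;
}

int main(void)
{
        uint8_t cfg[256] = { 0 };

        cfg[PCI_STATUS] = PCI_STATUS_CAP_LIST;
        cfg[PCI_CAPABILITY_LIST] = 0x40;  /* hypothetical layout */
        cfg[0x40] = PCI_CAP_ID_PM;        /* PM capability at 0x40 */
        cfg[0x41] = 0x00;                 /* end of list */
        printf("PMCSR at 0x%02x\n", find_pmcsr(cfg));  /* 0x44 */
        return 0;
}
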
/**
return create_scratch_page_tree(vgpu);
}
-static void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
+void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
{
struct list_head *pos, *n;
struct intel_vgpu_mm *mm;
int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
unsigned int off, void *p_data, unsigned int bytes);
+void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu);
+
#endif /* _GVT_GTT_H_ */
struct intel_vgpu_cfg_space {
unsigned char virtual_cfg_space[PCI_CFG_SPACE_EXP_SIZE];
struct intel_vgpu_pci_bar bar[INTEL_GVT_MAX_BAR_NUM];
+ u32 pmcsr_off;
};
#define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space)
struct intel_vgpu_submission submission;
struct radix_tree_root page_track_tree;
u32 hws_pga[I915_NUM_ENGINES];
+ /* Set on PCI_D3, reset on DMLR, not reflecting the actual PM state */
+ bool d3_entered;
struct dentry *debugfs;
switch (notification) {
case VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE:
root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
- /* fall through */
+ fallthrough;
case VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE:
mm = intel_vgpu_get_ppgtt_mm(vgpu, root_entry_type, pdps);
return PTR_ERR_OR_ZERO(mm);
intel_gvt_deactivate_vgpu(vgpu);
mutex_lock(&vgpu->vgpu_lock);
+ vgpu->d3_entered = false;
intel_vgpu_clean_workloads(vgpu, ALL_ENGINES);
intel_vgpu_dmabuf_cleanup(vgpu);
mutex_unlock(&vgpu->vgpu_lock);
INIT_RADIX_TREE(&vgpu->page_track_tree, GFP_KERNEL);
idr_init(&vgpu->object_idr);
intel_vgpu_init_cfg_space(vgpu, param->primary);
+ vgpu->d3_entered = false;
ret = intel_vgpu_init_mmio(vgpu);
if (ret)
/* full GPU reset or device model level reset */
if (engine_mask == ALL_ENGINES || dmlr) {
intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
- intel_vgpu_invalidate_ppgtt(vgpu);
+ if (engine_mask == ALL_ENGINES)
+ intel_vgpu_invalidate_ppgtt(vgpu);
/*fence will not be reset during virtual reset */
if (dmlr) {
- intel_vgpu_reset_gtt(vgpu);
+ if (!vgpu->d3_entered) {
+ intel_vgpu_invalidate_ppgtt(vgpu);
+ intel_vgpu_destroy_all_ppgtt_mm(vgpu);
+ }
+ intel_vgpu_reset_ggtt(vgpu, true);
intel_vgpu_reset_resource(vgpu);
}
intel_vgpu_reset_cfg_space(vgpu);
/* only reset the failsafe mode when dmlr reset */
vgpu->failsafe = false;
- vgpu->pv_notified = false;
+ /*
+ * PCI_D0 is set before dmlr, so reset d3_entered here
+ * once we are done using it.
+ */
+ if (vgpu->d3_entered)
+ vgpu->d3_entered = false;
+ else
+ vgpu->pv_notified = false;
}
}
return dst;
}
+static inline bool cmd_desc_is(const struct drm_i915_cmd_descriptor * const desc,
+ const u32 cmd)
+{
+ return desc->cmd.value == (cmd & desc->cmd.mask);
+}
+
static bool check_cmd(const struct intel_engine_cs *engine,
const struct drm_i915_cmd_descriptor *desc,
const u32 *cmd, u32 length)
* allowed mask/value pair given in the whitelist entry.
*/
if (reg->mask) {
- if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
+ if (cmd_desc_is(desc, MI_LOAD_REGISTER_MEM)) {
DRM_DEBUG("CMD: Rejected LRM to masked register 0x%08X\n",
reg_addr);
return false;
}
- if (desc->cmd.value == MI_LOAD_REGISTER_REG) {
+ if (cmd_desc_is(desc, MI_LOAD_REGISTER_REG)) {
DRM_DEBUG("CMD: Rejected LRR to masked register 0x%08X\n",
reg_addr);
return false;
}
- if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1) &&
+ if (cmd_desc_is(desc, MI_LOAD_REGISTER_IMM(1)) &&
(offset + 2 > length ||
(cmd[offset + 1] & reg->mask) != reg->value)) {
DRM_DEBUG("CMD: Rejected LRI to masked register 0x%08X\n",
break;
}
- if (desc->cmd.value == MI_BATCH_BUFFER_START) {
+ if (cmd_desc_is(desc, MI_BATCH_BUFFER_START)) {
ret = check_bbstart(cmd, offset, length, batch_length,
batch_addr, shadow_addr,
jump_whitelist);
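
Command descriptors match on a value/mask pair, so comparing desc->cmd.value against a raw header only works when the header's variable bits (such as a length field) happen to be zero; the new cmd_desc_is() helper masks first. A tiny illustration with a hypothetical opcode layout:

#include <stdint.h>
#include <stdio.h>

struct cmd_desc {
        uint32_t value;
        uint32_t mask;
};

static int cmd_desc_is(const struct cmd_desc *desc, uint32_t cmd)
{
        return desc->value == (cmd & desc->mask);
}

int main(void)
{
        /* Hypothetical encoding: opcode in bits 31:8, DWORD length in 7:0. */
        const struct cmd_desc lri = { .value = 0x11000000u, .mask = 0xffffff00u };
        uint32_t header = 0x11000003u;   /* same opcode, length field = 3 */

        printf("raw compare: %d\n", lri.value == header);          /* 0: misses */
        printf("masked compare: %d\n", cmd_desc_is(&lri, header)); /* 1: matches */
        return 0;
}
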
switch (engine->id) {
default:
MISSING_CASE(engine->id);
- /* fall through */
+ fallthrough;
case RCS0:
mmio = RENDER_HWS_PGA_GEN7;
break;
container_of(event->pmu, typeof(*i915), pmu.base);
drm_WARN_ON(&i915->drm, event->parent);
-
- module_put(THIS_MODULE);
}
static int
if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
/* Requires a mutex for sampling! */
return -ENODEV;
- /* Fall-through. */
+ fallthrough;
case I915_PMU_REQUESTED_FREQUENCY:
if (INTEL_GEN(i915) < 6)
return -ENODEV;
if (ret)
return ret;
- if (!event->parent) {
- __module_get(THIS_MODULE);
+ if (!event->parent)
event->destroy = i915_pmu_event_destroy;
- }
return 0;
}
if (!pmu->base.attr_groups)
goto err_attr;
+ pmu->base.module = THIS_MODULE;
pmu->base.task_ctx_nr = perf_invalid_context;
pmu->base.event_init = i915_pmu_event_init;
pmu->base.add = i915_pmu_event_add;
#include "../i915_selftest.h"
#include "i915_random.h"
-#define SZ_8G (1ULL << 33)
-
static void __igt_dump_block(struct i915_buddy_mm *mm,
struct i915_buddy_block *block,
bool buddy)
static void igt_mm_config(u64 *size, u64 *chunk_size)
{
I915_RND_STATE(prng);
- u64 s, ms;
+ u32 s, ms;
/* Nothing fancy, just try to get an interesting bit pattern */
prandom_seed_state(&prng, i915_selftest.random_seed);
- s = i915_prandom_u64_state(&prng) & (SZ_8G - 1);
- ms = BIT_ULL(12 + (prandom_u32_state(&prng) % ilog2(s >> 12)));
- s = max(s & -ms, ms);
+ /* Let size be a random number of pages up to 8 GB (2M pages) */
+ s = 1 + i915_prandom_u32_max_state((BIT(33 - 12)) - 1, &prng);
+ /* Let the chunk size be a random power of 2 less than size */
+ ms = BIT(i915_prandom_u32_max_state(ilog2(s), &prng));
+ /* Round size down to the chunk size */
+ s &= -ms;
- *chunk_size = ms;
- *size = s;
+ /* Convert from pages to bytes */
+ *chunk_size = (u64)ms << 12;
+ *size = (u64)s << 12;
}
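
The rewritten igt_mm_config keeps the randomness in 32-bit page counts and widens to bytes only at the end, preserving the invariants that the chunk size is a power of two no larger than the size and that the size is chunk-aligned. A rough standalone sketch of the same construction using the C library PRNG rather than the i915 helpers:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static unsigned int ilog2_u32(uint32_t v)
{
        unsigned int l = 0;

        while (v >>= 1)
                l++;
        return l;
}

int main(void)
{
        /* Size in 4 KiB pages, up to 8 GB worth (2^21 pages), as in the test. */
        uint32_t s = 1 + (uint32_t)rand() % ((1u << 21) - 1);
        uint32_t ms = 1u << ((unsigned int)rand() % (ilog2_u32(s) + 1));
        uint64_t size, chunk;

        s &= -ms;                        /* round size down to the chunk */
        chunk = (uint64_t)ms << 12;      /* pages -> bytes */
        size = (uint64_t)s << 12;
        printf("size=%llu chunk=%llu aligned=%d\n",
               (unsigned long long)size, (unsigned long long)chunk,
               size % chunk == 0);
        return 0;
}
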
static int igt_buddy_alloc_smoke(void *arg)
drm_mode_config_cleanup(&i915->drm);
out:
+ i915_params_free(&i915->params);
put_device(&i915->drm.pdev->dev);
i915->drm.pdev = NULL;
}
i915->drm.pdev = pdev;
drmm_add_final_kfree(&i915->drm, i915);
+ i915_params_copy(&i915->params, &i915_modparams);
+
intel_runtime_pm_init_early(&i915->runtime_pm);
/* Using the global GTT may ask questions about KMS users, so prepare */
if (fb->pitches[1] != fb->pitches[2])
return -EINVAL;
- /* fall-through */
+ fallthrough;
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV16:
ubo = drm_plane_state_to_ubo(state);
/* YTR is forbidden for non XBGR formats */
if (modifier & AFBC_FORMAT_MOD_YTR)
return -EINVAL;
- /* fall through */
+ fallthrough;
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ABGR8888:
return MAFBC_FMT_RGBA8888;
priv->viu.vd1_addr2,
priv->viu.vd1_stride2,
priv->viu.vd1_height2);
- /* fallthrough */
+ fallthrough;
case 2:
gem = drm_fb_cma_get_gem_obj(fb, 1);
priv->viu.vd1_addr1 = gem->paddr + fb->offsets[1];
priv->viu.vd1_addr1,
priv->viu.vd1_stride1,
priv->viu.vd1_height1);
- /* fallthrough */
+ fallthrough;
case 1:
gem = drm_fb_cma_get_gem_obj(fb, 0);
priv->viu.vd1_addr0 = gem->paddr + fb->offsets[0];
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
if (priv->lastctx == ctx)
break;
- /* fall-thru */
+ fallthrough;
case MSM_SUBMIT_CMD_BUF:
/* copy commands into RB: */
obj = submit->bos[submit->cmd[i].idx].obj;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
if (priv->lastctx == ctx)
break;
- /* fall-thru */
+ fallthrough;
case MSM_SUBMIT_CMD_BUF:
OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_HM_POWER_COLLAPSE_ENABLE);
- /* Fall through */
+ fallthrough;
case GMU_IDLE_STATE_SPTP:
gmu_write(gmu, REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST,
GMU_PWR_COL_HYST);
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
if (priv->lastctx == ctx)
break;
- /* fall-thru */
+ fallthrough;
case MSM_SUBMIT_CMD_BUF:
OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
/* ignore if there has not been a ctx switch: */
if (priv->lastctx == ctx)
break;
- /* fall-thru */
+ fallthrough;
case MSM_SUBMIT_CMD_BUF:
OUT_PKT3(ring, adreno_is_a4xx(adreno_gpu) ?
CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2);
interlock[NV50_DISP_INTERLOCK_OVLY] |
NVDEF(NV507D, UPDATE, NOT_DRIVER_FRIENDLY, FALSE) |
NVDEF(NV507D, UPDATE, NOT_DRIVER_UNFRIENDLY, FALSE) |
- NVDEF(NV507D, UPDATE, INHIBIT_INTERRUPTS, FALSE));
+ NVDEF(NV507D, UPDATE, INHIBIT_INTERRUPTS, FALSE),
+
+ SET_NOTIFIER_CONTROL,
+ NVDEF(NV507D, SET_NOTIFIER_CONTROL, NOTIFY, DISABLE));
return PUSH_KICK(push);
}
#include "disp.h"
#include "head.h"
-#include <nvif/push507c.h>
+#include <nvif/pushc37b.h>
#include <nvhw/class/clc37d.h>
dmac->push->end = dmac->push->bgn;
dmac->max = 0x1000/4 - 1;
+ /* EVO channels are affected by a HW bug where the last 12 DWORDs
+ * of the push buffer cannot be used safely.
+ */
+ if (disp->oclass < GV100_DISP)
+ dmac->max -= 12;
+
args->pushbuf = nvif_handle(&dmac->_push.mem.object);
ret = nv50_chan_create(device, disp, oclass, head, data, size,
PUSH_ASSERT(!((o) & ~DRF_SMASK(NV507C_DMA_JUMP_OFFSET)), "offset"); \
PUSH_DATA__((p), NVDEF(NV507C, DMA, OPCODE, JUMP) | \
NVVAL(NV507C, DMA, JUMP_OFFSET, (o) >> 2), \
- "jump 0x%08x - %s", (u32)(o), __func__); \
+ " jump 0x%08x - %s", (u32)(o), __func__); \
} while(0)
#endif
switch (venc_mode) {
default:
WARN_ON_ONCE(1);
- /* Fall-through */
+ fallthrough;
case VENC_MODE_PAL:
venc->config = &venc_config_pal_trm;
break;
if (omap_state->manually_updated)
return;
- spin_lock_irq(&crtc->dev->event_lock);
drm_crtc_vblank_on(crtc);
+
ret = drm_crtc_vblank_get(crtc);
WARN_ON(ret != 0);
+ spin_lock_irq(&crtc->dev->event_lock);
omap_crtc_arm_event(crtc);
spin_unlock_irq(&crtc->dev->event_lock);
}
pi->force_pcie_gen = RADEON_PCIE_GEN2;
if (current_link_speed == RADEON_PCIE_GEN2)
break;
- /* fall through */
+ fallthrough;
case RADEON_PCIE_GEN2:
if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
break;
((idx_value >> 21) & 0xF));
return -EINVAL;
}
- /* Fall through. */
+ fallthrough;
case 6:
track->cb[i].cpp = 4;
break;
return -EINVAL;
}
/* The same rules apply as for DXT3/5. */
- /* Fall through. */
+ fallthrough;
case R300_TX_FORMAT_DXT3:
case R300_TX_FORMAT_DXT5:
track->textures[i].cpp = 1;
default:
/* force to 1 pipe */
num_pipes = 1;
- /* fall through */
+ fallthrough;
case 1:
tmp = (0 << 1);
break;
return -EINVAL;
}
}
- /* fall through */
+ fallthrough;
case V_0280A0_CLEAR_ENABLE:
{
uint32_t block_max = G_028100_CMASK_BLOCK_MAX(track->cb_color_mask[i]);
break;
case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA:
is_array = true;
- /* fall through */
+ fallthrough;
case V_038000_SQ_TEX_DIM_2D_MSAA:
array_check.nsamples = 1 << llevel;
llevel = 0;
/* get matching reference and feedback divider */
*ref_div = min(max(den/post_div, 1u), ref_div_max);
- *fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);
+ *fb_div = max(nom * *ref_div * post_div / den, 1u);
/* limit fb divider to its maximum */
if (*fb_div > fb_div_max) {
if (p->rdev->family >= CHIP_PALM)
return 0;
- /* fall through */
+ fallthrough;
default:
DRM_ERROR("UVD codec not supported by hardware %d!\n",
stream_type);
si_pi->force_pcie_gen = RADEON_PCIE_GEN2;
if (current_link_speed == RADEON_PCIE_GEN2)
break;
- /* fall through */
+ fallthrough;
case RADEON_PCIE_GEN2:
if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
break;
WREG32(RS_DQ_RD_RET_CONF, 0x3f);
WREG32(MC_CONFIG, 0x1f);
- /* fall through */
+ fallthrough;
case CHIP_RV670:
case CHIP_RV635:
case SAVAGE_PRIM_TRILIST_201:
reorder = 1;
prim = SAVAGE_PRIM_TRILIST;
- /* fall through */
+ fallthrough;
case SAVAGE_PRIM_TRILIST:
if (n % 3 != 0) {
DRM_ERROR("wrong number of vertices %u in TRILIST\n",
case SAVAGE_PRIM_TRILIST_201:
reorder = 1;
prim = SAVAGE_PRIM_TRILIST;
- /* fall through */
+ fallthrough;
case SAVAGE_PRIM_TRILIST:
if (n % 3 != 0) {
DRM_ERROR("wrong number of vertices %u in TRILIST\n",
case SAVAGE_PRIM_TRILIST_201:
reorder = 1;
prim = SAVAGE_PRIM_TRILIST;
- /* fall through */
+ fallthrough;
case SAVAGE_PRIM_TRILIST:
if (n % 3 != 0) {
DRM_ERROR("wrong number of indices %u in TRILIST\n", n);
case SAVAGE_PRIM_TRILIST_201:
reorder = 1;
prim = SAVAGE_PRIM_TRILIST;
- /* fall through */
+ fallthrough;
case SAVAGE_PRIM_TRILIST:
if (n % 3 != 0) {
DRM_ERROR("wrong number of indices %u in TRILIST\n", n);
ret = -EINVAL;
goto done;
}
- /* fall through */
+ fallthrough;
case SAVAGE_CMD_DMA_PRIM:
case SAVAGE_CMD_VB_PRIM:
if (!first_draw_cmd)
switch (info->channels) {
case 8:
audio_cfg |= HDMI_AUD_CFG_CH78_VALID;
- /* fall through */
+ fallthrough;
case 6:
audio_cfg |= HDMI_AUD_CFG_CH56_VALID;
- /* fall through */
+ fallthrough;
case 4:
audio_cfg |= HDMI_AUD_CFG_CH34_VALID | HDMI_AUD_CFG_8CH;
- /* fall through */
+ fallthrough;
case 2:
audio_cfg |= HDMI_AUD_CFG_CH12_VALID;
break;
switch (encoder->encoder_type) {
case DRM_MODE_ENCODER_LVDS:
is_lvds = true;
- /* Fallthrough */
+ fallthrough;
case DRM_MODE_ENCODER_DSI:
case DRM_MODE_ENCODER_NONE:
channel = 0;
/* R and B components are only 5 bits deep */
val |= SUN4I_TCON0_FRM_CTL_MODE_R;
val |= SUN4I_TCON0_FRM_CTL_MODE_B;
- /* Fall through */
+ fallthrough;
case MEDIA_BUS_FMT_RGB666_1X18:
case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
/* Fall through: enable dithering */
ret = sun6i_dsi_dcs_read(dsi, msg);
break;
}
- /* Else, fall through */
+ fallthrough;
default:
ret = -EINVAL;
default:
WARN_ON_ONCE(1);
- /* fallthrough */
+ fallthrough;
case 4:
max = 4;
break;
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_XRGB8888:
reg |= LCDC_V2_TFT_24BPP_UNPACK;
- /* fallthrough */
+ fallthrough;
case DRM_FORMAT_BGR888:
case DRM_FORMAT_RGB888:
reg |= LCDC_V2_TFT_24BPP_MODE;
if (unlikely(ret != 0))
return ret;
}
- /* fall through */
+ fallthrough;
case TTM_PL_TT:
ret = ttm_bo_vm_access_kmap(bo, offset, buf, len, write);
break;
switch (vsg->state) {
case dr_via_device_mapped:
via_unmap_blit_from_device(pdev, vsg);
- /* fall through */
+ fallthrough;
case dr_via_desc_pages_alloc:
for (i = 0; i < vsg->num_desc_pages; ++i) {
if (vsg->desc_pages[i] != NULL)
free_page((unsigned long)vsg->desc_pages[i]);
}
kfree(vsg->desc_pages);
- /* fall through */
+ fallthrough;
case dr_via_pages_locked:
unpin_user_pages_dirty_lock(vsg->pages, vsg->num_pages,
(vsg->direction == DMA_FROM_DEVICE));
- /* fall through */
+ fallthrough;
case dr_via_pages_alloc:
vfree(vsg->pages);
- /* fall through */
+ fallthrough;
default:
vsg->state = dr_via_sg_init;
}
virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
vfpriv->ctx_id, buflist, out_fence);
+ dma_fence_put(&out_fence->f);
virtio_gpu_notify(vgdev);
return 0;
}
sg_free_table(shmem->pages);
+ kfree(shmem->pages);
shmem->pages = NULL;
drm_gem_shmem_unpin(&bo->base.base);
}
switch (backend_state) {
case XenbusStateReconfiguring:
- /* fall through */
case XenbusStateReconfigured:
- /* fall through */
case XenbusStateInitialised:
break;
break;
case XenbusStateUnknown:
- /* fall through */
case XenbusStateClosed:
if (xb_dev->state == XenbusStateClosed)
break;
#include <drm/drm_probe_helper.h>
#include <xen/balloon.h>
+#include <xen/xen.h>
#include "xen_drm_front.h"
#include "xen_drm_front_gem.h"
* allocate ballooned pages which will be used to map
* grant references provided by the backend
*/
- ret = alloc_xenballooned_pages(xen_obj->num_pages,
- xen_obj->pages);
+ ret = xen_alloc_unpopulated_pages(xen_obj->num_pages,
+ xen_obj->pages);
if (ret < 0) {
DRM_ERROR("Cannot allocate %zu ballooned pages: %d\n",
xen_obj->num_pages, ret);
} else {
if (xen_obj->pages) {
if (xen_obj->be_alloc) {
- free_xenballooned_pages(xen_obj->num_pages,
- xen_obj->pages);
+ xen_free_unpopulated_pages(xen_obj->num_pages,
+ xen_obj->pages);
gem_free_pages_array(xen_obj);
} else {
drm_gem_put_pages(&xen_obj->base,
switch (fmt) {
default:
WARN_ON(1);
- /* fall-through */
+ fallthrough;
case MEDIA_BUS_FMT_RGB888_1X24:
return IPU_DC_MAP_RGB24;
case MEDIA_BUS_FMT_RGB565_1X16:
}
/*
+ * Compute the size of a report.
+ */
+static size_t hid_compute_report_size(struct hid_report *report)
+{
+ if (report->size)
+ return ((report->size - 1) >> 3) + 1;
+
+ return 0;
+}
+
+/*
* Create a report. 'data' has to be allocated using
* hid_alloc_report_buf() so that it has proper size.
*/
if (report->id > 0)
*data++ = report->id;
- memset(data, 0, ((report->size - 1) >> 3) + 1);
+ memset(data, 0, hid_compute_report_size(report));
for (n = 0; n < report->maxfield; n++)
hid_output_field(report->device, report->field[n], data);
}
csize--;
}
- rsize = ((report->size - 1) >> 3) + 1;
+ rsize = hid_compute_report_size(report);
if (report_enum->numbered && rsize >= HID_MAX_BUFFER_SIZE)
rsize = HID_MAX_BUFFER_SIZE - 1;
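
report->size counts bits, and the byte length is ((bits - 1) >> 3) + 1; the zero guard in the new hid_compute_report_size() helper matters because with an unsigned size of 0 the old open-coded expression wrapped around to an enormous value. A quick standalone check of the arithmetic:

#include <stdio.h>

static size_t report_bytes(unsigned int bits)
{
        if (bits)
                return ((bits - 1) >> 3) + 1;
        return 0;
}

int main(void)
{
        printf("%zu %zu %zu %zu\n",
               report_bytes(0),    /* 0, not huge as ((0u - 1) >> 3) + 1 would be */
               report_bytes(1),    /* 1 */
               report_bytes(8),    /* 1 */
               report_bytes(9));   /* 2 */
        return 0;
}
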
};
module_param_cb(g6_is_space, &cougar_g6_is_space_ops, &g6_is_space, 0644);
-static struct hid_device_id cougar_id_table[] = {
+static const struct hid_device_id cougar_id_table[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_SOLID_YEAR,
USB_DEVICE_ID_COUGAR_500K_GAMING_KEYBOARD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SOLID_YEAR,
ret = input_mt_init_slots(input, ELAN_MAX_FINGERS, INPUT_MT_POINTER);
if (ret) {
hid_err(hdev, "Failed to init elan MT slots: %d\n", ret);
+ input_free_device(input);
return ret;
}
if (ret) {
hid_err(hdev, "Failed to register elan input device: %d\n",
ret);
+ input_mt_destroy_slots(input);
input_free_device(input);
return ret;
}
#define USB_DEVICE_ID_LENOVO_TPPRODOCK 0x6067
#define USB_DEVICE_ID_LENOVO_X1_COVER 0x6085
#define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D 0x608d
+#define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019 0x6019
+#define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_602E 0x602e
+#define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6093 0x6093
#define USB_VENDOR_ID_LG 0x1fd2
#define USB_DEVICE_ID_LG_MULTITOUCH 0x0064
#define USB_DEVICE_ID_MS_POWER_COVER 0x07da
#define USB_DEVICE_ID_MS_XBOX_ONE_S_CONTROLLER 0x02fd
#define USB_DEVICE_ID_MS_PIXART_MOUSE 0x00cb
+#define USB_DEVICE_ID_8BITDO_SN30_PRO_PLUS 0x02e0
#define USB_VENDOR_ID_MOJO 0x8282
#define USB_DEVICE_ID_RETRO_ADAPTER 0x3201
#define USB_DEVICE_ID_SAITEK_RAT9 0x0cfa
#define USB_DEVICE_ID_SAITEK_MMO7 0x0cd0
#define USB_DEVICE_ID_SAITEK_X52 0x075c
+#define USB_DEVICE_ID_SAITEK_X52_2 0x0255
+#define USB_DEVICE_ID_SAITEK_X52_PRO 0x0762
#define USB_VENDOR_ID_SAMSUNG 0x0419
#define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001
}
mapped:
+ /* Mapping failed, bail out */
+ if (!bit)
+ return;
+
if (device->driver->input_mapped &&
device->driver->input_mapped(device, hidinput, field, usage,
&bit, &max) < 0) {
* but it does have a separate power-on (reset) value.
*/
g15->leds[i].cdev.name = "g15::power_on_backlight_val";
- /* fall through */
+ fallthrough;
case LG_G15_KBD_BRIGHTNESS:
g15->leds[i].cdev.brightness_set_blocking =
lg_g510_kbd_led_set;
workitem.type = WORKITEM_TYPE_EMPTY;
break;
}
- /* fall-through */
+ fallthrough;
case REPORT_TYPE_NOTIF_DEVICE_UNPAIRED:
workitem.quad_id_msb =
dj_report->report_params[DEVICE_PAIRED_PARAM_EQUAD_ID_MSB];
return rdesc;
}
-static struct hid_device_id macally_id_table[] = {
+static const struct hid_device_id macally_id_table[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_SOLID_YEAR,
USB_DEVICE_ID_MACALLY_IKEY_KEYBOARD) },
{ }
{
switch (usage->hid & HID_USAGE_PAGE) {
case 0xff070000:
- /* fall-through */
case HID_UP_DIGITIZER:
/* ignore those axis */
return -1;
case HID_UP_GENDESK:
switch (usage->hid) {
case HID_GD_X:
- /* fall-through */
case HID_GD_Y:
- /* fall-through */
case HID_GD_RFKILL_BTN:
/* ignore those axis */
return -1;
.driver_data = MS_SURFACE_DIAL },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_XBOX_ONE_S_CONTROLLER),
.driver_data = MS_QUIRK_FF },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_8BITDO_SN30_PRO_PLUS),
+ .driver_data = MS_QUIRK_FF },
{ }
};
MODULE_DEVICE_TABLE(hid, ms_devices);
code = BTN_0 + ((usage->hid - 1) & HID_USAGE);
hid_map_usage(hi, usage, bit, max, EV_KEY, code);
+ if (!*bit)
+ return -1;
input_set_capability(hi->input, EV_KEY, code);
return 1;
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M406XE), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE_ID2), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_602E), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6093), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C007), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_KEYBOARD_G710_PLUS), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPORT), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD), HID_QUIRK_BADPAD },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52_2), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52_PRO), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
{ HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD2), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB), HID_QUIRK_NOGET },
switch (report->id) {
case RMI_READ_DATA_REPORT_ID:
- /* fall-through */
case RMI_ATTN_REPORT_ID:
return;
}
case kone_mouse_event_switch_profile:
kone->actual_dpi = kone->profiles[event->value - 1].
startup_dpi;
- /* fall through */
+ fallthrough;
case kone_mouse_event_osd_profile:
kone->actual_profile = event->value;
break;
}
break;
}
- /* FALL THROUGH */
+ fallthrough;
case VID_PID(USB_VENDOR_ID_HUION,
USB_DEVICE_ID_HUION_TABLET):
case VID_PID(USB_VENDOR_ID_HUION,
case WIIMOTE_EXT_GUITAR:
return sprintf(buf, "guitar\n");
case WIIMOTE_EXT_UNKNOWN:
- /* fallthrough */
default:
return sprintf(buf, "unknown\n");
}
case WIIMOTE_DEV_PENDING:
return sprintf(buf, "pending\n");
case WIIMOTE_DEV_UNKNOWN:
- /* fallthrough */
default:
return sprintf(buf, "unknown\n");
}
dev_err(&client->dev, "failed to change power setting.\n");
set_pwr_exit:
+
+ /*
+ * The HID over I2C specification states that if a DEVICE needs time
+ * after the PWR_ON request, it should utilise CLOCK stretching.
+ * However, it has been observed that the Windows driver provides a
+ * 1ms sleep between the PWR_ON and RESET requests.
+ * According to Goodix, Windows even waits 60 ms after (other?)
+ * PWR_ON requests. Testing has confirmed that several devices
+ * will not work properly without a delay after a PWR_ON request.
+ */
+ if (!ret && power_state == I2C_HID_PWR_ON)
+ msleep(60);
+
return ret;
}
if (ret)
goto out_unlock;
- /*
- * The HID over I2C specification states that if a DEVICE needs time
- * after the PWR_ON request, it should utilise CLOCK stretching.
- * However, it has been observered that the Windows driver provides a
- * 1ms sleep between the PWR_ON and RESET requests and that some devices
- * rely on this.
- */
- usleep_range(1000, 5000);
-
i2c_hid_dbg(ihid, "resetting...\n");
ret = i2c_hid_command(client, &hid_reset_cmd, NULL, 0);
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/string.h>
-#include <linux/timekeeping.h>
#include <linux/usb.h>
set_bit(HID_NO_BANDWIDTH, &usbhid->iofl);
} else {
clear_bit(HID_NO_BANDWIDTH, &usbhid->iofl);
-
- if (test_bit(HID_RESUME_RUNNING, &usbhid->iofl)) {
- /*
- * In case events are generated while nobody was
- * listening, some are released when the device
- * is re-opened. Wait 50 msec for the queue to
- * empty before allowing events to go through
- * hid.
- */
- usbhid->input_start_time =
- ktime_add_ms(ktime_get_coarse(), 50);
- }
}
}
spin_unlock_irqrestore(&usbhid->lock, flags);
if (!test_bit(HID_OPENED, &usbhid->iofl))
break;
usbhid_mark_busy(usbhid);
- if (test_bit(HID_RESUME_RUNNING, &usbhid->iofl)) {
- if (ktime_before(ktime_get_coarse(),
- usbhid->input_start_time))
- break;
- clear_bit(HID_RESUME_RUNNING, &usbhid->iofl);
+ if (!test_bit(HID_RESUME_RUNNING, &usbhid->iofl)) {
+ hid_input_report(urb->context, HID_INPUT_REPORT,
+ urb->transfer_buffer,
+ urb->actual_length, 1);
+ /*
+ * autosuspend refused while keys are pressed
+ * because most keyboards don't wake up when
+ * a key is released
+ */
+ if (hid_check_keys_pressed(hid))
+ set_bit(HID_KEYS_PRESSED, &usbhid->iofl);
+ else
+ clear_bit(HID_KEYS_PRESSED, &usbhid->iofl);
}
- hid_input_report(urb->context, HID_INPUT_REPORT,
- urb->transfer_buffer, urb->actual_length, 1);
- /*
- * autosuspend refused while keys are pressed
- * because most keyboards don't wake up when
- * a key is released
- */
- if (hid_check_keys_pressed(hid))
- set_bit(HID_KEYS_PRESSED, &usbhid->iofl);
- else
- clear_bit(HID_KEYS_PRESSED, &usbhid->iofl);
break;
case -EPIPE: /* stall */
usbhid_mark_busy(usbhid);
usb_autopm_put_interface(usbhid->intf);
+ /*
+ * In case events are generated while nobody was listening,
+ * some are released when the device is re-opened.
+ * Wait 50 msec for the queue to empty before allowing events
+ * to go through hid.
+ */
+ if (res == 0)
+ msleep(50);
+
+ clear_bit(HID_RESUME_RUNNING, &usbhid->iofl);
+
Done:
mutex_unlock(&usbhid->mutex);
return res;
switch (cmd) {
case HIDIOCGUSAGE:
+ if (uref->usage_index >= field->report_count)
+ goto inval;
uref->value = field->value[uref->usage_index];
if (copy_to_user(user_arg, uref, sizeof(*uref)))
goto fault;
goto goodreturn;
case HIDIOCSUSAGE:
+ if (uref->usage_index >= field->report_count)
+ goto inval;
field->value[uref->usage_index] = uref->value;
goto goodreturn;
break;
case HIDIOCGUCODE:
- /* fall through */
case HIDIOCGUSAGE:
case HIDIOCSUSAGE:
case HIDIOCGUSAGES:
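The two added checks above validate the user-supplied usage_index against field->report_count before it ever indexes field->value[], closing an out-of-bounds read (HIDIOCGUSAGE) and write (HIDIOCSUSAGE). A minimal sketch of the pattern, with hypothetical struct and function names standing in for the hiddev internals:

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

/* hypothetical mirror of the relevant hiddev field layout */
struct field {
	size_t report_count;
	int value[64];
};

/* reject the index before using it, never after */
static int get_usage(const struct field *f, size_t index, int *out)
{
	if (index >= f->report_count)
		return -EINVAL;
	*out = f->value[index];
	return 0;
}

int main(void)
{
	struct field f = { .report_count = 2, .value = { 7, 9 } };
	int v = 0;

	printf("%d %d\n", get_usage(&f, 1, &v), get_usage(&f, 5, &v));
	printf("v=%d\n", v);	/* 9: the rejected access never ran */
	return 0;
}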
#include <linux/types.h>
#include <linux/slab.h>
-#include <linux/ktime.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/timer.h>
struct mutex mutex; /* start/stop/open/close */
spinlock_t lock; /* fifo spinlock */
unsigned long iofl; /* I/O flags (CTRL_RUNNING, OUT_RUNNING) */
- ktime_t input_start_time; /* When to start handling input */
struct timer_list io_retry; /* Retry timer */
unsigned long stop_retry; /* Time to give up, in jiffies */
unsigned int retry_delay; /* Delay length in ms */
case 2: /* Mouse with wheel */
input_report_key(input, BTN_MIDDLE, data[1] & 0x04);
- /* fall through */
+ fallthrough;
case 3: /* Mouse without wheel */
wacom->tool[0] = BTN_TOOL_MOUSE;
case 0x04:
wacom_intuos_bt_process_data(wacom, data + i);
i += 10;
- /* fall through */
+ fallthrough;
case 0x03:
wacom_intuos_bt_process_data(wacom, data + i);
i += 10;
for (i = 0; i < wacom->led.count; i++)
wacom_update_led(wacom, features->numbered_buttons,
value, i);
- /* fall through*/
+ fallthrough;
default:
do_report = true;
break;
switch (features->type) {
case GRAPHIRE_BT:
__clear_bit(ABS_MISC, input_dev->absbit);
- /* fall through */
+ fallthrough;
case WACOM_MO:
case WACOM_G4:
input_set_abs_params(input_dev, ABS_DISTANCE, 0,
features->distance_max,
features->distance_fuzz, 0);
- /* fall through */
+ fallthrough;
case GRAPHIRE:
input_set_capability(input_dev, EV_REL, REL_WHEEL);
case INTUOS4S:
input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
input_abs_set_res(input_dev, ABS_Z, 287);
- /* fall through */
+ fallthrough;
case INTUOS:
wacom_setup_intuos(wacom_wac);
case TABLETPC:
case TABLETPCE:
__clear_bit(ABS_MISC, input_dev->absbit);
- /* fall through */
+ fallthrough;
case DTUS:
case DTUSX:
case PTU:
__set_bit(BTN_STYLUS2, input_dev->keybit);
- /* fall through */
+ fallthrough;
case PENPARTNER:
__set_bit(BTN_TOOL_PEN, input_dev->keybit);
input_abs_set_res(input_dev, ABS_MT_POSITION_X, 40);
input_abs_set_res(input_dev, ABS_MT_POSITION_Y, 40);
- /* fall through */
+ fallthrough;
case INTUOS5:
case INTUOS5L:
input_set_abs_params(input_dev, ABS_MT_WIDTH_MAJOR, 0, features->x_max, 0, 0);
input_set_abs_params(input_dev, ABS_MT_WIDTH_MINOR, 0, features->y_max, 0, 0);
input_set_abs_params(input_dev, ABS_MT_ORIENTATION, 0, 1, 0, 0);
- /* fall through */
+ fallthrough;
case WACOM_27QHDT:
if (wacom_wac->shared->touch->product == 0x32C ||
__set_bit(SW_MUTE_DEVICE, input_dev->swbit);
wacom_wac->shared->has_mute_touch_switch = true;
}
- /* fall through */
+ fallthrough;
case MTSCREEN:
case MTTPC:
case MTTPC_B:
case TABLETPC2FG:
input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_DIRECT);
- /*fall through */
+ fallthrough;
case TABLETPC:
case TABLETPCE:
case INTUOSHT2:
input_dev->evbit[0] |= BIT_MASK(EV_SW);
__set_bit(SW_MUTE_DEVICE, input_dev->swbit);
- /* fall through */
+ fallthrough;
case BAMBOO_PT:
case BAMBOO_TOUCH:
__set_bit(KEY_BUTTONCONFIG, input_dev->keybit);
__set_bit(KEY_INFO, input_dev->keybit);
- /* fall through */
+ fallthrough;
case WACOM_21UX2:
case WACOM_BEE:
case INTUOS3:
case INTUOS3L:
input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0);
- /* fall through */
+ fallthrough;
case INTUOS3S:
input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0);
* ID_INPUT_TABLET to be set.
*/
__set_bit(BTN_STYLUS, input_dev->keybit);
- /* fall through */
+ fallthrough;
case INTUOS4:
case INTUOS4L:
/* CMT speech workaround */
if (atomic_read(&ssi->tx_usecnt))
break;
- /* Else, fall through */
+ fallthrough;
case RECEIVING:
mod_timer(&ssi->keep_alive, jiffies +
msecs_to_jiffies(SSIP_KATOUT));
case SEND_READY:
if (atomic_read(&ssi->tx_usecnt) == 0)
break;
- /* Fall through */
+ fallthrough;
/*
	 * Workaround for cmt-speech: in that case
	 * we rely on audio timers.
case ACTIVE:
dev_err(&cl->device, "Boot info req on active state\n");
ssip_error(cl);
- /* Fall through */
+ fallthrough;
case INIT:
case HANDSHAKE:
spin_lock_bh(&ssi->lock);
break;
case ABORT_RATE_CHANGE:
dev_dbg(&ssi->device, "abort rate change\n");
- /* Fall through */
+ fallthrough;
case POST_RATE_CHANGE:
dev_dbg(&ssi->device, "post rate change (%lu -> %lu)\n",
clk_data->old_rate, clk_data->new_rate);
out->body.kvp_ip_val.dhcp_enabled = in->kvp_ip_val.dhcp_enabled;
- /* fallthrough */
+ fallthrough;
case KVP_OP_GET_IP_INFO:
utf16s_to_utf8s((wchar_t *)in->kvp_ip_val.adapter_id,
spinlock_t lock;
} host_ts;
-static struct timespec64 hv_get_adj_host_time(void)
+static inline u64 reftime_to_ns(u64 reftime)
{
- struct timespec64 ts;
- u64 newtime, reftime;
+ return (reftime - WLTIMEDELTA) * 100;
+}
+
+/*
+ * Hard coded threshold for host timesync delay: 600 seconds
+ */
+static const u64 HOST_TIMESYNC_DELAY_THRESH = 600 * (u64)NSEC_PER_SEC;
+
+static int hv_get_adj_host_time(struct timespec64 *ts)
+{
+ u64 newtime, reftime, timediff_adj;
unsigned long flags;
+ int ret = 0;
spin_lock_irqsave(&host_ts.lock, flags);
reftime = hv_read_reference_counter();
- newtime = host_ts.host_time + (reftime - host_ts.ref_time);
- ts = ns_to_timespec64((newtime - WLTIMEDELTA) * 100);
+
+ /*
+	 * We need to let the caller know that the last update from the
+	 * host is older than the max allowable threshold. clock_gettime()
+ * and PTP ioctl do not have a documented error that we could
+ * return for this specific case. Use ESTALE to report this.
+ */
+ timediff_adj = reftime - host_ts.ref_time;
+ if (timediff_adj * 100 > HOST_TIMESYNC_DELAY_THRESH) {
+ pr_warn_once("TIMESYNC IC: Stale time stamp, %llu nsecs old\n",
+ (timediff_adj * 100));
+ ret = -ESTALE;
+ }
+
+ newtime = host_ts.host_time + timediff_adj;
+ *ts = ns_to_timespec64(reftime_to_ns(newtime));
spin_unlock_irqrestore(&host_ts.lock, flags);
- return ts;
+ return ret;
}
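The helper above converts Hyper-V's reference counter, which ticks every 100 ns, into nanoseconds, and reports -ESTALE once the cached host sample is more than 600 seconds old. A user-space sketch of the same staleness test, assuming the 100 ns tick; read_ref_counter() is a hypothetical stand-in for hv_read_reference_counter():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL
static const uint64_t DELAY_THRESH_NS = 600ULL * NSEC_PER_SEC;

/* hypothetical reference counter: 7e9 ticks == 700 seconds */
static uint64_t read_ref_counter(void)
{
	return 7000000000ULL;
}

/* true if the last host sample is older than the threshold */
static bool host_sample_is_stale(uint64_t last_host_ref)
{
	uint64_t ticks = read_ref_counter() - last_host_ref;

	return ticks * 100 > DELAY_THRESH_NS;	/* one tick == 100 ns */
}

int main(void)
{
	/* a sample from boot (700 s ago) is stale, one from 10 s ago is not */
	printf("%d %d\n", host_sample_is_stale(0),
	       host_sample_is_stale(read_ref_counter() - 100000000ULL));
	return 0;
}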
static void hv_set_host_time(struct work_struct *work)
{
- struct timespec64 ts = hv_get_adj_host_time();
- do_settimeofday64(&ts);
+ struct timespec64 ts;
+
+ if (!hv_get_adj_host_time(&ts))
+ do_settimeofday64(&ts);
}
/*
struct ictimesync_ref_data *refdata;
u8 *time_txf_buf = util_timesynch.recv_buffer;
- vmbus_recvpacket(channel, time_txf_buf,
- HV_HYP_PAGE_SIZE, &recvlen, &requestid);
+ /*
+ * Drain the ring buffer and use the last packet to update
+ * host_ts
+ */
+ while (1) {
+ int ret = vmbus_recvpacket(channel, time_txf_buf,
+ HV_HYP_PAGE_SIZE, &recvlen,
+ &requestid);
+ if (ret) {
+ pr_warn_once("TimeSync IC pkt recv failed (Err: %d)\n",
+ ret);
+ break;
+ }
+
+ if (!recvlen)
+ break;
- if (recvlen > 0) {
icmsghdrp = (struct icmsg_hdr *)&time_txf_buf[
sizeof(struct vmbuspipe_hdr)];
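Draining the ring buffer matters because time samples are only useful in order: applying anything but the newest packet would step the clock backwards. A self-contained sketch of the drain-and-keep-last loop; recv_packet() is a hypothetical stand-in for vmbus_recvpacket(), returning 0 once the queue is empty:

#include <stdio.h>
#include <string.h>

/* simulated queue of three packets, oldest first */
static const char *queue[] = { "old", "older", "newest" };
static int head;

static int recv_packet(char *buf, size_t len)
{
	if (head >= 3)
		return 0;	/* queue empty */
	snprintf(buf, len, "%s", queue[head++]);
	return (int)strlen(buf);
}

int main(void)
{
	char pkt[16] = "", latest[16] = "";

	/* drain the queue, remembering only the most recent packet */
	while (recv_packet(pkt, sizeof(pkt)) > 0)
		memcpy(latest, pkt, sizeof(latest));

	printf("%s\n", latest);	/* prints "newest" */
	return 0;
}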
static int hv_ptp_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
{
- *ts = hv_get_adj_host_time();
-
- return 0;
+ return hv_get_adj_host_time(ts);
}
static struct ptp_clock_info ptp_hyperv_info = {
case 3:
return "+1.5V";
}
- /* fall through */
+ fallthrough;
case 2:
if (!(data->pin_cfg[1] & ADT7462_PIN22_INPUT))
return "+12V3";
case 3:
return "+1.5";
}
- /* fall through */
+ fallthrough;
case 11:
if (data->pin_cfg[3] >> ADT7462_PIN28_SHIFT ==
ADT7462_PIN28_VOLT &&
case 3:
return 7800;
}
- /* fall through */
+ fallthrough;
case 2:
if (!(data->pin_cfg[1] & ADT7462_PIN22_INPUT))
return 62500;
case 3:
return 7800;
}
- /* fall through */
+ fallthrough;
case 11:
case 12:
if (data->pin_cfg[3] >> ADT7462_PIN28_SHIFT ==
}
ret = applesmc_read_key(LIGHT_SENSOR_LEFT_KEY, buffer, data_length);
+ if (ret)
+ goto out;
/* newer macbooks report a single 10-bit bigendian value */
if (data_length == 10) {
left = be16_to_cpu(*(__be16 *)(buffer + 6)) >> 2;
goto out;
}
left = buffer[2];
+
+ ret = applesmc_read_key(LIGHT_SENSOR_RIGHT_KEY, buffer, data_length);
if (ret)
goto out;
- ret = applesmc_read_key(LIGHT_SENSOR_RIGHT_KEY, buffer, data_length);
right = buffer[2];
out:
to_index(attr));
ret = applesmc_read_key(newkey, buffer, 2);
- speed = ((buffer[0] << 8 | buffer[1]) >> 2);
-
if (ret)
return ret;
- else
- return snprintf(sysfsbuf, PAGE_SIZE, "%u\n", speed);
+
+ speed = ((buffer[0] << 8 | buffer[1]) >> 2);
+ return snprintf(sysfsbuf, PAGE_SIZE, "%u\n", speed);
}
static ssize_t applesmc_store_fan_speed(struct device *dev,
u8 buffer[2];
ret = applesmc_read_key(FANS_MANUAL, buffer, 2);
- manual = ((buffer[0] << 8 | buffer[1]) >> to_index(attr)) & 0x01;
-
if (ret)
return ret;
- else
- return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", manual);
+
+ manual = ((buffer[0] << 8 | buffer[1]) >> to_index(attr)) & 0x01;
+ return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", manual);
}
static ssize_t applesmc_store_fan_manual(struct device *dev,
return -EINVAL;
ret = applesmc_read_key(FANS_MANUAL, buffer, 2);
- val = (buffer[0] << 8 | buffer[1]);
if (ret)
goto out;
+ val = (buffer[0] << 8 | buffer[1]);
+
if (input)
val = val | (0x01 << to_index(attr));
else
u32 count;
ret = applesmc_read_key(KEY_COUNT_KEY, buffer, 4);
- count = ((u32)buffer[0]<<24) + ((u32)buffer[1]<<16) +
- ((u32)buffer[2]<<8) + buffer[3];
-
if (ret)
return ret;
- else
- return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", count);
+
+ count = ((u32)buffer[0]<<24) + ((u32)buffer[1]<<16) +
+ ((u32)buffer[2]<<8) + buffer[3];
+ return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", count);
}
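All four applesmc hunks above apply the same reordering: decode the SMC buffer only after applesmc_read_key() is known to have succeeded, so a failed read can no longer leak stale stack contents to sysfs. Condensed into a sketch, with read_key() as a hypothetical stand-in for the SMC accessor:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* hypothetical SMC read: fills buf on success, returns <0 on error */
static int read_key(const char *key, uint8_t *buf, size_t len)
{
	(void)key;
	memset(buf, 0, len);
	buf[0] = 0x12;
	buf[1] = 0x34;
	return 0;
}

static int read_key_count(uint32_t *out)
{
	uint8_t buf[4];
	int ret = read_key("#KEY", buf, sizeof(buf));

	if (ret)
		return ret;	/* buf may be garbage: do not decode it */

	*out = ((uint32_t)buf[0] << 24) | ((uint32_t)buf[1] << 16) |
	       ((uint32_t)buf[2] << 8) | buf[3];
	return 0;
}

int main(void)
{
	uint32_t count;

	if (!read_key_count(&count))
		printf("%#x\n", count);	/* 0x12340000 */
	return 0;
}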
static ssize_t applesmc_key_at_index_read_show(struct device *dev,
switch (id->driver_data) {
case emc1404:
data->groups[2] = &emc1404_group;
- /* fall through */
+ fallthrough;
case emc1403:
data->groups[1] = &emc1403_group;
- /* fall through */
+ fallthrough;
case emc1402:
data->groups[0] = &emc1402_group;
}
data->pwm_auto_point_pwm[nr][0] =
f71882fg_read8(data,
F71882FG_REG_POINT_PWM(nr, 0));
- /* Fall through */
+ fallthrough;
case f71862fg:
data->pwm_auto_point_pwm[nr][1] =
f71882fg_read8(data,
case f71869a:
/* These always have signed auto point temps */
data->auto_point_temp_signed = 1;
- /* Fall through - to select correct fan/pwm reg bank! */
+ fallthrough; /* to select correct fan/pwm reg bank! */
case f71889fg:
case f71889ed:
case f71889a:
case mode_temperature:
if (tmp > 0x8000)
tmp -= 0xffff;
+ tmp *= 100; /* convert to millidegrees celsius */
break;
case mode_voltage_raw:
tmp = clamp_val(tmp, 0, BIT(GSC_HWMON_RESOLUTION));
val &= 0x1f;
if (val == 0x1f)
return 0;
- /* fall through */
+ fallthrough;
case 25: /* AMD NPT 0Fh */
val &= 0x3f;
return (val < 32) ? 1550 - 25 * val
case 84: /* VRM 8.4 */
val &= 0x0f;
- /* fall through */
+ fallthrough;
case 82: /* VRM 8.2 */
val &= 0x1f;
return val == 0x1f ? 0 :
if (ret)
return ret;
- /* fall through */
+ fallthrough;
case hwmon_curr_crit:
case hwmon_curr_max:
if (!resistance_uo)
case thermal_cruise:
nct6775_write_value(data, data->REG_TARGET[nr],
data->target_temp[nr]);
- /* fall through */
+ fallthrough;
default:
reg = nct6775_read_value(data, data->REG_FAN_MODE[nr]);
reg = (reg & ~data->tolerance_mask) |
if (ret < 0)
return ret;
cnt = ((ret & 0xff00) >> 3) | (ret & 0x1f);
- if (cnt == 0x1fff)
+ if (cnt == 0 || cnt == 0x1fff)
rpm = 0;
else
rpm = 1350000 / cnt;
if (ret < 0)
return ret;
cnt = ((ret & 0xff00) >> 3) | (ret & 0x1f);
- if (cnt == 0x1fff)
+ if (cnt == 0 || cnt == 0x1fff)
rpm = 0;
else
rpm = 1350000 / cnt;
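Both tachometer hunks guard the same division: a raw count of 0 would divide by zero, and the all-ones count 0x1fff is the sensor's way of reporting no rotation, so both now map to 0 RPM. As a sketch, keeping the driver's 1350000 conversion constant:

#include <stdio.h>

/* conversion constant as used by the driver */
static int count_to_rpm(unsigned int cnt)
{
	if (cnt == 0 || cnt == 0x1fff)	/* stalled or absent fan */
		return 0;
	return 1350000 / cnt;
}

int main(void)
{
	printf("%d %d %d\n", count_to_rpm(0), count_to_rpm(0x1fff),
	       count_to_rpm(1350));	/* 0 0 1000 */
	return 0;
}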
switch (sensors->freq.version) {
case 2:
show_freq = occ_show_freq_2;
- /* fall through */
+ fallthrough;
case 1:
num_attrs += (sensors->freq.num_sensors * 2);
break;
switch (sensors->power.version) {
case 2:
show_power = occ_show_power_2;
- /* fall through */
+ fallthrough;
case 1:
num_attrs += (sensors->power.num_sensors * 4);
break;
break;
case 3:
show_caps = occ_show_caps_3;
- /* fall through */
+ fallthrough;
case 2:
num_attrs += (sensors->caps.num_sensors * 8);
break;
raa_dmpvr1_2rail,
raa_dmpvr2_1rail,
raa_dmpvr2_2rail,
+ raa_dmpvr2_2rail_nontc,
raa_dmpvr2_3rail,
raa_dmpvr2_hv,
};
info->pages = 1;
info->read_word_data = raa_dmpvr2_read_word_data;
break;
+ case raa_dmpvr2_2rail_nontc:
+ info->func[0] &= ~PMBUS_HAVE_TEMP;
+ info->func[1] &= ~PMBUS_HAVE_TEMP;
+ fallthrough;
case raa_dmpvr2_2rail:
info->pages = 2;
info->read_word_data = raa_dmpvr2_read_word_data;
{"raa228000", raa_dmpvr2_hv},
{"raa228004", raa_dmpvr2_hv},
{"raa228006", raa_dmpvr2_hv},
- {"raa228228", raa_dmpvr2_2rail},
+ {"raa228228", raa_dmpvr2_2rail_nontc},
{"raa229001", raa_dmpvr2_2rail},
{"raa229004", raa_dmpvr2_2rail},
{}
case W83781D_DEFAULT_BETA:
dev_warn(dev, "Sensor type %d is deprecated, please use 4 "
"instead\n", W83781D_DEFAULT_BETA);
- /* fall through */
+ fallthrough;
case 4: /* thermistor */
tmp = w83627hf_read_value(data, W83781D_REG_SCFG1);
w83627hf_write_value(data, W83781D_REG_SCFG1,
dev_warn(dev,
"Sensor type %d is deprecated, please use 4 instead\n",
W83781D_DEFAULT_BETA);
- /* fall through */
+ fallthrough;
case 4: /* thermistor */
tmp = w83781d_read_value(data, W83781D_REG_SCFG1);
w83781d_write_value(data, W83781D_REG_SCFG1,
if (temp_chan >= 4)
break;
data->temp_mode |= 1 << temp_chan;
- /* fall through */
+ fallthrough;
case 0x3: /* Thermistor */
data->has_temp |= 1 << temp_chan;
break;
switch (mode) {
case EDDEVID_IMPL_FULL:
drvdata->edvidsr_present = true;
- /* Fall through */
+ fallthrough;
case EDDEVID_IMPL_EDPCSR_EDCIDSR:
drvdata->edcidsr_present = true;
- /* Fall through */
+ fallthrough;
case EDDEVID_IMPL_EDPCSR:
/*
* In ARM DDI 0487A.k, the EDDEVID1.PCSROffset is used to
return NOTIFY_BAD;
break;
case CPU_PM_EXIT:
- /* fallthrough */
case CPU_PM_ENTER_FAILED:
if (drvdata->state_needs_restore)
etm4_cpu_restore(drvdata);
*/
switch (drvdata->memwidth) {
case TMC_MEM_INTF_WIDTH_32BITS:
- /* fallthrough */
case TMC_MEM_INTF_WIDTH_64BITS:
- /* fallthrough */
case TMC_MEM_INTF_WIDTH_128BITS:
mask = GENMASK(31, 4);
break;
/* Global packets (GERR, XSYNC, TRIG) are sent with register writes */
case STP_PACKET_GERR:
reg += 4;
- /* fall through */
+ fallthrough;
case STP_PACKET_XSYNC:
reg += 8;
- /* fall through */
+ fallthrough;
case STP_PACKET_TRIG:
if (flags & STP_PACKET_TIMESTAMPED)
/* mark the last byte */
if (!process_call && (i == msg->len - 1))
- val |= 1 << M_TX_WR_STATUS_SHIFT;
+ val |= BIT(M_TX_WR_STATUS_SHIFT);
iproc_i2c_wr_reg(iproc_i2c, M_TX_OFFSET, val);
}
*/
addr = i2c_8bit_addr_from_msg(msg);
/* mark it the last byte out */
- val = addr | (1 << M_TX_WR_STATUS_SHIFT);
+ val = addr | BIT(M_TX_WR_STATUS_SHIFT);
iproc_i2c_wr_reg(iproc_i2c, M_TX_OFFSET, val);
}
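The switch to BIT() is not cosmetic when the shift is the most significant bit of a 32-bit word, as the write-status flag here appears to be: BIT(n) expands to an unsigned long shift, while (1 << 31) overflows a signed int, which is undefined behaviour and sign-extends if the result is ever widened. A small illustration:

#include <stdint.h>
#include <stdio.h>

#define BIT(nr)	(1UL << (nr))	/* unsigned shift, as in the kernel's bits.h */

int main(void)
{
	/* (1 << 31) overflows a signed int (undefined behaviour) and,
	 * where it happens to "work", sign-extends when widened: */
	uint64_t bad = (uint64_t)(1 << 31);	/* 0xffffffff80000000 */
	uint64_t good = BIT(31);		/* 0x80000000 */

	printf("%#llx %#llx\n", (unsigned long long)bad,
	       (unsigned long long)good);
	return 0;
}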
major = OMAP_I2C_REV_SCHEME_0_MAJOR(omap->rev);
break;
case OMAP_I2C_SCHEME_1:
- /* FALLTHROUGH */
default:
omap->regs = (u8 *)reg_map_ip_v2;
rev = (rev << 16) |
case I2C_SMBUS_BYTE:
req.buffer_ra = cpu_to_be64(__pa(&data->byte));
req.size = cpu_to_be32(1);
- /* Fall through */
+ fallthrough;
case I2C_SMBUS_QUICK:
req.type = (read_write == I2C_SMBUS_READ) ?
OPAL_I2C_RAW_READ : OPAL_I2C_RAW_WRITE;
/* master sent stop */
if (ssr_filtered & SSR) {
i2c_slave_event(priv->slave, I2C_SLAVE_STOP, &value);
+ rcar_i2c_write(priv, ICSCR, SIE | SDBS); /* clear our NACK */
rcar_i2c_write(priv, ICSIER, SAR);
rcar_i2c_write(priv, ICSSR, ~SSR & 0xff);
}
dev_warn(&adap->dev, "failed to enumerate I2C slaves\n");
}
-const struct acpi_device_id *
-i2c_acpi_match_device(const struct acpi_device_id *matches,
- struct i2c_client *client)
-{
- if (!(client && matches))
- return NULL;
-
- return acpi_match_device(matches, &client->dev);
-}
-
static const struct acpi_device_id i2c_acpi_force_400khz_device_ids[] = {
/*
* These Silead touchscreen controllers only work at 400KHz, for
* or ACPI ID table is supplied for the probing device.
*/
if (!driver->id_table &&
- !i2c_acpi_match_device(dev->driver->acpi_match_table, client) &&
+ !acpi_driver_match_device(dev, dev->driver) &&
!i2c_of_match_device(dev->driver->of_match_table, client)) {
status = -ENODEV;
goto put_sync_adapter;
}
#ifdef CONFIG_ACPI
-const struct acpi_device_id *
-i2c_acpi_match_device(const struct acpi_device_id *matches,
- struct i2c_client *client);
void i2c_acpi_register_devices(struct i2c_adapter *adap);
int i2c_acpi_get_irq(struct i2c_client *client);
#else /* CONFIG_ACPI */
static inline void i2c_acpi_register_devices(struct i2c_adapter *adap) { }
-static inline const struct acpi_device_id *
-i2c_acpi_match_device(const struct acpi_device_id *matches,
- struct i2c_client *client)
-{
- return NULL;
-}
static inline int i2c_acpi_get_irq(struct i2c_client *client)
{
ret = dw_i2c_clk_cfg(master);
if (ret)
return ret;
- /* fall through */
+ fallthrough;
case I3C_BUS_MODE_PURE:
ret = dw_i3c_clk_cfg(master);
if (ret)
if (!HPT370_ALLOW_ATA100_5 ||
check_in_drive_list(drive, bad_ata100_5))
return ATA_UDMA4;
- /* fall through */
+ fallthrough;
case HPT372 :
case HPT372A:
case HPT372N:
case HPT374 :
if (ata_id_is_sata(drive->id))
mask &= ~0x0e;
- /* fall through */
+ fallthrough;
default:
return mask;
}
case HPT374 :
if (ata_id_is_sata(drive->id))
return 0x00;
- /* fall through */
+ fallthrough;
default:
return 0x07;
}
*/
if (scsi_req(rq)->cmd[0] == GPCMD_START_STOP_UNIT)
break;
- /* fall-through */
+ fallthrough;
case DATA_PROTECT:
/*
* No point in retrying after an illegal request or data
case REQ_OP_DRV_IN:
case REQ_OP_DRV_OUT:
expiry = ide_cd_expiry;
- /*FALLTHRU*/
+ fallthrough;
default:
timeout = ATAPI_WAIT_PC;
break;
* (maintains previous driver behaviour)
*/
break;
- /* fall through */
+ fallthrough;
case CAPACITY_CURRENT:
/* Normal Zip/LS-120 disks */
if (memcmp(cap_desc, &floppy->cap_desc, 8))
}
/* Early cdrom models used zero */
type = ide_cdrom;
- /* fall through */
+ fallthrough;
case ide_cdrom:
drive->dev_flags |= IDE_DFLAG_REMOVABLE;
#ifdef CONFIG_PPC
return pre_task_out_intr(drive, cmd);
}
handler = task_pio_intr;
- /* fall through */
+ fallthrough;
case ATA_PROT_NODATA:
if (handler == NULL)
handler = task_no_data_intr;
hwif->expiry = dma_ops->dma_timer_expiry;
ide_execute_command(drive, cmd, ide_dma_intr, 2 * WAIT_CMD);
dma_ops->dma_start(drive);
- /* fall through */
+ fallthrough;
default:
return ide_started;
}
goto abort;
}
cmd.tf_flags |= IDE_TFLAG_MULTI_PIO;
- /* fall through */
+ fallthrough;
case TASKFILE_OUT:
cmd.protocol = ATA_PROT_PIO;
- /* fall through */
+ fallthrough;
case TASKFILE_OUT_DMAQ:
case TASKFILE_OUT_DMA:
cmd.tf_flags |= IDE_TFLAG_WRITE;
goto abort;
}
cmd.tf_flags |= IDE_TFLAG_MULTI_PIO;
- /* fall through */
+ fallthrough;
case TASKFILE_IN:
cmd.protocol = ATA_PROT_PIO;
- /* fall through */
+ fallthrough;
case TASKFILE_IN_DMAQ:
case TASKFILE_IN_DMA:
nsect = taskin / SECTOR_SIZE;
	pci_read_config_byte(dev, 0x09, &reg);
if ((reg & 0x0f) != 0x00)
pci_write_config_byte(dev, 0x09, reg&0xf0);
- /* fall through */
+ fallthrough;
case ATA_16:
/* force per drive recovery and active timings
needed on ATA_33 and below chips */
#define CPUIDLE_FLAG_ALWAYS_ENABLE BIT(15)
/*
- * Set this flag for states where the HW flushes the TLB for us
- * and so we don't need cross-calls to keep it consistent.
- * If this flag is set, SW flushes the TLB, so even if the
- * HW doesn't do the flushing, this flag is safe to use.
- */
-#define CPUIDLE_FLAG_TLB_FLUSHED BIT(16)
-
-/*
* MWAIT takes an 8-bit "hint" in EAX "suggesting"
* the C-state (top nibble) and sub-state (bottom nibble)
* 0x00 means "MWAIT(C1)", 0x10 means "MWAIT(C2)" etc.
unsigned long eax = flg2MWAIT(state->flags);
unsigned long ecx = 1; /* break on interrupt flag */
bool tick;
- int cpu = smp_processor_id();
-
- /*
- * leave_mm() to avoid costly and often unnecessary wakeups
- * for flushing the user TLB's associated with the active mm.
- */
- if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED)
- leave_mm(cpu);
if (!static_cpu_has(X86_FEATURE_ARAT)) {
/*
case FXLS8471_DEVICE_ID:
if (ret == data->chip_info->chip_id)
break;
- /* fall through */
+ fallthrough;
default:
ret = -ENODEV;
goto disable_regulators;
delay_max = 10000; /* large range optimises sleepmode */
break;
}
- /* Fall through */
+ fallthrough;
default:
ctrl1 |= AB8500_GPADC_CTRL1_BUF_ENA;
break;
break;
case CPCAP_ADC_BATTI_PI17:
index = req->bank_index;
- /* fallthrough */
+ fallthrough;
default:
req->result += conv_tbl[index].cal_offset;
req->result += conv_tbl[index].align_offset;
case SPS30_READ_AUTO_CLEANING_PERIOD:
buf[0] = SPS30_AUTO_CLEANING_PERIOD >> 8;
buf[1] = (u8)(SPS30_AUTO_CLEANING_PERIOD & 0xff);
- /* fall through */
+ fallthrough;
case SPS30_READ_DATA_READY_FLAG:
case SPS30_READ_DATA:
case SPS30_READ_SERIAL:
break;
case CH_MODE_UNUSED:
- /* fall-through */
default:
switch (st->channel_offstate[i]) {
case CH_OFFSTATE_OUT_TRISTATE:
break;
case CH_OFFSTATE_PULLDOWN:
- /* fall-through */
default:
pulldown |= BIT(i);
break;
case IIO_VAL_INT:
/*
* Convert integer scale to fractional scale by
- * setting the denominator (val2) to one, and...
+ * setting the denominator (val2) to one...
*/
*val2 = 1;
ret = IIO_VAL_FRACTIONAL;
- /* fall through */
+ /* ...and fall through. Say it again for GCC. */
+ fallthrough;
case IIO_VAL_FRACTIONAL:
*val *= regulator_get_voltage(dac->vref) / 1000;
*val2 *= dac->max_ohms;
switch (measurements) {
case 3:
MAX30102_COPY_DATA(2);
- /* fall through */
+ fallthrough;
case 2:
MAX30102_COPY_DATA(1);
- /* fall through */
+ fallthrough;
case 1:
MAX30102_COPY_DATA(0);
break;
adis->tx[9] = (value >> 24) & 0xff;
adis->tx[6] = ADIS_WRITE_REG(reg + 2);
adis->tx[7] = (value >> 16) & 0xff;
- /* fall through */
+ fallthrough;
case 2:
adis->tx[4] = ADIS_WRITE_REG(reg + 1);
adis->tx[5] = (value >> 8) & 0xff;
- /* fall through */
+ fallthrough;
case 1:
adis->tx[2] = ADIS_WRITE_REG(reg);
adis->tx[3] = value & 0xff;
adis->tx[2] = ADIS_READ_REG(reg + 2);
adis->tx[3] = 0;
spi_message_add_tail(&xfers[1], &msg);
- /* fall through */
+ fallthrough;
case 2:
adis->tx[4] = ADIS_READ_REG(reg);
adis->tx[5] = 0;
return scnprintf(buf, len, "%d", vals[0]);
case IIO_VAL_INT_PLUS_MICRO_DB:
scale_db = true;
- /* fall through */
+ fallthrough;
case IIO_VAL_INT_PLUS_MICRO:
if (vals[1] < 0)
return scnprintf(buf, len, "-%d.%06u%s", abs(vals[0]),
break;
case IIO_VAL_INT_PLUS_MICRO_DB:
scale_db = true;
- /* fall through */
+ fallthrough;
case IIO_VAL_INT_PLUS_MICRO:
fract_mult = 100000;
break;
SI1145_LED_CURRENT_45mA);
if (ret < 0)
return ret;
- /* fallthrough */
+ fallthrough;
case 2:
ret = i2c_smbus_write_byte_data(client,
SI1145_REG_PS_LED21,
switch (whoami) {
case AK8974_WHOAMI_VALUE_AMI306:
name = "ami306";
- /* fall-through */
+ fallthrough;
case AK8974_WHOAMI_VALUE_AMI305:
ret = regmap_read(ak8974->map, AMI305_VER, &fw);
if (ret)
case IB_CM_REP_SENT:
case IB_CM_MRA_REP_RCVD:
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
- /* fall through */
+ fallthrough;
case IB_CM_REQ_RCVD:
case IB_CM_MRA_REQ_SENT:
if (IBA_GET(CM_REJ_REASON, rej_msg) == IB_CM_REJ_STALE_CONN)
break;
case IB_CM_DREQ_SENT:
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
- /* fall through */
+ fallthrough;
case IB_CM_REP_RCVD:
case IB_CM_MRA_REP_SENT:
cm_enter_timewait(cm_id_priv);
cm_enter_timewait(cm_id_priv);
break;
}
- /* fall through */
+ fallthrough;
default:
pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
__func__, be32_to_cpu(cm_id_priv->id.local_id),
msg_response = CM_MSG_RESPONSE_OTHER;
break;
}
- /* fall through */
+ fallthrough;
default:
pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
__func__, be32_to_cpu(cm_id_priv->id.local_id),
case IB_CM_MRA_REP_RCVD:
atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
counter[CM_MRA_COUNTER]);
- /* fall through */
+ fallthrough;
default:
pr_debug("%s local_id %d, cm_id_priv->id.state: %d\n",
__func__, be32_to_cpu(cm_id_priv->id.local_id),
qp_attr->retry_cnt = cm_id_priv->retry_count;
qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
- /* fall through */
+ fallthrough;
case IB_QPT_XRC_TGT:
*qp_attr_mask |= IB_QP_TIMEOUT;
qp_attr->timeout = cm_id_priv->av.timeout;
event.event = RDMA_CM_EVENT_ESTABLISHED;
break;
case IB_CM_DREQ_ERROR:
- event.status = -ETIMEDOUT; /* fall through */
+ event.status = -ETIMEDOUT;
+ fallthrough;
case IB_CM_DREQ_RECEIVED:
case IB_CM_DREP_RECEIVED:
if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT,
ret = addr_init();
if (ret) {
- pr_warn("Could't init IB address resolution\n");
+ pr_warn("Couldn't init IB address resolution\n");
goto err_ibnl;
}
switch (ctx->type) {
case RDMA_RW_SIG_MR:
case RDMA_RW_MR:
- /* fallthrough */
for (i = 0; i < ctx->nr_ops; i++) {
rdma_rw_update_lkey(&ctx->reg[i],
ctx->reg[i].wr.wr.opcode !=
case 2:
ib_copy_path_rec_to_user(&resp->ib_route[1],
&route->path_rec[1]);
- /* fall through */
+ fallthrough;
case 1:
ib_copy_path_rec_to_user(&resp->ib_route[0],
&route->path_rec[0]);
case 2:
ib_copy_path_rec_to_user(&resp->ib_route[1],
&route->path_rec[1]);
- /* fall through */
+ fallthrough;
case 1:
ib_copy_path_rec_to_user(&resp->ib_route[0],
&route->path_rec[0]);
return -EOPNOTSUPP;
e->ptr_attr.enum_id = uattr->attr_data.enum_data.elem_id;
- /* fall through */
+ fallthrough;
case UVERBS_ATTR_TYPE_PTR_IN:
/* Ensure that any data provided by userspace beyond the known
* struct is zero. Userspace that knows how to use some future
!uverbs_is_attr_cleared(uattr, val_spec->u.ptr.len))
return -EOPNOTSUPP;
- /* fall through */
+ fallthrough;
case UVERBS_ATTR_TYPE_PTR_OUT:
if (uattr->len < val_spec->u.ptr.min_len ||
(!val_spec->zero_trailing &&
default:
break;
}
- /* fall through */
+ fallthrough;
case IB_WR_SEND_WITH_INV:
rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
break;
struct ib_event event;
unsigned int flags;
- if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
+ if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR &&
+ rdma_is_kernel_res(&qp->ib_qp.res)) {
flags = bnxt_re_lock_cqs(qp);
bnxt_qplib_add_flush_qp(&qp->qplib_qp);
bnxt_re_unlock_cqs(qp, flags);
break;
}
- /* fall thru */
+ fallthrough;
case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
{
case MORIBUND:
case CLOSING:
stop_ep_timer(ep);
- /*FALLTHROUGH*/
+ fallthrough;
case FPDU_MODE:
if (ep->com.qp && ep->com.qp->srq) {
srqidx = ABORT_RSS_SRQIDX_G(
send_fw_act_open_req(ep, atid);
return;
}
- /* fall through */
+ fallthrough;
case FW_EADDRINUSE:
set_bit(ACT_RETRY_INUSE, &ep->com.history);
if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
break;
}
fw_flags |= FW_RI_RDMA_WRITE_WITH_IMMEDIATE;
- /*FALLTHROUGH*/
+ fallthrough;
case IB_WR_RDMA_WRITE:
fw_opcode = FW_RI_RDMA_WRITE_WR;
swsqe->opcode = FW_RI_RDMA_WRITE;
fallthrough;
case 1:
*dest++ = *src++;
- /* fall through */
}
}
case IB_WR_ATOMIC_CMP_AND_SWP:
case IB_WR_ATOMIC_FETCH_AND_ADD:
case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
switch (prev->wr.opcode) {
case IB_WR_TID_RDMA_WRITE:
req = wqe_to_tid_req(prev);
#define HNS_ROCE_CQE_WCMD_EMPTY_BIT 0x2
#define HNS_ROCE_MIN_CQE_CNT 16
-#define HNS_ROCE_RESERVED_SGE 1
-
#define HNS_ROCE_MAX_IRQ_NUM 128
#define HNS_ROCE_SGE_IN_WQE 2
roce_write(hr_dev, ROCEE_EXT_DB_SQ_H_REG, val);
dev_dbg(dev, "ext SDB depth: 0x%x\n", db->ext_db->esdb_dep);
- dev_dbg(dev, "ext SDB threshold: epmty: 0x%x, ful: 0x%x\n",
+	dev_dbg(dev, "ext SDB threshold: empty: 0x%x, full: 0x%x\n",
ext_sdb_alept, ext_sdb_alful);
}
wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);
- if (unlikely(wr->num_sge >= hr_qp->rq.max_gs)) {
+ if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
		ibdev_err(ibdev, "rq:num_sge=%d > qp->rq.max_gs=%d\n",
wr->num_sge, hr_qp->rq.max_gs);
ret = -EINVAL;
if (wr->num_sge < hr_qp->rq.max_gs) {
dseg->lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
dseg->addr = 0;
- dseg->len = cpu_to_le32(HNS_ROCE_INVALID_SGE_LENGTH);
}
/* rq support inline data */
}
if (wr->num_sge < srq->max_gs) {
- dseg[i].len = cpu_to_le32(HNS_ROCE_INVALID_SGE_LENGTH);
- dseg[i].lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
+ dseg[i].len = 0;
+ dseg[i].lkey = cpu_to_le32(0x100);
dseg[i].addr = 0;
}
attr->srq_limit = limit_wl;
attr->max_wr = srq->wqe_cnt - 1;
- attr->max_sge = srq->max_gs - HNS_ROCE_RESERVED_SGE;
+ attr->max_sge = srq->max_gs;
out:
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
#define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ PAGE_SIZE
#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFFF000
#define HNS_ROCE_V2_MAX_INNER_MTPT_NUM 2
-#define HNS_ROCE_INVALID_LKEY 0x0
-#define HNS_ROCE_INVALID_SGE_LENGTH 0x80000000
-
+#define HNS_ROCE_INVALID_LKEY 0x100
#define HNS_ROCE_CMQ_TX_TIMEOUT 30000
#define HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE 2
#define HNS_ROCE_V2_RSV_QPS 8
return -EINVAL;
}
- hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge) +
- HNS_ROCE_RESERVED_SGE);
+ hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge));
if (hr_dev->caps.max_rq_sg <= HNS_ROCE_SGE_IN_WQE)
hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz);
hr_qp->rq_inl_buf.wqe_cnt = 0;
cap->max_recv_wr = cnt;
- cap->max_recv_sge = hr_qp->rq.max_gs - HNS_ROCE_RESERVED_SGE;
+ cap->max_recv_sge = hr_qp->rq.max_gs;
return 0;
}
spin_lock_init(&srq->lock);
srq->wqe_cnt = roundup_pow_of_two(init_attr->attr.max_wr + 1);
- srq->max_gs = init_attr->attr.max_sge + HNS_ROCE_RESERVED_SGE;
+ srq->max_gs = init_attr->attr.max_sge;
if (udata) {
ret = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
case I40IW_CM_STATE_FIN_WAIT1:
case I40IW_CM_STATE_LAST_ACK:
cm_node->cm_id->rem_ref(cm_node->cm_id);
- /* fall through */
+ fallthrough;
case I40IW_CM_STATE_TIME_WAIT:
cm_node->state = I40IW_CM_STATE_CLOSED;
i40iw_rem_ref_cm_node(cm_node);
info->out_rdrsp = true;
break;
case I40IW_AE_SOURCE_RSVD:
- /* fallthrough */
default:
break;
}
LS_64(1, I40IW_CQPSQ_UPESD_ENTRY_VALID)));
set_64bit_val(wqe, 56, info->entry[2].data);
- /* fallthrough */
+ fallthrough;
case 2:
set_64bit_val(wqe, 32,
(LS_64(info->entry[1].cmd, I40IW_CQPSQ_UPESD_SDCMD) |
LS_64(1, I40IW_CQPSQ_UPESD_ENTRY_VALID)));
set_64bit_val(wqe, 40, info->entry[1].data);
- /* fallthrough */
+ fallthrough;
case 1:
set_64bit_val(wqe, 0,
LS_64(info->entry[0].cmd, I40IW_CQPSQ_UPESD_SDCMD));
i40iw_cm_disconn(iwqp);
break;
case I40IW_AE_BAD_CLOSE:
- /* fall through */
case I40IW_AE_RESET_SENT:
i40iw_next_iw_state(iwqp, I40IW_QP_STATE_ERROR, 1, 0, 0);
i40iw_cm_disconn(iwqp);
case I40IW_AE_UDA_XMIT_DGRAM_TOO_LONG:
case I40IW_AE_UDA_XMIT_DGRAM_TOO_SHORT:
ctx_info->err_rq_idx_valid = false;
- /* fall through */
+ fallthrough;
default:
if (!info->sq && ctx_info->err_rq_idx_valid) {
ctx_info->err_rq_idx = info->wqe_idx;
iwdev->iw_status = 0;
i40iw_port_ibevent(iwdev);
i40iw_destroy_rdma_device(iwdev->iwibdev);
- /* fallthrough */
+ fallthrough;
case IP_ADDR_REGISTERED:
if (!iwdev->reset)
i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
- /* fallthrough */
- /* fallthrough */
+ fallthrough;
case PBLE_CHUNK_MEM:
i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc);
- /* fallthrough */
+ fallthrough;
case CEQ_CREATED:
i40iw_dele_ceqs(iwdev);
- /* fallthrough */
+ fallthrough;
case AEQ_CREATED:
i40iw_destroy_aeq(iwdev);
- /* fallthrough */
+ fallthrough;
case IEQ_CREATED:
i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, iwdev->reset);
- /* fallthrough */
+ fallthrough;
case ILQ_CREATED:
i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_ILQ, iwdev->reset);
- /* fallthrough */
+ fallthrough;
case CCQ_CREATED:
i40iw_destroy_ccq(iwdev);
- /* fallthrough */
+ fallthrough;
case HMC_OBJS_CREATED:
i40iw_del_hmc_objects(dev, dev->hmc_info, true, iwdev->reset);
- /* fallthrough */
+ fallthrough;
case CQP_CREATED:
i40iw_destroy_cqp(iwdev, true);
- /* fallthrough */
+ fallthrough;
case INITIAL_STATE:
i40iw_cleanup_cm_core(&iwdev->cm_core);
if (iwdev->vsi.pestat) {
i40iw_del_init_mem(iwdev);
break;
case INVALID_STATE:
- /* fallthrough */
default:
i40iw_pr_err("bad init_state = %d\n", iwdev->init_state);
break;
switch (rsrc->completion) {
case PUDA_HASH_CRC_COMPLETE:
i40iw_free_hash_desc(rsrc->hash_desc);
- /* fall through */
+ fallthrough;
case PUDA_QP_CREATED:
if (!reset)
i40iw_puda_free_qp(rsrc);
i40iw_free_dma_mem(dev->hw, &rsrc->qpmem);
- /* fallthrough */
+ fallthrough;
case PUDA_CQ_CREATED:
if (!reset)
i40iw_puda_free_cq(rsrc);
switch (event) {
case NETDEV_DOWN:
action = I40IW_ARP_DELETE;
- /* Fall through */
+ fallthrough;
case NETDEV_UP:
- /* Fall through */
case NETDEV_CHANGEADDR:
/* Just skip if no need to handle ARP cache */
switch (event) {
case NETDEV_DOWN:
action = I40IW_ARP_DELETE;
- /* Fall through */
+ fallthrough;
case NETDEV_UP:
- /* Fall through */
case NETDEV_CHANGEADDR:
i40iw_manage_arp_cache(iwdev,
netdev->dev_addr,
switch (event) {
case NETDEV_DOWN:
iwdev->iw_status = 0;
- /* Fall through */
+ fallthrough;
case NETDEV_UP:
i40iw_port_ibevent(iwdev);
break;
case I40IW_QP_STATE_RTS:
if (iwqp->iwarp_state == I40IW_QP_STATE_IDLE)
i40iw_send_reset(iwqp->cm_node);
- /* fall through */
+ fallthrough;
case I40IW_QP_STATE_IDLE:
case I40IW_QP_STATE_TERMINATE:
case I40IW_QP_STATE_CLOSING:
switch (ib_wr->opcode) {
case IB_WR_SEND:
- /* fall-through */
case IB_WR_SEND_WITH_INV:
if (ib_wr->opcode == IB_WR_SEND) {
if (ib_wr->send_flags & IB_SEND_SOLICITED)
break;
case IB_WR_RDMA_READ_WITH_INV:
inv_stag = true;
- /* fall-through*/
+ fallthrough;
case IB_WR_RDMA_READ:
if (ib_wr->num_sge > I40IW_MAX_SGE_RD) {
err = -EINVAL;
switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
case MLX4_OPCODE_RDMA_WRITE_IMM:
wc->wc_flags |= IB_WC_WITH_IMM;
- /* fall through */
+ fallthrough;
case MLX4_OPCODE_RDMA_WRITE:
wc->opcode = IB_WC_RDMA_WRITE;
break;
case MLX4_OPCODE_SEND_IMM:
wc->wc_flags |= IB_WC_WITH_IMM;
- /* fall through */
+ fallthrough;
case MLX4_OPCODE_SEND:
case MLX4_OPCODE_SEND_INVAL:
wc->opcode = IB_WC_SEND;
switch (sa_mad->mad_hdr.method) {
case IB_MGMT_METHOD_SET:
may_create = 1;
- /* fall through */
+ fallthrough;
case IB_SA_METHOD_DELETE:
req = kzalloc(sizeof *req, GFP_KERNEL);
if (!req)
pd = to_mxrcd(init_attr->xrcd)->pd;
xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
init_attr->send_cq = to_mxrcd(init_attr->xrcd)->cq;
- /* fall through */
+ fallthrough;
case IB_QPT_XRC_INI:
if (!(to_mdev(pd->device)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
return ERR_PTR(-ENOSYS);
init_attr->recv_cq = init_attr->send_cq;
- /* fall through */
+ fallthrough;
case IB_QPT_RC:
case IB_QPT_UC:
case IB_QPT_RAW_PACKET:
return ERR_PTR(-ENOMEM);
qp->pri.vid = 0xFFFF;
qp->alt.vid = 0xFFFF;
- /* fall through */
+ fallthrough;
case IB_QPT_UD:
{
err = create_qp_common(pd, init_attr, udata, 0, &qp);
switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) {
case MLX5_OPCODE_RDMA_WRITE_IMM:
wc->wc_flags |= IB_WC_WITH_IMM;
- /* fall through */
+ fallthrough;
case MLX5_OPCODE_RDMA_WRITE:
wc->opcode = IB_WC_RDMA_WRITE;
break;
case MLX5_OPCODE_SEND_IMM:
wc->wc_flags |= IB_WC_WITH_IMM;
- /* fall through */
+ fallthrough;
case MLX5_OPCODE_SEND:
case MLX5_OPCODE_SEND_INVAL:
wc->opcode = IB_WC_SEND;
if (MLX5_CAP_GEN(dev->mdev, vport_counters) &&
method == IB_MGMT_METHOD_GET)
return process_pma_cmd(dev, port_num, in, out);
- /* fallthrough */
+ fallthrough;
case MLX5_IB_VENDOR_CLASS1:
- /* fallthrough */
case MLX5_IB_VENDOR_CLASS2:
case IB_MGMT_CLASS_CONG_MGMT: {
if (method != IB_MGMT_METHOD_GET &&
break;
case MLX5_EVENT_TYPE_GENERAL_EVENT:
handle_general_event(ibdev, work->param, &ibev);
- /* fall through */
+ fallthrough;
default:
goto out;
}
switch (attr->qp_type) {
case IB_QPT_XRC_INI:
size += sizeof(struct mlx5_wqe_xrc_seg);
- /* fall through */
+ fallthrough;
case IB_QPT_RC:
size += sizeof(struct mlx5_wqe_ctrl_seg) +
max(sizeof(struct mlx5_wqe_atomic_seg) +
if (attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)
size += sizeof(struct mlx5_wqe_eth_pad) +
sizeof(struct mlx5_wqe_eth_seg);
- /* fall through */
+ fallthrough;
case IB_QPT_SMI:
case MLX5_IB_QPT_HW_GSI:
size += sizeof(struct mlx5_wqe_ctrl_seg) +
switch ((cur_rate - 1) / req_rate) {
case 0: return MTHCA_RATE_MEMFREE_FULL;
case 1: return MTHCA_RATE_MEMFREE_HALF;
- case 2: /* fall through */
+ case 2:
case 3: return MTHCA_RATE_MEMFREE_QUARTER;
default: return MTHCA_RATE_MEMFREE_EIGHTH;
}
case IB_WR_SEND_WITH_IMM:
hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
hdr->immdt = ntohl(wr->ex.imm_data);
- /* fall through */
+ fallthrough;
case IB_WR_SEND:
hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
ocrdma_build_send(qp, hdr, wr);
case IB_WR_RDMA_WRITE_WITH_IMM:
hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
hdr->immdt = ntohl(wr->ex.imm_data);
- /* fall through */
+ fallthrough;
case IB_WR_RDMA_WRITE:
hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT);
status = ocrdma_build_write(qp, hdr, wr);
break;
case IB_WR_RDMA_READ_WITH_INV:
SET_FIELD2(wqe->flags, RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG, 1);
- /* fallthrough -- same is identical to RDMA READ */
+		fallthrough;	/* otherwise identical to RDMA READ */
case IB_WR_RDMA_READ:
wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
state = IB_PORT_ARMED;
break;
case IB_6120_L_STATE_ACTIVE:
- /* fall through */
case IB_6120_L_STATE_ACT_DEFER:
state = IB_PORT_ACTIVE;
break;
- default: /* fall through */
+ default:
+ fallthrough;
case IB_6120_L_STATE_DOWN:
state = IB_PORT_DOWN;
break;
state = IB_PORT_ARMED;
break;
case IB_7220_L_STATE_ACTIVE:
- /* fall through */
case IB_7220_L_STATE_ACT_DEFER:
state = IB_PORT_ACTIVE;
break;
- default: /* fall through */
+ default:
+ fallthrough;
case IB_7220_L_STATE_DOWN:
state = IB_PORT_DOWN;
break;
state = IB_PORT_ARMED;
break;
case IB_7322_L_STATE_ACTIVE:
- /* fall through */
case IB_7322_L_STATE_ACT_DEFER:
state = IB_PORT_ACTIVE;
break;
- default: /* fall through */
+ default:
+ fallthrough;
case IB_7322_L_STATE_DOWN:
state = IB_PORT_DOWN;
break;
"Invalid num_vls %u, using 4 VLs\n",
qib_num_cfg_vls);
qib_num_cfg_vls = 4;
- /* fall through */
+ fallthrough;
case 4:
ppd->vls_supported = IB_VL_VL0_3;
break;
/* Bad mkey not a violation below level 2 */
if (ibp->rvp.mkeyprot < 2)
break;
- /* fall through */
+ fallthrough;
case IB_MGMT_METHOD_SET:
case IB_MGMT_METHOD_TRAP_REPRESS:
if (ibp->rvp.mkey_violations != 0xFFFF)
case IB_PORT_NOP:
if (lstate == 0)
break;
- /* FALLTHROUGH */
+ fallthrough;
case IB_PORT_DOWN:
if (lstate == 0)
lstate = QIB_IB_LINKDOWN_ONLY;
ret = IB_MAD_RESULT_SUCCESS;
goto bail;
}
- /* FALLTHROUGH */
+ fallthrough;
default:
smp->status |= IB_SMP_UNSUP_METH_ATTR;
ret = reply(smp);
ret = IB_MAD_RESULT_SUCCESS;
goto bail;
}
- /* FALLTHROUGH */
+ fallthrough;
default:
smp->status |= IB_SMP_UNSUP_METH_ATTR;
ret = reply(smp);
ret = cc_get_congestion_control_table(ccp, ibdev, port);
goto bail;
- /* FALLTHROUGH */
+ fallthrough;
default:
ccp->status |= IB_SMP_UNSUP_METH_ATTR;
ret = reply((struct ib_smp *) ccp);
ret = cc_set_congestion_control_table(ccp, ibdev, port);
goto bail;
- /* FALLTHROUGH */
+ fallthrough;
default:
ccp->status |= IB_SMP_UNSUP_METH_ATTR;
ret = reply((struct ib_smp *) ccp);
rvt_put_mr(e->rdma_sge.mr);
e->rdma_sge.mr = NULL;
}
- /* FALLTHROUGH */
+ fallthrough;
case OP(ATOMIC_ACKNOWLEDGE):
/*
* We can increment the tail pointer now that the last
*/
if (++qp->s_tail_ack_queue > QIB_MAX_RDMA_ATOMIC)
qp->s_tail_ack_queue = 0;
- /* FALLTHROUGH */
+ fallthrough;
case OP(SEND_ONLY):
case OP(ACKNOWLEDGE):
/* Check for no next entry in the queue. */
case OP(RDMA_READ_RESPONSE_FIRST):
qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
- /* FALLTHROUGH */
+ fallthrough;
case OP(RDMA_READ_RESPONSE_MIDDLE):
qp->s_cur_sge = &qp->s_ack_rdma_sge;
qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr;
* See qib_restart_rc().
*/
qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
- /* FALLTHROUGH */
+ fallthrough;
case OP(SEND_FIRST):
qp->s_state = OP(SEND_MIDDLE);
- /* FALLTHROUGH */
+ fallthrough;
case OP(SEND_MIDDLE):
bth2 = qp->s_psn++ & QIB_PSN_MASK;
ss = &qp->s_sge;
* See qib_restart_rc().
*/
qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
- /* FALLTHROUGH */
+ fallthrough;
case OP(RDMA_WRITE_FIRST):
qp->s_state = OP(RDMA_WRITE_MIDDLE);
- /* FALLTHROUGH */
+ fallthrough;
case OP(RDMA_WRITE_MIDDLE):
bth2 = qp->s_psn++ & QIB_PSN_MASK;
ss = &qp->s_sge;
if (!ret)
goto rnr_nak;
qp->r_rcv_len = 0;
- /* FALLTHROUGH */
+ fallthrough;
case OP(SEND_MIDDLE):
case OP(RDMA_WRITE_MIDDLE):
send_middle:
qp->r_rcv_len = 0;
if (opcode == OP(SEND_ONLY))
goto no_immediate_data;
- /* fall through -- for SEND_ONLY_WITH_IMMEDIATE */
+ fallthrough; /* for SEND_ONLY_WITH_IMMEDIATE */
case OP(SEND_LAST_WITH_IMMEDIATE):
send_last_imm:
wc.ex.imm_data = ohdr->u.imm_data;
* bringing the link up with traffic active on
* 7220, e.g. */
ss->go_s99_running = 1;
- /* fall through -- and start dma engine */
+ fallthrough; /* and start dma engine */
case qib_sdma_event_e10_go_hw_start:
/* This reference means the state machine is started */
sdma_get(&ppd->sdma_state);
case OP(SEND_FIRST):
qp->s_state = OP(SEND_MIDDLE);
- /* FALLTHROUGH */
+ fallthrough;
case OP(SEND_MIDDLE):
len = qp->s_len;
if (len > pmtu) {
case OP(RDMA_WRITE_FIRST):
qp->s_state = OP(RDMA_WRITE_MIDDLE);
- /* FALLTHROUGH */
+ fallthrough;
case OP(RDMA_WRITE_MIDDLE):
len = qp->s_len;
if (len > pmtu) {
goto no_immediate_data;
else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE))
goto send_last_imm;
- /* FALLTHROUGH */
+ fallthrough;
case OP(SEND_MIDDLE):
/* Check for invalid length PMTU or posted rwqe len. */
if (unlikely(tlen != (hdrsize + pmtu + 4)))
wc.ex.imm_data = ohdr->u.rc.imm_data;
goto rdma_last_imm;
}
- /* FALLTHROUGH */
+ fallthrough;
case OP(RDMA_WRITE_MIDDLE):
/* Check for invalid length PMTU or posted rwqe len. */
if (unlikely(tlen != (hdrsize + pmtu + 4)))
case IB_QPT_GSI:
if (ib_qib_disable_sma)
break;
- /* FALLTHROUGH */
+ fallthrough;
case IB_QPT_UD:
qib_ud_rcv(ibp, hdr, has_grh, data, tlen, qp);
break;
IB_QPS_ERR,
NULL);
if (status) {
- usnic_err("Failed to transistion qp grp %u from %s to %s\n",
+ usnic_err("Failed to transition qp grp %u from %s to %s\n",
qp_grp->grp_id,
usnic_ib_qp_grp_state_to_string
(cur_state),
ret = -EINVAL;
goto err_qp;
}
- /* fall through */
+ fallthrough;
case IB_QPT_RC:
case IB_QPT_UD:
qp = kzalloc(sizeof(*qp), GFP_KERNEL);
if (init_attr->port_num == 0 ||
init_attr->port_num > ibpd->device->phys_port_cnt)
return ERR_PTR(-EINVAL);
- /* fall through */
+ fallthrough;
case IB_QPT_UC:
case IB_QPT_RC:
case IB_QPT_UD:
if ((syn & AETH_TYPE_MASK) != AETH_ACK)
return COMPST_ERROR;
- /* fall through */
+ fallthrough;
/* (IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE doesn't have an AETH)
*/
case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE:
case TASK_STATE_BUSY:
task->state = TASK_STATE_ARMED;
- /* fall through */
+ fallthrough;
case TASK_STATE_ARMED:
spin_unlock_irqrestore(&task->state_lock, flags);
return;
switch (wr->opcode) {
case IB_WR_RDMA_WRITE_WITH_IMM:
wr->ex.imm_data = ibwr->ex.imm_data;
- /* fall through */
+ fallthrough;
case IB_WR_RDMA_READ:
case IB_WR_RDMA_WRITE:
wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
switch (cep->state) {
case SIW_EPSTATE_RDMA_MODE:
- /* fall through */
case SIW_EPSTATE_LISTENING:
break;
case SIW_EPSTATE_AWAIT_MPAREQ:
- /* fall through */
case SIW_EPSTATE_AWAIT_MPAREP:
siw_cm_queue_work(cep, SIW_CM_WORK_READ_MPAHDR);
break;
case RDMAP_SEND_SE:
case RDMAP_SEND_SE_INVAL:
wqe->rqe.flags |= SIW_WQE_SOLICITED;
- /* Fall through */
+ fallthrough;
case RDMAP_SEND:
case RDMAP_SEND_INVAL:
* DDP segment.
*/
qp->rx_fpdu->first_ddp_seg = 0;
- /* Fall through */
+ fallthrough;
case SIW_GET_DATA_START:
/*
case SIW_OP_SEND_REMOTE_INV:
case SIW_OP_WRITE:
siw_wqe_put_mem(wqe, tx_type);
- /* Fall through */
+ fallthrough;
case SIW_OP_INVAL_STAG:
case SIW_OP_REG_MR:
case SIW_OP_READ:
case SIW_OP_READ_LOCAL_INV:
siw_wqe_put_mem(wqe, tx_type);
- /* Fall through */
+ fallthrough;
case SIW_OP_INVAL_STAG:
case SIW_OP_REG_MR:
return ipoib_cm_req_handler(cm_id, event);
case IB_CM_DREQ_RECEIVED:
ib_send_cm_drep(cm_id, NULL, 0);
- /* Fall through */
+ fallthrough;
case IB_CM_REJ_RECEIVED:
p = cm_id->context;
priv = ipoib_priv(p->dev);
if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
ipoib_warn(priv, "unable to move qp to error state\n");
- /* Fall through */
+ fallthrough;
default:
return 0;
}
default:
dev_warn_ratelimited(&dev->dev,
"duplicate IP address detected\n");
- /* Fall through */
+ fallthrough;
case 1:
return net_dev;
}
case RDMA_CM_EVENT_REJECTED:
iser_info("Connection rejected: %s\n",
rdma_reject_msg(cma_id, event->status));
- /* FALLTHROUGH */
+ fallthrough;
case RDMA_CM_EVENT_ADDR_ERROR:
case RDMA_CM_EVENT_ROUTE_ERROR:
case RDMA_CM_EVENT_CONNECT_ERROR:
case RDMA_CM_EVENT_ESTABLISHED:
isert_connected_handler(cma_id);
break;
- case RDMA_CM_EVENT_ADDR_CHANGE: /* FALLTHRU */
- case RDMA_CM_EVENT_DISCONNECTED: /* FALLTHRU */
+ case RDMA_CM_EVENT_ADDR_CHANGE:
+ case RDMA_CM_EVENT_DISCONNECTED:
-	case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */
+	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
ret = isert_disconnected_handler(cma_id, event->event);
break;
case RDMA_CM_EVENT_REJECTED:
isert_info("Connection rejected: %s\n",
rdma_reject_msg(cma_id, event->status));
- /* fall through */
+ fallthrough;
case RDMA_CM_EVENT_UNREACHABLE:
case RDMA_CM_EVENT_CONNECT_ERROR:
ret = isert_connect_error(cma_id);
transport_generic_free_cmd(&cmd->se_cmd, 0);
break;
}
- /* fall through */
+ fallthrough;
default:
iscsit_release_cmd(cmd);
break;
switch (cmd->i_state) {
case ISTATE_SEND_TASKMGTRSP:
iscsit_tmr_post_handler(cmd, cmd->conn);
- /* fall through */
+ fallthrough;
case ISTATE_SEND_REJECT:
case ISTATE_SEND_TEXTRSP:
cmd->i_state = ISTATE_SENT_STATUS;
vema_get_mac_entries(port, recvd_mad, rsp_mad);
break;
case OPA_EM_ATTR_IFACE_UCAST_MACS:
- /* fall through */
case OPA_EM_ATTR_IFACE_MCAST_MACS:
vema_get_mac_list(port, recvd_mad, rsp_mad, attr_id);
break;
input_report_key(fsia6b->dev,
sw_id++,
sw_state == 0);
- /* fall-through */
+ fallthrough;
case '2':
input_report_key(fsia6b->dev,
sw_id++,
sw_state == 1);
- /* fall-through */
+ fallthrough;
case '1':
input_report_key(fsia6b->dev,
sw_id++,
case GC_MULTI:
input_set_capability(input_dev, EV_KEY, BTN_TRIGGER);
- /* fall through */
break;
case GC_PSX:
case 0x3731: /* PL-710 */
wacom->res_x = 2540;
wacom->res_y = 2540;
- /* fall through */
+ fallthrough;
case 0x3535: /* PL-550 */
case 0x3830: /* PL-800 */
wacom->extra_z_bits = 2;
bootloader = appmode - 0x24;
break;
}
- /* Fall through - for normal case */
+ fallthrough; /* for normal case */
case 0x4c:
case 0x4d:
case 0x5a:
default:
dev_err(&pdev->dev, "Unsupported ISEL setting: %d\n",
pdata->isel);
- /* Fall through */
+ fallthrough;
case 200:
case 0:
wm831x_set_bits(wm831x, WM831X_TOUCH_CONTROL_2,
select IOMMU_API
select IOMMU_IOVA
select IOMMU_DMA
- depends on X86_64 && PCI && ACPI
+ depends on X86_64 && PCI && ACPI && HAVE_CMPXCHG_DOUBLE
help
With this option you can enable support for AMD IOMMU hardware in
your system. An IOMMU is a hardware component which provides
iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
else
iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
- if (((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
+
+ /*
+	 * Note: GA (128-bit IRTE) mode requires cmpxchg16b support.
+ * GAM also requires GA mode. Therefore, we need to
+ * check cmpxchg16b support before enabling it.
+ */
+ if (!boot_cpu_has(X86_FEATURE_CX16) ||
+ ((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
break;
case 0x11:
iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
else
iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
- if (((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0))
+
+ /*
+	 * Note: GA (128-bit IRTE) mode requires cmpxchg16b support.
+	 * XT and GAM also require GA mode. Therefore, we need to
+ * check cmpxchg16b support before enabling them.
+ */
+ if (!boot_cpu_has(X86_FEATURE_CX16) ||
+ ((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0)) {
amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
+ break;
+ }
+
/*
* Note: Since iommu_update_intcapxt() leverages
* the IOMMU MMIO access to MSI capability block registers
switch (amd_iommu_guest_ir) {
case AMD_IOMMU_GUEST_IR_VAPIC:
iommu_feature_enable(iommu, CONTROL_GAM_EN);
- /* Fall through */
+ fallthrough;
case AMD_IOMMU_GUEST_IR_LEGACY_GA:
iommu_feature_enable(iommu, CONTROL_GA_EN);
iommu->irte_ops = &irte_128_ops;
if (!dev_data)
return 0;
- if (dev_data->iommu_v2)
+ /*
+ * Do not identity map IOMMUv2 capable devices when memory encryption is
+ * active, because some of those devices (AMD GPUs) don't have the
+ * encryption bit in their DMA-mask and require remapping.
+ */
+ if (!mem_encrypt_active() && dev_data->iommu_v2)
return IOMMU_DOMAIN_IDENTITY;
return 0;
static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte,
struct amd_ir_data *data)
{
+ bool ret;
struct irq_remap_table *table;
struct amd_iommu *iommu;
unsigned long flags;
entry = (struct irte_ga *)table->table;
entry = &entry[index];
- entry->lo.fields_remap.valid = 0;
- entry->hi.val = irte->hi.val;
- entry->lo.val = irte->lo.val;
- entry->lo.fields_remap.valid = 1;
+
+ ret = cmpxchg_double(&entry->lo.val, &entry->hi.val,
+ entry->lo.val, entry->hi.val,
+ irte->lo.val, irte->hi.val);
+ /*
+	 * We use cmpxchg16b to atomically update the 128-bit IRTE,
+	 * and the entry cannot be updated by the hardware or other
+	 * processors behind us, so the compare-and-exchange should
+	 * always succeed.
+ */
+ WARN_ON(!ret);
+
if (data)
data->ref = entry;
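The essential change is that the 128-bit IRTE is now replaced in a single compare-and-exchange instead of a clear-valid/update/set-valid sequence, so the IOMMU can never observe a half-written entry. A user-space sketch of the same idea using the compiler's 128-bit atomics (build with gcc -mcx16 on x86-64, or link libatomic); the two-word entry is a simplified stand-in for struct irte_ga:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct entry128 {
	uint64_t lo;
	uint64_t hi;
} __attribute__((aligned(16)));	/* cmpxchg16b needs 16-byte alignment */

static bool update_entry(struct entry128 *slot, struct entry128 new)
{
	__int128 expected, desired;

	__atomic_load((__int128 *)slot, &expected, __ATOMIC_RELAXED);
	memcpy(&desired, &new, sizeof(desired));

	/* nobody else updates the table behind us, so this should
	 * always succeed; the kernel WARNs if it does not */
	return __atomic_compare_exchange((__int128 *)slot, &expected,
					 &desired, false,
					 __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

int main(void)
{
	struct entry128 slot = { 0, 0 };
	struct entry128 new = { 0x1234, 0x5678 };

	printf("%d %#llx\n", update_entry(&slot, new),
	       (unsigned long long)slot.lo);	/* 1 0x1234 */
	return 0;
}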
struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
struct irq_cfg *cfg = ir_data->cfg;
+ u64 valid = entry->lo.fields_remap.valid;
if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
!entry || !entry->lo.fields_vapic.guest_mode)
entry->lo.val = 0;
entry->hi.val = 0;
+ entry->lo.fields_remap.valid = valid;
entry->lo.fields_remap.dm = apic->irq_dest_mode;
entry->lo.fields_remap.int_type = apic->irq_delivery_mode;
entry->hi.fields.vector = cfg->vector;
might_sleep();
+ /*
+ * When memory encryption is active the device is likely not in a
+ * direct-mapped domain. Forbid using IOMMUv2 functionality for now.
+ */
+ if (mem_encrypt_active())
+ return -ENODEV;
+
if (!amd_iommu_v2_supported())
return -ENODEV;
break;
case CMDQ_OP_CFGI_CD:
cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SSID, ent->cfgi.ssid);
- /* Fallthrough */
+ fallthrough;
case CMDQ_OP_CFGI_STE:
cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SID, ent->cfgi.sid);
cmd[1] |= FIELD_PREP(CMDQ_CFGI_1_LEAF, ent->cfgi.leaf);
break;
case CMDQ_OP_TLBI_NH_ASID:
cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid);
- /* Fallthrough */
+ fallthrough;
case CMDQ_OP_TLBI_S12_VMALL:
cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid);
break;
*/
return;
case CMDQ_ERR_CERROR_ILL_IDX:
- /* Fallthrough */
default:
break;
}
switch (FIELD_GET(IDR0_STALL_MODEL, reg)) {
case IDR0_STALL_MODEL_FORCE:
smmu->features |= ARM_SMMU_FEAT_STALL_FORCE;
- /* Fallthrough */
+ fallthrough;
case IDR0_STALL_MODEL_STALL:
smmu->features |= ARM_SMMU_FEAT_STALLS;
}
switch (FIELD_GET(IDR0_TTF, reg)) {
case IDR0_TTF_AARCH32_64:
smmu->ias = 40;
- /* Fallthrough */
+ fallthrough;
case IDR0_TTF_AARCH64:
break;
default:
default:
dev_info(smmu->dev,
"unknown output address size. Truncating to 48-bit\n");
- /* Fallthrough */
+ fallthrough;
case IDR5_OAS_48_BIT:
smmu->oas = 48;
}
if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
!gfpflags_allow_blocking(gfp) && !coherent)
- cpu_addr = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &page,
- gfp);
+ page = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &cpu_addr,
+ gfp, NULL);
else
cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
if (!cpu_addr)
return (level - 1) * LEVEL_STRIDE;
}
-static inline int pfn_level_offset(unsigned long pfn, int level)
+static inline int pfn_level_offset(u64 pfn, int level)
{
return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}
-static inline unsigned long level_mask(int level)
+static inline u64 level_mask(int level)
{
- return -1UL << level_to_offset_bits(level);
+ return -1ULL << level_to_offset_bits(level);
}
-static inline unsigned long level_size(int level)
+static inline u64 level_size(int level)
{
- return 1UL << level_to_offset_bits(level);
+ return 1ULL << level_to_offset_bits(level);
}
-static inline unsigned long align_to_level(unsigned long pfn, int level)
+static inline u64 align_to_level(u64 pfn, int level)
{
return (pfn + level_size(level) - 1) & level_mask(level);
}
static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
- return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
+ return 1UL << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
}
/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
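The widening to u64 matters on 32-bit builds, where unsigned long is 32 bits: once a page-table level's offset reaches 32 bits or more, -1UL << bits and 1UL << bits are undefined and truncate in practice, corrupting the level masks for 36-bit and wider address spaces. A small demonstration of the arithmetic, assuming the driver's 9-bit stride per level:

#include <stdint.h>
#include <stdio.h>

#define LEVEL_STRIDE 9

static int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;	/* 0, 9, 18, 27, 36, ... */
}

int main(void)
{
	int level = 5;	/* 36 offset bits: wider than a 32-bit long */

	/* 1UL << 36 is undefined where long is 32 bits wide;
	 * 1ULL keeps the full value on every architecture */
	uint64_t size = 1ULL << level_to_offset_bits(level);

	printf("level %d spans %#llx pages\n", level,
	       (unsigned long long)size);	/* 0x1000000000 */
	return 0;
}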
int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
-#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
#define DEFER_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-2))
struct device_domain_info *get_domain_info(struct device *dev)
{
return NULL;
info = dev_iommu_priv_get(dev);
- if (unlikely(info == DUMMY_DEVICE_DOMAIN_INFO ||
- info == DEFER_DEVICE_DOMAIN_INFO))
+ if (unlikely(info == DEFER_DEVICE_DOMAIN_INFO))
return NULL;
return info;
return &context[devfn];
}
-static int iommu_dummy(struct device *dev)
-{
- return dev_iommu_priv_get(dev) == DUMMY_DEVICE_DOMAIN_INFO;
-}
-
static bool attach_deferred(struct device *dev)
{
return dev_iommu_priv_get(dev) == DEFER_DEVICE_DOMAIN_INFO;
return false;
}
+static bool quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
+{
+ struct dmar_drhd_unit *drhd;
+ u32 vtbar;
+ int rc;
+
+ /* We know that this device on this chipset has its own IOMMU.
+ * If we find it under a different IOMMU, then the BIOS is lying
+ * to us. Hope that the IOMMU for this device is actually
+ * disabled, and it needs no translation...
+ */
+ rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
+ if (rc) {
+ /* "can't" happen */
+ dev_info(&pdev->dev, "failed to run vt-d quirk\n");
+ return false;
+ }
+ vtbar &= 0xffff0000;
+
+	/* we know that this IOMMU should be at offset 0xa000 from vtbar */
+ drhd = dmar_find_matched_drhd_unit(pdev);
+ if (!drhd || drhd->reg_base_addr - vtbar != 0xa000) {
+ pr_warn_once(FW_BUG "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n");
+ add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
+ return true;
+ }
+
+ return false;
+}
+
+static bool iommu_is_dummy(struct intel_iommu *iommu, struct device *dev)
+{
+ if (!iommu || iommu->drhd->ignored)
+ return true;
+
+ if (dev_is_pci(dev)) {
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ pdev->device == PCI_DEVICE_ID_INTEL_IOAT_SNB &&
+ quirk_ioat_snb_local_iommu(pdev))
+ return true;
+ }
+
+ return false;
+}
+
struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
{
struct dmar_drhd_unit *drhd = NULL;
u16 segment = 0;
int i;
- if (!dev || iommu_dummy(dev))
+ if (!dev)
return NULL;
if (dev_is_pci(dev)) {
dev = &ACPI_COMPANION(dev)->dev;
rcu_read_lock();
- for_each_active_iommu(iommu, drhd) {
+ for_each_iommu(iommu, drhd) {
if (pdev && segment != drhd->segment)
continue;
}
iommu = NULL;
out:
+ if (iommu_is_dummy(iommu, dev))
+ iommu = NULL;
+
rcu_read_unlock();
return iommu;
{
struct device_domain_info *info;
- if (unlikely(attach_deferred(dev) || iommu_dummy(dev)))
+ if (unlikely(attach_deferred(dev)))
return NULL;
/* No lock here, assumes no domain exit in normal case */
iova_cache_put();
}
-static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
-{
- struct dmar_drhd_unit *drhd;
- u32 vtbar;
- int rc;
-
- /* We know that this device on this chipset has its own IOMMU.
- * If we find it under a different IOMMU, then the BIOS is lying
- * to us. Hope that the IOMMU for this device is actually
- * disabled, and it needs no translation...
- */
- rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
- if (rc) {
- /* "can't" happen */
- dev_info(&pdev->dev, "failed to run vt-d quirk\n");
- return;
- }
- vtbar &= 0xffff0000;
-
- /* we know that the this iommu should be at offset 0xa000 from vtbar */
- drhd = dmar_find_matched_drhd_unit(pdev);
- if (!drhd || drhd->reg_base_addr - vtbar != 0xa000) {
- pr_warn_once(FW_BUG "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n");
- add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
- dev_iommu_priv_set(&pdev->dev, DUMMY_DEVICE_DOMAIN_INFO);
- }
-}
-DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
-
static void __init init_no_remapping_devices(void)
{
struct dmar_drhd_unit *drhd;
/* This IOMMU has *only* gfx devices. Either bypass it or
set the gfx_mapped flag, as appropriate */
drhd->gfx_dedicated = 1;
- if (!dmar_map_gfx) {
+ if (!dmar_map_gfx)
drhd->ignored = 1;
- for_each_active_dev_scope(drhd->devices,
- drhd->devices_cnt, i, dev)
- dev_iommu_priv_set(dev, DUMMY_DEVICE_DOMAIN_INFO);
- }
}
}
switch (type) {
case IOMMU_DOMAIN_DMA:
- /* fallthrough */
case IOMMU_DOMAIN_UNMANAGED:
dmar_domain = alloc_domain(0);
if (!dmar_domain) {
/* Enable interrupt-remapping */
iommu->gcmd |= DMA_GCMD_IRE;
- iommu->gcmd &= ~DMA_GCMD_CFI; /* Block compatibility-format MSIs */
writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
-
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
readl, (sts & DMA_GSTS_IRES), sts);
+	/*
+	 * Block compatibility-format MSIs. The VT-d spec requires that
+	 * modifications of multiple GCMD control fields be serialized
+	 * through separate register writes, so CFI is cleared on its own
+	 * after interrupt remapping has been enabled.
+	 */
+ if (sts & DMA_GSTS_CFIS) {
+ iommu->gcmd &= ~DMA_GCMD_CFI;
+ writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
+ IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
+ readl, !(sts & DMA_GSTS_CFIS), sts);
+ }
+
/*
* With CFI clear in the Global Command register, we should be
* protected from dangerous (i.e. compatibility) interrupts
default:
dev_warn(vdev->dev, "unknown resv mem subtype 0x%x\n",
mem->subtype);
- /* Fall-through */
+ fallthrough;
case VIRTIO_IOMMU_RESV_MEM_T_RESERVED:
region = iommu_alloc_resv_region(start, size, 0,
IOMMU_RESV_RESERVED);
for Goldfish based virtual platforms.
config QCOM_PDC
- tristate "QCOM PDC"
+ bool "QCOM PDC"
depends on ARCH_QCOM
select IRQ_DOMAIN_HIERARCHY
help
switch (gpsz) {
default:
WARN_ON(1);
- /* fall through */
+ fallthrough;
case GIC_PAGE_SIZE_4K:
psz = SZ_4K;
break;
switch (gpsz) {
default:
gpsz = GIC_PAGE_SIZE_4K;
- /* fall through */
+ fallthrough;
case GIC_PAGE_SIZE_4K:
psz = SZ_4K;
break;
case 7:
write_gicreg(0, ICC_AP0R3_EL1);
write_gicreg(0, ICC_AP0R2_EL1);
- /* Fall through */
+ fallthrough;
case 6:
write_gicreg(0, ICC_AP0R1_EL1);
- /* Fall through */
+ fallthrough;
case 5:
case 4:
write_gicreg(0, ICC_AP0R0_EL1);
case 7:
write_gicreg(0, ICC_AP1R3_EL1);
write_gicreg(0, ICC_AP1R2_EL1);
- /* Fall through */
+ fallthrough;
case 6:
write_gicreg(0, ICC_AP1R1_EL1);
- /* Fall through */
+ fallthrough;
case 5:
case 4:
write_gicreg(0, ICC_AP1R0_EL1);
case 4:
writel_relaxed(~0, reg + GPC_IMR1_CORE2);
writel_relaxed(~0, reg + GPC_IMR1_CORE3);
- /* fall through */
+ fallthrough;
case 2:
writel_relaxed(~0, reg + GPC_IMR1_CORE0);
writel_relaxed(~0, reg + GPC_IMR1_CORE1);
irq_reg_writel(gc, IRQ_MSK(32), JZ_REG_INTC_SET_MASK);
}
- if (request_irq(parent_irq, intc_cascade, 0,
+ if (request_irq(parent_irq, intc_cascade, IRQF_NO_SUSPEND,
"SoC intc cascade interrupt", NULL))
pr_err("Failed to register SoC intc cascade interrupt\n");
return 0;
case GIC_LOCAL_INT_TIMER:
/* CONFIG_MIPS_CMP workaround (see __gic_init) */
map = GIC_MAP_PIN_MAP_TO_PIN | timer_cpu_pin;
- /* fall-through */
+ fallthrough;
case GIC_LOCAL_INT_PERFCTR:
case GIC_LOCAL_INT_FDC:
/*
return ret;
}
-IRQCHIP_PLATFORM_DRIVER_BEGIN(mtk_cirq)
-IRQCHIP_MATCH("mediatek,mtk-cirq", mtk_cirq_of_init)
-IRQCHIP_PLATFORM_DRIVER_END(mtk_cirq)
+IRQCHIP_DECLARE(mtk_cirq, "mediatek,mtk-cirq", mtk_cirq_of_init);
kfree(chip_data);
return ret;
}
-IRQCHIP_PLATFORM_DRIVER_BEGIN(mtk_sysirq)
-IRQCHIP_MATCH("mediatek,mt6577-sysirq", mtk_sysirq_of_init)
-IRQCHIP_PLATFORM_DRIVER_END(mtk_sysirq)
+IRQCHIP_DECLARE(mtk_sysirq, "mediatek,mt6577-sysirq", mtk_sysirq_of_init);
irq_gc_unlock(gc);
}
+/*
+ * Set the target bit with a direct write, without a read-modify-write:
+ * the pending registers are write-one-to-clear, so reading back and
+ * rewriting would also acknowledge other lines that are pending.
+ */
+static inline void stm32_exti_write_bit(struct irq_data *d, u32 reg)
+{
+ struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
+ void __iomem *base = chip_data->host_data->base;
+ u32 val = BIT(d->hwirq % IRQS_PER_BANK);
+
+ writel_relaxed(val, base + reg);
+}
+
static inline u32 stm32_exti_set_bit(struct irq_data *d, u32 reg)
{
struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
raw_spin_lock(&chip_data->rlock);
- stm32_exti_set_bit(d, stm32_bank->rpr_ofst);
+ stm32_exti_write_bit(d, stm32_bank->rpr_ofst);
if (stm32_bank->fpr_ofst != UNDEF_REG)
- stm32_exti_set_bit(d, stm32_bank->fpr_ofst);
+ stm32_exti_write_bit(d, stm32_bank->fpr_ofst);
raw_spin_unlock(&chip_data->rlock);
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
* @vint_mutex: Mutex to protect vint_list
* @base: Base address of the memory mapped IO registers
* @pdev: Pointer to platform device.
+ * @ti_sci_id: TI-SCI device identifier
*/
struct ti_sci_inta_irq_domain {
const struct ti_sci_handle *sci;
struct mutex vint_mutex;
void __iomem *base;
struct platform_device *pdev;
+ u32 ti_sci_id;
};
#define to_vint_desc(e, i) container_of(e, struct ti_sci_inta_vint_desc, \
}
/**
+ * ti_sci_inta_xlate_irq() - Translate hwirq to parent's hwirq.
+ * @inta:	IRQ domain corresponding to Interrupt Aggregator
+ * @vint_id:	Hardware irq corresponding to the above irq domain
+ *
+ * Return parent irq number if translation is available, else -ENOENT.
+ */
+static int ti_sci_inta_xlate_irq(struct ti_sci_inta_irq_domain *inta,
+ u16 vint_id)
+{
+ struct device_node *np = dev_of_node(&inta->pdev->dev);
+ u32 base, parent_base, size;
+ const __be32 *range;
+ int len;
+
+ range = of_get_property(np, "ti,interrupt-ranges", &len);
+ if (!range)
+ return vint_id;
+
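+	/*
+	 * "ti,interrupt-ranges" is a list of <base parent-base size>
+	 * triplets describing how windows of local interrupts map onto
+	 * the parent domain; without the property the mapping is 1:1.
+	 */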
+ for (len /= sizeof(*range); len >= 3; len -= 3) {
+ base = be32_to_cpu(*range++);
+ parent_base = be32_to_cpu(*range++);
+ size = be32_to_cpu(*range++);
+
+ if (base <= vint_id && vint_id < base + size)
+ return vint_id - base + parent_base;
+ }
+
+ return -ENOENT;
+}
+
+/**
* ti_sci_inta_alloc_parent_irq() - Allocate parent irq to Interrupt aggregator
* @domain: IRQ domain corresponding to Interrupt Aggregator
*
struct ti_sci_inta_irq_domain *inta = domain->host_data;
struct ti_sci_inta_vint_desc *vint_desc;
struct irq_fwspec parent_fwspec;
+ struct device_node *parent_node;
unsigned int parent_virq;
- u16 vint_id;
+	u16 vint_id;
+	int p_hwirq;
+ int ret;
vint_id = ti_sci_get_free_resource(inta->vint);
if (vint_id == TI_SCI_RESOURCE_NULL)
return ERR_PTR(-EINVAL);
+ p_hwirq = ti_sci_inta_xlate_irq(inta, vint_id);
+ if (p_hwirq < 0) {
+ ret = p_hwirq;
+ goto free_vint;
+ }
+
vint_desc = kzalloc(sizeof(*vint_desc), GFP_KERNEL);
- if (!vint_desc)
- return ERR_PTR(-ENOMEM);
+ if (!vint_desc) {
+ ret = -ENOMEM;
+ goto free_vint;
+ }
vint_desc->domain = domain;
vint_desc->vint_id = vint_id;
INIT_LIST_HEAD(&vint_desc->list);
- parent_fwspec.fwnode = of_node_to_fwnode(of_irq_find_parent(dev_of_node(&inta->pdev->dev)));
- parent_fwspec.param_count = 2;
- parent_fwspec.param[0] = inta->pdev->id;
- parent_fwspec.param[1] = vint_desc->vint_id;
+ parent_node = of_irq_find_parent(dev_of_node(&inta->pdev->dev));
+ parent_fwspec.fwnode = of_node_to_fwnode(parent_node);
+
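+	/*
+	 * A GICv3 parent expects <0 (SPI), hwirq, trigger>, and SPI
+	 * interrupt IDs start at 32, hence the bias on the translated
+	 * hwirq below.
+	 */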
+ if (of_device_is_compatible(parent_node, "arm,gic-v3")) {
+ /* Parent is GIC */
+ parent_fwspec.param_count = 3;
+ parent_fwspec.param[0] = 0;
+ parent_fwspec.param[1] = p_hwirq - 32;
+ parent_fwspec.param[2] = IRQ_TYPE_LEVEL_HIGH;
+ } else {
+ /* Parent is Interrupt Router */
+ parent_fwspec.param_count = 1;
+ parent_fwspec.param[0] = p_hwirq;
+ }
parent_virq = irq_create_fwspec_mapping(&parent_fwspec);
if (parent_virq == 0) {
- kfree(vint_desc);
- return ERR_PTR(-EINVAL);
+ dev_err(&inta->pdev->dev, "Parent IRQ allocation failed\n");
+ ret = -EINVAL;
+ goto free_vint_desc;
}
vint_desc->parent_virq = parent_virq;
ti_sci_inta_irq_handler, vint_desc);
return vint_desc;
+free_vint_desc:
+ kfree(vint_desc);
+free_vint:
+ ti_sci_release_resource(inta->vint, vint_id);
+ return ERR_PTR(ret);
}
/**
err = inta->sci->ops.rm_irq_ops.set_event_map(inta->sci,
dev_id, dev_index,
- inta->pdev->id,
+ inta->ti_sci_id,
vint_desc->vint_id,
event_desc->global_event,
free_bit);
inta->sci->ops.rm_irq_ops.free_event_map(inta->sci,
HWIRQ_TO_DEVID(hwirq),
HWIRQ_TO_IRQID(hwirq),
- inta->pdev->id,
+ inta->ti_sci_id,
vint_desc->vint_id,
event_desc->global_event,
event_desc->vint_bit);
return ret;
}
- ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id", &pdev->id);
+ ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id", &inta->ti_sci_id);
if (ret) {
dev_err(dev, "missing 'ti,sci-dev-id' property\n");
return -EINVAL;
}
- inta->vint = devm_ti_sci_get_of_resource(inta->sci, dev, pdev->id,
- "ti,sci-rm-range-vint");
+ inta->vint = devm_ti_sci_get_resource(inta->sci, dev, inta->ti_sci_id,
+ TI_SCI_RESASG_SUBTYPE_IA_VINT);
if (IS_ERR(inta->vint)) {
dev_err(dev, "VINT resource allocation failed\n");
return PTR_ERR(inta->vint);
}
- inta->global_event = devm_ti_sci_get_of_resource(inta->sci, dev, pdev->id,
- "ti,sci-rm-range-global-event");
+ inta->global_event = devm_ti_sci_get_resource(inta->sci, dev, inta->ti_sci_id,
+ TI_SCI_RESASG_SUBTYPE_GLOBAL_EVENT_SEVT);
if (IS_ERR(inta->global_event)) {
dev_err(dev, "Global event resource allocation failed\n");
return PTR_ERR(inta->global_event);
INIT_LIST_HEAD(&inta->vint_list);
mutex_init(&inta->vint_mutex);
+	dev_info(dev, "Interrupt Aggregator domain %d created\n", inta->ti_sci_id);
+
return 0;
}
#include <linux/of_irq.h>
#include <linux/soc/ti/ti_sci_protocol.h>
-#define TI_SCI_DEV_ID_MASK 0xffff
-#define TI_SCI_DEV_ID_SHIFT 16
-#define TI_SCI_IRQ_ID_MASK 0xffff
-#define TI_SCI_IRQ_ID_SHIFT 0
-#define HWIRQ_TO_DEVID(hwirq) (((hwirq) >> (TI_SCI_DEV_ID_SHIFT)) & \
- (TI_SCI_DEV_ID_MASK))
-#define HWIRQ_TO_IRQID(hwirq) ((hwirq) & (TI_SCI_IRQ_ID_MASK))
-#define TO_HWIRQ(dev, index) ((((dev) & TI_SCI_DEV_ID_MASK) << \
- TI_SCI_DEV_ID_SHIFT) | \
- ((index) & TI_SCI_IRQ_ID_MASK))
-
/**
* struct ti_sci_intr_irq_domain - Structure representing a TISCI based
* Interrupt Router IRQ domain.
* @sci: Pointer to TISCI handle
- * @dst_irq: TISCI resource pointer representing GIC irq controller.
- * @dst_id: TISCI device ID of the GIC irq controller.
+ * @out_irqs: TISCI resource pointer representing INTR irqs.
+ * @dev: Struct device pointer.
+ * @ti_sci_id: TI-SCI device identifier
* @type: Specifies the trigger type supported by this Interrupt Router
*/
struct ti_sci_intr_irq_domain {
const struct ti_sci_handle *sci;
- struct ti_sci_resource *dst_irq;
- u32 dst_id;
+ struct ti_sci_resource *out_irqs;
+ struct device *dev;
+ u32 ti_sci_id;
u32 type;
};
{
struct ti_sci_intr_irq_domain *intr = domain->host_data;
- if (fwspec->param_count != 2)
+ if (fwspec->param_count != 1)
return -EINVAL;
- *hwirq = TO_HWIRQ(fwspec->param[0], fwspec->param[1]);
+ *hwirq = fwspec->param[0];
*type = intr->type;
return 0;
}
/**
+ * ti_sci_intr_xlate_irq() - Translate hwirq to parent's hwirq.
+ * @intr: IRQ domain corresponding to Interrupt Router
+ * @irq: Hardware irq corresponding to the above irq domain
+ *
+ * Return parent irq number if translation is available, else -ENOENT.
+ */
+static int ti_sci_intr_xlate_irq(struct ti_sci_intr_irq_domain *intr, u32 irq)
+{
+ struct device_node *np = dev_of_node(intr->dev);
+	u32 base, pbase, size;
+	int len;
+ const __be32 *range;
+
+ range = of_get_property(np, "ti,interrupt-ranges", &len);
+ if (!range)
+ return irq;
+
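+	/* each triplet is <local-base parent-base size> */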
+ for (len /= sizeof(*range); len >= 3; len -= 3) {
+ base = be32_to_cpu(*range++);
+ pbase = be32_to_cpu(*range++);
+ size = be32_to_cpu(*range++);
+
+ if (base <= irq && irq < base + size)
+ return irq - base + pbase;
+ }
+
+ return -ENOENT;
+}
+
+/**
* ti_sci_intr_irq_domain_free() - Free the specified IRQs from the domain.
* @domain: Domain to which the irqs belong
* @virq: Linux virtual IRQ to be freed.
unsigned int virq, unsigned int nr_irqs)
{
struct ti_sci_intr_irq_domain *intr = domain->host_data;
- struct irq_data *data, *parent_data;
- u16 dev_id, irq_index;
+ struct irq_data *data;
+ int out_irq;
- parent_data = irq_domain_get_irq_data(domain->parent, virq);
data = irq_domain_get_irq_data(domain, virq);
- irq_index = HWIRQ_TO_IRQID(data->hwirq);
- dev_id = HWIRQ_TO_DEVID(data->hwirq);
+ out_irq = (uintptr_t)data->chip_data;
- intr->sci->ops.rm_irq_ops.free_irq(intr->sci, dev_id, irq_index,
- intr->dst_id, parent_data->hwirq);
- ti_sci_release_resource(intr->dst_irq, parent_data->hwirq);
+ intr->sci->ops.rm_irq_ops.free_irq(intr->sci,
+ intr->ti_sci_id, data->hwirq,
+ intr->ti_sci_id, out_irq);
+ ti_sci_release_resource(intr->out_irqs, out_irq);
irq_domain_free_irqs_parent(domain, virq, 1);
irq_domain_reset_irq_data(data);
}
/**
- * ti_sci_intr_alloc_gic_irq() - Allocate GIC specific IRQ
+ * ti_sci_intr_alloc_parent_irq() - Allocate parent IRQ
* @domain: Pointer to the interrupt router IRQ domain
* @virq: Corresponding Linux virtual IRQ number
* @hwirq: Corresponding hwirq for the IRQ within this IRQ domain
*
- * Returns 0 if all went well else appropriate error pointer.
+ * Returns parent irq if all went well, else an appropriate error code.
*/
-static int ti_sci_intr_alloc_gic_irq(struct irq_domain *domain,
- unsigned int virq, u32 hwirq)
+static int ti_sci_intr_alloc_parent_irq(struct irq_domain *domain,
+ unsigned int virq, u32 hwirq)
{
struct ti_sci_intr_irq_domain *intr = domain->host_data;
+ struct device_node *parent_node;
struct irq_fwspec fwspec;
- u16 dev_id, irq_index;
- u16 dst_irq;
- int err;
-
- dev_id = HWIRQ_TO_DEVID(hwirq);
- irq_index = HWIRQ_TO_IRQID(hwirq);
+	u16 out_irq;
+	int p_hwirq;
+ int err = 0;
- dst_irq = ti_sci_get_free_resource(intr->dst_irq);
- if (dst_irq == TI_SCI_RESOURCE_NULL)
+ out_irq = ti_sci_get_free_resource(intr->out_irqs);
+ if (out_irq == TI_SCI_RESOURCE_NULL)
return -EINVAL;
- fwspec.fwnode = domain->parent->fwnode;
- fwspec.param_count = 3;
- fwspec.param[0] = 0; /* SPI */
- fwspec.param[1] = dst_irq - 32; /* SPI offset */
- fwspec.param[2] = intr->type;
+ p_hwirq = ti_sci_intr_xlate_irq(intr, out_irq);
+	if (p_hwirq < 0) {
+		err = p_hwirq;
+		goto err_irqs;
+	}
+
+ parent_node = of_irq_find_parent(dev_of_node(intr->dev));
+ fwspec.fwnode = of_node_to_fwnode(parent_node);
+
+ if (of_device_is_compatible(parent_node, "arm,gic-v3")) {
+ /* Parent is GIC */
+ fwspec.param_count = 3;
+ fwspec.param[0] = 0; /* SPI */
+ fwspec.param[1] = p_hwirq - 32; /* SPI offset */
+ fwspec.param[2] = intr->type;
+ } else {
+ /* Parent is Interrupt Router */
+ fwspec.param_count = 1;
+ fwspec.param[0] = p_hwirq;
+ }
err = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
if (err)
goto err_irqs;
- err = intr->sci->ops.rm_irq_ops.set_irq(intr->sci, dev_id, irq_index,
- intr->dst_id, dst_irq);
+ err = intr->sci->ops.rm_irq_ops.set_irq(intr->sci,
+ intr->ti_sci_id, hwirq,
+ intr->ti_sci_id, out_irq);
if (err)
goto err_msg;
- return 0;
+ return p_hwirq;
err_msg:
irq_domain_free_irqs_parent(domain, virq, 1);
err_irqs:
- ti_sci_release_resource(intr->dst_irq, dst_irq);
+ ti_sci_release_resource(intr->out_irqs, out_irq);
return err;
}
struct irq_fwspec *fwspec = data;
unsigned long hwirq;
unsigned int flags;
- int err;
+ int err, p_hwirq;
err = ti_sci_intr_irq_domain_translate(domain, fwspec, &hwirq, &flags);
if (err)
return err;
- err = ti_sci_intr_alloc_gic_irq(domain, virq, hwirq);
- if (err)
- return err;
+ p_hwirq = ti_sci_intr_alloc_parent_irq(domain, virq, hwirq);
+ if (p_hwirq < 0)
+ return p_hwirq;
irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
- &ti_sci_intr_irq_chip, NULL);
+ &ti_sci_intr_irq_chip,
+ (void *)(uintptr_t)p_hwirq);
return 0;
}
if (!intr)
return -ENOMEM;
+ intr->dev = dev;
ret = of_property_read_u32(dev_of_node(dev), "ti,intr-trigger-type",
&intr->type);
if (ret) {
return ret;
}
- ret = of_property_read_u32(dev_of_node(dev), "ti,sci-dst-id",
- &intr->dst_id);
+ ret = of_property_read_u32(dev_of_node(dev), "ti,sci-dev-id",
+ &intr->ti_sci_id);
if (ret) {
- dev_err(dev, "missing 'ti,sci-dst-id' property\n");
+ dev_err(dev, "missing 'ti,sci-dev-id' property\n");
return -EINVAL;
}
- intr->dst_irq = devm_ti_sci_get_of_resource(intr->sci, dev,
- intr->dst_id,
- "ti,sci-rm-range-girq");
- if (IS_ERR(intr->dst_irq)) {
+ intr->out_irqs = devm_ti_sci_get_resource(intr->sci, dev,
+ intr->ti_sci_id,
+ TI_SCI_RESASG_SUBTYPE_IR_OUTPUT);
+ if (IS_ERR(intr->out_irqs)) {
dev_err(dev, "Destination irq resource allocation failed\n");
- return PTR_ERR(intr->dst_irq);
+ return PTR_ERR(intr->out_irqs);
}
domain = irq_domain_add_hierarchy(parent_domain, 0, 0, dev_of_node(dev),
return -ENOMEM;
}
+ dev_info(dev, "Interrupt Router %d domain created\n", intr->ti_sci_id);
+
return 0;
}
return;
default:
printk(KERN_WARNING "VIC: unknown vendor, continuing anyways\n");
- /* fall through */
+ fallthrough;
case AMBA_VENDOR_ARM:
break;
}
* interrupt controller. The actual initialization callback of this
* interrupt controller can check for specific domains as necessary.
*/
- if (par_np && !irq_find_matching_host(np, DOMAIN_BUS_ANY))
+ if (par_np && !irq_find_matching_host(par_np, DOMAIN_BUS_ANY))
return -EPROBE_DEFER;
return irq_init_cb(np, par_np);
#include <linux/irqdomain.h>
#include <linux/io.h>
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
-#include <linux/of_irq.h>
#include <linux/soc/qcom/irq.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
return ret;
}
-IRQCHIP_PLATFORM_DRIVER_BEGIN(qcom_pdc)
-IRQCHIP_MATCH("qcom,pdc", qcom_pdc_init)
-IRQCHIP_PLATFORM_DRIVER_END(qcom_pdc)
-MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Power Domain Controller");
-MODULE_LICENSE("GPL v2");
+IRQCHIP_DECLARE(qcom_pdc, "qcom,pdc", qcom_pdc_init);
switch (protocol) {
case -1: /* used for init */
bch->state = -1;
- /* fall through */
+ fallthrough;
case ISDN_P_NONE:
if (bch->state == ISDN_P_NONE)
break;
case HFC_IO_MODE_EMBSD:
test_and_set_bit(HFC_CHIP_EMBSD, &hc->chip);
hc->slots = 128; /* required */
- /* fall through */
hc->HFC_outb = HFC_outb_embsd;
hc->HFC_inb = HFC_inb_embsd;
hc->HFC_inw = HFC_inw_embsd;
case (-1): /* used for init */
bch->state = -1;
bch->nr = bc;
- /* fall through */
+ fallthrough;
case (ISDN_P_NONE):
if (bch->state == ISDN_P_NONE)
return 0;
switch (protocol) {
case (-1): /* used for init */
bch->state = -1;
- /* fall through */
+ fallthrough;
case (ISDN_P_NONE):
if (bch->state == ISDN_P_NONE)
return 0; /* already in idle state */
dsize--;
break;
}
- /* fall through */
+ fallthrough;
case HDLC_SENDFLAG_ONE:
if (hdlc->bit_shift == 8) {
hdlc->cbin = hdlc->ffvalue >>
release_card(card->sc[i]);
card->sc[i] = NULL;
}
- /* fall through */
+ fallthrough;
default:
pci_disable_device(card->pdev);
pci_set_drvdata(card->pdev, NULL);
break;
case PCTRL_CMD_FTM:
p1 = 2;
- /* fall through */
+ fallthrough;
case PCTRL_CMD_FTH:
send_mbox(ch->is, dps | ISAR_HIS_PUMPCTRL,
PCTRL_CMD_SILON, 1, &p1);
send_mbox(ch->is, dps | ISAR_HIS_PUMPCFG,
PMOD_DTMF, 1, param);
}
- /* fall through */
+ fallthrough;
case ISDN_P_B_MODEM_ASYNC:
ctrl = PMOD_DATAMODEM;
if (test_bit(FLG_ORIGIN, &ch->bch.Flags)) {
case ISDN_P_B_MODEM_ASYNC:
case ISDN_P_B_T30_FAX:
cmsb |= IOM_CTRL_RCV;
- /* fall through */
+ fallthrough;
case ISDN_P_B_L2DTMF:
if (test_bit(FLG_DTMFSEND, &ch->bch.Flags))
cmsb |= IOM_CTRL_RCV;
ich->is->name, hh->id);
ret = -EINVAL;
}
- /* fall through */
+ fallthrough;
default:
pr_info("%s: %s unknown prim(%x,%x)\n",
ich->is->name, __func__, hh->prim, hh->id);
rq.protocol = ISDN_P_NT_S0;
if (dev->Dprotocols & (1 << ISDN_P_NT_E1))
rq.protocol = ISDN_P_NT_E1;
- /* fall through */
+ fallthrough;
case ISDN_P_LAPD_TE:
ch->recv = mISDN_queue_message;
ch->peer = &dev->D.st->own;
switch (type) {
case PBLK_WRITE:
kfree(((struct pblk_c_ctx *)nvm_rq_to_pdu(rqd))->lun_bitmap);
- /* fall through */
+ fallthrough;
case PBLK_WRITE_INT:
pool = &pblk->w_rq_pool;
break;
}
if (hid->name[0])
break;
- /* else fall through */
+ fallthrough;
default:
pr_info("Trying to register unknown ADB device to input layer.\n");
break;
case SMU_I2C_TRANSFER_COMBINED:
cmd->info.devaddr &= 0xfe;
- /* fall through */
+ fallthrough;
case SMU_I2C_TRANSFER_STDSUB:
if (cmd->info.sublen > 3)
return -EINVAL;
ca->sb.njournal_buckets;
atomic_set(&ja->discard_in_flight, DISCARD_READY);
- /* fallthrough */
+ fallthrough;
case DISCARD_READY:
if (ja->discard_idx == ja->last_idx)
case 'y': \
case 'z': \
u++; \
- /* fall through */ \
+ fallthrough; \
case 'e': \
u++; \
- /* fall through */ \
+ fallthrough; \
case 'p': \
u++; \
- /* fall through */ \
+ fallthrough; \
case 't': \
u++; \
- /* fall through */ \
+ fallthrough; \
case 'g': \
u++; \
- /* fall through */ \
+ fallthrough; \
case 'm': \
u++; \
- /* fall through */ \
+ fallthrough; \
case 'k': \
u++; \
if (e++ == cp) \
return -EINVAL; \
- /* fall through */ \
+ fallthrough; \
case '\n': \
case '\0': \
if (*e == '\n') \
CACHE_MAX_CONCURRENT_LOCKS);
if (IS_ERR(cmd->bm)) {
DMERR("could not create block manager");
- return PTR_ERR(cmd->bm);
+ r = PTR_ERR(cmd->bm);
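+		/* don't cache an error pointer in cmd->bm for later abort paths */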
+ cmd->bm = NULL;
+ return r;
}
r = __open_or_format_metadata(cmd, may_format_device);
- if (r)
+ if (r) {
dm_block_manager_destroy(cmd->bm);
+ cmd->bm = NULL;
+ }
return r;
}
u8 buf[MAX_CIPHER_BLOCKSIZE] __aligned(__alignof__(__le64));
struct skcipher_request *req;
struct scatterlist src, dst;
- struct crypto_wait wait;
+ DECLARE_CRYPTO_WAIT(wait);
int err;
req = skcipher_request_alloc(any_tfm(cc), GFP_NOIO);
u8 *es, *ks, *data, *data2, *data_offset;
struct skcipher_request *req;
struct scatterlist *sg, *sg2, src, dst;
- struct crypto_wait wait;
+ DECLARE_CRYPTO_WAIT(wait);
int i, r;
req = skcipher_request_alloc(elephant->tfm, GFP_NOIO);
case -EBUSY:
wait_for_completion(&ctx->restart);
reinit_completion(&ctx->restart);
- /* fall through */
+ fallthrough;
/*
* The request is queued and processed asynchronously,
* completion function kcryptd_async_done() will be called.
range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
if (unlikely(range.logical_sector >= ic->provided_data_sectors)) {
if (ic->mode == 'B') {
+ block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
DEBUG_print("queue_delayed_work: bitmap_flush_work\n");
queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
}
goto err;
}
+ if (ic->mode == 'B') {
+ sector_t start, end;
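+			/*
+			 * This range has just been recalculated: clear its
+			 * bits, rounded to bitmap-bit granularity, so it is
+			 * no longer treated as dirty.
+			 */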
+ start = (range.logical_sector >>
+ (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) <<
+ (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
+ end = ((range.logical_sector + range.n_sectors) >>
+ (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) <<
+ (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
+ block_bitmap_op(ic, ic->recalc_bitmap, start, end - start, BITMAP_OP_CLEAR);
+ }
+
advance_and_next:
cond_resched();
static void flush_multipath_work(struct multipath *m)
{
if (m->hw_handler_name) {
- set_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
- smp_mb__after_atomic();
+ unsigned long flags;
+
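+		/* fast path: no pg_init in flight, nothing to quiesce */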
+ if (!atomic_read(&m->pg_init_in_progress))
+ goto skip;
+
+ spin_lock_irqsave(&m->lock, flags);
+ if (atomic_read(&m->pg_init_in_progress) &&
+ !test_and_set_bit(MPATHF_PG_INIT_DISABLED, &m->flags)) {
+ spin_unlock_irqrestore(&m->lock, flags);
- if (atomic_read(&m->pg_init_in_progress))
flush_workqueue(kmpath_handlerd);
- multipath_wait_for_pg_init_completion(m);
+ multipath_wait_for_pg_init_completion(m);
- clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
- smp_mb__after_atomic();
+ spin_lock_irqsave(&m->lock, flags);
+ clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
+ }
+ spin_unlock_irqrestore(&m->lock, flags);
}
-
+skip:
if (m->queue_mode == DM_TYPE_BIO_BASED)
flush_work(&m->process_queued_bios);
flush_work(&m->trigger_event);
case SCSI_DH_RETRY:
/* Wait before retrying. */
delay_retry = true;
- /* fall through */
+ fallthrough;
case SCSI_DH_IMM_RETRY:
case SCSI_DH_RES_TEMP_UNAVAIL:
if (pg_init_limit_reached(m, pgpath))
THIN_MAX_CONCURRENT_LOCKS);
if (IS_ERR(pmd->bm)) {
DMERR("could not create block manager");
- return PTR_ERR(pmd->bm);
+ r = PTR_ERR(pmd->bm);
+ pmd->bm = NULL;
+ return r;
}
r = __open_or_format_metadata(pmd, format_device);
- if (r)
+ if (r) {
dm_block_manager_destroy(pmd->bm);
+ pmd->bm = NULL;
+ }
return r;
}
}
pmd_write_lock_in_core(pmd);
- if (!dm_bm_is_read_only(pmd->bm) && !pmd->fail_io) {
+ if (!pmd->fail_io && !dm_bm_is_read_only(pmd->bm)) {
r = __commit_transaction(pmd);
if (r < 0)
DMWARN("%s: __commit_transaction() failed, error = %d",
pfn_t pfn;
int id;
struct page **pages;
+ sector_t offset;
wc->memory_vmapped = false;
goto err1;
}
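+	/*
+	 * dax_direct_access() addresses the whole DAX device; when the
+	 * cache sits on a partition, bias every access by the partition
+	 * start, which must itself be page aligned.
+	 */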
+ offset = get_start_sect(wc->ssd_dev->bdev);
+ if (offset & (PAGE_SIZE / 512 - 1)) {
+ r = -EINVAL;
+ goto err1;
+ }
+ offset >>= PAGE_SHIFT - 9;
+
id = dax_read_lock();
- da = dax_direct_access(wc->ssd_dev->dax_dev, 0, p, &wc->memory_map, &pfn);
+ da = dax_direct_access(wc->ssd_dev->dax_dev, offset, p, &wc->memory_map, &pfn);
if (da < 0) {
wc->memory_map = NULL;
r = da;
i = 0;
do {
long daa;
- daa = dax_direct_access(wc->ssd_dev->dax_dev, i, p - i,
+ daa = dax_direct_access(wc->ssd_dev->dax_dev, offset + i, p - i,
NULL, &pfn);
if (daa <= 0) {
r = daa ? daa : -EINVAL;
switch (r) {
case DM_ENDIO_REQUEUE:
error = BLK_STS_DM_REQUEUE;
- /*FALLTHRU*/
+ fallthrough;
case DM_ENDIO_DONE:
break;
case DM_ENDIO_INCOMPLETE:
pername = "raid0";
break;
}
- /* FALL THROUGH */
+ fallthrough;
case 1: /* the first device is numeric */
str = str1;
- /* FALL THROUGH */
+ fallthrough;
case 0:
md_setup_args[ent].level = LEVEL_NONE;
pername="super-block";
case 0:
md_bitmap_file_set_bit(bitmap, offset);
md_bitmap_count_page(&bitmap->counts, offset, 1);
- /* fall through */
+ fallthrough;
case 1:
*bmc = 2;
}
void *p;
int r;
- if (bm->read_only)
+ if (dm_bm_is_read_only(bm))
return -EPERM;
p = dm_bufio_read(bm->bufio, b, (struct dm_buffer **) result);
struct buffer_aux *aux;
void *p;
- if (bm->read_only)
+ if (dm_bm_is_read_only(bm))
return -EPERM;
p = dm_bufio_new(bm->bufio, b, (struct dm_buffer **) result);
int dm_bm_flush(struct dm_block_manager *bm)
{
- if (bm->read_only)
+ if (dm_bm_is_read_only(bm))
return -EPERM;
return dm_bufio_write_dirty_buffers(bm->bufio);
bool dm_bm_is_read_only(struct dm_block_manager *bm)
{
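+	/* treat a missing block manager as read-only, the fail-safe default */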
- return bm->read_only;
+ return (bm ? bm->read_only : true);
}
EXPORT_SYMBOL_GPL(dm_bm_is_read_only);
void dm_bm_set_read_only(struct dm_block_manager *bm)
{
- bm->read_only = true;
+ if (bm)
+ bm->read_only = true;
}
EXPORT_SYMBOL_GPL(dm_bm_set_read_only);
void dm_bm_set_read_write(struct dm_block_manager *bm)
{
- bm->read_only = false;
+ if (bm)
+ bm->read_only = false;
}
EXPORT_SYMBOL_GPL(dm_bm_set_read_write);
break;
}
dev = &sh->dev[s->failed_num[0]];
- /* fall through */
+ fallthrough;
case check_state_compute_result:
sh->check_state = check_state_idle;
if (!dev)
/* we have 2-disk failure */
BUG_ON(s->failed != 2);
- /* fall through */
+ fallthrough;
case check_state_compute_result:
sh->check_state = check_state_idle;
/*
* The value should not be bigger than PAGE_SIZE. It requires to
- * be multiple of DEFAULT_STRIPE_SIZE.
+ * be a multiple of DEFAULT_STRIPE_SIZE and a power of two.
*/
- if (new % DEFAULT_STRIPE_SIZE != 0 || new > PAGE_SIZE || new == 0)
+ if (new % DEFAULT_STRIPE_SIZE != 0 ||
+ new > PAGE_SIZE || new == 0 ||
+ new != roundup_pow_of_two(new))
return -EINVAL;
err = mddev_lock(mddev);
tpg->vdownsampling[1] = 1;
tpg->hdownsampling[1] = 1;
tpg->planes = 2;
- /* fall through */
+ fallthrough;
case V4L2_PIX_FMT_RGB332:
case V4L2_PIX_FMT_RGB565:
case V4L2_PIX_FMT_RGB565X:
case V4L2_PIX_FMT_YUV420M:
case V4L2_PIX_FMT_YVU420M:
tpg->buffers = 3;
- /* fall through */
+ fallthrough;
case V4L2_PIX_FMT_YUV420:
case V4L2_PIX_FMT_YVU420:
tpg->vdownsampling[1] = 2;
case V4L2_PIX_FMT_YUV422M:
case V4L2_PIX_FMT_YVU422M:
tpg->buffers = 3;
- /* fall through */
+ fallthrough;
case V4L2_PIX_FMT_YUV422P:
tpg->vdownsampling[1] = 1;
tpg->vdownsampling[2] = 1;
case V4L2_PIX_FMT_NV16M:
case V4L2_PIX_FMT_NV61M:
tpg->buffers = 2;
- /* fall through */
+ fallthrough;
case V4L2_PIX_FMT_NV16:
case V4L2_PIX_FMT_NV61:
tpg->vdownsampling[1] = 1;
case V4L2_PIX_FMT_NV12M:
case V4L2_PIX_FMT_NV21M:
tpg->buffers = 2;
- /* fall through */
+ fallthrough;
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV21:
tpg->vdownsampling[1] = 2;
case V4L2_PIX_FMT_RGB444:
case V4L2_PIX_FMT_XRGB444:
alpha = 0;
- /* fall through */
+ fallthrough;
case V4L2_PIX_FMT_YUV444:
case V4L2_PIX_FMT_ARGB444:
buf[0][offset] = (g_u_s << 4) | b_v;
break;
case V4L2_PIX_FMT_RGBX444:
alpha = 0;
- /* fall through */
+ fallthrough;
case V4L2_PIX_FMT_RGBA444:
buf[0][offset] = (b_v << 4) | (alpha >> 4);
buf[0][offset + 1] = (r_y_h << 4) | g_u_s;
break;
case V4L2_PIX_FMT_XBGR444:
alpha = 0;
- /* fall through */
+ fallthrough;
case V4L2_PIX_FMT_ABGR444:
buf[0][offset] = (g_u_s << 4) | r_y_h;
buf[0][offset + 1] = (alpha & 0xf0) | b_v;
break;
case V4L2_PIX_FMT_BGRX444:
alpha = 0;
- /* fall through */
+ fallthrough;
case V4L2_PIX_FMT_BGRA444:
buf[0][offset] = (r_y_h << 4) | (alpha >> 4);
buf[0][offset + 1] = (b_v << 4) | g_u_s;
case V4L2_PIX_FMT_RGB555:
case V4L2_PIX_FMT_XRGB555:
alpha = 0;
- /* fall through */
+ fallthrough;
case V4L2_PIX_FMT_YUV555:
case V4L2_PIX_FMT_ARGB555:
buf[0][offset] = (g_u_s << 5) | b_v;
break;
case V4L2_PIX_FMT_RGBX555:
alpha = 0;
- /* fall through */
+ fallthrough;
case V4L2_PIX_FMT_RGBA555:
buf[0][offset] = (g_u_s << 6) | (b_v << 1) |
((alpha & 0x80) >> 7);
break;
case V4L2_PIX_FMT_XBGR555:
alpha = 0;
- /* fall through */
+ fallthrough;
case V4L2_PIX_FMT_ABGR555:
buf[0][offset] = (g_u_s << 5) | r_y_h;
buf[0][offset + 1] = (alpha & 0x80) | (b_v << 2)
break;
case V4L2_PIX_FMT_BGRX555:
alpha = 0;
- /* fall through */
+ fallthrough;
case V4L2_PIX_FMT_BGRA555:
buf[0][offset] = (g_u_s << 6) | (r_y_h << 1) |
((alpha & 0x80) >> 7);
case V4L2_PIX_FMT_RGB555X:
case V4L2_PIX_FMT_XRGB555X:
alpha = 0;
- /* fall through */
+ fallthrough;
case V4L2_PIX_FMT_ARGB555X:
buf[0][offset] = (alpha & 0x80) | (r_y_h << 2) | (g_u_s >> 3);
buf[0][offset + 1] = (g_u_s << 5) | b_v;
case V4L2_PIX_FMT_HSV32:
case V4L2_PIX_FMT_XYUV32:
alpha = 0;
- /* fall through */
+ fallthrough;
case V4L2_PIX_FMT_YUV32:
case V4L2_PIX_FMT_ARGB32:
case V4L2_PIX_FMT_AYUV32:
break;
case V4L2_PIX_FMT_RGBX32:
alpha = 0;
- /* fall through */
+ fallthrough;
case V4L2_PIX_FMT_RGBA32:
buf[0][offset] = r_y_h;
buf[0][offset + 1] = g_u_s;
case V4L2_PIX_FMT_XBGR32:
case V4L2_PIX_FMT_VUYX32:
alpha = 0;
- /* fall through */
+ fallthrough;
case V4L2_PIX_FMT_ABGR32:
case V4L2_PIX_FMT_VUYA32:
buf[0][offset] = b_v;
break;
case V4L2_PIX_FMT_BGRX32:
alpha = 0;
- /* fall through */
+ fallthrough;
case V4L2_PIX_FMT_BGRA32:
buf[0][offset] = alpha;
buf[0][offset + 1] = b_v;
h->priv->ule_sndu_type_1 = 1;
h->ts_remain -= 1;
h->from_where += 1;
- /* fallthrough */
+ fallthrough;
case 0:
h->new_ts = 1;
h->ts += TS_SZ;
deb_info("attempting to download firmware\n");
if ((ret = bcm3510_init_cold(st)) < 0)
return ret;
- /* fall-through */
+ fallthrough;
case JDEC_EEPROM_LOAD_WAIT:
deb_info("firmware is loaded\n");
bcm3510_check_firmware_version(st);
if (state->identity.p1g)
state->dc = dc_p1g_table;
- /* fall through */
+ fallthrough;
case CT_TUNER_STEP_0:
dprintk("Start/continue DC calibration for %s path\n",
(state->dc->i == 1) ? "I" : "Q");
switch (c->hierarchy) {
case HIERARCHY_NONE:
deb_setf("hierarchy: none\n");
- /* fall through */
+ fallthrough;
case HIERARCHY_1:
deb_setf("hierarchy: alpha=1\n");
wr(DIB3000MB_REG_VIT_ALPHA, DIB3000_ALPHA_1);
if (state->version != SOC7090)
reg_1280 &= ~((1 << 11));
reg_1280 &= ~(1 << 6);
- /* fall-through */
+ fallthrough;
case DIB7000P_POWER_INTERFACE_ONLY:
/* just leave power on the control-interfaces: GPIO and (I2C or SDIO) */
/* TODO power up either SDIO or I2C */
pr_err("error %d\n", rc);
goto rw_error;
}
- /* fallthrough */
+ fallthrough;
case SIO_HI_RA_RAM_CMD_BRDCTRL:
rc = drxj_dap_write_reg16(dev_addr, SIO_HI_RA_RAM_PAR_2__A, cmd->param2, 0);
if (rc != 0) {
pr_err("error %d\n", rc);
goto rw_error;
}
- /* fallthrough */
+ fallthrough;
case SIO_HI_RA_RAM_CMD_NULL:
/* No parameters */
break;
/* coef = 188/204 */
max_bit_rate =
(ext_attr->curr_symbol_rate / 8) * nr_bits * 188;
- /* fall-through - as b/c Annex A/C need following settings */
+ fallthrough; /* as b/c Annex A/C need following settings */
case DRX_STANDARD_ITU_B:
rc = drxj_dap_write_reg16(dev_addr, FEC_OC_FCT_USAGE__A, FEC_OC_FCT_USAGE__PRE, 0);
if (rc != 0) {
if (!ext_attr->has_smatx)
return -EIO;
switch (uio_cfg->mode) {
- case DRX_UIO_MODE_FIRMWARE_SMA: /* fall through */
- case DRX_UIO_MODE_FIRMWARE_SAW: /* fall through */
+ case DRX_UIO_MODE_FIRMWARE_SMA:
+ case DRX_UIO_MODE_FIRMWARE_SAW:
case DRX_UIO_MODE_READWRITE:
ext_attr->uio_sma_tx_mode = uio_cfg->mode;
break;
if (!ext_attr->has_smarx)
return -EIO;
switch (uio_cfg->mode) {
- case DRX_UIO_MODE_FIRMWARE0: /* fall through */
+ case DRX_UIO_MODE_FIRMWARE0:
case DRX_UIO_MODE_READWRITE:
ext_attr->uio_sma_rx_mode = uio_cfg->mode;
break;
if (!ext_attr->has_gpio)
return -EIO;
switch (uio_cfg->mode) {
- case DRX_UIO_MODE_FIRMWARE0: /* fall through */
+ case DRX_UIO_MODE_FIRMWARE0:
case DRX_UIO_MODE_READWRITE:
ext_attr->uio_gpio_mode = uio_cfg->mode;
break;
}
ext_attr->uio_irqn_mode = uio_cfg->mode;
break;
- case DRX_UIO_MODE_FIRMWARE0: /* fall through */
+ case DRX_UIO_MODE_FIRMWARE0:
default:
return -EINVAL;
break;
if (rc != 0) {
pr_err("error %d\n", rc);
goto rw_error;
- } /* fallthrough */
+ }
+ fallthrough;
case 4:
rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_PARAM_3__A, *(cmd->parameter + 3), 0);
if (rc != 0) {
pr_err("error %d\n", rc);
goto rw_error;
- } /* fallthrough */
+ }
+ fallthrough;
case 3:
rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_PARAM_2__A, *(cmd->parameter + 2), 0);
if (rc != 0) {
pr_err("error %d\n", rc);
goto rw_error;
- } /* fallthrough */
+ }
+ fallthrough;
case 2:
rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_PARAM_1__A, *(cmd->parameter + 1), 0);
if (rc != 0) {
pr_err("error %d\n", rc);
goto rw_error;
- } /* fallthrough */
+ }
+ fallthrough;
case 1:
rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_PARAM_0__A, *(cmd->parameter + 0), 0);
if (rc != 0) {
pr_err("error %d\n", rc);
goto rw_error;
- } /* fallthrough */
+ }
+ fallthrough;
case 0:
/* do nothing */
break;
if (rc != 0) {
pr_err("error %d\n", rc);
goto rw_error;
- } /* fallthrough */
+ }
+ fallthrough;
case 3:
rc = drxj_dap_read_reg16(dev_addr, SCU_RAM_PARAM_2__A, cmd->result + 2, 0);
if (rc != 0) {
pr_err("error %d\n", rc);
goto rw_error;
- } /* fallthrough */
+ }
+ fallthrough;
case 2:
rc = drxj_dap_read_reg16(dev_addr, SCU_RAM_PARAM_1__A, cmd->result + 1, 0);
if (rc != 0) {
pr_err("error %d\n", rc);
goto rw_error;
- } /* fallthrough */
+ }
+ fallthrough;
case 1:
rc = drxj_dap_read_reg16(dev_addr, SCU_RAM_PARAM_0__A, cmd->result + 0, 0);
if (rc != 0) {
pr_err("error %d\n", rc);
goto rw_error;
- } /* fallthrough */
+ }
+ fallthrough;
case 0:
/* do nothing */
break;
Sound carrier is already 3Mhz above centre frequency due
to tuner setting so now add an extra shift of 1MHz... */
fm_frequency_shift = 1000;
- /*fall through */
+ fallthrough;
case DRX_STANDARD_ITU_B:
case DRX_STANDARD_NTSC:
case DRX_STANDARD_PAL_SECAM_BG:
(standard == DRX_STANDARD_NTSC)) {
switch (channel->bandwidth) {
case DRX_BANDWIDTH_6MHZ:
- case DRX_BANDWIDTH_UNKNOWN: /* fall through */
+ case DRX_BANDWIDTH_UNKNOWN:
channel->bandwidth = DRX_BANDWIDTH_6MHZ;
break;
- case DRX_BANDWIDTH_8MHZ: /* fall through */
- case DRX_BANDWIDTH_7MHZ: /* fall through */
+ case DRX_BANDWIDTH_8MHZ:
+ case DRX_BANDWIDTH_7MHZ:
default:
return -EINVAL;
}
}
switch (channel->constellation) {
- case DRX_CONSTELLATION_QAM16: /* fall through */
- case DRX_CONSTELLATION_QAM32: /* fall through */
- case DRX_CONSTELLATION_QAM64: /* fall through */
- case DRX_CONSTELLATION_QAM128: /* fall through */
+ case DRX_CONSTELLATION_QAM16:
+ case DRX_CONSTELLATION_QAM32:
+ case DRX_CONSTELLATION_QAM64:
+ case DRX_CONSTELLATION_QAM128:
case DRX_CONSTELLATION_QAM256:
bandwidth_temp = channel->symbolrate * bw_rolloff_factor;
bandwidth = bandwidth_temp / 100;
}
break;
#ifndef DRXJ_VSB_ONLY
- case DRX_STANDARD_ITU_A: /* fallthrough */
- case DRX_STANDARD_ITU_B: /* fallthrough */
+ case DRX_STANDARD_ITU_A:
+ case DRX_STANDARD_ITU_B:
case DRX_STANDARD_ITU_C:
rc = set_qam_channel(demod, channel, tuner_freq_offset);
if (rc != 0) {
SCU_RAM_COMMAND_CMD_DEMOD_GET_LOCK;
break;
#endif
- case DRX_STANDARD_UNKNOWN: /* fallthrough */
+ case DRX_STANDARD_UNKNOWN:
default:
return -EIO;
}
*/
switch (prev_standard) {
#ifndef DRXJ_VSB_ONLY
- case DRX_STANDARD_ITU_A: /* fallthrough */
- case DRX_STANDARD_ITU_B: /* fallthrough */
+ case DRX_STANDARD_ITU_A:
+ case DRX_STANDARD_ITU_B:
case DRX_STANDARD_ITU_C:
rc = power_down_qam(demod, false);
if (rc != 0) {
case DRX_STANDARD_UNKNOWN:
/* Do nothing */
break;
- case DRX_STANDARD_AUTO: /* fallthrough */
+ case DRX_STANDARD_AUTO:
default:
return -EINVAL;
}
switch (*standard) {
#ifndef DRXJ_VSB_ONLY
- case DRX_STANDARD_ITU_A: /* fallthrough */
- case DRX_STANDARD_ITU_B: /* fallthrough */
+ case DRX_STANDARD_ITU_A:
+ case DRX_STANDARD_ITU_B:
case DRX_STANDARD_ITU_C:
do {
u16 dummy;
goto rw_error;
}
break;
- case DRX_STANDARD_PAL_SECAM_BG: /* fallthrough */
- case DRX_STANDARD_PAL_SECAM_DK: /* fallthrough */
- case DRX_STANDARD_PAL_SECAM_I: /* fallthrough */
- case DRX_STANDARD_PAL_SECAM_L: /* fallthrough */
- case DRX_STANDARD_PAL_SECAM_LP: /* fallthrough */
- case DRX_STANDARD_NTSC: /* fallthrough */
+ case DRX_STANDARD_PAL_SECAM_BG:
+ case DRX_STANDARD_PAL_SECAM_DK:
+ case DRX_STANDARD_PAL_SECAM_I:
+ case DRX_STANDARD_PAL_SECAM_L:
+ case DRX_STANDARD_PAL_SECAM_LP:
+ case DRX_STANDARD_NTSC:
case DRX_STANDARD_FM:
rc = power_down_atv(demod, ext_attr->standard, true);
if (rc != 0) {
case DRX_STANDARD_UNKNOWN:
/* Do nothing */
break;
- case DRX_STANDARD_AUTO: /* fallthrough */
+ case DRX_STANDARD_AUTO:
default:
return -EIO;
}
ext_attr->vsb_pre_saw_cfg = *pre_saw;
break;
#ifndef DRXJ_VSB_ONLY
- case DRX_STANDARD_ITU_A: /* fallthrough */
- case DRX_STANDARD_ITU_B: /* fallthrough */
+ case DRX_STANDARD_ITU_A:
+ case DRX_STANDARD_ITU_B:
case DRX_STANDARD_ITU_C:
ext_attr->qam_pre_saw_cfg = *pre_saw;
break;
ext_attr = (struct drxj_data *) demod->my_ext_attr;
switch (afe_gain->standard) {
- case DRX_STANDARD_8VSB: /* fallthrough */
+ case DRX_STANDARD_8VSB: fallthrough;
#ifndef DRXJ_VSB_ONLY
- case DRX_STANDARD_ITU_A: /* fallthrough */
- case DRX_STANDARD_ITU_B: /* fallthrough */
+ case DRX_STANDARD_ITU_A:
+ case DRX_STANDARD_ITU_B:
case DRX_STANDARD_ITU_C:
#endif
/* Do nothing */
ext_attr->vsb_pga_cfg = gain * 13 + 140;
break;
#ifndef DRXJ_VSB_ONLY
- case DRX_STANDARD_ITU_A: /* fallthrough */
- case DRX_STANDARD_ITU_B: /* fallthrough */
+ case DRX_STANDARD_ITU_A:
+ case DRX_STANDARD_ITU_B:
case DRX_STANDARD_ITU_C:
ext_attr->qam_pga_cfg = gain * 13 + 140;
break;
switch (deviceId) {
case 4:
state->diversity = 1;
- /* fall through */
+ fallthrough;
case 3:
case 7:
state->PGA = 1;
break;
case 6:
state->diversity = 1;
- /* fall through */
+ fallthrough;
case 5:
case 8:
break;
switch (p->transmission_mode) {
default: /* Not set, detect it automatically */
operationMode |= SC_RA_RAM_OP_AUTO_MODE__M;
- /* fall through - try first guess DRX_FFTMODE_8K */
+ fallthrough; /* try first guess DRX_FFTMODE_8K */
case TRANSMISSION_MODE_8K:
transmissionParams |= SC_RA_RAM_OP_PARAM_MODE_8K;
if (state->type_A) {
switch (p->modulation) {
default:
operationMode |= SC_RA_RAM_OP_AUTO_CONST__M;
- /* fall through - try first guess DRX_CONSTELLATION_QAM64 */
+ fallthrough; /* try first guess DRX_CONSTELLATION_QAM64 */
case QAM_64:
transmissionParams |= SC_RA_RAM_OP_PARAM_CONST_QAM64;
if (state->type_A) {
break;
default:
operationMode |= SC_RA_RAM_OP_AUTO_RATE__M;
- /* fall through */
+ fallthrough;
case FEC_2_3:
transmissionParams |= SC_RA_RAM_OP_PARAM_RATE_2_3;
if (state->type_A)
switch (p->bandwidth_hz) {
case 0:
p->bandwidth_hz = 8000000;
- /* fall through */
+ fallthrough;
case 8000000:
/* (64/7)*(8/8)*1000000 */
bandwidth = DRXD_BANDWIDTH_8MHZ_IN_HZ;
goto error;
state->m_operation_mode = OM_NONE;
break;
- case OM_QAM_ITU_A: /* fallthrough */
+ case OM_QAM_ITU_A:
case OM_QAM_ITU_C:
status = mpegts_stop(state);
if (status < 0)
if (status < 0)
goto error;
break;
- case OM_QAM_ITU_A: /* fallthrough */
+ case OM_QAM_ITU_A:
case OM_QAM_ITU_C:
dprintk(1, ": DVB-C Annex %c\n",
(state->m_operation_mode == OM_QAM_ITU_A) ? 'A' : 'C');
fec_oc_rcn_ctl_rate = 0xC00000;
static_clk = state->m_dvbt_static_clk;
break;
- case OM_QAM_ITU_A: /* fallthrough */
+ case OM_QAM_ITU_A:
case OM_QAM_ITU_C:
fec_oc_tmd_mode = 0x0004;
fec_oc_rcn_ctl_rate = 0xD2B4EE; /* good for >63 Mb/s */
case OFDM_SC_RA_RAM_CMD_SET_PREF_PARAM:
case OFDM_SC_RA_RAM_CMD_PROGRAM_PARAM:
status |= write16(state, OFDM_SC_RA_RAM_PARAM1__A, param1);
- /* fall through - All commands using 1 parameters */
+		fallthrough;	/* All commands using 1 parameter */
case OFDM_SC_RA_RAM_CMD_SET_ECHO_TIMING:
case OFDM_SC_RA_RAM_CMD_USER_IO:
status |= write16(state, OFDM_SC_RA_RAM_PARAM0__A, param0);
- /* fall through - All commands using 0 parameters */
+ fallthrough; /* All commands using 0 parameters */
case OFDM_SC_RA_RAM_CMD_GET_OP_PARAM:
case OFDM_SC_RA_RAM_CMD_NULL:
/* Write command */
case TRANSMISSION_MODE_AUTO:
default:
operation_mode |= OFDM_SC_RA_RAM_OP_AUTO_MODE__M;
- /* fall through - try first guess DRX_FFTMODE_8K */
+ fallthrough; /* try first guess DRX_FFTMODE_8K */
case TRANSMISSION_MODE_8K:
transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_MODE_8K;
break;
default:
case GUARD_INTERVAL_AUTO:
operation_mode |= OFDM_SC_RA_RAM_OP_AUTO_GUARD__M;
- /* fall through - try first guess DRX_GUARD_1DIV4 */
+ fallthrough; /* try first guess DRX_GUARD_1DIV4 */
case GUARD_INTERVAL_1_4:
transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_GUARD_4;
break;
operation_mode |= OFDM_SC_RA_RAM_OP_AUTO_HIER__M;
/* try first guess SC_RA_RAM_OP_PARAM_HIER_NO */
/* transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_HIER_NO; */
- /* fall through */
+ fallthrough;
case HIERARCHY_1:
transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_HIER_A1;
break;
case QAM_AUTO:
default:
operation_mode |= OFDM_SC_RA_RAM_OP_AUTO_CONST__M;
- /* fall through - try first guess DRX_CONSTELLATION_QAM64 */
+ fallthrough; /* try first guess DRX_CONSTELLATION_QAM64 */
case QAM_64:
transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_CONST_QAM64;
break;
WR16(dev_addr, OFDM_EC_SB_PRIOR__A,
OFDM_EC_SB_PRIOR_HI));
break;
- case DRX_PRIORITY_UNKNOWN: /* fall through */
+ case DRX_PRIORITY_UNKNOWN:
default:
status = -EINVAL;
goto error;
case FEC_AUTO:
default:
operation_mode |= OFDM_SC_RA_RAM_OP_AUTO_RATE__M;
- /* fall through - try first guess DRX_CODERATE_2DIV3 */
+ fallthrough; /* try first guess DRX_CODERATE_2DIV3 */
case FEC_2_3:
transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_RATE_2_3;
break;
switch (state->props.bandwidth_hz) {
case 0:
state->props.bandwidth_hz = 8000000;
- /* fall through */
+ fallthrough;
case 8000000:
bandwidth = DRXK_BANDWIDTH_8MHZ_IN_HZ;
status = write16(state, OFDM_SC_RA_RAM_SRMM_FIX_FACT_8K__A,
default:
pr_warn("IF=%d KHz is not supported, 3250 assumed\n",
if_freq_khz);
- /* fallthrough */
+ fallthrough;
case 3250: /* 3.25Mhz */
nco1 = 0x34;
nco2 = 0x00;
if (op->hierarchy == HIERARCHY_AUTO ||
op->hierarchy == HIERARCHY_NONE)
break;
- /* fall through */
+ fallthrough;
default:
return -EINVAL;
}
default:
break;
}
- /* Fall through */
+ fallthrough;
case SYS_DVBS:
switch ((enum MXL_HYDRA_MODULATION_E)
reg_data[DMD_MODULATION_SCHEME_ADDR]) {
switch (reg&0xff) {
case 0x06:
if (reg & 0x1000) usK = 3 << 24;
- /* fall through */
+ fallthrough;
case 0x43: /* QAM64 */
c = 150204167;
break;
default:
dprintk("%s(%d KHz) Invalid, defaulting to 5380\n",
__func__, KHz);
- /* fall through */
+ fallthrough;
case 5380:
case 44000:
s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x38, 0x1be4);
break;
default:
c->bandwidth_hz = 8000000;
- /* fall through */
+ fallthrough;
case 8000000:
zl10353_single_write(fe, MCLK_RATIO, 0x75);
zl10353_single_write(fe, 0x64, 0x36);
if (c->hierarchy == HIERARCHY_AUTO ||
c->hierarchy == HIERARCHY_NONE)
break;
- /* fall through */
+ fallthrough;
default:
return -EINVAL;
}
config VIDEO_MAX9286
tristate "Maxim MAX9286 GMSL deserializer support"
depends on I2C && I2C_MUX
- depends on OF
+ depends on OF_GPIO
select V4L2_FWNODE
select VIDEO_V4L2_SUBDEV_API
select MEDIA_CONTROLLER
config VIDEO_IMX214
tristate "Sony IMX214 sensor support"
depends on GPIOLIB && I2C && VIDEO_V4L2
- depends on V4L2_FWNODE
+ select V4L2_FWNODE
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
select REGMAP_I2C
ts2->gen_ctrl_val = 0xc; /* Serial bus + punctured clock */
ts2->ts_clk_en_val = 0x1; /* Enable TS_CLK */
ts2->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
- /* fall-through */
+ fallthrough;
case CX23885_BOARD_DVICO_FUSIONHDTV_5_EXP:
ts1->gen_ctrl_val = 0xc; /* Serial bus + punctured clock */
ts1->ts_clk_en_val = 0x1; /* Enable TS_CLK */
/* Currently only enabled for the integrated IR controller */
if (!enable_885_ir)
break;
- /* fall-through */
+ fallthrough;
case CX23885_BOARD_HAUPPAUGE_HVR1250:
case CX23885_BOARD_HAUPPAUGE_HVR1800:
case CX23885_BOARD_HAUPPAUGE_IMPACTVCBE:
dvb_unregister_frontend(dvb->fe2);
if (dvb->fe)
dvb_unregister_frontend(dvb->fe);
- /* fallthrough */
+ fallthrough;
case 0x30:
dvb_module_release(dvb->i2c_client[0]);
dvb->i2c_client[0] = NULL;
dvb_frontend_detach(dvb->fe);
dvb->fe = NULL;
dvb->fe2 = NULL;
- /* fallthrough */
+ fallthrough;
case 0x20:
dvb_net_release(&dvb->dvbnet);
- /* fallthrough */
+ fallthrough;
case 0x12:
dvbdemux->dmx.remove_frontend(&dvbdemux->dmx,
&dvb->hw_frontend);
dvbdemux->dmx.remove_frontend(&dvbdemux->dmx,
&dvb->mem_frontend);
- /* fallthrough */
+ fallthrough;
case 0x11:
dvb_dmxdev_release(&dvb->dmxdev);
- /* fallthrough */
+ fallthrough;
case 0x10:
dvb_dmx_release(&dvb->demux);
- /* fallthrough */
+ fallthrough;
case 0x01:
break;
}
osc24 = 0;
else
osc24 = 1;
- /* fall-through */
+ fallthrough;
case DDB_TUNER_DVBCT2_SONY_P:
case DDB_TUNER_DVBC2T2_SONY_P:
case DDB_TUNER_ISDBT_SONY_P:
break;
case DDB_TUNER_DVBC2T2I_SONY:
osc24 = 1;
- /* fall-through */
+ fallthrough;
case DDB_TUNER_DVBCT2_SONY:
case DDB_TUNER_DVBC2T2_SONY:
case DDB_TUNER_ISDBT_SONY:
ret = ddb_ci_attach(port, ci_bitrate);
if (ret < 0)
break;
- /* fall-through */
+ fallthrough;
case DDB_PORT_LOOP:
ret = dvb_register_device(port->dvb[0].adap,
&port->dvb[0].dev,
ddb_input_init(port, 4 + i, 1, 4 + i);
ddb_output_init(port, i);
break;
- } /* fallthrough */
+ }
+ fallthrough;
case DDB_OCTOPUS:
ddb_input_init(port, 2 * i, 0, 2 * i);
ddb_input_init(port, 2 * i + 1, 1, 2 * i + 1);
default:
case 2:
destroy_workqueue(ddb_wq);
- /* fall-through */
+ fallthrough;
case 1:
ddb_class_destroy();
break;
mutex_unlock(&meye.lock);
return -EINTR;
}
- /* fall through */
+ fallthrough;
case MEYE_BUF_DONE:
meye.grab_buffer[*i].state = MEYE_BUF_UNUSED;
if (kfifo_out_locked(&meye.doneq, (unsigned char *)&unused,
iwdebi(av7110, DEBINOSWAP, RX_BUFF, 0, 2);
break;
}
- /* fall through */
+ fallthrough;
case DATA_TS_RECORD:
case DATA_PES_RECORD:
break;
}
}
- /* fall-thru */
+ fallthrough;
case 0x0008: // Hauppauge/TT DVB-T
// Grundig 29504-401
break;
case OSD_SetRow:
dc->y1 = dc->y0;
- /* fall through */
+ fallthrough;
case OSD_SetBlock:
ret = OSDSetBlock(av7110, dc->x0, dc->y0, dc->x1, dc->y1, dc->color, dc->data);
break;
case DSM_CC_STREAM :
case ISO13522_STREAM:
p->done = 1;
- /* fall through */
+ fallthrough;
case PRIVATE_STREAM1:
case VIDEO_STREAM_S ... VIDEO_STREAM_E:
case AUDIO_STREAM_S ... AUDIO_STREAM_E:
* but so far it has been only confirmed for this type
*/
budget_av->reinitialise_demod = 1;
- /* fall through */
+ fallthrough;
case SUBID_DVBS_KNC1_PLUS:
case SUBID_DVBS_EASYWATCH_1:
if (saa->pci->subsystem_vendor == 0x1894) {
break;
}
}
- /* fall through */
+ fallthrough;
case 0x1018: // TT Budget-S-1401 (philips tda10086/philips tda8262)
{
struct dvb_frontend *fe;
break;
}
}
- /* fall through */
+ fallthrough;
case 0x101c: { /* TT S2-1600 */
const struct stv6110x_devctl *ctl;
break;
case V4L2_PIX_FMT_RGB565:
dataswap ^= 1;
- /* fall through */
+ fallthrough;
case V4L2_PIX_FMT_RGB565X:
row_coeff = 2;
break;
default:
pr_warn("%s(): Invalid bus-format code %d, using default 8-bit\n",
__func__, bus_fmt);
- /* fall through */
+ fallthrough;
case SH_VOU_BUS_8BIT:
return 1;
case SH_VOU_BUS_16BIT:
u32 val = cal_read(cal, offset);
val &= ~mask;
- val |= FIELD_PREP(mask, value);
+ val |= (value << __ffs(mask)) & mask;
cal_write(cal, offset, val);
}
si476x_phase_diversity_mode_to_idx(enum si476x_phase_diversity_mode mode)
{
switch (mode) {
- default: /* FALLTHROUGH */
+ default:
+ fallthrough;
case SI476X_PHDIV_DISABLED:
return SI476X_IDX_PHDIV_DISABLED;
case SI476X_PHDIV_PRIMARY_COMBINING:
index = BAND_AM;
break;
}
- /* Fall through */
+ fallthrough;
default:
return -EINVAL;
}
case BPF_FUNC_trace_printk:
if (perfmon_capable())
return bpf_get_trace_printk_proto();
- /* fall through */
+ fallthrough;
default:
return NULL;
}
struct gpio_desc *gpio;
unsigned int carrier;
unsigned int duty_cycle;
- /* we need a spinlock to hold the cpu while transmitting */
- spinlock_t lock;
};
static const struct of_device_id gpio_ir_tx_of_match[] = {
static void gpio_ir_tx_unmodulated(struct gpio_ir *gpio_ir, uint *txbuf,
uint count)
{
- unsigned long flags;
ktime_t edge;
s32 delta;
int i;
- spin_lock_irqsave(&gpio_ir->lock, flags);
+ local_irq_disable();
edge = ktime_get();
}
gpiod_set_value(gpio_ir->gpio, 0);
-
- spin_unlock_irqrestore(&gpio_ir->lock, flags);
}
static void gpio_ir_tx_modulated(struct gpio_ir *gpio_ir, uint *txbuf,
uint count)
{
- unsigned long flags;
ktime_t edge;
/*
* delta should never exceed 0.5 seconds (IR_MAX_DURATION) and on
space = DIV_ROUND_CLOSEST((100 - gpio_ir->duty_cycle) *
(NSEC_PER_SEC / 100), gpio_ir->carrier);
- spin_lock_irqsave(&gpio_ir->lock, flags);
+ local_irq_disable();
edge = ktime_get();
edge = last;
}
}
-
- spin_unlock_irqrestore(&gpio_ir->lock, flags);
}
static int gpio_ir_tx(struct rc_dev *dev, unsigned int *txbuf,
unsigned int count)
{
struct gpio_ir *gpio_ir = dev->priv;
+ unsigned long flags;
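+
+	/*
+	 * Bit-banged IR timing is tight: disable interrupts for the whole
+	 * transmission rather than holding a spinlock just for this.
+	 */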
+ local_irq_save(flags);
if (gpio_ir->carrier)
gpio_ir_tx_modulated(gpio_ir, txbuf, count);
else
gpio_ir_tx_unmodulated(gpio_ir, txbuf, count);
+ local_irq_restore(flags);
return count;
}
gpio_ir->carrier = 38000;
gpio_ir->duty_cycle = 50;
- spin_lock_init(&gpio_ir->lock);
rc = devm_rc_register_device(&pdev->dev, rcdev);
if (rc < 0)
case 6:
if (!data->toggle)
return RC6_MODE_6A;
- /* fall through */
+ fallthrough;
default:
return RC6_MODE_UNKNOWN;
}
}
data->state = STATE_FINISHED;
- /* Fall through */
+ fallthrough;
case STATE_FINISHED:
if (ev.pulse)
goto mem_alloc_fail;
ir->pipe_in = pipe;
- ir->buf_in = usb_alloc_coherent(dev, maxp, GFP_ATOMIC, &ir->dma_in);
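+	/* this allocation happens in a context that may sleep, so GFP_KERNEL suffices */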
+ ir->buf_in = usb_alloc_coherent(dev, maxp, GFP_KERNEL, &ir->dma_in);
if (!ir->buf_in)
goto buf_in_alloc_fail;
}
mutex_lock(&dev->lock);
+ if (!dev->registered) {
+ mutex_unlock(&dev->lock);
+ return -ENODEV;
+ }
old_protocols = *current_protocols;
new_protocols = old_protocols;
return -EINVAL;
mutex_lock(&dev->lock);
+ if (!dev->registered) {
+ mutex_unlock(&dev->lock);
+ return -ENODEV;
+ }
new_filter = *filter;
if (fattr->mask)
int i;
mutex_lock(&dev->lock);
+ if (!dev->registered) {
+ mutex_unlock(&dev->lock);
+ return -ENODEV;
+ }
allowed = dev->allowed_wakeup_protocols;
kfree(dev);
}
-#define ADD_HOTPLUG_VAR(fmt, val...) \
- do { \
- int err = add_uevent_var(env, fmt, val); \
- if (err) \
- return err; \
- } while (0)
-
static int rc_dev_uevent(struct device *device, struct kobj_uevent_env *env)
{
struct rc_dev *dev = to_rc_dev(device);
+ int ret = 0;
- if (dev->rc_map.name)
- ADD_HOTPLUG_VAR("NAME=%s", dev->rc_map.name);
- if (dev->driver_name)
- ADD_HOTPLUG_VAR("DRV_NAME=%s", dev->driver_name);
- if (dev->device_name)
- ADD_HOTPLUG_VAR("DEV_NAME=%s", dev->device_name);
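+	/* uevents can race with rc_unregister_device(); check under dev->lock */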
+ mutex_lock(&dev->lock);
- return 0;
+ if (!dev->registered)
+ ret = -ENODEV;
+ if (ret == 0 && dev->rc_map.name)
+ ret = add_uevent_var(env, "NAME=%s", dev->rc_map.name);
+ if (ret == 0 && dev->driver_name)
+ ret = add_uevent_var(env, "DRV_NAME=%s", dev->driver_name);
+ if (ret == 0 && dev->device_name)
+ ret = add_uevent_var(env, "DEV_NAME=%s", dev->device_name);
+
+ mutex_unlock(&dev->lock);
+
+ return ret;
}
/*
del_timer_sync(&dev->timer_keyup);
del_timer_sync(&dev->timer_repeat);
- rc_free_rx_device(dev);
-
mutex_lock(&dev->lock);
if (dev->users && dev->close)
dev->close(dev);
dev->registered = false;
mutex_unlock(&dev->lock);
+ rc_free_rx_device(dev);
+
/*
* lirc device should be freed with dev->registered = false, so
* that userspace polling will get notified.
}
ctrl = v4l2_ctrl_request_hdl_ctrl_find(hdl,
vicodec_ctrl_stateless_state.id);
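+	/* drop the request-handler reference taken by the earlier find */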
+ v4l2_ctrl_request_hdl_put(hdl);
if (!ctrl) {
v4l2_info(&ctx->dev->v4l2_dev,
"Missing required codec control\n");
if (!bw)
bw = 6000000;
/* fall to OFDM handling */
- /* fall through */
+ fallthrough;
case SYS_DMBTH:
case SYS_DVBT:
case SYS_DVBT2:
case USB_SPEED_HIGH:
info("running at HIGH speed.");
break;
- case USB_SPEED_UNKNOWN: /* fall through */
+ case USB_SPEED_UNKNOWN:
default:
err("cannot handle USB speed because it is unknown.");
return -ENODEV;
break;
case CPIA2_CMD_SET_VP_BRIGHTNESS:
cmd.buffer.block_data[0] = param;
- /* fall through */
+ fallthrough;
case CPIA2_CMD_GET_VP_BRIGHTNESS:
cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP;
cmd.reg_count = 1;
break;
case CPIA2_CMD_SET_CONTRAST:
cmd.buffer.block_data[0] = param;
- /* fall through */
+ fallthrough;
case CPIA2_CMD_GET_CONTRAST:
cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP;
cmd.reg_count = 1;
break;
case CPIA2_CMD_SET_VP_SATURATION:
cmd.buffer.block_data[0] = param;
- /* fall through */
+ fallthrough;
case CPIA2_CMD_GET_VP_SATURATION:
cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP;
cmd.reg_count = 1;
break;
case CPIA2_CMD_SET_VP_GPIO_DATA:
cmd.buffer.block_data[0] = param;
- /* fall through */
+ fallthrough;
case CPIA2_CMD_GET_VP_GPIO_DATA:
cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP;
cmd.reg_count = 1;
break;
case CPIA2_CMD_SET_VP_GPIO_DIRECTION:
cmd.buffer.block_data[0] = param;
- /* fall through */
+ fallthrough;
case CPIA2_CMD_GET_VP_GPIO_DIRECTION:
cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP;
cmd.reg_count = 1;
break;
case CPIA2_CMD_SET_VC_MP_GPIO_DATA:
cmd.buffer.block_data[0] = param;
- /* fall through */
+ fallthrough;
case CPIA2_CMD_GET_VC_MP_GPIO_DATA:
cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VC;
cmd.reg_count = 1;
break;
case CPIA2_CMD_SET_VC_MP_GPIO_DIRECTION:
cmd.buffer.block_data[0] = param;
- /*fall through */
+ fallthrough;
case CPIA2_CMD_GET_VC_MP_GPIO_DIRECTION:
cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VC;
cmd.reg_count = 1;
break;
case CPIA2_CMD_SET_FLICKER_MODES:
cmd.buffer.block_data[0] = param;
- /* fall through */
+ fallthrough;
case CPIA2_CMD_GET_FLICKER_MODES:
cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP;
cmd.reg_count = 1;
break;
case CPIA2_CMD_SET_USER_MODE:
cmd.buffer.block_data[0] = param;
- /* fall through */
+ fallthrough;
case CPIA2_CMD_GET_USER_MODE:
cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP;
cmd.reg_count = 1;
break;
case CPIA2_CMD_SET_WAKEUP:
cmd.buffer.block_data[0] = param;
- /* fall through */
+ fallthrough;
case CPIA2_CMD_GET_WAKEUP:
cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VC;
cmd.reg_count = 1;
break;
case CPIA2_CMD_SET_PW_CONTROL:
cmd.buffer.block_data[0] = param;
- /* fall through */
+ fallthrough;
case CPIA2_CMD_GET_PW_CONTROL:
cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VC;
cmd.reg_count = 1;
break;
case CPIA2_CMD_SET_SYSTEM_CTRL:
cmd.buffer.block_data[0] = param;
- /* fall through */
+ fallthrough;
case CPIA2_CMD_GET_SYSTEM_CTRL:
cmd.req_mode =
CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_SYSTEM;
break;
case CPIA2_CMD_SET_VP_SYSTEM_CTRL:
cmd.buffer.block_data[0] = param;
- /* fall through */
+ fallthrough;
case CPIA2_CMD_GET_VP_SYSTEM_CTRL:
cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP;
cmd.reg_count = 1;
break;
case CPIA2_CMD_SET_VP_EXP_MODES:
cmd.buffer.block_data[0] = param;
- /* fall through */
+ fallthrough;
case CPIA2_CMD_GET_VP_EXP_MODES:
cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP;
cmd.reg_count = 1;
break;
case CPIA2_CMD_SET_DEVICE_CONFIG:
cmd.buffer.block_data[0] = param;
- /* fall through */
+ fallthrough;
case CPIA2_CMD_GET_DEVICE_CONFIG:
cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP;
cmd.reg_count = 1;
break;
case CPIA2_CMD_SET_VC_CONTROL:
cmd.buffer.block_data[0] = param;
- /* fall through */
+ fallthrough;
case CPIA2_CMD_GET_VC_CONTROL:
cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VC;
cmd.reg_count = 1;
this register can also affect
flicker modes */
cmd.buffer.block_data[0] = param;
- /* fall through */
+ fallthrough;
case CPIA2_CMD_GET_USER_EFFECTS:
cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP;
cmd.reg_count = 1;
CPIA2_VP_SENSOR_FLAGS_500) {
return -EINVAL;
}
- /* Fall through */
+ fallthrough;
case CPIA2_VP_FRAMERATE_15:
case CPIA2_VP_FRAMERATE_12_5:
case CPIA2_VP_FRAMERATE_7_5:
/* The DVB core will handle it */
if (dev->tuner_type == TUNER_ABSENT)
continue;
- /* fall through */
+ fallthrough;
default: /* just to shut up a gcc warning */
ent->function = MEDIA_ENT_F_CONN_RF;
break;
switch (band) {
default:
- deb_info("Warning : Rf frequency (%iHz) is not in the supported range, using VHF switch ", fe->dtv_property_cache.frequency);
- /* fall through */
+ deb_info("Warning : Rf frequency (%iHz) is not in the supported range, using VHF switch ", fe->dtv_property_cache.frequency);
+ fallthrough;
case BAND_VHF:
- state->dib8000_ops.set_gpio(fe, 3, 0, 1);
- break;
+ state->dib8000_ops.set_gpio(fe, 3, 0, 1);
+ break;
case BAND_UHF:
- state->dib8000_ops.set_gpio(fe, 3, 0, 0);
- break;
+ state->dib8000_ops.set_gpio(fe, 3, 0, 0);
+ break;
}
ret = state->set_param_save(fe);
switch (le16_to_cpu(dev->descriptor.idProduct)) {
case USB_PID_TEVII_S650:
dw2104_properties.rc.core.rc_codes = RC_MAP_TEVII_NEC;
- /* fall through */
+ fallthrough;
case USB_PID_DW2104:
reset = 1;
dw210x_op_rw(dev, 0xc4, 0x0000, 0, &reset, 1,
DW210X_WRITE_MSG);
- /* fall through */
+ fallthrough;
case USB_PID_DW3101:
reset = 0;
dw210x_op_rw(dev, 0xbf, 0x0040, 0, &reset, 0,
break;
}
}
- /* fall through */
+ fallthrough;
case 0x2101:
dw210x_op_rw(dev, 0xbc, 0x0030, 0, &reset16[0], 2,
DW210X_READ_MSG);
case V4L2_CTRL_TYPE_BOOLEAN:
if (step != 1 || max > 1 || min < 0)
return -ERANGE;
- /* fall through */
+ fallthrough;
case V4L2_CTRL_TYPE_U8:
case V4L2_CTRL_TYPE_U16:
case V4L2_CTRL_TYPE_U32:
p->stepwise.step_height);
break;
case V4L2_FRMSIZE_TYPE_CONTINUOUS:
- /* fall through */
default:
pr_cont("\n");
break;
p->stepwise.step.denominator);
break;
case V4L2_FRMIVAL_TYPE_CONTINUOUS:
- /* fall through */
default:
pr_cont("\n");
break;
#ifdef CONFIG_COMPAT_32BIT_TIME
case VIDIOC_DQEVENT_TIME32: {
struct v4l2_event *ev = parg;
- struct v4l2_event_time32 ev32 = {
- .type = ev->type,
- .pending = ev->pending,
- .sequence = ev->sequence,
- .timestamp.tv_sec = ev->timestamp.tv_sec,
- .timestamp.tv_nsec = ev->timestamp.tv_nsec,
- .id = ev->id,
- };
+ struct v4l2_event_time32 ev32;
+
+ memset(&ev32, 0, sizeof(ev32));
+
+ ev32.type = ev->type;
+ ev32.pending = ev->pending;
+ ev32.sequence = ev->sequence;
+ ev32.timestamp.tv_sec = ev->timestamp.tv_sec;
+ ev32.timestamp.tv_nsec = ev->timestamp.tv_nsec;
+ ev32.id = ev->id;
memcpy(&ev32.u, &ev->u, sizeof(ev->u));
memcpy(&ev32.reserved, &ev->reserved, sizeof(ev->reserved));
case VIDIOC_DQBUF_TIME32:
case VIDIOC_PREPARE_BUF_TIME32: {
struct v4l2_buffer *vb = parg;
- struct v4l2_buffer_time32 vb32 = {
- .index = vb->index,
- .type = vb->type,
- .bytesused = vb->bytesused,
- .flags = vb->flags,
- .field = vb->field,
- .timestamp.tv_sec = vb->timestamp.tv_sec,
- .timestamp.tv_usec = vb->timestamp.tv_usec,
- .timecode = vb->timecode,
- .sequence = vb->sequence,
- .memory = vb->memory,
- .m.userptr = vb->m.userptr,
- .length = vb->length,
- .request_fd = vb->request_fd,
- };
+ struct v4l2_buffer_time32 vb32;
+
+ memset(&vb32, 0, sizeof(vb32));
+
+ vb32.index = vb->index;
+ vb32.type = vb->type;
+ vb32.bytesused = vb->bytesused;
+ vb32.flags = vb->flags;
+ vb32.field = vb->field;
+ vb32.timestamp.tv_sec = vb->timestamp.tv_sec;
+ vb32.timestamp.tv_usec = vb->timestamp.tv_usec;
+ vb32.timecode = vb->timecode;
+ vb32.sequence = vb->sequence;
+ vb32.memory = vb->memory;
+ vb32.m.userptr = vb->m.userptr;
+ vb32.length = vb->length;
+ vb32.request_fd = vb->request_fd;
if (copy_to_user(arg, &vb32, sizeof(vb32)))
return -EFAULT;
break;
case VIDEOBUF_ERROR:
b->flags |= V4L2_BUF_FLAG_ERROR;
- /* fall through */
+ fallthrough;
case VIDEOBUF_DONE:
b->flags |= V4L2_BUF_FLAG_DONE;
break;
tick_ps *= div;
break;
case GPMC_CD_FCLK:
- /* FALL-THROUGH */
default:
break;
}
serial mode), then just fall through */
if (msb_read_int_reg(msb, -1))
return 0;
- /* fallthrough */
+ fallthrough;
case MSB_RP_RECEIVE_INT_REQ_RESULT:
intreg = mrq->data[0];
case MSB_RP_RECEIVE_STATUS_REG:
msb->regs.status = *(struct ms_status_register *)mrq->data;
msb->state = MSB_RP_SEND_OOB_READ;
- /* fallthrough */
+ fallthrough;
case MSB_RP_SEND_OOB_READ:
if (!msb_read_regs(msb,
msb->regs.extra_data =
*(struct ms_extra_data_register *) mrq->data;
msb->state = MSB_RP_SEND_READ_DATA;
- /* fallthrough */
+ fallthrough;
case MSB_RP_SEND_READ_DATA:
/* Skip that state if we only read the oob */
msb->state = MSB_WB_RECEIVE_INT_REQ;
if (msb_read_int_reg(msb, -1))
return 0;
- /* fallthrough */
+ fallthrough;
case MSB_WB_RECEIVE_INT_REQ:
intreg = mrq->data[0];
msb->int_polling = false;
msb->state = MSB_WB_SEND_WRITE_DATA;
- /* fallthrough */
+ fallthrough;
case MSB_WB_SEND_WRITE_DATA:
sg_init_table(sg, ARRAY_SIZE(sg));
msb->state = MSB_SC_RECEIVE_INT_REQ;
if (msb_read_int_reg(msb, -1))
return 0;
- /* fallthrough */
+ fallthrough;
case MSB_SC_RECEIVE_INT_REQ:
intreg = mrq->data[0];
case 3:
host->io_word[0] |= buf[off + 2] << 16;
host->io_pos++;
- /* fall through */
+ fallthrough;
case 2:
host->io_word[0] |= buf[off + 1] << 8;
host->io_pos++;
- /* fall through */
+ fallthrough;
case 1:
host->io_word[0] |= buf[off];
host->io_pos++;
case 3:
host->io_word |= buf[off + 2] << 16;
host->io_pos++;
- /* fall through */
+ fallthrough;
case 2:
host->io_word |= buf[off + 1] << 8;
host->io_pos++;
- /* fall through */
+ fallthrough;
case 1:
host->io_word |= buf[off];
host->io_pos++;
freereq = 0;
if (event != MPI_EVENT_EVENT_CHANGE)
break;
- /* fall through */
+ fallthrough;
case MPI_FUNCTION_CONFIG:
case MPI_FUNCTION_SAS_IO_UNIT_CONTROL:
ioc->mptbase_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
case MPI_MANUFACTPAGE_DEVICEID_FC939X:
case MPI_MANUFACTPAGE_DEVICEID_FC949X:
ioc->errata_flag_1064 = 1;
- /* fall through */
+ fallthrough;
case MPI_MANUFACTPAGE_DEVICEID_FC909:
case MPI_MANUFACTPAGE_DEVICEID_FC929:
case MPI_MANUFACTPAGE_DEVICEID_FC919:
pcixcmd &= 0x8F;
pci_write_config_byte(pdev, 0x6a, pcixcmd);
}
- /* fall through */
+ fallthrough;
case MPI_MANUFACTPAGE_DEVID_1030_53C1035:
ioc->bus_type = SPI;
}
}
mpt_findImVolumes(ioc);
- /* fall through */
+ fallthrough;
case MPTSAS_ADD_DEVICE:
memset(&sas_device, 0, sizeof(struct mptsas_devinfo));
/*
* Allow non-SAS & non-NEXUS_LOSS to drop into below code
*/
- /* Fall through */
+ fallthrough;
case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: /* 0x0048 */
/* Linux handles an unsolicited DID_RESET better
case MPI_IOCSTATUS_SCSI_DATA_OVERRUN: /* 0x0044 */
scsi_set_resid(sc, 0);
- /* Fall through */
+ fallthrough;
case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: /* 0x0040 */
case MPI_IOCSTATUS_SUCCESS: /* 0x0000 */
sc->result = (DID_OK << 16) | scsi_status;
switch (divsel) {
case PRCM_DSI_PLLOUT_SEL_PHI_4:
div *= 2;
- /* Fall through */
+ fallthrough;
case PRCM_DSI_PLLOUT_SEL_PHI_2:
div *= 2;
- /* Fall through */
+ fallthrough;
case PRCM_DSI_PLLOUT_SEL_PHI:
return pll_rate(PRCM_PLLDSI_FREQ, clock_rate(PRCMU_HDMICLK),
PLL_RAW) / div;
if (val & IQS620_PROX_SETTINGS_4_SAR_EN)
iqs62x->ui_sel = IQS62X_UI_SAR1;
- /* fall through */
+ fallthrough;
case IQS621_PROD_NUM:
ret = regmap_write(iqs62x->regmap, IQS620_GLBL_EVENT_MASK,
case IQS62X_EVENT_UI_LO:
event_data.ui_data = get_unaligned_le16(&event_map[i]);
- /* fall through */
+ fallthrough;
case IQS62X_EVENT_UI_HI:
case IQS62X_EVENT_NONE:
case IQS62X_EVENT_HYST:
event_map[i] <<= iqs62x->dev_desc->hyst_shift;
- /* fall through */
+ fallthrough;
case IQS62X_EVENT_WHEEL:
case IQS62X_EVENT_HALL:
const __be32 *reg;
u64 of_node_addr;
- /* Skip devices 'disabled' by Device Tree */
- if (!of_device_is_available(np))
- return -ENODEV;
-
/* Skip if OF node has previously been allocated to a device */
list_for_each_entry(of_entry, &mfd_of_node_list, list)
if (of_entry->np == np)
if (IS_ENABLED(CONFIG_OF) && parent->of_node && cell->of_compatible) {
for_each_child_of_node(parent->of_node, np) {
if (of_device_is_compatible(np, cell->of_compatible)) {
+ /* Ignore 'disabled' devices error free */
+ if (!of_device_is_available(np)) {
+ ret = 0;
+ goto fail_alias;
+ }
+
ret = mfd_match_of_node_to_dev(pdev, np, cell);
if (ret == -EAGAIN)
continue;
regulator_bulk_unregister_supply_alias(dev, cell->parent_supplies,
cell->num_parent_supplies);
- kfree(cell);
-
platform_device_unregister(pdev);
return 0;
}
MXS_LRADC_TOUCHSCREEN_5WIRE;
break;
}
- /* fall through - to an error message for i.MX23 */
+ fallthrough; /* to an error message for i.MX23 */
default:
dev_err(&pdev->dev,
"Unsupported number of touchscreen wires (%d)\n"
i, r);
}
}
- /* Fall through - as HSIC mode needs utmi_clk */
+ fallthrough; /* as HSIC mode needs utmi_clk */
case OMAP_EHCI_PORT_MODE_TLL:
if (!IS_ERR(omap->utmi_clk[i])) {
if (!IS_ERR(omap->hsic480m_clk[i]))
clk_disable_unprepare(omap->hsic480m_clk[i]);
- /* Fall through - as utmi_clks were used in HSIC mode */
+ fallthrough; /* as utmi_clks were used in HSIC mode */
case OMAP_EHCI_PORT_MODE_TLL:
if (!IS_ERR(omap->utmi_clk[i]))
case RAVE_SP_ETX:
case RAVE_SP_DLE:
*dest++ = RAVE_SP_DLE;
- /* FALLTHROUGH */
+ fallthrough;
default:
*dest++ = byte;
}
* deframer buffer
*/
- /* FALLTHROUGH */
+ fallthrough;
case RAVE_SP_EXPECT_ESCAPED_DATA:
if (deframer->length == sizeof(deframer->data)) {
break;
default:
pr_err("Failed to retrieve valid hwlock: %d\n", ret);
- /* fall-through */
+ fallthrough;
case -EPROBE_DEFER:
goto err_regmap;
}
switch (at25->addrlen) {
default: /* case 3 */
*cp++ = offset >> 16;
- /* fall through */
+ fallthrough;
case 2:
*cp++ = offset >> 8;
- /* fall through */
+ fallthrough;
case 1:
case 0: /* can't happen: for better codegen */
*cp++ = offset >> 0;
switch (at25->addrlen) {
default: /* case 3 */
*cp++ = offset >> 16;
- /* fall through */
+ fallthrough;
case 2:
*cp++ = offset >> 8;
- /* fall through */
+ fallthrough;
case 1:
case 0: /* can't happen: for better codegen */
*cp++ = offset >> 0;
switch (val) {
case 9:
chip->flags |= EE_INSTR_BIT3_IS_ADDR;
- /* fall through */
+ fallthrough;
case 8:
chip->flags |= EE_ADDR1;
break;
#include <linux/mm.h>
#include <linux/slab.h>
+#include <linux/uaccess.h>
#include <linux/genalloc.h>
static void cb_fini(struct hl_device *hdev, struct hl_cb *cb)
struct hl_device *hdev = hpriv->hdev;
struct hl_cb *cb;
phys_addr_t address;
- u32 handle;
+ u32 handle, user_cb_size;
int rc;
handle = vma->vm_pgoff;
}
/* Validation check */
- if ((vma->vm_end - vma->vm_start) != ALIGN(cb->size, PAGE_SIZE)) {
+ user_cb_size = vma->vm_end - vma->vm_start;
+ if (user_cb_size != ALIGN(cb->size, PAGE_SIZE)) {
dev_err(hdev->dev,
"CB mmap failed, mmap size 0x%lx != 0x%x cb size\n",
vma->vm_end - vma->vm_start, cb->size);
goto put_cb;
}
+ if (!access_ok((void __user *) (uintptr_t) vma->vm_start,
+ user_cb_size)) {
+ dev_err(hdev->dev,
+ "user pointer is invalid - 0x%lx\n",
+ vma->vm_start);
+
+ rc = -EINVAL;
+ goto put_cb;
+ }
+
spin_lock(&cb->lock);
if (cb->mmap) {
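A note on the access_ok() check added above: it only verifies that the requested range lies within the user address space; it does not fault pages in or prove the mapping is currently valid. A minimal sketch of the pattern, with hypothetical names:

	#include <linux/uaccess.h>

	/* Reject a user-supplied range that escapes the user address space. */
	static int check_user_range(unsigned long uaddr, unsigned long len)
	{
		if (!access_ok((void __user *)uaddr, len))
			return -EINVAL;
		return 0;
	}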
/* currently it is guaranteed to have only one chunk */
chunk = &cs_chunk_array[0];
+
+ if (chunk->queue_index >= hdev->asic_prop.max_queues) {
+ dev_err(hdev->dev, "Queue index %d is invalid\n",
+ chunk->queue_index);
+ rc = -EINVAL;
+ goto free_cs_chunk_array;
+ }
+
q_idx = chunk->queue_index;
hw_queue_prop = &hdev->asic_prop.hw_queues_props[q_idx];
q_type = hw_queue_prop->type;
static struct dentry *hl_debug_root;
static int hl_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
- u8 i2c_reg, u32 *val)
+ u8 i2c_reg, long *val)
{
struct armcp_packet pkt;
int rc;
pkt.i2c_reg = i2c_reg;
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- 0, (long *) val);
+ 0, val);
if (rc)
dev_err(hdev->dev, "Failed to read from I2C, error %d\n", rc);
struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
struct hl_device *hdev = entry->hdev;
char tmp_buf[32];
- u32 val;
+ long val;
ssize_t rc;
if (*ppos)
return rc;
}
- sprintf(tmp_buf, "0x%02x\n", val);
+ sprintf(tmp_buf, "0x%02lx\n", val);
rc = simple_read_from_buffer(buf, count, ppos, tmp_buf,
strlen(tmp_buf));
for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) {
snprintf(workq_name, 32, "hl-free-jobs-%u", i);
hdev->cq_wq[i] = create_singlethread_workqueue(workq_name);
- if (hdev->cq_wq == NULL) {
+ if (hdev->cq_wq[i] == NULL) {
dev_err(hdev->dev, "Failed to allocate CQ workqueue\n");
rc = -ENOMEM;
goto free_cq_wq;
goto out_err;
}
- hl_set_max_power(hdev, hdev->max_power);
+ hl_set_max_power(hdev);
} else {
rc = hdev->asic_funcs->soft_reset_late_init(hdev);
if (rc) {
goto out_disabled;
}
+ /* Need to call this again because the max power might change,
+ * depending on card type for certain ASICs
+ */
+ hl_set_max_power(hdev);
+
/*
* hl_hwmon_init() must be called after device_late_init(), because only
* there we get the information from the device about which
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/slab.h>
+#define FW_FILE_MAX_SIZE 0x1400000 /* maximum size of 20MB */
/**
* hl_fw_load_fw_to_device() - Load F/W code to device's memory.
*
dev_dbg(hdev->dev, "%s firmware size == %zu\n", fw_name, fw_size);
+ if (fw_size > FW_FILE_MAX_SIZE) {
+ dev_err(hdev->dev,
+ "FW file size %zu exceeds maximum of %u bytes\n",
+ fw_size, FW_FILE_MAX_SIZE);
+ rc = -EINVAL;
+ goto out;
+ }
+
fw_data = (const u64 *) fw->data;
memcpy_toio(dst, fw_data, fw_size);
* details.
* @in_reset: is device in reset flow.
* @curr_pll_profile: current PLL profile.
+ * @card_type: Various ASICs have several card types. This indicates the card
+ * type of the current device.
* @cs_active_cnt: number of active command submissions on this device (active
* means already in H/W queues)
* @major: habanalabs kernel driver major.
u64 clock_gating_mask;
atomic_t in_reset;
enum hl_pll_frequency curr_pll_profile;
+ enum armcp_card_types card_type;
int cs_active_cnt;
u32 major;
u32 high_pll;
*
* Return: true if the area is inside the valid range, false otherwise.
*/
-static inline bool hl_mem_area_inside_range(u64 address, u32 size,
+static inline bool hl_mem_area_inside_range(u64 address, u64 size,
u64 range_start_address, u64 range_end_address)
{
u64 end_address = address + size;
void hl_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr,
long value);
u64 hl_get_max_power(struct hl_device *hdev);
-void hl_set_max_power(struct hl_device *hdev, u64 value);
+void hl_set_max_power(struct hl_device *hdev);
int hl_set_voltage(struct hl_device *hdev,
int sensor_index, u32 attr, long value);
int hl_set_current(struct hl_device *hdev,
num_pgs = (args->alloc.mem_size + (page_size - 1)) >> page_shift;
total_size = num_pgs << page_shift;
+ if (!total_size) {
+ dev_err(hdev->dev, "Cannot allocate 0 bytes\n");
+ return -EINVAL;
+ }
+
contiguous = args->flags & HL_MEM_CONTIGUOUS;
if (contiguous) {
phys_pg_pack->contiguous = contiguous;
phys_pg_pack->pages = kvmalloc_array(num_pgs, sizeof(u64), GFP_KERNEL);
- if (!phys_pg_pack->pages) {
+ if (ZERO_OR_NULL_PTR(phys_pg_pack->pages)) {
rc = -ENOMEM;
goto pages_arr_err;
}
phys_pg_pack->pages = kvmalloc_array(total_npages, sizeof(u64),
GFP_KERNEL);
- if (!phys_pg_pack->pages) {
+ if (ZERO_OR_NULL_PTR(phys_pg_pack->pages)) {
rc = -ENOMEM;
goto page_pack_arr_mem_err;
}
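These ZERO_OR_NULL_PTR() conversions matter because kvmalloc_array(), like the other kernel allocators, returns the special ZERO_SIZE_PTR rather than NULL for a zero-byte request, so a plain NULL check would treat a zero-size allocation as success. A minimal sketch, assuming a caller-supplied element count:

	#include <linux/slab.h>
	#include <linux/types.h>

	static u64 *alloc_page_array(unsigned long n)
	{
		u64 *pages = kvmalloc_array(n, sizeof(u64), GFP_KERNEL);

		/* Catches both NULL (alloc failure) and ZERO_SIZE_PTR (n == 0). */
		if (ZERO_OR_NULL_PTR(pages))
			return NULL;
		return pages;
	}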
hdev->mmu_shadow_hop0 = kvmalloc_array(prop->max_asid,
prop->mmu_hop_table_size,
GFP_KERNEL | __GFP_ZERO);
- if (!hdev->mmu_shadow_hop0) {
+ if (ZERO_OR_NULL_PTR(hdev->mmu_shadow_hop0)) {
rc = -ENOMEM;
goto err_pool_add;
}
}
/* Point to the specified address */
- rc = hl_pci_iatu_write(hdev, offset + 0x14,
+ rc |= hl_pci_iatu_write(hdev, offset + 0x14,
lower_32_bits(pci_region->addr));
rc |= hl_pci_iatu_write(hdev, offset + 0x18,
upper_32_bits(pci_region->addr));
rc = hdev->asic_funcs->init_iatu(hdev);
if (rc) {
dev_err(hdev->dev, "Failed to initialize iATU\n");
- goto disable_device;
+ goto unmap_pci_bars;
}
rc = hl_pci_set_dma_mask(hdev);
if (rc)
- goto disable_device;
+ goto unmap_pci_bars;
return 0;
+unmap_pci_bars:
+ hl_pci_bars_unmap(hdev);
disable_device:
pci_clear_master(pdev);
pci_disable_device(pdev);
return result;
}
-void hl_set_max_power(struct hl_device *hdev, u64 value)
+void hl_set_max_power(struct hl_device *hdev)
{
struct armcp_packet pkt;
int rc;
pkt.ctl = cpu_to_le32(ARMCP_PACKET_MAX_POWER_SET <<
ARMCP_PKT_CTL_OPCODE_SHIFT);
- pkt.value = cpu_to_le64(value);
+ pkt.value = cpu_to_le64(hdev->max_power);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
0, NULL);
}
hdev->max_power = value;
- hl_set_max_power(hdev, value);
+ hl_set_max_power(hdev);
out:
return count;
hdev->pm_mng_profile = PM_AUTO;
else
hdev->pm_mng_profile = PM_MANUAL;
+
hdev->max_power = hdev->asic_prop.max_power_default;
hdev->asic_funcs->add_device_attr(hdev, &hl_dev_clks_attr_group);
[PACKET_LOAD_AND_EXE] = sizeof(struct packet_load_and_exe)
};
+static inline bool validate_packet_id(enum packet_id id)
+{
+ switch (id) {
+ case PACKET_WREG_32:
+ case PACKET_WREG_BULK:
+ case PACKET_MSG_LONG:
+ case PACKET_MSG_SHORT:
+ case PACKET_CP_DMA:
+ case PACKET_REPEAT:
+ case PACKET_MSG_PROT:
+ case PACKET_FENCE:
+ case PACKET_LIN_DMA:
+ case PACKET_NOP:
+ case PACKET_STOP:
+ case PACKET_ARB_POINT:
+ case PACKET_WAIT:
+ case PACKET_LOAD_AND_EXE:
+ return true;
+ default:
+ return false;
+ }
+}
+
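A design note on validate_packet_id(): the packet id is decoded from a user-controlled command buffer and later used to index gaudi_packet_sizes[], so it must be allow-listed first. The switch form, rather than a numeric bound check, also gets -Wswitch coverage when a new enumerator is added and tolerates gaps in the id space. A self-contained sketch with hypothetical names:

	#include <linux/errno.h>
	#include <linux/types.h>

	enum demo_id { DEMO_A = 1, DEMO_B = 4, DEMO_MAX = 8 };

	static const u8 demo_sizes[DEMO_MAX] = {
		[DEMO_A] = 16,
		[DEMO_B] = 32,
	};

	static bool demo_id_valid(enum demo_id id)
	{
		switch (id) {
		case DEMO_A:
		case DEMO_B:
			return true;
		default:
			return false;
		}
	}

	static int demo_pkt_size(u32 id)
	{
		if (!demo_id_valid(id))
			return -EINVAL;	/* reject before the table lookup */
		return demo_sizes[id];
	}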
static const char * const
gaudi_tpc_interrupts_cause[GAUDI_NUM_OF_TPC_INTR_CAUSE] = {
"tpc_address_exceed_slm",
prop->num_of_events = GAUDI_EVENT_SIZE;
prop->tpc_enabled_mask = TPC_ENABLED_MASK;
- prop->max_power_default = MAX_POWER_DEFAULT;
+ prop->max_power_default = MAX_POWER_DEFAULT_PCI;
prop->cb_pool_cb_cnt = GAUDI_CB_POOL_CB_CNT;
prop->cb_pool_cb_size = GAUDI_CB_POOL_CB_SIZE;
{
struct gaudi_device *gaudi = hdev->asic_specific;
u32 qman_offset;
+ bool enable;
int i;
/* In case we are during debug session, don't enable the clock gate
return;
for (i = GAUDI_PCI_DMA_1, qman_offset = 0 ; i < GAUDI_HBM_DMA_1 ; i++) {
- if (!(hdev->clock_gating_mask &
- (BIT_ULL(gaudi_dma_assignment[i]))))
- continue;
+ enable = !!(hdev->clock_gating_mask &
+ (BIT_ULL(gaudi_dma_assignment[i])));
qman_offset = gaudi_dma_assignment[i] * DMA_QMAN_OFFSET;
- WREG32(mmDMA0_QM_CGM_CFG1 + qman_offset, QMAN_CGM1_PWR_GATE_EN);
+ WREG32(mmDMA0_QM_CGM_CFG1 + qman_offset,
+ enable ? QMAN_CGM1_PWR_GATE_EN : 0);
WREG32(mmDMA0_QM_CGM_CFG + qman_offset,
- QMAN_UPPER_CP_CGM_PWR_GATE_EN);
+ enable ? QMAN_UPPER_CP_CGM_PWR_GATE_EN : 0);
}
for (i = GAUDI_HBM_DMA_1 ; i < GAUDI_DMA_MAX ; i++) {
- if (!(hdev->clock_gating_mask &
- (BIT_ULL(gaudi_dma_assignment[i]))))
- continue;
+ enable = !!(hdev->clock_gating_mask &
+ (BIT_ULL(gaudi_dma_assignment[i])));
qman_offset = gaudi_dma_assignment[i] * DMA_QMAN_OFFSET;
- WREG32(mmDMA0_QM_CGM_CFG1 + qman_offset, QMAN_CGM1_PWR_GATE_EN);
+ WREG32(mmDMA0_QM_CGM_CFG1 + qman_offset,
+ enable ? QMAN_CGM1_PWR_GATE_EN : 0);
WREG32(mmDMA0_QM_CGM_CFG + qman_offset,
- QMAN_COMMON_CP_CGM_PWR_GATE_EN);
+ enable ? QMAN_COMMON_CP_CGM_PWR_GATE_EN : 0);
}
- if (hdev->clock_gating_mask & (BIT_ULL(GAUDI_ENGINE_ID_MME_0))) {
- WREG32(mmMME0_QM_CGM_CFG1, QMAN_CGM1_PWR_GATE_EN);
- WREG32(mmMME0_QM_CGM_CFG, QMAN_COMMON_CP_CGM_PWR_GATE_EN);
- }
+ enable = !!(hdev->clock_gating_mask & (BIT_ULL(GAUDI_ENGINE_ID_MME_0)));
+ WREG32(mmMME0_QM_CGM_CFG1, enable ? QMAN_CGM1_PWR_GATE_EN : 0);
+ WREG32(mmMME0_QM_CGM_CFG, enable ? QMAN_COMMON_CP_CGM_PWR_GATE_EN : 0);
- if (hdev->clock_gating_mask & (BIT_ULL(GAUDI_ENGINE_ID_MME_2))) {
- WREG32(mmMME2_QM_CGM_CFG1, QMAN_CGM1_PWR_GATE_EN);
- WREG32(mmMME2_QM_CGM_CFG, QMAN_COMMON_CP_CGM_PWR_GATE_EN);
- }
+ enable = !!(hdev->clock_gating_mask & (BIT_ULL(GAUDI_ENGINE_ID_MME_2)));
+ WREG32(mmMME2_QM_CGM_CFG1, enable ? QMAN_CGM1_PWR_GATE_EN : 0);
+ WREG32(mmMME2_QM_CGM_CFG, enable ? QMAN_COMMON_CP_CGM_PWR_GATE_EN : 0);
for (i = 0, qman_offset = 0 ; i < TPC_NUMBER_OF_ENGINES ; i++) {
- if (!(hdev->clock_gating_mask &
- (BIT_ULL(GAUDI_ENGINE_ID_TPC_0 + i))))
- continue;
+ enable = !!(hdev->clock_gating_mask &
+ (BIT_ULL(GAUDI_ENGINE_ID_TPC_0 + i)));
WREG32(mmTPC0_QM_CGM_CFG1 + qman_offset,
- QMAN_CGM1_PWR_GATE_EN);
+ enable ? QMAN_CGM1_PWR_GATE_EN : 0);
WREG32(mmTPC0_QM_CGM_CFG + qman_offset,
- QMAN_COMMON_CP_CGM_PWR_GATE_EN);
+ enable ? QMAN_COMMON_CP_CGM_PWR_GATE_EN : 0);
qman_offset += TPC_QMAN_OFFSET;
}
PACKET_HEADER_PACKET_ID_MASK) >>
PACKET_HEADER_PACKET_ID_SHIFT);
+ if (!validate_packet_id(pkt_id)) {
+ dev_err(hdev->dev, "Invalid packet id %u\n", pkt_id);
+ rc = -EINVAL;
+ break;
+ }
+
pkt_size = gaudi_packet_sizes[pkt_id];
cb_parsed_length += pkt_size;
if (cb_parsed_length > parser->user_cb_size) {
PACKET_HEADER_PACKET_ID_MASK) >>
PACKET_HEADER_PACKET_ID_SHIFT);
+ if (!validate_packet_id(pkt_id)) {
+ dev_err(hdev->dev, "Invalid packet id %u\n", pkt_id);
+ rc = -EINVAL;
+ break;
+ }
+
pkt_size = gaudi_packet_sizes[pkt_id];
cb_parsed_length += pkt_size;
if (cb_parsed_length > parser->user_cb_size) {
*memory_wrapper_idx = 0xFF;
/* Iterate through memory wrappers, a single bit must be set */
- for (i = 0 ; i > num_mem_regs ; i++) {
+ for (i = 0 ; i < num_mem_regs ; i++) {
err_addr += i * 4;
err_word = RREG32(err_addr);
if (err_word) {
strncpy(prop->armcp_info.card_name, GAUDI_DEFAULT_CARD_NAME,
CARD_NAME_MAX_LEN);
+ hdev->card_type = le32_to_cpu(hdev->asic_prop.armcp_info.card_type);
+
+ if (hdev->card_type == armcp_card_type_pci)
+ prop->max_power_default = MAX_POWER_DEFAULT_PCI;
+ else if (hdev->card_type == armcp_card_type_pmc)
+ prop->max_power_default = MAX_POWER_DEFAULT_PMC;
+
+ hdev->max_power = prop->max_power_default;
+
return 0;
}
#define GAUDI_MAX_CLK_FREQ 2200000000ull /* 2200 MHz */
-#define MAX_POWER_DEFAULT 200000 /* 200W */
+#define MAX_POWER_DEFAULT_PCI 200000 /* 200W */
+#define MAX_POWER_DEFAULT_PMC 350000 /* 350W */
#define GAUDI_CPU_TIMEOUT_USEC 15000000 /* 15s */
}
static bool gaudi_etr_validate_address(struct hl_device *hdev, u64 addr,
- u32 size, bool *is_host)
+ u64 size, bool *is_host)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct gaudi_device *gaudi = hdev->asic_specific;
return false;
}
+ if (addr > (addr + size)) {
+ dev_err(hdev->dev,
+ "ETR buffer size %llu overflow\n", size);
+ return false;
+ }
+
/* PMMU and HPMMU addresses are equal, check only one of them */
if ((gaudi->hw_cap_initialized & HW_CAP_MMU) &&
hl_mem_area_inside_range(addr, size,
[PACKET_STOP] = sizeof(struct packet_stop)
};
+static inline bool validate_packet_id(enum packet_id id)
+{
+ switch (id) {
+ case PACKET_WREG_32:
+ case PACKET_WREG_BULK:
+ case PACKET_MSG_LONG:
+ case PACKET_MSG_SHORT:
+ case PACKET_CP_DMA:
+ case PACKET_MSG_PROT:
+ case PACKET_FENCE:
+ case PACKET_LIN_DMA:
+ case PACKET_NOP:
+ case PACKET_STOP:
+ return true;
+ default:
+ return false;
+ }
+}
+
static u64 goya_mmu_regs[GOYA_MMU_REGS_NUM] = {
mmDMA_QM_0_GLBL_NON_SECURE_PROPS,
mmDMA_QM_1_GLBL_NON_SECURE_PROPS,
PACKET_HEADER_PACKET_ID_MASK) >>
PACKET_HEADER_PACKET_ID_SHIFT);
+ if (!validate_packet_id(pkt_id)) {
+ dev_err(hdev->dev, "Invalid packet id %u\n", pkt_id);
+ rc = -EINVAL;
+ break;
+ }
+
pkt_size = goya_packet_sizes[pkt_id];
cb_parsed_length += pkt_size;
if (cb_parsed_length > parser->user_cb_size) {
PACKET_HEADER_PACKET_ID_MASK) >>
PACKET_HEADER_PACKET_ID_SHIFT);
+ if (!validate_packet_id(pkt_id)) {
+ dev_err(hdev->dev, "Invalid packet id %u\n", pkt_id);
+ rc = -EINVAL;
+ break;
+ }
+
pkt_size = goya_packet_sizes[pkt_id];
cb_parsed_length += pkt_size;
if (cb_parsed_length > parser->user_cb_size) {
}
static int goya_etr_validate_address(struct hl_device *hdev, u64 addr,
- u32 size)
+ u64 size)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
u64 range_start, range_end;
+ if (addr > (addr + size)) {
+ dev_err(hdev->dev,
+ "ETR buffer size %llu overflow\n", size);
+ return false;
+ }
+
if (hdev->mmu_enable) {
range_start = prop->dmmu.start_addr;
range_end = prop->dmmu.end_addr;
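The addr > (addr + size) test added in both ETR hunks is the standard unsigned-wraparound check: if the 64-bit sum overflows, it wraps around and becomes smaller than addr. A minimal sketch:

	#include <linux/types.h>

	/* True only when addr + size wraps past U64_MAX. */
	static bool range_wraps(u64 addr, u64 size)
	{
		return addr > addr + size;
	}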
struct hdcp_port_data *data,
struct hdcp2_rep_stream_ready *stream_ready)
{
- struct wired_cmd_repeater_auth_stream_req_in
- verify_mprime_in = { { 0 } };
+ struct wired_cmd_repeater_auth_stream_req_in *verify_mprime_in;
struct wired_cmd_repeater_auth_stream_req_out
verify_mprime_out = { { 0 } };
struct mei_cl_device *cldev;
ssize_t byte;
+ size_t cmd_size;
if (!dev || !stream_ready || !data)
return -EINVAL;
cldev = to_mei_cl_device(dev);
- verify_mprime_in.header.api_version = HDCP_API_VERSION;
- verify_mprime_in.header.command_id = WIRED_REPEATER_AUTH_STREAM_REQ;
- verify_mprime_in.header.status = ME_HDCP_STATUS_SUCCESS;
- verify_mprime_in.header.buffer_len =
+ cmd_size = struct_size(verify_mprime_in, streams, data->k);
+ if (cmd_size == SIZE_MAX)
+ return -EINVAL;
+
+ verify_mprime_in = kzalloc(cmd_size, GFP_KERNEL);
+ if (!verify_mprime_in)
+ return -ENOMEM;
+
+ verify_mprime_in->header.api_version = HDCP_API_VERSION;
+ verify_mprime_in->header.command_id = WIRED_REPEATER_AUTH_STREAM_REQ;
+ verify_mprime_in->header.status = ME_HDCP_STATUS_SUCCESS;
+ verify_mprime_in->header.buffer_len =
WIRED_CMD_BUF_LEN_REPEATER_AUTH_STREAM_REQ_MIN_IN;
- verify_mprime_in.port.integrated_port_type = data->port_type;
- verify_mprime_in.port.physical_port = (u8)data->fw_ddi;
- verify_mprime_in.port.attached_transcoder = (u8)data->fw_tc;
+ verify_mprime_in->port.integrated_port_type = data->port_type;
+ verify_mprime_in->port.physical_port = (u8)data->fw_ddi;
+ verify_mprime_in->port.attached_transcoder = (u8)data->fw_tc;
+
+ memcpy(verify_mprime_in->m_prime, stream_ready->m_prime, HDCP_2_2_MPRIME_LEN);
+ drm_hdcp_cpu_to_be24(verify_mprime_in->seq_num_m, data->seq_num_m);
- memcpy(verify_mprime_in.m_prime, stream_ready->m_prime,
- HDCP_2_2_MPRIME_LEN);
- drm_hdcp_cpu_to_be24(verify_mprime_in.seq_num_m, data->seq_num_m);
- memcpy(verify_mprime_in.streams, data->streams,
+ memcpy(verify_mprime_in->streams, data->streams,
array_size(data->k, sizeof(*data->streams)));
- verify_mprime_in.k = cpu_to_be16(data->k);
+ verify_mprime_in->k = cpu_to_be16(data->k);
- byte = mei_cldev_send(cldev, (u8 *)&verify_mprime_in,
- sizeof(verify_mprime_in));
+ byte = mei_cldev_send(cldev, (u8 *)verify_mprime_in, cmd_size);
+ kfree(verify_mprime_in);
if (byte < 0) {
dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte);
return byte;
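The conversion above moves a flexible-array command off the stack onto the heap and sizes it with struct_size(), which saturates to SIZE_MAX instead of wrapping when the element count would overflow the multiplication. A minimal sketch of that pattern, with hypothetical struct and field names:

	#include <linux/overflow.h>
	#include <linux/slab.h>
	#include <linux/types.h>

	struct demo_cmd {
		u16 count;
		u32 entries[];	/* flexible array member */
	};

	static struct demo_cmd *demo_cmd_alloc(size_t n)
	{
		struct demo_cmd *cmd;
		size_t bytes = struct_size(cmd, entries, n);	/* sizeof only; cmd not read */

		if (bytes == SIZE_MAX)	/* n * sizeof(u32) overflowed */
			return NULL;

		return kzalloc(bytes, GFP_KERNEL);
	}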
case SCIFEP_ZOMBIE:
dev_err(scif_info.mdev.this_device,
"SCIFAPI close: zombie state unexpected\n");
- /* fall through */
+ fallthrough;
case SCIFEP_DISCONNECTED:
spin_unlock(&ep->lock);
scif_unregister_all_windows(epd);
ep->port.port = err;
ep->port.node = scif_info.nodeid;
ep->conn_async_state = ASYNC_CONN_IDLE;
- /* Fall through */
+ fallthrough;
case SCIFEP_BOUND:
/*
* If a non-blocking connect has been already initiated
window->unreg_state = OP_IN_PROGRESS;
send_msg = true;
}
- /* fall through */
+ fallthrough;
case OP_IN_PROGRESS:
{
scif_get_window(window, 1);
break;
case CBSS_PAGE_OVERFLOW:
STAT(mesq_noop_page_overflow);
- /* fall through */
+ fallthrough;
default:
BUG();
}
break;
case CBSS_PAGE_OVERFLOW:
STAT(mesq_page_overflow);
- /* fall through */
+ fallthrough;
default:
BUG();
}
if (!xpc_kdebug_ignore)
break;
- /* fall through */
+ fallthrough;
case DIE_MCA_MONARCH_ENTER:
case DIE_INIT_MONARCH_ENTER:
xpc_arch_ops.offline_heartbeat();
if (!xpc_kdebug_ignore)
break;
- /* fall through */
+ fallthrough;
case DIE_MCA_MONARCH_LEAVE:
case DIE_INIT_MONARCH_LEAVE:
xpc_arch_ops.online_heartbeat();
switch (region_size) {
case 128:
max_regions *= 2;
- /* fall through */
+ fallthrough;
case 64:
max_regions *= 2;
- /* fall through */
+ fallthrough;
case 32:
max_regions *= 2;
region_size = 16;
xpc_wakeup_channel_mgr(part);
}
- /* fall through */
+ fallthrough;
case XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV:
spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
part_uv->flags |= XPC_P_ENGAGED_UV;
switch (bus_width) {
case 8:
host->caps |= MMC_CAP_8_BIT_DATA;
- /* fall through - Hosts capable of 8-bit can also do 4 bits */
+ fallthrough; /* Hosts capable of 8-bit can also do 4 bits */
case 4:
host->caps |= MMC_CAP_4_BIT_DATA;
break;
case 0x600:
case 0x500:
host->caps.has_odd_clk_div = 1;
- /* Fall through */
+ fallthrough;
case 0x400:
case 0x300:
host->caps.has_dma_conf_reg = 1;
host->caps.has_cfg_reg = 1;
host->caps.has_cstor_reg = 1;
host->caps.has_highspeed = 1;
- /* Fall through */
+ fallthrough;
case 0x200:
host->caps.has_rwproof = 1;
host->caps.need_blksz_mul_4 = 0;
host->caps.need_notbusy_for_read_ops = 1;
- /* Fall through */
+ fallthrough;
case 0x100:
host->caps.has_bad_data_ordering = 0;
host->caps.need_reset_after_xfer = 0;
- /* Fall through */
+ fallthrough;
case 0x0:
break;
default:
* then it's harmless for us to allow it.
*/
cmd_reg |= MMCCMD_BSYEXP;
- /* FALLTHROUGH */
+ fallthrough;
case MMC_RSP_R1: /* 48 bits, CRC */
cmd_reg |= MMCCMD_RSPFMT_R1456;
break;
if (smpl_phase >= USE_DLY_MIN_SMPL &&
smpl_phase <= USE_DLY_MAX_SMPL)
use_smpl_dly = 1;
- /* fallthrough */
+ fallthrough;
case MMC_TIMING_UHS_SDR50:
if (smpl_phase >= ENABLE_SHIFT_MIN_SMPL &&
smpl_phase <= ENABLE_SHIFT_MAX_SMPL)
}
prev_state = state = STATE_SENDING_DATA;
- /* fall through */
+ fallthrough;
case STATE_SENDING_DATA:
/*
}
prev_state = state = STATE_DATA_BUSY;
- /* fall through */
+ fallthrough;
case STATE_DATA_BUSY:
if (!dw_mci_clear_pending_data_complete(host)) {
*/
prev_state = state = STATE_SENDING_STOP;
- /* fall through */
+ fallthrough;
case STATE_SENDING_STOP:
if (!dw_mci_clear_pending_cmd_complete(host))
break;
jz_mmc_prepare_data_transfer(host);
- /* fall through */
+ fallthrough;
case JZ4740_MMC_STATE_TRANSFER_DATA:
if (host->use_dma) {
break;
}
jz4740_mmc_write_irq_reg(host, JZ_MMC_IRQ_DATA_TRAN_DONE);
- /* fall through */
+ fallthrough;
case JZ4740_MMC_STATE_SEND_STOP:
if (!req->stop)
switch (ios->power_mode) {
case MMC_POWER_OFF:
vdd = 0;
- /* fall through */
+ fallthrough;
case MMC_POWER_UP:
if (!IS_ERR(mmc->supply.vmmc)) {
host->error = mmc_regulator_set_ocr(mmc,
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
+#include <linux/reset.h>
#include <linux/mmc/card.h>
#include <linux/mmc/core.h>
struct pinctrl_state *pins_uhs;
struct delayed_work req_timeout;
int irq; /* host interrupt */
+ struct reset_control *reset;
struct clk *src_clk; /* msdc source clock */
struct clk *h_clk; /* msdc h_clk */
u32 val;
u32 tune_reg = host->dev_comp->pad_tune_reg;
+ if (host->reset) {
+ reset_control_assert(host->reset);
+ usleep_range(10, 50);
+ reset_control_deassert(host->reset);
+ }
+
/* Configure to MMC/SD mode, clock free running */
sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_MODE | MSDC_CFG_CKPDN);
if (IS_ERR(host->src_clk_cg))
host->src_clk_cg = NULL;
+ host->reset = devm_reset_control_get_optional_exclusive(&pdev->dev,
+ "hrst");
+ if (IS_ERR(host->reset))
+ return PTR_ERR(host->reset);
+
host->irq = platform_get_irq(pdev, 0);
if (host->irq < 0) {
ret = -EINVAL;
case HOST_MODE:
if (host->pdata->flags & TMIO_MMC_HAVE_CBSY)
bit = TMIO_STAT_CMD_BUSY;
- /* fallthrough */
+ fallthrough;
case CTL_SD_CARD_CLK_CTL:
return renesas_sdhi_wait_idle(host, bit);
}
.caps = MMC_CAP_NONREMOVABLE,
};
+struct amd_sdhci_host {
+ bool tuned_clock;
+ bool dll_enabled;
+};
+
/* AMD sdhci reset dll register. */
#define SDHCI_AMD_RESET_DLL_REGISTER 0x908
}
/*
- * For AMD Platform it is required to disable the tuning
- * bit first controller to bring to HS Mode from HS200
- * mode, later enable to tune to HS400 mode.
+ * The initialization sequence for HS400 is:
+ * HS->HS200->Perform Tuning->HS->HS400
+ *
+ * The re-tuning sequence is:
+ * HS400->DDR52->HS->HS200->Perform Tuning->HS->HS400
+ *
+ * The AMD eMMC Controller can only use the tuned clock while in HS200
+ * and HS400 mode. If we switch to a different mode, we need to disable
+ * the tuned clock. If we have previously performed tuning and switch
+ * back to HS200 or HS400, we can re-enable the tuned clock.
*/
static void amd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct sdhci_host *host = mmc_priv(mmc);
+ struct sdhci_acpi_host *acpi_host = sdhci_priv(host);
+ struct amd_sdhci_host *amd_host = sdhci_acpi_priv(acpi_host);
unsigned int old_timing = host->timing;
+ u16 val;
sdhci_set_ios(mmc, ios);
- if (old_timing == MMC_TIMING_MMC_HS200 &&
- ios->timing == MMC_TIMING_MMC_HS)
- sdhci_writew(host, 0x9, SDHCI_HOST_CONTROL2);
- if (old_timing != MMC_TIMING_MMC_HS400 &&
- ios->timing == MMC_TIMING_MMC_HS400) {
- sdhci_writew(host, 0x80, SDHCI_HOST_CONTROL2);
- sdhci_acpi_amd_hs400_dll(host);
+
+ if (old_timing != host->timing && amd_host->tuned_clock) {
+ if (host->timing == MMC_TIMING_MMC_HS400 ||
+ host->timing == MMC_TIMING_MMC_HS200) {
+ val = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ val |= SDHCI_CTRL_TUNED_CLK;
+ sdhci_writew(host, val, SDHCI_HOST_CONTROL2);
+ } else {
+ val = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ val &= ~SDHCI_CTRL_TUNED_CLK;
+ sdhci_writew(host, val, SDHCI_HOST_CONTROL2);
+ }
+
+ /* DLL is only required for HS400 */
+ if (host->timing == MMC_TIMING_MMC_HS400 &&
+ !amd_host->dll_enabled) {
+ sdhci_acpi_amd_hs400_dll(host);
+ amd_host->dll_enabled = true;
+ }
}
}
+static int amd_sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+ int err;
+ struct sdhci_host *host = mmc_priv(mmc);
+ struct sdhci_acpi_host *acpi_host = sdhci_priv(host);
+ struct amd_sdhci_host *amd_host = sdhci_acpi_priv(acpi_host);
+
+ amd_host->tuned_clock = false;
+
+ err = sdhci_execute_tuning(mmc, opcode);
+
+ if (!err && !host->tuning_err)
+ amd_host->tuned_clock = true;
+
+ return err;
+}
+
static const struct sdhci_ops sdhci_acpi_ops_amd = {
.set_clock = sdhci_set_clock,
.set_bus_width = sdhci_set_bus_width,
host->mmc_host_ops.select_drive_strength = amd_select_drive_strength;
host->mmc_host_ops.set_ios = amd_set_ios;
+ host->mmc_host_ops.execute_tuning = amd_sdhci_execute_tuning;
return 0;
}
SDHCI_QUIRK_32BIT_ADMA_SIZE,
.quirks2 = SDHCI_QUIRK2_BROKEN_64_BIT_DMA,
.probe_slot = sdhci_acpi_emmc_amd_probe_slot,
+ .priv_size = sizeof(struct amd_sdhci_host),
};
struct sdhci_acpi_uid_slot {
"failed to request card-detect gpio!\n");
return err;
}
- /* fall through */
+ fallthrough;
case ESDHC_CD_CONTROLLER:
/* we have a working card_detect back */
sdhci_dumpregs(mmc_priv(mmc));
}
+static void sdhci_cqhci_reset(struct sdhci_host *host, u8 mask)
+{
+ if ((host->mmc->caps2 & MMC_CAP2_CQE) && (mask & SDHCI_RESET_ALL) &&
+ host->mmc->cqe_private)
+ cqhci_deactivate(host->mmc);
+ sdhci_reset(host, mask);
+}
+
/*****************************************************************************\
* *
* Hardware specific quirk handling *
.set_power = sdhci_intel_set_power,
.enable_dma = sdhci_pci_enable_dma,
.set_bus_width = sdhci_set_bus_width,
- .reset = sdhci_reset,
+ .reset = sdhci_cqhci_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
.hw_reset = sdhci_pci_hw_reset,
.irq = sdhci_cqhci_irq,
switch (pdata->max_width) {
case 8:
host->mmc->caps |= MMC_CAP_8_BIT_DATA;
- /* Fall through */
+ fallthrough;
case 4:
host->mmc->caps |= MMC_CAP_4_BIT_DATA;
break;
break;
default:
- /* fall-through */
+ fallthrough;
case MMC_SIGNAL_VOLTAGE_330:
ret = pinctrl_select_state(sprd_host->pinctrl,
sprd_host->pins_default);
#define NVQUIRK_DIS_CARD_CLK_CONFIG_TAP BIT(8)
#define NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING BIT(9)
+/*
+ * NVQUIRK_HAS_TMCLK is for SoCs that have a separate timeout clock for
+ * the Tegra SDMMC hardware data timeout.
+ */
+#define NVQUIRK_HAS_TMCLK BIT(10)
+
/* SDMMC CQE Base Address for Tegra Host Ver 4.1 and Higher */
#define SDHCI_TEGRA_CQE_BASE_ADDR 0xF000
struct sdhci_tegra {
const struct sdhci_tegra_soc_data *soc_data;
struct gpio_desc *power_gpio;
+ struct clk *tmclk;
bool ddr_signaling;
bool pad_calib_required;
bool pad_control_available;
static const struct sdhci_pltfm_data sdhci_tegra210_pdata = {
.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
- SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
SDHCI_QUIRK_SINGLE_POWER_WRITE |
SDHCI_QUIRK_NO_HISPD_BIT |
SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
NVQUIRK_HAS_PADCALIB |
NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
NVQUIRK_ENABLE_SDR50 |
- NVQUIRK_ENABLE_SDR104,
+ NVQUIRK_ENABLE_SDR104 |
+ NVQUIRK_HAS_TMCLK,
.min_tap_delay = 106,
.max_tap_delay = 185,
};
static const struct sdhci_pltfm_data sdhci_tegra186_pdata = {
.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
- SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
SDHCI_QUIRK_SINGLE_POWER_WRITE |
SDHCI_QUIRK_NO_HISPD_BIT |
SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
NVQUIRK_ENABLE_SDR50 |
NVQUIRK_ENABLE_SDR104 |
+ NVQUIRK_HAS_TMCLK |
NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING,
.min_tap_delay = 84,
.max_tap_delay = 136,
NVQUIRK_HAS_PADCALIB |
NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
NVQUIRK_ENABLE_SDR50 |
- NVQUIRK_ENABLE_SDR104,
+ NVQUIRK_ENABLE_SDR104 |
+ NVQUIRK_HAS_TMCLK,
.min_tap_delay = 96,
.max_tap_delay = 139,
};
goto err_power_req;
}
+ /*
+ * Tegra210 has a separate SDMMC_LEGACY_TM clock used as the host
+ * timeout clock, and SW can choose TMCLK or SDCLK for the hardware
+ * data timeout via the USE_TMCLK_FOR_DATA_TIMEOUT bit of the
+ * SDHCI_TEGRA_VENDOR_SYS_SW_CTRL register.
+ *
+ * The USE_TMCLK_FOR_DATA_TIMEOUT bit defaults to 1, and SDMMC uses the
+ * 12 MHz TMCLK, which is advertised in the host capability register.
+ * A 12 MHz TMCLK gives a maximum achievable data timeout period of
+ * 11 s, which is better than using SDCLK for the data timeout.
+ *
+ * So, TMCLK is set to 12 MHz and kept enabled all the time on SoCs
+ * supporting a separate TMCLK.
+ */
+
+ if (soc_data->nvquirks & NVQUIRK_HAS_TMCLK) {
+ clk = devm_clk_get(&pdev->dev, "tmclk");
+ if (IS_ERR(clk)) {
+ rc = PTR_ERR(clk);
+ if (rc == -EPROBE_DEFER)
+ goto err_power_req;
+
+ dev_warn(&pdev->dev, "failed to get tmclk: %d\n", rc);
+ clk = NULL;
+ }
+
+ clk_set_rate(clk, 12000000);
+ rc = clk_prepare_enable(clk);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "failed to enable tmclk: %d\n", rc);
+ goto err_power_req;
+ }
+
+ tegra_host->tmclk = clk;
+ }
+
clk = devm_clk_get(mmc_dev(host->mmc), NULL);
if (IS_ERR(clk)) {
rc = PTR_ERR(clk);
err_rst_get:
clk_disable_unprepare(pltfm_host->clk);
err_clk_get:
+ clk_disable_unprepare(tegra_host->tmclk);
err_power_req:
err_parse_dt:
sdhci_pltfm_free(pdev);
reset_control_assert(tegra_host->rst);
usleep_range(2000, 4000);
clk_disable_unprepare(pltfm_host->clk);
+ clk_disable_unprepare(tegra_host->tmclk);
sdhci_pltfm_free(pdev);
ret = true;
break;
}
- /* fall through */
+ fallthrough;
default:
reg &= ~XENON_TIMING_ADJUST_SLOW_MODE;
ret = false;
case MMC_TIMING_UHS_SDR50:
if (host->flags & SDHCI_SDR50_NEEDS_TUNING)
break;
- /* FALLTHROUGH */
+ fallthrough;
default:
goto out;
break;
case MMC_RSP_R1B:
rc |= TIFM_MMCSD_RSP_BUSY;
- /* fall-through */
+ fallthrough;
case MMC_RSP_R1:
rc |= TIFM_MMCSD_RSP_R1;
break;
host->wait = USDHI6_WAIT_FOR_STOP;
return 0;
}
- /* fall through - Unsupported STOP command. */
+ fallthrough; /* Unsupported STOP command */
default:
dev_err(mmc_dev(host->mmc),
"unsupported stop CMD%d for CMD%d\n",
switch (host->wait) {
default:
dev_err(mmc_dev(host->mmc), "Invalid state %u\n", host->wait);
- /* fall through - mrq can be NULL, but is impossible. */
+ fallthrough; /* mrq can be NULL, but is impossible */
case USDHI6_WAIT_FOR_CMD:
usdhi6_error_code(host);
if (mrq)
host->offset, data->blocks, data->blksz, data->sg_len,
sg_dma_len(sg), sg->offset);
usdhi6_sg_unmap(host, true);
- /* fall through - page unmapped in USDHI6_WAIT_FOR_DATA_END. */
+ fallthrough; /* page unmapped in USDHI6_WAIT_FOR_DATA_END */
case USDHI6_WAIT_FOR_DATA_END:
usdhi6_error_code(host);
data->error = -ETIMEDOUT;
mux->idle_state = idle_state;
break;
}
- /* fall through */
+ fallthrough;
default:
dev_err(dev, "invalid idle-state %d\n", idle_state);
return -EINVAL;
dev->irq = cops_irq(ioaddr, board);
if (dev->irq)
break;
- /* fall through - Once no IRQ found on this port. */
+ fallthrough; /* Once no IRQ found on this port */
case 1:
retval = -EINVAL;
goto err_out;
switch (ints[0]) {
default: /* ERROR */
pr_err("Too many arguments\n");
- /* Fall through */
+ fallthrough;
case 3: /* Node ID */
node = ints[3];
- /* Fall through */
+ fallthrough;
case 2: /* IRQ */
irq = ints[2];
- /* Fall through */
+ fallthrough;
case 1: /* IO address */
io = ints[1];
}
switch (ints[0]) {
default: /* ERROR */
pr_info("Too many arguments\n");
- /* Fall through */
+ fallthrough;
case 6: /* Timeout */
timeout = ints[6];
- /* Fall through */
+ fallthrough;
case 5: /* CKP value */
clockp = ints[5];
- /* Fall through */
+ fallthrough;
case 4: /* Backplane flag */
backplane = ints[4];
- /* Fall through */
+ fallthrough;
case 3: /* Node ID */
node = ints[3];
- /* Fall through */
+ fallthrough;
case 2: /* IRQ */
irq = ints[2];
- /* Fall through */
+ fallthrough;
case 1: /* IO address */
io = ints[1];
}
switch (ints[0]) {
default: /* ERROR */
pr_err("Too many arguments\n");
- /* Fall through */
+ fallthrough;
case 2: /* IRQ */
irq = ints[2];
- /* Fall through */
+ fallthrough;
case 1: /* IO address */
io = ints[1];
}
switch (ints[0]) {
default: /* ERROR */
pr_err("Too many arguments\n");
- /* Fall through */
+ fallthrough;
case 3: /* Mem address */
shmem = ints[3];
- /* Fall through */
+ fallthrough;
case 2: /* IRQ */
irq = ints[2];
- /* Fall through */
+ fallthrough;
case 1: /* IO address */
io = ints[1];
}
/**
* __get_first_agg - get the first aggregator in the bond
- * @bond: the bond we're looking at
+ * @port: the port we're looking at
*
* Return the aggregator of the first slave in @bond, or %NULL if it can't be
* found.
port->actor_oper_port_state &= ~LACP_STATE_EXPIRED;
port->sm_rx_state = AD_RX_PORT_DISABLED;
- /* Fall Through */
+ fallthrough;
case AD_RX_PORT_DISABLED:
port->sm_vars &= ~AD_PORT_MATCHED;
break;
if (__agg_active_ports(curr) < __agg_active_ports(best))
return best;
- /*FALLTHROUGH*/
+ fallthrough;
case BOND_AD_STABLE:
case BOND_AD_BANDWIDTH:
if (__get_agg_bandwidth(curr) > __get_agg_bandwidth(best))
/**
* ad_agg_selection_logic - select an aggregation group for a team
- * @aggregator: the aggregator we're looking at
+ * @agg: the aggregator we're looking at
* @update_slave_arr: Does slave array need update?
*
* It is assumed that only one aggregator may be selected for a team.
/**
* ad_initialize_port - initialize a given port's parameters
- * @aggregator: the aggregator we're looking at
+ * @port: the port we're looking at
* @lacp_fast: boolean. whether fast periodic should be used
*/
static void ad_initialize_port(struct port *port, int lacp_fast)
/**
* bond_3ad_initiate_agg_selection - initate aggregator selection
* @bond: bonding struct
+ * @timeout: timeout value to set
*
* Set the aggregation selection timer, to initiate an agg selection in
* the very near future. Called during first initialization, and during
/**
* bond_3ad_state_machine_handler - handle state machines timeout
- * @bond: bonding struct to work on
+ * @work: work context to fetch bonding struct to work on from
*
* The state machine handling concept in this module is to check every tick
* which state machine should operate any function. The execution order is
/**
* bond_3ad_handle_link_change - handle a slave's link status change indication
* @slave: slave struct to work on
- * @status: whether the link is now up or down
+ * @link: whether the link is now up or down
*
* Handle reselection of aggregator (if needed) for this port.
*/
/**
* bond_3ad_set_carrier - set link state for bonding master
- * @bond - bonding structure
+ * @bond: bonding structure
*
* if we have an active aggregator, we're up, if not, we're down.
* Presumes that we cannot have an active aggregator if there are
/**
* bond_3ad_update_lacp_rate - change the lacp rate
- * @bond - bonding struct
+ * @bond: bonding struct
*
* When modify lacp_rate parameter via sysfs,
* update actor_oper_port_state of each port.
/**
* alb_set_mac_address
- * @bond:
- * @addr:
+ * @bond: bonding we're working on
+ * @addr: MAC address to set
*
* In TLB mode all slaves are configured to the bond's hw address, but set
* their dev_addr field to different addresses (based on their permanent hw
/**
* bond_vlan_rx_add_vid - Propagates adding an id to slaves
* @bond_dev: bonding net device that got called
+ * @proto: network protocol ID
* @vid: vlan id being added
*/
static int bond_vlan_rx_add_vid(struct net_device *bond_dev,
/**
* bond_vlan_rx_kill_vid - Propagates deleting an id to slaves
* @bond_dev: bonding net device that got called
+ * @proto: network protocol ID
* @vid: vlan id being removed
*/
static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
/**
* change_active_interface - change the active slave into the specified one
* @bond: our bonding struct
- * @new: the new slave to make the active one
+ * @new_active: the new slave to make the active one
*
* Set the new slave to the bond's settings and unset them on the old
* curr_active_slave.
int ret;
ret = __bond_release_one(bond_dev, slave_dev, false, true);
- if (ret == 0 && !bond_has_slaves(bond)) {
+ if (ret == 0 && !bond_has_slaves(bond) &&
+ bond_dev->reg_state != NETREG_UNREGISTERING) {
bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
netdev_info(bond_dev, "Destroying bond\n");
bond_remove_proc_entry(bond);
"active " : "backup ") : "",
bond->params.downdelay * bond->params.miimon);
}
- /*FALLTHRU*/
+ fallthrough;
case BOND_LINK_FAIL:
if (link_state) {
/* recovered before downdelay expired */
bond->params.updelay *
bond->params.miimon);
}
- /*FALLTHRU*/
+ fallthrough;
case BOND_LINK_BACK:
if (!link_state) {
bond_propose_link_state(slave, BOND_LINK_DOWN);
if (bond_time_in_interval(bond, last_rx, 1)) {
bond_propose_link_state(slave, BOND_LINK_UP);
commit++;
+ } else if (slave->link == BOND_LINK_BACK) {
+ bond_propose_link_state(slave, BOND_LINK_FAIL);
+ commit++;
}
continue;
}
continue;
+ case BOND_LINK_FAIL:
+ bond_set_slave_link_state(slave, BOND_LINK_FAIL,
+ BOND_SLAVE_NOTIFY_NOW);
+ bond_set_slave_inactive_flags(slave,
+ BOND_SLAVE_NOTIFY_NOW);
+
+ /* A slave has just been enslaved and has become
+ * the current active slave.
+ */
+ if (rtnl_dereference(bond->curr_active_slave))
+ RCU_INIT_POINTER(bond->current_arp_slave, NULL);
+ continue;
+
default:
slave_err(bond->dev, slave->dev,
"impossible: link_new_state %d on slave\n",
return should_notify_rtnl;
}
- bond_set_slave_inactive_flags(curr_arp_slave, BOND_SLAVE_NOTIFY_LATER);
-
bond_for_each_slave_rcu(bond, slave, iter) {
if (!found && !before && bond_slave_is_up(slave))
before = slave;
if (BOND_MODE(bond) == BOND_MODE_8023AD)
bond_3ad_adapter_speed_duplex_changed(slave);
- /* Fallthrough */
+ fallthrough;
case NETDEV_DOWN:
/* Refresh slave-array if applicable!
* If the setup does not use miimon or arpmon (mode-specific!),
return -EINVAL;
mii->phy_id = 0;
- /* Fall Through */
+ fallthrough;
case SIOCGMIIREG:
/* We do this again just in case we were called by SIOCGMIIREG
* instead of SIOCGMIIPHY.
return ret;
}
+static u32 bond_mode_bcast_speed(struct slave *slave, u32 speed)
+{
+ if (speed == 0 || speed == SPEED_UNKNOWN)
+ speed = slave->speed;
+ else
+ speed = min(speed, slave->speed);
+
+ return speed;
+}
+
static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev,
struct ethtool_link_ksettings *cmd)
{
struct bonding *bond = netdev_priv(bond_dev);
- unsigned long speed = 0;
struct list_head *iter;
struct slave *slave;
+ u32 speed = 0;
cmd->base.duplex = DUPLEX_UNKNOWN;
cmd->base.port = PORT_OTHER;
*/
bond_for_each_slave(bond, slave, iter) {
if (bond_slave_can_tx(slave)) {
- if (slave->speed != SPEED_UNKNOWN)
- speed += slave->speed;
+ if (slave->speed != SPEED_UNKNOWN) {
+ if (BOND_MODE(bond) == BOND_MODE_BROADCAST)
+ speed = bond_mode_bcast_speed(slave,
+ speed);
+ else
+ speed += slave->speed;
+ }
if (cmd->base.duplex == DUPLEX_UNKNOWN &&
slave->duplex != DUPLEX_UNKNOWN)
cmd->base.duplex = slave->duplex;
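The reasoning behind bond_mode_bcast_speed(): in broadcast mode every frame is transmitted on all slaves, so the bond's effective speed is bounded by the slowest active slave, and the reported speed is the minimum rather than the sum (speed == 0 seeds the first comparison). A small self-contained illustration with hypothetical types:

	#include <linux/kernel.h>
	#include <linux/types.h>

	struct demo_slave {
		u32 speed;
	};

	/* Effective speed of a broadcast bond is its slowest member. */
	static u32 demo_bcast_speed(const struct demo_slave *slaves, int n)
	{
		u32 speed = 0;
		int i;

		for (i = 0; i < n; i++)
			speed = speed ? min(speed, slaves[i].speed)
				      : slaves[i].speed;
		return speed;
	}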
CAN_ERR_CRTL_TX_WARNING :
CAN_ERR_CRTL_RX_WARNING;
}
- /* fall through */
+ fallthrough;
case CAN_STATE_ERROR_WARNING:
/*
* from: ERROR_ACTIVE, ERROR_WARNING
netdev_dbg(dev, "Error Active\n");
cf->can_id |= CAN_ERR_PROT;
cf->data[2] = CAN_ERR_PROT_ACTIVE;
- /* fall through */
+ fallthrough;
case CAN_STATE_ERROR_WARNING:
reg_idr = AT91_IRQ_ERRA | AT91_IRQ_WARN | AT91_IRQ_BOFF;
reg_ier = AT91_IRQ_ERRP;
pciefd_can_writereg(priv, CANFD_CLK_SEL_80MHZ,
PCIEFD_REG_CAN_CLK_SEL);
- /* fall through */
+ fallthrough;
case CANFD_CLK_SEL_80MHZ:
priv->ucan.can.clock.freq = 80 * 1000 * 1000;
break;
priv->read_reg = sp_read_reg16;
priv->write_reg = sp_write_reg16;
break;
- case 1: /* fallthrough */
+ case 1:
default:
priv->read_reg = sp_read_reg8;
priv->write_reg = sp_write_reg8;
switch (*cmd) {
case 'r':
cf.can_id = CAN_RTR_FLAG;
- /* fallthrough */
+ fallthrough;
case 't':
/* store dlc ASCII value and terminate SFF CAN ID string */
cf.can_dlc = sl->rbuff[SLC_CMD_LEN + SLC_SFF_ID_LEN];
break;
case 'R':
cf.can_id = CAN_RTR_FLAG;
- /* fallthrough */
+ fallthrough;
case 'T':
cf.can_id |= CAN_EFF_FLAG;
/* store dlc ASCII value and terminate EFF CAN ID string */
if (new_state >= CAN_STATE_ERROR_WARNING &&
new_state <= CAN_STATE_BUS_OFF)
priv->can.can_stats.error_warning++;
- /* fall through */
+ fallthrough;
case CAN_STATE_ERROR_WARNING:
if (new_state >= CAN_STATE_ERROR_PASSIVE &&
new_state <= CAN_STATE_BUS_OFF)
new_state = CAN_STATE_ERROR_WARNING;
break;
}
- /* fall through */
+ fallthrough;
case CAN_STATE_ERROR_WARNING:
if (n & PCAN_USB_ERROR_BUS_HEAVY) {
default:
netdev_warn(netdev, "tx urb submitting failed err=%d\n",
err);
- /* fall through */
+ fallthrough;
case -ENOENT:
/* cable unplugged */
stats->tx_dropped++;
switch (id) {
case PCAN_USBPRO_TXMSG8:
i += 4;
- /* fall through */
+ fallthrough;
case PCAN_USBPRO_TXMSG4:
i += 4;
- /* fall through */
+ fallthrough;
case PCAN_USBPRO_TXMSG0:
*pc++ = va_arg(ap, int);
*pc++ = va_arg(ap, int);
switch (speed) {
case 2000:
reg |= PORT_OVERRIDE_SPEED_2000M;
- /* fallthrough */
+ fallthrough;
case SPEED_1000:
reg |= PORT_OVERRIDE_SPEED_1000M;
break;
return ret;
switch (ret) {
+ case -ETIMEDOUT:
+ return ret;
case -ENOSPC:
dev_dbg(dev->dev, "{%pM,%.4d} no space left in ARL\n",
addr, vid);
switch (lane) {
case 0:
phylink_set(supported, 2500baseX_Full);
- /* fallthrough */
+ fallthrough;
case 1:
phylink_set(supported, 1000baseX_Full);
break;
switch (state->interface) {
case PHY_INTERFACE_MODE_RGMII:
id_mode_dis = 1;
- /* fallthrough */
+ fallthrough;
case PHY_INTERFACE_MODE_RGMII_TXID:
port_mode = EXT_GPHY;
break;
interface = PHY_INTERFACE_MODE_GMII;
if (gbit)
break;
- /* fall through */
+ fallthrough;
case 0:
interface = PHY_INTERFACE_MODE_MII;
break;
case P5_INTF_SEL_PHY_P0:
/* MT7530_P5_MODE_GPHY_P0: 2nd GMAC -> P5 -> P0 */
val |= MHWTRAP_PHY0_SEL;
- /* fall through */
+ fallthrough;
case P5_INTF_SEL_PHY_P4:
/* MT7530_P5_MODE_GPHY_P4: 2nd GMAC -> P5 -> P4 */
val &= ~MHWTRAP_P5_MAC_SEL & ~MHWTRAP_P5_DIS;
if (phy_node->parent == priv->dev->of_node->parent) {
ret = of_get_phy_mode(mac_np, &interface);
- if (ret && ret != -ENODEV)
+ if (ret && ret != -ENODEV) {
+ of_node_put(mac_np);
return ret;
+ }
id = of_mdio_parse_addr(ds->dev, phy_node);
if (id == 0)
priv->p5_intf_sel = P5_INTF_SEL_PHY_P0;
if (id == 4)
priv->p5_intf_sel = P5_INTF_SEL_PHY_P4;
}
+ of_node_put(mac_np);
of_node_put(phy_node);
break;
}
phylink_set(mask, 100baseT_Full);
if (state->interface != PHY_INTERFACE_MODE_MII) {
- phylink_set(mask, 1000baseT_Half);
+ /* This switch only supports 1G full-duplex. */
phylink_set(mask, 1000baseT_Full);
if (port == 5)
phylink_set(mask, 1000baseX_Full);
break;
case STATS_TYPE_BANK1:
reg = bank1_select;
- /* fall through */
+ fallthrough;
case STATS_TYPE_BANK0:
reg |= s->reg | histogram;
mv88e6xxx_g1_stats_read(chip, reg, &low);
select NET_DSA_TAG_OCELOT
select FSL_ENETC_MDIO
help
- This driver supports network switches from the the Vitesse /
+ This driver supports network switches from the Vitesse /
Microsemi / Microchip Ocelot family of switching cores that are
connected to their host CPU via Ethernet.
The following switches are supported:
if (err < 0) {
dev_err(dev, "Unsupported PHY mode %s on port %d\n",
phy_modes(phy_mode), port);
+ of_node_put(child);
return err;
}
sja1105_unpack(prod_id, &part_no, 19, 4, SJA1105_SIZE_DEVICE_ID);
- for (match = sja1105_dt_ids; match->compatible; match++) {
+ for (match = sja1105_dt_ids; match->compatible[0]; match++) {
const struct sja1105_info *info = match->data;
/* Is what's been probed in our match table at all? */
pr_cont("Forcing 3c5x9b full-duplex mode");
break;
}
- /* fall through */
+ fallthrough;
case 8:
/* set full-duplex mode based on eeprom config setting */
if ((sw_info & 0x000f) && (sw_info & 0x8000)) {
pr_cont("Setting 3c5x9b full-duplex mode (from EEPROM configuration bit)");
break;
}
- /* fall through */
+ fallthrough;
default:
/* xcvr=(0 || 4) OR user has an old 3c5x9 non "B" model */
pr_cont("Setting 3c5x9/3c5x9B half-duplex mode");
static void update_stats(struct net_device *dev)
{
unsigned int ioaddr = dev->base_addr;
- u8 rx, tx, up;
+ u8 up;
pr_debug("%s: updating the statistics.\n", dev->name);
dev->stats.tx_packets += (up&0x30) << 4;
/* Rx packets */ inb(ioaddr + 7);
/* Tx deferrals */ inb(ioaddr + 8);
- rx = inw(ioaddr + 10);
- tx = inw(ioaddr + 12);
+ /* rx */ inw(ioaddr + 10);
+ /* tx */ inw(ioaddr + 12);
EL3WINDOW(4);
/* BadSSD */ inb(ioaddr + 12);
switch(cmd) {
case SIOCGMIIPHY: /* Get the address of the PHY in use. */
data->phy_id = phy;
- /* fall through */
+ fallthrough;
case SIOCGMIIREG: /* Read the specified MII register. */
{
int saved_window;
switch (cmd) {
case SIOCGMIIPHY:
data->phy_id = info->phy_id;
- /* Fall through */
+ fallthrough;
case SIOCGMIIREG: /* Read MII PHY register. */
data->val_out = mdio_read(mii_addr, data->phy_id, data->reg_num & 0x1f);
return 0;
/**
* axnet_tx_timeout - handle transmit time out condition
* @dev: network device which has apparently fallen asleep
+ * @txqueue: unused
*
* Called by kernel when device never acknowledges a transmit has
* completed (or failed) - i.e. never posted a Tx related interrupt.
switch (cmd) {
case SIOCGMIIPHY:
data->phy_id = info->phy_id;
- /* fall through */
+ fallthrough;
case SIOCGMIIREG: /* Read MII PHY register. */
data->val_out = mdio_read(mii_addr, data->phy_id, data->reg_num & 0x1f);
return 0;
{
switch (subdev) {
/* Mojave */
- case PCI_SUBDEVICE_ID_ALACRITECH_1000X1F: /* fallthrough */
- case PCI_SUBDEVICE_ID_ALACRITECH_SES1001F: /* fallthrough */
+ case PCI_SUBDEVICE_ID_ALACRITECH_1000X1F:
+ case PCI_SUBDEVICE_ID_ALACRITECH_SES1001F: fallthrough;
/* Oasis */
- case PCI_SUBDEVICE_ID_ALACRITECH_SEN2002XF: /* fallthrough */
- case PCI_SUBDEVICE_ID_ALACRITECH_SEN2001XF: /* fallthrough */
- case PCI_SUBDEVICE_ID_ALACRITECH_SEN2104EF: /* fallthrough */
- case PCI_SUBDEVICE_ID_ALACRITECH_SEN2102EF: /* fallthrough */
+ case PCI_SUBDEVICE_ID_ALACRITECH_SEN2002XF:
+ case PCI_SUBDEVICE_ID_ALACRITECH_SEN2001XF:
+ case PCI_SUBDEVICE_ID_ALACRITECH_SEN2104EF:
+ case PCI_SUBDEVICE_ID_ALACRITECH_SEN2102EF:
return true;
}
return false;
ap->name);
break;
}
- /* Fall through */
+ fallthrough;
case PCI_VENDOR_ID_SGI:
printk(KERN_INFO "%s: SGI AceNIC ", ap->name);
break;
int i;
for (i = first_index; i < first_index + count; i++) {
- /* Check if napi was initialized before */
- if (!ENA_IS_XDP_INDEX(adapter, i) ||
- adapter->ena_napi[i].xdp_ring)
- netif_napi_del(&adapter->ena_napi[i].napi);
- else
- WARN_ON(ENA_IS_XDP_INDEX(adapter, i) &&
- adapter->ena_napi[i].xdp_ring);
+ netif_napi_del(&adapter->ena_napi[i].napi);
+
+ WARN_ON(!ENA_IS_XDP_INDEX(adapter, i) &&
+ adapter->ena_napi[i].xdp_ring);
}
}
{
struct ena_adapter *adapter =
container_of(work, struct ena_adapter, reset_task);
- struct pci_dev *pdev = adapter->pdev;
- if (unlikely(!test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
- dev_err(&pdev->dev,
- "device reset schedule while reset bit is off\n");
- return;
- }
rtnl_lock();
- ena_destroy_device(adapter, false);
- ena_restore_device(adapter);
+
+ if (likely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
+ ena_destroy_device(adapter, false);
+ ena_restore_device(adapter);
+ }
+
rtnl_unlock();
}
}
u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->tx_stats.missed_tx = missed_tx;
+ tx_ring->tx_stats.missed_tx += missed_tx;
u64_stats_update_end(&tx_ring->syncp);
return rc;
netdev->rx_cpu_rmap = NULL;
}
#endif /* CONFIG_RFS_ACCEL */
- del_timer_sync(&adapter->timer_service);
+ /* Make sure timer and reset routine won't be called after
+ * freeing device resources.
+ */
+ del_timer_sync(&adapter->timer_service);
cancel_work_sync(&adapter->reset_task);
rtnl_lock(); /* lock released inside the below if-else block */
tx_drops = ((u64)desc->tx_drops_high << 32) | desc->tx_drops_low;
u64_stats_update_begin(&adapter->syncp);
+ /* These stats are accumulated by the device, so the counters indicate
+ * all drops since last reset.
+ */
adapter->dev_stats.rx_drops = rx_drops;
adapter->dev_stats.tx_drops = tx_drops;
u64_stats_update_end(&adapter->syncp);
case SIOCGMIIPHY:
data->phy_id = lp->ext_phy_addr;
- /* fallthru */
+ fallthrough;
case SIOCGMIIREG:
spin_lock_irq(&lp->lock);
/* PTP v2, UDP, any kind of event packet */
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- /* Fall through - to PTP v1, UDP, any kind of event packet */
+ fallthrough; /* to PTP v1, UDP, any kind of event packet */
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
/* PTP v2, UDP, Sync packet */
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- /* Fall through - to PTP v1, UDP, Sync packet */
+ fallthrough; /* to PTP v1, UDP, Sync packet */
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
/* PTP v2, UDP, Delay_req packet */
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- /* Fall through - to PTP v1, UDP, Delay_req packet */
+ fallthrough; /* to PTP v1, UDP, Delay_req packet */
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
XGBE_PORT_MODE_10GBASE_T,
XGBE_PORT_MODE_10GBASE_R,
XGBE_PORT_MODE_SFP,
+ XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG,
XGBE_PORT_MODE_MAX,
};
if (ad_reg & 0x80) {
switch (phy_data->port_mode) {
case XGBE_PORT_MODE_BACKPLANE:
+ case XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG:
mode = XGBE_MODE_KR;
break;
default:
} else if (ad_reg & 0x20) {
switch (phy_data->port_mode) {
case XGBE_PORT_MODE_BACKPLANE:
+ case XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG:
mode = XGBE_MODE_KX_1000;
break;
case XGBE_PORT_MODE_1000BASE_X:
switch (phy_data->port_mode) {
case XGBE_PORT_MODE_BACKPLANE:
+ case XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG:
XGBE_SET_ADV(dlks, 10000baseKR_Full);
break;
case XGBE_PORT_MODE_BACKPLANE_2500:
switch (phy_data->port_mode) {
case XGBE_PORT_MODE_BACKPLANE:
return XGBE_AN_MODE_CL73;
+ case XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG:
case XGBE_PORT_MODE_BACKPLANE_2500:
return XGBE_AN_MODE_NONE;
case XGBE_PORT_MODE_1000BASE_T:
switch (phy_data->port_mode) {
case XGBE_PORT_MODE_BACKPLANE:
+ case XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG:
return xgbe_phy_switch_bp_mode(pdata);
case XGBE_PORT_MODE_BACKPLANE_2500:
return xgbe_phy_switch_bp_2500_mode(pdata);
switch (phy_data->port_mode) {
case XGBE_PORT_MODE_BACKPLANE:
+ case XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG:
return xgbe_phy_get_bp_mode(speed);
case XGBE_PORT_MODE_BACKPLANE_2500:
return xgbe_phy_get_bp_2500_mode(speed);
switch (phy_data->port_mode) {
case XGBE_PORT_MODE_BACKPLANE:
+ case XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG:
return xgbe_phy_use_bp_mode(pdata, mode);
case XGBE_PORT_MODE_BACKPLANE_2500:
return xgbe_phy_use_bp_2500_mode(pdata, mode);
switch (phy_data->port_mode) {
case XGBE_PORT_MODE_BACKPLANE:
+ case XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG:
return xgbe_phy_valid_speed_bp_mode(speed);
case XGBE_PORT_MODE_BACKPLANE_2500:
return xgbe_phy_valid_speed_bp_2500_mode(speed);
switch (phy_data->port_mode) {
case XGBE_PORT_MODE_BACKPLANE:
+ case XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG:
if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) ||
(phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000))
return false;
switch (phy_data->port_mode) {
case XGBE_PORT_MODE_BACKPLANE:
+ case XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG:
case XGBE_PORT_MODE_BACKPLANE_2500:
if (phy_data->conn_type == XGBE_CONN_TYPE_BACKPLANE)
return false;
/* Backplane support */
case XGBE_PORT_MODE_BACKPLANE:
XGBE_SET_SUP(lks, Autoneg);
+ fallthrough;
+ case XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG:
XGBE_SET_SUP(lks, Pause);
XGBE_SET_SUP(lks, Asym_Pause);
XGBE_SET_SUP(lks, Backplane);
hw_atl_ts_reset_set(self, 0);
}
- err = readx_poll_timeout_atomic(hw_atl_b0_ts_ready_and_latch_high_get,
- self, val, val == 1, 10000U, 500000U);
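+	/* Up to 500 ms: too long to busy-wait, so use the sleeping variant */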
+ err = readx_poll_timeout(hw_atl_b0_ts_ready_and_latch_high_get, self,
+ val, val == 1, 10000U, 500000U);
if (err)
return err;
if (IS_ERR(data->reset_gpio)) {
error = PTR_ERR(data->reset_gpio);
dev_err(priv->dev, "Failed to request gpio: %d\n", error);
+ mdiobus_free(bus);
return error;
}
priv->tx_rings = devm_kcalloc(&pdev->dev, txq,
sizeof(struct bcm_sysport_tx_ring),
GFP_KERNEL);
- if (!priv->tx_rings)
- return -ENOMEM;
+ if (!priv->tx_rings) {
+ ret = -ENOMEM;
+ goto err_free_netdev;
+ }
priv->is_lite = params->is_lite;
priv->num_rx_desc_words = params->num_rx_desc_words;
/* BCM 471X/535X family */
case BCMA_CHIP_ID_BCM4716:
bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
- /* fallthrough */
+ fallthrough;
case BCMA_CHIP_ID_BCM47162:
bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL2;
bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK;
switch (bgmac->net_dev->phydev->speed) {
default:
netdev_err(net_dev, "Unsupported speed. Defaulting to 1000Mb\n");
- /* fall through */
+ fallthrough;
case SPEED_1000:
val |= NICPM_IOMUX_CTRL_SPD_1000M << NICPM_IOMUX_CTRL_SPD_SHIFT;
break;
val |= BNX2_EMAC_MODE_PORT_MII_10M;
break;
}
- /* fall through */
+ fallthrough;
case SPEED_100:
val |= BNX2_EMAC_MODE_PORT_MII;
break;
case SPEED_2500:
val |= BNX2_EMAC_MODE_25G_MODE;
- /* fall through */
+ fallthrough;
case SPEED_1000:
val |= BNX2_EMAC_MODE_PORT_GMII;
break;
switch (speed) {
case BNX2_LINK_STATUS_10HALF:
bp->duplex = DUPLEX_HALF;
- /* fall through */
+ fallthrough;
case BNX2_LINK_STATUS_10FULL:
bp->line_speed = SPEED_10;
break;
case BNX2_LINK_STATUS_100HALF:
bp->duplex = DUPLEX_HALF;
- /* fall through */
+ fallthrough;
case BNX2_LINK_STATUS_100BASE_T4:
case BNX2_LINK_STATUS_100FULL:
bp->line_speed = SPEED_100;
break;
case BNX2_LINK_STATUS_1000HALF:
bp->duplex = DUPLEX_HALF;
- /* fall through */
+ fallthrough;
case BNX2_LINK_STATUS_1000FULL:
bp->line_speed = SPEED_1000;
break;
case BNX2_LINK_STATUS_2500HALF:
bp->duplex = DUPLEX_HALF;
- /* fall through */
+ fallthrough;
case BNX2_LINK_STATUS_2500FULL:
bp->line_speed = SPEED_2500;
break;
case SIOCGMIIPHY:
data->phy_id = bp->phy_addr;
- /* fallthru */
+ fallthrough;
case SIOCGMIIREG: {
u32 mii_regval;
LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
case LINK_10THD:
vars->duplex = DUPLEX_HALF;
- /* Fall thru */
+ fallthrough;
case LINK_10TFD:
vars->line_speed = SPEED_10;
break;
case LINK_100TXHD:
vars->duplex = DUPLEX_HALF;
- /* Fall thru */
+ fallthrough;
case LINK_100T4:
case LINK_100TXFD:
vars->line_speed = SPEED_100;
case LINK_1000THD:
vars->duplex = DUPLEX_HALF;
- /* Fall thru */
+ fallthrough;
case LINK_1000TFD:
vars->line_speed = SPEED_1000;
break;
case LINK_2500THD:
vars->duplex = DUPLEX_HALF;
- /* Fall thru */
+ fallthrough;
case LINK_2500TFD:
vars->line_speed = SPEED_2500;
break;
*/
if (!vars->link_up)
break;
- /* fall through */
+ fallthrough;
case LED_MODE_ON:
if (((params->phy[EXT_PHY1].type ==
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
case PORT_FEATURE_LINK_SPEED_10M_HALF:
phy->req_duplex = DUPLEX_HALF;
- /* fall through */
+ fallthrough;
case PORT_FEATURE_LINK_SPEED_10M_FULL:
phy->req_line_speed = SPEED_10;
break;
case PORT_FEATURE_LINK_SPEED_100M_HALF:
phy->req_duplex = DUPLEX_HALF;
- /* fall through */
+ fallthrough;
case PORT_FEATURE_LINK_SPEED_100M_FULL:
phy->req_line_speed = SPEED_100;
break;
bp->num_queues,
1 + bp->num_cnic_queues);
- /* fall through */
+ fallthrough;
case BNX2X_INT_MODE_MSI:
bnx2x_enable_msi(bp);
- /* fall through */
+ fallthrough;
case BNX2X_INT_MODE_INTX:
bp->num_ethernet_queues = 1;
bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
/* DEL command deletes all currently configured MACs */
case BNX2X_MCAST_CMD_DEL:
o->set_registry_size(o, 0);
- /* fall through */
+ fallthrough;
/* RESTORE command will restore the entire multicast configuration */
case BNX2X_MCAST_CMD_RESTORE:
/* DEL command deletes all currently configured MACs */
case BNX2X_MCAST_CMD_DEL:
o->set_registry_size(o, 0);
- /* fall through */
+ fallthrough;
/* RESTORE command will restore the entire multicast configuration */
case BNX2X_MCAST_CMD_RESTORE:
DP(BNX2X_MSG_IOV, "got VF [%d:%d] RSS update ramrod\n",
vf->abs_vfid, qidx);
bnx2x_vf_handle_rss_update_eqe(bp, vf);
- /* fall through */
+ fallthrough;
case EVENT_RING_OPCODE_VF_FLR:
/* Do nothing for now */
return 0;
rc = bnx2x_vf_close(bp, vf);
if (rc)
goto op_err;
- /* Fall through - to release resources */
+ fallthrough; /* to release resources */
case VF_ACQUIRED:
DP(BNX2X_MSG_IOV, "about to free resources\n");
bnx2x_vf_free_resc(bp, vf);
static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
{
+ if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
+ return;
+
if (BNXT_PF(bp))
queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
else
static void bnxt_cancel_sp_work(struct bnxt *bp)
{
- if (BNXT_PF(bp))
+ if (BNXT_PF(bp)) {
flush_workqueue(bnxt_pf_wq);
- else
+ } else {
cancel_work_sync(&bp->sp_task);
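+		/* The FW reset task is queued as delayed work; cancel it too */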
+ cancel_delayed_work_sync(&bp->fw_reset_task);
+ }
}
static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
break;
case BNXT_FW_HEALTH_REG_TYPE_GRC:
reg_off = fw_health->mapped_regs[reg_idx];
- /* fall through */
+ fallthrough;
case BNXT_FW_HEALTH_REG_TYPE_BAR0:
val = readl(bp->bar0 + reg_off);
break;
}
set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
}
- /* fall through */
+ fallthrough;
case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
- /* fall through */
+ fallthrough;
case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
break;
return cp + ulp_stat;
}
+/* Check if a default RSS map needs to be set up. This function is only
+ * used on older firmware that does not require reserving RX rings.
+ */
+static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
+{
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+
+ /* The RSS map is valid for RX rings set to resv_rx_rings */
+ if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
+ hw_resc->resv_rx_rings = bp->rx_nr_rings;
+ if (!netif_is_rxfh_configured(bp->dev))
+ bnxt_set_dflt_rss_indir_tbl(bp);
+ }
+}
+
static bool bnxt_need_reserve_rings(struct bnxt *bp)
{
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
int rx = bp->rx_nr_rings, stat;
int vnic = 1, grp = rx;
- if (bp->hwrm_spec_code < 0x10601)
- return false;
-
- if (hw_resc->resv_tx_rings != bp->tx_nr_rings)
+ if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
+ bp->hwrm_spec_code >= 0x10601)
return true;
+ /* Old firmware does not need RX ring reservations but we still
+ * need to set up a default RSS map when needed. With new firmware
+ * we go through RX ring reservations first and then set up the
+ * RSS map for the successfully reserved RX rings when needed.
+ */
+ if (!BNXT_NEW_RM(bp)) {
+ bnxt_check_rss_tbl_no_rmgr(bp);
+ return false;
+ }
if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
vnic = rx + 1;
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx <<= 1;
stat = bnxt_get_func_stat_ctxs(bp);
- if (BNXT_NEW_RM(bp) &&
- (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
- hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
- (hw_resc->resv_hw_ring_grps != grp &&
- !(bp->flags & BNXT_FLAG_CHIP_P5))))
+ if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
+ hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
+ (hw_resc->resv_hw_ring_grps != grp &&
+ !(bp->flags & BNXT_FLAG_CHIP_P5)))
return true;
if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) &&
hw_resc->resv_irqs != nq)
if (!tx || !rx || !cp || !grp || !vnic || !stat)
return -ENOMEM;
+ if (!netif_is_rxfh_configured(bp->dev))
+ bnxt_set_dflt_rss_indir_tbl(bp);
+
return rc;
}
rc = bnxt_init_int_mode(bp);
bnxt_ulp_irq_restart(bp, rc);
}
- if (!netif_is_rxfh_configured(bp->dev))
- bnxt_set_dflt_rss_indir_tbl(bp);
-
if (rc) {
netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
return rc;
struct hwrm_temp_monitor_query_input req = {0};
struct hwrm_temp_monitor_query_output *resp;
struct bnxt *bp = dev_get_drvdata(dev);
- u32 temp = 0;
+ u32 len = 0;
resp = bp->hwrm_cmd_resp_addr;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
mutex_lock(&bp->hwrm_cmd_lock);
- if (!_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
- temp = resp->temp * 1000; /* display millidegree */
+ if (!_hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
+ len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */
mutex_unlock(&bp->hwrm_cmd_lock);
- return sprintf(buf, "%u\n", temp);
+ if (len)
+ return len;
+
+ return sprintf(buf, "unknown\n");
}
static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
}
}
- bnxt_enable_napi(bp);
- bnxt_debug_dev_init(bp);
-
rc = bnxt_init_nic(bp, irq_re_init);
if (rc) {
netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
- goto open_err;
+ goto open_err_irq;
}
+ bnxt_enable_napi(bp);
+ bnxt_debug_dev_init(bp);
+
if (link_re_init) {
mutex_lock(&bp->link_lock);
rc = bnxt_update_phy_setting(bp);
bnxt_vf_reps_open(bp);
return 0;
-open_err:
- bnxt_debug_dev_exit(bp);
- bnxt_disable_napi(bp);
-
open_err_irq:
bnxt_del_napi(bp);
case SIOCGMIIPHY:
mdio->phy_id = bp->link_info.phy_addr;
- /* fallthru */
+ fallthrough;
case SIOCGMIIREG: {
u16 mii_regval = 0;
writel(reg_off & BNXT_GRC_BASE_MASK,
bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
- /* fall through */
+ fallthrough;
case BNXT_FW_HEALTH_REG_TYPE_BAR0:
writel(val, bp->bar0 + reg_off);
break;
}
bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
}
- /* fall through */
+ fallthrough;
case BNXT_FW_RESET_STATE_RESET_FW:
bnxt_reset_all(bp);
bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
}
pci_set_master(bp->pdev);
bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
- /* fall through */
+ fallthrough;
case BNXT_FW_RESET_STATE_POLL_FW:
bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
rc = __bnxt_hwrm_ver_get(bp, true);
}
bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
- /* fall through */
+ fallthrough;
case BNXT_FW_RESET_STATE_OPENING:
while (!rtnl_trylock()) {
bnxt_queue_fw_reset_work(bp, HZ / 10);
unregister_netdev(dev);
bnxt_dl_unregister(bp);
bnxt_shutdown_tc(bp);
+ clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
bnxt_cancel_sp_work(bp);
bp->sp_event = 0;
if (BNXT_CHIP_P5(bp))
bp->flags |= BNXT_FLAG_CHIP_P5;
+ rc = bnxt_alloc_rss_indir_tbl(bp);
+ if (rc)
+ goto init_err_pci_clean;
+
rc = bnxt_fw_init_one_p2(bp);
if (rc)
goto init_err_pci_clean;
*/
bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
- rc = bnxt_alloc_rss_indir_tbl(bp);
- if (rc)
- goto init_err_pci_clean;
- bnxt_set_dflt_rss_indir_tbl(bp);
-
if (BNXT_PF(bp)) {
if (!bnxt_pf_wq) {
bnxt_pf_wq =
(long)pci_resource_start(pdev, 0), dev->dev_addr);
pcie_print_link_status(pdev);
+ pci_save_state(pdev);
return 0;
init_err_cleanup:
"Cannot re-enable PCI device after reset.\n");
} else {
pci_set_master(pdev);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
err = bnxt_hwrm_func_reset(bp);
if (!err) {
static int bnxt_get_num_ring_stats(struct bnxt *bp)
{
int rx, tx, cmn;
- bool sh = false;
-
- if (bp->flags & BNXT_FLAG_SHARED_RINGS)
- sh = true;
rx = NUM_RING_RX_HW_STATS + NUM_RING_RX_SW_STATS +
bnxt_get_num_tpa_ring_stats(bp);
tx = NUM_RING_TX_HW_STATS;
cmn = NUM_RING_CMN_SW_STATS;
- if (sh)
- return (rx + tx + cmn) * bp->cp_nr_rings;
- else
- return rx * bp->rx_nr_rings + tx * bp->tx_nr_rings +
- cmn * bp->cp_nr_rings;
+ return rx * bp->rx_nr_rings + tx * bp->tx_nr_rings +
+ cmn * bp->cp_nr_rings;
}
static int bnxt_get_num_stats(struct bnxt *bp)
int max_tx_sch_inputs;
/* Get the most up-to-date max_tx_sch_inputs. */
- if (BNXT_NEW_RM(bp))
+ if (netif_running(dev) && BNXT_NEW_RM(bp))
bnxt_hwrm_func_resc_qcaps(bp, false);
max_tx_sch_inputs = hw_resc->max_tx_sch_inputs;
if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4)
cmd->data |= RXH_IP_SRC | RXH_IP_DST |
RXH_L4_B_0_1 | RXH_L4_B_2_3;
- /* fall through */
+ fallthrough;
case SCTP_V4_FLOW:
case AH_ESP_V4_FLOW:
case AH_V4_FLOW:
if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6)
cmd->data |= RXH_IP_SRC | RXH_IP_DST |
RXH_L4_B_0_1 | RXH_L4_B_2_3;
- /* fall through */
+ fallthrough;
case SCTP_V6_FLOW:
case AH_ESP_V6_FLOW:
case AH_V6_FLOW:
if (rc != 0)
return rc;
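+	/* Treat an empty or zero-sized NVRAM directory as a firmware error */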
+ if (!dir_entries || !entry_length)
+ return -EIO;
+
/* Insert 2 bytes of directory info (count and size of entries) */
if (len < 2)
return -EINVAL;
break;
default:
bpf_warn_invalid_xdp_action(act);
- /* Fall thru */
+ fallthrough;
case XDP_ABORTED:
trace_xdp_exception(bp->dev, xdp_prog, act);
- /* Fall thru */
+ fallthrough;
case XDP_DROP:
bnxt_reuse_rx_data(rxr, cons, page);
break;
}
case CNIC_CTL_FCOE_STATS_GET_CMD:
ulp_type = CNIC_ULP_FCOE;
- /* fall through */
+ fallthrough;
case CNIC_CTL_ISCSI_STATS_GET_CMD:
cnic_hold(dev);
cnic_copy_ulp_stats(dev, ulp_type);
l4kcqe->status, l5kcqe->completion_status);
opcode = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
}
- /* Fall through */
+ fallthrough;
case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
case L4_KCQE_OPCODE_VALUE_RESET_COMP:
continue;
case BCMGENET_STAT_RUNT:
offset += BCMGENET_STAT_OFFSET;
- /* fall through */
+ fallthrough;
case BCMGENET_STAT_MIB_TX:
offset += BCMGENET_STAT_OFFSET;
- /* fall through */
+ fallthrough;
case BCMGENET_STAT_MIB_RX:
val = bcmgenet_umac_readl(priv,
UMAC_MIB_START + j + offset);
case ETHER_FLOW:
eth_mask = &cmd->fs.m_u.ether_spec;
/* don't allow mask which isn't valid */
- if (VALIDATE_MASK(eth_mask->h_source) ||
+ if (VALIDATE_MASK(eth_mask->h_dest) ||
VALIDATE_MASK(eth_mask->h_source) ||
VALIDATE_MASK(eth_mask->h_proto)) {
netdev_err(dev, "rxnfc: Unsupported mask\n");
switch (priv->phy_interface) {
case PHY_INTERFACE_MODE_INTERNAL:
phy_name = "internal PHY";
- /* fall through */
+ fallthrough;
case PHY_INTERFACE_MODE_MOCA:
/* Irrespective of the actually configured PHY speed (100 or
* 1000) GENETv4 only has an internal GPHY so we will just end
case TG3_APE_LOCK_GPIO:
if (tg3_asic_rev(tp) == ASIC_REV_5761)
return 0;
- /* fall through */
+ fallthrough;
case TG3_APE_LOCK_GRC:
case TG3_APE_LOCK_MEM:
if (!tp->pci_fn)
case TG3_APE_LOCK_GPIO:
if (tg3_asic_rev(tp) == ASIC_REV_5761)
return;
- /* fall through */
+ fallthrough;
case TG3_APE_LOCK_GRC:
case TG3_APE_LOCK_MEM:
if (!tp->pci_fn)
phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
- /* fall through */
+ fallthrough;
case PHY_ID_RTL8211C:
phydev->interface = PHY_INTERFACE_MODE_RGMII;
break;
phy_support_asym_pause(phydev);
break;
}
- /* fall through */
+ fallthrough;
case PHY_INTERFACE_MODE_MII:
phy_set_max_speed(phydev, SPEED_100);
phy_support_asym_pause(phydev);
MII_TG3_DSP_TAP26_RMRXSTO |
MII_TG3_DSP_TAP26_OPCSINPT;
tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
- /* Fall through */
+ fallthrough;
case ASIC_REV_5720:
case ASIC_REV_5762:
if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
tp->link_config.speed = SPEED_1000;
break;
}
- /* Fall through */
+ fallthrough;
default:
goto done;
}
if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
ap->state = ANEG_STATE_AN_ENABLE;
- /* fall through */
+ fallthrough;
case ANEG_STATE_AN_ENABLE:
ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
if (ap->flags & MR_AN_ENABLE) {
ret = ANEG_TIMER_ENAB;
ap->state = ANEG_STATE_RESTART;
- /* fall through */
+ fallthrough;
case ANEG_STATE_RESTART:
delta = ap->cur_time - ap->link_time;
if (delta > ANEG_STATE_SETTLE_TIME)
ap->state = ANEG_STATE_ACK_DETECT;
- /* fall through */
+ fallthrough;
case ANEG_STATE_ACK_DETECT:
if (ap->ack_match != 0) {
if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
static inline void tg3_reset_task_cancel(struct tg3 *tp)
{
- cancel_work_sync(&tp->reset_task);
- tg3_flag_clear(tp, RESET_TASK_PENDING);
+ if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
+ cancel_work_sync(&tp->reset_task);
tg3_flag_clear(tp, TX_RECOVERY_PENDING);
}
switch (limit) {
case 16:
tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
- /* fall through */
+ fallthrough;
case 15:
tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
- /* fall through */
+ fallthrough;
case 14:
tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
- /* fall through */
+ fallthrough;
case 13:
tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
- /* fall through */
+ fallthrough;
case 12:
tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
- /* fall through */
+ fallthrough;
case 11:
tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
- /* fall through */
+ fallthrough;
case 10:
tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
- /* fall through */
+ fallthrough;
case 9:
tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
- /* fall through */
+ fallthrough;
case 8:
tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
- /* fall through */
+ fallthrough;
case 7:
tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
- /* fall through */
+ fallthrough;
case 6:
tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
- /* fall through */
+ fallthrough;
case 5:
tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
- /* fall through */
+ fallthrough;
case 4:
/* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
case 3:
tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
err = tg3_init_hw(tp, true);
- if (err)
+ if (err) {
+ tg3_full_unlock(tp);
+ tp->irq_sync = 0;
+ tg3_napi_enable(tp);
+ /* Clear this flag so that tg3_reset_task_cancel() will not
+ * call cancel_work_sync() and wait forever.
+ */
+ tg3_flag_clear(tp, RESET_TASK_PENDING);
+ dev_close(tp->dev);
goto out;
+ }
tg3_netif_start(tp);
-out:
tg3_full_unlock(tp);
if (!err)
tg3_phy_start(tp);
tg3_flag_clear(tp, RESET_TASK_PENDING);
+out:
rtnl_unlock();
}
case SIOCGMIIPHY:
data->phy_id = tp->phy_addr;
- /* fall through */
+ fallthrough;
case SIOCGMIIREG: {
u32 mii_regval;
val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
break;
}
- /* fallthrough */
+ fallthrough;
case 128:
default:
val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
DMA_RWCTRL_WRITE_BNDRY_16);
break;
}
- /* fallthrough */
+ fallthrough;
case 32:
if (goal == BOUNDARY_SINGLE_CACHELINE) {
val |= (DMA_RWCTRL_READ_BNDRY_32 |
DMA_RWCTRL_WRITE_BNDRY_32);
break;
}
- /* fallthrough */
+ fallthrough;
case 64:
if (goal == BOUNDARY_SINGLE_CACHELINE) {
val |= (DMA_RWCTRL_READ_BNDRY_64 |
DMA_RWCTRL_WRITE_BNDRY_64);
break;
}
- /* fallthrough */
+ fallthrough;
case 128:
if (goal == BOUNDARY_SINGLE_CACHELINE) {
val |= (DMA_RWCTRL_READ_BNDRY_128 |
DMA_RWCTRL_WRITE_BNDRY_128);
break;
}
- /* fallthrough */
+ fallthrough;
case 256:
val |= (DMA_RWCTRL_READ_BNDRY_256 |
DMA_RWCTRL_WRITE_BNDRY_256);
case IOC_E_PFFAILED:
case IOC_E_HWERROR:
del_timer(&ioc->ioc_timer);
- /* fall through */
+ fallthrough;
case IOC_E_TIMEOUT:
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
case IOCPF_E_INITFAIL:
del_timer(&ioc->iocpf_timer);
- /* fall through */
+ fallthrough;
case IOCPF_E_TIMEOUT:
bfa_nw_ioc_hw_sem_release(ioc);
case IOCPF_E_FAIL:
del_timer(&ioc->iocpf_timer);
- /* fall through*/
+ fallthrough;
case IOCPF_E_TIMEOUT:
bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
case ENET_E_CHLD_STOPPED:
bna_enet_rx_start(enet);
- /* Fall through */
+ fallthrough;
case ENET_E_FWRESP_PAUSE:
if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) {
enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
&q1->qpt);
cfg_req->q_cfg[i].qs.rx_buffer_size =
htons((u16)q1->buffer_size);
- /* Fall through */
+ fallthrough;
case BNA_RXP_SINGLE:
/* Large/Single RxQ */
case HWTSTAMP_TX_ONESTEP_SYNC:
if (gem_ptp_set_one_step_sync(bp, 1) != 0)
return -ERANGE;
- /* fall through */
+ fallthrough;
case HWTSTAMP_TX_ON:
tx_bd_control = TSTAMP_ALL_FRAMES;
break;
schedule_timeout_uninterruptible(HZ / 10);
- /* fallthrough */
+ fallthrough;
case OCT_DEV_HOST_OK:
- /* fallthrough */
case OCT_DEV_CONSOLE_INIT_DONE:
/* Remove any consoles */
octeon_remove_consoles(oct);
- /* fallthrough */
+ fallthrough;
case OCT_DEV_IO_QUEUES_DONE:
if (lio_wait_for_instr_fetch(oct))
dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");
octeon_free_sc_done_list(oct);
octeon_free_sc_zombie_list(oct);
- /* fallthrough */
+ fallthrough;
case OCT_DEV_INTR_SET_DONE:
/* Disable interrupts */
oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
kfree(oct->irq_name_storage);
oct->irq_name_storage = NULL;
- /* fallthrough */
+ fallthrough;
case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
if (OCTEON_CN23XX_PF(oct))
octeon_free_ioq_vector(oct);
- /* fallthrough */
+ fallthrough;
case OCT_DEV_MBOX_SETUP_DONE:
if (OCTEON_CN23XX_PF(oct))
oct->fn_list.free_mbox(oct);
- /* fallthrough */
+ fallthrough;
case OCT_DEV_IN_RESET:
case OCT_DEV_DROQ_INIT_DONE:
/* Wait for any pending operations */
}
}
- /* fallthrough */
+ fallthrough;
case OCT_DEV_RESP_LIST_INIT_DONE:
octeon_delete_response_list(oct);
- /* fallthrough */
+ fallthrough;
case OCT_DEV_INSTR_QUEUE_INIT_DONE:
for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
if (!(oct->io_qmask.iq & BIT_ULL(i)))
if (oct->sriov_info.sriov_enabled)
pci_disable_sriov(oct->pci_dev);
#endif
- /* fallthrough */
+ fallthrough;
case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
octeon_free_sc_buffer_pool(oct);
- /* fallthrough */
+ fallthrough;
case OCT_DEV_DISPATCH_INIT_DONE:
octeon_delete_dispatch_list(oct);
cancel_delayed_work_sync(&oct->nic_poll_work.work);
- /* fallthrough */
+ fallthrough;
case OCT_DEV_PCI_MAP_DONE:
refcount = octeon_deregister_device(oct);
octeon_unmap_pci_barx(oct, 0);
octeon_unmap_pci_barx(oct, 1);
- /* fallthrough */
+ fallthrough;
case OCT_DEV_PCI_ENABLE_DONE:
pci_clear_master(oct->pci_dev);
/* Disable the device, releasing the PCI INT */
pci_disable_device(oct->pci_dev);
- /* fallthrough */
+ fallthrough;
case OCT_DEV_BEGIN_STATE:
/* Nothing to be done here either */
break;
case SIOCSHWTSTAMP:
if (lio->oct_dev->ptp_enable)
return hwtstamp_ioctl(netdev, ifr);
- /* fall through */
+ fallthrough;
default:
return -EOPNOTSUPP;
}
schedule_timeout_uninterruptible(HZ / 10);
- /* fallthrough */
+ fallthrough;
case OCT_DEV_HOST_OK:
- /* fallthrough */
case OCT_DEV_IO_QUEUES_DONE:
if (lio_wait_for_instr_fetch(oct))
dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");
octeon_free_sc_done_list(oct);
octeon_free_sc_zombie_list(oct);
- /* fall through */
+ fallthrough;
case OCT_DEV_INTR_SET_DONE:
/* Disable interrupts */
oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
else
cn23xx_vf_ask_pf_to_do_flr(oct);
- /* fallthrough */
+ fallthrough;
case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
octeon_free_ioq_vector(oct);
- /* fallthrough */
+ fallthrough;
case OCT_DEV_MBOX_SETUP_DONE:
oct->fn_list.free_mbox(oct);
- /* fallthrough */
+ fallthrough;
case OCT_DEV_IN_RESET:
case OCT_DEV_DROQ_INIT_DONE:
mdelay(100);
octeon_delete_droq(oct, i);
}
- /* fallthrough */
+ fallthrough;
case OCT_DEV_RESP_LIST_INIT_DONE:
octeon_delete_response_list(oct);
- /* fallthrough */
+ fallthrough;
case OCT_DEV_INSTR_QUEUE_INIT_DONE:
for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
if (!(oct->io_qmask.iq & BIT_ULL(i)))
octeon_delete_instr_queue(oct, i);
}
- /* fallthrough */
+ fallthrough;
case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
octeon_free_sc_buffer_pool(oct);
- /* fallthrough */
+ fallthrough;
case OCT_DEV_DISPATCH_INIT_DONE:
octeon_delete_dispatch_list(oct);
cancel_delayed_work_sync(&oct->nic_poll_work.work);
- /* fallthrough */
+ fallthrough;
case OCT_DEV_PCI_MAP_DONE:
octeon_unmap_pci_barx(oct, 0);
octeon_unmap_pci_barx(oct, 1);
- /* fallthrough */
+ fallthrough;
case OCT_DEV_PCI_ENABLE_DONE:
pci_clear_master(oct->pci_dev);
/* Disable the device, releasing the PCI INT */
pci_disable_device(oct->pci_dev);
- /* fallthrough */
+ fallthrough;
case OCT_DEV_BEGIN_STATE:
/* Nothing to be done here either */
break;
case SCTP_V4_FLOW:
case SCTP_V6_FLOW:
info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- /* Fall through */
+ fallthrough;
case IPV4_FLOW:
case IPV6_FLOW:
info->data |= RXH_IP_SRC | RXH_IP_DST;
return true;
default:
bpf_warn_invalid_xdp_action(action);
- /* fall through */
+ fallthrough;
case XDP_ABORTED:
trace_xdp_exception(nic->netdev, prog, action);
- /* fall through */
+ fallthrough;
case XDP_DROP:
/* Check if it's a recycled page, if not
* unmap the DMA mapping.
!(data->phy_id & 0xe0e0))
data->phy_id = mdio_phy_id_c45(data->phy_id >> 8,
data->phy_id & 0x1f);
- /* FALLTHRU */
+ fallthrough;
case SIOCGMIIPHY:
return mdio_mii_ioctl(&pi->phy.mdio, data, cmd);
case SIOCCHIOCTL:
if (e->state == L2T_STATE_STALE)
e->state = L2T_STATE_VALID;
spin_unlock_bh(&e->lock);
- /* fall through */
+ fallthrough;
case L2T_STATE_VALID: /* fast-path, send the packet on */
return cxgb3_ofld_send(dev, skb);
case L2T_STATE_RESOLVING:
int cxgb4_thermal_init(struct adapter *adap)
{
struct ch_thermal *ch_thermal = &adap->ch_thermal;
+ char ch_tz_name[THERMAL_NAME_LENGTH];
int num_trip = CXGB4_NUM_TRIPS;
u32 param, val;
int ret;
ch_thermal->trip_type = THERMAL_TRIP_CRITICAL;
}
- ch_thermal->tzdev = thermal_zone_device_register("cxgb4", num_trip,
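+	/* Include the adapter name so each zone gets a unique name */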
+ snprintf(ch_tz_name, sizeof(ch_tz_name), "cxgb4_%s", adap->name);
+ ch_thermal->tzdev = thermal_zone_device_register(ch_tz_name, num_trip,
0, adap,
&cxgb4_thermal_ops,
NULL, 0, 0);
int cxgb4_thermal_remove(struct adapter *adap)
{
- if (adap->ch_thermal.tzdev)
+ if (adap->ch_thermal.tzdev) {
thermal_zone_device_unregister(adap->ch_thermal.tzdev);
+ adap->ch_thermal.tzdev = NULL;
+ }
return 0;
}
if (e->state == L2T_STATE_STALE)
e->state = L2T_STATE_VALID;
spin_unlock_bh(&e->lock);
- /* fall through */
+ fallthrough;
case L2T_STATE_VALID: /* fast-path, send the packet on */
return t4_ofld_send(adap, skb);
case L2T_STATE_RESOLVING:
pkt_len = ETH_HLEN + sizeof(CXGB4_SELFTEST_LB_STR);
- flits = DIV_ROUND_UP(pkt_len + sizeof(struct cpl_tx_pkt) +
- sizeof(*wr), sizeof(__be64));
+ flits = DIV_ROUND_UP(pkt_len + sizeof(*cpl) + sizeof(*wr),
+ sizeof(__be64));
ndesc = flits_to_desc(flits);
lb = &pi->ethtool_lb;
lb->loopback = 1;
q = &adap->sge.ethtxq[pi->first_qset];
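+	/* Hold the Tx queue lock so the loopback packet is not interleaved
+	 * with normal transmits on the same queue.
+	 */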
+ __netif_tx_lock(q->txq, smp_processor_id());
reclaim_completed_tx(adap, &q->q, -1, true);
credits = txq_avail(&q->q) - ndesc;
- if (unlikely(credits < 0))
+ if (unlikely(credits < 0)) {
+ __netif_tx_unlock(q->txq);
return -ENOMEM;
+ }
wr = (void *)&q->q.desc[q->q.pidx];
memset(wr, 0, sizeof(struct tx_desc));
init_completion(&lb->completion);
txq_advance(&q->q, ndesc);
cxgb4_ring_tx_db(adap, &q->q, ndesc);
+ __netif_tx_unlock(q->txq);
/* wait for the pkt to return */
ret = wait_for_completion_timeout(&lb->completion, 10 * HZ);
switch (nmac) {
case 5:
memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
- /* Fall through */
+ fallthrough;
case 4:
memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
- /* Fall through */
+ fallthrough;
case 3:
memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
- /* Fall through */
+ fallthrough;
case 2:
memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
}
}
cpl = (void *)p;
}
- /* Fall through */
+ fallthrough;
case CPL_SGE_EGR_UPDATE: {
/*
case ntohs(ETH_P_IPV6):
if (!(enic->vxlan.flags & ENIC_VXLAN_INNER_IPV6))
goto out;
- /* Fall through */
+ fallthrough;
case ntohs(ETH_P_IP):
break;
default:
dev_info(dev, "probe %s ID %d\n", dev_name(dev), id);
- netdev = alloc_etherdev_mq(sizeof(*port), TX_QUEUE_NUM);
+ netdev = devm_alloc_etherdev_mqs(dev, sizeof(*port), TX_QUEUE_NUM, TX_QUEUE_NUM);
if (!netdev) {
dev_err(dev, "Can't allocate ethernet device #%d\n", id);
return -ENOMEM;
port->reset = devm_reset_control_get_exclusive(dev, NULL);
if (IS_ERR(port->reset)) {
dev_err(dev, "no reset\n");
- clk_disable_unprepare(port->pclk);
- return PTR_ERR(port->reset);
+ ret = PTR_ERR(port->reset);
+ goto unprepare;
}
reset_control_reset(port->reset);
usleep_range(100, 500);
IRQF_SHARED,
port_names[port->id],
port);
- if (ret) {
- clk_disable_unprepare(port->pclk);
- return ret;
- }
+ if (ret)
+ goto unprepare;
ret = register_netdev(netdev);
- if (!ret) {
+ if (ret)
+ goto unprepare;
+
+ netdev_info(netdev,
+ "irq %d, DMA @ 0x%pap, GMAC @ 0x%pap\n",
+ port->irq, &dmares->start,
+ &gmacres->start);
+ ret = gmac_setup_phy(netdev);
+ if (ret)
netdev_info(netdev,
- "irq %d, DMA @ 0x%pap, GMAC @ 0x%pap\n",
- port->irq, &dmares->start,
- &gmacres->start);
- ret = gmac_setup_phy(netdev);
- if (ret)
- netdev_info(netdev,
- "PHY init failed, deferring to ifup time\n");
- return 0;
- }
+ "PHY init failed, deferring to ifup time\n");
+ return 0;
- port->netdev = NULL;
- free_netdev(netdev);
+unprepare:
+ clk_disable_unprepare(port->pclk);
return ret;
}
struct gemini_ethernet_port *port = platform_get_drvdata(pdev);
gemini_port_remove(port);
- free_netdev(port->netdev);
return 0;
}
case 3:
dev_dbg(db->dev, ": 3 byte IO, falling back to 16bit\n");
- /* fall through */
+ fallthrough;
case 2:
db->dumpblk = dm9000_dumpblk_16bit;
db->outblk = dm9000_outblk_16bit;
case SROM_10BASETF:
if (!lp->params.fdx) return -1;
lp->fdx = true;
- /* fall through */
+ fallthrough;
case SROM_10BASET:
if (lp->params.fdx && !lp->fdx) return -1;
case SROM_100BASETF:
if (!lp->params.fdx) return -1;
lp->fdx = true;
- /* fall through */
+ fallthrough;
case SROM_100BASET:
if (lp->params.fdx && !lp->fdx) return -1;
case SROM_100BASEFF:
if (!lp->params.fdx) return -1;
lp->fdx = true;
- /* fall through */
+ fallthrough;
case SROM_100BASEF:
if (lp->params.fdx && !lp->fdx) return -1;
data->phy_id = 1;
else
return -ENODEV;
- /* Fall through */
+ fallthrough;
case SIOCGMIIREG: /* Read MII PHY register. */
if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
switch(cmd) {
case SIOCGMIIPHY: /* Get address of MII PHY in use. */
data->phy_id = ((struct netdev_private *)netdev_priv(dev))->phys[0] & 0x1f;
- /* Fall Through */
+ fallthrough;
case SIOCGMIIREG: /* Read MII PHY register. */
spin_lock_irq(&np->lock);
break;
}
}
- /* fall through */
+ fallthrough;
case PHY_TYPE_SFP_PLUS_10GB:
case PHY_TYPE_XFP_10GB:
case PHY_TYPE_SFP_1GB:
break;
case FQ_TYPE_TX_CONF_MQ:
priv->conf_fqs[conf_cnt++] = &fq->fq_base;
- /* fall through */
+ fallthrough;
case FQ_TYPE_TX_CONFIRM:
dpaa_setup_ingress(priv, fq, &fq_cbs->tx_defq);
break;
case UDP_V6_FLOW:
if (priv->keygen_in_use)
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- /* Fall through */
+ fallthrough;
case IPV4_FLOW:
case IPV6_FLOW:
case SCTP_V4_FLOW:
break;
default:
bpf_warn_invalid_xdp_action(xdp_act);
- /* fall through */
+ fallthrough;
case XDP_ABORTED:
trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
- /* fall through */
+ fallthrough;
case XDP_DROP:
xdp_release_buf(priv, ch, addr);
ch->stats.xdp_drop++;
failed_irq:
failed_init:
fec_ptp_stop(pdev);
- if (fep->reg_phy)
- regulator_disable(fep->reg_phy);
failed_reset:
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+ if (fep->reg_phy)
+ regulator_disable(fep->reg_phy);
failed_regulator:
clk_disable_unprepare(fep->clk_ahb);
failed_clk_ahb:
case 100:
tmp_reg16 |= IF_MODE_SGMII_SPEED_100M;
break;
- case 1000: /* fallthrough */
+ case 1000:
default:
tmp_reg16 |= IF_MODE_SGMII_SPEED_1G;
break;
switch (port->port_type) {
case FMAN_PORT_TYPE_RX:
set_rx_dflt_cfg(port, params);
- /* fall through */
+ fallthrough;
case FMAN_PORT_TYPE_TX:
set_tx_dflt_cfg(port, params, &port->dts_params);
- /* fall through */
+ fallthrough;
default:
set_dflt_cfg(port, params);
}
continue;
err = gfar_parse_group(child, priv, model);
- if (err)
+ if (err) {
+ of_node_put(child);
goto err_grp_init;
+ }
}
} else { /* SQ_SG_MODE */
err = gfar_parse_group(np, priv, model);
switch (ugeth->max_speed) {
case SPEED_10:
upsmr |= UCC_GETH_UPSMR_R10M;
- /* FALLTHROUGH */
+ fallthrough;
case SPEED_100:
if (ugeth->phy_interface != PHY_INTERFACE_MODE_RTBI)
upsmr |= UCC_GETH_UPSMR_RMM;
priv->enet_ver = AE_VERSION_1;
else if (acpi_dev_found(hns_enet_acpi_match[1].id))
priv->enet_ver = AE_VERSION_2;
- else
- return -ENXIO;
+ else {
+ ret = -ENXIO;
+ goto out_read_prop_fail;
+ }
/* try to find port-idx-in-ae first */
ret = acpi_node_get_property_reference(dev->fwnode,
priv->fwnode = args.fwnode;
} else {
dev_err(dev, "cannot read cfg data from OF or acpi\n");
- return -ENXIO;
+ ret = -ENXIO;
+ goto out_read_prop_fail;
}
ret = device_property_read_u32(dev, "port-idx-in-ae", &port_id);
break;
case MAC_LOOP_PHY_NONE:
ret = hns_nic_config_phy_loopback(phy_dev, 0x0);
- /* fall through */
+ fallthrough;
case MAC_LOOP_NONE:
if (!ret && h->dev->ops->set_loopback) {
if (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII)
#include <net/pkt_cls.h>
#include <net/tcp.h>
#include <net/vxlan.h>
+#include <net/geneve.h>
#include "hnae3.h"
#include "hns3_enet.h"
* and it is udp packet, which has a dest port as the IANA assigned.
* the hardware is expected to do the checksum offload, but the
* hardware will not do the checksum offload when udp dest port is
- * 4789.
+ * 4789 or 6081.
*/
static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
{
l4.hdr = skb_transport_header(skb);
if (!(!skb->encapsulation &&
- l4.udp->dest == htons(IANA_VXLAN_UDP_PORT)))
+ (l4.udp->dest == htons(IANA_VXLAN_UDP_PORT) ||
+ l4.udp->dest == htons(GENEVE_UDP_PORT))))
return false;
skb_checksum_help(skb);
case HNS3_OL4_TYPE_MAC_IN_UDP:
case HNS3_OL4_TYPE_NVGRE:
skb->csum_level = 1;
- /* fall through */
+ fallthrough;
case HNS3_OL4_TYPE_NO_TUN:
l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
HNS3_RXD_L3ID_S);
* by first decoding the types of errors.
*/
set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
- /* fall through */
+ fallthrough;
case HCLGE_VECTOR0_EVENT_RST:
hclge_reset_task_schedule(hdev);
break;
switch (hdev->reset_type) {
case HNAE3_FUNC_RESET:
- /* fall through */
case HNAE3_FLR_RESET:
ret = hclge_set_all_vf_rst(hdev, false);
break;
case HNAE3_GLOBAL_RESET:
- /* fall through */
case HNAE3_IMP_RESET:
ret = hclge_set_rst_done(hdev);
break;
switch (action) {
case MEM_CANCEL_OFFLINE:
pr_info("memory offlining canceled");
- /* Fall through - re-add canceled memory block */
+ fallthrough; /* re-add canceled memory block */
case MEM_ONLINE:
pr_info("memory is going online");
switch (cmd) {
case SIOCGMIIPHY:
data->phy_id = dev->phy.address;
- /* Fall through */
+ fallthrough;
case SIOCGMIIREG:
data->val_out = emac_mdio_read(ndev, dev->phy.address,
data->reg_num);
int i, j, rc;
u64 *size_array;
+ if (!adapter->rx_pool)
+ return -1;
+
size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
int tx_scrqs;
int i, rc;
+ if (!adapter->tx_pool)
+ return -1;
+
tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
for (i = 0; i < tx_scrqs; i++) {
rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
adapter->req_rx_add_entries_per_subcrq !=
old_num_rx_slots ||
adapter->req_tx_entries_per_subcrq !=
- old_num_tx_slots) {
+ old_num_tx_slots ||
+ !adapter->rx_pool ||
+ !adapter->tso_pool ||
+ !adapter->tx_pool) {
release_rx_pools(adapter);
release_tx_pools(adapter);
release_napi(adapter);
} else {
rc = reset_tx_pools(adapter);
-		if (rc)
+		if (rc) {
+			netdev_dbg(adapter->netdev, "reset tx pools failed (%d)\n",
+				   rc);
 			goto out;
+		}
rc = reset_rx_pools(adapter);
-		if (rc)
+		if (rc) {
+			netdev_dbg(adapter->netdev, "reset rx pools failed (%d)\n",
+				   rc);
 			goto out;
+		}
}
ibmvnic_disable_irqs(adapter);
case e1000_pch_lpt:
case e1000_pch_spt:
case e1000_pch_cnp:
- fallthrough;
case e1000_pch_tgp:
case e1000_pch_adp:
fc->refresh_time = 0xFFFF;
#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04
#define I40E_AQC_SET_VSI_DEFAULT 0x08
#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10
-#define I40E_AQC_SET_VSI_PROMISC_TX 0x8000
+#define I40E_AQC_SET_VSI_PROMISC_RX_ONLY 0x8000
__le16 seid;
__le16 vlan_tag;
#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000
}
/**
+ * i40e_is_aq_api_ver_ge
+ * @aq: pointer to AdminQ info containing HW API version to compare
+ * @maj: API major value
+ * @min: API minor value
+ *
+ * Check whether the current HW API version is greater than or equal to
+ * the provided one.
+ **/
+static bool i40e_is_aq_api_ver_ge(struct i40e_adminq_info *aq, u16 maj,
+ u16 min)
+{
+ return (aq->api_maj_ver > maj ||
+ (aq->api_maj_ver == maj && aq->api_min_ver >= min));
+}
+
+/**
* i40e_aq_add_vsi
* @hw: pointer to the hw struct
* @vsi_ctx: pointer to a vsi context struct
if (set) {
flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
- if (rx_only_promisc &&
- (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) ||
- (hw->aq.api_maj_ver > 1)))
- flags |= I40E_AQC_SET_VSI_PROMISC_TX;
+ if (rx_only_promisc && i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
+ flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
}
cmd->promiscuous_flags = cpu_to_le16(flags);
cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
- if (((hw->aq.api_maj_ver >= 1) && (hw->aq.api_min_ver >= 5)) ||
- (hw->aq.api_maj_ver > 1))
- cmd->valid_flags |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_TX);
+ if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
+ cmd->valid_flags |=
+ cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
cmd->seid = cpu_to_le16(seid);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
- if (enable)
+ if (enable) {
flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
+ if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
+ flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
+ }
cmd->promiscuous_flags = cpu_to_le16(flags);
cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+ if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
+ cmd->valid_flags |=
+ cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
cmd->seid = cpu_to_le16(seid);
cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
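+	/* Let any in-progress reset recovery finish before tearing down */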
+ while (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
+ usleep_range(1000, 2000);
+
/* no more scheduling of any task */
set_bit(__I40E_SUSPENDED, pf->state);
set_bit(__I40E_DOWN, pf->state);
case e1000_i354:
case e1000_i210:
case e1000_i211:
- fallthrough;
default:
for (; i < adapter->num_rx_queues; i++)
adapter->rx_ring[i]->reg_idx = rbase_offset + i;
device_set_wakeup_enable(&adapter->pdev->dev,
adapter->flags & IGC_FLAG_WOL_SUPPORTED);
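+	/* Initialize PTP before the first reset so that the reset path can
+	 * apply the hardware timestamping configuration.
+	 */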
+ igc_ptp_init(adapter);
+
/* reset the hardware with the new settings */
igc_reset(adapter);
/* carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
- /* do hw tstamp init after resetting */
- igc_ptp_init(adapter);
-
/* Check if Media Autosense is enabled */
adapter->ei = *ei;
adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
- igc_ptp_reset(adapter);
-
adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
&adapter->pdev->dev);
if (IS_ERR(adapter->ptp_clock)) {
}
/* alloc the udl from per cpu ddp pool */
- ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_KERNEL, &ddp->udp);
+ ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp);
if (!ddp->udl) {
e_err(drv, "failed allocated ddp context\n");
goto out_noddp_unmap;
break;
default:
bpf_warn_invalid_xdp_action(act);
- /* fall through */
+ fallthrough;
case XDP_ABORTED:
trace_xdp_exception(pp->dev, prog, act);
- /* fall through */
+ fallthrough;
case XDP_DROP:
mvneta_xdp_put_buff(pp, rxq, xdp, sync, true);
ret = MVNETA_XDP_DROPPED;
hash_opts |= MVPP22_CLS_HEK_OPT_L4SIP;
if (info->data & RXH_L4_B_2_3)
hash_opts |= MVPP22_CLS_HEK_OPT_L4DIP;
- /* Fallthrough */
+ fallthrough;
case MVPP22_FLOW_IP4:
case MVPP22_FLOW_IP6:
if (info->data & RXH_L2DA)
}
if (state->interface != PHY_INTERFACE_MODE_NA)
break;
- /* Fall-through */
+ fallthrough;
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_RXID:
phylink_set(mask, 1000baseX_Full);
if (state->interface != PHY_INTERFACE_MODE_NA)
break;
- /* Fall-through */
+ fallthrough;
case PHY_INTERFACE_MODE_1000BASEX:
case PHY_INTERFACE_MODE_2500BASEX:
if (port->comphy ||
else if (req->ctype == NIX_AQ_CTYPE_MCE)
memcpy(mask, &req->mce_mask,
sizeof(struct nix_rx_mce_s));
- /* Fall through */
+ fallthrough;
case NIX_AQ_INSTOP_INIT:
if (req->ctype == NIX_AQ_CTYPE_RQ)
memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
if (!hw->pool_cnt)
return -EINVAL;
- qset->pool = devm_kzalloc(pfvf->dev, sizeof(struct otx2_pool) *
- hw->pool_cnt, GFP_KERNEL);
+ qset->pool = devm_kcalloc(pfvf->dev, hw->pool_cnt,
+ sizeof(struct otx2_pool), GFP_KERNEL);
if (!qset->pool)
return -ENOMEM;
case SIOCGMIIPHY:
data->phy_id = hw->phy_addr;
- /* fallthru */
+ fallthrough;
case SIOCGMIIREG: {
u16 val = 0;
spin_lock_bh(&hw->phy_lock);
case SIOCGMIIPHY:
data->phy_id = PHY_ADDR_MARV;
- /* fallthru */
+ fallthrough;
case SIOCGMIIREG: {
u16 val = 0;
case OP_RXCHKSVLAN:
sky2_rx_tag(sky2, length);
- /* fall through */
+ fallthrough;
case OP_RXCHKS:
if (likely(dev->features & NETIF_F_RXCSUM))
sky2_rx_checksum(sky2, status);
if (!MTK_HAS_CAPS(mac->hw->soc->caps,
MTK_GMAC1_TRGMII))
goto err_phy;
- /* fall through */
+ fallthrough;
case PHY_INTERFACE_MODE_RGMII_TXID:
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_TXID:
phylink_set(mask, 1000baseT_Half);
- /* fall through */
+ fallthrough;
case PHY_INTERFACE_MODE_SGMII:
phylink_set(mask, 1000baseT_Full);
phylink_set(mask, 1000baseX_Full);
- /* fall through */
+ fallthrough;
case PHY_INTERFACE_MODE_MII:
case PHY_INTERFACE_MODE_RMII:
case PHY_INTERFACE_MODE_REVMII:
goto err_out;
for (i = 0; i <= buddy->max_order; ++i) {
- s = BITS_TO_LONGS(1 << (buddy->max_order - i));
+ s = BITS_TO_LONGS(1UL << (buddy->max_order - i));
buddy->bits[i] = kvmalloc_array(s, sizeof(long), GFP_KERNEL | __GFP_ZERO);
if (!buddy->bits[i])
goto err_out_free;
case MLXFW_FSM_STATE_ERR_BLOCKED_PENDING_RESET:
MLXFW_ERR_MSG(mlxfw_dev, extack, "pending reset", err);
break;
- case MLXFW_FSM_STATE_ERR_OK: /* fall through */
+ case MLXFW_FSM_STATE_ERR_OK:
case MLXFW_FSM_STATE_ERR_MAX:
MLXFW_ERR_MSG(mlxfw_dev, extack, "unknown error", err);
break;
case MLXFW_FSM_REACTIVATE_STATUS_FW_ALREADY_ACTIVATED:
MLXFW_REACT_ERR("fw already activated", err);
break;
- case MLXFW_FSM_REACTIVATE_STATUS_OK: /* fall through */
+ case MLXFW_FSM_REACTIVATE_STATUS_OK:
case MLXFW_FSM_REACTIVATE_STATUS_MAX:
MLXFW_REACT_ERR("unexpected error", err);
break;
/* Here we need to get the module width according to the module type. */
switch (module_type) {
- case MLXSW_REG_PMTM_MODULE_TYPE_C2C8X: /* fall through */
- case MLXSW_REG_PMTM_MODULE_TYPE_QSFP_DD: /* fall through */
+ case MLXSW_REG_PMTM_MODULE_TYPE_C2C8X:
+ case MLXSW_REG_PMTM_MODULE_TYPE_QSFP_DD:
case MLXSW_REG_PMTM_MODULE_TYPE_OSFP:
return 8;
- case MLXSW_REG_PMTM_MODULE_TYPE_C2C4X: /* fall through */
- case MLXSW_REG_PMTM_MODULE_TYPE_BP_4X: /* fall through */
+ case MLXSW_REG_PMTM_MODULE_TYPE_C2C4X:
+ case MLXSW_REG_PMTM_MODULE_TYPE_BP_4X:
case MLXSW_REG_PMTM_MODULE_TYPE_QSFP:
return 4;
- case MLXSW_REG_PMTM_MODULE_TYPE_C2C2X: /* fall through */
- case MLXSW_REG_PMTM_MODULE_TYPE_BP_2X: /* fall through */
- case MLXSW_REG_PMTM_MODULE_TYPE_SFP_DD: /* fall through */
+ case MLXSW_REG_PMTM_MODULE_TYPE_C2C2X:
+ case MLXSW_REG_PMTM_MODULE_TYPE_BP_2X:
+ case MLXSW_REG_PMTM_MODULE_TYPE_SFP_DD:
case MLXSW_REG_PMTM_MODULE_TYPE_DSFP:
return 2;
- case MLXSW_REG_PMTM_MODULE_TYPE_C2C1X: /* fall through */
- case MLXSW_REG_PMTM_MODULE_TYPE_BP_1X: /* fall through */
+ case MLXSW_REG_PMTM_MODULE_TYPE_C2C1X:
+ case MLXSW_REG_PMTM_MODULE_TYPE_BP_1X:
case MLXSW_REG_PMTM_MODULE_TYPE_SFP:
return 1;
default:
case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_SFP:
*qsfp = false;
break;
- case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP: /* fall-through */
- case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_PLUS: /* fall-through */
+ case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP:
+ case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_PLUS:
case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP28:
*qsfp = true;
break;
modinfo->type = ETH_MODULE_SFF_8436;
modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
break;
- case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_PLUS: /* fall-through */
+ case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_PLUS:
case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP28:
if (module_id == MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP28 ||
module_rev_id >=
*/
fault = 1;
break;
- case MLXSW_REG_MTBR_NO_CONN: /* fall-through */
- case MLXSW_REG_MTBR_NO_TEMP_SENS: /* fall-through */
+ case MLXSW_REG_MTBR_NO_CONN:
+ case MLXSW_REG_MTBR_NO_TEMP_SENS:
case MLXSW_REG_MTBR_INDEX_NA:
default:
fault = 0;
return MLXSW_REG_SPMS_STATE_FORWARDING;
case BR_STATE_LEARNING:
return MLXSW_REG_SPMS_STATE_LEARNING;
- case BR_STATE_LISTENING: /* fall-through */
- case BR_STATE_DISABLED: /* fall-through */
+ case BR_STATE_LISTENING:
+ case BR_STATE_DISABLED:
case BR_STATE_BLOCKING:
return MLXSW_REG_SPMS_STATE_DISCARDING;
default:
mlxsw_sp_kvdl_entry_size(enum mlxsw_sp_kvdl_entry_type type)
{
switch (type) {
- case MLXSW_SP_KVDL_ENTRY_TYPE_ADJ: /* fall through */
- case MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET: /* fall through */
- case MLXSW_SP_KVDL_ENTRY_TYPE_PBS: /* fall through */
- case MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR: /* fall through */
- case MLXSW_SP_KVDL_ENTRY_TYPE_TNUMT: /* fall through */
+ case MLXSW_SP_KVDL_ENTRY_TYPE_ADJ:
+ case MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET:
+ case MLXSW_SP_KVDL_ENTRY_TYPE_PBS:
+ case MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR:
+ case MLXSW_SP_KVDL_ENTRY_TYPE_TNUMT:
default:
return 1;
}
addr_len = 4;
addr_prefix_len = 32;
break;
- case MLXSW_SP_L3_PROTO_IPV6: /* fall through */
+ case MLXSW_SP_L3_PROTO_IPV6:
default:
WARN_ON(1);
return NULL;
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
return 0;
}
- /* fall through */
+ fallthrough;
case RTN_BROADCAST:
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
return 0;
case RTN_BLACKHOLE:
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
return 0;
- case RTN_UNREACHABLE: /* fall through */
+ case RTN_UNREACHABLE:
case RTN_PROHIBIT:
/* Packets hitting these routes need to be trapped, but
* can do so with a lower priority than packets directed
mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
fib_info_put(fib_work->fen_info.fi);
break;
- case FIB_EVENT_NH_ADD: /* fall through */
+ case FIB_EVENT_NH_ADD:
case FIB_EVENT_NH_DEL:
mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event,
fib_work->fnh_info.fib_nh);
rtnl_lock();
mutex_lock(&mlxsw_sp->router->lock);
switch (fib_work->event) {
- case FIB_EVENT_ENTRY_REPLACE: /* fall through */
+ case FIB_EVENT_ENTRY_REPLACE:
case FIB_EVENT_ENTRY_ADD:
replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
struct fib_nh_notifier_info *fnh_info;
switch (fib_work->event) {
- case FIB_EVENT_ENTRY_REPLACE: /* fall through */
+ case FIB_EVENT_ENTRY_REPLACE:
case FIB_EVENT_ENTRY_DEL:
fen_info = container_of(info, struct fib_entry_notifier_info,
info);
*/
fib_info_hold(fib_work->fen_info.fi);
break;
- case FIB_EVENT_NH_ADD: /* fall through */
+ case FIB_EVENT_NH_ADD:
case FIB_EVENT_NH_DEL:
fnh_info = container_of(info, struct fib_nh_notifier_info,
info);
int err;
switch (fib_work->event) {
- case FIB_EVENT_ENTRY_REPLACE: /* fall through */
- case FIB_EVENT_ENTRY_APPEND: /* fall through */
+ case FIB_EVENT_ENTRY_REPLACE:
+ case FIB_EVENT_ENTRY_APPEND:
case FIB_EVENT_ENTRY_DEL:
fen6_info = container_of(info, struct fib6_entry_notifier_info,
info);
struct fib_notifier_info *info)
{
switch (fib_work->event) {
- case FIB_EVENT_ENTRY_REPLACE: /* fall through */
- case FIB_EVENT_ENTRY_ADD: /* fall through */
+ case FIB_EVENT_ENTRY_REPLACE:
+ case FIB_EVENT_ENTRY_ADD:
case FIB_EVENT_ENTRY_DEL:
memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info));
mr_cache_hold(fib_work->men_info.mfc);
break;
- case FIB_EVENT_VIF_ADD: /* fall through */
+ case FIB_EVENT_VIF_ADD:
case FIB_EVENT_VIF_DEL:
memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info));
dev_hold(fib_work->ven_info.dev);
router = container_of(nb, struct mlxsw_sp_router, fib_nb);
switch (event) {
- case FIB_EVENT_RULE_ADD: /* fall through */
+ case FIB_EVENT_RULE_ADD:
case FIB_EVENT_RULE_DEL:
err = mlxsw_sp_router_fib_rule_event(event, info,
router->mlxsw_sp);
return notifier_from_errno(err);
- case FIB_EVENT_ENTRY_ADD: /* fall through */
- case FIB_EVENT_ENTRY_REPLACE: /* fall through */
+ case FIB_EVENT_ENTRY_ADD:
+ case FIB_EVENT_ENTRY_REPLACE:
case FIB_EVENT_ENTRY_APPEND:
if (router->aborted) {
NL_SET_ERR_MSG_MOD(info->extack, "FIB offload was aborted. Not configuring route");
goto out;
switch (event) {
- case NETDEV_CHANGEMTU: /* fall through */
+ case NETDEV_CHANGEMTU:
case NETDEV_CHANGEADDR:
err = mlxsw_sp_router_port_change_event(mlxsw_sp, rif);
break;
enum mlxsw_sp_span_trigger_type type;
switch (trigger_entry->trigger) {
- case MLXSW_SP_SPAN_TRIGGER_INGRESS: /* fall-through */
+ case MLXSW_SP_SPAN_TRIGGER_INGRESS:
case MLXSW_SP_SPAN_TRIGGER_EGRESS:
type = MLXSW_SP_SPAN_TRIGGER_TYPE_PORT;
break;
- case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP: /* fall-through */
- case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP: /* fall-through */
+ case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP:
+ case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP:
case MLXSW_SP_SPAN_TRIGGER_ECN:
type = MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL;
break;
uip = be32_to_cpu(addr->addr4);
sfd_proto = MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV4;
break;
- case MLXSW_SP_L3_PROTO_IPV6: /* fall through */
+ case MLXSW_SP_L3_PROTO_IPV6:
default:
WARN_ON(1);
return -EOPNOTSUPP;
fdb_info = &switchdev_work->fdb_info;
mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, false);
break;
- case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */
+ case SWITCHDEV_FDB_ADD_TO_BRIDGE:
case SWITCHDEV_FDB_DEL_TO_BRIDGE:
/* These events are only used to potentially update an existing
* SPAN mirror.
switchdev_work->event = event;
switch (event) {
- case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */
- case SWITCHDEV_FDB_DEL_TO_DEVICE: /* fall through */
- case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ case SWITCHDEV_FDB_ADD_TO_BRIDGE:
case SWITCHDEV_FDB_DEL_TO_BRIDGE:
fdb_info = container_of(info,
struct switchdev_notifier_fdb_info,
*/
dev_hold(dev);
break;
- case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE: /* fall through */
+ case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE:
case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE:
INIT_WORK(&switchdev_work->work,
mlxsw_sp_switchdev_vxlan_fdb_event_work);
case TCP_V4_FLOW:case UDP_V4_FLOW:
case TCP_V6_FLOW:case UDP_V6_FLOW:
rxnfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- /* fall through */
+ fallthrough;
case IPV4_FLOW: case IPV6_FLOW:
rxnfc->data |= RXH_IP_SRC | RXH_IP_DST;
return 0;
switch (state) {
case BR_STATE_FORWARDING:
ocelot->bridge_fwd_mask |= BIT(port);
- /* Fallthrough */
+ fallthrough;
case BR_STATE_LEARNING:
port_cfg |= ANA_PORT_PORT_CFG_LEARN_ENA;
break;
switch(cmd) {
case SIOCGMIIPHY: /* Get address of MII PHY in use. */
data->phy_id = np->phy_addr_external;
- /* Fall Through */
+ fallthrough;
case SIOCGMIIREG: /* Read MII PHY register. */
/* The phy_id is not enough to uniquely identify
VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_ENTRY_EN |
VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_DATA(
itable[j]);
- /* fall through */
+ fallthrough;
case 2:
*data0 |=
VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_NUM(j)|
VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_ENTRY_EN |
VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_DATA(
itable[j]);
- /* fall through */
+ fallthrough;
case 3:
*data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_NUM(j)|
VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_ENTRY_EN |
VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_DATA(
itable[j]);
- /* fall through */
+ fallthrough;
case 4:
*data1 |=
VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_NUM(j)|
break;
}
#endif
- /* fall through */
+ fallthrough;
case AF_INET:
req_sz = sizeof(struct nfp_crypto_req_add_v4);
ipv6 = false;
case htons(GENEVE_UDP_PORT):
if (priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)
return NFP_FL_TUNNEL_GENEVE;
- /* FALLTHROUGH */
+ fallthrough;
default:
return NFP_FL_TUNNEL_NONE;
}
skb_stored = nfp_flower_lag_unprocessed_msg(app, skb);
break;
}
- /* fall through */
+ fallthrough;
default:
err_default:
nfp_flower_cmsg_warn(app, "Cannot handle invalid repr control type %u\n",
case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
if (tunnel_act)
*tunnel_act = true;
- /* fall through */
+ fallthrough;
case NFP_FL_ACTION_OPCODE_PRE_LAG:
memcpy(act_dst + act_off, act_src + act_off, act_len);
break;
* @flags: options part of the request
* @tun_info.ipv6: dest IPv6 address of active route
* @tun_info.egress_port: port the encapsulated packet egressed
+ * @tun_info.extra: reserved for future use
* @tun_info: tunnels that have sent traffic in reported period
*/
struct nfp_tun_active_tuns_v6 {
struct route_ip_info_v6 {
struct in6_addr ipv6;
__be32 egress_port;
+ __be32 extra[2];
} tun_info[];
};
val;
case NN_LM_MOD_DEC:
lm_dec = true;
- /* fall through */
+ fallthrough;
case NN_LM_MOD_INC:
if (val) {
pr_err("LM offset in inc/dev mode\n");
continue;
default:
bpf_warn_invalid_xdp_action(act);
- /* fall through */
+ fallthrough;
case XDP_ABORTED:
trace_xdp_exception(dp->netdev, xdp_prog, act);
- /* fall through */
+ fallthrough;
case XDP_DROP:
nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag,
rxbuf->dma_addr);
switch (maptype) {
case NFP_PCIE_BAR_PCIE2CPP_MapType_TARGET:
bartok = -1;
- /* FALLTHROUGH */
+ fallthrough;
case NFP_PCIE_BAR_PCIE2CPP_MapType_BULK:
baract = NFP_CPP_ACTION_RW;
if (act == 0)
act = NFP_CPP_ACTION_RW;
- /* FALLTHROUGH */
+ fallthrough;
case NFP_PCIE_BAR_PCIE2CPP_MapType_FIXED:
break;
default:
return 0;
default:
pr_warn("rtsym '%s': unknown type: %d\n", sym->name, sym->type);
- /* fall through */
+ fallthrough;
case NFP_RTSYM_TYPE_OBJECT:
case NFP_RTSYM_TYPE_FUNCTION:
return sym->size;
case SPEED_1000 + HALF_DUPLEX:
netdev_dbg(adapter->netdev,
"Half Duplex is not supported at 1000 Mbps\n");
- /* fall through */
+ fallthrough;
case SPEED_1000 + FULL_DUPLEX:
full_duplex_only:
netdev_dbg(adapter->netdev,
switch(cmd) {
case SIOCGMIIPHY: /* Get address of MII PHY in use. */
data->phy_id = np->phys[0] & 0x1f;
- /* Fall Through */
+ fallthrough;
case SIOCGMIIREG: /* Read MII PHY register. */
data->val_out = mdio_read(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f);
struct ionic_cq *txcq;
u32 rx_work_done = 0;
u32 tx_work_done = 0;
- u32 work_done = 0;
u32 flags = 0;
- bool unmask;
lif = rxcq->bound_q->lif;
idev = &lif->ionic->idev;
if (rx_work_done)
ionic_rx_fill_cb(rxcq->bound_q);
- unmask = (rx_work_done < budget) && (tx_work_done < lif->tx_budget);
-
- if (unmask && napi_complete_done(napi, rx_work_done)) {
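+	/* Re-arm the interrupt only when the rx budget was not exhausted */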
+ if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) {
flags |= IONIC_INTR_CRED_UNMASK;
DEBUG_STATS_INTR_REARM(rxcq->bound_intr);
- work_done = rx_work_done;
- } else {
- work_done = budget;
}
- if (work_done || flags) {
+ if (rx_work_done || flags) {
flags |= IONIC_INTR_CRED_RESET_COALESCE;
ionic_intr_credits(idev->intr_ctrl, rxcq->bound_intr->index,
tx_work_done + rx_work_done, flags);
DEBUG_STATS_NAPI_POLL(qcq, rx_work_done);
DEBUG_STATS_NAPI_POLL(qcq, tx_work_done);
- return work_done;
+ return rx_work_done;
}
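The ionic rework above drops the separate unmask/work_done bookkeeping and converges on the standard NAPI completion contract. A minimal sketch of that contract for reference (generic names; example_rx_clean() and example_irq_unmask() are hypothetical stand-ins, not ionic code):

/* Minimal NAPI poll sketch -- example_rx_clean() and example_irq_unmask()
 * are hypothetical helpers standing in for driver specifics. */
static int example_napi_poll(struct napi_struct *napi, int budget)
{
	int work_done = example_rx_clean(napi, budget);

	/* Re-arm interrupts only when under budget; napi_complete_done()
	 * returns false if NAPI polling must continue. */
	if (work_done < budget && napi_complete_done(napi, work_done))
		example_irq_unmask(napi);

	return work_done;
}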
static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
case NETXEN_BRDTYPE_P3_4_GB_MM:
supported |= SUPPORTED_Autoneg;
advertising |= ADVERTISED_Autoneg;
- /* fall through */
+ fallthrough;
case NETXEN_BRDTYPE_P2_SB31_10G_CX4:
case NETXEN_BRDTYPE_P3_10G_CX4:
case NETXEN_BRDTYPE_P3_10G_CX4_LP:
supported |= SUPPORTED_TP;
check_sfp_module = netif_running(dev) &&
adapter->has_link_events;
- /* fall through */
+ fallthrough;
case NETXEN_BRDTYPE_P2_SB31_10G:
case NETXEN_BRDTYPE_P3_10G_XFP:
supported |= SUPPORTED_FIBRE;
rdma_tasks);
		/* no need for break since RoCE coexists with Ethernet */
}
- /* fall through */
+ fallthrough;
case QED_PCI_ETH:
{
struct qed_eth_pf_params *p_params =
p_hwfn->hw_info.hw_mode);
if (rc)
break;
- /* Fall through */
+ fallthrough;
case FW_MSG_CODE_DRV_LOAD_PORT:
rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
p_hwfn->hw_info.hw_mode);
if (rc)
break;
- /* Fall through */
+ fallthrough;
case FW_MSG_CODE_DRV_LOAD_FUNCTION:
rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
p_params->p_tunn,
kfree(int_params->msix_table);
if (force_mode)
goto out;
- /* Fallthrough */
+ fallthrough;
case QED_INT_MODE_MSI:
if (cdev->num_hwfns == 1) {
if (force_mode)
goto out;
}
- /* Fallthrough */
+ fallthrough;
case QED_INT_MODE_INTA:
int_params->out.int_mode = QED_INT_MODE_INTA;
DP_NOTICE(p_hwfn,
"Unknown WoL configuration %02x\n",
p_hwfn->cdev->wol_config);
- /* Fallthrough */
+ fallthrough;
case QED_OV_WOL_DEFAULT:
wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
}
break;
case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
p_link->full_duplex = false;
- /* Fall-through */
+ fallthrough;
case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
p_link->speed = 1000;
break;
break;
case FUNC_MF_CFG_PROTOCOL_ROCE:
DP_NOTICE(p_hwfn, "RoCE personality is not a valid value!\n");
- /* Fallthrough */
+ fallthrough;
default:
rc = -EINVAL;
}
switch (p_in_params->cmd) {
case DRV_MSG_SET_RESOURCE_VALUE_MSG:
mfw_resc_info.size = p_in_params->resc_max_val;
- /* Fallthrough */
+ fallthrough;
case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
break;
default:
DP_INFO(p_hwfn,
"Resource unlock request for an already released resource [%d]\n",
p_params->resource);
- /* Fallthrough */
+ fallthrough;
case RESOURCE_OPCODE_RELEASED:
p_params->b_released = true;
break;
if (test_bit(QL_LINK_MASTER, &qdev->flags))
ql_port_start(qdev);
qdev->port_link_state = LS_DOWN;
- /* Fall Through */
+ fallthrough;
case LS_DOWN:
if (curr_link_state == LS_UP) {
case QLCNIC_BRDTYPE_P3P_4_GB_MM:
supported |= SUPPORTED_Autoneg;
advertising |= ADVERTISED_Autoneg;
- /* fall through */
+ fallthrough;
case QLCNIC_BRDTYPE_P3P_10G_CX4:
case QLCNIC_BRDTYPE_P3P_10G_CX4_LP:
case QLCNIC_BRDTYPE_P3P_10000_BASE_T:
supported |= SUPPORTED_TP;
check_sfp_module = netif_running(adapter->netdev) &&
ahw->has_link_events;
- /* fall through */
+ fallthrough;
case QLCNIC_BRDTYPE_P3P_10G_XFP:
supported |= SUPPORTED_FIBRE;
advertising |= ADVERTISED_FIBRE;
rtl_unlock_config_regs(tp);
RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable);
rtl_lock_config_regs(tp);
- /* fall through */
+ fallthrough;
case RTL_GIGA_MAC_VER_07 ... RTL_GIGA_MAC_VER_17:
flags = PCI_IRQ_LEGACY;
break;
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_52:
rtl8168ep_stop_cmac(tp);
- /* fall through */
+ fallthrough;
case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48:
rtl_hw_init_8168g(tp);
break;
return error;
}
+/* MDIO bus init function */
+static int ravb_mdio_init(struct ravb_private *priv)
+{
+ struct platform_device *pdev = priv->pdev;
+ struct device *dev = &pdev->dev;
+ int error;
+
+ /* Bitbang init */
+ priv->mdiobb.ops = &bb_ops;
+
+ /* MII controller setting */
+ priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb);
+ if (!priv->mii_bus)
+ return -ENOMEM;
+
+ /* Hook up MII support for ethtool */
+ priv->mii_bus->name = "ravb_mii";
+ priv->mii_bus->parent = dev;
+ snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+ pdev->name, pdev->id);
+
+ /* Register MDIO bus */
+ error = of_mdiobus_register(priv->mii_bus, dev->of_node);
+ if (error)
+ goto out_free_bus;
+
+ return 0;
+
+out_free_bus:
+ free_mdio_bitbang(priv->mii_bus);
+ return error;
+}
+
+/* MDIO bus release function */
+static int ravb_mdio_release(struct ravb_private *priv)
+{
+ /* Unregister mdio bus */
+ mdiobus_unregister(priv->mii_bus);
+
+ /* Free bitbang info */
+ free_mdio_bitbang(priv->mii_bus);
+
+ return 0;
+}
+
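Reading the ravb hunks above and below together, the effect of this change is to move MDIO bus setup out of probe and into the open/close path. A sketch of the resulting ordering (my summary, not patch text):

/*
 * Before: ravb_probe()  -> ravb_mdio_init()     (bus held until remove)
 * After:  ravb_open()   -> ravb_mdio_init()
 *         ravb_close()  -> ravb_mdio_release()
 *
 * The MDIO bus now exists only while the interface is up, so a bound
 * PHY driver module can be unloaded between opens.
 */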
/* Network device open function for Ethernet AVB */
static int ravb_open(struct net_device *ndev)
{
struct device *dev = &pdev->dev;
int error;
+ /* MDIO bus init */
+ error = ravb_mdio_init(priv);
+ if (error) {
+ netdev_err(ndev, "failed to initialize MDIO\n");
+ return error;
+ }
+
napi_enable(&priv->napi[RAVB_BE]);
napi_enable(&priv->napi[RAVB_NC]);
out_napi_off:
napi_disable(&priv->napi[RAVB_NC]);
napi_disable(&priv->napi[RAVB_BE]);
+ ravb_mdio_release(priv);
return error;
}
ravb_ring_free(ndev, RAVB_BE);
ravb_ring_free(ndev, RAVB_NC);
+ ravb_mdio_release(priv);
+
return 0;
}
.ndo_set_features = ravb_set_features,
};
-/* MDIO bus init function */
-static int ravb_mdio_init(struct ravb_private *priv)
-{
- struct platform_device *pdev = priv->pdev;
- struct device *dev = &pdev->dev;
- int error;
-
- /* Bitbang init */
- priv->mdiobb.ops = &bb_ops;
-
- /* MII controller setting */
- priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb);
- if (!priv->mii_bus)
- return -ENOMEM;
-
- /* Hook up MII support for ethtool */
- priv->mii_bus->name = "ravb_mii";
- priv->mii_bus->parent = dev;
- snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
- pdev->name, pdev->id);
-
- /* Register MDIO bus */
- error = of_mdiobus_register(priv->mii_bus, dev->of_node);
- if (error)
- goto out_free_bus;
-
- return 0;
-
-out_free_bus:
- free_mdio_bitbang(priv->mii_bus);
- return error;
-}
-
-/* MDIO bus release function */
-static int ravb_mdio_release(struct ravb_private *priv)
-{
- /* Unregister mdio bus */
- mdiobus_unregister(priv->mii_bus);
-
- /* Free bitbang info */
- free_mdio_bitbang(priv->mii_bus);
-
- return 0;
-}
-
static const struct of_device_id ravb_match_table[] = {
{ .compatible = "renesas,etheravb-r8a7790", .data = (void *)RCAR_GEN2 },
{ .compatible = "renesas,etheravb-r8a7794", .data = (void *)RCAR_GEN2 },
eth_hw_addr_random(ndev);
}
- /* MDIO bus init */
- error = ravb_mdio_init(priv);
- if (error) {
- dev_err(&pdev->dev, "failed to initialize MDIO\n");
- goto out_dma_free;
- }
-
netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll, 64);
netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll, 64);
out_napi_del:
netif_napi_del(&priv->napi[RAVB_NC]);
netif_napi_del(&priv->napi[RAVB_BE]);
- ravb_mdio_release(priv);
-out_dma_free:
dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
priv->desc_bat_dma);
unregister_netdev(ndev);
netif_napi_del(&priv->napi[RAVB_NC]);
netif_napi_del(&priv->napi[RAVB_BE]);
- ravb_mdio_release(priv);
pm_runtime_disable(&pdev->dev);
free_netdev(ndev);
platform_set_drvdata(pdev, NULL);
rocker_world_fib4_del(rocker, &fib_work->fen_info);
fib_info_put(fib_work->fen_info.fi);
break;
- case FIB_EVENT_RULE_ADD: /* fall through */
+ case FIB_EVENT_RULE_ADD:
case FIB_EVENT_RULE_DEL:
rule = fib_work->fr_info.rule;
if (!fib4_rule_default(rule))
fib_work->event = event;
switch (event) {
- case FIB_EVENT_ENTRY_REPLACE: /* fall through */
+ case FIB_EVENT_ENTRY_REPLACE:
case FIB_EVENT_ENTRY_DEL:
if (info->family == AF_INET) {
struct fib_entry_notifier_info *fen_info = ptr;
*/
fib_info_hold(fib_work->fen_info.fi);
break;
- case FIB_EVENT_RULE_ADD: /* fall through */
+ case FIB_EVENT_RULE_ADD:
case FIB_EVENT_RULE_DEL:
memcpy(&fib_work->fr_info, ptr, sizeof(fib_work->fr_info));
fib_rule_get(fib_work->fr_info.rule);
switchdev_work->event = event;
switch (event) {
- case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
case SWITCHDEV_FDB_DEL_TO_DEVICE:
memcpy(&switchdev_work->fdb_info, ptr,
sizeof(switchdev_work->fdb_info));
case TCP_V4_FLOW:
case UDP_V4_FLOW:
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- /* Fall through */
+ fallthrough;
case SCTP_V4_FLOW:
case AH_ESP_V4_FLOW:
case AH_V4_FLOW:
case TCP_V6_FLOW:
case UDP_V6_FLOW:
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- /* Fall through */
+ fallthrough;
case SCTP_V6_FLOW:
case AH_ESP_V6_FLOW:
case AH_V6_FLOW:
/* Temporarily map new BAR. */
rc = efx_init_io(efx, bar,
- DMA_BIT_MASK(ESF_GZ_TX_SEND_ADDR_WIDTH),
+ (dma_addr_t)DMA_BIT_MASK(ESF_GZ_TX_SEND_ADDR_WIDTH),
pci_resource_len(efx->pci_dev, bar));
if (rc) {
netif_err(efx, probe, efx->net_dev,
/* Put old BAR back. */
rc = efx_init_io(efx, previous_bar,
- DMA_BIT_MASK(ESF_GZ_TX_SEND_ADDR_WIDTH),
+ (dma_addr_t)DMA_BIT_MASK(ESF_GZ_TX_SEND_ADDR_WIDTH),
pci_resource_len(efx->pci_dev, previous_bar));
if (rc) {
netif_err(efx, probe, efx->net_dev,
/* Temporarily map BAR. */
rc = efx_init_io(efx, bar,
- DMA_BIT_MASK(ESF_GZ_TX_SEND_ADDR_WIDTH),
+ (dma_addr_t)DMA_BIT_MASK(ESF_GZ_TX_SEND_ADDR_WIDTH),
pci_resource_len(efx->pci_dev, bar));
if (rc) {
netif_err(efx, probe, efx->net_dev,
/* Set up basic I/O (BAR mappings etc) */
rc = efx_init_io(efx, fcw.bar,
- DMA_BIT_MASK(ESF_GZ_TX_SEND_ADDR_WIDTH),
+ (dma_addr_t)DMA_BIT_MASK(ESF_GZ_TX_SEND_ADDR_WIDTH),
pci_resource_len(efx->pci_dev, fcw.bar));
if (rc)
goto fail;
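A note on the repeated (dma_addr_t) casts above: DMA_BIT_MASK() always yields a 64-bit constant, so passing it to a dma_addr_t parameter on 32-bit builds provokes truncation warnings; the cast makes the narrowing explicit. For reference, the kernel's definition is:

/* include/linux/dma-mapping.h */
#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))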
/* A RESET_TYPE_ALL will cause filters to be removed, so we remove filters
* and reprobe after reset to avoid removing filters twice
*/
- down_read(&efx->filter_sem);
+ down_write(&efx->filter_sem);
ef100_filter_table_down(efx);
- up_read(&efx->filter_sem);
+ up_write(&efx->filter_sem);
rc = efx_mcdi_reset(efx, reset_type);
if (rc)
return rc;
netif_device_attach(efx->net_dev);
- down_read(&efx->filter_sem);
+ down_write(&efx->filter_sem);
rc = ef100_filter_table_up(efx);
- up_read(&efx->filter_sem);
+ up_write(&efx->filter_sem);
if (rc)
return rc;
.rx_remove = efx_mcdi_rx_remove,
.rx_write = ef100_rx_write,
.rx_packet = __ef100_rx_packet,
+ .rx_buf_hash_valid = ef100_rx_buf_hash_valid,
.fini_dmaq = efx_fini_dmaq,
.max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS,
.filter_table_probe = ef100_filter_table_up,
.rx_remove = efx_mcdi_rx_remove,
.rx_write = ef100_rx_write,
.rx_packet = __ef100_rx_packet,
+ .rx_buf_hash_valid = ef100_rx_buf_hash_valid,
.fini_dmaq = efx_fini_dmaq,
.max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS,
.filter_table_probe = ef100_filter_table_up,
#define ESF_GZ_RX_PREFIX_NT_OR_INNER_L3_CLASS_WIDTH \
ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L3_CLASS_WIDTH
-static bool check_fcs(struct efx_channel *channel, u32 *prefix)
+bool ef100_rx_buf_hash_valid(const u8 *prefix)
+{
+ return PREFIX_FIELD(prefix, RSS_HASH_VALID);
+}
+
+static bool ef100_has_fcs_error(struct efx_channel *channel, u32 *prefix)
{
u16 rxclass;
u8 l2status;
if (likely(l2status == ESE_GZ_RH_HCLASS_L2_STATUS_OK))
/* Everything is ok */
- return 0;
+ return false;
if (l2status == ESE_GZ_RH_HCLASS_L2_STATUS_FCS_ERR)
channel->n_rx_eth_crc_err++;
- return 1;
+ return true;
}
void __ef100_rx_packet(struct efx_channel *channel)
prefix = (u32 *)(eh - ESE_GZ_RX_PKT_PREFIX_LEN);
- if (check_fcs(channel, prefix) &&
+ if (ef100_has_fcs_error(channel, prefix) &&
unlikely(!(efx->net_dev->features & NETIF_F_RXALL)))
goto out;
#include "net_driver.h"
+bool ef100_rx_buf_hash_valid(const u8 *prefix);
void efx_ef100_ev_rx(struct efx_channel *channel, const efx_qword_t *p_event);
void ef100_rx_write(struct efx_rx_queue *rx_queue);
void __ef100_rx_packet(struct efx_channel *channel);
__ef100_rx_packet, __efx_rx_packet,
channel);
}
+static inline bool efx_rx_buf_hash_valid(struct efx_nic *efx, const u8 *prefix)
+{
+ if (efx->type->rx_buf_hash_valid)
+ return INDIRECT_CALL_1(efx->type->rx_buf_hash_valid,
+ ef100_rx_buf_hash_valid,
+ prefix);
+ return true;
+}
/* Maximum number of TCP segments we support for soft-TSO */
#define EFX_TSO_MAX_SEGS 100
switch (info->flow_type) {
case TCP_V4_FLOW:
info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- /* Fall through */
+ fallthrough;
case UDP_V4_FLOW:
case SCTP_V4_FLOW:
case AH_ESP_V4_FLOW:
switch (rx_ev_hdr_type) {
case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
flags |= EF4_RX_PKT_TCP;
- /* fall through */
+ fallthrough;
case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
flags |= EF4_RX_PKT_CSUMMED;
- /* fall through */
+ fallthrough;
case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
break;
if (efx->type->handle_global_event &&
efx->type->handle_global_event(channel, &event))
break;
- /* else fall through */
+ fallthrough;
default:
netif_err(channel->efx, hw, channel->efx->net_dev,
"channel %d unknown event type %d (data "
EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_LOC_PORT |
EF4_FILTER_MATCH_REM_HOST | EF4_FILTER_MATCH_REM_PORT):
is_full = true;
- /* fall through */
+ fallthrough;
case (EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_IP_PROTO |
EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_LOC_PORT): {
__be32 rhost, host1, host2;
case EF4_FILTER_MATCH_LOC_MAC | EF4_FILTER_MATCH_OUTER_VID:
is_full = true;
- /* fall through */
+ fallthrough;
case EF4_FILTER_MATCH_LOC_MAC:
spec->type = (is_full ? EF4_FARCH_FILTER_MAC_FULL :
EF4_FARCH_FILTER_MAC_WILD);
case EF4_FARCH_FILTER_TCP_FULL:
case EF4_FARCH_FILTER_UDP_FULL:
is_full = true;
- /* fall through */
+ fallthrough;
case EF4_FARCH_FILTER_TCP_WILD:
case EF4_FARCH_FILTER_UDP_WILD: {
__be32 host1, host2;
case EF4_FARCH_FILTER_MAC_FULL:
is_full = true;
- /* fall through */
+ fallthrough;
case EF4_FARCH_FILTER_MAC_WILD:
gen_spec->match_flags = EF4_FILTER_MATCH_LOC_MAC;
if (is_full)
switch (rx_ev_hdr_type) {
case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
flags |= EFX_RX_PKT_TCP;
- /* fall through */
+ fallthrough;
case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
flags |= EFX_RX_PKT_CSUMMED;
- /* fall through */
+ fallthrough;
case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
break;
if (efx->type->handle_global_event &&
efx->type->handle_global_event(channel, &event))
break;
- /* else fall through */
+ fallthrough;
default:
netif_err(channel->efx, hw, channel->efx->net_dev,
"channel %d unknown event type %d (data "
EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT):
is_full = true;
- /* fall through */
+ fallthrough;
case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT): {
__be32 rhost, host1, host2;
case EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_OUTER_VID:
is_full = true;
- /* fall through */
+ fallthrough;
case EFX_FILTER_MATCH_LOC_MAC:
spec->type = (is_full ? EFX_FARCH_FILTER_MAC_FULL :
EFX_FARCH_FILTER_MAC_WILD);
case EFX_FARCH_FILTER_TCP_FULL:
case EFX_FARCH_FILTER_UDP_FULL:
is_full = true;
- /* fall through */
+ fallthrough;
case EFX_FARCH_FILTER_TCP_WILD:
case EFX_FARCH_FILTER_UDP_WILD: {
__be32 host1, host2;
case EFX_FARCH_FILTER_MAC_FULL:
is_full = true;
- /* fall through */
+ fallthrough;
case EFX_FARCH_FILTER_MAC_WILD:
gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC;
if (is_full)
switch (encap_type & EFX_ENCAP_TYPES_MASK) {
case EFX_ENCAP_TYPE_VXLAN:
vni_type = MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN;
- /* fallthrough */
+ fallthrough;
case EFX_ENCAP_TYPE_GENEVE:
COPY_VALUE(ether_type, ETHER_TYPE);
outer_ip_proto = IPPROTO_UDP;
break;
default:
WARN_ON(1);
- /* Fall through */
+ fallthrough;
case MC_CMD_FCNTL_OFF:
link_state->fc = 0;
break;
* @timer_quantum_ns: Interrupt timer quantum, in nanoseconds
* @timer_max_ns: Interrupt timer maximum value, in nanoseconds
* @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues
+ * @irqs_hooked: Channel interrupts are hooked
* @irq_rx_mod_step_us: Step size for IRQ moderation for RX event queues
* @irq_rx_moderation_us: IRQ moderation time for RX event queues
* @msg_enable: Log message enable flags
unsigned int timer_quantum_ns;
unsigned int timer_max_ns;
bool irq_rx_adaptive;
+ bool irqs_hooked;
unsigned int irq_mod_step_us;
unsigned int irq_rx_moderation_us;
u32 msg_enable;
* @rx_write: Write RX descriptors and doorbell
* @rx_defer_refill: Generate a refill reminder event
* @rx_packet: Receive the queued RX buffer on a channel
+ * @rx_buf_hash_valid: Determine whether the RX prefix contains a valid hash
* @ev_probe: Allocate resources for event queue
* @ev_init: Initialise event queue on the NIC
* @ev_fini: Deinitialise event queue on the NIC
void (*rx_write)(struct efx_rx_queue *rx_queue);
void (*rx_defer_refill)(struct efx_rx_queue *rx_queue);
void (*rx_packet)(struct efx_channel *channel);
+ bool (*rx_buf_hash_valid)(const u8 *prefix);
int (*ev_probe)(struct efx_channel *channel);
int (*ev_init)(struct efx_channel *channel);
void (*ev_fini)(struct efx_channel *channel);
#endif
}
+ efx->irqs_hooked = true;
return 0;
fail2:
efx->net_dev->rx_cpu_rmap = NULL;
#endif
+ if (!efx->irqs_hooked)
+ return;
if (EFX_INT_MODE_USE_MSI(efx)) {
/* Disable MSI/MSI-X interrupts */
efx_for_each_channel(channel, efx)
/* Disable legacy interrupt */
free_irq(efx->legacy_irq, efx);
}
+ efx->irqs_hooked = false;
}
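A short rationale for the new flag, inferred from the hunks above (the setup/teardown helper names are not visible in this excerpt): it makes interrupt teardown idempotent, so teardown is safe even when setup failed or never ran.

/*
 * irqs_hooked lifecycle:
 *   setup success -> irqs_hooked = true
 *   teardown      -> if (!irqs_hooked) return;   (nothing to free)
 *                    free_irq(...); irqs_hooked = false;
 */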
/* Register dump */
case XDP_ABORTED:
trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
- /* Fall through */
+ fallthrough;
case XDP_DROP:
efx_free_rx_buffers(rx_queue, rx_buf, 1);
channel->n_rx_xdp_drops++;
return;
}
- if (efx->net_dev->features & NETIF_F_RXHASH)
+ if (efx->net_dev->features & NETIF_F_RXHASH &&
+ efx_rx_buf_hash_valid(efx, eh))
skb_set_hash(skb, efx_rx_buf_hash(efx, eh),
PKT_HASH_TYPE_L3);
if (csum) {
efx_for_each_channel(channel, efx) {
cancel_delayed_work_sync(&channel->filter_work);
kfree(channel->rps_flow_id);
+ channel->rps_flow_id = NULL;
}
#endif
down_write(&efx->filter_sem);
switch(cmd) {
case SIOCGMIIPHY: /* Get address of MII PHY in use. */
data->phy_id = sis_priv->mii->phy_addr;
- /* Fall Through */
+ fallthrough;
case SIOCGMIIREG: /* Read MII PHY register. */
data->val_out = mdio_read(net_dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
/* Found an external PHY */
break;
}
- /* Else, fall through */
+ fallthrough;
default:
/* Internal media only */
SMC_GET_PHY_ID1(lp, 1, id1);
break;
default:
bpf_warn_invalid_xdp_action(act);
- /* fall through */
+ fallthrough;
case XDP_ABORTED:
trace_xdp_exception(priv->ndev, prog, act);
- /* fall through -- handle aborts by dropping packet */
+ fallthrough; /* handle aborts by dropping packet */
case XDP_DROP:
ret = NETSEC_XDP_CONSUMED;
page = virt_to_head_page(xdp->data);
return ERR_PTR(err);
switch (phy_mode) {
- case PHY_INTERFACE_MODE_RGMII: /* Fall through */
- case PHY_INTERFACE_MODE_RGMII_ID /* Fall through */:
- case PHY_INTERFACE_MODE_RGMII_RXID: /* Fall through */
+ case PHY_INTERFACE_MODE_RGMII:
+ fallthrough;
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_TXID:
gmac->phy_intf_sel = GMAC_CONFIG_INTF_RGMII;
break;
ret = phy_loopback(dev->phydev, true);
if (!ret)
break;
- /* Fallthrough */
+ fallthrough;
case STMMAC_LOOPBACK_MAC:
ret = stmmac_set_mac_loopback(priv, priv->ioaddr, true);
break;
ret = phy_loopback(dev->phydev, false);
if (!ret)
break;
- /* Fallthrough */
+ fallthrough;
case STMMAC_LOOPBACK_MAC:
stmmac_set_mac_loopback(priv, priv->ioaddr, false);
break;
switch (cls->command) {
case TC_CLSU32_REPLACE_KNODE:
tc_unfill_entry(priv, cls);
- /* Fall through */
+ fallthrough;
case TC_CLSU32_NEW_KNODE:
return tc_config_knode(priv, cls);
case TC_CLSU32_DELETE_KNODE:
switch (cmd) {
case SIOCGMIIPHY: /* Get address of MII PHY in use. */
data->phy_id = cp->phy_addr;
- /* Fallthrough... */
+ fallthrough;
case SIOCGMIIREG: /* Read MII PHY register. */
spin_lock_irqsave(&cp->lock, flags);
else
goto unknown_vg_1g_port;
- /* fallthru */
+ fallthrough;
case 0x22:
val = (phy_encode(PORT_TYPE_10G, 0) |
phy_encode(PORT_TYPE_10G, 1) |
else
goto unknown_vg_1g_port;
- /* fallthru */
+ fallthrough;
case 0x13:
if ((lowest_10g & 0x7) == 0)
val = (phy_encode(PORT_TYPE_10G, 0) |
switch (cmd) {
case SIOCGMIIPHY: /* Get address of MII PHY in use. */
data->phy_id = gp->mii_phy_addr;
- /* Fallthrough... */
+ fallthrough;
case SIOCGMIIREG: /* Read MII PHY register. */
data->val_out = __sungem_phy_read(gp, data->phy_id & 0x1f,
if (phy->speed == 10 && phy_interface_is_rgmii(phy))
/* Can be used with in band mode only */
mac_control |= CPSW_SL_CTL_EXT_EN;
+ if (phy->speed == 100 && phy->interface == PHY_INTERFACE_MODE_RMII)
+ mac_control |= CPSW_SL_CTL_IFCTL_A;
if (phy->duplex)
mac_control |= CPSW_SL_CTL_FULLDUPLEX;
dev_warn(priv->dev,
"Unsupported PHY mode: \"%s\". Defaulting to MII.\n",
phy_modes(phy_mode));
- /* fallthrough */
+ fallthrough;
case PHY_INTERFACE_MODE_MII:
mode = AM33XX_GMII_SEL_MODE_MII;
break;
dev_warn(priv->dev,
"Unsupported PHY mode: \"%s\". Defaulting to MII.\n",
phy_modes(phy_mode));
- /* fallthrough */
+ fallthrough;
case PHY_INTERFACE_MODE_MII:
mode = AM33XX_GMII_SEL_MODE_MII;
break;
HOST_PORT_NUM, ALE_VLAN, vid);
ret |= cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
0, ALE_VLAN, vid);
- ret |= cpsw_ale_flush_multicast(cpsw->ale, 0, vid);
+ ret |= cpsw_ale_flush_multicast(cpsw->ale, ALE_PORT_HOST, vid);
err:
pm_runtime_put(cpsw->dev);
return ret;
return ret;
}
+ /* reset the return code as pm_runtime_get_sync() can return
+	 * non-zero values as well.
+ */
+ ret = 0;
for (i = 0; i < cpsw->data.slaves; i++) {
if (cpsw->slaves[i].ndev &&
- vid == cpsw->slaves[i].port_vlan)
+ vid == cpsw->slaves[i].port_vlan) {
+ ret = -EINVAL;
goto err;
+ }
}
dev_dbg(priv->dev, "removing vlanid %d from vlan filter\n", vid);
- cpsw_ale_del_vlan(cpsw->ale, vid, 0);
- cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
- HOST_PORT_NUM, ALE_VLAN, vid);
- cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
- 0, ALE_VLAN, vid);
- cpsw_ale_flush_multicast(cpsw->ale, 0, vid);
+ ret = cpsw_ale_del_vlan(cpsw->ale, vid, 0);
+ if (ret)
+ dev_err(priv->dev, "cpsw_ale_del_vlan() failed: ret %d\n", ret);
+ ret = cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
+ HOST_PORT_NUM, ALE_VLAN, vid);
+ if (ret)
+ dev_err(priv->dev, "cpsw_ale_del_ucast() failed: ret %d\n",
+ ret);
+ ret = cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
+ 0, ALE_VLAN, vid);
+ if (ret)
+ dev_err(priv->dev, "cpsw_ale_del_mcast failed. ret %d\n",
+ ret);
+ cpsw_ale_flush_multicast(cpsw->ale, ALE_PORT_HOST, vid);
+ ret = 0;
err:
pm_runtime_put(cpsw->dev);
return ret;
break;
default:
bpf_warn_invalid_xdp_action(act);
- /* fall through */
+ fallthrough;
case XDP_ABORTED:
trace_xdp_exception(ndev, prog, act);
- /* fall through -- handle aborts by dropping packet */
+ fallthrough; /* handle aborts by dropping packet */
case XDP_DROP:
goto drop;
}
switch (cmd) {
case SIOCGMIIPHY: /* get address of MII PHY in use. */
data->phy_id = phy;
- /* fall through */
+ fallthrough;
case SIOCGMIIREG: /* read MII PHY register. */
case IW_AUTH_KEY_MGMT:
if (param->value & IW_AUTH_KEY_MGMT_PSK)
break;
- /* intentionally fall through */
+ fallthrough;
default:
ret = -EOPNOTSUPP;
break;
/* fallthrough, if we release the descriptors
* brutally (then we don't care about
* SPIDER_NET_DESCR_CARDOWNED) */
- /* Fall through */
+ fallthrough;
case SPIDER_NET_DESCR_RESPONSE_ERROR:
case SPIDER_NET_DESCR_PROTECTION_ERROR:
show_error = 0;
break;
- case SPIDER_NET_GDDDEN0INT: /* fallthrough */
- case SPIDER_NET_GDCDEN0INT: /* fallthrough */
- case SPIDER_NET_GDBDEN0INT: /* fallthrough */
+ case SPIDER_NET_GDDDEN0INT:
+ case SPIDER_NET_GDCDEN0INT:
+ case SPIDER_NET_GDBDEN0INT:
case SPIDER_NET_GDADEN0INT:
/* someone has set RX_DMA_EN to 0 */
show_error = 0;
* Logging is not needed. */
show_error = 0;
break;
- case SPIDER_NET_GRFDFLLINT: /* fallthrough */
- case SPIDER_NET_GRFCFLLINT: /* fallthrough */
- case SPIDER_NET_GRFBFLLINT: /* fallthrough */
- case SPIDER_NET_GRFAFLLINT: /* fallthrough */
+ case SPIDER_NET_GRFDFLLINT:
+ case SPIDER_NET_GRFCFLLINT:
+ case SPIDER_NET_GRFBFLLINT:
+ case SPIDER_NET_GRFAFLLINT:
case SPIDER_NET_GRMFLLINT:
/* Could happen when rx chain is full */
if (card->ignore_rx_ramfull == 0) {
break;
/* chain end */
- case SPIDER_NET_GDDDCEINT: /* fallthrough */
- case SPIDER_NET_GDCDCEINT: /* fallthrough */
- case SPIDER_NET_GDBDCEINT: /* fallthrough */
+ case SPIDER_NET_GDDDCEINT:
+ case SPIDER_NET_GDCDCEINT:
+ case SPIDER_NET_GDBDCEINT:
case SPIDER_NET_GDADCEINT:
spider_net_resync_head_ptr(card);
spider_net_refill_rx_chain(card);
break;
/* invalid descriptor */
- case SPIDER_NET_GDDINVDINT: /* fallthrough */
- case SPIDER_NET_GDCINVDINT: /* fallthrough */
- case SPIDER_NET_GDBINVDINT: /* fallthrough */
+ case SPIDER_NET_GDDINVDINT:
+ case SPIDER_NET_GDCINVDINT:
+ case SPIDER_NET_GDBINVDINT:
case SPIDER_NET_GDAINVDINT:
/* Could happen when rx chain is full */
spider_net_resync_head_ptr(card);
switch(cmd) {
case SIOCGMIIPHY: /* Get the address of the PHY in use. */
data->phy_id = 0; /* we have only this address */
- /* fall through */
+ fallthrough;
case SIOCGMIIREG: /* Read the specified MII register. */
data->val_out = mii_rd(ioaddr, data->phy_id & 0x1f,
data->reg_num & 0x1f);
#define KERNEL
#include "h/smtstate.h"
-#ifndef lint
-static const char ID_sccs[] = "@(#)cfm.c 2.18 98/10/06 (C) SK " ;
-#endif
-
/*
* FSM Macros
*/
{
int state ; /* remember last state */
int cond ;
- int oldstate ;
/* We will do the following: */
/* - compute the variable WC_Flag for every port (This is where */
/* - change the portstates */
cem_priv_state (smc, event);
- oldstate = smc->mib.fddiSMTCF_State ;
do {
DB_CFM("CFM : state %s%s event %s",
smc->mib.fddiSMTCF_State & AFLAG ? "ACTIONS " : "",
if (cond != smc->mib.fddiSMTPeerWrapFlag)
smt_srf_event(smc,SMT_COND_SMT_PEER_WRAP,0,cond) ;
-#if 0
/*
- * Don't send ever MAC_PATH_CHANGE events. Our MAC is hard-wired
+ * Don't ever send MAC_PATH_CHANGE events. Our MAC is hard-wired
* to the primary path.
*/
- /*
- * path change
- */
- if (smc->mib.fddiSMTCF_State != oldstate) {
- smt_srf_event(smc,SMT_EVENT_MAC_PATH_CHANGE,INDEX_MAC,0) ;
- }
-#endif
+
#endif /* no SLIM_SMT */
/*
#include <linux/bitrev.h>
#include <linux/etherdevice.h>
-#ifndef lint
-static const char ID_sccs[] = "@(#)fplustm.c 1.32 99/02/23 (C) SK " ;
-#endif
-
#ifndef UNUSED
#ifdef lint
#define UNUSED(x) (x) = (x)
*
******************************************************************************/
-#ifndef lint
-static char const ID_sccs[] = "@(#)hwmtm.c 1.40 99/05/31 (C) SK" ;
-#endif
-
#define HWMTM
#ifndef FDDI
case ACTIONS(PC5_SIGNAL) :
ACTIONS_DONE() ;
- /* fall through */
+ fallthrough;
case PC5_SIGNAL :
if ((cmd != PC_SIGNAL) && (cmd != PC_TIMEOUT_LCT))
break ;
SETMASK(PLC(np,PL_CNTRL_B),PL_PC_JOIN,PL_PC_JOIN) ;
ACTIONS_DONE() ;
cmd = 0 ;
- /* fall thru */
+ fallthrough;
case PC6_JOIN :
switch (plc->p_state) {
case PS_ACTIVE:
#define KERNEL
#include "h/smtstate.h"
-#ifndef lint
-static const char ID_sccs[] = "@(#)smt.c 2.43 98/11/23 (C) SK " ;
-#endif
-
/*
* FC in SMbuf
*/
return tid & 0x3fffffffL;
}
-
+#ifdef LITTLE_ENDIAN
/*
* table of parameter lengths
*/
} ;
#define N_SMT_PLEN ARRAY_SIZE(smt_pdef)
+#endif
int smt_check_para(struct s_smc *smc, struct smt_header *sm,
const u_short list[])
FJES_RX_STOP_REQ_DONE;
spin_unlock_irqrestore(&hw->rx_status_lock, flags);
clear_bit(src_epid, &hw->txrx_stop_req_bit);
- /* fall through */
+ fallthrough;
case EP_PARTNER_UNSHARE:
case EP_PARTNER_COMPLETE:
default:
goto nlmsg_failure;
if (nla_put_u32(skb, GTPA_VERSION, pctx->gtp_version) ||
+ nla_put_u32(skb, GTPA_LINK, pctx->dev->ifindex) ||
nla_put_be32(skb, GTPA_PEER_ADDRESS, pctx->peer_addr_ip4.s_addr) ||
nla_put_be32(skb, GTPA_MS_ADDRESS, pctx->ms_addr_ip4.s_addr))
goto nla_put_failure;
}
break;
}
- /* fall through */
+ fallthrough;
default:
if (bc->hdlctx.calibrate <= 0)
case CRC_MODE_SMACK_TEST:
ax->crcmode = CRC_MODE_FLEX_TEST;
printk(KERN_INFO "mkiss: %s: Trying crc-smack\n", ax->dev->name);
- // fall through
+ fallthrough;
case CRC_MODE_SMACK:
*p |= 0x80;
crc = swab16(crc16(0, p, len));
case CRC_MODE_FLEX_TEST:
ax->crcmode = CRC_MODE_NONE;
printk(KERN_INFO "mkiss: %s: Trying crc-flexnet\n", ax->dev->name);
- // fall through
+ fallthrough;
case CRC_MODE_FLEX:
*p |= 0x20;
crc = calc_crc_flex(p, len);
ax->dev->name);
break;
case 0:
- /* fall through */
default:
crc_force = 0;
printk(KERN_INFO "mkiss: %s: crc mode is auto.\n",
}
rcu_read_unlock();
- while (unlikely(txq >= ndev->real_num_tx_queues))
+ while (txq >= ndev->real_num_tx_queues)
txq -= ndev->real_num_tx_queues;
return txq;
int rc;
skb->dev = vf_netdev;
- skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping;
+ skb_record_rx_queue(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);
rc = dev_queue_xmit(skb);
if (likely(rc == NET_XMIT_SUCCESS || rc == NET_XMIT_CN)) {
kfree(port);
}
+#define IPVLAN_ALWAYS_ON_OFLOADS \
+ (NETIF_F_SG | NETIF_F_HW_CSUM | \
+ NETIF_F_GSO_ROBUST | NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL)
+
+#define IPVLAN_ALWAYS_ON \
+ (IPVLAN_ALWAYS_ON_OFLOADS | NETIF_F_LLTX | NETIF_F_VLAN_CHALLENGED)
+
#define IPVLAN_FEATURES \
- (NETIF_F_SG | NETIF_F_CSUM_MASK | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
+ (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
NETIF_F_GSO | NETIF_F_ALL_TSO | NETIF_F_GSO_ROBUST | \
NETIF_F_GRO | NETIF_F_RXCSUM | \
NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)
+	/* NETIF_F_GSO_ENCAP_ALL and NETIF_F_GSO_SOFTWARE are newly added */
+
#define IPVLAN_STATE_MASK \
((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
dev->state = (dev->state & ~IPVLAN_STATE_MASK) |
(phy_dev->state & IPVLAN_STATE_MASK);
dev->features = phy_dev->features & IPVLAN_FEATURES;
- dev->features |= NETIF_F_LLTX | NETIF_F_VLAN_CHALLENGED;
+ dev->features |= IPVLAN_ALWAYS_ON;
+ dev->vlan_features = phy_dev->vlan_features & IPVLAN_FEATURES;
+ dev->vlan_features |= IPVLAN_ALWAYS_ON_OFLOADS;
dev->hw_enc_features |= dev->features;
dev->gso_max_size = phy_dev->gso_max_size;
dev->gso_max_segs = phy_dev->gso_max_segs;
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
- return features & (ipvlan->sfeatures | ~IPVLAN_FEATURES);
+ features |= NETIF_F_ALL_FOR_ALL;
+ features &= (ipvlan->sfeatures | ~IPVLAN_FEATURES);
+ features = netdev_increment_features(ipvlan->phy_dev->features,
+ features, features);
+ features |= IPVLAN_ALWAYS_ON;
+ features &= (IPVLAN_FEATURES | IPVLAN_ALWAYS_ON);
+
+ return features;
}
static void ipvlan_change_rx_flags(struct net_device *dev, int change)
case NETDEV_FEAT_CHANGE:
list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
- ipvlan->dev->features = dev->features & IPVLAN_FEATURES;
ipvlan->dev->gso_max_size = dev->gso_max_size;
ipvlan->dev->gso_max_segs = dev->gso_max_segs;
- netdev_features_change(ipvlan->dev);
+ netdev_update_features(ipvlan->dev);
}
break;
case SIOCSHWTSTAMP:
if (!net_eq(dev_net(dev), &init_net))
break;
- /* fall through */
+ fallthrough;
case SIOCGHWTSTAMP:
if (netif_device_present(real_dev) && ops->ndo_do_ioctl)
err = ops->ndo_do_ioctl(real_dev, &ifrr, cmd);
static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
+ struct nlattr *nla, *head;
+ int rem, len;
+
if (tb[IFLA_ADDRESS]) {
if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
return -EINVAL;
return -EADDRNOTAVAIL;
}
+ if (data[IFLA_MACVLAN_MACADDR_DATA]) {
+ head = nla_data(data[IFLA_MACVLAN_MACADDR_DATA]);
+ len = nla_len(data[IFLA_MACVLAN_MACADDR_DATA]);
+
+ nla_for_each_attr(nla, head, len, rem) {
+ if (nla_type(nla) != IFLA_MACVLAN_MACADDR ||
+ nla_len(nla) != ETH_ALEN)
+ return -EINVAL;
+
+ if (!is_valid_ether_addr(nla_data(nla)))
+ return -EADDRNOTAVAIL;
+ }
+ }
+
if (data[IFLA_MACVLAN_MACADDR_COUNT])
return -EINVAL;
len = nla_len(data[IFLA_MACVLAN_MACADDR_DATA]);
nla_for_each_attr(nla, head, len, rem) {
- if (nla_type(nla) != IFLA_MACVLAN_MACADDR ||
- nla_len(nla) != ETH_ALEN)
- continue;
-
addr = nla_data(nla);
ret = macvlan_hash_add_source(vlan, addr);
if (ret)
switch(cmd) {
case SIOCGMIIPHY:
mii_data->phy_id = mii_if->phy_id;
- /* fall through */
+ fallthrough;
case SIOCGMIIREG:
mii_data->val_out =
switch (err) {
case 1:
port_count = 1;
- /* fall through */
+ fallthrough;
case 2:
if (id > INT_MAX) {
pr_err("Value of \"id\" is too big.\n");
spin_lock_bh(&data->fib_lock);
switch (event) {
- case FIB_EVENT_RULE_ADD: /* fall through */
+ case FIB_EVENT_RULE_ADD:
case FIB_EVENT_RULE_DEL:
err = nsim_fib_rule_event(data, info,
event == FIB_EVENT_RULE_ADD);
break;
- case FIB_EVENT_ENTRY_REPLACE: /* fall through */
- case FIB_EVENT_ENTRY_APPEND: /* fall through */
+ case FIB_EVENT_ENTRY_REPLACE:
+ case FIB_EVENT_ENTRY_APPEND:
case FIB_EVENT_ENTRY_DEL:
err = nsim_fib_event(data, info, event);
break;
switch (tx_interval) {
case 1000: /* 1 second */
- /* fallthrough */
+ fallthrough;
case ETHTOOL_PHY_EDPD_DFLT_TX_MSECS:
val |= ADIN1300_NRG_PD_TX_EN;
- /* fallthrough */
+ fallthrough;
case ETHTOOL_PHY_EDPD_NO_TX:
break;
default:
switch (words) {
case 3:
dp83640->edata.sec_hi = phy_txts->sec_hi;
- /* fall through */
+ fallthrough;
case 2:
dp83640->edata.sec_lo = phy_txts->sec_lo;
- /* fall through */
+ fallthrough;
case 1:
dp83640->edata.ns_hi = phy_txts->ns_hi;
- /* fall through */
+ fallthrough;
case 0:
dp83640->edata.ns_lo = phy_txts->ns_lo;
}
kfree_skb(skb);
return;
}
- /* fall through */
+ fallthrough;
case HWTSTAMP_TX_ON:
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
skb_info->tmo = jiffies + SKB_TIMESTAMP_TIMEOUT;
if (wol->wolopts & WAKE_MAGICSECURE) {
phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFSOP1,
(wol->sopass[1] << 8) | wol->sopass[0]);
- phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFSOP1,
+ phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFSOP2,
(wol->sopass[3] << 8) | wol->sopass[2]);
- phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFSOP1,
+ phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFSOP3,
(wol->sopass[5] << 8) | wol->sopass[4]);
val_rxcfg |= DP83867_WOL_SEC_EN;
return ret;
val = phy_read_mmd(phydev, DP83869_DEVADDR, DP83869_RGMIICTL);
- val &= ~(DP83869_RGMII_TX_CLK_DELAY_EN |
- DP83869_RGMII_RX_CLK_DELAY_EN);
+ val |= (DP83869_RGMII_TX_CLK_DELAY_EN |
+ DP83869_RGMII_RX_CLK_DELAY_EN);
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
- val |= (DP83869_RGMII_TX_CLK_DELAY_EN |
- DP83869_RGMII_RX_CLK_DELAY_EN);
+ val &= ~(DP83869_RGMII_TX_CLK_DELAY_EN |
+ DP83869_RGMII_RX_CLK_DELAY_EN);
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
- val |= DP83869_RGMII_TX_CLK_DELAY_EN;
+ val &= ~DP83869_RGMII_TX_CLK_DELAY_EN;
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
- val |= DP83869_RGMII_RX_CLK_DELAY_EN;
+ val &= ~DP83869_RGMII_RX_CLK_DELAY_EN;
ret = phy_write_mmd(phydev, DP83869_DEVADDR, DP83869_RGMIICTL,
val);
phy->supported);
linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
phy->supported);
- /* fall through */
+ fallthrough;
case SPEED_100:
linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
phy->supported);
linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
phy->supported);
- /* fall through */
+ fallthrough;
case SPEED_10:
default:
linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
return 0;
}
-/* Trigger a read to the spcified MCB */
+/* Trigger a read to the specified MCB */
static int phy_update_mcb_s6g(struct phy_device *phydev, u32 reg, u8 mcb)
{
return __phy_write_mcb_s6g(phydev, reg, mcb, PHY_MCB_S6G_READ);
}
-/* Trigger a write to the spcified MCB */
+/* Trigger a write to the specified MCB */
static int phy_commit_mcb_s6g(struct phy_device *phydev, u32 reg, u8 mcb)
{
return __phy_write_mcb_s6g(phydev, reg, mcb, PHY_MCB_S6G_WRITE);
switch (cmd) {
case SIOCGMIIPHY:
mii_data->phy_id = phydev->mdio.addr;
- /* fall through */
+ fallthrough;
case SIOCGMIIREG:
if (mdio_phy_id_is_c45(mii_data->phy_id)) {
case SIOCSHWTSTAMP:
if (phydev->mii_ts && phydev->mii_ts->hwtstamp)
return phydev->mii_ts->hwtstamp(phydev->mii_ts, ifr);
- /* fall through */
+ fallthrough;
default:
return -EOPNOTSUPP;
break;
case MASTER_SLAVE_CFG_MASTER_FORCE:
ctl |= CTL1000_AS_MASTER;
- /* fallthrough */
+ fallthrough;
case MASTER_SLAVE_CFG_SLAVE_FORCE:
ctl |= CTL1000_ENABLE_MASTER;
break;
switch (cmd) {
case SIOCGMIIPHY:
mii->phy_id = pl->phydev->mdio.addr;
- /* fall through */
+ fallthrough;
case SIOCGMIIREG:
ret = phylink_phy_read(pl, mii->phy_id, mii->reg_num);
switch (cmd) {
case SIOCGMIIPHY:
mii->phy_id = 0;
- /* fall through */
+ fallthrough;
case SIOCGMIIREG:
ret = phylink_mii_read(pl, mii->phy_id, mii->reg_num);
port = PORT_TP;
break;
}
- /* fallthrough */
+ fallthrough;
case SFF8024_CONNECTOR_SG: /* guess */
case SFF8024_CONNECTOR_HSSDC_II:
case SFF8024_CONNECTOR_NOSEPARATE:
break;
case SFF8024_ECC_100GBASE_CR4:
phylink_set(modes, 100000baseCR4_Full);
- /* fallthrough */
+ fallthrough;
case SFF8024_ECC_25GBASE_CR_S:
case SFF8024_ECC_25GBASE_CR_N:
phylink_set(modes, 25000baseCR_Full);
case hwmon_temp_crit:
if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN))
return 0;
- /* fall through */
+ fallthrough;
case hwmon_temp_input:
case hwmon_temp_label:
return 0444;
case hwmon_in_crit:
if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN))
return 0;
- /* fall through */
+ fallthrough;
case hwmon_in_input:
case hwmon_in_label:
return 0444;
case hwmon_curr_crit:
if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN))
return 0;
- /* fall through */
+ fallthrough;
case hwmon_curr_input:
case hwmon_curr_label:
return 0444;
case hwmon_power_crit:
if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN))
return 0;
- /* fall through */
+ fallthrough;
case hwmon_power_input:
case hwmon_power_label:
return 0444;
dev_warn(sfp->dev, "hwmon probe failed: %d\n", err);
sfp_sm_mod_next(sfp, SFP_MOD_WAITDEV, 0);
- /* fall through */
+ fallthrough;
case SFP_MOD_WAITDEV:
/* Ensure that the device is attached before proceeding */
if (sfp->sm_dev_state < SFP_DEV_DOWN)
goto insert;
sfp_sm_mod_next(sfp, SFP_MOD_HPOWER, 0);
- /* fall through */
+ fallthrough;
case SFP_MOD_HPOWER:
/* Enable high power mode */
err = sfp_sm_mod_hpower(sfp, true);
*data_p = (c0 >> 3) & 0x0f;
write_data (dev, 0x10); /* send ACK */
*ns_p = PLIP_NB_1;
- /* fall through */
+ fallthrough;
case PLIP_NB_1:
cx = nibble_timeout;
printk(KERN_DEBUG "%s: receive start\n", dev->name);
rcv->state = PLIP_PK_LENGTH_LSB;
rcv->nibble = PLIP_NB_BEGIN;
- /* fall through */
+ fallthrough;
case PLIP_PK_LENGTH_LSB:
if (snd->state != PLIP_PK_DONE) {
return TIMEOUT;
}
rcv->state = PLIP_PK_LENGTH_MSB;
- /* fall through */
+ fallthrough;
case PLIP_PK_LENGTH_MSB:
if (plip_receive(nibble_timeout, dev,
rcv->state = PLIP_PK_DATA;
rcv->byte = 0;
rcv->checksum = 0;
- /* fall through */
+ fallthrough;
case PLIP_PK_DATA:
lbuf = rcv->skb->data;
rcv->checksum += lbuf[--rcv->byte];
} while (rcv->byte);
rcv->state = PLIP_PK_CHECKSUM;
- /* fall through */
+ fallthrough;
case PLIP_PK_CHECKSUM:
if (plip_receive(nibble_timeout, dev,
return ERROR;
}
rcv->state = PLIP_PK_DONE;
- /* fall through */
+ fallthrough;
case PLIP_PK_DONE:
/* Inform the upper layer for the arrival of a packet. */
case PLIP_NB_BEGIN:
write_data (dev, data & 0x0f);
*ns_p = PLIP_NB_1;
- /* fall through */
+ fallthrough;
case PLIP_NB_1:
write_data (dev, 0x10 | (data & 0x0f));
}
write_data (dev, 0x10 | (data >> 4));
*ns_p = PLIP_NB_2;
- /* fall through */
+ fallthrough;
case PLIP_NB_2:
write_data (dev, (data >> 4));
&snd->nibble, snd->length.b.lsb))
return TIMEOUT;
snd->state = PLIP_PK_LENGTH_MSB;
- /* fall through */
+ fallthrough;
case PLIP_PK_LENGTH_MSB:
if (plip_send(nibble_timeout, dev,
snd->state = PLIP_PK_DATA;
snd->byte = 0;
snd->checksum = 0;
- /* fall through */
+ fallthrough;
case PLIP_PK_DATA:
do {
snd->checksum += lbuf[--snd->byte];
} while (snd->byte);
snd->state = PLIP_PK_CHECKSUM;
- /* fall through */
+ fallthrough;
case PLIP_PK_CHECKSUM:
if (plip_send(nibble_timeout, dev,
dev_kfree_skb(snd->skb);
dev->stats.tx_packets++;
snd->state = PLIP_PK_DONE;
- /* fall through */
+ fallthrough;
case PLIP_PK_DONE:
/* Close the connection */
switch (nl->connection) {
case PLIP_CN_CLOSING:
netif_wake_queue (dev);
- /* fall through */
+ fallthrough;
case PLIP_CN_NONE:
case PLIP_CN_SEND:
rcv->state = PLIP_PK_TRIGGER;
break;
default:
bpf_warn_invalid_xdp_action(act);
- /* fall through */
+ fallthrough;
case XDP_ABORTED:
trace_xdp_exception(tun->dev, xdp_prog, act);
- /* fall through */
+ fallthrough;
case XDP_DROP:
this_cpu_inc(tun->pcpu_stats->rx_dropped);
break;
switch (err) {
case XDP_REDIRECT:
*flush = true;
- /* fall through */
+ fallthrough;
case XDP_TX:
return 0;
case XDP_PASS:
config USB_NET_CDC_NCM
tristate "CDC NCM support"
depends on USB_USBNET
+ select USB_NET_CDCETHER
default y
help
This driver provides support for CDC NCM (Network Control Model
switch (speed) {
case SPEED_5000:
aqc111_data->phy_cfg |= AQ_ADV_5G;
- /* fall-through */
+ fallthrough;
case SPEED_2500:
aqc111_data->phy_cfg |= AQ_ADV_2G5;
- /* fall-through */
+ fallthrough;
case SPEED_1000:
aqc111_data->phy_cfg |= AQ_ADV_1G;
- /* fall-through */
+ fallthrough;
case SPEED_100:
aqc111_data->phy_cfg |= AQ_ADV_100M;
/* fall-through */
netdev_dbg(dev->net, "asix_get_phy_addr()\n");
- if (ret < 0) {
+ if (ret < 2) {
netdev_err(dev->net, "Error reading PHYID register: %02x\n", ret);
goto out;
}
default:
dev_warn(&intf->dev,
"Couldn't detect memory size, assuming 32k\n");
- /* fall through */
+ fallthrough;
case 0x87654321:
catc_set_reg(catc, TxBufCount, 4);
catc_set_reg(catc, RxBufCount, 16);
case -ECONNRESET:
case -ESHUTDOWN:
dev->stats.tx_aborted_errors++;
- /* fall through */
+ fallthrough;
default:
dev->stats.tx_errors++;
dev_dbg(&dev->dev, "TX error (%d)\n", status);
USB_DEVICE(0x0a46, 0x1269), /* DM9621A USB to Fast Ethernet Adapter */
.driver_info = (unsigned long)&dm9601_info,
},
+ {
+ USB_DEVICE(0x0586, 0x3427), /* ZyXEL Keenetic Plus DSL xDSL modem */
+ .driver_info = (unsigned long)&dm9601_info,
+ },
{}, // END
};
case -EPIPE:
dev->net->stats.rx_errors++;
lan78xx_defer_kevent(dev, EVENT_RX_HALT);
- /* FALLTHROUGH */
+ fallthrough;
case -ECONNRESET: /* async unlink */
case -ESHUTDOWN: /* hardware gone */
netif_dbg(dev, ifdown, dev->net,
/* data overrun ... flush fifo? */
case -EOVERFLOW:
dev->net->stats.rx_over_errors++;
- /* FALLTHROUGH */
+ fallthrough;
default:
state = rx_cleanup;
return;
default:
netif_info(pegasus, tx_err, net, "TX status %d\n", status);
- /* FALL THROUGH */
+ fallthrough;
case 0:
break;
}
switch (cmd) {
case SIOCDEVPRIVATE:
data[0] = pegasus->phy;
- /* fall through */
+ fallthrough;
case SIOCDEVPRIVATE + 1:
read_mii_word(pegasus, data[0], data[1] & 0x1f, &data[3]);
res = 0;
case -ECONNRESET: /* unlink */
case -ESHUTDOWN:
netif_device_detach(tp->netdev);
- /* fall through */
+ fallthrough;
case -ENOENT:
case -EPROTO:
netif_info(tp, intr, tp->netdev,
r8152_mdio_write(tp, MII_BMCR, data);
data = r8153_phy_status(tp, PHY_STAT_LAN_ON);
- /* fall through */
+ fallthrough;
default:
if (data != PHY_STAT_LAN_ON)
tp->ups_info.speed_duplex = NWAY_1000M_FULL;
break;
}
- /* fall through */
+ fallthrough;
default:
ret = -EINVAL;
goto out;
switch (cmd) {
case SIOCDEVPRIVATE:
data[0] = dev->phy;
- /* fall through */
+ fallthrough;
case SIOCDEVPRIVATE + 1:
read_mii_word(dev, dev->phy, (data[1] & 0x1f), &data[3]);
break;
if (!usb_endpoint_dir_in(&e->desc))
continue;
intr = 1;
- /* FALLTHROUGH */
+ fallthrough;
case USB_ENDPOINT_XFER_BULK:
break;
default:
/* data overrun ... flush fifo? */
case -EOVERFLOW:
dev->net->stats.rx_over_errors++;
- // FALLTHROUGH
+ fallthrough;
default:
state = rx_cleanup;
continue;
case tx_done:
kfree(entry->urb->sg);
- /* fall through */
+ fallthrough;
case rx_cleanup:
usb_free_urb (entry->urb);
dev_kfree_skb (skb);
goto xdp_xmit;
default:
bpf_warn_invalid_xdp_action(act);
- /* fall through */
+ fallthrough;
case XDP_ABORTED:
trace_xdp_exception(rq->dev, xdp_prog, act);
- /* fall through */
+ fallthrough;
case XDP_DROP:
stats->xdp_drops++;
goto err_xdp;
goto xdp_xmit;
default:
bpf_warn_invalid_xdp_action(act);
- /* fall through */
+ fallthrough;
case XDP_ABORTED:
trace_xdp_exception(rq->dev, xdp_prog, act);
- /* fall through */
+ fallthrough;
case XDP_DROP:
stats->xdp_drops++;
goto xdp_drop;
goto xdp_xmit;
default:
bpf_warn_invalid_xdp_action(act);
- /* fall through */
+ fallthrough;
case XDP_ABORTED:
trace_xdp_exception(vi->dev, xdp_prog, act);
case XDP_DROP:
goto xdp_xmit;
default:
bpf_warn_invalid_xdp_action(act);
- /* fall through */
+ fallthrough;
case XDP_ABORTED:
trace_xdp_exception(vi->dev, xdp_prog, act);
- /* fall through */
+ fallthrough;
case XDP_DROP:
if (unlikely(xdp_page != page))
__free_pages(xdp_page, 0);
case ESP_V4_FLOW:
if (rss_fields & VMXNET3_RSS_FIELDS_ESPIP4)
info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- /* fallthrough */
+ fallthrough;
case SCTP_V4_FLOW:
case IPV4_FLOW:
info->data |= RXH_IP_SRC | RXH_IP_DST;
{
struct dlci_local *dlp;
struct frad_local *flp;
- int err;
netif_stop_queue(dev);
dlp = netdev_priv(dev);
flp = netdev_priv(dlp->slave);
- err = (*flp->deactivate)(dlp->slave, dev);
+ (*flp->deactivate)(dlp->slave, dev);
return 0;
}
dev->min_mtu = 68;
dev->max_mtu = HDLC_MAX_MTU;
dev->type = ARPHRD_RAWHDLC;
- dev->hard_header_len = 16;
+ dev->hard_header_len = 0;
+ dev->needed_headroom = 0;
dev->addr_len = 0;
dev->header_ops = &hdlc_null_ops;
}
memcpy(&state(hdlc)->settings, &new_settings, size);
spin_lock_init(&state(hdlc)->lock);
dev->header_ops = &cisco_header_ops;
+ dev->hard_header_len = sizeof(struct hdlc_header);
dev->type = ARPHRD_CISCO;
call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
netif_dormant_on(dev);
{
int result;
+ /* There should be a pseudo header of 1 byte added by upper layers.
+ * Check to make sure it is there before reading it.
+ */
+ if (skb->len < 1) {
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
- /* X.25 to LAPB */
switch (skb->data[0]) {
case X25_IFACE_DATA: /* Data to be transmitted */
skb_pull(skb, 1);
return result;
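For context on the skb->data[0] switch above: the 1-byte pseudo header carries one of the X.25 interface opcodes from <linux/if_x25.h> (listed here for reference; the patch itself only names X25_IFACE_DATA and X25_IFACE_DISCONNECT):

/* <linux/if_x25.h> pseudo-header opcodes */
#define X25_IFACE_DATA		0x00
#define X25_IFACE_CONNECT	0x01
#define X25_IFACE_DISCONNECT	0x02
#define X25_IFACE_PARAMS	0x03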
memcpy(&state(hdlc)->settings, &new_settings, size);
+
+ /* There's no header_ops so hard_header_len should be 0. */
+ dev->hard_header_len = 0;
+ /* When transmitting data:
+ * first we'll remove a pseudo header of 1 byte,
+ * then we'll prepend an LAPB header of at most 3 bytes.
+ */
+ dev->needed_headroom = 3 - 1;
+
dev->type = ARPHRD_X25;
call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
netif_dormant_off(dev);
case X25_IFACE_DISCONNECT:
if ((err = lapb_disconnect_request(dev)) != LAPB_OK)
pr_err("lapb_disconnect_request err: %d\n", err);
- /* Fall thru */
+ fallthrough;
default:
goto drop;
}
skb->dev = dev = lapbeth->ethdev;
+ skb_reset_network_header(skb);
+
dev_hard_header(skb, dev, ETH_P_DEC, bcast_addr, NULL, 0);
dev_queue_xmit(skb);
*/
ndev->needed_headroom = -1 + 3 + 2 + dev->hard_header_len
+ dev->needed_headroom;
+ ndev->needed_tailroom = dev->needed_tailroom;
lapbeth = netdev_priv(ndev);
lapbeth->axdev = ndev;
case SDLA_RET_NO_BUFS:
if (cmd == SDLA_INFORMATION_WRITE)
break;
- /* Else, fall through */
+ fallthrough;
default:
netdev_dbg(dev, "Cmd 0x%02X generated return code 0x%02X\n",
if (err != LAPB_OK)
netdev_err(dev, "lapb_disconnect_request error: %d\n",
err);
- /* fall through */
+ fallthrough;
default:
kfree_skb(skb);
return NETDEV_TX_OK;
case I2400M_SS_IDLE:
d_printf(1, dev, "entering BS-negotiated idle mode\n");
- /* Fall through */
+ fallthrough;
case I2400M_SS_DISCONNECTING:
case I2400M_SS_DATA_PATH_CONNECTED:
wimax_state_change(wimax_dev, WIMAX_ST_CONNECTED);
msleep(10); /* give the device some time */
goto retry;
}
- /* fall through */
+ fallthrough;
case -EINVAL: /* while removing driver */
case -ENODEV: /* dev disconnect ... */
case -ENOENT: /* just ignore it */
msleep(10); /* give the device some time */
goto retry;
}
- /* fall through */
+ fallthrough;
case -EINVAL: /* while removing driver */
case -ENODEV: /* dev disconnect ... */
case -ENOENT: /* just ignore it */
msleep(10); /* give the device some time */
goto retry;
}
- /* fall through */
+ fallthrough;
case -EINVAL: /* while removing driver */
case -ENODEV: /* dev disconnect ... */
case -ENOENT: /* just ignore it */
case XEN_NETIF_CTRL_HASH_ALGORITHM_NONE:
seq_puts(m, "Hash Algorithm: NONE\n");
- /* FALLTHRU */
+ fallthrough;
default:
return;
}
set_backend_state(be, XenbusStateClosed);
if (xenbus_dev_is_online(dev))
break;
- /* fall through - if not online */
+ fallthrough; /* if not online */
case XenbusStateUnknown:
set_backend_state(be, XenbusStateClosed);
device_unregister(&dev->dev);
case XenbusStateClosed:
if (dev->state == XenbusStateClosed)
break;
- /* Fall through - Missed the backend's CLOSING state. */
+ fallthrough; /* Missed the backend's CLOSING state */
case XenbusStateClosing:
xenbus_frontend_closed(dev);
break;
break;
}
- /* fall through */
+ fallthrough;
default:
/* jumbo frame ? */
if (skb->len > PN533_CMD_DATAEXCH_DATA_MAXLEN) {
break;
}
- /* fall through */
+ fallthrough;
default:
skb_put_u8(skb, 1); /*TG*/
switch (ST21NFCA_NFC_DEP_PFB_TYPE(dep_res->pfb)) {
case ST21NFCA_NFC_DEP_PFB_ACK_NACK_PDU:
pr_err("Received a ACK/NACK PDU\n");
- /* fall through */
+ fallthrough;
case ST21NFCA_NFC_DEP_PFB_I_PDU:
info->dep_info.curr_nfc_dep_pni =
ST21NFCA_NFC_DEP_PFB_PNI(dep_res->pfb + 1);
rc = down_killable(&stcontext->exchange_lock);
if (rc) {
WARN(1, "Semaphore is not found up in st95hf_in_send_cmd\n");
- return rc;
+ goto free_skb_resp;
}
rc = st95hf_spi_send(&stcontext->spicontext, skb->data,
dev_err(trf->dev, "%s - Invalid request: %d %d\n",
__func__, trf->state, on);
ret = -EINVAL;
- /* FALLTHROUGH */
+ fallthrough;
case TRF7970A_ST_IDLE:
case TRF7970A_ST_IDLE_RX_BLOCKED:
case TRF7970A_ST_WAIT_FOR_RX_DATA:
case TRF7970A_ST_WAIT_TO_ISSUE_EOF:
case TRF7970A_ST_LISTENING:
trf7970a_send_err_upstream(trf, -ECANCELED);
- /* FALLTHROUGH */
+ fallthrough;
case TRF7970A_ST_IDLE:
case TRF7970A_ST_IDLE_RX_BLOCKED:
trf7970a_switch_rf_off(trf);
case DMA_TRANS_READ_FAILED:
case DMA_TRANS_WRITE_FAILED:
entry->errors++;
- /* fall through */
+ fallthrough;
case DMA_TRANS_ABORTED:
{
struct ntb_transport_qp *qp = entry->qp;
case DMA_TRANS_READ_FAILED:
case DMA_TRANS_WRITE_FAILED:
entry->errors++;
- /* fall through */
+ fallthrough;
case DMA_TRANS_ABORTED:
{
void __iomem *offset =
static struct attribute *nvdimm_firmware_attributes[] = {
&dev_attr_activate.attr,
&dev_attr_result.attr,
+ NULL,
};
static umode_t nvdimm_firmware_visible(struct kobject *kobj, struct attribute *a, int n)
}
}
-static inline bool nvme_req_needs_retry(struct request *req)
-{
- if (blk_noretry_request(req))
- return false;
- if (nvme_req(req)->status & NVME_SC_DNR)
- return false;
- if (nvme_req(req)->retries >= nvme_max_retries)
- return false;
- return true;
-}
-
static void nvme_retry_req(struct request *req)
{
struct nvme_ns *ns = req->q->queuedata;
blk_mq_delay_kick_requeue_list(req->q, delay);
}
-void nvme_complete_rq(struct request *req)
+enum nvme_disposition {
+ COMPLETE,
+ RETRY,
+ FAILOVER,
+};
+
+static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
{
- blk_status_t status = nvme_error_status(nvme_req(req)->status);
+ if (likely(nvme_req(req)->status == 0))
+ return COMPLETE;
- trace_nvme_complete_rq(req);
+ if (blk_noretry_request(req) ||
+ (nvme_req(req)->status & NVME_SC_DNR) ||
+ nvme_req(req)->retries >= nvme_max_retries)
+ return COMPLETE;
- nvme_cleanup_cmd(req);
+ if (req->cmd_flags & REQ_NVME_MPATH) {
+ if (nvme_is_path_error(nvme_req(req)->status) ||
+ blk_queue_dying(req->q))
+ return FAILOVER;
+ } else {
+ if (blk_queue_dying(req->q))
+ return COMPLETE;
+ }
- if (nvme_req(req)->ctrl->kas)
- nvme_req(req)->ctrl->comp_seen = true;
+ return RETRY;
+}
- if (unlikely(status != BLK_STS_OK && nvme_req_needs_retry(req))) {
- if ((req->cmd_flags & REQ_NVME_MPATH) && nvme_failover_req(req))
- return;
+static inline void nvme_end_req(struct request *req)
+{
+ blk_status_t status = nvme_error_status(nvme_req(req)->status);
- if (!blk_queue_dying(req->q)) {
- nvme_retry_req(req);
- return;
- }
- } else if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
- req_op(req) == REQ_OP_ZONE_APPEND) {
+ if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
+ req_op(req) == REQ_OP_ZONE_APPEND)
req->__sector = nvme_lba_to_sect(req->q->queuedata,
le64_to_cpu(nvme_req(req)->result.u64));
- }
nvme_trace_bio_complete(req, status);
blk_mq_end_request(req, status);
}
+
+void nvme_complete_rq(struct request *req)
+{
+ trace_nvme_complete_rq(req);
+ nvme_cleanup_cmd(req);
+
+ if (nvme_req(req)->ctrl->kas)
+ nvme_req(req)->ctrl->comp_seen = true;
+
+ switch (nvme_decide_disposition(req)) {
+ case COMPLETE:
+ nvme_end_req(req);
+ return;
+ case RETRY:
+ nvme_retry_req(req);
+ return;
+ case FAILOVER:
+ nvme_failover_req(req);
+ return;
+ }
+}
EXPORT_SYMBOL_GPL(nvme_complete_rq);
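Condensed view of the new three-way completion flow (a paraphrase of nvme_decide_disposition() above, not normative):

/*
 *   status == 0                              -> COMPLETE
 *   noretry request, DNR set, or
 *   retries >= nvme_max_retries              -> COMPLETE
 *   REQ_NVME_MPATH and (path error or
 *   queue dying)                             -> FAILOVER
 *   non-mpath and queue dying                -> COMPLETE
 *   anything else                            -> RETRY
 */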
bool nvme_cancel_request(struct request *req, void *data, bool reserved)
case NVME_CTRL_RESETTING:
case NVME_CTRL_CONNECTING:
changed = true;
- /* FALLTHRU */
+ fallthrough;
default:
break;
}
case NVME_CTRL_NEW:
case NVME_CTRL_LIVE:
changed = true;
- /* FALLTHRU */
+ fallthrough;
default:
break;
}
case NVME_CTRL_NEW:
case NVME_CTRL_RESETTING:
changed = true;
- /* FALLTHRU */
+ fallthrough;
default:
break;
}
case NVME_CTRL_RESETTING:
case NVME_CTRL_CONNECTING:
changed = true;
- /* FALLTHRU */
+ fallthrough;
default:
break;
}
case NVME_CTRL_DELETING:
case NVME_CTRL_DEAD:
changed = true;
- /* FALLTHRU */
+ fallthrough;
default:
break;
}
switch (old_state) {
case NVME_CTRL_DELETING:
changed = true;
- /* FALLTHRU */
+ fallthrough;
default:
break;
}
blk_mq_unfreeze_queue(disk->queue);
}
+static inline bool nvme_first_scan(struct gendisk *disk)
+{
+ /* nvme_alloc_ns() scans the disk prior to adding it */
+ return !(disk->flags & GENHD_FL_UP);
+}
+
+static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id)
+{
+ struct nvme_ctrl *ctrl = ns->ctrl;
+ u32 iob;
+
+ if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
+ is_power_of_2(ctrl->max_hw_sectors))
+ iob = ctrl->max_hw_sectors;
+ else
+ iob = nvme_lba_to_sect(ns, le16_to_cpu(id->noiob));
+
+ if (!iob)
+ return;
+
+ if (!is_power_of_2(iob)) {
+ if (nvme_first_scan(ns->disk))
+ pr_warn("%s: ignoring unaligned IO boundary:%u\n",
+ ns->disk->disk_name, iob);
+ return;
+ }
+
+ if (blk_queue_is_zoned(ns->disk->queue)) {
+ if (nvme_first_scan(ns->disk))
+ pr_warn("%s: ignoring zoned namespace IO boundary\n",
+ ns->disk->disk_name);
+ return;
+ }
+
+ blk_queue_chunk_sectors(ns->queue, iob);
+}
+
static int __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
{
unsigned lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
struct nvme_ns *ns = disk->private_data;
struct nvme_ctrl *ctrl = ns->ctrl;
int ret;
- u32 iob;
/*
* If identify namespace failed, use default 512 byte block size so
return -ENODEV;
}
- if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
- is_power_of_2(ctrl->max_hw_sectors))
- iob = ctrl->max_hw_sectors;
- else
- iob = nvme_lba_to_sect(ns, le16_to_cpu(id->noiob));
-
ns->features = 0;
ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
/* the PI implementation requires metadata equal t10 pi tuple size */
}
}
- if (iob)
- blk_queue_chunk_sectors(ns->queue, rounddown_pow_of_two(iob));
+ nvme_set_chunk_sectors(ns, id);
nvme_update_disk_info(disk, ns, id);
#ifdef CONFIG_NVME_MULTIPATH
if (ns->head->disk) {
{
struct nvme_cel *cel, *ret = NULL;
- spin_lock(&ctrl->lock);
+ spin_lock_irq(&ctrl->lock);
list_for_each_entry(cel, &ctrl->cels, entry) {
if (cel->csi == csi) {
ret = cel;
break;
}
}
- spin_unlock(&ctrl->lock);
+ spin_unlock_irq(&ctrl->lock);
return ret;
}
cel->csi = csi;
- spin_lock(&ctrl->lock);
+ spin_lock_irq(&ctrl->lock);
list_add_tail(&cel->entry, &ctrl->cels);
- spin_unlock(&ctrl->lock);
+ spin_unlock_irq(&ctrl->lock);
out:
*log = &cel->log;
return 0;
return 0;
if (a == &dev_attr_hostid.attr && !ctrl->opts)
return 0;
+ if (a == &dev_attr_ctrl_loss_tmo.attr && !ctrl->opts)
+ return 0;
+ if (a == &dev_attr_reconnect_delay.attr && !ctrl->opts)
+ return 0;
return a->mode;
}
struct nvme_subsystem *subsys = ctrl->subsys;
struct nvme_cel *cel, *next;
- if (subsys && ctrl->instance != subsys->instance)
+ if (!subsys || ctrl->instance != subsys->instance)
ida_simple_remove(&nvme_instance_ida, ctrl->instance);
list_for_each_entry_safe(cel, next, &ctrl->cels, entry) {
}
EXPORT_SYMBOL_GPL(nvme_unfreeze);
-void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
+int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
{
struct nvme_ns *ns;
break;
}
up_read(&ctrl->namespaces_rwsem);
+ return timeout;
}
EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout);
 * which is required to set the queue live in the appropriate states.
*/
switch (ctrl->state) {
- case NVME_CTRL_NEW:
case NVME_CTRL_CONNECTING:
if (nvme_is_fabrics(req->cmd) &&
req->cmd->fabrics.fctype == nvme_fabrics_type_connect)
}
__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
- if (!nvme_end_request(rq, status, result))
+ if (!nvme_try_complete_req(rq, status, result))
nvme_fc_complete_rq(rq);
check_error:
if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) {
dev_err(ctrl->dev,
"FCP Op failed - cmdiu dma mapping failed.\n");
- ret = EFAULT;
+ ret = -EFAULT;
goto out_on_error;
}
if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) {
dev_err(ctrl->dev,
"FCP Op failed - rspiu dma mapping failed.\n");
- ret = EFAULT;
+ ret = -EFAULT;
}
atomic_set(&op->state, FCPOP_STATE_IDLE);
}
}
-bool nvme_failover_req(struct request *req)
+void nvme_failover_req(struct request *req)
{
struct nvme_ns *ns = req->q->queuedata;
- u16 status = nvme_req(req)->status;
+ u16 status = nvme_req(req)->status & 0x7ff;
unsigned long flags;
- switch (status & 0x7ff) {
- case NVME_SC_ANA_TRANSITION:
- case NVME_SC_ANA_INACCESSIBLE:
- case NVME_SC_ANA_PERSISTENT_LOSS:
- /*
- * If we got back an ANA error we know the controller is alive,
- * but not ready to serve this namespaces. The spec suggests
- * we should update our general state here, but due to the fact
- * that the admin and I/O queues are not serialized that is
- * fundamentally racy. So instead just clear the current path,
- * mark the the path as pending and kick of a re-read of the ANA
- * log page ASAP.
- */
- nvme_mpath_clear_current_path(ns);
- if (ns->ctrl->ana_log_buf) {
- set_bit(NVME_NS_ANA_PENDING, &ns->flags);
- queue_work(nvme_wq, &ns->ctrl->ana_work);
- }
- break;
- case NVME_SC_HOST_PATH_ERROR:
- case NVME_SC_HOST_ABORTED_CMD:
- /*
- * Temporary transport disruption in talking to the controller.
- * Try to send on a new path.
- */
- nvme_mpath_clear_current_path(ns);
- break;
- default:
- /* This was a non-ANA error so follow the normal error path. */
- return false;
+ nvme_mpath_clear_current_path(ns);
+
+ /*
+ * If we got back an ANA error, we know the controller is alive but not
+ * ready to serve this namespace. Kick off a re-read of the ANA
+ * information page, and just try any other available path for now.
+ */
+ if (nvme_is_ana_error(status) && ns->ctrl->ana_log_buf) {
+ set_bit(NVME_NS_ANA_PENDING, &ns->flags);
+ queue_work(nvme_wq, &ns->ctrl->ana_work);
}
spin_lock_irqsave(&ns->head->requeue_lock, flags);
blk_steal_bios(&ns->head->requeue_list, req);
spin_unlock_irqrestore(&ns->head->requeue_lock, flags);
- blk_mq_end_request(req, 0);
+ blk_mq_end_request(req, 0);
kblockd_schedule_work(&ns->head->requeue_work);
- return true;
}
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head,
int node, struct nvme_ns *old)
{
- struct nvme_ns *ns, *found, *fallback = NULL;
+ struct nvme_ns *ns, *found = NULL;
if (list_is_singular(&head->list)) {
if (nvme_path_is_disabled(old))
goto out;
}
if (ns->ana_state == NVME_ANA_NONOPTIMIZED)
- fallback = ns;
+ found = ns;
}
- /* No optimized path found, re-check the current path */
+ /*
+ * The loop above skips the current path for round-robin semantics.
+ * Fall back to the current path if either:
+ * - no other optimized path found and current is optimized,
+ * - no other usable path found and current is usable.
+ */
if (!nvme_path_is_disabled(old) &&
- old->ana_state == NVME_ANA_OPTIMIZED) {
- found = old;
- goto out;
- }
- if (!fallback)
+ (old->ana_state == NVME_ANA_OPTIMIZED ||
+ (!found && old->ana_state == NVME_ANA_NONOPTIMIZED)))
+ return old;
+
+ if (!found)
return NULL;
- found = fallback;
out:
rcu_assign_pointer(head->current_path[node], found);
return found;
return (len >> 2) - 1;
}
-static inline bool nvme_end_request(struct request *req, __le16 status,
+static inline bool nvme_is_ana_error(u16 status)
+{
+ switch (status & 0x7ff) {
+ case NVME_SC_ANA_TRANSITION:
+ case NVME_SC_ANA_INACCESSIBLE:
+ case NVME_SC_ANA_PERSISTENT_LOSS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline bool nvme_is_path_error(u16 status)
+{
+ /* check for a status code type of 'path related status' */
+ return (status & 0x700) == 0x300;
+}
+
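In nvme_req(req)->status the Status Code Type occupies bits 10:8, and SCT 0x3 is 'path related status', so one mask comparison covers every path error. A worked example using a constant from include/linux/nvme.h:

	u16 status = NVME_SC_HOST_PATH_ERROR;		/* 0x370 */
	/* 0x370 & 0x700 == 0x300 -> SCT 0x3, a path related status */
	bool path_err = (status & 0x700) == 0x300;	/* true */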
+/*
+ * Fill in the status and result information from the CQE, and then figure out
+ * if blk-mq will need to use IPI magic to complete the request, and if yes do
+ * so. If not, let the caller complete the request without an indirect function
+ * call.
+ */
+static inline bool nvme_try_complete_req(struct request *req, __le16 status,
union nvme_result result)
{
struct nvme_request *rq = nvme_req(req);
void nvme_sync_queues(struct nvme_ctrl *ctrl);
void nvme_unfreeze(struct nvme_ctrl *ctrl);
void nvme_wait_freeze(struct nvme_ctrl *ctrl);
-void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
+int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
void nvme_start_freeze(struct nvme_ctrl *ctrl);
#define NVME_QID_ANY -1
void nvme_mpath_start_freeze(struct nvme_subsystem *subsys);
void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
struct nvme_ctrl *ctrl, int *flags);
-bool nvme_failover_req(struct request *req);
+void nvme_failover_req(struct request *req);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head);
void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id);
sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
}
-static inline bool nvme_failover_req(struct request *req)
+static inline void nvme_failover_req(struct request *req)
{
- return false;
}
static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
unsigned max_qid;
unsigned io_queues[HCTX_MAX_TYPES];
unsigned int num_vecs;
- u16 q_depth;
+ u32 q_depth;
int io_sqes;
u32 db_stride;
void __iomem *bar;
static int io_queue_depth_set(const char *val, const struct kernel_param *kp)
{
int ret;
- u16 n;
+ u32 n;
- ret = kstrtou16(val, 10, &n);
+ ret = kstrtou32(val, 10, &n);
if (ret != 0 || n < 2)
return -EINVAL;
- return param_set_ushort(val, kp);
+ return param_set_uint(val, kp);
}
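With the depth widened to u32, the setter pairs with the stock unsigned getter. A sketch of the usual wiring, assuming the conventional kernel_param_ops pattern for this parameter:

	static const struct kernel_param_ops io_queue_depth_ops = {
		.set = io_queue_depth_set,	/* range-check, then param_set_uint */
		.get = param_get_uint,
	};
	module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644);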
static inline unsigned int sq_idx(unsigned int qid, u32 stride)
dma_addr_t sq_dma_addr;
dma_addr_t cq_dma_addr;
u32 __iomem *q_db;
- u16 q_depth;
+ u32 q_depth;
u16 cq_vector;
u16 sq_tail;
u16 cq_head;
req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), cqe->command_id);
trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
- if (!nvme_end_request(req, cqe->status, cqe->result))
+ if (!nvme_try_complete_req(req, cqe->status, cqe->result))
nvme_pci_complete_rq(req);
}
switch (dev->ctrl.state) {
case NVME_CTRL_CONNECTING:
nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
- /* fall through */
+ fallthrough;
case NVME_CTRL_DELETING:
dev_warn_ratelimited(dev->ctrl.device,
"I/O %d QID %d timeout, disable controller\n",
req->tag, nvmeq->qid);
- nvme_dev_disable(dev, true);
nvme_req(req)->flags |= NVME_REQ_CANCELLED;
+ nvme_dev_disable(dev, true);
return BLK_EH_DONE;
case NVME_CTRL_RESETTING:
return BLK_EH_RESET_TIMER;
dev_warn(dev->ctrl.device,
"I/O %d QID %d timeout, reset controller\n",
req->tag, nvmeq->qid);
+ nvme_req(req)->flags |= NVME_REQ_CANCELLED;
nvme_dev_disable(dev, false);
nvme_reset_ctrl(&dev->ctrl);
- nvme_req(req)->flags |= NVME_REQ_CANCELLED;
return BLK_EH_DONE;
}
dev->ctrl.cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
- dev->q_depth = min_t(u16, NVME_CAP_MQES(dev->ctrl.cap) + 1,
+ dev->q_depth = min_t(u32, NVME_CAP_MQES(dev->ctrl.cap) + 1,
io_queue_depth);
dev->ctrl.sqsize = dev->q_depth - 1; /* 0's based queue depth */
dev->db_stride = 1 << NVME_CAP_STRIDE(dev->ctrl.cap);
static int nvme_setup_prp_pools(struct nvme_dev *dev)
{
dev->prp_page_pool = dma_pool_create("prp list page", dev->dev,
- PAGE_SIZE, PAGE_SIZE, 0);
+ NVME_CTRL_PAGE_SIZE,
+ NVME_CTRL_PAGE_SIZE, 0);
if (!dev->prp_page_pool)
return -ENOMEM;
struct sockaddr_storage src_addr;
struct nvme_ctrl ctrl;
+ struct mutex teardown_lock;
bool use_inline_data;
u32 io_queues[HCTX_MAX_TYPES];
};
if (!new) {
nvme_start_queues(&ctrl->ctrl);
- nvme_wait_freeze(&ctrl->ctrl);
+ if (!nvme_wait_freeze_timeout(&ctrl->ctrl, NVME_IO_TIMEOUT)) {
+ /*
+ * If we timed out waiting for freeze we are likely to
+ * be stuck. Fail the controller initialization just
+ * to be safe.
+ */
+ ret = -ENODEV;
+ goto out_wait_freeze_timed_out;
+ }
blk_mq_update_nr_hw_queues(ctrl->ctrl.tagset,
ctrl->ctrl.queue_count - 1);
nvme_unfreeze(&ctrl->ctrl);
return 0;
+out_wait_freeze_timed_out:
+ nvme_stop_queues(&ctrl->ctrl);
+ nvme_rdma_stop_io_queues(ctrl);
out_cleanup_connect_q:
if (new)
blk_cleanup_queue(ctrl->ctrl.connect_q);
static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
bool remove)
{
+ mutex_lock(&ctrl->teardown_lock);
blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
nvme_rdma_stop_queue(&ctrl->queues[0]);
if (ctrl->ctrl.admin_tagset) {
if (remove)
blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
nvme_rdma_destroy_admin_queue(ctrl, remove);
+ mutex_unlock(&ctrl->teardown_lock);
}
static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
bool remove)
{
+ mutex_lock(&ctrl->teardown_lock);
if (ctrl->ctrl.queue_count > 1) {
nvme_start_freeze(&ctrl->ctrl);
nvme_stop_queues(&ctrl->ctrl);
nvme_start_queues(&ctrl->ctrl);
nvme_rdma_destroy_io_queues(ctrl, remove);
}
+ mutex_unlock(&ctrl->teardown_lock);
}
static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
return;
+ dev_warn(ctrl->ctrl.device, "starting error recovery\n");
queue_work(nvme_reset_wq, &ctrl->err_work);
}
if (!refcount_dec_and_test(&req->ref))
return;
- if (!nvme_end_request(rq, req->status, req->result))
+ if (!nvme_try_complete_req(rq, req->status, req->result))
nvme_rdma_complete_rq(rq);
}
case RDMA_CM_EVENT_CONNECT_ERROR:
case RDMA_CM_EVENT_UNREACHABLE:
nvme_rdma_destroy_queue_ib(queue);
- /* fall through */
+ fallthrough;
case RDMA_CM_EVENT_ADDR_ERROR:
dev_dbg(queue->ctrl->ctrl.device,
"CM error event %d\n", ev->event);
return 0;
}
+static void nvme_rdma_complete_timed_out(struct request *rq)
+{
+ struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_rdma_queue *queue = req->queue;
+ struct nvme_rdma_ctrl *ctrl = queue->ctrl;
+
+ /* fence other contexts that may complete the command */
+ mutex_lock(&ctrl->teardown_lock);
+ nvme_rdma_stop_queue(queue);
+ if (!blk_mq_request_completed(rq)) {
+ nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
+ blk_mq_complete_request(rq);
+ }
+ mutex_unlock(&ctrl->teardown_lock);
+}
+
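Reduced to its essentials, the fence works because the queue stop and the completion happen under the same mutex the teardown paths hold, and blk_mq_request_completed() makes the completion race-free. A generic sketch (stop_queue() stands in for the transport-specific stop):

	mutex_lock(&teardown_lock);		/* serialize with teardown paths */
	stop_queue(q);				/* no further completions arrive */
	if (!blk_mq_request_completed(rq)) {	/* did someone beat us to it? */
		nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
		blk_mq_complete_request(rq);	/* complete exactly once */
	}
	mutex_unlock(&teardown_lock);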
static enum blk_eh_timer_return
nvme_rdma_timeout(struct request *rq, bool reserved)
{
dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n",
rq->tag, nvme_rdma_queue_idx(queue));
- /*
- * Restart the timer if a controller reset is already scheduled. Any
- * timed out commands would be handled before entering the connecting
- * state.
- */
- if (ctrl->ctrl.state == NVME_CTRL_RESETTING)
- return BLK_EH_RESET_TIMER;
-
if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
/*
- * Teardown immediately if controller times out while starting
- * or we are already started error recovery. all outstanding
- * requests are completed on shutdown, so we return BLK_EH_DONE.
+ * If we are resetting, connecting or deleting, we should
+ * complete immediately because we may block the controller
+ * teardown or setup sequences:
+ * - ctrl disable/shutdown fabrics requests
+ * - connect requests
+ * - initialization admin requests
+ * - I/O requests that entered after unquiescing and
+ * the controller stopped responding
+ *
+ * All other requests should be cancelled by the error
+ * recovery work, so it's fine that we fail it here.
*/
- flush_work(&ctrl->err_work);
- nvme_rdma_teardown_io_queues(ctrl, false);
- nvme_rdma_teardown_admin_queue(ctrl, false);
+ nvme_rdma_complete_timed_out(rq);
return BLK_EH_DONE;
}
- dev_warn(ctrl->ctrl.device, "starting error recovery\n");
+ /*
+ * LIVE state should trigger the normal error recovery which will
+ * handle completing this request.
+ */
nvme_rdma_error_recovery(ctrl);
-
return BLK_EH_RESET_TIMER;
}
return ERR_PTR(-ENOMEM);
ctrl->ctrl.opts = opts;
INIT_LIST_HEAD(&ctrl->list);
+ mutex_init(&ctrl->teardown_lock);
if (!(opts->mask & NVMF_OPT_TRSVCID)) {
opts->trsvcid =
struct sockaddr_storage src_addr;
struct nvme_ctrl ctrl;
+ struct mutex teardown_lock;
struct work_struct err_work;
struct delayed_work connect_work;
struct nvme_tcp_request async_req;
if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
return;
+ dev_warn(ctrl->device, "starting error recovery\n");
queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
}
return -EINVAL;
}
- if (!nvme_end_request(rq, cqe->status, cqe->result))
+ if (!nvme_try_complete_req(rq, cqe->status, cqe->result))
nvme_complete_rq(rq);
queue->nr_cqe++;
{
union nvme_result res = {};
- if (!nvme_end_request(rq, cpu_to_le16(status << 1), res))
+ if (!nvme_try_complete_req(rq, cpu_to_le16(status << 1), res))
nvme_complete_rq(rq);
}
case TCP_LAST_ACK:
case TCP_FIN_WAIT1:
case TCP_FIN_WAIT2:
- /* fallthrough */
nvme_tcp_error_recovery(&queue->ctrl->ctrl);
break;
default:
if (!test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
return;
-
__nvme_tcp_stop_queue(queue);
}
if (!new) {
nvme_start_queues(ctrl);
- nvme_wait_freeze(ctrl);
+ if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) {
+ /*
+ * If we timed out waiting for freeze we are likely to
+ * be stuck. Fail the controller initialization just
+ * to be safe.
+ */
+ ret = -ENODEV;
+ goto out_wait_freeze_timed_out;
+ }
blk_mq_update_nr_hw_queues(ctrl->tagset,
ctrl->queue_count - 1);
nvme_unfreeze(ctrl);
return 0;
+out_wait_freeze_timed_out:
+ nvme_stop_queues(ctrl);
+ nvme_tcp_stop_io_queues(ctrl);
out_cleanup_connect_q:
if (new)
blk_cleanup_queue(ctrl->connect_q);
static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
bool remove)
{
+ mutex_lock(&to_tcp_ctrl(ctrl)->teardown_lock);
blk_mq_quiesce_queue(ctrl->admin_q);
nvme_tcp_stop_queue(ctrl, 0);
if (ctrl->admin_tagset) {
if (remove)
blk_mq_unquiesce_queue(ctrl->admin_q);
nvme_tcp_destroy_admin_queue(ctrl, remove);
+ mutex_unlock(&to_tcp_ctrl(ctrl)->teardown_lock);
}
static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
bool remove)
{
+ mutex_lock(&to_tcp_ctrl(ctrl)->teardown_lock);
if (ctrl->queue_count <= 1)
- return;
+ goto out;
+ blk_mq_quiesce_queue(ctrl->admin_q);
nvme_start_freeze(ctrl);
nvme_stop_queues(ctrl);
nvme_tcp_stop_io_queues(ctrl);
if (remove)
nvme_start_queues(ctrl);
nvme_tcp_destroy_io_queues(ctrl, remove);
+out:
+ mutex_unlock(&to_tcp_ctrl(ctrl)->teardown_lock);
}
static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
nvme_tcp_queue_request(&ctrl->async_req, true, true);
}
+static void nvme_tcp_complete_timed_out(struct request *rq)
+{
+ struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
+
+ /* fence other contexts that may complete the command */
+ mutex_lock(&to_tcp_ctrl(ctrl)->teardown_lock);
+ nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue));
+ if (!blk_mq_request_completed(rq)) {
+ nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
+ blk_mq_complete_request(rq);
+ }
+ mutex_unlock(&to_tcp_ctrl(ctrl)->teardown_lock);
+}
+
static enum blk_eh_timer_return
nvme_tcp_timeout(struct request *rq, bool reserved)
{
struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
- struct nvme_tcp_ctrl *ctrl = req->queue->ctrl;
+ struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
struct nvme_tcp_cmd_pdu *pdu = req->pdu;
- /*
- * Restart the timer if a controller reset is already scheduled. Any
- * timed out commands would be handled before entering the connecting
- * state.
- */
- if (ctrl->ctrl.state == NVME_CTRL_RESETTING)
- return BLK_EH_RESET_TIMER;
-
- dev_warn(ctrl->ctrl.device,
+ dev_warn(ctrl->device,
"queue %d: timeout request %#x type %d\n",
nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);
- if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
+ if (ctrl->state != NVME_CTRL_LIVE) {
/*
- * Teardown immediately if controller times out while starting
- * or we are already started error recovery. all outstanding
- * requests are completed on shutdown, so we return BLK_EH_DONE.
+ * If we are resetting, connecting or deleting, we should
+ * complete immediately because we may block the controller
+ * teardown or setup sequences:
+ * - ctrl disable/shutdown fabrics requests
+ * - connect requests
+ * - initialization admin requests
+ * - I/O requests that entered after unquiescing and
+ * the controller stopped responding
+ *
+ * All other requests should be cancelled by the error
+ * recovery work, so it's fine that we fail it here.
*/
- flush_work(&ctrl->err_work);
- nvme_tcp_teardown_io_queues(&ctrl->ctrl, false);
- nvme_tcp_teardown_admin_queue(&ctrl->ctrl, false);
+ nvme_tcp_complete_timed_out(rq);
return BLK_EH_DONE;
}
- dev_warn(ctrl->ctrl.device, "starting error recovery\n");
- nvme_tcp_error_recovery(&ctrl->ctrl);
-
+ /*
+ * LIVE state should trigger the normal error recovery which will
+ * handle completing this request.
+ */
+ nvme_tcp_error_recovery(ctrl);
return BLK_EH_RESET_TIMER;
}
nvme_tcp_reconnect_ctrl_work);
INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);
+ mutex_init(&ctrl->teardown_lock);
if (!(opts->mask & NVMF_OPT_TRSVCID)) {
opts->trsvcid =
up_write(&nvmet_config_sem);
kfree_rcu(new_model, rcuhead);
+ kfree(new_model_number);
return count;
}
status = NVME_SC_ACCESS_DENIED;
break;
case -EIO:
- /* FALLTHRU */
+ fallthrough;
default:
req->error_loc = offsetof(struct nvme_common_command, opcode);
status = NVME_SC_INTERNAL | NVME_SC_DNR;
static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
+ if (unlikely(ctrl->kato == 0))
+ return;
+
pr_debug("ctrl %d start keep-alive timer for %d secs\n",
ctrl->cntlid, ctrl->kato);
static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
+ if (unlikely(ctrl->kato == 0))
+ return;
+
pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);
cancel_delayed_work_sync(&ctrl->ka_work);
return;
if (fcpreq->fcp_error ||
fcpreq->transferred_length != fcpreq->transfer_length) {
- spin_lock(&fod->flock);
+ spin_lock_irqsave(&fod->flock, flags);
fod->abort = true;
- spin_unlock(&fod->flock);
+ spin_unlock_irqrestore(&fod->flock, flags);
nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
return;
break;
/* Fall-Thru to RSP handling */
- /* FALLTHRU */
+ fallthrough;
case NVMET_FCOP_RSP:
if (fcpreq) {
req->error_loc = offsetof(struct nvme_rw_command, nsid);
break;
case BLK_STS_IOERR:
- /* fallthru */
default:
status = NVME_SC_INTERNAL | NVME_SC_DNR;
req->error_loc = offsetof(struct nvme_common_command, opcode);
return;
}
- if (!nvme_end_request(rq, cqe->status, cqe->result))
+ if (!nvme_try_complete_req(rq, cqe->status, cqe->result))
nvme_loop_complete_rq(rq);
}
}
req->cqe->result = nvme_req(rq)->result;
nvmet_req_complete(req, status);
- blk_put_request(rq);
+ blk_mq_free_request(rq);
}
static void nvmet_passthru_req_done(struct request *rq,
req->cqe->result = nvme_req(rq)->result;
nvmet_req_complete(req, nvme_req(rq)->status);
- blk_put_request(rq);
+ blk_mq_free_request(rq);
}
static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
if (unlikely(!ns)) {
pr_err("failed to get passthru ns nsid:%u\n", nsid);
status = NVME_SC_INVALID_NS | NVME_SC_DNR;
- goto fail_out;
+ goto out;
}
q = ns->queue;
rq = nvme_alloc_request(q, req->cmd, BLK_MQ_REQ_NOWAIT, NVME_QID_ANY);
if (IS_ERR(rq)) {
- rq = NULL;
status = NVME_SC_INTERNAL;
- goto fail_out;
+ goto out_put_ns;
}
if (req->sg_cnt) {
ret = nvmet_passthru_map_sg(req, rq);
if (unlikely(ret)) {
status = NVME_SC_INTERNAL;
- goto fail_out;
+ goto out_put_req;
}
}
return;
-fail_out:
+out_put_req:
+ blk_mq_free_request(rq);
+out_put_ns:
if (ns)
nvme_put_ns(ns);
+out:
nvmet_req_complete(req, status);
- blk_put_request(rq);
}
/*
u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req)
{
+ /* Reject any commands with non-sgl flags set (i.e. fused commands) */
+ if (req->cmd->common.flags & ~NVME_CMD_SGL_ALL)
+ return NVME_SC_INVALID_FIELD;
+
switch (req->cmd->common.opcode) {
case nvme_cmd_resv_register:
case nvme_cmd_resv_report:
u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
{
+ /* Reject any commands with non-sgl flags set (i.e. fused commands) */
+ if (req->cmd->common.flags & ~NVME_CMD_SGL_ALL)
+ return NVME_SC_INVALID_FIELD;
+
/*
* Passthru all vendor specific commands
*/
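NVME_CMD_SGL_ALL is NVME_CMD_SGL_METABUF | NVME_CMD_SGL_METASEG (0xc0 in include/linux/nvme.h), so any fused-command bit survives the inverted mask. A worked example:

	u8 flags = NVME_CMD_FUSE_FIRST;		/* 0x01, first of a fused pair */
	if (flags & ~NVME_CMD_SGL_ALL)		/* 0x01 & ~0xc0 = 0x01 */
		return NVME_SC_INVALID_FIELD;	/* fused commands rejected */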
schedule_delayed_work(&port->repair_work, 0);
break;
}
- /* FALLTHROUGH */
+ fallthrough;
case RDMA_CM_EVENT_DISCONNECTED:
case RDMA_CM_EVENT_TIMEWAIT_EXIT:
nvmet_rdma_queue_disconnect(queue);
case RDMA_CM_EVENT_REJECTED:
pr_debug("Connection rejected: %s\n",
rdma_reject_msg(cm_id, event->status));
- /* FALLTHROUGH */
+ fallthrough;
case RDMA_CM_EVENT_UNREACHABLE:
case RDMA_CM_EVENT_CONNECT_ERROR:
nvmet_rdma_queue_connect_fail(cm_id, queue);
static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
struct nvmet_tcp_cmd *cmd)
{
+ if (unlikely(!queue->nr_cmds)) {
+ /* We didn't allocate cmds yet, send 0xffff */
+ return USHRT_MAX;
+ }
+
return cmd - queue->cmds;
}
struct nvme_tcp_data_pdu *data = &queue->pdu.data;
struct nvmet_tcp_cmd *cmd;
- cmd = &queue->cmds[data->ttag];
+ if (likely(queue->nr_cmds))
+ cmd = &queue->cmds[data->ttag];
+ else
+ cmd = &queue->connect;
if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) {
pr_err("ttag %u unexpected data offset %u (expected %u)\n",
* PCI bus specific translator
*/
+static bool of_node_is_pcie(struct device_node *np)
+{
+ bool is_pcie = of_node_name_eq(np, "pcie");
+
+ if (is_pcie)
+ pr_warn_once("%pOF: Missing device_type\n", np);
+
+ return is_pcie;
+}
+
static int of_bus_pci_match(struct device_node *np)
{
/*
* "pciex" is PCI Express
* "vci" is for the /chaos bridge on 1st-gen PCI powermacs
* "ht" is hypertransport
+ *
+ * If none of the device_type values match, but the node name is
+ * "pcie", accept the device as PCI (with a warning).
*/
return of_node_is_type(np, "pci") || of_node_is_type(np, "pciex") ||
- of_node_is_type(np, "vci") || of_node_is_type(np, "ht");
+ of_node_is_type(np, "vci") || of_node_is_type(np, "ht") ||
+ of_node_is_pcie(np);
}
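A sketch of a node the fallback now accepts; of_node_name_eq() compares only the name part before the unit address, so a "pcie@..." node missing device_type still matches:

	/* Illustrative device tree fragment, device_type omitted:
	 *	pcie@fe000000 { ... };
	 * of_node_is_type(np, "pci") fails for it, but: */
	static bool matches_pcie_by_name(struct device_node *np)
	{
		return of_node_name_eq(np, "pcie");	/* matches "pcie@fe000000" */
	}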
static void of_bus_pci_count_cells(struct device_node *np,
/* Don't error out as we'd break some existing DTs */
continue;
}
+ if (range.cpu_addr == OF_BAD_ADDR) {
+ pr_err("translation of DMA address(%llx) to CPU address failed node(%pOF)\n",
+ range.bus_addr, node);
+ continue;
+ }
dma_offset = range.cpu_addr - range.bus_addr;
/* Take lower and upper limits */
* have OPP table for the device, while others don't and
* opp_set_rate() just needs to behave like clk_set_rate().
*/
- if (!_get_opp_count(opp_table))
- return 0;
+ if (!_get_opp_count(opp_table)) {
+ ret = 0;
+ goto put_opp_table;
+ }
if (!opp_table->required_opp_tables && !opp_table->regulators &&
!opp_table->paths) {
ret = _set_opp_bw(opp_table, NULL, dev, true);
if (ret)
- return ret;
+ goto put_opp_table;
if (opp_table->regulator_enabled) {
regulator_disable(opp_table->regulators[0]);
/* Return early if nothing to do */
if (old_freq == freq) {
- dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
- __func__, freq);
- ret = 0;
- goto put_opp_table;
+ if (!opp_table->required_opp_tables && !opp_table->regulators &&
+ !opp_table->paths) {
+ dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
+ __func__, freq);
+ ret = 0;
+ goto put_opp_table;
+ }
}
/*
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
-void _opp_remove_all_static(struct opp_table *opp_table)
+bool _opp_remove_all_static(struct opp_table *opp_table)
{
struct dev_pm_opp *opp, *tmp;
+ bool ret = true;
mutex_lock(&opp_table->lock);
- if (!opp_table->parsed_static_opps || --opp_table->parsed_static_opps)
+ if (!opp_table->parsed_static_opps) {
+ ret = false;
+ goto unlock;
+ }
+
+ if (--opp_table->parsed_static_opps)
goto unlock;
list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
unlock:
mutex_unlock(&opp_table->lock);
+
+ return ret;
}
/**
return;
}
- _opp_remove_all_static(opp_table);
+ /*
+ * Drop the extra reference only if the OPP table was successfully added
+ * with dev_pm_opp_of_add_table() earlier.
+ */
+ if (_opp_remove_all_static(opp_table))
+ dev_pm_opp_put_opp_table(opp_table);
/* Drop reference taken by _find_opp_table() */
dev_pm_opp_put_opp_table(opp_table);
-
- /* Drop reference taken while the OPP table was added */
- dev_pm_opp_put_opp_table(opp_table);
}
/**
/* Routines internal to opp core */
void dev_pm_opp_get(struct dev_pm_opp *opp);
-void _opp_remove_all_static(struct opp_table *opp_table);
+bool _opp_remove_all_static(struct opp_table *opp_table);
void _get_opp_table_kref(struct opp_table *opp_table);
int _get_opp_count(struct opp_table *opp_table);
struct opp_table *_find_opp_table(struct device *dev);
port->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
}
- /* fall through */
+ fallthrough;
default:
/* Terminate from all other modes. */
case IEEE1284_MODE_NIBBLE:
case IEEE1284_MODE_BYTE:
parport_negotiate (port, IEEE1284_MODE_COMPAT);
- /* fall through */
+ fallthrough;
case IEEE1284_MODE_COMPAT:
pr_debug("%s: Using compatibility mode\n", port->name);
fn = port->ops->compat_write_data;
if (parport_negotiate (port, IEEE1284_MODE_NIBBLE)) {
return -EIO;
}
- /* fall through - to NIBBLE */
+ fallthrough; /* to NIBBLE */
case IEEE1284_MODE_NIBBLE:
pr_debug("%s: Using nibble mode\n", port->name);
fn = port->ops->nibble_read_data;
break;
default:
pr_warn("0x%lx: Unknown implementation ID\n", pb->base);
- /* Fall through - Assume 1 */
+ fallthrough; /* Assume 1 */
case 1:
pword = 1;
}
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0);
break;
- case IMX6QP: /* FALLTHROUGH */
+ case IMX6QP:
case IMX6Q:
/* power up core phy and enable ref clock */
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6SX_GPR12_PCIE_RX_EQ_MASK,
IMX6SX_GPR12_PCIE_RX_EQ_2);
- /* FALLTHROUGH */
+ fallthrough;
default:
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
dev_err(dev, "pcie_aux clock source missing or invalid\n");
return PTR_ERR(imx6_pcie->pcie_aux);
}
- /* fall through */
+ fallthrough;
case IMX7D:
if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR)
imx6_pcie->controller_id = 1;
pr_warn("unknown window size %ld - defaulting to 256M\n",
window_size);
window_size = SZ_256M;
- /* fall-through */
+ fallthrough;
case SZ_256M:
val |= RCAR_USBCTR_PCIAHB_WIN1_256M;
break;
break;
case PCI_HEADER_TYPE_BRIDGE:
function = 0x8;
- /* fall through */
+ fallthrough;
case PCI_HEADER_TYPE_MULTIBRIDGE:
/* We assume here that only 1 bus behind the bridge
TO DO: add functionality for several:
switch (ctrl->state) {
case BLINKINGOFF_STATE:
cancel_delayed_work(&ctrl->button_work);
- /* fall through */
+ fallthrough;
case ON_STATE:
ctrl->state = POWEROFF_STATE;
mutex_unlock(&ctrl->state_lock);
switch (ctrl->state) {
case BLINKINGON_STATE:
cancel_delayed_work(&ctrl->button_work);
- /* fall through */
+ fallthrough;
case OFF_STATE:
ctrl->state = POWERON_STATE;
mutex_unlock(&ctrl->state_lock);
struct zpci_dev *zdev = container_of(hotplug_slot, struct zpci_dev,
hotplug_slot);
struct pci_dev *pdev;
- struct zpci_bus *zbus = zdev->zbus;
int rc;
if (!zpci_fn_configured(zdev->state))
return -EIO;
- pdev = pci_get_slot(zbus->bus, zdev->devfn);
- if (pdev) {
- if (pci_num_vf(pdev))
- return -EBUSY;
-
- pci_stop_and_remove_bus_device_locked(pdev);
+ pdev = pci_get_slot(zdev->zbus->bus, zdev->devfn);
+ if (pdev && pci_num_vf(pdev)) {
pci_dev_put(pdev);
+ return -EBUSY;
}
+ zpci_remove_device(zdev);
+
rc = zpci_disable_device(zdev);
if (rc)
return rc;
switch (p_slot->state) {
case BLINKINGON_STATE:
cancel_delayed_work(&p_slot->work);
- /* fall through */
+ fallthrough;
case STATIC_STATE:
p_slot->state = POWERON_STATE;
mutex_unlock(&p_slot->lock);
switch (p_slot->state) {
case BLINKINGOFF_STATE:
cancel_delayed_work(&p_slot->work);
- /* fall through */
+ fallthrough;
case STATIC_STATE:
p_slot->state = POWEROFF_STATE;
mutex_unlock(&p_slot->lock);
return -1;
for (i = 0; i < num_clients; i++) {
- if (IS_ENABLED(CONFIG_DMA_VIRT_OPS) &&
- clients[i]->dma_ops == &dma_virt_ops) {
+#ifdef CONFIG_DMA_VIRT_OPS
+ if (clients[i]->dma_ops == &dma_virt_ops) {
if (verbose)
dev_warn(clients[i],
"cannot be used for peer-to-peer DMA because the driver makes use of dma_virt_ops\n");
return -1;
}
+#endif
pci_client = find_parent_pci_dev(clients[i]);
if (!pci_client) {
* this should never happen because it will be prevented
* by the check in pci_p2pdma_distance_many()
*/
- if (WARN_ON_ONCE(IS_ENABLED(CONFIG_DMA_VIRT_OPS) &&
- dev->dma_ops == &dma_virt_ops))
+#ifdef CONFIG_DMA_VIRT_OPS
+ if (WARN_ON_ONCE(dev->dma_ops == &dma_virt_ops))
return 0;
+#endif
for_each_sg(sg, s, nents, i) {
paddr = sg_phys(s);
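The move from IS_ENABLED() to a preprocessor guard matters because IS_ENABLED() only folds the branch after the whole expression has compiled, and here dma_virt_ops is only declared when CONFIG_DMA_VIRT_OPS is set, so the bare reference would break the build. The distinction, sketched with a hypothetical foo_ops declared only under CONFIG_FOO:

	/* Fails to compile when CONFIG_FOO=n (foo_ops undeclared): */
	if (IS_ENABLED(CONFIG_FOO) && dev->ops == &foo_ops)
		return 0;

	/* Compiles in both configurations: */
	#ifdef CONFIG_FOO
	if (dev->ops == &foo_ops)
		return 0;
	#endif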
if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
&& !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
need_restore = true;
- /* Fall-through - force to D0 */
+ fallthrough; /* force to D0 */
default:
pmcsr = 0;
break;
case PCI_D2:
if (pci_no_d1d2(dev))
break;
- /* else, fall through */
+ fallthrough;
default:
target_state = state;
}
}
/* If arch decided it can't, fall through... */
#endif /* HAVE_PCI_MMAP */
- /* fall through */
+ fallthrough;
default:
ret = -EINVAL;
break;
case PCI_DEVICE_ID_JMICRON_JMB366:
/* Redirect IDE second PATA port to the right spot */
conf5 |= (1 << 24);
- /* Fall through */
+ fallthrough;
case PCI_DEVICE_ID_JMICRON_JMB361:
case PCI_DEVICE_ID_JMICRON_JMB363:
case PCI_DEVICE_ID_JMICRON_JMB369:
if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM &&
dev->subsystem_device == 0x0299)
return;
- /* else, fall through */
+ fallthrough;
case PCI_DEVICE_ID_NETMOS_9735:
case PCI_DEVICE_ID_NETMOS_9745:
case PCI_DEVICE_ID_NETMOS_9845:
additional_mmio_size = pci_hotplug_mmio_size;
additional_mmio_pref_size = pci_hotplug_mmio_pref_size;
}
- /* Fall through */
+ fallthrough;
default:
pbus_size_io(bus, realloc_head ? 0 : additional_io_size,
additional_io_size, realloc_head);
case XenbusStateClosed:
if (xdev->state == XenbusStateClosed)
break;
- /* fall through - Missed the backend's CLOSING state. */
+ fallthrough; /* Missed the backend's CLOSING state */
case XenbusStateClosing:
dev_warn(&xdev->dev, "backend going away!\n");
pcifront_try_disconnect(pdev);
switch (state->Vcc) {
case 50:
++v;
- /* fall through */
+ fallthrough;
case 33:
++v;
- /* fall through */
+ fallthrough;
case 0:
break;
default:
switch (state->Vpp) {
case 12:
++p;
- /* fall through */
+ fallthrough;
case 33:
case 50:
++p;
- /* fall through */
+ fallthrough;
case 0:
break;
default:
break;
case CCN_TYPE_SBAS:
ccn->sbas_present = 1;
- /* Fall-through */
+ fallthrough;
default:
component = &ccn->node[id];
break;
default:
dev_warn(dev, "unknown PMSIDR_EL1.Interval [%d]; assuming 8\n",
fld);
- /* Fallthrough */
+ fallthrough;
case 8:
spe_pmu->min_period = 4096;
}
default:
dev_warn(dev, "unknown PMSIDR_EL1.CountSize [%d]; assuming 2\n",
fld);
- /* Fallthrough */
+ fallthrough;
case 2:
spe_pmu->counter_sz = 12;
}
case PHY_MODE_USB_OTG:
case PHY_MODE_USB_HOST:
val |= ULPI_INT_IDGRD;
- /* fall through */
+ fallthrough;
case PHY_MODE_USB_DEVICE:
val |= ULPI_INT_SESS_VALID;
default:
rport->state = OTG_STATE_B_IDLE;
if (!vbus_attach)
rockchip_usb2phy_power_off(rport->phy);
- /* fall through */
+ fallthrough;
case OTG_STATE_B_IDLE:
if (extcon_get_state(rphy->edev, EXTCON_USB_HOST) > 0) {
dev_dbg(&rport->phy->dev, "usb otg host connect\n");
rphy->chg_type = POWER_SUPPLY_TYPE_USB_DCP;
else
rphy->chg_type = POWER_SUPPLY_TYPE_USB_CDP;
- /* fall through */
+ fallthrough;
case USB_CHG_STATE_SECONDARY_DONE:
rphy->chg_state = USB_CHG_STATE_DETECTED;
delay = 0;
- /* fall through */
+ fallthrough;
case USB_CHG_STATE_DETECTED:
/* put the controller in normal mode */
property_enable(base, &rphy->phy_cfg->chg_det.opmode, true);
dev_dbg(&rport->phy->dev, "FS/LS online\n");
break;
}
- /* fall through */
+ fallthrough;
case PHY_STATE_CONNECT:
if (rport->suspended) {
dev_dbg(&rport->phy->dev, "Connected\n");
input_sync(priv->pwrbtn);
input_report_key(priv->pwrbtn, KEY_POWER, 0);
input_sync(priv->pwrbtn);
- /* fall through */
+ fallthrough;
case EVENT_POWER_PRESS_WAKE:
case EVENT_TIMED_HOST_WAKE:
pm_wakeup_event(priv->pwrbtn->dev.parent,
*value = tmp & 0x1;
return 0;
}
- /* fall through */
+ fallthrough;
default:
return AE_ERROR;
}
status = AMW0_get_u32(value, cap);
break;
}
- /* fall through */
+ fallthrough;
case ACER_WMID:
status = WMID_get_u32(value, cap);
break;
return AMW0_set_u32(value, cap);
}
- /* fall through */
+ fallthrough;
case ACER_WMID:
return WMID_set_u32(value, cap);
case ACER_WMID_v2:
return wmid_v2_set_u32(value, cap);
else if (wmi_has_guid(WMID_GUID2))
return WMID_set_u32(value, cap);
- /* fall through */
+ fallthrough;
default:
return AE_BAD_PARAMETER;
}
switch (unit) {
case KBD_TIMEOUT_DAYS:
value *= 24;
- /* fall through */
+ fallthrough;
case KBD_TIMEOUT_HOURS:
value *= 60;
- /* fall through */
+ fallthrough;
case KBD_TIMEOUT_MINUTES:
value *= 60;
unit = KBD_TIMEOUT_SECONDS;
/* Power button press,release handle */
case SURFACE_BUTTON_NOTIFY_PRESS_POWER:
pressed = true;
- /*fall through*/
+ fallthrough;
case SURFACE_BUTTON_NOTIFY_RELEASE_POWER:
key_code = KEY_POWER;
break;
/* Home button press,release handle */
case SURFACE_BUTTON_NOTIFY_PRESS_HOME:
pressed = true;
- /*fall through*/
+ fallthrough;
case SURFACE_BUTTON_NOTIFY_RELEASE_HOME:
key_code = KEY_LEFTMETA;
break;
/* Volume up button press,release handle */
case SURFACE_BUTTON_NOTIFY_PRESS_VOLUME_UP:
pressed = true;
- /*fall through*/
+ fallthrough;
case SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_UP:
key_code = KEY_VOLUMEUP;
break;
/* Volume down button press,release handle */
case SURFACE_BUTTON_NOTIFY_PRESS_VOLUME_DOWN:
pressed = true;
- /*fall through*/
+ fallthrough;
case SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_DOWN:
key_code = KEY_VOLUMEDOWN;
break;
* AC status changed; can be triggered by plugging or
* unplugging AC adapter, docking or undocking. */
- /* fallthrough */
+ fallthrough;
case TP_HKEY_EV_KEY_NUMLOCK:
case TP_HKEY_EV_KEY_FN:
known_ev = true;
break;
}
- /* fallthrough - to default */
+ fallthrough; /* to default */
default:
known_ev = false;
}
idx -= 8;
}
#endif
- /* fallthrough */
+ fallthrough;
case TPACPI_THERMAL_TPEC_8:
if (idx <= 7) {
if (!acpi_ec_read(t + idx, &tmp))
result = hci_write(dev, HCI_SYSTEM_EVENT, 1);
if (result == TOS_SUCCESS)
pr_notice("Re-enabled hotkeys\n");
- /* Fall through */
+ fallthrough;
default:
retries--;
break;
USB_CH_IP_CUR_LVL_1P5;
break;
}
- /* else, fall through */
+ fallthrough;
case USB_STAT_HM_IDGND:
dev_err(di->dev, "USB Type - Charging not allowed\n");
di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P05;
* of 1sec for enabling charging
*/
msleep(1000);
- /* Intentional fall through */
+ fallthrough;
case AB8500_BM_USB_STATE_CONFIGURED:
/*
* USB is configured, enable charging with the charging
ab8500_fg_discharge_state_to(di,
AB8500_FG_DISCHARGE_INITMEASURING);
- /* Intentional fallthrough */
+ fallthrough;
case AB8500_FG_DISCHARGE_INITMEASURING:
/*
* Discard a number of samples during startup.
ab8500_fg_discharge_state_to(di,
AB8500_FG_DISCHARGE_RECOVERY);
- /* Intentional fallthrough */
+ fallthrough;
case AB8500_FG_DISCHARGE_RECOVERY:
sleep_time = di->bm->fg_params->recovery_sleep_timer;
abx500_chargalg_stop_charging(di);
di->charge_status = POWER_SUPPLY_STATUS_DISCHARGING;
abx500_chargalg_state_to(di, STATE_HANDHELD);
- /* Intentional fallthrough */
+ fallthrough;
case STATE_HANDHELD:
break;
di->maintenance_chg = false;
abx500_chargalg_state_to(di, STATE_SUSPENDED);
power_supply_changed(di->chargalg_psy);
- /* Intentional fallthrough */
+ fallthrough;
case STATE_SUSPENDED:
/* CHARGING is suspended */
case STATE_BATT_REMOVED_INIT:
abx500_chargalg_stop_charging(di);
abx500_chargalg_state_to(di, STATE_BATT_REMOVED);
- /* Intentional fallthrough */
+ fallthrough;
case STATE_BATT_REMOVED:
if (!di->events.batt_rem)
case STATE_HW_TEMP_PROTECT_INIT:
abx500_chargalg_stop_charging(di);
abx500_chargalg_state_to(di, STATE_HW_TEMP_PROTECT);
- /* Intentional fallthrough */
+ fallthrough;
case STATE_HW_TEMP_PROTECT:
if (!di->events.main_thermal_prot &&
case STATE_OVV_PROTECT_INIT:
abx500_chargalg_stop_charging(di);
abx500_chargalg_state_to(di, STATE_OVV_PROTECT);
- /* Intentional fallthrough */
+ fallthrough;
case STATE_OVV_PROTECT:
if (!di->events.vbus_ovv &&
case STATE_CHG_NOT_OK_INIT:
abx500_chargalg_stop_charging(di);
abx500_chargalg_state_to(di, STATE_CHG_NOT_OK);
- /* Intentional fallthrough */
+ fallthrough;
case STATE_CHG_NOT_OK:
if (!di->events.mainextchnotok &&
case STATE_SAFETY_TIMER_EXPIRED_INIT:
abx500_chargalg_stop_charging(di);
abx500_chargalg_state_to(di, STATE_SAFETY_TIMER_EXPIRED);
- /* Intentional fallthrough */
+ fallthrough;
case STATE_SAFETY_TIMER_EXPIRED:
/* We exit this state when charger is removed */
case STATE_WAIT_FOR_RECHARGE_INIT:
abx500_chargalg_hold_charging(di);
abx500_chargalg_state_to(di, STATE_WAIT_FOR_RECHARGE);
- /* Intentional fallthrough */
+ fallthrough;
case STATE_WAIT_FOR_RECHARGE:
if (di->batt_data.percent <=
di->bm->batt_id].maint_a_cur_lvl);
abx500_chargalg_state_to(di, STATE_MAINTENANCE_A);
power_supply_changed(di->chargalg_psy);
- /* Intentional fallthrough*/
+ fallthrough;
case STATE_MAINTENANCE_A:
if (di->events.maintenance_timer_expired) {
di->bm->batt_id].maint_b_cur_lvl);
abx500_chargalg_state_to(di, STATE_MAINTENANCE_B);
power_supply_changed(di->chargalg_psy);
- /* Intentional fallthrough*/
+ fallthrough;
case STATE_MAINTENANCE_B:
if (di->events.maintenance_timer_expired) {
di->charge_status = POWER_SUPPLY_STATUS_CHARGING;
abx500_chargalg_state_to(di, STATE_TEMP_LOWHIGH);
power_supply_changed(di->chargalg_psy);
- /* Intentional fallthrough */
+ fallthrough;
case STATE_TEMP_LOWHIGH:
if (!di->events.btemp_lowhigh)
case STATE_WD_EXPIRED_INIT:
abx500_chargalg_stop_charging(di);
abx500_chargalg_state_to(di, STATE_WD_EXPIRED);
- /* Intentional fallthrough */
+ fallthrough;
case STATE_WD_EXPIRED:
if (!di->events.ac_wd_expired &&
case STATE_TEMP_UNDEROVER_INIT:
abx500_chargalg_stop_charging(di);
abx500_chargalg_state_to(di, STATE_TEMP_UNDEROVER);
- /* Intentional fallthrough */
+ fallthrough;
case STATE_TEMP_UNDEROVER:
if (!di->events.btemp_underover)
case 100000:
if (power->axp20x_id == AXP221_ID)
return -EINVAL;
- /* fall through */
+ fallthrough;
case 500000:
case 900000:
val = (900000 - intval) / 400000;
*/
if (ec_device->mkbp_event_supported || port->psy_online)
break;
- /* fall through */
+ fallthrough;
case POWER_SUPPLY_PROP_CURRENT_MAX:
case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
case MAX8925_IRQ_VCHG_THM_OK_F:
/* Battery is not ready yet */
dev_dbg(chip->dev, "Battery temperature is out of range\n");
- /* Fall through */
+ fallthrough;
case MAX8925_IRQ_VCHG_DC_OVP:
dev_dbg(chip->dev, "Error detection\n");
__set_charger(info, 0);
break;
default:
dev_err(&pdev->dev, "Failed to find USB phy: %d\n", ret);
- /* fall-through */
+ fallthrough;
case -EPROBE_DEFER:
goto err_bat_irq;
break;
case WM8350_IRQ_EXT_USB_FB:
case WM8350_IRQ_EXT_WALL_FB:
wm8350_charger_config(wm8350, policy);
- /* Fall through */
+ fallthrough;
case WM8350_IRQ_EXT_BAT_FB:
power_supply_changed(power->battery);
power_supply_changed(power->usb);
switch (info->monitor_type) {
case PS3AV_MONITOR_TYPE_DVI:
dvi = PS3AV_MODE_DVI;
- /* fall through */
+ fallthrough;
case PS3AV_MONITOR_TYPE_HDMI:
id = ps3av_hdmi_get_id(info);
break;
switch (ch) {
case PS3AV_CMD_AUDIO_NUM_OF_CH_8:
audio->audio_enable[3] = 1;
- /* fall through */
+ fallthrough;
case PS3AV_CMD_AUDIO_NUM_OF_CH_6:
audio->audio_enable[2] = 1;
audio->audio_enable[1] = 1;
- /* fall through */
+ fallthrough;
case PS3AV_CMD_AUDIO_NUM_OF_CH_2:
default:
audio->audio_enable[0] = 1;
return result;
}
-static int idtcm_xfer(struct idtcm *idtcm,
- u8 regaddr,
- u8 *buf,
- u16 count,
- bool write)
+static int idtcm_xfer_read(struct idtcm *idtcm,
+ u8 regaddr,
+ u8 *buf,
+ u16 count)
{
struct i2c_client *client = idtcm->client;
struct i2c_msg msg[2];
int cnt;
- char *fmt = "i2c_transfer failed at %d in %s for %s, at addr: %04X!\n";
+ char *fmt = "i2c_transfer failed at %d in %s, at addr: %04X!\n";
msg[0].addr = client->addr;
msg[0].flags = 0;
msg[0].buf = &regaddr;
msg[1].addr = client->addr;
- msg[1].flags = write ? 0 : I2C_M_RD;
+ msg[1].flags = I2C_M_RD;
msg[1].len = count;
msg[1].buf = buf;
fmt,
__LINE__,
__func__,
- write ? "write" : "read",
regaddr);
return cnt;
} else if (cnt != 2) {
return 0;
}
+static int idtcm_xfer_write(struct idtcm *idtcm,
+ u8 regaddr,
+ u8 *buf,
+ u16 count)
+{
+ struct i2c_client *client = idtcm->client;
+ /* we add 1 byte for device register */
+ u8 msg[IDTCM_MAX_WRITE_COUNT + 1];
+ int cnt;
+ char *fmt = "i2c_master_send failed at %d in %s, at addr: %04X!\n";
+
+ if (count > IDTCM_MAX_WRITE_COUNT)
+ return -EINVAL;
+
+ msg[0] = regaddr;
+ memcpy(&msg[1], buf, count);
+
+ cnt = i2c_master_send(client, msg, count + 1);
+
+ if (cnt < 0) {
+ dev_err(&client->dev,
+ fmt,
+ __LINE__,
+ __func__,
+ regaddr);
+ return cnt;
+ }
+
+ return 0;
+}
+
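The write helper folds the register address and the payload into one buffer so the whole transfer is a single i2c_master_send(); the four-byte page-offset write below therefore goes on the wire as five bytes. A usage sketch (payload values illustrative):

	u8 buf[4] = { 0x00, 0x00, 0x10, 0x20 };	/* illustrative payload */
	/* wire format: { PAGE_ADDR, buf[0], buf[1], buf[2], buf[3] } */
	err = idtcm_xfer_write(idtcm, PAGE_ADDR, buf, sizeof(buf));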
static int idtcm_page_offset(struct idtcm *idtcm, u8 val)
{
u8 buf[4];
buf[2] = 0x10;
buf[3] = 0x20;
- err = idtcm_xfer(idtcm, PAGE_ADDR, buf, sizeof(buf), 1);
+ err = idtcm_xfer_write(idtcm, PAGE_ADDR, buf, sizeof(buf));
if (err) {
idtcm->page_offset = 0xff;
err = idtcm_page_offset(idtcm, hi);
if (err)
- goto out;
+ return err;
- err = idtcm_xfer(idtcm, lo, buf, count, write);
-out:
- return err;
+ if (write)
+ return idtcm_xfer_write(idtcm, lo, buf, count);
+
+ return idtcm_xfer_read(idtcm, lo, buf, count);
}
static int idtcm_read(struct idtcm *idtcm,
#define PEROUT_ENABLE_OUTPUT_MASK (0xdeadbeef)
+#define IDTCM_MAX_WRITE_COUNT (512)
+
/* Values of DPLL_N.DPLL_MODE.PLL_MODE */
enum pll_mode {
PLL_MODE_MIN = 0,
switch (map->dir) {
case MAP_INBOUND:
rio_unmap_inb_region(mport, map->phys_addr);
- /* fall through */
+ fallthrough;
case MAP_DMA:
dma_free_coherent(mport->dev.parent, map->size,
map->virt_addr, map->phys_addr);
if (rate_count > 0)
break;
- /* fall through */
+ fallthrough;
default:
/* Not supported for this regulator */
return -ENOTSUPP;
* (See include/linux/mfd/axp20x.h)
*/
reg = AXP803_DCDC_FREQ_CTRL;
- /* Fall through - to the check below.*/
+ fallthrough; /* to the check below */
case AXP806_ID:
/*
* AXP806 also have DCDC work frequency setting register at a
*/
if (axp20x->variant == AXP806_ID)
reg = AXP806_DCDC_FREQ_CTRL;
- /* Fall through */
+ fallthrough;
case AXP221_ID:
case AXP223_ID:
case AXP809_ID:
* (See include/linux/mfd/axp20x.h)
*/
reg = AXP806_DCDC_MODE_CTRL2;
- /* Fall through - to the check below. */
+ fallthrough; /* to the check below */
case AXP221_ID:
case AXP223_ID:
case AXP809_ID:
case EXCLUSIVE_GET:
dev_warn(dev,
"dummy supplies not allowed for exclusive requests\n");
- /* fall through */
+ fallthrough;
default:
return ERR_PTR(-ENODEV);
rdesc->linear_min_sel = 0;
break;
}
- /* Fall through - to the check below.*/
+ fallthrough; /* to the check below */
default:
rdesc->linear_min_sel = vsel_range[0];
switch (info->flags) {
case SMPS_OFFSET_EN:
voltage = 100000;
- /* fall through */
+ fallthrough;
case 0:
switch (index) {
case 0:
dev_info(dev, "received echo reply from %s\n", name);
break;
case RP_MBOX_SUSPEND_ACK:
- /* Fall through */
case RP_MBOX_SUSPEND_CANCEL:
oproc->suspend_acked = msg == RP_MBOX_SUSPEND_ACK;
complete(&oproc->pm_comp);
switch (id) {
case IMX8MQ_RESET_PCIEPHY:
- case IMX8MQ_RESET_PCIEPHY2: /* fallthrough */
+ case IMX8MQ_RESET_PCIEPHY2:
/*
* wait for more than 10us to release phy g_rst and
* btnrst
break;
case IMX8MQ_RESET_PCIE_CTRL_APPS_EN:
- case IMX8MQ_RESET_PCIE2_CTRL_APPS_EN: /* fallthrough */
- case IMX8MQ_RESET_MIPI_DSI_PCLK_RESET_N: /* fallthrough */
- case IMX8MQ_RESET_MIPI_DSI_ESC_RESET_N: /* fallthrough */
- case IMX8MQ_RESET_MIPI_DSI_DPI_RESET_N: /* fallthrough */
- case IMX8MQ_RESET_MIPI_DSI_RESET_N: /* fallthrough */
- case IMX8MQ_RESET_MIPI_DSI_RESET_BYTE_N: /* fallthrough */
+ case IMX8MQ_RESET_PCIE2_CTRL_APPS_EN:
+ case IMX8MQ_RESET_MIPI_DSI_PCLK_RESET_N:
+ case IMX8MQ_RESET_MIPI_DSI_ESC_RESET_N:
+ case IMX8MQ_RESET_MIPI_DSI_DPI_RESET_N:
+ case IMX8MQ_RESET_MIPI_DSI_RESET_N:
+ case IMX8MQ_RESET_MIPI_DSI_RESET_BYTE_N:
value = assert ? 0 : bit;
break;
}
break;
case GLINK_VERSION_1:
glink->features &= features;
- /* FALLTHROUGH */
+ fallthrough;
default:
qcom_glink_send_version_ack(glink);
break;
break;
glink->features &= features;
- /* FALLTHROUGH */
+ fallthrough;
default:
qcom_glink_send_version(glink);
break;
return -EINVAL;
wdt_margin = new_margin;
wdt_ping();
- /* Fall through */
+ fallthrough;
case WDIOC_GETTIMEOUT:
return put_user(wdt_margin, (int __user *)arg);
default:
dev_warn(&pcf85063->rtc->dev, "Unknown quartz-load-femtofarads value: %d. Assuming 7000",
load);
- /* fall through */
+ fallthrough;
case 7000:
break;
case 12500:
default:
dev_warn(&client->dev, "Unknown quartz-load-femtofarads value: %d. Assuming 12500",
load);
- /* fall through */
+ fallthrough;
case 12500:
value |= REG_CONTROL1_CAP_SEL;
break;
default:
dev_warn(&pdev->dev,
"invalid crystal-freq specified in device-tree. Assuming no crystal\n");
- /* fall-through */
+ fallthrough;
case 0:
/* keep XTAL on in low-power mode */
pers0_set = STMP3XXX_RTC_PERSISTENT0_XTAL24MHZ_PWRUP;
rc = css_evaluate_known_subchannel(sch, 1);
if (rc == -EAGAIN)
css_schedule_eval(sch->schid);
+ /*
+ * The loop might take a long time for platforms with lots of
+ * known devices. Allow scheduling here.
+ */
+ cond_resched();
}
return 0;
}
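The same idiom applies to any long kernel-side scan: offer the scheduler a voluntary preemption point on each iteration. A generic sketch with hypothetical names:

	list_for_each_entry(dev, &known_devices, entry) {	/* hypothetical list */
		evaluate_device(dev);				/* hypothetical work */
		cond_resched();		/* let other tasks run on long lists */
	}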
grp->changed_side = 2;
break;
}
- /* Else, fall through */
+ fallthrough;
case MPCG_STATE_XID0IOWAIX:
case MPCG_STATE_XID7INITW:
case MPCG_STATE_XID7INITX:
/*fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW);*/
if (callback)
grp->send_qllc_disc = 1;
- /* Else, fall through */
+ fallthrough;
case MPCG_STATE_XID0IOWAIT:
fsm_deltimer(&grp->timer);
grp->outstanding_xid2 = 0;
if ((fsm_getstate(rch->fsm) == CH_XID0_PENDING) &&
(fsm_getstate(wch->fsm) == CH_XID0_PENDING))
break;
- /* Else, fall through */
+ fallthrough;
default:
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
}
grp->estconnfunc = NULL;
break;
}
- /* Else, fall through */
+ fallthrough;
case MPCG_STATE_FLOWC:
case MPCG_STATE_READY:
grp->send_qllc_disc = 2;
break;
case -EIO:
qeth_schedule_recovery(card);
- /* fall through */
+ fallthrough;
default:
qeth_clear_ipacmd_list(card);
goto err_idx;
card->info.mcl_level[3]);
break;
}
- /* fallthrough */
+ fallthrough;
case QETH_CARD_TYPE_IQD:
if (IS_VM_NIC(card) || (card->info.mcl_level[0] & 0x80)) {
card->info.mcl_level[0] = (char) _ebcasc[(__u8)
10000baseT_Full);
ethtool_link_ksettings_add_link_mode(cmd, advertising,
10000baseT_Full);
- /* fall through */
+ fallthrough;
case SPEED_1000:
ethtool_link_ksettings_add_link_mode(cmd, supported,
1000baseT_Full);
1000baseT_Half);
ethtool_link_ksettings_add_link_mode(cmd, advertising,
1000baseT_Half);
- /* fall through */
+ fallthrough;
case SPEED_100:
ethtool_link_ksettings_add_link_mode(cmd, supported,
100baseT_Full);
100baseT_Half);
ethtool_link_ksettings_add_link_mode(cmd, advertising,
100baseT_Half);
- /* fall through */
+ fallthrough;
case SPEED_10:
ethtool_link_ksettings_add_link_mode(cmd, supported,
10baseT_Full);
kfree(mac);
break;
}
- /* fall through */
+ fallthrough;
default:
/* for next call to set_rx_mode(): */
mac->disp_flag = QETH_DISP_ADDR_DELETE;
break;
}
addr->ref_counter = 1;
- /* fall through */
+ fallthrough;
default:
/* for next call to set_rx_mode(): */
addr->disp_flag = QETH_DISP_ADDR_DELETE;
return;
}
- del_timer(&req->timer);
+ del_timer_sync(&req->timer);
zfcp_fsf_protstatus_eval(req);
zfcp_fsf_fsfstatus_eval(req);
req->handler(req);
req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free);
req->issued = get_tod_clock();
if (zfcp_qdio_send(qdio, &req->qdio_req)) {
- del_timer(&req->timer);
+ del_timer_sync(&req->timer);
/* lookup request again, list might have changed */
zfcp_reqlist_find_rm(adapter->req_list, req_id);
zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1");
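del_timer() only deactivates a pending timer, while del_timer_sync() also waits for a handler already running on another CPU, which is what makes it safe to reuse or free the enclosing request afterwards. The race being closed, with a hypothetical helper:

	del_timer_sync(&req->timer);	/* handler cannot still be mid-flight */
	recycle_request(req);		/* hypothetical: now safe to reuse */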
case REQUEST_SENSE:
/* clear the internal sense magic */
SCp->cmnd[6] = 0;
- /* fall through */
+ fallthrough;
default:
/* OK, get it from the command */
switch(SCp->sc_data_direction) {
case BLOGIC_BAD_CMD_PARAM:
blogic_warn("BusLogic Driver Protocol Error 0x%02X\n",
adapter, adapter_status);
- /* fall through */
+ fallthrough;
case BLOGIC_DATA_UNDERRUN:
case BLOGIC_DATA_OVERRUN:
case BLOGIC_NOEXPECT_BUSFREE:
temp6 >>= 1;
switch (temp & 0x3) {
case AUTO_RATE_20: /* Synchronous, 20 mega-transfers/second */
- temp6 |= 0x8000; /* Fall through */
+ temp6 |= 0x8000;
+ fallthrough;
case AUTO_RATE_10: /* Synchronous, 10 mega-transfers/second */
- temp5 |= 0x8000; /* Fall through */
+ temp5 |= 0x8000;
+ fallthrough;
case AUTO_RATE_05: /* Synchronous, 5 mega-transfers/second */
- temp2 |= 0x8000; /* Fall through */
+ temp2 |= 0x8000;
+ fallthrough;
case AUTO_RATE_00: /* Asynchronous */
break;
}
return;
/* Reject message */
- /* Fall through */
+ fallthrough;
default:
/*
* If we get something weird that we aren't expecting,
!(dev->raw_io_64) ||
((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
break;
- /* fall through */
+ fallthrough;
case INQUIRY:
case READ_CAPACITY:
case TEST_UNIT_READY:
/* Issue FIB to tell Firmware to flush it's cache */
if ((aac_cache & 6) != 2)
return aac_synchronize(scsicmd);
- /* fall through */
+ fallthrough;
case INQUIRY:
{
struct inquiry_data inq_data;
SCSI_SENSE_BUFFERSIZE));
break;
}
- /* fall through */
+ fallthrough;
case RESERVE:
case RELEASE:
case REZERO_UNIT:
case START_STOP:
return aac_start_stop(scsicmd);
- /* FALLTHRU */
+ fallthrough;
default:
/*
* Unhandled commands
"enclosure services event");
scsi_device_set_state(device, SDEV_RUNNING);
}
- /* FALLTHRU */
+ fallthrough;
case CHANGE:
if ((channel == CONTAINER_CHANNEL)
&& (!dev->fsa_dev[container].valid)) {
!(aac->raw_io_64) ||
((cmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
break;
- /* fall through */
+ fallthrough;
case INQUIRY:
case READ_CAPACITY:
/*
switch (scb->hscb->task_management) {
case SIU_TASKMGMT_ABORT_TASK:
tag = SCB_GET_TAG(scb);
- /* fall through */
+ fallthrough;
case SIU_TASKMGMT_ABORT_TASK_SET:
case SIU_TASKMGMT_CLEAR_TASK_SET:
lun = scb->hscb->lun;
break;
case SIU_TASKMGMT_LUN_RESET:
lun = scb->hscb->lun;
- /* fall through */
+ fallthrough;
case SIU_TASKMGMT_TARGET_RESET:
{
struct ahd_devinfo devinfo;
*bus_width = MSG_EXT_WDTR_BUS_16_BIT;
break;
}
- /* FALLTHROUGH */
+ fallthrough;
case MSG_EXT_WDTR_BUS_8_BIT:
*bus_width = MSG_EXT_WDTR_BUS_8_BIT;
break;
break;
case MSG_MESSAGE_REJECT:
response = ahd_handle_msg_reject(ahd, devinfo);
- /* FALLTHROUGH */
+ fallthrough;
case MSG_NOOP:
done = MSGLOOP_MSGCOMPLETE;
break;
ahd_name(ahd), ahd_inb(ahd, SCSISIGI));
#endif
ahd->msg_flags |= MSG_FLAG_EXPECT_QASREJ_BUSFREE;
- /* FALLTHROUGH */
+ fallthrough;
case MSG_TERM_IO_PROC:
default:
reject = TRUE;
default:
case 5:
ahd_shutdown(ahd);
- /* FALLTHROUGH */
+ fallthrough;
case 4:
ahd_dmamap_unload(ahd, ahd->shared_data_dmat,
ahd->shared_data_map.dmamap);
- /* FALLTHROUGH */
+ fallthrough;
case 3:
ahd_dmamem_free(ahd, ahd->shared_data_dmat, ahd->qoutfifo,
ahd->shared_data_map.dmamap);
ahd_dmamap_destroy(ahd, ahd->shared_data_dmat,
ahd->shared_data_map.dmamap);
- /* FALLTHROUGH */
+ fallthrough;
case 2:
ahd_dma_tag_destroy(ahd, ahd->shared_data_dmat);
case 1:
}
ahd_dma_tag_destroy(ahd, scb_data->sense_dmat);
}
- /* fall through */
+ fallthrough;
case 6:
{
struct map_node *sg_map;
}
ahd_dma_tag_destroy(ahd, scb_data->sg_dmat);
}
- /* fall through */
+ fallthrough;
case 5:
{
struct map_node *hscb_map;
case FLX_CSTAT_OVER:
case FLX_CSTAT_UNDER:
warn_user++;
- /* fall through */
+ fallthrough;
case FLX_CSTAT_INVALID:
case FLX_CSTAT_OKAY:
if (warn_user == 0 && bootverbose == 0)
if ((scb->flags & SCB_ACTIVE) == 0)
printk("Inactive SCB in qinfifo\n");
ahd_done_with_status(ahd, scb, status);
- /* FALLTHROUGH */
+ fallthrough;
case SEARCH_REMOVE:
break;
case SEARCH_PRINT:
printk(" 0x%x", ahd->qinfifo[qinpos]);
- /* FALLTHROUGH */
+ fallthrough;
case SEARCH_COUNT:
ahd_qinfifo_requeue(ahd, prev_scb, scb);
prev_scb = scb;
if ((mk_msg_scb->flags & SCB_ACTIVE) == 0)
printk("Inactive SCB pending MK_MSG\n");
ahd_done_with_status(ahd, mk_msg_scb, status);
- /* FALLTHROUGH */
+ fallthrough;
case SEARCH_REMOVE:
{
u_int tail_offset;
}
case SEARCH_PRINT:
printk(" 0x%x", SCB_GET_TAG(scb));
- /* FALLTHROUGH */
+ fallthrough;
case SEARCH_COUNT:
break;
}
if ((scb->flags & SCB_ACTIVE) == 0)
printk("Inactive SCB in Waiting List\n");
ahd_done_with_status(ahd, scb, status);
- /* fall through */
+ fallthrough;
case SEARCH_REMOVE:
ahd_rem_wscb(ahd, scbid, prev, next, tid);
*list_tail = prev;
break;
case SEARCH_PRINT:
printk("0x%x ", scbid);
- /* fall through */
+ fallthrough;
case SEARCH_COUNT:
prev = scbid;
break;
case SCSI_STATUS_OK:
printk("%s: Interrupted for status of 0???\n",
ahd_name(ahd));
- /* FALLTHROUGH */
+ fallthrough;
default:
ahd_done(ahd, scb);
break;
fmt3_ins = &instr.format3;
fmt3_ins->address = ahd_resolve_seqaddr(ahd, fmt3_ins->address);
}
- /* fall through */
+ fallthrough;
case AIC_OP_OR:
case AIC_OP_AND:
case AIC_OP_XOR:
fmt1_ins->immediate = dconsts[fmt1_ins->immediate];
}
fmt1_ins->parity = 0;
- /* fall through */
+ fallthrough;
case AIC_OP_ROL:
{
int i, count;
break;
case CAM_AUTOSENSE_FAIL:
new_status = DID_ERROR;
- /* Fallthrough */
+ fallthrough;
case CAM_SCSI_STATUS_ERROR:
scsi_status = ahd_cmd_get_scsi_status(cmd);
*bus_width = MSG_EXT_WDTR_BUS_16_BIT;
break;
}
- /* FALLTHROUGH */
+ fallthrough;
case MSG_EXT_WDTR_BUS_8_BIT:
*bus_width = MSG_EXT_WDTR_BUS_8_BIT;
break;
break;
case MSG_MESSAGE_REJECT:
response = ahc_handle_msg_reject(ahc, devinfo);
- /* FALLTHROUGH */
+ fallthrough;
case MSG_NOOP:
done = MSGLOOP_MSGCOMPLETE;
break;
default:
case 5:
ahc_shutdown(ahc);
- /* FALLTHROUGH */
+ fallthrough;
case 4:
ahc_dmamap_unload(ahc, ahc->shared_data_dmat,
ahc->shared_data_dmamap);
- /* FALLTHROUGH */
+ fallthrough;
case 3:
ahc_dmamem_free(ahc, ahc->shared_data_dmat, ahc->qoutfifo,
ahc->shared_data_dmamap);
ahc_dmamap_destroy(ahc, ahc->shared_data_dmat,
ahc->shared_data_dmamap);
- /* FALLTHROUGH */
+ fallthrough;
case 2:
ahc_dma_tag_destroy(ahc, ahc->shared_data_dmat);
case 1:
}
ahc_dma_tag_destroy(ahc, scb_data->sg_dmat);
}
- /* fall through */
+ fallthrough;
case 6:
ahc_dmamap_unload(ahc, scb_data->sense_dmat,
scb_data->sense_dmamap);
- /* fall through */
+ fallthrough;
case 5:
ahc_dmamem_free(ahc, scb_data->sense_dmat, scb_data->sense,
scb_data->sense_dmamap);
ahc_dmamap_destroy(ahc, scb_data->sense_dmat,
scb_data->sense_dmamap);
- /* fall through */
+ fallthrough;
case 4:
ahc_dma_tag_destroy(ahc, scb_data->sense_dmat);
- /* fall through */
+ fallthrough;
case 3:
ahc_dmamap_unload(ahc, scb_data->hscb_dmat,
scb_data->hscb_dmamap);
- /* fall through */
+ fallthrough;
case 2:
ahc_dmamem_free(ahc, scb_data->hscb_dmat, scb_data->hscbs,
scb_data->hscb_dmamap);
ahc_dmamap_destroy(ahc, scb_data->hscb_dmat,
scb_data->hscb_dmamap);
- /* fall through */
+ fallthrough;
case 1:
ahc_dma_tag_destroy(ahc, scb_data->hscb_dmat);
break;
printk("Inactive SCB in Waiting List\n");
ahc_done(ahc, scb);
}
- /* fall through */
+ fallthrough;
case SEARCH_REMOVE:
next = ahc_rem_wscb(ahc, next, prev);
break;
address -= address_offset;
fmt3_ins->address = address;
}
- /* fall through */
+ fallthrough;
case AIC_OP_OR:
case AIC_OP_AND:
case AIC_OP_XOR:
fmt1_ins->opcode = AIC_OP_AND;
fmt1_ins->immediate = 0xff;
}
- /* fall through */
+ fallthrough;
case AIC_OP_ROL:
if ((ahc->features & AHC_ULTRA2) != 0) {
int i, count;
switch (pd->max_sas_lrate) {
case SAS_LINK_RATE_6_0_GBPS:
*speed_mask &= ~SAS_SPEED_60_DIS;
- /* fall through*/
+ fallthrough;
default:
case SAS_LINK_RATE_3_0_GBPS:
*speed_mask &= ~SAS_SPEED_30_DIS;
- /* fall through*/
+ fallthrough;
case SAS_LINK_RATE_1_5_GBPS:
*speed_mask &= ~SAS_SPEED_15_DIS;
}
switch (pd->min_sas_lrate) {
case SAS_LINK_RATE_6_0_GBPS:
*speed_mask |= SAS_SPEED_30_DIS;
- /* fall through*/
+ fallthrough;
case SAS_LINK_RATE_3_0_GBPS:
*speed_mask |= SAS_SPEED_15_DIS;
default:
switch (pd->max_sata_lrate) {
case SAS_LINK_RATE_3_0_GBPS:
*speed_mask &= ~SATA_SPEED_30_DIS;
- /* fall through*/
+ fallthrough;
default:
case SAS_LINK_RATE_1_5_GBPS:
*speed_mask &= ~SATA_SPEED_15_DIS;
/* link reset retries, this should be nominal */
control_phy->link_reset_retries = 10;
- /* fall through */
+ fallthrough;
case RELEASE_SPINUP_HOLD: /* 0x02 */
/* decide the func_mask */
switch (tcs.dl_opcode) {
default:
res = asd_clear_nexus(task);
- /* fallthrough */
+ fallthrough;
case TC_NO_ERROR:
break;
/* The task hasn't been sent to the device xor
case PCI_DEVICE_ID_ARECA_1202:
case PCI_DEVICE_ID_ARECA_1210:
raid6 = 0;
- /*FALLTHRU*/
+ fallthrough;
case PCI_DEVICE_ID_ARECA_1120:
case PCI_DEVICE_ID_ARECA_1130:
case PCI_DEVICE_ID_ARECA_1160:
msgqueue_flush(&info->scsi.msgs);
msgqueue_addmsg(&info->scsi.msgs, 1, MESSAGE_REJECT);
info->scsi.phase = PHASE_MSGOUT_EXPECT;
- /* fall through */
+ fallthrough;
case async:
dev->period = info->ifcfg.asyncperiod / 4;
fas216_done(info, DID_ABORT);
break;
}
- /* else, fall through */
+ fallthrough;
default: /* huh? */
printk(KERN_ERR "scsi%d.%c: unexpected disconnect in phase %s\n",
case STATE(STAT_STATUS, PHASE_DATAOUT): /* Data Out -> Status */
case STATE(STAT_STATUS, PHASE_DATAIN): /* Data In -> Status */
fas216_stoptransfer(info);
- /* fall through */
+ fallthrough;
case STATE(STAT_STATUS, PHASE_SELSTEPS):/* Sel w/ steps -> Status */
case STATE(STAT_STATUS, PHASE_MSGOUT): /* Message Out -> Status */
case STATE(STAT_MESGIN, PHASE_DATAOUT): /* Data Out -> Message In */
case STATE(STAT_MESGIN, PHASE_DATAIN): /* Data In -> Message In */
fas216_stoptransfer(info);
- /* fall through */
+ fallthrough;
case STATE(STAT_MESGIN, PHASE_COMMAND): /* Command -> Message In */
case STATE(STAT_MESGIN, PHASE_SELSTEPS):/* Sel w/ steps -> Message In */
fas216_message(info);
break;
}
- /* else, fall through */
+ fallthrough;
default:
fas216_log(info, 0, "internal phase %s for function done?"
switch (where_from) {
case TYPE_QUEUE:
fas216_allocate_tag(info, SCpnt);
- /* fall through */
+ fallthrough;
case TYPE_OTHER:
fas216_start_command(info, SCpnt);
break;
case ISCSI_PARAM_MAX_XMIT_DLENGTH:
if (conn->max_xmit_dlength > 65536)
conn->max_xmit_dlength = 65536;
- /* fall through */
+ fallthrough;
default:
return 0;
}
break;
case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
error = 1;
- /* fall through */
+ fallthrough;
case UNSOL_DATA_NOTIFY:
pasync_handle = pasync_ctx->async_entry[ci].data;
break;
case FCP_IODIR_RW:
bfa_stats(itnim, input_reqs);
bfa_stats(itnim, output_reqs);
- /* fall through */
+ fallthrough;
default:
bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa));
}
case BFI_IOIM_STS_TIMEDOUT:
bfa_stats(ioim->itnim, iocomp_timedout);
- /* fall through */
+ fallthrough;
case BFI_IOIM_STS_ABORTED:
rsp->io_status = BFI_IOIM_STS_ABORTED;
bfa_stats(ioim->itnim, iocomp_aborted);
switch (event) {
case BFA_TSKIM_SM_DONE:
bfa_reqq_wcancel(&tskim->reqq_wait);
- /* fall through */
+ fallthrough;
case BFA_TSKIM_SM_QRESUME:
bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
bfa_tskim_send_abort(tskim);
switch (event) {
case BFA_FCS_VPORT_SM_OFFLINE:
bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
- /* fall through */
+ fallthrough;
case BFA_FCS_VPORT_SM_RSP_OK:
case BFA_FCS_VPORT_SM_RSP_ERROR:
switch (event) {
case BFA_FCS_VPORT_SM_OFFLINE:
bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
- /* fall through */
+ fallthrough;
case BFA_FCS_VPORT_SM_RSP_OK:
case BFA_FCS_VPORT_SM_RSP_ERROR:
case RPSM_EVENT_LOGO_RCVD:
bfa_fcs_rport_send_logo_acc(rport);
- /* fall through */
+ fallthrough;
case RPSM_EVENT_PRLO_RCVD:
if (rport->prlo == BFA_TRUE)
bfa_fcs_rport_send_prlo_acc(rport);
bfa_fcxp_discard(rport->fcxp);
- /* fall through */
+ fallthrough;
case RPSM_EVENT_FAILED:
if (rport->plogi_retries < BFA_FCS_RPORT_MAX_RETRIES) {
rport->plogi_retries++;
* At least go offline when a PLOGI is received.
*/
bfa_fcxp_discard(rport->fcxp);
- /* fall through */
+ fallthrough;
case RPSM_EVENT_FAILED:
case RPSM_EVENT_ADDRESS_CHANGE:
case RPSM_EVENT_LOGO_RCVD:
bfa_fcs_rport_send_logo_acc(rport);
- /* fall through */
+ fallthrough;
case RPSM_EVENT_PRLO_RCVD:
if (rport->prlo == BFA_TRUE)
bfa_fcs_rport_send_prlo_acc(rport);
bfa_fcs_rport_send_plogiacc(rport, NULL);
break;
}
- /* fall through */
+ fallthrough;
case RPSM_EVENT_ADDRESS_CHANGE:
if (!bfa_fcs_lport_is_online(rport->port)) {
case RPSM_EVENT_LOGO_RCVD:
bfa_fcs_rport_send_logo_acc(rport);
- /* fall through */
+ fallthrough;
case RPSM_EVENT_PRLO_RCVD:
if (rport->prlo == BFA_TRUE)
bfa_fcs_rport_send_prlo_acc(rport);
case RPSM_EVENT_LOGO_RCVD:
bfa_fcs_rport_send_logo_acc(rport);
- /* fall through */
+ fallthrough;
case RPSM_EVENT_PRLO_RCVD:
if (rport->prlo == BFA_TRUE)
bfa_fcs_rport_send_prlo_acc(rport);
case IOCPF_E_INITFAIL:
bfa_iocpf_timer_stop(ioc);
- /* fall through */
+ fallthrough;
case IOCPF_E_TIMEOUT:
writel(1, ioc->ioc_regs.ioc_sem_reg);
case IOCPF_E_FAIL:
bfa_iocpf_timer_stop(ioc);
- /* fall through */
+ fallthrough;
case IOCPF_E_TIMEOUT:
bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
case BFA_DCONF_SM_IOCDISABLE:
case BFA_DCONF_SM_FLASH_COMP:
bfa_timer_stop(&dconf->timer);
- /* fall through */
+ fallthrough;
case BFA_DCONF_SM_TIMEOUT:
bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
dport->test_state = BFA_DPORT_ST_INP;
bfa_dport_result_start(dport, BFA_DPORT_OPMODE_MANU);
}
- /* fall thru */
+ fallthrough;
case BFA_DPORT_SM_REQFAIL:
bfa_sm_set_state(dport, bfa_dport_sm_enabled);
break;
case FCOE_KCQE_OPCODE_FCOE_ERROR:
- /* fall thru */
default:
printk(KERN_ERR PFX "unknown opcode 0x%x\n",
kcqe->op_code);
case CSIO_HWE_FW_DLOAD:
csio_set_state(&hw->sm, csio_hws_resetting);
/* Download firmware */
- /* Fall through */
+ fallthrough;
case CSIO_HWE_HBA_RESET:
csio_set_state(&hw->sm, csio_hws_resetting);
break;
case CSIO_LNE_LINK_DOWN:
- /* Fall through */
case CSIO_LNE_DOWN_LINK:
csio_set_state(&ln->sm, csio_lns_uninit);
if (csio_is_phys_ln(ln)) {
csio_q_eqid(hw, i) = CSIO_MAX_QID;
}
- /* fall through */
+ fallthrough;
case CSIO_INGRESS:
if (csio_q_iqid(hw, i) != CSIO_MAX_QID) {
csio_wr_cleanup_iq_ftr(hw, i);
int *need_rst)
{
switch (abort_reason) {
- case CPL_ERR_BAD_SYN: /* fall through */
+ case CPL_ERR_BAD_SYN:
case CPL_ERR_CONN_RESET:
return csk->state > CTP_ESTABLISHED ? -EPIPE : -ECONNRESET;
case CPL_ERR_XMIT_TIMEDOUT:
int *need_rst)
{
switch (abort_reason) {
- case CPL_ERR_BAD_SYN: /* fall through */
+ case CPL_ERR_BAD_SYN:
case CPL_ERR_CONN_RESET:
return csk->state > CTP_ESTABLISHED ?
-EPIPE : -ECONNRESET;
return err;
}
- __kfree_skb(skb);
log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
"itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n",
task->itt, skb, skb->len, skb->data_len, err);
+ __kfree_skb(skb);
iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err);
iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED);
return err;
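The cxgbi hunk above is not comment churn: the old code freed the skb with __kfree_skb() and then dereferenced it in log_debug(), a use-after-free on the transmit error path, so the free now follows the log. A hedged userspace sketch of the same ordering rule, all names invented:

#include <stdio.h>
#include <stdlib.h>

struct pkt {
	size_t len;	/* stand-in for skb->len */
};

static void pkt_free(struct pkt *p)	/* stand-in for __kfree_skb() */
{
	free(p);
}

static int xmit_error(struct pkt *p, int err)
{
	/* Log while the buffer is still valid... */
	fprintf(stderr, "xmit err %d, len %zu\n", err, p->len);
	/* ...and release it only afterwards. */
	pkt_free(p);
	return err;
}

int main(void)
{
	struct pkt *p = calloc(1, sizeof(*p));

	if (!p)
		return 1;
	p->len = 128;
	xmit_error(p, -5);	/* -5 is an illustrative error code */
	return 0;
}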
/* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
if (index == PRIMARY_HWQ)
cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 3, hwq);
- /* fall through */
+ fallthrough;
case UNMAP_TWO:
cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 2, hwq);
- /* fall through */
+ fallthrough;
case UNMAP_ONE:
cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 1, hwq);
- /* fall through */
+ fallthrough;
case FREE_IRQ:
cfg->ops->free_afu_irqs(hwq->ctx_cookie);
- /* fall through */
+ fallthrough;
case UNDO_NOOP:
/* No action required */
break;
switch (cfg->init_state) {
case INIT_STATE_CDEV:
cxlflash_release_chrdev(cfg);
- /* fall through */
+ fallthrough;
case INIT_STATE_SCSI:
cxlflash_term_local_luns(cfg);
scsi_remove_host(cfg->host);
- /* fall through */
+ fallthrough;
case INIT_STATE_AFU:
term_afu(cfg);
- /* fall through */
+ fallthrough;
case INIT_STATE_PCI:
cfg->ops->destroy_afu(cfg->afu_cookie);
pci_disable_device(pdev);
- /* fall through */
+ fallthrough;
case INIT_STATE_NONE:
free_mem(cfg);
scsi_host_put(cfg->host);
cxlflash_schedule_async_reset(cfg);
break;
}
- /* fall through - to retry */
+ fallthrough; /* to retry */
case -EAGAIN:
if (++nretry < 2)
goto retry;
- /* fall through - to exit */
+ fallthrough; /* to exit */
default:
break;
}
cfg->state = STATE_NORMAL;
wake_up_all(&cfg->reset_waitq);
ssleep(1);
- /* fall through */
+ fallthrough;
case STATE_RESET:
wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
if (cfg->state == STATE_NORMAL)
break;
- /* fall through */
+ fallthrough;
default:
rc = FAILED;
break;
wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
if (cfg->state == STATE_NORMAL)
goto retry;
- /* else, fall through */
+ fallthrough;
default:
/* Ideally should not happen */
dev_err(dev, "%s: Device is not ready, state=%d\n",
if (likely(do_ioctl))
break;
- /* fall through */
+ fallthrough;
default:
rc = -EINVAL;
goto out;
switch (sshdr.sense_key) {
case NO_SENSE:
case RECOVERED_ERROR:
- /* fall through */
case NOT_READY:
result &= ~SAM_STAT_CHECK_CONDITION;
break;
case UNIT_ATTENTION:
switch (sshdr.asc) {
case 0x29: /* Power on Reset or Device Reset */
- /* fall through */
+ fallthrough;
case 0x2A: /* Device capacity changed */
case 0x3F: /* Report LUNs changed */
/* Retry the command once more */
switch (sshdr.sense_key) {
case NO_SENSE:
case RECOVERED_ERROR:
- /* fall through */
case NOT_READY:
break;
case UNIT_ATTENTION:
switch (sshdr.asc) {
case 0x29: /* Power on Reset or Device Reset */
- /* fall through */
+ fallthrough;
case 0x2A: /* Device settings/capacity changed */
rc = read_cap16(sdev, lli);
if (rc) {
if (unlikely(rc))
goto cxlflash_ioctl_exit;
- /* fall through */
+ fallthrough;
case DK_CXLFLASH_MANAGE_LUN:
known_ioctl = true;
if (likely(do_ioctl))
break;
- /* fall through */
+ fallthrough;
default:
rc = -EINVAL;
goto cxlflash_ioctl_exit;
ret = SCSI_DH_OK;
break;
}
- /* Fallthrough */
+ fallthrough;
default:
sdev_printk(KERN_WARNING, sdev,
"%s: sending tur failed, sense %x/%x/%x\n",
rc = SCSI_DH_RETRY;
break;
}
- /* fall through */
+ fallthrough;
default:
sdev_printk(KERN_WARNING, sdev,
"%s: sending start_stop_unit failed, "
return complete_fmapi_req(a, rq, FI_STAT_SUCCESS);
}
- /* fall through */
+ fallthrough;
case FI_ACT_UP: /* Upload the components */
default:
a->init_msg = ESAS2R_INIT_MSG_GET_INIT;
break;
}
- /* fall through */
+ fallthrough;
case ESAS2R_INIT_MSG_GET_INIT:
if (msg == ESAS2R_INIT_MSG_GET_INIT) {
esas2r_hdebug("FAILED");
}
}
- /* fall through */
+ fallthrough;
default:
rq->req_stat = RS_SUCCESS;
case FASHME:
esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB);
- /* fallthrough... */
+ fallthrough;
case FAS236:
case PCSCSI:
case ESP_EVENT_DATA_IN:
write = 1;
- /* fallthru */
+ fallthrough;
case ESP_EVENT_DATA_OUT: {
struct esp_cmd_entry *ent = esp->active_cmd;
switch (fip->mode) {
default:
LIBFCOE_FIP_DBG(fip, "invalid mode %d\n", fip->mode);
- /* fall-through */
+ fallthrough;
case FIP_MODE_AUTO:
LIBFCOE_FIP_DBG(fip, "%s", "setting AUTO mode.\n");
- /* fall-through */
+ fallthrough;
case FIP_MODE_FABRIC:
case FIP_MODE_NON_FIP:
mutex_unlock(&fip->ctlr_mutex);
fc_fcoe_set_mac(mac, fh->fh_d_id);
fip->update_mac(lport, mac);
}
- /* fall through */
+ fallthrough;
case ELS_LS_RJT:
op = fr_encaps(fp);
if (op)
frport->enode_mac, 0);
break;
}
- /* fall through */
+ fallthrough;
case FIP_ST_VNMP_START:
LIBFCOE_FIP_DBG(fip, "vn_probe_req: "
"restart VN2VN negotiation\n");
break;
case BOARD_DTC3181E:
hostdata->io_width = 2; /* 16-bit PDMA */
- /* fall through */
+ fallthrough;
case BOARD_NCR53C400A:
case BOARD_HP_C2502:
hostdata->c400_ctl_status = 9;
hisi_hba->hw->get_events(hisi_hba, phy_no);
break;
}
- /* fallthru */
+ fallthrough;
case PHY_FUNC_RELEASE_SPINUP_HOLD:
default:
return -EOPNOTSUPP;
case WRITE_6:
case WRITE_12:
is_write = 1;
- /* fall through */
+ fallthrough;
case READ_6:
case READ_12:
if (*cdb_len == 6) {
switch (cmd->cmnd[0]) {
case WRITE_6:
is_write = 1;
- /* fall through */
+ fallthrough;
case READ_6:
first_block = (((cmd->cmnd[1] & 0x1F) << 16) |
(cmd->cmnd[2] << 8) |
break;
case WRITE_10:
is_write = 1;
- /* fall through */
+ fallthrough;
case READ_10:
first_block =
(((u64) cmd->cmnd[2]) << 24) |
break;
case WRITE_12:
is_write = 1;
- /* fall through */
+ fallthrough;
case READ_12:
first_block =
(((u64) cmd->cmnd[2]) << 24) |
break;
case WRITE_16:
is_write = 1;
- /* fall through */
+ fallthrough;
case READ_16:
first_block =
(((u64) cmd->cmnd[2]) << 56) |
port_id = (bsg_request->rqst_data.h_els.port_id[0] << 16) |
(bsg_request->rqst_data.h_els.port_id[1] << 8) |
bsg_request->rqst_data.h_els.port_id[2];
- /* fall through */
+ fallthrough;
case FC_BSG_RPT_ELS:
fc_flags = IBMVFC_FC_ELS;
break;
port_id = (bsg_request->rqst_data.h_ct.port_id[0] << 16) |
(bsg_request->rqst_data.h_ct.port_id[1] << 8) |
bsg_request->rqst_data.h_ct.port_id[2];
- /* fall through */
+ fallthrough;
case FC_BSG_RPT_CT:
fc_flags = IBMVFC_FC_CT_IU;
break;
return;
case IBMVFC_MAD_CRQ_ERROR:
ibmvfc_retry_host_init(vhost);
- /* fall through */
+ fallthrough;
case IBMVFC_MAD_DRIVER_FAILED:
ibmvfc_free_event(evt);
return;
case H_PERMISSION:
if (connection_broken(vscsi))
flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
- /* Fall through */
+ fallthrough;
default:
dev_err(&vscsi->dev, "adapter_info: h_copy_rdma to client failed, rc %ld\n",
rc);
break;
case H_CLOSED:
vscsi->flags |= CLIENT_FAILED;
- /* Fall through */
+ fallthrough;
case H_DROPPED:
vscsi->flags |= RESPONSE_Q_DOWN;
- /* Fall through */
+ fallthrough;
case H_REMOTE_PARM:
dev_err(&vscsi->dev, "ping_response: h_send_crq failed, rc %ld\n",
rc);
case 1: /* Phase 1 - Connected */
imm_connect(dev, CONNECT_EPP_MAYBE);
cmd->SCp.phase++;
- /* fall through */
+ fallthrough;
case 2: /* Phase 2 - We are now talking to the scsi bus */
if (!imm_select(dev, scmd_id(cmd))) {
return 0;
}
cmd->SCp.phase++;
- /* fall through */
+ fallthrough;
case 3: /* Phase 3 - Ready to accept a command */
w_ctr(ppb, 0x0c);
if (!imm_send_command(cmd))
return 0;
cmd->SCp.phase++;
- /* fall through */
+ fallthrough;
case 4: /* Phase 4 - Setup scatter/gather buffers */
if (scsi_bufflen(cmd)) {
cmd->SCp.phase++;
if (cmd->SCp.this_residual & 0x01)
cmd->SCp.this_residual++;
- /* fall through */
+ fallthrough;
case 5: /* Phase 5 - Pre-Data transfer stage */
/* Spin lock for BUSY */
if (imm_negotiate(dev))
return 0;
cmd->SCp.phase++;
- /* fall through */
+ fallthrough;
case 6: /* Phase 6 - Data transfer stage */
/* Spin lock for BUSY */
return 1;
}
cmd->SCp.phase++;
- /* fall through */
+ fallthrough;
case 7: /* Phase 7 - Post data transfer stage */
if ((dev->dp) && (dev->rd)) {
}
}
cmd->SCp.phase++;
- /* fall through */
+ fallthrough;
case 8: /* Phase 8 - Read status/message */
/* Check for data overrun */
break;
case SCU_EVENT_LINK_FAILURE:
scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
- /* fall through */
+ fallthrough;
case SCU_EVENT_HARD_RESET_RECEIVED:
/* Start the oob/sn state machine over again */
sci_change_state(&iphy->sm, SCI_PHY_STARTING);
/* Kill all outstanding requests for the device. */
sci_remote_device_terminate_requests(idev);
- /* Fall through - into the default case... */
+ fallthrough; /* into the default case */
default:
clear_bit(IDEV_IO_READY, &idev->flags);
break;
break;
}
- /* fall through - and treat as unhandled... */
+ fallthrough; /* and treat as unhandled */
default:
dev_dbg(scirdev_to_dev(idev),
"%s: device: %p event code: %x: %s\n",
case RNC_DEST_READY:
case RNC_DEST_SUSPENDED_RESUME:
rnc->destination_state = RNC_DEST_READY;
- /* Fall through... */
+ fallthrough;
case RNC_DEST_FINAL:
sci_remote_node_context_resume(rnc, rnc->user_callback,
rnc->user_cookie);
__func__, sci_rnc);
return SCI_FAILURE_INVALID_STATE;
}
- /* Fall through - and handle like SCI_RNC_POSTING */
+ fallthrough; /* and handle like SCI_RNC_POSTING */
case SCI_RNC_RESUMING:
- /* Fall through - and handle like SCI_RNC_POSTING */
+ fallthrough; /* and handle like SCI_RNC_POSTING */
case SCI_RNC_POSTING:
/* Set the destination state to AWAIT - this signals the
* entry into the SCI_RNC_READY state that a suspension
* and don't wait for the task response.
*/
sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
- /* Fall through - and handle like ABORTING... */
+ fallthrough; /* and handle like ABORTING */
case SCI_REQ_ABORTING:
if (!isci_remote_device_is_safe_to_abort(ireq->target_device))
set_bit(IREQ_PENDING_ABORT, &ireq->flags);
switch (op) {
case ELS_LS_RJT:
FC_EXCH_DBG(aborted_ep, "LS_RJT for RRQ\n");
- /* fall through */
+ fallthrough;
case ELS_LS_ACC:
goto cleanup;
default:
case FC_EOF_T:
if (f_ctl & FC_FC_END_SEQ)
skb_trim(fp_skb(fp), fr_len(fp) - FC_FC_FILL(f_ctl));
- /* fall through */
+ fallthrough;
case FC_EOF_N:
if (fh->fh_type == FC_TYPE_BLS)
fc_exch_recv_bls(ema->mp, fp);
brp = fc_frame_payload_get(fp, sizeof(*brp));
if (brp && brp->br_reason == FC_BA_RJT_LOG_ERR)
break;
- /* fall thru */
+ fallthrough;
default:
/*
* we will let the command timeout
"device %x invalid REC reject %d/%d\n",
fsp->rport->port_id, rjt->er_reason,
rjt->er_explan);
- /* fall through */
+ fallthrough;
case ELS_RJT_UNSUP:
FC_FCP_DBG(fsp, "device does not support REC\n");
rpriv = fsp->rport->dd_data;
FC_FCP_DBG(fsp, "REC %p fid %6.6x error unexpected error %d\n",
fsp, fsp->rport->port_id, error);
fsp->status_code = FC_CMD_PLOGO;
- /* fall through */
+ fallthrough;
case -FC_EX_TIMEOUT:
/*
break;
case -FC_EX_CLOSED: /* e.g., link failure */
FC_FCP_DBG(fsp, "SRR error, exchange closed\n");
- /* fall through */
+ fallthrough;
default:
fc_fcp_retry_cmd(fsp, FC_ERROR);
break;
case LPORT_ST_DPRT:
FC_LPORT_DBG(lport, "Skipping lport state %s to SCR\n",
fc_lport_state(lport));
- /* fall thru */
+ fallthrough;
case LPORT_ST_SCR:
fc_lport_enter_scr(lport);
break;
kref_put(&rdata->kref, fc_rport_destroy);
goto busy;
}
- /* fall through */
+ fallthrough;
default:
FC_RPORT_DBG(rdata,
"Reject ELS 0x%02x while in state %s\n",
hdr_lun = scsilun_to_int(&tmf->lun);
if (hdr_lun != task->sc->device->lun)
return 0;
- /* fall through */
+ fallthrough;
case ISCSI_TM_FUNC_TARGET_WARM_RESET:
/*
* Fail all SCSI cmd PDUs
sc->result = DID_NO_CONNECT << 16;
break;
}
- /* fall through */
+ fallthrough;
case ISCSI_STATE_IN_RECOVERY:
reason = FAILURE_SESSION_IN_RECOVERY;
sc->result = DID_IMM_RETRY << 16;
"progress\n");
goto success;
}
- /* fall through */
+ fallthrough;
default:
conn->tmf_state = TMF_INITIAL;
goto failed;
iscsi_tcp_data_recv_prep(tcp_conn);
return 0;
}
- /* fall through */
+ fallthrough;
case ISCSI_OP_LOGOUT_RSP:
case ISCSI_OP_NOOP_IN:
case ISCSI_OP_SCSI_TMFUNC_RSP:
case SAS_END_DEVICE:
if (ex_phy->attached_sata_dev)
return sas_ata_clear_pending(dev, ex_phy);
- /* fall through */
+ fallthrough;
default:
return -ENODEV;
}
rphy = NULL;
break;
}
- /* fall through */
+ fallthrough;
case SAS_END_DEVICE:
rphy = sas_end_device_alloc(port->port);
break;
} else
memcpy(dev->port->disc.fanout_sas_addr,
ex_phy->attached_sas_addr, SAS_ADDR_SIZE);
- /* fallthrough */
+ fallthrough;
case SAS_EDGE_EXPANDER_DEVICE:
child = sas_ex_discover_expander(dev, phy_id);
break;
sas_scsi_clear_queue_lu(work_q, cmd);
goto Again;
}
- /* fallthrough */
+ fallthrough;
case TASK_IS_NOT_AT_LU:
case TASK_ABORT_FAILED:
pr_notice("task 0x%p is not at LU: I_T recover\n",
case SLI_MGMT_GHAT:
case SLI_MGMT_GRPL:
rsp_size = FC_MAX_NS_RSP;
- /* fall through */
+ fallthrough;
case SLI_MGMT_DHBA:
case SLI_MGMT_DHAT:
pe = (struct lpfc_fdmi_port_entry *)&CtReq->un.PortID;
case SLI_MGMT_GPAT:
case SLI_MGMT_GPAS:
rsp_size = FC_MAX_NS_RSP;
- /* fall through */
+ fallthrough;
case SLI_MGMT_DPRT:
case SLI_MGMT_DPA:
pe = (struct lpfc_fdmi_port_entry *)&CtReq->un.PortID;
lpfc_nlp_put(ndlp);
return;
}
- /* fall through */
+ fallthrough;
default:
/* Try to recover from this error */
if (phba->sli_rev == LPFC_SLI_REV4)
case CMD_GEN_REQUEST64_CR:
if (iocb->context_un.ndlp == ndlp)
return 1;
- /* fall through */
+ fallthrough;
case CMD_ELS_REQUEST64_CR:
if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
return 1;
- /* fall through */
+ fallthrough;
case CMD_XMIT_ELS_RSP64_CX:
if (iocb->context1 == (uint8_t *) ndlp)
return 1;
- /* fall through */
}
} else if (pring->ringno == LPFC_FCP_RING) {
/* Skip match check if waiting to relogin to FCP target */
case LPFC_LINK_UP:
lpfc_issue_clear_la(phba, vport);
- /* fall through */
+ fallthrough;
case LPFC_LINK_UNKNOWN:
case LPFC_WARM_START:
case LPFC_INIT_START:
case NLP_STE_NPR_NODE:
if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
break;
- /* fall through */
+ fallthrough;
case NLP_STE_REG_LOGIN_ISSUE:
case NLP_STE_PRLI_ISSUE:
case NLP_STE_UNMAPPED_NODE:
lpfc_ncmd, nCmd,
lpfc_ncmd->cur_iocbq.sli4_xritag,
bf_get(lpfc_wcqe_c_xb, wcqe));
- /* fall through */
+ fallthrough;
default:
out_err:
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
break;
}
- /* fall through */
+ fallthrough;
case SCSI_PROT_WRITE_INSERT:
/*
* For WRITE_INSERT, force the error
rc = BG_ERR_TGT | BG_ERR_CHECK;
break;
}
- /* fall through */
+ fallthrough;
case SCSI_PROT_WRITE_INSERT:
/*
* For WRITE_INSERT, force the
switch (op) {
case SCSI_PROT_WRITE_PASS:
rc = BG_ERR_CHECK;
- /* fall through */
+ fallthrough;
case SCSI_PROT_WRITE_INSERT:
/*
lpfc_cmd->cur_iocbq.sli4_lxritag,
0, 0);
}
- /* fall through */
+ fallthrough;
default:
cmd->result = DID_ERROR << 16;
break;
*/
if (piocb->iocb_cmpl)
piocb->iocb_cmpl = NULL;
- /*FALLTHROUGH*/
+ fallthrough;
case CMD_CREATE_XRI_CR:
case CMD_CLOSE_XRI_CN:
case CMD_CLOSE_XRI_CX:
cmnd = CMD_XMIT_SEQUENCE64_CR;
if (phba->link_flag & LS_LOOPBACK_MODE)
bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
- /* fall through */
+ fallthrough;
case CMD_XMIT_SEQUENCE64_CR:
/* word3 iocb=io_tag32 wqe=reserved */
wqe->xmit_sequence.rsvd3 = 0;
case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2537 Receive Frame Truncated!!\n");
- /* fall through */
+ fallthrough;
case FC_STATUS_RQ_SUCCESS:
spin_lock_irqsave(&phba->hbalock, iflags);
lpfc_sli4_rq_release(hrq, drq);
atomic_read(&tgtp->rcv_fcp_cmd_out),
atomic_read(&tgtp->xmt_fcp_release));
}
- /* fallthrough */
+ fallthrough;
case FC_STATUS_INSUFF_BUF_NEED_BUF:
hrq->RQ_no_posted_buf++;
case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6126 Receive Frame Truncated!!\n");
- /* fall through */
+ fallthrough;
case FC_STATUS_RQ_SUCCESS:
spin_lock_irqsave(&phba->hbalock, iflags);
lpfc_sli4_rq_release(hrq, drq);
atomic_read(&tgtp->rcv_fcp_cmd_out),
atomic_read(&tgtp->xmt_fcp_release));
}
- /* fallthrough */
+ fallthrough;
case FC_STATUS_INSUFF_BUF_NEED_BUF:
hrq->RQ_no_posted_buf++;
status = -EINVAL;
goto out;
}
- /* fall through - otherwise default to smallest count */
+ fallthrough; /* otherwise default to smallest count */
case 256:
bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
LPFC_EQ_CNT_256);
LPFC_CQ_CNT_WORD7);
break;
}
- /* fall through */
+ fallthrough;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0361 Unsupported CQ count: "
status = -EINVAL;
goto out;
}
- /* fall through - otherwise default to smallest count */
+ fallthrough; /* otherwise default to smallest count */
case 256:
bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
LPFC_CQ_CNT_256);
LPFC_CQ_CNT_WORD7);
break;
}
- /* fall through */
+ fallthrough;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3118 Bad CQ count. (%d)\n",
status = -EINVAL;
goto out;
}
- /* fall through - otherwise default to smallest */
+ fallthrough; /* otherwise default to smallest */
case 256:
bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
&cq_set->u.request, LPFC_CQ_CNT_256);
status = -EINVAL;
goto out;
}
- /* fall through - otherwise default to smallest count */
+ fallthrough; /* otherwise default to smallest count */
case 16:
bf_set(lpfc_mq_context_ring_size,
&mq_create_ext->u.request.context,
status = -EINVAL;
goto out;
}
- /* fall through - otherwise default to smallest count */
+ fallthrough; /* otherwise default to smallest count */
case 512:
bf_set(lpfc_rq_context_rqe_count,
&rq_create->u.request.context,
status = -EINVAL;
goto out;
}
- /* fall through - otherwise default to smallest count */
+ fallthrough; /* otherwise default to smallest count */
case 512:
bf_set(lpfc_rq_context_rqe_count,
&rq_create->u.request.context,
if (adapter->support_random_del && adapter->read_ldidmap )
switch (cmd->cmnd[0]) {
- case READ_6: /* fall through */
- case WRITE_6: /* fall through */
- case READ_10: /* fall through */
+ case READ_6:
+ case WRITE_6:
+ case READ_10:
case WRITE_10:
ldrv_num += 0x80;
}
return scb;
#if MEGA_HAVE_CLUSTERING
- case RESERVE: /* Fall through */
+ case RESERVE:
case RELEASE:
/*
adapter->flag |= (1L << cmd->device->channel);
}
- /* Fall through */
+ fallthrough;
default:
pthru->numsgelements = mega_build_sglist(adapter, scb,
&pthru->dataxferaddr, &pthru->dataxferlen);
adapter->flag |= (1L << cmd->device->channel);
}
- /* Fall through */
+ fallthrough;
default:
epthru->numsgelements = mega_build_sglist(adapter, scb,
&epthru->dataxferaddr, &epthru->dataxferlen);
return NULL;
}
- /* Fall through */
+ fallthrough;
case READ_CAPACITY:
/*
megasas_complete_int_cmd(instance, cmd);
break;
}
- /* fall through */
+ fallthrough;
case MFI_CMD_LD_READ:
case MFI_CMD_LD_WRITE:
atomic_dec(&lbinfo->scsi_pending_cmds[cmd_fusion->pd_r1_lb]);
cmd_fusion->scmd->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
}
- /* Fall through - and complete IO */
+ fallthrough; /* and complete IO */
case MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST: /* LD-IO Path */
atomic_dec(&instance->fw_outstanding);
if (cmd_fusion->r1_alt_dev_handle == MR_DEVHANDLE_INVALID) {
/* huh? we expected a phase mismatch */
ms->n_msgin = 0;
ms->msgphase = msg_in;
- /* fall through */
+ fallthrough;
case msg_in:
/* should have some message bytes in fifo */
ioc_info(ioc, "performance mode: balanced\n");
return;
}
- /* Fall through */
+ fallthrough;
case MPT_PERF_MODE_LATENCY:
/*
* Enable interrupt coalescing on all reply queues
}
/* drop to default case for posting the request */
}
- /* fall through */
+ fallthrough;
default:
ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
data_in_dma, data_in_sz);
case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
scsi_set_resid(scmd, 0);
- /* fall through */
+ fallthrough;
case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
case MPI2_IOCSTATUS_SUCCESS:
scmd->result = (DID_OK << 16) | scsi_status;
if (!test_bit(handle, ioc->pend_os_device_add))
break;
- /* fall through */
+ fallthrough;
case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
event_data->PortEntry[i].PortStatus &= 0xF0;
event_data->PortEntry[i].PortStatus |=
MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED;
- /* fall through */
+ fallthrough;
case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
if (ioc->shost_recovery)
break;
case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
dev_info(&pdev->dev,
"HBA is in Configurable Secure mode\n");
- /* fall through */
+ fallthrough;
case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
ioc->is_aero_ioc = ioc->is_gen35_ioc = 1;
if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
sdev_printk(KERN_INFO, sdev,
"Background Initialization Aborted\n");
- /* Fallthrough */
+ fallthrough;
case MYRB_STATUS_NO_BGI_INPROGRESS:
cb->bgi_status.status = MYRB_BGI_INVALID;
break;
scmd->scsi_done(scmd);
return 0;
}
- /* fall through */
+ fallthrough;
case WRITE_6:
lba = (((scmd->cmnd[1] & 0x1F) << 16) |
(scmd->cmnd[2] << 8) |
scmd->scsi_done(scmd);
return 0;
}
- /* fall through */
+ fallthrough;
case WRITE_10:
case VERIFY: /* 0x2F */
case WRITE_VERIFY: /* 0x2E */
scmd->scsi_done(scmd);
return 0;
}
- /* fall through */
+ fallthrough;
case WRITE_12:
case VERIFY_12: /* 0xAF */
case WRITE_VERIFY_12: /* 0xAE */
new = old;
break;
}
- /* fall through */
+ fallthrough;
default:
panic("ncr_script_copy_and_bind: weird relocation %x\n", old);
break;
np->scsi_mode = SMODE_HVD;
break;
}
- /* fall through */
+ fallthrough;
case 3: /* SYMBIOS controllers report HVD through GPIO3 */
if (INB(nc_gpreg) & 0x08)
break;
- /* fall through */
+ fallthrough;
case 2: /* Set HVD unconditionally */
np->scsi_mode = SMODE_HVD;
- /* fall through */
+ fallthrough;
case 1: /* Trust previous settings for HVD */
if (np->sv_stest2 & 0x20)
np->scsi_mode = SMODE_HVD;
break;
cp->phys.header.wgoalp = cpu_to_scr(goalp);
cp->phys.header.wlastp = cpu_to_scr(lastp);
- /* fall through */
+ fallthrough;
case DMA_FROM_DEVICE:
goalp = NCB_SCRIPT_PHYS (np, data_in2) + 8;
if (segments <= MAX_SCATTERL)
OUTL_DSP (scr_to_cpu(tp->lp[0]->jump_ccb[0]));
return;
}
- /* fall through */
+ fallthrough;
case SIR_RESEL_BAD_TARGET: /* Will send a TARGET RESET message */
case SIR_RESEL_BAD_LUN: /* Will send a TARGET RESET message */
case SIR_RESEL_BAD_I_T_L_Q: /* Will send an ABORT TAG message */
*/
OUTB (HS_PRT, HS_BUSY);
- /* fall through */
+ fallthrough;
case SIR_NEGO_PROTO:
/*-------------------------------------------------------
nsp_scsi_done(tmpSC);
return IRQ_HANDLED;
}
- /* fall thru */
+ fallthrough;
default:
if ((irq_status & (IRQSTATUS_SCSI | IRQSTATUS_FIFO)) == 0) {
return IRQ_HANDLED;
}
cmd->SCp.phase++;
}
- /* fall through */
+ fallthrough;
case 2: /* Phase 2 - We are now talking to the scsi bus */
if (!ppa_select(dev, scmd_id(cmd))) {
return 0;
}
cmd->SCp.phase++;
- /* fall through */
+ fallthrough;
case 3: /* Phase 3 - Ready to accept a command */
w_ctr(ppb, 0x0c);
if (!ppa_send_command(cmd))
return 0;
cmd->SCp.phase++;
- /* fall through */
+ fallthrough;
case 4: /* Phase 4 - Setup scatter/gather buffers */
if (scsi_bufflen(cmd)) {
}
cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
cmd->SCp.phase++;
- /* fall through */
+ fallthrough;
case 5: /* Phase 5 - Data transfer stage */
w_ctr(ppb, 0x0c);
if (retv == 0)
return 1;
cmd->SCp.phase++;
- /* fall through */
+ fallthrough;
case 6: /* Phase 6 - Read status/message */
cmd->result = DID_OK << 16;
container_of(work, struct qedf_ctx, stag_work.work);
if (!qedf) {
- QEDF_ERR(&qedf->dbg_ctx, "qedf is NULL");
+ QEDF_ERR(NULL, "qedf is NULL");
return;
}
QEDF_ERR(&qedf->dbg_ctx, "Performing software context reset.\n");
static inline int
ql_mask_match(uint level)
{
+ if (ql2xextended_error_logging == 1)
+ ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
+
return (level & ql2xextended_error_logging) == level;
}
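ql_mask_match() treats ql2xextended_error_logging as a bitmask, and the added lines first map the legacy value 1 onto the driver's default mask. The test itself is a plain subset check: a message level matches only when every one of its bits is enabled. A standalone sketch with invented mask values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool mask_match(uint32_t level, uint32_t mask)
{
	/* true only if every bit of 'level' is set in 'mask' */
	return (level & mask) == level;
}

int main(void)
{
	printf("%d\n", mask_match(0x03, 0x07));	/* 1: subset of the mask */
	printf("%d\n", mask_match(0x09, 0x07));	/* 0: bit 3 not enabled */
	return 0;
}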
uint32_t scm_supported_f:1;
/* Enabled in Driver */
uint32_t scm_enabled:1;
+ uint32_t max_req_queue_warned:1;
} flags;
uint16_t max_exchg;
break;
case CS_TIMEOUT:
rval = QLA_FUNCTION_TIMEOUT;
- /* fall through */
+ fallthrough;
default:
ql_dbg(ql_dbg_disc, vha, 0x2033,
"%s failed, completion status (%x) on port_id: "
static uint
qla25xx_fdmi_port_speed_capability(struct qla_hw_data *ha)
{
+ uint speeds = 0;
+
if (IS_CNA_CAPABLE(ha))
return FDMI_PORT_SPEED_10GB;
if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) {
- uint speeds = 0;
-
if (ha->max_supported_speed == 2) {
if (ha->min_supported_speed <= 6)
speeds |= FDMI_PORT_SPEED_64GB;
}
return speeds;
}
- if (IS_QLA2031(ha))
- return FDMI_PORT_SPEED_16GB|FDMI_PORT_SPEED_8GB|
- FDMI_PORT_SPEED_4GB;
+ if (IS_QLA2031(ha)) {
+ if ((ha->pdev->subsystem_vendor == 0x103C) &&
+ (ha->pdev->subsystem_device == 0x8002)) {
+ speeds = FDMI_PORT_SPEED_16GB;
+ } else {
+ speeds = FDMI_PORT_SPEED_16GB|FDMI_PORT_SPEED_8GB|
+ FDMI_PORT_SPEED_4GB;
+ }
+ return speeds;
+ }
if (IS_QLA25XX(ha))
return FDMI_PORT_SPEED_8GB|FDMI_PORT_SPEED_4GB|
FDMI_PORT_SPEED_2GB|FDMI_PORT_SPEED_1GB;
list_for_each_entry(fcport, &vha->vp_fcports, list) {
if ((fcport->flags & FCF_FABRIC_DEVICE) != 0) {
fcport->scan_state = QLA_FCPORT_SCAN;
- fcport->logout_on_delete = 0;
}
}
goto login_logout;
}
if (fcport->scan_state != QLA_FCPORT_FOUND) {
+ bool do_delete = false;
+
+ if (fcport->scan_needed &&
+ fcport->disc_state == DSC_LOGIN_PEND) {
+ /* Cable got disconnected after we sent
+ * a login. Do delete to prevent timeout.
+ */
+ fcport->logout_on_delete = 1;
+ do_delete = true;
+ }
+
fcport->scan_needed = 0;
- if ((qla_dual_mode_enabled(vha) ||
- qla_ini_mode_enabled(vha)) &&
- atomic_read(&fcport->state) == FCS_ONLINE) {
+ if (((qla_dual_mode_enabled(vha) ||
+ qla_ini_mode_enabled(vha)) &&
+ atomic_read(&fcport->state) == FCS_ONLINE) ||
+ do_delete) {
if (fcport->loop_id != FC_NO_LOOP_ID) {
if (fcport->flags & FCF_FCP2_DEVICE)
fcport->logout_on_delete = 0;
unsigned long flags;
const char *name = sp->name;
+ if (res == QLA_OS_TIMER_EXPIRED) {
+ /* switch is ignoring all commands.
+ * This might be a zone disable behavior.
+ * This means we hit the 64s timeout
+ * (22s GPNFT + 44s Abort).
+ */
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s: Switch Zone check please.\n",
+ name);
+ qla2x00_mark_all_devices_lost(vha);
+ }
+
/*
* We are in an Interrupt context, queue up this
* sp for GNNFT_DONE work. This will allow all
fcport);
break;
}
- /* fall through */
+ fallthrough;
default:
if (fcport_is_smaller(fcport)) {
/* local adapter is bigger */
&vha->dpc_flags);
qla2xxx_wake_dpc(vha);
}
- /* fall through */
+ fallthrough;
default:
ql_dbg(ql_dbg_disc, vha, 0x20eb,
"%s %8phC cmd error fw_status 0x%x 0x%x 0x%x\n",
qla2xxx_wake_dpc(vha);
}
}
- /* fall through */
+ fallthrough;
case MBA_IDC_COMPLETE:
if (ha->notify_lb_portup_comp && !vha->vp_idx)
complete(&ha->lb_portup_comp);
- /* Fallthru */
+ fallthrough;
case MBA_IDC_TIME_EXT:
if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) ||
IS_QLA8044(ha))
res = DID_ERROR << 16;
}
}
- ql_dbg(ql_dbg_user, vha, 0x503f,
- "ELS IOCB Done -%s error hdl=%x comp_status=0x%x error subcode 1=0x%x error subcode 2=0x%x total_byte=0x%x\n",
+ ql_dbg(ql_dbg_disc, vha, 0x503f,
+ "ELS IOCB Done -%s hdl=%x comp_status=0x%x error subcode 1=0x%x error subcode 2=0x%x total_byte=0x%x\n",
type, sp->handle, comp_status, fw_status[1], fw_status[2],
le32_to_cpu(ese->total_byte_count));
goto els_ct_done;
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
}
- /* fall through */
+ fallthrough;
default:
data[0] = MBS_COMMAND_ERROR;
break;
case CS_PORT_UNAVAILABLE:
case CS_PORT_LOGGED_OUT:
fcport->nvme_flag |= NVME_FLAG_RESETTING;
- /* fall through */
+ fallthrough;
case CS_ABORTED:
case CS_PORT_BUSY:
fd->transferred_length = 0;
} else {
qlt_24xx_process_atio_queue(vha, 1);
}
- /* fall through */
+ fallthrough;
case ABTS_RESP_24XX:
case CTIO_TYPE7:
case CTIO_CRC2:
if (time_after(jiffies, wait_time))
break;
- /*
- * Check if it's UNLOADING, cause we cannot poll in
- * this case, or else a NULL pointer dereference
- * is triggered.
- */
- if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags)))
- return QLA_FUNCTION_TIMEOUT;
-
/* Check for pending interrupts. */
qla2x00_poll(ha->rsp_q_map[0]);
mcp->mb[8] = MSW(risc_addr);
mcp->out_mb = MBX_8|MBX_1|MBX_0;
mcp->in_mb = MBX_3|MBX_2|MBX_0;
- mcp->tov = 30;
+ mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
mcp->mb[8] = MSW(risc_addr);
mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0;
mcp->in_mb = MBX_1|MBX_0;
- mcp->tov = 30;
+ mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
mcp->mb[9] = vha->vp_idx;
mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
- mcp->tov = 30;
+ mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
if (mb != NULL) {
mcp->out_mb = MBX_1|MBX_0;
mcp->in_mb = MBX_0;
- mcp->tov = 30;
+ mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
mcp->out_mb = MBX_1|MBX_0;
mcp->in_mb = MBX_0;
- mcp->tov = 30;
+ mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
if (IS_QLA8031(ha))
mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
mcp->in_mb = MBX_0;
- mcp->tov = 30;
+ mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
mcp->in_mb = MBX_2|MBX_1|MBX_0;
if (IS_QLA8031(ha))
mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
- mcp->tov = 30;
+ mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
struct nvme_private *priv = fd->private;
struct qla_nvme_rport *qla_rport = rport->private;
+ if (!priv) {
+ /* nvme association has been torn down */
+ return rval;
+ }
+
fcport = qla_rport->fcport;
if (!qpair || !fcport || (qpair && !qpair->fw_started) ||
tmpl = &qla_nvme_fc_transport;
WARN_ON(vha->nvme_local_port);
- WARN_ON(ha->max_req_queues < 3);
+
+ if (ha->max_req_queues < 3) {
+ if (!ha->flags.max_req_queue_warned)
+ ql_log(ql_log_info, vha, 0x2120,
+ "%s: Disabling FC-NVME due to lack of free queue pairs (%d).\n",
+ __func__, ha->max_req_queues);
+ ha->flags.max_req_queue_warned = 1;
+ return ret;
+ }
qla_nvme_fc_transport.max_hw_queues =
min((uint8_t)(qla_nvme_fc_transport.max_hw_queues),
/* Determine queue resources */
ha->max_req_queues = ha->max_rsp_queues = 1;
ha->msix_count = QLA_BASE_VECTORS;
+
+ /* Check if FW supports MQ or not */
+ if (!(ha->fw_attributes & BIT_6))
+ goto mqiobase_exit;
+
if (!ql2xmqsupport || !ql2xnvmeenable ||
(!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
goto mqiobase_exit;
/* This may fail but that's ok */
pci_enable_pcie_error_reporting(pdev);
- /* Turn off T10-DIF when FC-NVMe is enabled */
- if (ql2xnvmeenable)
- ql2xenabledif = 0;
-
ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL);
if (!ha) {
ql_log_pci(ql_log_fatal, pdev, 0x0009,
sec_mask = 0x10000;
break;
}
- /* Fall through... */
+ fallthrough;
case 0x1f: /* Atmel flash. */
/* 512k sector size. */
sec_mask = 0x80000000;
break;
}
- /* Fall through... */
+ fallthrough;
case 0x01: /* AMD flash. */
if (flash_id == 0x38 || flash_id == 0x40 ||
sec_mask = 0x1e000;
break;
}
- /* fall through */
+ fallthrough;
default:
/* Default to 16 kb sector size. */
rest_addr = 0x3fff;
ql_dbg(ql_dbg_tgt, vha, 0xe073,
"qla_target(%d):%s: CRC2 Response pkt\n",
vha->vp_idx, __func__);
- /* fall through */
+ fallthrough;
case CTIO_TYPE7:
{
struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
qla24xx_chk_fcp_state(sess);
- ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
+ ql_dbg(ql_dbg_disc, sess->vha, 0xe001,
"Scheduling sess %p for deletion %8phC\n",
sess, sess->port_name);
case QLA_TGT_CLEAR_TS:
case QLA_TGT_ABORT_TS:
abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
- /* fall through */
+ fallthrough;
case QLA_TGT_CLEAR_ACA:
h = qlt_find_qphint(vha, mcmd->unpacked_lun);
mcmd->qpair = h->qpair;
res = 1;
break;
}
- /* fall through */
+ fallthrough;
case ELS_LOGO:
case ELS_PRLO:
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
chap_tbl.secret_len);
}
}
- /* fall through */
+ fallthrough;
default:
return iscsi_session_get_param(cls_sess, param, buf);
}
/* Write mailbox command registers. */
switch (mbox_param[param[0]] >> 4) {
case 6: sbus_writew(param[5], qpti->qregs + MBOX5);
- /* Fall through */
+ fallthrough;
case 5: sbus_writew(param[4], qpti->qregs + MBOX4);
- /* Fall through */
+ fallthrough;
case 4: sbus_writew(param[3], qpti->qregs + MBOX3);
- /* Fall through */
+ fallthrough;
case 3: sbus_writew(param[2], qpti->qregs + MBOX2);
- /* Fall through */
+ fallthrough;
case 2: sbus_writew(param[1], qpti->qregs + MBOX1);
- /* Fall through */
+ fallthrough;
case 1: sbus_writew(param[0], qpti->qregs + MBOX0);
}
/* Read back output parameters. */
switch (mbox_param[param[0]] & 0xf) {
case 6: param[5] = sbus_readw(qpti->qregs + MBOX5);
- /* Fall through */
+ fallthrough;
case 5: param[4] = sbus_readw(qpti->qregs + MBOX4);
- /* Fall through */
+ fallthrough;
case 4: param[3] = sbus_readw(qpti->qregs + MBOX3);
- /* Fall through */
+ fallthrough;
case 3: param[2] = sbus_readw(qpti->qregs + MBOX2);
- /* Fall through */
+ fallthrough;
case 2: param[1] = sbus_readw(qpti->qregs + MBOX1);
- /* Fall through */
+ fallthrough;
case 1: param[0] = sbus_readw(qpti->qregs + MBOX0);
}
goto fini;
}
- if (zc == ZC2_IMPLICIT_OPEN)
- zbc_close_zone(devip, zsp);
zbc_open_zone(devip, zsp, true);
fini:
write_unlock(macc_lckp);
u64 d = ktime_get_boottime_ns() - ns_from_boot;
if (kt <= d) { /* elapsed duration >= kt */
+ spin_lock_irqsave(&sqp->qc_lock, iflags);
sqcp->a_cmnd = NULL;
atomic_dec(&devip->num_in_q);
clear_bit(k, sqp->in_use_bm);
+ spin_unlock_irqrestore(&sqp->qc_lock, iflags);
if (new_sd_dp)
kfree(sd_dp);
/* call scsi_done() from this thread */
set_host_byte(scmd, DID_ALLOC_FAILURE);
return SUCCESS;
}
- /* FALLTHROUGH */
+ fallthrough;
case COPY_ABORTED:
case VOLUME_OVERFLOW:
case MISCOMPARE:
return ADD_TO_MLQUEUE;
else
set_host_byte(scmd, DID_TARGET_FAILURE);
- /* FALLTHROUGH */
+ fallthrough;
case ILLEGAL_REQUEST:
if (sshdr.asc == 0x20 || /* Invalid command operation code */
switch (status_byte(scmd->result)) {
case GOOD:
scsi_handle_queue_ramp_up(scmd->device);
- /* FALLTHROUGH */
+ fallthrough;
case COMMAND_TERMINATED:
return SUCCESS;
case CHECK_CONDITION:
return FAILED;
case QUEUE_FULL:
scsi_handle_queue_full(scmd->device);
- /* fall through */
+ fallthrough;
case BUSY:
return NEEDS_RETRY;
default:
case NEEDS_RETRY:
if (retry_cnt--)
goto retry_tur;
- /*FALLTHRU*/
+ fallthrough;
case SUCCESS:
return 0;
default:
if (msg_byte(scmd->result) == COMMAND_COMPLETE &&
status_byte(scmd->result) == RESERVATION_CONFLICT)
return 0;
- /* fall through */
+ fallthrough;
case DID_SOFT_ERROR:
return (scmd->request->cmd_flags & REQ_FAILFAST_DRIVER);
}
set_host_byte(scmd, DID_TIME_OUT);
return SUCCESS;
}
- /* FALLTHROUGH */
+ fallthrough;
case DID_NO_CONNECT:
case DID_BAD_TARGET:
/*
* lower down
*/
break;
- /* fallthrough */
+ fallthrough;
case DID_BUS_BUSY:
case DID_PARITY:
goto maybe_retry;
* the case of trying to send too many commands to a
* tagged queueing device.
*/
- /* FALLTHROUGH */
+ fallthrough;
case BUSY:
/*
* device can't talk to us at the moment. Should only
if (scmd->cmnd[0] == REPORT_LUNS)
scmd->device->sdev_target->expecting_lun_change = 0;
scsi_handle_queue_ramp_up(scmd->device);
- /* FALLTHROUGH */
+ fallthrough;
case COMMAND_TERMINATED:
return SUCCESS;
case TASK_ABORTED:
rtn = scsi_try_bus_device_reset(scmd);
if (rtn == SUCCESS || (val & SG_SCSI_RESET_NO_ESCALATE))
break;
- /* FALLTHROUGH */
+ fallthrough;
case SG_SCSI_RESET_TARGET:
rtn = scsi_try_target_reset(scmd);
if (rtn == SUCCESS || (val & SG_SCSI_RESET_NO_ESCALATE))
break;
- /* FALLTHROUGH */
+ fallthrough;
case SG_SCSI_RESET_BUS:
rtn = scsi_try_bus_reset(scmd);
if (rtn == SUCCESS || (val & SG_SCSI_RESET_NO_ESCALATE))
break;
- /* FALLTHROUGH */
+ fallthrough;
case SG_SCSI_RESET_HOST:
rtn = scsi_try_host_reset(scmd);
if (rtn == SUCCESS)
break;
- /* FALLTHROUGH */
+ fallthrough;
default:
rtn = FAILED;
break;
case NOT_READY: /* This happens if there is no disc in drive */
if (sdev->removable)
break;
- /* FALLTHROUGH */
+ fallthrough;
case UNIT_ATTENTION:
if (sdev->removable) {
sdev->changed = 1;
result = 0; /* This is no longer considered an error */
break;
}
- /* FALLTHROUGH -- for non-removable media */
+ fallthrough; /* for non-removable media */
default:
sdev_printk(KERN_INFO, sdev,
"ioctl_internal_command return code = %x\n",
}
if (!scsi_end_request(req, blk_stat, blk_rq_err_bytes(req)))
return;
- /*FALLTHRU*/
+ fallthrough;
case ACTION_REPREP:
scsi_io_completion_reprep(cmd, q);
break;
break;
case BMIC_SENSE_DIAG_OPTIONS:
cdb_length = 0;
- /* fall through */
+ fallthrough;
case BMIC_IDENTIFY_CONTROLLER:
case BMIC_IDENTIFY_PHYSICAL_DEVICE:
case BMIC_SENSE_SUBSYSTEM_INFORMATION:
break;
case BMIC_SET_DIAG_OPTIONS:
cdb_length = 0;
- /* fall through */
+ fallthrough;
case BMIC_WRITE_HOST_WELLNESS:
request->data_direction = SOP_WRITE_FLAG;
cdb[0] = BMIC_WRITE;
switch (scmd->cmnd[0]) {
case WRITE_6:
is_write = true;
- /* fall through */
+ fallthrough;
case READ_6:
first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
(scmd->cmnd[2] << 8) | scmd->cmnd[3]);
break;
case WRITE_10:
is_write = true;
- /* fall through */
+ fallthrough;
case READ_10:
first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
break;
case WRITE_12:
is_write = true;
- /* fall through */
+ fallthrough;
case READ_12:
first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
break;
case WRITE_16:
is_write = true;
- /* fall through */
+ fallthrough;
case READ_16:
first_block = get_unaligned_be64(&scmd->cmnd[2]);
block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
if (io_request->scmd)
io_request->scmd->result = 0;
- /* fall through */
+ fallthrough;
case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
break;
case PQI_RESPONSE_IU_VENDOR_GENERAL:
switch (reset_status) {
case RESET_INITIATE_DRIVER:
- /* fall through */
case RESET_TIMEDOUT:
dev_info(&ctrl_info->pci_dev->dev,
"resetting controller %u\n", ctrl_info->ctrl_id);
sis_soft_reset(ctrl_info);
- /* fall through */
+ fallthrough;
case RESET_INITIATE_FIRMWARE:
rc = pqi_ofa_ctrl_restart(ctrl_info);
pqi_ofa_free_host_buffer(ctrl_info);
case 2340:
case 2352:
sector_size = 2048;
- /* fall through */
+ fallthrough;
case 2048:
cd->capacity *= 4;
- /* fall through */
+ fallthrough;
case 512:
break;
default:
switch (sense[0] & 0x7f) {
case 0x71:
s->deferred = 1;
- /* fall through */
+ fallthrough;
case 0x70:
s->fixed_format = 1;
s->flags = sense[2] & 0xe0;
break;
case 0x73:
s->deferred = 1;
- /* fall through */
+ fallthrough;
case 0x72:
s->fixed_format = 0;
ucp = scsi_sense_desc_find(sense, SCSI_SENSE_BUFFERSIZE, 4);
switch (cmd_in) {
case MTFSFM:
chg_eof = 0; /* Changed from the FSF after this */
- /* fall through */
+ fallthrough;
case MTFSF:
cmd[0] = SPACE;
cmd[1] = 0x01; /* Space FileMarks */
break;
case MTBSFM:
chg_eof = 0; /* Changed from the FSF after this */
- /* fall through */
+ fallthrough;
case MTBSF:
cmd[0] = SPACE;
cmd[1] = 0x01; /* Space FileMarks */
case CSR_LEFT_3:
*vaddr = (dregs->bpack_lo & 0xff00) >> 8;
vaddr--;
- /* Fall through */
+ fallthrough;
case CSR_LEFT_2:
*vaddr = (dregs->bpack_hi & 0x00ff);
vaddr--;
- /* Fall through */
+ fallthrough;
case CSR_LEFT_1:
*vaddr = (dregs->bpack_hi & 0xff00) >> 8;
new = old;
break;
}
- /* fall through */
+ fallthrough;
default:
new = 0;
panic("sym_fw_bind_script: "
sym_print_addr(cp->cmd, "%s\n",
s_status == S_BUSY ? "BUSY" : "QUEUE FULL\n");
}
- /* fall through */
+ fallthrough;
default: /* S_INT, S_INT_COND_MET, S_CONFLICT */
sym_complete_error (np, cp);
break;
* Negotiation failed.
* Target does not want answer message.
*/
- /* fall through */
+ fallthrough;
case SIR_NEGO_PROTO:
sym_nego_default(np, tp, cp);
goto out;
data, len);
if (!x)
break;
- /* fall through */
+ fallthrough;
default:
x = sym_read_T93C46_nvram(np, nvram);
break;
/* Select MPHY refclk frequency */
clk = devm_clk_get(dev, NULL);
if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
dev_err(dev, "Cannot claim MPHY clock.\n");
goto clk_err;
}
ktime_t timeout, time_checked;
u32 val;
- timeout = ktime_add_us(ktime_get(), ms_to_ktime(max_wait_ms));
+ timeout = ktime_add_ms(ktime_get(), max_wait_ms);
do {
time_checked = ktime_get();
ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
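The one-line ktime fix above corrects a unit mismatch: ms_to_ktime() returns nanoseconds, and ktime_add_us() scales its second argument by NSEC_PER_USEC again, so the old code inflated the timeout a thousandfold. A self-contained model of the arithmetic, with ktime_t re-declared locally for illustration:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_USEC 1000LL
#define NSEC_PER_MSEC 1000000LL

typedef int64_t ktime_t;	/* nanoseconds, as in the kernel */

static ktime_t ktime_add_us(ktime_t kt, int64_t usec)
{
	return kt + usec * NSEC_PER_USEC;
}

static ktime_t ktime_add_ms(ktime_t kt, int64_t msec)
{
	return kt + msec * NSEC_PER_MSEC;
}

static ktime_t ms_to_ktime(int64_t ms)
{
	return ms * NSEC_PER_MSEC;
}

int main(void)
{
	int64_t max_wait_ms = 100;

	/* buggy: 100 ms becomes 100,000,000 us, i.e. 100 seconds */
	printf("%lld ns\n", (long long)ktime_add_us(0, ms_to_ktime(max_wait_ms)));
	/* fixed: 100 ms */
	printf("%lld ns\n", (long long)ktime_add_ms(0, max_wait_ms));
	return 0;
}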
goto out;
}
- /* fall through */
+ fallthrough;
case UPIU_TRANSACTION_NOP_OUT:
case UPIU_TRANSACTION_TASK_REQ:
ret = ufshcd_exec_raw_upiu_cmd(hba, &bsg_request->upiu_req,
return err;
}
+static int ufs_intel_ehl_init(struct ufs_hba *hba)
+{
+ hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
+ return 0;
+}
+
static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = {
.name = "intel-pci",
.link_startup_notify = ufs_intel_link_startup_notify,
};
+static struct ufs_hba_variant_ops ufs_intel_ehl_hba_vops = {
+ .name = "intel-pci",
+ .init = ufs_intel_ehl_init,
+ .link_startup_notify = ufs_intel_link_startup_notify,
+};
+
#ifdef CONFIG_PM_SLEEP
/**
* ufshcd_pci_suspend - suspend power management function
static const struct pci_device_id ufshcd_pci_tbl[] = {
{ PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VDEVICE(INTEL, 0x9DFA), (kernel_ulong_t)&ufs_intel_cnl_hba_vops },
- { PCI_VDEVICE(INTEL, 0x4B41), (kernel_ulong_t)&ufs_intel_cnl_hba_vops },
- { PCI_VDEVICE(INTEL, 0x4B43), (kernel_ulong_t)&ufs_intel_cnl_hba_vops },
+ { PCI_VDEVICE(INTEL, 0x4B41), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
+ { PCI_VDEVICE(INTEL, 0x4B43), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
{ } /* terminate list */
};
int ufshcd_hold(struct ufs_hba *hba, bool async)
{
int rc = 0;
+ bool flush_result;
unsigned long flags;
if (!ufshcd_is_clkgating_allowed(hba))
break;
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
- flush_work(&hba->clk_gating.ungate_work);
+ flush_result = flush_work(&hba->clk_gating.ungate_work);
+ if (hba->clk_gating.is_suspended && !flush_result)
+ goto out;
spin_lock_irqsave(hba->host->host_lock, flags);
goto start;
}
* currently running. Hence, fall through to cancel gating
* work and to enable clocks.
*/
- /* fallthrough */
+ fallthrough;
case CLKS_OFF:
ufshcd_scsi_block_requests(hba);
hba->clk_gating.state = REQ_CLKS_ON;
* fall through to check if we should wait for this
* work to be done or not.
*/
- /* fallthrough */
+ fallthrough;
case REQ_CLKS_ON:
if (async) {
rc = -EAGAIN;
switch (scsi_status) {
case SAM_STAT_CHECK_CONDITION:
ufshcd_copy_sense_data(lrbp);
- /* fallthrough */
+ fallthrough;
case SAM_STAT_GOOD:
result |= DID_OK << 16 |
COMMAND_COMPLETE << 8 |
*/
static irqreturn_t ufshcd_intr(int irq, void *__hba)
{
- u32 intr_status, enabled_intr_status;
+ u32 intr_status, enabled_intr_status = 0;
irqreturn_t retval = IRQ_NONE;
struct ufs_hba *hba = __hba;
int retries = hba->nutrs;
* read, make sure we handle them by checking the interrupt status
* again in a loop until we process all of the reqs before returning.
*/
- do {
+ while (intr_status && retries--) {
enabled_intr_status =
intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
if (intr_status)
retval |= ufshcd_sl_intr(hba, enabled_intr_status);
intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
- } while (intr_status && --retries);
+ }
- if (retval == IRQ_NONE) {
+ if (enabled_intr_status && retval == IRQ_NONE) {
dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n",
__func__, intr_status);
ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
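The ufshcd_intr() rework does two things: the do/while becomes a bounded while, so a status word that is already zero never enters the loop, and the "unhandled interrupt" complaint now fires only when an enabled bit actually went unserviced. A compressed userspace model of that control flow, with the MMIO reads faked by an array:

#include <stdint.h>
#include <stdio.h>

/* Faked registers: successive status reads, then a fixed enable mask. */
static uint32_t status_reads[] = { 0x5, 0x1, 0x0 };
static unsigned int read_idx;

static uint32_t read_intr_status(void)
{
	return status_reads[read_idx++];
}

static uint32_t read_intr_enable(void)
{
	return 0x7;
}

static int service(uint32_t bits)	/* stand-in for ufshcd_sl_intr() */
{
	return bits != 0;	/* nonzero means something was handled */
}

int main(void)
{
	uint32_t intr_status = read_intr_status();
	uint32_t enabled_intr_status = 0;
	int retries = 2, retval = 0;

	while (intr_status && retries--) {
		enabled_intr_status = intr_status & read_intr_enable();
		if (intr_status)	/* mirrors the driver's check */
			retval |= service(enabled_intr_status);
		intr_status = read_intr_status();
	}

	if (enabled_intr_status && !retval)
		fprintf(stderr, "unhandled interrupt\n");

	printf("handled=%d\n", retval);	/* prints handled=1 */
	return 0;
}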
switch (msgcode) {
case UPIU_TRANSACTION_NOP_OUT:
cmd_type = DEV_CMD_TYPE_NOP;
- /* fall through */
+ fallthrough;
case UPIU_TRANSACTION_QUERY_REQ:
ufshcd_hold(hba, false);
mutex_lock(&hba->dev_cmd.lock);
goto out;
}
- if (!(reg & (1 << tag))) {
- dev_err(hba->dev,
- "%s: cmd was completed, but without a notifying intr, tag = %d",
- __func__, tag);
- }
-
/* Print Transfer Request of aborted task */
- dev_err(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
+ dev_info(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
/*
* Print detailed info about aborted request.
}
hba->req_abort_count++;
+ if (!(reg & (1 << tag))) {
+ dev_err(hba->dev,
+ "%s: cmd was completed, but without a notifying intr, tag = %d",
+ __func__, tag);
+ goto cleanup;
+ }
+
/* Skip task abort in case previous aborts failed and report failure */
if (lrbp->req_abort_skip) {
err = -EIO;
/* command completed already */
dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
__func__, tag);
- goto out;
+ goto cleanup;
} else {
dev_err(hba->dev,
"%s: no response from device. tag = %d, err %d\n",
goto out;
}
+cleanup:
scsi_dma_unmap(cmd);
spin_lock_irqsave(host->host_lock, flags);
* OCS FATAL ERROR with device error through sense data
*/
UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR = 1 << 10,
+
+ /*
+ * This quirk needs to be enabled if the host controller has
+ * auto-hibernate capability but it doesn't work.
+ */
+ UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8 = 1 << 11,
};
enum ufshcd_caps {
static inline bool ufshcd_is_auto_hibern8_supported(struct ufs_hba *hba)
{
- return (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT);
+ return (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT) &&
+ !(hba->quirks & UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8);
}
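With the new quirk bit, auto-hibernate support is the AND of the controller's capability bit and the absence of UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8, which is how the ufs_intel_ehl_init() hook above vetoes the feature. A small sketch of the gating; treat the bit positions as illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CAP_AUTO_H8		(1u << 23)	/* capability register bit */
#define QUIRK_BROKEN_AUTO_H8	(1u << 11)	/* matches the hunk above */

static bool auto_h8_supported(uint32_t capabilities, uint32_t quirks)
{
	return (capabilities & CAP_AUTO_H8) &&
	       !(quirks & QUIRK_BROKEN_AUTO_H8);
}

int main(void)
{
	printf("%d\n", auto_h8_supported(CAP_AUTO_H8, 0));		/* 1 */
	printf("%d\n", auto_h8_supported(CAP_AUTO_H8,
					 QUIRK_BROKEN_AUTO_H8));	/* 0 */
	return 0;
}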
static inline bool ufshcd_is_auto_hibern8_enabled(struct ufs_hba *hba)
default:
scmd_printk(KERN_WARNING, sc, "Unknown response %d",
resp->response);
- /* fall through */
+ fallthrough;
case VIRTIO_SCSI_S_FAILURE:
set_host_byte(sc, DID_ERROR);
break;
case BTSTAT_TAGREJECT:
case BTSTAT_BADMSG:
cmd->result = (DRIVER_INVALID << 24);
- /* fall through */
+ fallthrough;
case BTSTAT_HAHARDWARE:
case BTSTAT_INVPHASE:
case 1: --x;
break;
case 2: ++x;
- /* fall through */
+ fallthrough;
case 3: ++x;
}
return x;
case XenbusStateClosed:
if (dev->state == XenbusStateClosed)
break;
- /* fall through - Missed the backend's Closing state */
+ fallthrough; /* Missed the backend's Closing state */
case XenbusStateClosing:
scsifront_disconnect(info);
break;
debugfs_create_u32("nmodem_supported", 0400, qcom_socinfo->dbg_root,
&qcom_socinfo->info.nmodem_supported);
- /* Fall through */
+ fallthrough;
case SOCINFO_VERSION(0, 14):
qcom_socinfo->info.num_clusters = __le32_to_cpu(info->num_clusters);
qcom_socinfo->info.ncluster_array_offset = __le32_to_cpu(info->ncluster_array_offset);
&qcom_socinfo->info.num_defective_parts);
debugfs_create_u32("ndefective_parts_array_offset", 0400, qcom_socinfo->dbg_root,
&qcom_socinfo->info.ndefective_parts_array_offset);
- /* Fall through */
+ fallthrough;
case SOCINFO_VERSION(0, 13):
qcom_socinfo->info.nproduct_id = __le32_to_cpu(info->nproduct_id);
debugfs_create_u32("nproduct_id", 0400, qcom_socinfo->dbg_root,
&qcom_socinfo->info.nproduct_id);
DEBUGFS_ADD(info, chip_id);
- /* Fall through */
+ fallthrough;
case SOCINFO_VERSION(0, 12):
qcom_socinfo->info.chip_family =
__le32_to_cpu(info->chip_family);
debugfs_create_x32("raw_device_number", 0400,
qcom_socinfo->dbg_root,
&qcom_socinfo->info.raw_device_num);
- /* Fall through */
+ fallthrough;
case SOCINFO_VERSION(0, 11):
case SOCINFO_VERSION(0, 10):
case SOCINFO_VERSION(0, 9):
debugfs_create_u32("foundry_id", 0400, qcom_socinfo->dbg_root,
&qcom_socinfo->info.foundry_id);
- /* Fall through */
+ fallthrough;
case SOCINFO_VERSION(0, 8):
case SOCINFO_VERSION(0, 7):
DEBUGFS_ADD(info, pmic_model);
DEBUGFS_ADD(info, pmic_die_rev);
- /* Fall through */
+ fallthrough;
case SOCINFO_VERSION(0, 6):
qcom_socinfo->info.hw_plat_subtype =
__le32_to_cpu(info->hw_plat_subtype);
debugfs_create_u32("hardware_platform_subtype", 0400,
qcom_socinfo->dbg_root,
&qcom_socinfo->info.hw_plat_subtype);
- /* Fall through */
+ fallthrough;
case SOCINFO_VERSION(0, 5):
qcom_socinfo->info.accessory_chip =
__le32_to_cpu(info->accessory_chip);
debugfs_create_u32("accessory_chip", 0400,
qcom_socinfo->dbg_root,
&qcom_socinfo->info.accessory_chip);
- /* Fall through */
+ fallthrough;
case SOCINFO_VERSION(0, 4):
qcom_socinfo->info.plat_ver = __le32_to_cpu(info->plat_ver);
debugfs_create_u32("platform_version", 0400,
qcom_socinfo->dbg_root,
&qcom_socinfo->info.plat_ver);
- /* Fall through */
+ fallthrough;
case SOCINFO_VERSION(0, 3):
qcom_socinfo->info.hw_plat = __le32_to_cpu(info->hw_plat);
debugfs_create_u32("hardware_platform", 0400,
qcom_socinfo->dbg_root,
&qcom_socinfo->info.hw_plat);
- /* Fall through */
+ fallthrough;
case SOCINFO_VERSION(0, 2):
qcom_socinfo->info.raw_ver = __le32_to_cpu(info->raw_ver);
debugfs_create_u32("raw_version", 0400, qcom_socinfo->dbg_root,
&qcom_socinfo->info.raw_ver);
- /* Fall through */
+ fallthrough;
case SOCINFO_VERSION(0, 1):
DEBUGFS_ADD(info, build_id);
break;
case POST_RATE_CHANGE:
pmc->rate = data->new_rate;
- /* fall through */
+ fallthrough;
case ABORT_RATE_CHANGE:
mutex_unlock(&pmc->powergates_lock);
endif # SPI_SLAVE
+config SPI_DYNAMIC
+ def_bool ACPI || OF_DYNAMIC || SPI_SLAVE
+
endif # SPI
switch (count) {
case 3:
*bs->rx_buf++ = (data >> 16) & 0xff;
- /* fallthrough */
+ fallthrough;
case 2:
*bs->rx_buf++ = (data >> 8) & 0xff;
- /* fallthrough */
+ fallthrough;
case 1:
*bs->rx_buf++ = (data >> 0) & 0xff;
/* fallthrough - no default */
case 1:
kfree(fsl_dummy_rx);
fsl_dummy_rx = NULL;
- /* fall through */
+ fallthrough;
default:
fsl_dummy_rx_refcnt--;
break;
switch (mspi->subblock) {
default:
dev_warn(dev, "cell-index unspecified, assuming SPI1\n");
- /* fall through */
+ fallthrough;
case 0:
mspi->subblock = QE_CR_SUBBLOCK_SPI1;
break;
default:
dev_err(&pdev->dev,
"failed to find hwlock id, %d\n", ret);
- /* fall-through */
+ fallthrough;
case -EPROBE_DEFER:
goto put_ctlr;
}
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_platform.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/spi/spi.h>
{
u32 div, mbrdiv;
- div = DIV_ROUND_UP(spi->clk_rate, speed_hz);
+ /* Ensure spi->clk_rate is even */
+ div = DIV_ROUND_UP(spi->clk_rate & ~0x1, speed_hz);
/*
* SPI framework set xfer->speed_hz to master->max_speed_hz if
/**
* stm32h7_spi_prepare_fthlv - Determine FIFO threshold level
* @spi: pointer to the spi controller data structure
+ * @xfer_len: length of the message to be transferred
*/
-static u32 stm32h7_spi_prepare_fthlv(struct stm32_spi *spi)
+static u32 stm32h7_spi_prepare_fthlv(struct stm32_spi *spi, u32 xfer_len)
{
- u32 fthlv, half_fifo;
+ u32 fthlv, half_fifo, packet;
/* data packet should not exceed 1/2 of fifo space */
half_fifo = (spi->fifo_size / 2);
+ /* data packet should not exceed transfer length */
+ if (half_fifo > xfer_len)
+ packet = xfer_len;
+ else
+ packet = half_fifo;
+
if (spi->cur_bpw <= 8)
- fthlv = half_fifo;
+ fthlv = packet;
else if (spi->cur_bpw <= 16)
- fthlv = half_fifo / 2;
+ fthlv = packet / 2;
else
- fthlv = half_fifo / 4;
+ fthlv = packet / 4;
/* align packet size with data registers access */
if (spi->cur_bpw > 8)
else
fthlv -= (fthlv % 4); /* multiple of 4 */
+ if (!fthlv)
+ fthlv = 1;
+
return fthlv;
}
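The reworked stm32h7_spi_prepare_fthlv() bounds the data packet by the transfer length rather than only half the FIFO, and floors the result at 1 so the alignment step cannot round a short transfer down to zero threshold events. A standalone model of the computation under those assumptions:

#include <stdio.h>

static unsigned int prepare_fthlv(unsigned int fifo_size, unsigned int bpw,
				  unsigned int xfer_len)
{
	unsigned int packet = fifo_size / 2;	/* at most half the FIFO */
	unsigned int fthlv;

	if (packet > xfer_len)			/* at most the transfer */
		packet = xfer_len;

	if (bpw <= 8)
		fthlv = packet;
	else if (bpw <= 16)
		fthlv = packet / 2;
	else
		fthlv = packet / 4;

	/* align with data register access width */
	if (bpw > 8)
		fthlv -= fthlv % 2;		/* multiple of 2 */
	else
		fthlv -= fthlv % 4;		/* multiple of 4 */

	if (!fthlv)
		fthlv = 1;			/* never round down to zero */
	return fthlv;
}

int main(void)
{
	/* 3-byte transfer, 8-bit words, 16-entry FIFO: alignment would
	 * yield 0, so the floor keeps the threshold at 1.
	 */
	printf("%u\n", prepare_fthlv(16, 8, 3));	/* prints 1 */
	return 0;
}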
if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
stm32h7_spi_read_rxfifo(spi, false);
- writel_relaxed(mask, spi->base + STM32H7_SPI_IFCR);
+ writel_relaxed(sr & mask, spi->base + STM32H7_SPI_IFCR);
spin_unlock_irqrestore(&spi->lock, flags);
if (end) {
- spi_finalize_current_transfer(master);
stm32h7_spi_disable(spi);
+ spi_finalize_current_transfer(master);
}
return IRQ_HANDLED;
cfg1_setb |= (bpw << STM32H7_SPI_CFG1_DSIZE_SHIFT) &
STM32H7_SPI_CFG1_DSIZE;
- spi->cur_fthlv = stm32h7_spi_prepare_fthlv(spi);
+ spi->cur_fthlv = stm32h7_spi_prepare_fthlv(spi, spi->cur_xferlen);
fthlv = spi->cur_fthlv - 1;
cfg1_clrb |= STM32H7_SPI_CFG1_FTHLV;
unsigned long flags;
unsigned int comm_type;
int nb_words, ret = 0;
+ int mbr;
spin_lock_irqsave(&spi->lock, flags);
- if (spi->cur_bpw != transfer->bits_per_word) {
- spi->cur_bpw = transfer->bits_per_word;
- spi->cfg->set_bpw(spi);
- }
-
- if (spi->cur_speed != transfer->speed_hz) {
- int mbr;
+ spi->cur_xferlen = transfer->len;
- /* Update spi->cur_speed with real clock speed */
- mbr = stm32_spi_prepare_mbr(spi, transfer->speed_hz,
- spi->cfg->baud_rate_div_min,
- spi->cfg->baud_rate_div_max);
- if (mbr < 0) {
- ret = mbr;
- goto out;
- }
+ spi->cur_bpw = transfer->bits_per_word;
+ spi->cfg->set_bpw(spi);
- transfer->speed_hz = spi->cur_speed;
- stm32_spi_set_mbr(spi, mbr);
+ /* Update spi->cur_speed with real clock speed */
+ mbr = stm32_spi_prepare_mbr(spi, transfer->speed_hz,
+ spi->cfg->baud_rate_div_min,
+ spi->cfg->baud_rate_div_max);
+ if (mbr < 0) {
+ ret = mbr;
+ goto out;
}
+ transfer->speed_hz = spi->cur_speed;
+ stm32_spi_set_mbr(spi, mbr);
+
comm_type = stm32_spi_communication_type(spi_dev, transfer);
- if (spi->cur_comm != comm_type) {
- ret = spi->cfg->set_mode(spi, comm_type);
+ ret = spi->cfg->set_mode(spi, comm_type);
+ if (ret < 0)
+ goto out;
- if (ret < 0)
- goto out;
-
- spi->cur_comm = comm_type;
- }
+ spi->cur_comm = comm_type;
if (spi->cfg->set_data_idleness)
spi->cfg->set_data_idleness(spi, transfer->len);
goto out;
}
- spi->cur_xferlen = transfer->len;
-
dev_dbg(spi->dev, "transfer communication mode set to %d\n",
spi->cur_comm);
dev_dbg(spi->dev,
pm_runtime_disable(&pdev->dev);
+ pinctrl_pm_select_sleep_state(&pdev->dev);
+
return 0;
}
clk_disable_unprepare(spi->clk);
- return 0;
+ return pinctrl_pm_select_sleep_state(dev);
}
static int stm32_spi_runtime_resume(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct stm32_spi *spi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = pinctrl_pm_select_default_state(dev);
+ if (ret)
+ return ret;
return clk_prepare_enable(spi->clk);
}
return ret;
ret = spi_master_resume(master);
- if (ret)
+ if (ret) {
clk_disable_unprepare(spi->clk);
+ return ret;
+ }
- return ret;
+ ret = pm_runtime_get_sync(dev);
+ if (ret) {
+ dev_err(dev, "Unable to power device: %d\n", ret);
+ return ret;
+ }
+
+ spi->cfg->config(spi);
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return 0;
}
#endif
*/
static DEFINE_MUTEX(board_lock);
+/*
+ * Prevents addition of devices with the same chip select and
+ * addition of devices below an unregistering controller.
+ */
+static DEFINE_MUTEX(spi_add_lock);
+
/**
* spi_alloc_device - Allocate a new SPI device
* @ctlr: Controller to which device is connected
*/
int spi_add_device(struct spi_device *spi)
{
- static DEFINE_MUTEX(spi_add_lock);
struct spi_controller *ctlr = spi->controller;
struct device *dev = ctlr->dev.parent;
int status;
goto done;
}
+ /* Controller may unregister concurrently */
+ if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
+ !device_is_registered(&ctlr->dev)) {
+ status = -ENODEV;
+ goto done;
+ }
+
/* Descriptors take precedence */
if (ctlr->cs_gpiods)
spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select];
struct spi_controller *found;
int id = ctlr->bus_num;
+ /* Prevent addition of new devices, unregister existing ones */
+ if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
+ mutex_lock(&spi_add_lock);
+
device_for_each_child(&ctlr->dev, NULL, __unregister);
/* First make sure that this controller was ever added */
if (found == ctlr)
idr_remove(&spi_master_idr, id);
mutex_unlock(&board_lock);
+
+ if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
+ mutex_unlock(&spi_add_lock);
}
EXPORT_SYMBOL_GPL(spi_unregister_controller);
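The race these hunks close, sketched (lock and helper names are the ones visible above):
/*
 *  CPU0: spi_unregister_controller()    CPU1: spi_add_device()
 *    mutex_lock(&spi_add_lock);           mutex_lock(&spi_add_lock); <- blocks
 *    unregister children, delete ctlr
 *    mutex_unlock(&spi_add_lock);         sees !device_is_registered()
 *                                         -> returns -ENODEV instead of
 *                                            adding a device under a
 *                                            dead controller
 */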
*m = chipco_read32(cc, SSB_CHIPCO_CLOCK_M2);
break;
}
- /* Fall through */
+ fallthrough;
default:
*m = chipco_read32(cc, SSB_CHIPCO_CLOCK_SB);
}
set_irq(dev, irq++);
break;
}
- /* fallthrough */
+ fallthrough;
case SSB_DEV_EXTIF:
set_irq(dev, 0);
break;
switch (bus->bustype) {
case SSB_BUSTYPE_SSB:
/* Only map the first core for now. */
- /* fallthrough... */
+ fallthrough;
case SSB_BUSTYPE_PCMCIA:
mmio = ioremap(baseaddr, SSB_CORE_SIZE);
break;
if (MIPI_PORT1_ID + 1 != N_MIPI_PORT_ID) {
return MIPI_PORT1_ID + 1;
}
- /* fall through */
+ fallthrough;
default:
dev_err(isp->dev, "unsupported port: %d\n", port);
return MIPI_PORT0_ID;
return false;
}
- /* fall-through */
+ fallthrough;
case ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE:
if (pipe_id == IA_CSS_PIPE_ID_CAPTURE ||
pipe_id == IA_CSS_PIPE_ID_PREVIEW)
return true;
return false;
- /* fall-through */
+ fallthrough;
case ATOMISP_RUN_MODE_VIDEO:
if (!asd->continuous_mode->val) {
if (pipe_id == IA_CSS_PIPE_ID_VIDEO ||
else
return false;
}
- /* fall through */
+ fallthrough;
case ATOMISP_RUN_MODE_SDV:
if (pipe_id == IA_CSS_PIPE_ID_CAPTURE ||
pipe_id == IA_CSS_PIPE_ID_VIDEO)
if (!atomisp_is_mbuscode_raw(asd->fmt[asd->capture_pad].fmt.code)) {
return IA_CSS_PIPE_ID_CAPTURE;
}
- /* fall through */
+ fallthrough;
case ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW:
if (asd->yuvpp_mode)
return IA_CSS_PIPE_ID_YUVPP;
case ATOMISP_RUN_MODE_VIDEO:
return IA_CSS_PIPE_ID_VIDEO;
case ATOMISP_RUN_MODE_STILL_CAPTURE:
- /* fall through */
default:
return IA_CSS_PIPE_ID_CAPTURE;
}
case RAW_CAMERA:
dev_dbg(isp->dev, "raw_index: %d\n", raw_index);
raw_index = isp->input_cnt;
- /* fall through */
+ fallthrough;
case SOC_CAMERA:
dev_dbg(isp->dev, "SOC_INDEX: %d\n", isp->input_cnt);
if (isp->input_cnt >= ATOM_ISP_MAX_INPUTS) {
break;
}
- /* fall through */
+ fallthrough;
/*
* if dynamic memory pool doesn't exist, need to free
#endif
pipe->stop_requested = false;
}
- /* fall through */
+ fallthrough;
case IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME:
case IA_CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME:
frame = (struct ia_css_frame *)HOST_ADDRESS(ddr_buffer.kernel_ptr);
case V4L2_MPEG2_PICTURE_CODING_TYPE_B:
backward_addr = hantro_get_ref(ctx,
slice_params->backward_ref_ts);
- /* fall-through */
+ fallthrough;
case V4L2_MPEG2_PICTURE_CODING_TYPE_P:
forward_addr = hantro_get_ref(ctx,
slice_params->forward_ref_ts);
case V4L2_MPEG2_PICTURE_CODING_TYPE_B:
backward_addr = hantro_get_ref(ctx,
slice_params->backward_ref_ts);
- /* fall-through */
+ fallthrough;
case V4L2_MPEG2_PICTURE_CODING_TYPE_P:
forward_addr = hantro_get_ref(ctx,
slice_params->forward_ref_ts);
passthrough_cycles = incc->cycles;
break;
}
- /* fallthrough - non-passthrough RGB565 (CSI-2 bus) */
+ fallthrough; /* non-passthrough RGB565 (CSI-2 bus) */
default:
burst_size = (image.pix.width & 0xf) ? 8 : 16;
passthrough_bits = 16;
struct v4l2_ctrl *ctrl_test;
unsigned int count;
unsigned int i;
+ int ret = 0;
list_for_each_entry(obj, &req->objects, list) {
struct vb2_buffer *vb;
if (!ctrl_test) {
v4l2_info(&ctx->dev->v4l2_dev,
"Missing required codec control\n");
- return -ENOENT;
+ ret = -ENOENT;
+ break;
}
}
v4l2_ctrl_request_hdl_put(hdl);
+ if (ret)
+ return ret;
+
return vb2_request_validate(req);
}
switch (len) {
case 4:
buf[3] = usbvision_read_reg(usbvision, USBVISION_SER_DAT4);
- /* fall through */
+ fallthrough;
case 3:
buf[2] = usbvision_read_reg(usbvision, USBVISION_SER_DAT3);
- /* fall through */
+ fallthrough;
case 2:
buf[1] = usbvision_read_reg(usbvision, USBVISION_SER_DAT2);
- /* fall through */
+ fallthrough;
case 1:
buf[0] = usbvision_read_reg(usbvision, USBVISION_SER_DAT1);
break;
case CPL_RX_ISCSI_DDP:
case CPL_FW4_ACK:
lro_flush = false;
- /* fall through */
+ fallthrough;
case CPL_ABORT_RPL_RSS:
case CPL_PASS_ESTABLISH:
case CPL_PEER_CLOSE:
case ISTATE_SEND_LOGOUTRSP:
if (!iscsit_logout_post_handler(cmd, conn))
return -ECONNRESET;
- /* fall through */
+ fallthrough;
case ISTATE_SEND_STATUS:
case ISTATE_SEND_ASYNCMSG:
case ISTATE_SEND_NOPIN:
break;
case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
we = 1;
- /* fall through */
+ fallthrough;
case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
/*
* Some commands are only allowed for registered I_T Nexuses.
break;
case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
we = 1;
- /* fall through */
+ fallthrough;
case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
/*
* Each registered I_T Nexus is a reservation holder.
}
if (!protect)
return TCM_NO_SENSE;
- /* Fallthrough */
+ fallthrough;
default:
pr_err("Unable to determine pi_prot_type for CDB: 0x%02x "
"PROTECT: 0x%02x\n", cdb[0], protect);
ret = cmd->se_tfo->queue_data_in(cmd);
break;
}
- /* fall through */
+ fallthrough;
case DMA_NONE:
queue_status:
trace_target_cmd_complete(cmd);
goto queue_full;
break;
}
- /* fall through */
+ fallthrough;
case DMA_NONE:
queue_status:
trace_target_cmd_complete(cmd);
case FCP_PTA_ACA:
task_attr = TCM_ACA_TAG;
break;
- case FCP_PTA_SIMPLE: /* Fallthrough */
+ case FCP_PTA_SIMPLE:
default:
task_attr = TCM_SIMPLE_TAG;
}
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2011-2015, 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2015, 2017, 2020, The Linux Foundation. All rights reserved.
*/
#include <linux/bitops.h>
chip->temp = mili_celsius;
}
- *temp = chip->temp < 0 ? 0 : chip->temp;
+ *temp = chip->temp;
return 0;
}
p2[4] = (qfprom_cdata[1] & MSM8916_S4_P2_MASK) >> MSM8916_S4_P2_SHIFT;
for (i = 0; i < priv->num_sensors; i++)
p2[i] = ((base1 + p2[i]) << 3);
- /* Fall through */
+ fallthrough;
case ONE_PT_CALIB2:
base0 = (qfprom_cdata[0] & MSM8916_BASE0_MASK);
p1[0] = (qfprom_cdata[0] & MSM8916_S0_P1_MASK) >> MSM8916_S0_P1_SHIFT;
p2[8] = (calib[5] & S8_P2_BKP_MASK) >> S8_P2_BKP_SHIFT;
p2[9] = (calib[5] & S9_P2_BKP_MASK) >> S9_P2_BKP_SHIFT;
p2[10] = (calib[5] & S10_P2_BKP_MASK) >> S10_P2_BKP_SHIFT;
- /* Fall through */
+ fallthrough;
case ONE_PT_CALIB:
case ONE_PT_CALIB2:
base1 = bkp[0] & BASE1_MASK;
p2[8] = (calib[4] & S8_P2_MASK) >> S8_P2_SHIFT;
p2[9] = (calib[4] & S9_P2_MASK) >> S9_P2_SHIFT;
p2[10] = (calib[4] & S10_P2_MASK) >> S10_P2_SHIFT;
- /* Fall through */
+ fallthrough;
case ONE_PT_CALIB:
case ONE_PT_CALIB2:
base1 = calib[0] & BASE1_MASK;
p2[i] <<= 2;
p2[i] |= BIT_APPEND;
}
- /* Fall through */
+ fallthrough;
case ONE_PT_CALIB2:
for (i = 0; i < priv->num_sensors; i++) {
p1[i] += base1;
p2[9] = (qfprom_cdata[3] & S9_P2_MASK) >> S9_P2_SHIFT;
for (i = 0; i < priv->num_sensors; i++)
p2[i] = ((base1 + p2[i]) << 2);
- /* Fall through */
+ fallthrough;
case ONE_PT_CALIB2:
base0 = (qfprom_cdata[4] & BASE0_MASK) >> BASE0_SHIFT;
p1[0] = (qfprom_cdata[0] & S0_P1_MASK) >> S0_P1_SHIFT;
for (i = 0; i < priv->num_sensors; i++)
p2[i] = ((base1 + p2[i]) << 2);
- /* Fall through */
+ fallthrough;
case ONE_PT_CALIB2:
base0 = qfprom_cdata[0] & MSM8976_BASE0_MASK;
p1[0] = (qfprom_cdata[0] & MSM8976_S0_P1_MASK) >> MSM8976_S0_P1_SHIFT;
*/
void thermal_zone_device_unregister(struct thermal_zone_device *tz)
{
- int i;
+ int i, tz_id;
const struct thermal_zone_params *tzp;
struct thermal_cooling_device *cdev;
struct thermal_zone_device *pos = NULL;
return;
tzp = tz->tzp;
+ tz_id = tz->id;
mutex_lock(&thermal_list_lock);
list_for_each_entry(pos, &thermal_tz_list, node)
mutex_destroy(&tz->lock);
device_unregister(&tz->device);
- thermal_notify_tz_delete(tz->id);
+ thermal_notify_tz_delete(tz_id);
}
EXPORT_SYMBOL_GPL(thermal_zone_device_unregister);
/*
* Temperature values in millidegrees Celsius
- * ADC code values from 530 to 923
+ * ADC code values from 13 to 107, see TRM
+ * "18.4.10.2.3 ADC Codes Versus Temperature".
*/
static const int
omap4430_adc_to_temp[OMAP4430_ADC_END_VALUE - OMAP4430_ADC_START_VALUE + 1] = {
- -38000, -35000, -34000, -32000, -30000, -28000, -26000, -24000, -22000,
- -20000, -18000, -17000, -15000, -13000, -12000, -10000, -8000, -6000,
- -5000, -3000, -1000, 0, 2000, 3000, 5000, 6000, 8000, 10000, 12000,
- 13000, 15000, 17000, 19000, 21000, 23000, 25000, 27000, 28000, 30000,
- 32000, 33000, 35000, 37000, 38000, 40000, 42000, 43000, 45000, 47000,
- 48000, 50000, 52000, 53000, 55000, 57000, 58000, 60000, 62000, 64000,
- 66000, 68000, 70000, 71000, 73000, 75000, 77000, 78000, 80000, 82000,
- 83000, 85000, 87000, 88000, 90000, 92000, 93000, 95000, 97000, 98000,
- 100000, 102000, 103000, 105000, 107000, 109000, 111000, 113000, 115000,
- 117000, 118000, 120000, 122000, 123000,
+ -40000, -38000, -35000, -34000, -32000, -30000, -28000, -26000, -24000,
+ -22000, -20000, -18500, -17000, -15000, -13500, -12000, -10000, -8000,
+ -6500, -5000, -3500, -1500, 0, 2000, 3500, 5000, 6500, 8500, 10000,
+ 12000, 13500, 15000, 17000, 19000, 21000, 23000, 25000, 27000, 28500,
+ 30000, 32000, 33500, 35000, 37000, 38500, 40000, 42000, 43500, 45000,
+ 47000, 48500, 50000, 52000, 53500, 55000, 57000, 58500, 60000, 62000,
+ 64000, 66000, 68000, 70000, 71500, 73500, 75000, 77000, 78500, 80000,
+ 82000, 83500, 85000, 87000, 88500, 90000, 92000, 93500, 95000, 97000,
+ 98500, 100000, 102000, 103500, 105000, 107000, 109000, 111000, 113000,
+ 115000, 117000, 118500, 120000, 122000, 123500, 125000,
};
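For reference, a sketch of how the bandgap core consumes this table (the lookup below is an assumption for illustration, not code from this hunk):

	if (adc_code < OMAP4430_ADC_START_VALUE ||
	    adc_code > OMAP4430_ADC_END_VALUE)
		return -ERANGE;	/* out-of-TRM codes rejected, not guessed */
	*temp_mc = omap4430_adc_to_temp[adc_code - OMAP4430_ADC_START_VALUE];
	/* e.g. code 13 -> -40000 (-40 C), code 107 -> 125000 (125 C) */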
/* OMAP4430 data */
* and thresholds for OMAP4430.
*/
-/* ADC conversion table limits */
-#define OMAP4430_ADC_START_VALUE 0
-#define OMAP4430_ADC_END_VALUE 127
+/*
+ * ADC conversion table limits. Ignore values outside the TRM listed
+ * range to avoid bogus thermal shutdowns. See omap4430 TRM chapter
+ * "18.4.10.2.3 ADC Codes Versus Temperature".
+ */
+#define OMAP4430_ADC_START_VALUE 13
+#define OMAP4430_ADC_END_VALUE 107
/* bandgap clock limits (no control on 4430) */
#define OMAP4430_MAX_FREQ 32768
#define OMAP4430_MIN_FREQ 32768
"RX: checksum mismatch, dropping packet\n");
goto rx;
}
- /* Fall through */
+ fallthrough;
case TB_CFG_PKG_ICM_EVENT:
if (tb_ctl_handle_event(pkg->ctl, frame->eof, pkg, frame->size))
goto rx;
if (tb_route(sw))
return 0;
- /* fallthrough */
+ fallthrough;
case 3:
ret = tb_switch_set_uuid(sw);
if (ret)
switch (rate) {
default:
WARN(1, "invalid rate %u passed, defaulting to 1620 MB/s\n", rate);
- /* Fallthrough */
+ fallthrough;
case 1620:
val |= DP_COMMON_CAP_RATE_RBR << DP_COMMON_CAP_RATE_SHIFT;
break;
default:
WARN(1, "invalid number of lanes %u passed, defaulting to 1\n",
lanes);
- /* Fallthrough */
+ fallthrough;
case 1:
val |= DP_COMMON_CAP_1_LANE << DP_COMMON_CAP_LANES_SHIFT;
break;
case XenbusStateClosed:
if (dev->state == XenbusStateClosed)
break;
- /* fall through - Missed the backend's CLOSING state. */
+ fallthrough; /* Missed the backend's CLOSING state */
case XenbusStateClosing:
xenbus_frontend_closed(dev);
break;
/* Fall back to a 3 byte encoding */
word.bytes = 3;
word.word &= 0x00ffffff;
- /* Fall through */
+ fallthrough;
case 3:
/* 3 byte encoding */
word.word |= 0x82000000;
gsm_process_modem(tty, dlci, modem, clen);
tty_kref_put(tty);
}
- /* Fall through */
+ fallthrough;
case 1: /* Line state will go via DLCI 0 controls only */
default:
tty_insert_flip_string(port, data, len);
gsm->address = 0;
gsm->state = GSM_ADDRESS;
gsm->fcs = INIT_FCS;
- /* Fall through */
+ fallthrough;
case GSM_ADDRESS: /* Address continuation */
gsm->fcs = gsm_fcs_add(gsm->fcs, c);
if (gsm_read_ea(&gsm->address, c))
case TCOFLUSH:
flush_tx_queue(tty);
}
- /* fall through - to default */
+ fallthrough; /* to default */
default:
error = n_tty_ioctl_helper(tty, file, cmd, arg);
}
break;
case R3964_WAIT_FOR_RX_REPEAT:
- /* FALLTHROUGH */
case R3964_IDLE:
if (c == STX) {
/* Prevent rx_queue from overflow: */
break;
case UART_IER: /* IER @ 0x04 */
value &= 0x0f; /* only 4 valid bits - not Xscale */
- /* fall-through */
+ fallthrough;
case UART_DLL_EM: /* DLL @ 0x24 (+9) */
case UART_DLM_EM: /* DLM @ 0x28 (+9) */
writel(value, p->membase + (offset << 2));
.exit = pci_xr17v35x_exit,
};
+static const struct exar8250_board pbn_fastcom35x_2 = {
+ .num_ports = 2,
+ .setup = pci_xr17v35x_setup,
+ .exit = pci_xr17v35x_exit,
+};
+
+static const struct exar8250_board pbn_fastcom35x_4 = {
+ .num_ports = 4,
+ .setup = pci_xr17v35x_setup,
+ .exit = pci_xr17v35x_exit,
+};
+
+static const struct exar8250_board pbn_fastcom35x_8 = {
+ .num_ports = 8,
+ .setup = pci_xr17v35x_setup,
+ .exit = pci_xr17v35x_exit,
+};
+
static const struct exar8250_board pbn_exar_XR17V4358 = {
.num_ports = 12,
.setup = pci_xr17v35x_setup,
EXAR_DEVICE(EXAR, XR17V358, pbn_exar_XR17V35x),
EXAR_DEVICE(EXAR, XR17V4358, pbn_exar_XR17V4358),
EXAR_DEVICE(EXAR, XR17V8358, pbn_exar_XR17V8358),
- EXAR_DEVICE(COMMTECH, 4222PCIE, pbn_exar_XR17V35x),
- EXAR_DEVICE(COMMTECH, 4224PCIE, pbn_exar_XR17V35x),
- EXAR_DEVICE(COMMTECH, 4228PCIE, pbn_exar_XR17V35x),
+ EXAR_DEVICE(COMMTECH, 4222PCIE, pbn_fastcom35x_2),
+ EXAR_DEVICE(COMMTECH, 4224PCIE, pbn_fastcom35x_4),
+ EXAR_DEVICE(COMMTECH, 4228PCIE, pbn_fastcom35x_8),
EXAR_DEVICE(COMMTECH, 4222PCI335, pbn_fastcom335_2),
EXAR_DEVICE(COMMTECH, 4224PCI335, pbn_fastcom335_4),
case CHIP_ID_F81866:
sio_write_mask_reg(pdata, F81866_FIFO_CTRL, F81866_IRQ_MODE1,
0);
- /* fall through */
+ fallthrough;
case CHIP_ID_F81865:
sio_write_mask_reg(pdata, F81866_IRQ_MODE, F81866_IRQ_SHARE,
F81866_IRQ_SHARE);
break;
case 3:
offset = board->uart_offset;
- /* FALLTHROUGH */
+ fallthrough;
case 4: /* BAR 2 */
case 5: /* BAR 3 */
case 6: /* BAR 4 */
switch (iir & 0x3f) {
case UART_IIR_RX_TIMEOUT:
serial8250_rx_dma_flush(up);
- /* fall-through */
+ fallthrough;
case UART_IIR_RLSI:
return true;
}
if (port->irq && !(up->port.flags & UPF_NO_THRE_TEST)) {
unsigned char iir1;
+
+ if (port->irqflags & IRQF_SHARED)
+ disable_irq_nosync(port->irq);
+
/*
* Test for UARTs that do not reassert THRE when the
* transmitter is idle and the interrupt has already
* allow register changes to become visible.
*/
spin_lock_irqsave(&port->lock, flags);
- if (up->port.irqflags & IRQF_SHARED)
- disable_irq_nosync(port->irq);
wait_for_xmitr(up, UART_LSR_THRE);
serial_port_out_sync(port, UART_IER, UART_IER_THRI);
iir = serial_port_in(port, UART_IIR);
serial_port_out(port, UART_IER, 0);
+ spin_unlock_irqrestore(&port->lock, flags);
+
if (port->irqflags & IRQF_SHARED)
enable_irq(port->irq);
- spin_unlock_irqrestore(&port->lock, flags);
/*
* If the interrupt is not reasserted, or we otherwise
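What the reordering above avoids, sketched: an ABBA inversion between the port lock and the shared-IRQ descriptor lock (disable_irq_nosync() takes the latter internally):
/*
 *  before:  startup:  port->lock -> irq_desc->lock (disable_irq_nosync)
 *           irq path: irq_desc->lock -> handler -> port->lock
 *  after:   the irq is masked/unmasked with port->lock dropped, so
 *           irq_desc->lock never nests inside port->lock.
 */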
break;
case UART_LCR:
valshift = 8;
- /* fall through */
+ fallthrough;
case UART_MCR:
offset = UNIPHIER_UART_LCR_MCR;
break;
case UART_SCR:
/* No SCR for this hardware. Use CHAR as a scratch register */
valshift = 8;
- /* fall through */
+ fallthrough;
case UART_FCR:
offset = UNIPHIER_UART_CHAR_FCR;
break;
valshift = 8;
/* Divisor latch access bit does not exist. */
value &= ~UART_LCR_DLAB;
- /* fall through */
+ fallthrough;
case UART_MCR:
offset = UNIPHIER_UART_LCR_MCR;
break;
config SERIAL_IMX_EARLYCON
bool "Earlycon on IMX serial port"
+ depends on ARCH_MXC || COMPILE_TEST
depends on OF
select SERIAL_EARLYCON
help
obj-$(CONFIG_SERIAL_SH_SCI) += sh-sci.o
obj-$(CONFIG_SERIAL_CPM) += cpm_uart/
obj-$(CONFIG_SERIAL_IMX) += imx.o
+obj-$(CONFIG_SERIAL_IMX_EARLYCON) += imx_earlycon.o
obj-$(CONFIG_SERIAL_MPC52xx) += mpc52xx_uart.o
obj-$(CONFIG_SERIAL_ICOM) += icom.o
obj-$(CONFIG_SERIAL_MESON) += meson_uart.o
clk_disable(uap->clk);
}
-static void __init
-pl011_console_get_options(struct uart_amba_port *uap, int *baud,
- int *parity, int *bits)
+static void pl011_console_get_options(struct uart_amba_port *uap, int *baud,
+ int *parity, int *bits)
{
if (pl011_read(uap, REG_CR) & UART01x_CR_UARTEN) {
unsigned int lcr_h, ibrd, fbrd;
}
}
-static int __init pl011_console_setup(struct console *co, char *options)
+static int pl011_console_setup(struct console *co, char *options)
{
struct uart_amba_port *uap;
int baud = 38400;
*
* Returns 0 if console matches; otherwise non-zero to use default matching
*/
-static int __init pl011_console_match(struct console *co, char *name, int idx,
- char *options)
+static int pl011_console_match(struct console *co, char *name, int idx,
+ char *options)
{
unsigned char iotype;
resource_size_t addr;
static int pl011_register_port(struct uart_amba_port *uap)
{
- int ret;
+ int ret, i;
/* Ensure interrupts from this UART are masked and cleared */
pl011_write(0, uap, REG_IMSC);
if (ret < 0) {
dev_err(uap->port.dev,
"Failed to register AMBA-PL011 driver\n");
+ for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
+ if (amba_ports[i] == uap)
+ amba_ports[i] = NULL;
return ret;
}
}
version = atmel_uart_readl(port, ATMEL_US_VERSION);
switch (version) {
case 0x814: /* sama5d2 */
- /* fall through */
+ fallthrough;
case 0x701: /* sama5d4 */
atmel_port->fidi_min = 3;
atmel_port->fidi_max = 65535;
transmit_chars(up, lsr);
break;
case UART_IIR_RX_TIMEOUT:
- /* FALLTHROUGH */
case UART_IIR_RDI:
serial_omap_rdi(up, lsr);
break;
/* simply try again */
break;
case UART_IIR_XOFF:
- /* FALLTHROUGH */
default:
break;
}
return NO_POLL_CHAR;
if (word_cnt == 1 && (status & RX_LAST))
+ /*
+ * NOTE: If RX_LAST_BYTE_VALID is 0 it needs to be
+ * treated as if it was BYTES_PER_FIFO_WORD.
+ */
private_data->poll_cached_bytes_cnt =
(status & RX_LAST_BYTE_VALID_MSK) >>
RX_LAST_BYTE_VALID_SHFT;
- else
- private_data->poll_cached_bytes_cnt = 4;
+
+ if (private_data->poll_cached_bytes_cnt == 0)
+ private_data->poll_cached_bytes_cnt = BYTES_PER_FIFO_WORD;
private_data->poll_cached_bytes =
readl(uport->membase + SE_GENI_RX_FIFOn);
}
#ifdef CONFIG_SERIAL_QCOM_GENI_CONSOLE
-static int __init qcom_geni_console_setup(struct console *co, char *options)
+static int qcom_geni_console_setup(struct console *co, char *options)
{
struct uart_port *uport;
struct qcom_geni_serial_port *port;
case CS5:
case CS6:
dev_warn(port->dev, "bit size not supported, using 7 bits\n");
- /* Fall through */
+ fallthrough;
case CS7:
ctrl &= ~RDA_UART_DBITS_8;
break;
ourport->tx_irq = ret + 1;
}
- ret = platform_get_irq(platdev, 1);
- if (ret > 0)
- ourport->tx_irq = ret;
+ if (!s3c24xx_serial_has_interrupt_mask(port)) {
+ ret = platform_get_irq(platdev, 1);
+ if (ret > 0)
+ ourport->tx_irq = ret;
+ }
/*
* DMA is currently supported only on DT platforms, if DMA properties
* are specified.
tegra_uart_write(tup, ier, UART_IER);
break;
}
- /* Fall through */
+ fallthrough;
case 2: /* Receive */
if (!tup->use_rx_pio) {
is_rx_start = tup->rx_in_progress;
switch (parity) {
case 'o': case 'O':
termios.c_cflag |= PARODD;
- /*fall through*/
+ fallthrough;
case 'e': case 'E':
termios.c_cflag |= PARENB;
break;
return ret;
if (stm32port->info->cfg.has_wakeup) {
- stm32port->wakeirq = platform_get_irq(pdev, 1);
+ stm32port->wakeirq = platform_get_irq_optional(pdev, 1);
if (stm32port->wakeirq <= 0 && stm32port->wakeirq != -ENXIO)
return stm32port->wakeirq ? : -ENODEV;
}
switch (ret) {
case 2:
sunsu_change_mouse_baud(up);
- /* fallthru */
+ fallthrough;
case 1:
break;
switch (ret) {
case 2:
sunzilog_change_mouse_baud(up);
- /* fallthru */
+ fallthrough;
case 1:
break;
cdns_uart->baud = cdns_uart_set_baud_rate(cdns_uart->port,
cdns_uart->baud);
- /* fall through */
+ fallthrough;
case ABORT_RATE_CHANGE:
if (!locked)
spin_lock_irqsave(&cdns_uart->port->lock, flags);
ld->ops->flush_buffer(tty);
tty_unthrottle(tty);
}
- /* fall through */
+ fallthrough;
case TCOFLUSH:
tty_driver_flush_buffer(tty);
break;
unsigned int old_rows, old_row_size, first_copied_row;
unsigned int new_cols, new_rows, new_row_size, new_screen_size;
unsigned int user;
- unsigned short *newscreen;
+ unsigned short *oldscreen, *newscreen;
struct uni_screen *new_uniscr = NULL;
WARN_CONSOLE_UNLOCKED();
if (new_scr_end > new_origin)
scr_memsetw((void *)new_origin, vc->vc_video_erase_char,
new_scr_end - new_origin);
- kfree(vc->vc_screenbuf);
+ oldscreen = vc->vc_screenbuf;
vc->vc_screenbuf = newscreen;
vc->vc_screenbuf_size = new_screen_size;
set_origin(vc);
+ kfree(oldscreen);
/* do part of a reset_terminal() */
vc->vc_top = 0;
break;
case 3: /* include scrollback */
flush_scrollback(vc);
- /* fallthrough */
+ fallthrough;
case 2: /* erase whole display */
vc_uniscr_clear_lines(vc, 0, vc->vc_rows);
count = vc->vc_cols * vc->vc_rows;
lf(vc);
if (!is_kbd(vc, lnm))
return;
- /* fall through */
+ fallthrough;
case 13:
cr(vc);
return;
return;
}
vc->vc_priv = EPecma;
- /* fall through */
+ fallthrough;
case ESgetpars:
if (c == ';' && vc->vc_npar < NPAR - 1) {
vc->vc_npar++;
console_lock();
vcp = vc_cons[i].d;
if (vcp) {
+ int ret;
+ int save_scan_lines = vcp->vc_scan_lines;
+ int save_font_height = vcp->vc_font.height;
+
if (v.v_vlin)
vcp->vc_scan_lines = v.v_vlin;
if (v.v_clin)
vcp->vc_font.height = v.v_clin;
vcp->vc_resize_user = 1;
- vc_resize(vcp, v.v_cols, v.v_rows);
+ ret = vc_resize(vcp, v.v_cols, v.v_rows);
+ if (ret) {
+ vcp->vc_scan_lines = save_scan_lines;
+ vcp->vc_font.height = save_font_height;
+ console_unlock();
+ return ret;
+ }
}
console_unlock();
}
if (ret)
return ret;
break;
- } /* else fallthrough */
+ }
+ fallthrough;
case STATUS_STAGE:
pid = !usb_pipeout(urb->pipe) ? USB_PID_OUT : USB_PID_IN;
ret = c67x00_create_td(c67x00, urb, NULL, 0, pid, 1,
if (current_size < expected_size) {
/* notification is transmitted fragmented, reassemble */
if (acm->nb_size < expected_size) {
- if (acm->nb_size) {
- kfree(acm->notification_buffer);
- acm->nb_size = 0;
- }
+ u8 *new_buffer;
alloc_size = roundup_pow_of_two(expected_size);
- /*
- * kmalloc ensures a valid notification_buffer after a
- * use of kfree in case the previous allocation was too
- * small. Final freeing is done on disconnect.
- */
- acm->notification_buffer =
- kmalloc(alloc_size, GFP_ATOMIC);
- if (!acm->notification_buffer)
+ /* Final freeing is done on disconnect. */
+ new_buffer = krealloc(acm->notification_buffer,
+ alloc_size, GFP_ATOMIC);
+ if (!new_buffer) {
+ acm->nb_index = 0;
goto exit;
+ }
+
+ acm->notification_buffer = new_buffer;
acm->nb_size = alloc_size;
+ dr = (struct usb_cdc_notification *)acm->notification_buffer;
}
copy_size = min(current_size,
return 0;
}
+static bool is_dev_usb_generic_driver(struct device *dev)
+{
+ struct usb_device_driver *udd = dev->driver ?
+ to_usb_device_driver(dev->driver) : NULL;
+
+ return udd == &usb_generic_driver;
+}
+
+static int __usb_bus_reprobe_drivers(struct device *dev, void *data)
+{
+ struct usb_device_driver *new_udriver = data;
+ struct usb_device *udev;
+ int ret;
+
+ if (!is_dev_usb_generic_driver(dev))
+ return 0;
+
+ udev = to_usb_device(dev);
+ if (usb_device_match_id(udev, new_udriver->id_table) == NULL &&
+ (!new_udriver->match || new_udriver->match(udev) != 0))
+ return 0;
+
+ ret = device_reprobe(dev);
+ if (ret && ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to reprobe device (error %d)\n", ret);
+
+ return 0;
+}
+
/**
* usb_register_device_driver - register a USB device (not interface) driver
* @new_udriver: USB operations for the device driver
retval = driver_register(&new_udriver->drvwrap.driver);
- if (!retval)
+ if (!retval) {
pr_info("%s: registered new device driver %s\n",
usbcore_name, new_udriver->name);
- else
+ /*
+ * Check whether any device could be better served with
+ * this new driver
+ */
+ bus_for_each_dev(&usb_bus_type, NULL, new_udriver,
+ __usb_bus_reprobe_drivers);
+ } else {
printk(KERN_ERR "%s: error %d registering device "
" driver %s\n",
usbcore_name, retval, new_udriver->name);
+ }
return retval;
}
udrv = to_usb_device_driver(drv);
if (udrv == &usb_generic_driver)
return 0;
-
- return usb_device_match_id(udev, udrv->id_table) != NULL;
+ if (usb_device_match_id(udev, udrv->id_table) != NULL)
+ return 1;
+ return (udrv->match && udrv->match(udev));
}
static bool usb_generic_driver_match(struct usb_device *udev)
void usb_hcd_pci_remove(struct pci_dev *dev)
{
struct usb_hcd *hcd;
+ int hcd_driver_flags;
hcd = pci_get_drvdata(dev);
if (!hcd)
return;
+ hcd_driver_flags = hcd->driver->flags;
+
if (pci_dev_run_wake(dev))
pm_runtime_get_noresume(&dev->dev);
up_read(&companions_rwsem);
}
usb_put_hcd(hcd);
- if ((hcd->driver->flags & HCD_MASK) < HCD_USB3)
+ if ((hcd_driver_flags & HCD_MASK) < HCD_USB3)
pci_free_irq_vectors(dev);
pci_disable_device(dev);
}
if ((++hub->nerrors < 10) || hub->error)
goto resubmit;
hub->error = status;
- /* FALL THROUGH */
+ fallthrough;
/* let hub_wq handle things */
case 0: /* we got data: port status changed */
{ USB_DEVICE(0x0926, 0x0202), .driver_info =
USB_QUIRK_ENDPOINT_IGNORE },
+ /* Sound Devices MixPre-D */
+ { USB_DEVICE(0x0926, 0x0208), .driver_info =
+ USB_QUIRK_ENDPOINT_IGNORE },
+
/* Keytouch QWERTY Panel keyboard */
{ USB_DEVICE(0x0926, 0x3333), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
{ USB_DEVICE(0x2386, 0x3119), .driver_info = USB_QUIRK_NO_LPM },
+ { USB_DEVICE(0x2386, 0x350e), .driver_info = USB_QUIRK_NO_LPM },
+
/* DJI CineSSD */
{ USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
*/
static const struct usb_device_id usb_endpoint_ignore[] = {
{ USB_DEVICE_INTERFACE_NUMBER(0x0926, 0x0202, 1), .driver_info = 0x85 },
+ { USB_DEVICE_INTERFACE_NUMBER(0x0926, 0x0208, 1), .driver_info = 0x85 },
{ }
};
if (!(reg & DWC3_GUSB2PHYCFG_ULPI_UTMI))
break;
}
- /* FALLTHROUGH */
+ fallthrough;
case DWC3_GHWPARAMS3_HSPHY_IFC_ULPI:
- /* FALLTHROUGH */
default:
break;
}
default:
dev_err(dev, "invalid maximum_speed parameter %d\n",
dwc->maximum_speed);
- /* fall through */
+ fallthrough;
case USB_SPEED_UNKNOWN:
/* default to superspeed */
dwc->maximum_speed = USB_SPEED_SUPER;
* dwc3_prepare_one_trb - setup one TRB from one request
* @dep: endpoint for which this request is prepared
* @req: dwc3_request pointer
+ * @trb_length: buffer size of the TRB
* @chain: should this TRB be chained to the next?
* @node: only for isochronous endpoints. First TRB needs different type.
*/
static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
- struct dwc3_request *req, unsigned chain, unsigned node)
+ struct dwc3_request *req, unsigned int trb_length,
+ unsigned chain, unsigned node)
{
struct dwc3_trb *trb;
- unsigned int length;
dma_addr_t dma;
unsigned stream_id = req->request.stream_id;
unsigned short_not_ok = req->request.short_not_ok;
unsigned no_interrupt = req->request.no_interrupt;
unsigned is_last = req->request.is_last;
- if (req->request.num_sgs > 0) {
- length = sg_dma_len(req->start_sg);
+ if (req->request.num_sgs > 0)
dma = sg_dma_address(req->start_sg);
- } else {
- length = req->request.length;
+ else
dma = req->request.dma;
- }
trb = &dep->trb_pool[dep->trb_enqueue];
req->num_trbs++;
- __dwc3_prepare_one_trb(dep, trb, dma, length, chain, node,
+ __dwc3_prepare_one_trb(dep, trb, dma, trb_length, chain, node,
stream_id, short_not_ok, no_interrupt, is_last);
}
struct scatterlist *sg = req->start_sg;
struct scatterlist *s;
int i;
-
+ unsigned int length = req->request.length;
unsigned int remaining = req->request.num_mapped_sgs
- req->num_queued_sgs;
+ /*
+ * If we resume preparing the request, then get the remaining length of
+ * the request and resume where we left off.
+ */
+ for_each_sg(req->request.sg, s, req->num_queued_sgs, i)
+ length -= sg_dma_len(s);
+
for_each_sg(sg, s, remaining, i) {
- unsigned int length = req->request.length;
unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
unsigned int rem = length % maxp;
+ unsigned int trb_length;
unsigned chain = true;
+ trb_length = min_t(unsigned int, length, sg_dma_len(s));
+
+ length -= trb_length;
+
/*
* IOMMU driver is coalescing the list of sgs which share a
* page boundary into one and giving it to USB driver. With
* sgs passed. So mark the chain bit to false if it is the last
* mapped sg.
*/
- if (i == remaining - 1)
+ if ((i == remaining - 1) || !length)
chain = false;
if (rem && usb_endpoint_dir_out(dep->endpoint.desc) && !chain) {
req->needs_extra_trb = true;
/* prepare normal TRB */
- dwc3_prepare_one_trb(dep, req, true, i);
+ dwc3_prepare_one_trb(dep, req, trb_length, true, i);
/* Now prepare one extra TRB to align transfer size */
trb = &dep->trb_pool[dep->trb_enqueue];
req->request.short_not_ok,
req->request.no_interrupt,
req->request.is_last);
+ } else if (req->request.zero && req->request.length &&
+ !usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
+ !rem && !chain) {
+ struct dwc3 *dwc = dep->dwc;
+ struct dwc3_trb *trb;
+
+ req->needs_extra_trb = true;
+
+ /* Prepare normal TRB */
+ dwc3_prepare_one_trb(dep, req, trb_length, true, i);
+
+ /* Prepare one extra TRB to handle ZLP */
+ trb = &dep->trb_pool[dep->trb_enqueue];
+ req->num_trbs++;
+ __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 0,
+ !req->direction, 1,
+ req->request.stream_id,
+ req->request.short_not_ok,
+ req->request.no_interrupt,
+ req->request.is_last);
+
+ /* Prepare one more TRB to handle MPS alignment */
+ if (!req->direction) {
+ trb = &dep->trb_pool[dep->trb_enqueue];
+ req->num_trbs++;
+ __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp,
+ false, 1, req->request.stream_id,
+ req->request.short_not_ok,
+ req->request.no_interrupt,
+ req->request.is_last);
+ }
} else {
- dwc3_prepare_one_trb(dep, req, chain, i);
+ dwc3_prepare_one_trb(dep, req, trb_length, chain, i);
}
/*
req->num_queued_sgs++;
+ /*
+ * The number of pending SG entries may not correspond to the
+ * number of mapped SG entries. If all the data are queued, then
+ * don't include unused SG entries.
+ */
+ if (length == 0) {
+ req->num_pending_sgs -= req->request.num_mapped_sgs - req->num_queued_sgs;
+ break;
+ }
+
if (!dwc3_calc_trbs_left(dep))
break;
}
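A worked example of the new length bookkeeping in the sg loop (values are illustrative):
/*
 *   request.length = 100, mapped sg lengths after IOMMU coalescing = 60 + 60
 *
 *   sg0: trb_length = min(100, 60) = 60; length -> 40; chain = true
 *   sg1: trb_length = min(40,  60) = 40; length -> 0;  chain = false
 *
 * Only bytes the request owns are queued, and once length reaches 0
 * the wholly unused sg entries are subtracted from num_pending_sgs
 * instead of being queued as stale TRBs on a later resume.
 */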
req->needs_extra_trb = true;
/* prepare normal TRB */
- dwc3_prepare_one_trb(dep, req, true, 0);
+ dwc3_prepare_one_trb(dep, req, length, true, 0);
/* Now prepare one extra TRB to align transfer size */
trb = &dep->trb_pool[dep->trb_enqueue];
req->request.no_interrupt,
req->request.is_last);
} else if (req->request.zero && req->request.length &&
+ !usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
(IS_ALIGNED(req->request.length, maxp))) {
struct dwc3 *dwc = dep->dwc;
struct dwc3_trb *trb;
req->needs_extra_trb = true;
/* prepare normal TRB */
- dwc3_prepare_one_trb(dep, req, true, 0);
+ dwc3_prepare_one_trb(dep, req, length, true, 0);
- /* Now prepare one extra TRB to handle ZLP */
+ /* Prepare one extra TRB to handle ZLP */
trb = &dep->trb_pool[dep->trb_enqueue];
req->num_trbs++;
__dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 0,
- false, 1, req->request.stream_id,
+ !req->direction, 1, req->request.stream_id,
req->request.short_not_ok,
req->request.no_interrupt,
req->request.is_last);
+
+ /* Prepare one more TRB to handle MPS alignment for OUT */
+ if (!req->direction) {
+ trb = &dep->trb_pool[dep->trb_enqueue];
+ req->num_trbs++;
+ __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp,
+ false, 1, req->request.stream_id,
+ req->request.short_not_ok,
+ req->request.no_interrupt,
+ req->request.is_last);
+ }
} else {
- dwc3_prepare_one_trb(dep, req, false, 0);
+ dwc3_prepare_one_trb(dep, req, length, false, 0);
}
}
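Why OUT endpoints get one more TRB after the ZLP slot (a reading of both hunks above, not text from the patch):
/*
 * An IN endpoint only transmits the ZLP, so a single extra TRB is
 * enough. On an OUT endpoint the host may answer with a full
 * max-packet-size packet; the additional maxp-sized TRB aimed at
 * dwc->bounce_addr gives the controller a safe landing zone instead
 * of letting it write past the request buffer.
 */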
status);
if (req->needs_extra_trb) {
+ unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
+
ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event,
status);
+
+ /* Reclaim MPS padding TRB for ZLP */
+ if (!req->direction && req->request.zero && req->request.length &&
+ !usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
+ (IS_ALIGNED(req->request.length, maxp)))
+ ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event, status);
+
req->needs_extra_trb = false;
}
case RELEASE:
case RESERVE:
case SEND_DIAGNOSTIC:
- fallthrough;
default:
unknown_cmnd:
int ndp_index;
unsigned dg_len, dg_len2;
unsigned ndp_len;
+ unsigned block_len;
struct sk_buff *skb2;
int ret = -EINVAL;
- unsigned max_size = le32_to_cpu(ntb_parameters.dwNtbOutMaxSize);
+ unsigned ntb_max = le32_to_cpu(ntb_parameters.dwNtbOutMaxSize);
+ unsigned frame_max = le16_to_cpu(ecm_desc.wMaxSegmentSize);
const struct ndp_parser_opts *opts = ncm->parser_opts;
unsigned crc_len = ncm->is_crc ? sizeof(uint32_t) : 0;
int dgram_counter;
+ bool ndp_after_header;
/* dwSignature */
if (get_unaligned_le32(tmp) != opts->nth_sign) {
}
tmp++; /* skip wSequence */
+ block_len = get_ncm(&tmp, opts->block_length);
/* (d)wBlockLength */
- if (get_ncm(&tmp, opts->block_length) > max_size) {
+ if (block_len > ntb_max) {
INFO(port->func.config->cdev, "OUT size exceeded\n");
goto err;
}
ndp_index = get_ncm(&tmp, opts->ndp_index);
+ ndp_after_header = false;
/* Run through all the NDP's in the NTB */
do {
- /* NCM 3.2 */
- if (((ndp_index % 4) != 0) &&
- (ndp_index < opts->nth_size)) {
+ /*
+ * NCM 3.2
+ * dwNdpIndex
+ */
+ if (((ndp_index % 4) != 0) ||
+ (ndp_index < opts->nth_size) ||
+ (ndp_index > (block_len -
+ opts->ndp_size))) {
INFO(port->func.config->cdev, "Bad index: %#X\n",
ndp_index);
goto err;
}
+ if (ndp_index == opts->nth_size)
+ ndp_after_header = true;
- /* walk through NDP */
+ /*
+ * walk through NDP
+ * dwSignature
+ */
tmp = (void *)(skb->data + ndp_index);
if (get_unaligned_le32(tmp) != ncm->ndp_sign) {
INFO(port->func.config->cdev, "Wrong NDP SIGN\n");
ndp_len = get_unaligned_le16(tmp++);
/*
* NCM 3.3.1
+ * wLength
* entry is 2 items
* item size is 16/32 bits, opts->dgram_item_len * 2 bytes
* minimal: struct usb_cdc_ncm_ndpX + normal entry + zero entry
* Each entry is a dgram index and a dgram length.
*/
if ((ndp_len < opts->ndp_size
- + 2 * 2 * (opts->dgram_item_len * 2))
- || (ndp_len % opts->ndplen_align != 0)) {
+ + 2 * 2 * (opts->dgram_item_len * 2)) ||
+ (ndp_len % opts->ndplen_align != 0)) {
INFO(port->func.config->cdev, "Bad NDP length: %#X\n",
ndp_len);
goto err;
do {
index = index2;
+ /* wDatagramIndex[0] */
+ if ((index < opts->nth_size) ||
+ (index > block_len - opts->dpe_size)) {
+ INFO(port->func.config->cdev,
+ "Bad index: %#X\n", index);
+ goto err;
+ }
+
dg_len = dg_len2;
- if (dg_len < 14 + crc_len) { /* ethernet hdr + crc */
+ /*
+ * wDatagramLength[0]
+ * ethernet hdr + crc or larger than max frame size
+ */
+ if ((dg_len < 14 + crc_len) ||
+ (dg_len > frame_max)) {
INFO(port->func.config->cdev,
"Bad dgram length: %#X\n", dg_len);
goto err;
index2 = get_ncm(&tmp, opts->dgram_item_len);
dg_len2 = get_ncm(&tmp, opts->dgram_item_len);
+ if (index2 == 0 || dg_len2 == 0)
+ break;
+
+ /* wDatagramIndex[1] */
+ if (ndp_after_header) {
+ if (index2 < opts->nth_size + opts->ndp_size) {
+ INFO(port->func.config->cdev,
+ "Bad index: %#X\n", index2);
+ goto err;
+ }
+ } else {
+ if (index2 < opts->nth_size + opts->dpe_size) {
+ INFO(port->func.config->cdev,
+ "Bad index: %#X\n", index2);
+ goto err;
+ }
+ }
+ if (index2 > block_len - opts->dpe_size) {
+ INFO(port->func.config->cdev,
+ "Bad index: %#X\n", index2);
+ goto err;
+ }
+
+ /* wDatagramLength[1] */
+ if ((dg_len2 < 14 + crc_len) ||
+ (dg_len2 > frame_max)) {
+ INFO(port->func.config->cdev,
+ "Bad dgram length: %#X\n", dg_len);
+ goto err;
+ }
+
/*
* Copy the data into a new skb.
* This ensures the truesize is correct
ndp_len -= 2 * (opts->dgram_item_len * 2);
dgram_counter++;
-
- if (index2 == 0 || dg_len2 == 0)
- break;
} while (ndp_len > 2 * (opts->dgram_item_len * 2));
} while (ndp_index);
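A standalone sketch of the bounds the new checks enforce (helper names are invented; the simplified index rule below ignores the first-NDP special case handled via ndp_after_header):

#include <stdbool.h>

/* wDatagramIndex must point past the NTH header and leave room for
 * a datagram pointer entry before the end of the block. */
static bool ncm_index_ok(unsigned int index, unsigned int nth_size,
			 unsigned int dpe_size, unsigned int block_len)
{
	return index >= nth_size && index <= block_len - dpe_size;
}

/* wDatagramLength must cover an Ethernet header (+ optional CRC) and
 * may not exceed the interface's wMaxSegmentSize. */
static bool ncm_dgram_len_ok(unsigned int dg_len, unsigned int crc_len,
			     unsigned int frame_max)
{
	return dg_len >= 14 + crc_len && dg_len <= frame_max;
}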
goto err_sts;
return 0;
+
err_sts:
- usb_ep_free_request(fu->ep_status, stream->req_status);
- stream->req_status = NULL;
-err_out:
usb_ep_free_request(fu->ep_out, stream->req_out);
stream->req_out = NULL;
+err_out:
+ usb_ep_free_request(fu->ep_in, stream->req_in);
+ stream->req_in = NULL;
out:
return -ENOMEM;
}
#define __U_F_H__
#include <linux/usb/gadget.h>
+#include <linux/overflow.h>
/* Variable Length Array Macros **********************************************/
#define vla_group(groupname) size_t groupname##__next = 0
#define vla_item(groupname, type, name, n) \
size_t groupname##_##name##__offset = ({ \
- size_t align_mask = __alignof__(type) - 1; \
- size_t offset = (groupname##__next + align_mask) & ~align_mask;\
- size_t size = (n) * sizeof(type); \
- groupname##__next = offset + size; \
+ size_t offset = 0; \
+ if (groupname##__next != SIZE_MAX) { \
+ size_t align_mask = __alignof__(type) - 1; \
+ size_t size = array_size(n, sizeof(type)); \
+ offset = (groupname##__next + align_mask) & \
+ ~align_mask; \
+ if (check_add_overflow(offset, size, \
+ &groupname##__next)) { \
+ groupname##__next = SIZE_MAX; \
+ offset = 0; \
+ } \
+ } \
offset; \
})
#define vla_item_with_sz(groupname, type, name, n) \
- size_t groupname##_##name##__sz = (n) * sizeof(type); \
- size_t groupname##_##name##__offset = ({ \
- size_t align_mask = __alignof__(type) - 1; \
- size_t offset = (groupname##__next + align_mask) & ~align_mask;\
- size_t size = groupname##_##name##__sz; \
- groupname##__next = offset + size; \
- offset; \
+ size_t groupname##_##name##__sz = array_size(n, sizeof(type)); \
+ size_t groupname##_##name##__offset = ({ \
+ size_t offset = 0; \
+ if (groupname##__next != SIZE_MAX) { \
+ size_t align_mask = __alignof__(type) - 1; \
+ offset = (groupname##__next + align_mask) & \
+ ~align_mask; \
+ if (check_add_overflow(offset, groupname##_##name##__sz,\
+ &groupname##__next)) { \
+ groupname##__next = SIZE_MAX; \
+ offset = 0; \
+ } \
+ } \
+ offset; \
})
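A hypothetical usage sketch (identifiers below are invented; SIZE_MAX is the overflow sentinel the hardened macros now set):

	vla_group(ctx);
	vla_item(ctx, struct usb_request *, reqs, n_reqs);
	vla_item(ctx, char, names, names_len);

	if (ctx__next == SIZE_MAX)	/* a size/offset computation overflowed */
		return -EINVAL;

	buf = kzalloc(ctx__next, GFP_KERNEL);	/* total group size */
	if (!buf)
		return -ENOMEM;
	reqs = vla_ptr(buf, ctx, reqs);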
#define vla_ptr(ptr, groupname, name) \
switch (fifo_mode) {
default:
fifo_mode = 0;
- /* fall through */
+ fallthrough;
case 0:
udc->fifo_cfg = NULL;
n = 0;
break;
case FSL_USB2_PHY_UTMI_WIDE:
portctrl |= PORTSCX_PTW_16BIT;
- /* fall through */
+ fallthrough;
case FSL_USB2_PHY_UTMI:
case FSL_USB2_PHY_UTMI_DUAL:
if (udc->pdata->have_sysif_regs) {
case PXA250_A0:
case PXA250_A1:
/* A0/A1 "not released"; ep 13, 15 unusable */
- /* fall through */
+ fallthrough;
case PXA250_B2: case PXA210_B2:
case PXA250_B1: case PXA210_B1:
case PXA250_B0: case PXA210_B0:
/* OUT-DMA is broken ... */
- /* fall through */
+ fallthrough;
case PXA250_C0: case PXA210_C0:
break;
#elif defined(CONFIG_ARCH_IXP4XX)
spin_lock_irqsave(&isp116x->lock, flags);
isp116x_write_reg32(isp116x, HCRHSTATUS, RH_HS_OCIC);
spin_unlock_irqrestore(&isp116x->lock, flags);
- /* fall through */
+ fallthrough;
case C_HUB_LOCAL_POWER:
DBG("C_HUB_LOCAL_POWER\n");
break;
isp116x_write_reg32(isp116x, HCCONTROL,
(val & ~HCCONTROL_HCFS) |
HCCONTROL_USB_RESET);
- /* fall through */
+ fallthrough;
case HCCONTROL_USB_RESET:
ret = -EBUSY;
- /* fall through */
+ fallthrough;
default: /* HCCONTROL_USB_SUSPEND */
spin_unlock_irqrestore(&isp116x->lock, flags);
break;
hcd->rsrc_len = resource_size(res);
irq = platform_get_irq(pdev, 0);
- if (!irq) {
- dev_err(&pdev->dev, "Failed to get IRQ\n");
- err = -ENODEV;
+ if (irq < 0) {
+ err = irq;
goto fail_io;
}
ehci_bios_handoff(pdev, op_reg_base, cap, offset);
break;
case 0: /* Illegal reserved cap, set cap=0 so we exit */
- cap = 0; /* fall through */
+ cap = 0;
+ fallthrough;
default:
dev_warn(&pdev->dev,
"EHCI: unrecognized capability %02x\n",
switch (comp_code) {
case COMP_SUCCESS:
remain_length = 0;
- /* FALLTHROUGH */
+ fallthrough;
case COMP_SHORT_PACKET:
status = 0;
break;
static int xhci_endpoint_context_show(struct seq_file *s, void *unused)
{
- int dci;
+ int ep_index;
dma_addr_t dma;
struct xhci_hcd *xhci;
struct xhci_ep_ctx *ep_ctx;
xhci = hcd_to_xhci(bus_to_hcd(dev->udev->bus));
- for (dci = 1; dci < 32; dci++) {
- ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, dci);
- dma = dev->out_ctx->dma + dci * CTX_SIZE(xhci->hcc_params);
+ for (ep_index = 0; ep_index < 31; ep_index++) {
+ ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
+ dma = dev->out_ctx->dma + (ep_index + 1) * CTX_SIZE(xhci->hcc_params);
seq_printf(s, "%pad: %s\n", &dma,
xhci_decode_ep_context(le32_to_cpu(ep_ctx->ep_info),
le32_to_cpu(ep_ctx->ep_info2),
{
u32 pls = status_reg & PORT_PLS_MASK;
- /* resume state is a xHCI internal state.
- * Do not report it to usb core, instead, pretend to be U3,
- * thus usb core knows it's not ready for transfer
- */
- if (pls == XDEV_RESUME) {
- *status |= USB_SS_PORT_LS_U3;
- return;
- }
-
/* When the CAS bit is set then warm reset
* should be performed on port
*/
pls |= USB_PORT_STAT_CONNECTION;
} else {
/*
+ * Resume state is an xHCI internal state. Do not report it to
+ * usb core, instead, pretend to be U3, thus usb core knows
+ * it's not ready for transfer.
+ */
+ if (pls == XDEV_RESUME) {
+ *status |= USB_SS_PORT_LS_U3;
+ return;
+ }
+
+ /*
* If CAS bit isn't set but the Port is already at
* Compliance Mode, fake a connection so the USB core
* notices the Compliance state and resets the port.
break;
case USB_PORT_FEAT_C_SUSPEND:
bus_state->port_c_suspend &= ~(1 << wIndex);
- /* fall through */
+ fallthrough;
case USB_PORT_FEAT_C_RESET:
case USB_PORT_FEAT_C_BH_PORT_RESET:
case USB_PORT_FEAT_C_CONNECTION:
interval = xhci_parse_microframe_interval(udev, ep);
break;
}
- /* Fall through - SS and HS isoc/int have same decoding */
+ fallthrough; /* SS and HS isoc/int have same decoding */
case USB_SPEED_SUPER_PLUS:
case USB_SPEED_SUPER:
* since it uses the same rules as low speed interrupt
* endpoints.
*/
- /* fall through */
+ fallthrough;
case USB_SPEED_LOW:
if (usb_endpoint_xfer_int(&ep->desc) ||
#define RENESAS_RETRY 10000
#define RENESAS_DELAY 10
-#define ROM_VALID_01 0x2013
-#define ROM_VALID_02 0x2026
-
-static int renesas_verify_fw_version(struct pci_dev *pdev, u32 version)
-{
- switch (version) {
- case ROM_VALID_01:
- case ROM_VALID_02:
- return 0;
- }
- dev_err(&pdev->dev, "FW has invalid version :%d\n", version);
- return -EINVAL;
-}
-
static int renesas_fw_download_image(struct pci_dev *dev,
const u32 *fw, size_t step, bool rom)
{
version &= RENESAS_FW_VERSION_FIELD;
version = version >> RENESAS_FW_VERSION_OFFSET;
-
- err = renesas_verify_fw_version(pdev, version);
- if (err)
- return err;
+ dev_dbg(&pdev->dev, "Found ROM version: %x\n", version);
/*
* Test if ROM is present and loaded, if so we can skip everything
break;
xhci_dbg(xhci, "TRB error %u, halted endpoint index = %u\n",
trb_comp_code, ep_index);
- /* else fall through */
+ fallthrough;
case COMP_STALL_ERROR:
/* Did we transfer part of the data (middle) phase? */
if (trb_type == TRB_DATA || trb_type == TRB_NORMAL)
unsigned int i, phy_count = 0;
for (i = 0; i < tegra->soc->num_types; i++) {
- if (!strncmp(tegra->soc->phy_types[i].name, "usb2",
+ if (!strncmp(tegra->soc->phy_types[i].name, name,
strlen(name)))
return tegra->phys[phy_count+port];
INIT_WORK(&tegra->id_work, tegra_xhci_id_work);
tegra->id_nb.notifier_call = tegra_xhci_id_notify;
+ tegra->otg_usb2_port = -EINVAL;
+ tegra->otg_usb3_port = -EINVAL;
for (i = 0; i < tegra->num_usb_phys; i++) {
struct phy *phy = tegra_xusb_get_phy(tegra, "usb2", i);
wait_for_completion(cfg_cmd->completion);
- ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE;
xhci_free_command(xhci, cfg_cmd);
cleanup:
xhci_free_command(xhci, stop_cmd);
+ if (ep->ep_state & EP_SOFT_CLEAR_TOGGLE)
+ ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE;
}
static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
break;
}
/* Otherwise the calculation is the same as isoc eps */
- /* fall through */
+ fallthrough;
case USB_ENDPOINT_XFER_ISOC:
timeout_ns = xhci_service_interval_to_ns(desc);
timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100);
USB_DT_SS_HUB_SIZE, USB_CTRL_GET_TIMEOUT);
if (ret < (USB_DT_HUB_NONVAR_SIZE + 2)) {
dev_err(&hdev->dev, "wrong root hub descriptor read %d\n", ret);
- return ret;
+ return ret < 0 ? ret : -EINVAL;
}
/* submit urb to poll interrupt endpoint */
prepare_to_wait(&dev->waitq, &wait, TASK_INTERRUPTIBLE);
dev_dbg(&dev->interface->dev, "%s - submit %c\n", __func__,
dev->cntl_buffer[0]);
- retval = usb_submit_urb(dev->cntl_urb, GFP_KERNEL);
+ retval = usb_submit_urb(dev->cntl_urb, GFP_ATOMIC);
if (retval >= 0)
timeout = schedule_timeout(YUREX_WRITE_TIMEOUT);
finish_wait(&dev->waitq, &wait);
musb_dbg(musb, "%cX DMA%d not allocated!",
cppi_ch->transmit ? 'T' : 'R',
cppi_ch->index);
- /* FALLTHROUGH */
+ fallthrough;
case MUSB_DMA_STATUS_FREE:
break;
}
case OTG_STATE_B_IDLE:
if (!musb->is_active)
break;
- /* fall through */
+ fallthrough;
case OTG_STATE_B_PERIPHERAL:
musb_g_suspend(musb);
musb->is_active = musb->g.b_hnp_enable;
case OTG_STATE_A_PERIPHERAL:
musb_hnp_stop(musb);
musb_root_disconnect(musb);
- /* FALLTHROUGH */
+ fallthrough;
case OTG_STATE_B_WAIT_ACON:
- /* FALLTHROUGH */
case OTG_STATE_B_PERIPHERAL:
case OTG_STATE_B_IDLE:
musb_g_disconnect(musb);
switch (musb->xceiv->otg->state) {
case OTG_STATE_A_SUSPEND:
musb_g_reset(musb);
- /* FALLTHROUGH */
+ fallthrough;
case OTG_STATE_A_WAIT_BCON: /* OPT TD.4.7-900ms */
/* never use invalid T(a_wait_bcon) */
musb_dbg(musb, "HNP: in %s, %d msec timeout",
break;
case OTG_STATE_B_IDLE:
musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
- /* FALLTHROUGH */
+ fallthrough;
case OTG_STATE_B_PERIPHERAL:
musb_g_reset(musb);
break;
switch (fifo_mode) {
default:
fifo_mode = 0;
- /* FALLTHROUGH */
+ fallthrough;
case 0:
cfg = mode_0_cfg;
n = ARRAY_SIZE(mode_0_cfg);
musb->quirk_retries--;
return;
}
- /* fall through */
+ fallthrough;
case MUSB_QUIRK_A_DISCONNECT_19:
if (musb->quirk_retries && !musb->flush_irq_work) {
musb_dbg(musb,
dsps_mod_timer_optional(glue);
break;
}
- /* fall through */
+ fallthrough;
case OTG_STATE_A_WAIT_BCON:
/* keep VBUS on for host-only mode */
}
musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
skip_session = 1;
- /* fall through */
+ fallthrough;
case OTG_STATE_A_IDLE:
case OTG_STATE_B_IDLE:
case USB_SPEED_SUPER:
dev_warn(dev, "ignore incorrect maximum_speed "
"(super-speed) setting in dts");
- /* fall through */
+ fallthrough;
default:
config->maximum_speed = USB_SPEED_HIGH;
}
musb_writeb(mbase, MUSB_TESTMODE,
musb->test_mode_nr);
}
- /* FALLTHROUGH */
+ fallthrough;
case MUSB_EP0_STAGE_STATUSOUT:
/* end of sequence #1: write to host (TX state) */
*/
retval = IRQ_HANDLED;
musb->ep0_state = MUSB_EP0_STAGE_SETUP;
- /* FALLTHROUGH */
+ fallthrough;
case MUSB_EP0_STAGE_SETUP:
setup:
qh = first_qh(head);
break;
}
- /* fall through */
+ fallthrough;
case USB_ENDPOINT_XFER_ISOC:
case USB_ENDPOINT_XFER_INT:
musb->ep0_stage = MUSB_EP0_OUT;
more = true;
}
- /* FALLTHROUGH */
+ fallthrough;
case MUSB_EP0_OUT:
fifo_count = min_t(size_t, qh->maxpacket,
urb->transfer_buffer_length -
interval = max_t(u8, epd->bInterval, 1);
break;
}
- /* FALLTHROUGH */
+ fallthrough;
case USB_ENDPOINT_XFER_ISOC:
/* ISO always uses logarithmic encoding */
interval = min_t(u8, epd->bInterval, 16);
musb->g.is_a_peripheral = 1;
break;
}
- /* FALLTHROUGH */
+ fallthrough;
case OTG_STATE_A_HOST:
musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON;
musb->is_active = 0;
if (error)
break;
musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
- /* Fall through */
+ fallthrough;
case OTG_STATE_A_WAIT_VRISE:
case OTG_STATE_A_WAIT_BCON:
case OTG_STATE_A_HOST:
dev_dbg(musb->controller, "Nothing connected %s, turning off VBUS\n",
usb_otg_state_string(musb->xceiv->otg->state));
}
- /* FALLTHROUGH */
+ fallthrough;
case OTG_STATE_A_IDLE:
tusb_musb_set_vbus(musb, 0);
default:
/* Wait for PHY to reset */
usleep_range(30, 300);
+ reg = readl(priv->base + REG_USBPCR_OFFSET);
writel(reg & ~USBPCR_POR, priv->base + REG_USBPCR_OFFSET);
usleep_range(300, 1000);
case 0x64:
info->pageshift = 8;
info->smallpageshift = 1;
- /* fall through */
+ fallthrough;
case 0x5d: // 5d is a ROM card with pagesize 512.
return 0x00200000;
break;
case DMA_BIDIRECTIONAL:
cmdinfo->state |= ALLOC_DATA_IN_URB | SUBMIT_DATA_IN_URB;
- /* fall through */
+ fallthrough;
case DMA_TO_DEVICE:
cmdinfo->state |= ALLOC_DATA_OUT_URB | SUBMIT_DATA_OUT_URB;
case DMA_NONE:
"JMicron",
"USB to ATA/ATAPI Bridge",
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
- US_FL_BROKEN_FUA ),
+ US_FL_BROKEN_FUA | US_FL_IGNORE_UAS ),
/* Reported by Andrey Rahmatullin <wrar@altlinux.org> */
UNUSUAL_DEV( 0x4102, 0x1020, 0x0100, 0x0100,
* and don't forget to CC: the USB development list <linux-usb@vger.kernel.org>
*/
+/* Reported-by: Till Dörges <doerges@pre-sense.de> */
+UNUSUAL_DEV(0x054c, 0x087d, 0x0000, 0x9999,
+ "Sony",
+ "PSZ-HA*",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_REPORT_OPCODES),
+
/* Reported-by: Julian Groß <julian.g@posteo.de> */
UNUSUAL_DEV(0x059f, 0x105f, 0x0000, 0x9999,
"LaCie",
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_BROKEN_FUA),
+/* Reported-by: Thinh Nguyen <thinhn@synopsys.com> */
+UNUSUAL_DEV(0x154b, 0xf00d, 0x0000, 0x9999,
+ "PNY",
+ "Pro Elite SSD",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_ATA_1X),
+
/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
"VIA",
case 0x3:
if (sink)
return TYPEC_CC_RP_3_0;
- /* fall through */
+ fallthrough;
case 0x0:
default:
return TYPEC_CC_OPEN;
tcpm_set_state(port, SNK_HARD_RESET_SINK_OFF, 0);
break;
case SRC_HARD_RESET_VBUS_OFF:
- tcpm_set_vconn(port, true);
+ /*
+ * 7.1.5 Response to Hard Resets
+ * Hard Reset Signaling indicates a communication failure has occurred and the
+ * Source Shall stop driving VCONN, Shall remove Rp from the VCONN pin and Shall
+ * drive VBUS to vSafe0V as shown in Figure 7-9.
+ */
+ tcpm_set_vconn(port, false);
tcpm_set_vbus(port, false);
tcpm_set_roles(port, port->self_powered, TYPEC_SOURCE,
tcpm_data_role_for_source(port));
- tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, PD_T_SRC_RECOVER);
+ /*
+ * If tcpc fails to notify vbus off, TCPM will wait for PD_T_SAFE_0V +
+ * PD_T_SRC_RECOVER before turning vbus back on.
+ * From Table 7-12 Sequence Description for a Source Initiated Hard Reset:
+ * 4. Policy Engine waits tPSHardReset after sending Hard Reset Signaling and then
+ * tells the Device Policy Manager to instruct the power supply to perform a
+ * Hard Reset. The transition to vSafe0V Shall occur within tSafe0V (t2).
+ * 5. After tSrcRecover the Source applies power to VBUS in an attempt to
+ * re-establish communication with the Sink and resume USB Default Operation.
+ * The transition to vSafe5V Shall occur within tSrcTurnOn(t4).
+ */
+ tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, PD_T_SAFE_0V + PD_T_SRC_RECOVER);
break;
case SRC_HARD_RESET_VBUS_ON:
+ tcpm_set_vconn(port, true);
tcpm_set_vbus(port, true);
port->tcpc->set_pd_rx(port->tcpc, true);
tcpm_set_attached_state(port, true);
tcpm_set_state(port, SNK_HARD_RESET_WAIT_VBUS, 0);
break;
case SRC_HARD_RESET_VBUS_OFF:
- tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, 0);
+ /*
+ * After establishing the vSafe0V voltage condition on VBUS, the Source Shall wait
+ * tSrcRecover before re-applying VCONN and restoring VBUS to vSafe5V.
+ */
+ tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, PD_T_SRC_RECOVER);
break;
case HARD_RESET_SEND:
break;
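The resulting source-side hard reset sequence, sketched (timer names are the PD-spec values cited in the comments above):
/*
 *  Hard Reset signaled
 *    SRC_HARD_RESET_VBUS_OFF: VCONN off, VBUS driven to vSafe0V
 *      - TCPC reports vbus-off: wait tSrcRecover after vSafe0V
 *      - no vbus-off callback:  wait tSafe0V + tSrcRecover
 *    SRC_HARD_RESET_VBUS_ON:  VCONN on, VBUS to vSafe5V, PD rx re-enabled
 */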
struct typec_altmode *alt;
struct ucsi_dp *dp;
- mutex_lock(&con->lock);
-
/* We can't rely on the firmware with the capabilities. */
desc->vdo |= DP_CAP_DP_SIGNALING | DP_CAP_RECEPTACLE;
desc->vdo |= all_assignments << 16;
alt = typec_port_register_altmode(con->port, desc);
- if (IS_ERR(alt)) {
- mutex_unlock(&con->lock);
+ if (IS_ERR(alt))
return alt;
- }
dp = devm_kzalloc(&alt->dev, sizeof(*dp), GFP_KERNEL);
if (!dp) {
typec_unregister_altmode(alt);
- mutex_unlock(&con->lock);
return ERR_PTR(-ENOMEM);
}
alt->ops = &ucsi_displayport_ops;
typec_altmode_set_drvdata(alt, dp);
- mutex_unlock(&con->lock);
-
return alt;
}
return UCSI_CCI_LENGTH(cci);
}
-static int ucsi_run_command(struct ucsi *ucsi, u64 command,
- void *data, size_t size)
+int ucsi_send_command(struct ucsi *ucsi, u64 command,
+ void *data, size_t size)
{
u8 length;
int ret;
+ mutex_lock(&ucsi->ppm_lock);
+
ret = ucsi_exec_command(ucsi, command);
if (ret < 0)
- return ret;
+ goto out;
length = ret;
if (data) {
ret = ucsi->ops->read(ucsi, UCSI_MESSAGE_IN, data, size);
if (ret)
- return ret;
+ goto out;
}
ret = ucsi_acknowledge_command(ucsi);
if (ret)
- return ret;
-
- return length;
-}
-
-int ucsi_send_command(struct ucsi *ucsi, u64 command,
- void *retval, size_t size)
-{
- int ret;
+ goto out;
- mutex_lock(&ucsi->ppm_lock);
- ret = ucsi_run_command(ucsi, command, retval, size);
+ ret = length;
+out:
mutex_unlock(&ucsi->ppm_lock);
-
return ret;
}
EXPORT_SYMBOL_GPL(ucsi_send_command);
int i;
command = UCSI_GET_CURRENT_CAM | UCSI_CONNECTOR_NUMBER(con->num);
- ret = ucsi_run_command(con->ucsi, command, &cur, sizeof(cur));
+ ret = ucsi_send_command(con->ucsi, command, &cur, sizeof(cur));
if (ret < 0) {
if (con->ucsi->version > 0x0100) {
dev_err(con->ucsi->dev,
command |= UCSI_GET_ALTMODE_RECIPIENT(recipient);
command |= UCSI_GET_ALTMODE_CONNECTOR_NUMBER(con->num);
command |= UCSI_GET_ALTMODE_OFFSET(i);
- len = ucsi_run_command(con->ucsi, command, &alt, sizeof(alt));
+ len = ucsi_send_command(con->ucsi, command, &alt, sizeof(alt));
/*
* We are collecting all altmodes first and then registering.
* Some type-C device will return zero length data beyond last
command |= UCSI_GET_ALTMODE_RECIPIENT(recipient);
command |= UCSI_GET_ALTMODE_CONNECTOR_NUMBER(con->num);
command |= UCSI_GET_ALTMODE_OFFSET(i);
- len = ucsi_run_command(con->ucsi, command, alt, sizeof(alt));
+ len = ucsi_send_command(con->ucsi, command, alt, sizeof(alt));
if (len <= 0)
return len;
command |= UCSI_GET_PDOS_PARTNER_PDO(is_partner);
command |= UCSI_GET_PDOS_NUM_PDOS(UCSI_MAX_PDOS - 1);
command |= UCSI_GET_PDOS_SRC_PDOS;
- ret = ucsi_run_command(ucsi, command, con->src_pdos,
+ ret = ucsi_send_command(ucsi, command, con->src_pdos,
sizeof(con->src_pdos));
if (ret < 0) {
dev_err(ucsi->dev, "UCSI_GET_PDOS failed (%d)\n", ret);
*/
command = UCSI_GET_CAM_SUPPORTED;
command |= UCSI_CONNECTOR_NUMBER(con->num);
- ucsi_run_command(con->ucsi, command, NULL, 0);
+ ucsi_send_command(con->ucsi, command, NULL, 0);
}
if (con->status.change & UCSI_CONSTAT_PARTNER_CHANGE)
u32 cci;
int ret;
+ mutex_lock(&ucsi->ppm_lock);
+
ret = ucsi->ops->async_write(ucsi, UCSI_CONTROL, &command,
sizeof(command));
if (ret < 0)
- return ret;
+ goto out;
tmo = jiffies + msecs_to_jiffies(UCSI_TIMEOUT_MS);
do {
- if (time_is_before_jiffies(tmo))
- return -ETIMEDOUT;
+ if (time_is_before_jiffies(tmo)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
ret = ucsi->ops->read(ucsi, UCSI_CCI, &cci, sizeof(cci));
if (ret)
- return ret;
+ goto out;
/* If the PPM is still doing something else, reset it again. */
if (cci & ~UCSI_CCI_RESET_COMPLETE) {
&command,
sizeof(command));
if (ret < 0)
- return ret;
+ goto out;
}
msleep(20);
} while (!(cci & UCSI_CCI_RESET_COMPLETE));
- return 0;
+out:
+ mutex_unlock(&ucsi->ppm_lock);
+ return ret;
}
static int ucsi_role_cmd(struct ucsi_connector *con, u64 command)
u64 c;
/* PPM most likely stopped responding. Resetting everything. */
- mutex_lock(&con->ucsi->ppm_lock);
ucsi_reset_ppm(con->ucsi);
- mutex_unlock(&con->ucsi->ppm_lock);
c = UCSI_SET_NOTIFICATION_ENABLE | con->ucsi->ntfy;
ucsi_send_command(con->ucsi, c, NULL, 0);
con->num = index + 1;
con->ucsi = ucsi;
+ /* Delay other interactions with the con until registration is complete */
+ mutex_lock(&con->lock);
+
/* Get connector capability */
command = UCSI_GET_CONNECTOR_CAPABILITY;
command |= UCSI_CONNECTOR_NUMBER(con->num);
- ret = ucsi_run_command(ucsi, command, &con->cap, sizeof(con->cap));
+ ret = ucsi_send_command(ucsi, command, &con->cap, sizeof(con->cap));
if (ret < 0)
- return ret;
+ goto out;
if (con->cap.op_mode & UCSI_CONCAP_OPMODE_DRP)
cap->data = TYPEC_PORT_DRD;
ret = ucsi_register_port_psy(con);
if (ret)
- return ret;
+ goto out;
/* Register the connector */
con->port = typec_register_port(ucsi->dev, cap);
- if (IS_ERR(con->port))
- return PTR_ERR(con->port);
+ if (IS_ERR(con->port)) {
+ ret = PTR_ERR(con->port);
+ goto out;
+ }
/* Alternate modes */
ret = ucsi_register_altmodes(con, UCSI_RECIPIENT_CON);
- if (ret)
+ if (ret) {
dev_err(ucsi->dev, "con%d: failed to register alt modes\n",
con->num);
+ goto out;
+ }
/* Get the status */
command = UCSI_GET_CONNECTOR_STATUS | UCSI_CONNECTOR_NUMBER(con->num);
- ret = ucsi_run_command(ucsi, command, &con->status,
- sizeof(con->status));
+ ret = ucsi_send_command(ucsi, command, &con->status, sizeof(con->status));
if (ret < 0) {
dev_err(ucsi->dev, "con%d: failed to get status\n", con->num);
- return 0;
+ ret = 0;
+ goto out;
}
+ ret = 0; /* ucsi_send_command() returns length on success */
switch (UCSI_CONSTAT_PARTNER_TYPE(con->status.flags)) {
case UCSI_CONSTAT_PARTNER_TYPE_UFP:
if (con->partner) {
ret = ucsi_register_altmodes(con, UCSI_RECIPIENT_SOP);
- if (ret)
+ if (ret) {
dev_err(ucsi->dev,
"con%d: failed to register alternate modes\n",
con->num);
- else
+ ret = 0;
+ } else {
ucsi_altmode_update_active(con);
+ }
}
trace_ucsi_register_port(con->num, &con->status);
- return 0;
+out:
+ mutex_unlock(&con->lock);
+ return ret;
}
/**
int ret;
int i;
- mutex_lock(&ucsi->ppm_lock);
-
/* Reset the PPM */
ret = ucsi_reset_ppm(ucsi);
if (ret) {
/* Enable basic notifications */
ucsi->ntfy = UCSI_ENABLE_NTFY_CMD_COMPLETE | UCSI_ENABLE_NTFY_ERROR;
command = UCSI_SET_NOTIFICATION_ENABLE | ucsi->ntfy;
- ret = ucsi_run_command(ucsi, command, NULL, 0);
+ ret = ucsi_send_command(ucsi, command, NULL, 0);
if (ret < 0)
goto err_reset;
/* Get PPM capabilities */
command = UCSI_GET_CAPABILITY;
- ret = ucsi_run_command(ucsi, command, &ucsi->cap, sizeof(ucsi->cap));
+ ret = ucsi_send_command(ucsi, command, &ucsi->cap, sizeof(ucsi->cap));
if (ret < 0)
goto err_reset;
/* Enable all notifications */
ucsi->ntfy = UCSI_ENABLE_NTFY_ALL;
command = UCSI_SET_NOTIFICATION_ENABLE | ucsi->ntfy;
- ret = ucsi_run_command(ucsi, command, NULL, 0);
+ ret = ucsi_send_command(ucsi, command, NULL, 0);
if (ret < 0)
goto err_unregister;
- mutex_unlock(&ucsi->ppm_lock);
-
return 0;
err_unregister:
err_reset:
ucsi_reset_ppm(ucsi);
err:
- mutex_unlock(&ucsi->ppm_lock);
-
return ret;
}
return;
}
+static bool usbip_match(struct usb_device *udev)
+{
+ return true;
+}
+
#ifdef CONFIG_PM
/* These functions need usb_port_suspend and usb_port_resume,
.name = "usbip-host",
.probe = stub_probe,
.disconnect = stub_disconnect,
+ .match = usbip_match,
#ifdef CONFIG_PM
.suspend = stub_suspend,
.resume = stub_resume,
void __iomem * const *base;
char config_msix_name[256];
struct vdpa_callback config_cb;
-
+ unsigned int config_irq;
};
struct ifcvf_adapter {
vf->vring[i].irq = -EINVAL;
}
+ devm_free_irq(&pdev->dev, vf->config_irq, vf);
ifcvf_free_irq_vectors(pdev);
}
snprintf(vf->config_msix_name, 256, "ifcvf[%s]-config\n",
pci_name(pdev));
vector = 0;
- irq = pci_irq_vector(pdev, vector);
- ret = devm_request_irq(&pdev->dev, irq,
+ vf->config_irq = pci_irq_vector(pdev, vector);
+ ret = devm_request_irq(&pdev->dev, vf->config_irq,
ifcvf_config_changed, 0,
vf->config_msix_name, vf);
+ if (ret) {
+ IFCVF_ERR(pdev, "Failed to request config irq\n");
+ return ret;
+ }
for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) {
snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d\n",
#define to_mvdev(__vdev) container_of((__vdev), struct mlx5_vdpa_dev, vdev)
#define VALID_FEATURES_MASK \
- (BIT(VIRTIO_NET_F_CSUM) | BIT(VIRTIO_NET_F_GUEST_CSUM) | \
- BIT(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) | BIT(VIRTIO_NET_F_MTU) | BIT(VIRTIO_NET_F_MAC) | \
- BIT(VIRTIO_NET_F_GUEST_TSO4) | BIT(VIRTIO_NET_F_GUEST_TSO6) | \
- BIT(VIRTIO_NET_F_GUEST_ECN) | BIT(VIRTIO_NET_F_GUEST_UFO) | BIT(VIRTIO_NET_F_HOST_TSO4) | \
- BIT(VIRTIO_NET_F_HOST_TSO6) | BIT(VIRTIO_NET_F_HOST_ECN) | BIT(VIRTIO_NET_F_HOST_UFO) | \
- BIT(VIRTIO_NET_F_MRG_RXBUF) | BIT(VIRTIO_NET_F_STATUS) | BIT(VIRTIO_NET_F_CTRL_VQ) | \
- BIT(VIRTIO_NET_F_CTRL_RX) | BIT(VIRTIO_NET_F_CTRL_VLAN) | \
- BIT(VIRTIO_NET_F_CTRL_RX_EXTRA) | BIT(VIRTIO_NET_F_GUEST_ANNOUNCE) | \
- BIT(VIRTIO_NET_F_MQ) | BIT(VIRTIO_NET_F_CTRL_MAC_ADDR) | BIT(VIRTIO_NET_F_HASH_REPORT) | \
- BIT(VIRTIO_NET_F_RSS) | BIT(VIRTIO_NET_F_RSC_EXT) | BIT(VIRTIO_NET_F_STANDBY) | \
- BIT(VIRTIO_NET_F_SPEED_DUPLEX) | BIT(VIRTIO_F_NOTIFY_ON_EMPTY) | \
- BIT(VIRTIO_F_ANY_LAYOUT) | BIT(VIRTIO_F_VERSION_1) | BIT(VIRTIO_F_ACCESS_PLATFORM) | \
- BIT(VIRTIO_F_RING_PACKED) | BIT(VIRTIO_F_ORDER_PLATFORM) | BIT(VIRTIO_F_SR_IOV))
+ (BIT_ULL(VIRTIO_NET_F_CSUM) | BIT_ULL(VIRTIO_NET_F_GUEST_CSUM) | \
+ BIT_ULL(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) | BIT_ULL(VIRTIO_NET_F_MTU) | BIT_ULL(VIRTIO_NET_F_MAC) | \
+ BIT_ULL(VIRTIO_NET_F_GUEST_TSO4) | BIT_ULL(VIRTIO_NET_F_GUEST_TSO6) | \
+ BIT_ULL(VIRTIO_NET_F_GUEST_ECN) | BIT_ULL(VIRTIO_NET_F_GUEST_UFO) | BIT_ULL(VIRTIO_NET_F_HOST_TSO4) | \
+ BIT_ULL(VIRTIO_NET_F_HOST_TSO6) | BIT_ULL(VIRTIO_NET_F_HOST_ECN) | BIT_ULL(VIRTIO_NET_F_HOST_UFO) | \
+ BIT_ULL(VIRTIO_NET_F_MRG_RXBUF) | BIT_ULL(VIRTIO_NET_F_STATUS) | BIT_ULL(VIRTIO_NET_F_CTRL_VQ) | \
+ BIT_ULL(VIRTIO_NET_F_CTRL_RX) | BIT_ULL(VIRTIO_NET_F_CTRL_VLAN) | \
+ BIT_ULL(VIRTIO_NET_F_CTRL_RX_EXTRA) | BIT_ULL(VIRTIO_NET_F_GUEST_ANNOUNCE) | \
+ BIT_ULL(VIRTIO_NET_F_MQ) | BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR) | BIT_ULL(VIRTIO_NET_F_HASH_REPORT) | \
+ BIT_ULL(VIRTIO_NET_F_RSS) | BIT_ULL(VIRTIO_NET_F_RSC_EXT) | BIT_ULL(VIRTIO_NET_F_STANDBY) | \
+ BIT_ULL(VIRTIO_NET_F_SPEED_DUPLEX) | BIT_ULL(VIRTIO_F_NOTIFY_ON_EMPTY) | \
+ BIT_ULL(VIRTIO_F_ANY_LAYOUT) | BIT_ULL(VIRTIO_F_VERSION_1) | BIT_ULL(VIRTIO_F_ACCESS_PLATFORM) | \
+ BIT_ULL(VIRTIO_F_RING_PACKED) | BIT_ULL(VIRTIO_F_ORDER_PLATFORM) | BIT_ULL(VIRTIO_F_SR_IOV))
#define VALID_STATUS_MASK \
(VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER | VIRTIO_CONFIG_S_DRIVER_OK | \
#define MLX5_LOG_VIO_FLAG(_feature) \
do { \
- if (features & BIT(_feature)) \
+ if (features & BIT_ULL(_feature)) \
mlx5_vdpa_info(mvdev, "%s\n", #_feature); \
} while (0)
static u16 get_features_12_3(u64 features)
{
- return (!!(features & BIT(VIRTIO_NET_F_HOST_TSO4)) << 9) |
- (!!(features & BIT(VIRTIO_NET_F_HOST_TSO6)) << 8) |
- (!!(features & BIT(VIRTIO_NET_F_CSUM)) << 7) |
- (!!(features & BIT(VIRTIO_NET_F_GUEST_CSUM)) << 6);
+ return (!!(features & BIT_ULL(VIRTIO_NET_F_HOST_TSO4)) << 9) |
+ (!!(features & BIT_ULL(VIRTIO_NET_F_HOST_TSO6)) << 8) |
+ (!!(features & BIT_ULL(VIRTIO_NET_F_CSUM)) << 7) |
+ (!!(features & BIT_ULL(VIRTIO_NET_F_GUEST_CSUM)) << 6);
}
static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
u64 result = 0;
if (dev_features & MLX5_VIRTIO_NET_F_GUEST_CSUM)
- result |= BIT(VIRTIO_NET_F_GUEST_CSUM);
+ result |= BIT_ULL(VIRTIO_NET_F_GUEST_CSUM);
if (dev_features & MLX5_VIRTIO_NET_F_CSUM)
- result |= BIT(VIRTIO_NET_F_CSUM);
+ result |= BIT_ULL(VIRTIO_NET_F_CSUM);
if (dev_features & MLX5_VIRTIO_NET_F_HOST_TSO6)
- result |= BIT(VIRTIO_NET_F_HOST_TSO6);
+ result |= BIT_ULL(VIRTIO_NET_F_HOST_TSO6);
if (dev_features & MLX5_VIRTIO_NET_F_HOST_TSO4)
- result |= BIT(VIRTIO_NET_F_HOST_TSO4);
+ result |= BIT_ULL(VIRTIO_NET_F_HOST_TSO4);
return result;
}
dev_features = MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, device_features_bits_mask);
ndev->mvdev.mlx_features = mlx_to_vritio_features(dev_features);
if (MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, virtio_version_1_0))
- ndev->mvdev.mlx_features |= BIT(VIRTIO_F_VERSION_1);
- ndev->mvdev.mlx_features |= BIT(VIRTIO_F_ACCESS_PLATFORM);
+ ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_F_VERSION_1);
+ ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_F_ACCESS_PLATFORM);
print_features(mvdev, ndev->mvdev.mlx_features, false);
return ndev->mvdev.mlx_features;
}
static int verify_min_features(struct mlx5_vdpa_dev *mvdev, u64 features)
{
- if (!(features & BIT(VIRTIO_F_ACCESS_PLATFORM)))
+ if (!(features & BIT_ULL(VIRTIO_F_ACCESS_PLATFORM)))
return -EOPNOTSUPP;
return 0;
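The BIT() to BIT_ULL() conversion above matters because virtio feature numbers run past 31: VIRTIO_F_VERSION_1 is bit 32, and BIT(n) expands to 1UL << n, which is only 32 bits wide on ILP32 builds, so the shift is undefined and the mask silently loses the bit. A small runnable demonstration of the 64-bit-safe form (the macros mirror the kernel definitions):

#include <stdint.h>
#include <stdio.h>

#define BIT(n)		(1UL << (n))	/* 32 bits wide on ILP32 targets */
#define BIT_ULL(n)	(1ULL << (n))	/* always at least 64 bits wide */

int main(void)
{
	/* Shifting a 32-bit 1UL by 32 would be undefined behaviour, so
	 * feature bits >= 32 must use the ULL variant. */
	uint64_t features = BIT_ULL(32);	/* e.g. VIRTIO_F_VERSION_1 */

	printf("bit 32 via BIT_ULL: %#llx\n", (unsigned long long)features);
	return 0;
}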
case VFIO_PCI_ERR_IRQ_INDEX:
if (pci_is_pcie(vdev->pdev))
break;
- /* fall through */
+ fallthrough;
default:
return -EINVAL;
}
struct vfio_pci_ioeventfd {
struct list_head next;
+ struct vfio_pci_device *vdev;
struct virqfd *virqfd;
void __iomem *addr;
uint64_t data;
loff_t pos;
int bar;
int count;
+ bool test_mem;
};
struct vfio_pci_irq_ctx {
#define vfio_ioread8 ioread8
#define vfio_iowrite8 iowrite8
+#define VFIO_IOWRITE(size) \
+static int vfio_pci_iowrite##size(struct vfio_pci_device *vdev, \
+ bool test_mem, u##size val, void __iomem *io) \
+{ \
+ if (test_mem) { \
+ down_read(&vdev->memory_lock); \
+ if (!__vfio_pci_memory_enabled(vdev)) { \
+ up_read(&vdev->memory_lock); \
+ return -EIO; \
+ } \
+ } \
+ \
+ vfio_iowrite##size(val, io); \
+ \
+ if (test_mem) \
+ up_read(&vdev->memory_lock); \
+ \
+ return 0; \
+}
+
+VFIO_IOWRITE(8)
+VFIO_IOWRITE(16)
+VFIO_IOWRITE(32)
+#ifdef iowrite64
+VFIO_IOWRITE(64)
+#endif
+
+#define VFIO_IOREAD(size) \
+static int vfio_pci_ioread##size(struct vfio_pci_device *vdev, \
+ bool test_mem, u##size *val, void __iomem *io) \
+{ \
+ if (test_mem) { \
+ down_read(&vdev->memory_lock); \
+ if (!__vfio_pci_memory_enabled(vdev)) { \
+ up_read(&vdev->memory_lock); \
+ return -EIO; \
+ } \
+ } \
+ \
+ *val = vfio_ioread##size(io); \
+ \
+ if (test_mem) \
+ up_read(&vdev->memory_lock); \
+ \
+ return 0; \
+}
+
+VFIO_IOREAD(8)
+VFIO_IOREAD(16)
+VFIO_IOREAD(32)
+
/*
* Read or write from an __iomem region (MMIO or I/O port) with an excluded
* range which is inaccessible. The excluded range drops writes and fills
* reads with -1. This is intended for handling MSI-X vector tables and
* leftover space for ROM BARs.
*/
-static ssize_t do_io_rw(void __iomem *io, char __user *buf,
+static ssize_t do_io_rw(struct vfio_pci_device *vdev, bool test_mem,
+ void __iomem *io, char __user *buf,
loff_t off, size_t count, size_t x_start,
size_t x_end, bool iswrite)
{
ssize_t done = 0;
+ int ret;
while (count) {
size_t fillable, filled;
if (copy_from_user(&val, buf, 4))
return -EFAULT;
- vfio_iowrite32(val, io + off);
+ ret = vfio_pci_iowrite32(vdev, test_mem,
+ val, io + off);
+ if (ret)
+ return ret;
} else {
- val = vfio_ioread32(io + off);
+ ret = vfio_pci_ioread32(vdev, test_mem,
+ &val, io + off);
+ if (ret)
+ return ret;
if (copy_to_user(buf, &val, 4))
return -EFAULT;
if (copy_from_user(&val, buf, 2))
return -EFAULT;
- vfio_iowrite16(val, io + off);
+ ret = vfio_pci_iowrite16(vdev, test_mem,
+ val, io + off);
+ if (ret)
+ return ret;
} else {
- val = vfio_ioread16(io + off);
+ ret = vfio_pci_ioread16(vdev, test_mem,
+ &val, io + off);
+ if (ret)
+ return ret;
if (copy_to_user(buf, &val, 2))
return -EFAULT;
if (copy_from_user(&val, buf, 1))
return -EFAULT;
- vfio_iowrite8(val, io + off);
+ ret = vfio_pci_iowrite8(vdev, test_mem,
+ val, io + off);
+ if (ret)
+ return ret;
} else {
- val = vfio_ioread8(io + off);
+ ret = vfio_pci_ioread8(vdev, test_mem,
+ &val, io + off);
+ if (ret)
+ return ret;
if (copy_to_user(buf, &val, 1))
return -EFAULT;
count = min(count, (size_t)(end - pos));
- if (res->flags & IORESOURCE_MEM) {
- down_read(&vdev->memory_lock);
- if (!__vfio_pci_memory_enabled(vdev)) {
- up_read(&vdev->memory_lock);
- return -EIO;
- }
- }
-
if (bar == PCI_ROM_RESOURCE) {
/*
* The ROM can fill less space than the BAR, so we start the
x_end = vdev->msix_offset + vdev->msix_size;
}
- done = do_io_rw(io, buf, pos, count, x_start, x_end, iswrite);
+ done = do_io_rw(vdev, res->flags & IORESOURCE_MEM, io, buf, pos,
+ count, x_start, x_end, iswrite);
if (done >= 0)
*ppos += done;
if (bar == PCI_ROM_RESOURCE)
pci_unmap_rom(pdev, io);
out:
- if (res->flags & IORESOURCE_MEM)
- up_read(&vdev->memory_lock);
-
return done;
}
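The VFIO_IOWRITE/VFIO_IOREAD wrappers and the reworked do_io_rw() above move the memory-enable check from once per request to once per access: each read or write takes memory_lock shared and bails out with -EIO if BAR decoding was disabled in the meantime. A simplified userspace sketch of that guard, with a pthread rwlock in place of the kernel rwsem (all names illustrative):

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct dev {
	pthread_rwlock_t memory_lock;	/* stands in for vdev->memory_lock */
	bool mem_enabled;		/* mirrors the PCI COMMAND memory bit */
	volatile uint32_t *mmio;	/* mapped BAR */
};

/* Fail with -EIO rather than touch MMIO while decoding is disabled. */
static int guarded_write32(struct dev *d, uint32_t val, size_t off)
{
	pthread_rwlock_rdlock(&d->memory_lock);
	if (!d->mem_enabled) {
		pthread_rwlock_unlock(&d->memory_lock);
		return -EIO;
	}
	d->mmio[off / 4] = val;
	pthread_rwlock_unlock(&d->memory_lock);
	return 0;
}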
return ret;
}
- done = do_io_rw(iomem, buf, off, count, 0, 0, iswrite);
+ /*
+ * VGA MMIO is a legacy, non-BAR resource that hopefully allows
+ * probing, so we don't currently worry about access in relation
+ * to the memory enable bit in the command register.
+ */
+ done = do_io_rw(vdev, false, iomem, buf, off, count, 0, 0, iswrite);
vga_put(vdev->pdev, rsrc);
switch (ioeventfd->count) {
case 1:
- vfio_iowrite8(ioeventfd->data, ioeventfd->addr);
+ vfio_pci_iowrite8(ioeventfd->vdev, ioeventfd->test_mem,
+ ioeventfd->data, ioeventfd->addr);
break;
case 2:
- vfio_iowrite16(ioeventfd->data, ioeventfd->addr);
+ vfio_pci_iowrite16(ioeventfd->vdev, ioeventfd->test_mem,
+ ioeventfd->data, ioeventfd->addr);
break;
case 4:
- vfio_iowrite32(ioeventfd->data, ioeventfd->addr);
+ vfio_pci_iowrite32(ioeventfd->vdev, ioeventfd->test_mem,
+ ioeventfd->data, ioeventfd->addr);
break;
#ifdef iowrite64
case 8:
- vfio_iowrite64(ioeventfd->data, ioeventfd->addr);
+ vfio_pci_iowrite64(ioeventfd->vdev, ioeventfd->test_mem,
+ ioeventfd->data, ioeventfd->addr);
break;
#endif
}
goto out_unlock;
}
+ ioeventfd->vdev = vdev;
ioeventfd->addr = vdev->barmap[bar] + pos;
ioeventfd->data = data;
ioeventfd->pos = pos;
ioeventfd->bar = bar;
ioeventfd->count = count;
+ ioeventfd->test_mem = vdev->pdev->resource[bar].flags & IORESOURCE_MEM;
ret = vfio_virqfd_enable(ioeventfd, vfio_pci_ioeventfd_handler,
NULL, NULL, &ioeventfd->virqfd, fd);
static int vfio_iommu_replay(struct vfio_iommu *iommu,
struct vfio_domain *domain)
{
- struct vfio_domain *d;
+ struct vfio_domain *d = NULL;
struct rb_node *n;
unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
int ret;
/* Arbitrarily pick the first domain in the list for lookups */
- d = list_first_entry(&iommu->domain_list, struct vfio_domain, next);
+ if (!list_empty(&iommu->domain_list))
+ d = list_first_entry(&iommu->domain_list,
+ struct vfio_domain, next);
+
n = rb_first(&iommu->dma_list);
for (; n; n = rb_next(n)) {
phys_addr_t p;
dma_addr_t i;
+ if (WARN_ON(!d)) { /* mapped w/o a domain?! */
+ ret = -EINVAL;
+ goto unwind;
+ }
+
phys = iommu_iova_to_phys(d->domain, iova);
if (WARN_ON(!phys)) {
if (npage <= 0) {
WARN_ON(!npage);
ret = (int)npage;
- return ret;
+ goto unwind;
}
phys = pfn << PAGE_SHIFT;
ret = iommu_map(domain->domain, iova, phys,
size, dma->prot | domain->prot);
- if (ret)
- return ret;
+ if (ret) {
+ if (!dma->iommu_mapped)
+ vfio_unpin_pages_remote(dma, iova,
+ phys >> PAGE_SHIFT,
+ size >> PAGE_SHIFT,
+ true);
+ goto unwind;
+ }
iova += size;
}
+ }
+
+ /* All dmas are now mapped, defer to second tree walk for unwind */
+ for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
+ struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
+
dma->iommu_mapped = true;
}
+
return 0;
+
+unwind:
+ for (; n; n = rb_prev(n)) {
+ struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
+ dma_addr_t iova;
+
+ if (dma->iommu_mapped) {
+ iommu_unmap(domain->domain, dma->iova, dma->size);
+ continue;
+ }
+
+ iova = dma->iova;
+ while (iova < dma->iova + dma->size) {
+ phys_addr_t phys, p;
+ size_t size;
+ dma_addr_t i;
+
+ phys = iommu_iova_to_phys(domain->domain, iova);
+ if (!phys) {
+ iova += PAGE_SIZE;
+ continue;
+ }
+
+ size = PAGE_SIZE;
+ p = phys + size;
+ i = iova + size;
+ while (i < dma->iova + dma->size &&
+ p == iommu_iova_to_phys(domain->domain, i)) {
+ size += PAGE_SIZE;
+ p += PAGE_SIZE;
+ i += PAGE_SIZE;
+ }
+
+ iommu_unmap(domain->domain, iova, size);
+ vfio_unpin_pages_remote(dma, iova, phys >> PAGE_SHIFT,
+ size >> PAGE_SHIFT, true);
+ }
+ }
+
+ return ret;
}
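vfio_iommu_replay() above now maps every vfio_dma before marking any of them iommu_mapped, and on failure walks the tree backwards, unmapping and unpinning whatever had been established. Reduced to its transactional shape, the pattern looks like this sketch (entry, map_one and unmap_one are placeholders):

/* Map all entries or none: on failure, unwind in reverse order. */
struct entry { int id; };

static int map_all(struct entry *e, int n,
		   int (*map_one)(struct entry *),
		   void (*unmap_one)(struct entry *))
{
	int i, ret = 0;

	for (i = 0; i < n; i++) {
		ret = map_one(&e[i]);
		if (ret)
			goto unwind;
	}
	return 0;

unwind:
	while (--i >= 0)
		unmap_one(&e[i]);
	return ret;
}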
/*
break;
case VFIO_TYPE1_NESTING_IOMMU:
iommu->nesting = true;
- /* fall through */
+ fallthrough;
case VFIO_TYPE1v2_IOMMU:
iommu->v2 = true;
break;
EXPORT_SYMBOL_GPL(vhost_iotlb_itree_first);
/**
- * vhost_iotlb_itree_first - return the next overlapped range
- * @iotlb: the IOTLB
+ * vhost_iotlb_itree_next - return the next overlapped range
+ * @map: the starting map node
* @start: start of IOVA range
* @end: end of IOVA range
*/
if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
r = vhost_update_used_flags(vq);
if (r)
- vq_err(vq, "Failed to enable notification at %p: %d\n",
+ vq_err(vq, "Failed to disable notification at %p: %d\n",
&vq->used->flags, r);
}
}
switch (ADP8860_MANID(reg_val)) {
case ADP8863_MANUFID:
data->gdwn_dis = !!pdata->gdwn_dis;
- /* fall through */
+ fallthrough;
case ADP8860_MANUFID:
data->en_ambl_sens = !!pdata->en_ambl_sens;
break;
case 'M':
case 'm':
size *= 1024;
- /* Fall through */
+ fallthrough;
case 'K':
case 'k':
size *= 1024;
schedule();
finish_wait(&arcfb_waitq, &wait);
}
- /* fall through */
+ fallthrough;
case FBIO_GETCONTROL2:
{
case 32:
var->transp.offset = 24;
var->transp.length = 8;
- /* fall through */
+ fallthrough;
case 24:
if (pdata->lcd_wiring_mode == ATMEL_LCDC_WIRING_RGB) {
/* RGB:888 mode */
case 2: value |= ATMEL_LCDC_PIXELSIZE_2; break;
case 4: value |= ATMEL_LCDC_PIXELSIZE_4; break;
case 8: value |= ATMEL_LCDC_PIXELSIZE_8; break;
- case 15: /* fall through */
+ case 15:
case 16: value |= ATMEL_LCDC_PIXELSIZE_16; break;
case 24: value |= ATMEL_LCDC_PIXELSIZE_24; break;
case 32: value |= ATMEL_LCDC_PIXELSIZE_32; break;
case 1:
if (mc & 0x4)
break;
- /* fall through */
+ fallthrough;
case 2:
dll_sleep_mask |= MDLL_R300_RDCK__MRDCKB_SLEEP;
dll_reset_mask |= MDLL_R300_RDCK__MRDCKB_RESET;
- /* fall through */
+ fallthrough;
case 0:
dll_sleep_mask |= MDLL_R300_RDCK__MRDCKA_SLEEP;
dll_reset_mask |= MDLL_R300_RDCK__MRDCKA_RESET;
case 1:
if (!(mc & 0x4))
break;
- /* fall through */
+ fallthrough;
case 2:
dll_sleep_mask |= MDLL_R300_RDCK__MRDCKD_SLEEP;
dll_reset_mask |= MDLL_R300_RDCK__MRDCKD_RESET;
mdelay(100);
/* mode */
vga_wgfx(cinfo->regbase, CL_GR31, 0x00);
- /* fall through */
+ fallthrough;
case BT_GD5480:
/* from Klaus' NetBSD driver: */
vga_wgfx(cinfo->regbase, CL_GR2F, 0x00);
- /* fall through */
+ fallthrough;
case BT_ALPINE:
/* put blitter into 542x compat */
vga_wgfx(cinfo->regbase, CL_GR33, 0x00);
#include <linux/cuda.h>
#ifdef CONFIG_PPC_PMAC
#include <asm/prom.h>
+#endif
+#ifdef CONFIG_BOOTX_TEXT
#include <asm/btext.h>
#endif
break;
case FB_BLANK_POWERDOWN:
ctrl &= ~0x33;
- /* fall through */
+ fallthrough;
case FB_BLANK_NORMAL:
ctrl |= 0x400;
break;
}
}
+#define PITCH(w) (((w) + 7) >> 3)
+#define CALC_FONTSZ(h, p, c) ((h) * (p) * (c)) /* size = height * pitch * charcount */
+
static int fbcon_resize(struct vc_data *vc, unsigned int width,
unsigned int height, unsigned int user)
{
struct fb_var_screeninfo var = info->var;
int x_diff, y_diff, virt_w, virt_h, virt_fw, virt_fh;
+ if (ops->p && ops->p->userfont && FNTSIZE(vc->vc_font.data)) {
+ int size;
+ int pitch = PITCH(vc->vc_font.width);
+
+ /*
+ * If user font, ensure that a possible change to user font
+ * height or width will not allow a font data out-of-bounds access.
+ * NOTE: must use original charcount in calculation as font
+ * charcount can change and cannot be used to determine the
+ * font data allocated size.
+ */
+ if (pitch <= 0)
+ return -EINVAL;
+ size = CALC_FONTSZ(vc->vc_font.height, pitch, FNTCHARCNT(vc->vc_font.data));
+ if (size > FNTSIZE(vc->vc_font.data))
+ return -EINVAL;
+ }
+
virt_w = FBCON_SWAP(ops->rotate, width, height);
virt_h = FBCON_SWAP(ops->rotate, height, width);
virt_fw = FBCON_SWAP(ops->rotate, vc->vc_font.width,
int size;
int i, csum;
u8 *new_data, *data = font->data;
- int pitch = (font->width+7) >> 3;
+ int pitch = PITCH(font->width);
/* Is there a reason why fbconsole couldn't handle any charcount >256?
	 * If not, this check should be changed to charcount < 256 */
if (fbcon_invalid_charcount(info, charcount))
return -EINVAL;
- size = h * pitch * charcount;
+ size = CALC_FONTSZ(h, pitch, charcount);
new_data = kmalloc(FONT_EXTRA_WORDS * sizeof(int) + size, GFP_USER);
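PITCH() converts a glyph width in pixels to a row pitch in whole bytes, and CALC_FONTSZ() gives the buffer size the fbcon_resize() check above compares against FNTSIZE() to reject out-of-bounds font geometries. The arithmetic on its own, runnable with example dimensions:

#include <stdio.h>

#define PITCH(w)		(((w) + 7) >> 3)	/* bytes per glyph row */
#define CALC_FONTSZ(h, p, c)	((h) * (p) * (c))	/* height * pitch * charcount */

int main(void)
{
	/* e.g. a 12x22 font with 256 glyphs */
	int pitch = PITCH(12);			/* 2 bytes per row */
	int size = CALC_FONTSZ(22, pitch, 256);	/* 11264 bytes */

	printf("pitch=%d size=%d\n", pitch, size);
	return 0;
}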
case FBIOGET_CON2FBMAP:
case FBIOPUT_CON2FBMAP:
arg = (unsigned long) compat_ptr(arg);
- /* fall through */
+ fallthrough;
case FBIOBLANK:
ret = do_fb_ioctl(info, cmd, arg);
break;
info->apertures->ranges[0].base = efifb_fix.smem_start;
info->apertures->ranges[0].size = size_remap;
- if (efi_enabled(EFI_BOOT) &&
+ if (efi_enabled(EFI_MEMMAP) &&
!efi_mem_desc_lookup(efifb_fix.smem_start, &md)) {
if ((efifb_fix.smem_start + efifb_fix.smem_len) >
(md.phys_addr + (md.num_pages << EFI_PAGE_SHIFT))) {
dev_warn(info->dev,
"MFB_SET_PIXFMT value of 0x%08x is deprecated.\n",
MFB_SET_PIXFMT_OLD);
- /* fall through */
+ fallthrough;
case MFB_SET_PIXFMT:
if (copy_from_user(&pix_fmt, buf, sizeof(pix_fmt)))
return -EFAULT;
dev_warn(info->dev,
"MFB_GET_PIXFMT value of 0x%08x is deprecated.\n",
MFB_GET_PIXFMT_OLD);
- /* fall through */
+ fallthrough;
case MFB_GET_PIXFMT:
pix_fmt = ad->pix_fmt;
if (copy_to_user(buf, &pix_fmt, sizeof(pix_fmt)))
break;
case DFA_PIX_32BIT:
val |= (reg << 24);
- /* fall through */
+ fallthrough;
case DFA_PIX_24BIT:
val |= (reg << 16) | (reg << 8);
break;
ret = synthvid_negotiate_ver(hdev, SYNTHVID_VERSION_WIN10);
if (!ret)
break;
- /* Fallthrough */
+ fallthrough;
case VERSION_WIN8:
case VERSION_WIN8_1:
ret = synthvid_negotiate_ver(hdev, SYNTHVID_VERSION_WIN8);
if (!ret)
break;
- /* Fallthrough */
+ fallthrough;
case VERSION_WS2008:
case VERSION_WIN7:
ret = synthvid_negotiate_ver(hdev, SYNTHVID_VERSION_WIN7);
break;
case 9 ... 15:
bpp = 15;
- /* fall through */
+ fallthrough;
case 16:
if ((1000000 / var->pixclock) > DACSPEED16) {
dev_err(info->device, "requested pixclock %i MHz out of range (max. %i MHz at 15/16bpp)\n",
else
return PIXFMT_BGR888UNPACK;
}
-
- /* fall through */
}
return -EINVAL;
if (!par->FlatPanel)
state->control = NV_RD32(par->PRAMDAC0, 0x0580) &
0xeffffeff;
- /* fallthrough */
+ fallthrough;
case NV_ARCH_10:
case NV_ARCH_20:
case NV_ARCH_30:
/* Clear PALETTE_ACCESS_CNTL in DAC_CNTL */
out_le32(par->cmap_adr + 0x58,
in_le32(par->cmap_adr + 0x58) & ~0x20);
- /* fall through */
+ fallthrough;
case cmap_r128:
/* Set palette index & data */
out_8(par->cmap_adr + 0xb0, regno);
/* Clear PALETTE_ACCESS_CNTL in DAC_CNTL */
out_le32(par->cmap_adr + 0x58,
in_le32(par->cmap_adr + 0x58) & ~0x20);
- /* fall through */
+ fallthrough;
case cmap_r128:
/* Set palette index & data */
out_8(par->cmap_adr + 0xb0, i);
lcdc.bpp = 12;
break;
}
- /* fallthrough */
+ fallthrough;
case OMAPFB_COLOR_YUV422:
if (lcdc.ext_mode) {
lcdc.bpp = 16;
break;
}
- /* fallthrough */
+ fallthrough;
default:
/* FIXME: other BPPs.
* bpp1: code 0, size 256
if (fbdev->ctrl->setcolreg)
r = fbdev->ctrl->setcolreg(regno, red, green, blue,
transp, update_hw_pal);
- /* Fallthrough */
+ fallthrough;
case OMAPFB_COLOR_RGB565:
case OMAPFB_COLOR_RGB444:
if (r != 0)
return 0;
case 12:
var->bits_per_pixel = 16;
- /* fall through */
+ fallthrough;
case 16:
if (plane->fbdev->panel->bpp == 12)
plane->color_mode = OMAPFB_COLOR_RGB444;
case OMAPFB_ACTIVE:
for (i = 0; i < fbdev->mem_desc.region_cnt; i++)
unregister_framebuffer(fbdev->fb_info[i]);
- /* fall through */
+ fallthrough;
case 7:
omapfb_unregister_sysfs(fbdev);
- /* fall through */
+ fallthrough;
case 6:
if (fbdev->panel->disable)
fbdev->panel->disable(fbdev->panel);
- /* fall through */
+ fallthrough;
case 5:
omapfb_set_update_mode(fbdev, OMAPFB_UPDATE_DISABLED);
- /* fall through */
+ fallthrough;
case 4:
planes_cleanup(fbdev);
- /* fall through */
+ fallthrough;
case 3:
ctrl_cleanup(fbdev);
- /* fall through */
+ fallthrough;
case 2:
if (fbdev->panel->cleanup)
fbdev->panel->cleanup(fbdev->panel);
- /* fall through */
+ fallthrough;
case 1:
dev_set_drvdata(fbdev->dev, NULL);
kfree(fbdev);
case 'm':
case 'M':
vram *= 1024;
- /* Fall through */
+ fallthrough;
case 'k':
case 'K':
vram *= 1024;
if (color_mode == OMAP_DSS_COLOR_YUV2 ||
color_mode == OMAP_DSS_COLOR_UYVY)
width = width >> 1;
- /* fall through */
+ fallthrough;
case OMAP_DSS_ROT_90:
case OMAP_DSS_ROT_270:
*offset1 = 0;
if (color_mode == OMAP_DSS_COLOR_YUV2 ||
color_mode == OMAP_DSS_COLOR_UYVY)
width = width >> 1;
- /* fall through */
+ fallthrough;
case OMAP_DSS_ROT_90 + 4:
case OMAP_DSS_ROT_270 + 4:
*offset1 = 0;
r = -ENODEV;
break;
}
- /* FALLTHROUGH */
+ fallthrough;
case OMAPFB_WAITFORVSYNC:
DBG("ioctl WAITFORVSYNC\n");
/ (var->bits_per_pixel >> 2);
break;
}
- /* fall through */
+ fallthrough;
default:
screen_width = fix->line_length / (var->bits_per_pixel >> 3);
break;
switch (bpp) {
case 24:
timing *= 3;
- /* fall through */
+ fallthrough;
case 8:
timing >>= 1;
- /* fall through */
+ fallthrough;
case 16:
timing >>= 1;
case 32:
else
return PIX_FMT_BGR1555;
}
-
- /* fall through */
}
/*
else
return PIX_FMT_BGR888UNPACK;
}
-
- /* fall through */
}
return -EINVAL;
*/
if (old_state != C_DISABLE_PM)
break;
- /* fall through */
+ fallthrough;
case C_ENABLE:
/*
break;
case 9 ... 15:
var->green.length = 5;
- /* fall through */
+ fallthrough;
case 16:
var->bits_per_pixel = 16;
/* The Riva128 supports RGB555 only */
/* 666 with one bit alpha/transparency */
var->transp.offset = 18;
var->transp.length = 1;
- /* fall through */
+ fallthrough;
case 18:
var->bits_per_pixel = 32;
case 25:
var->transp.length = var->bits_per_pixel - 24;
var->transp.offset = 24;
- /* fall through */
+ fallthrough;
case 24:
/* our 24bpp is unpacked, so 32bpp */
var->bits_per_pixel = 32;
case FB_BLANK_POWERDOWN:
wincon &= ~WINCONx_ENWIN;
sfb->enabled &= ~(1 << index);
- /* fall through - to FB_BLANK_NORMAL */
+ fallthrough; /* to FB_BLANK_NORMAL */
case FB_BLANK_NORMAL:
/* disable the DMA and display 0x0 (black) */
*/
if (old_state != C_DISABLE_PM)
break;
- /* fall through */
+ fallthrough;
case C_ENABLE:
/*
if ((vga_in8(0x3d5, par) & 0xC0) == (0x01 << 6))
RamSavage4[1] = 8;
- /*FALLTHROUGH*/
+ fallthrough;
case S3_SAVAGE2000:
videoRam = RamSavage4[(config1 & 0xE0) >> 5] * 1024;
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV21:
info->fix.ypanstep = 2;
- /* Fall through */
+ fallthrough;
case V4L2_PIX_FMT_NV16:
case V4L2_PIX_FMT_NV61:
info->fix.xpanstep = 2;
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV21:
info->fix.ypanstep = 2;
- /* Fall through */
+ fallthrough;
case V4L2_PIX_FMT_NV16:
case V4L2_PIX_FMT_NV61:
info->fix.xpanstep = 2;
if(ivideo->warncount++ < 10)
printk(KERN_INFO
"sisfb: Deprecated ioctl call received - update your application!\n");
- /* fall through */
+ fallthrough;
case SISFB_GET_INFO: /* For communication with X driver */
ivideo->sisfb_infoblock.sisfb_id = SISFB_ID;
ivideo->sisfb_infoblock.sisfb_version = VER_MAJOR;
if(ivideo->warncount++ < 10)
printk(KERN_INFO
"sisfb: Deprecated ioctl call received - update your application!\n");
- /* fall through */
+ fallthrough;
case SISFB_GET_VBRSTATUS:
if(sisfb_CheckVBRetrace(ivideo))
return put_user((u32)1, argp);
if(ivideo->warncount++ < 10)
printk(KERN_INFO
"sisfb: Deprecated ioctl call received - update your application!\n");
- /* fall through */
+ fallthrough;
case SISFB_GET_AUTOMAXIMIZE:
if(ivideo->sisfb_max)
return put_user((u32)1, argp);
if(ivideo->warncount++ < 10)
printk(KERN_INFO
"sisfb: Deprecated ioctl call received - update your application!\n");
- /* fall through */
+ fallthrough;
case SISFB_SET_AUTOMAXIMIZE:
if(get_user(gpu32, argp))
return -EFAULT;
case FB_BLANK_POWERDOWN:
ctrl &= ~SM501_DC_CRT_CONTROL_ENABLE;
sm501_misc_control(fbi->dev->parent, SM501_MISC_DAC_POWER, 0);
- /* fall through */
+ fallthrough;
case FB_BLANK_NORMAL:
ctrl |= SM501_DC_CRT_CONTROL_BLANK;
case S9000_ID_HCRX:
HYPER_ENABLE_DISABLE_DISPLAY(fb, enable);
break;
- case S9000_ID_A1659A: /* fall through */
+ case S9000_ID_A1659A:
case S9000_ID_TIMBER:
case CRX24_OVERLAY_PLANES:
default:
dev_name);
goto out_err0;
}
- /* fall through */
+ fallthrough;
case S9000_ID_ARTIST:
case S9000_ID_HCRX:
case S9000_ID_TIMBER:
case 32:
var->transp.offset = 24;
var->transp.length = 8;
- /* fall through */
+ fallthrough;
case 24:
var->red.offset = 16;
var->green.offset = 8;
viaparinfo->chip_info->gfx_chip_name))
viafb_write_reg_mask(CR97, VIACR, 0x84,
BIT7 + BIT2 + BIT1 + BIT0);
- /* fall through */
+ fallthrough;
case INTERFACE_DVP0:
case INTERFACE_DVP1:
case INTERFACE_DFP_HIGH:
case XenbusStateClosed:
if (dev->state == XenbusStateClosed)
break;
- /* fall through - Missed the backend's CLOSING state. */
+ fallthrough; /* Missed the backend's CLOSING state */
case XenbusStateClosing:
xenbus_frontend_closed(dev);
break;
return -EINVAL;
timeout = new_timeout;
sc1200wdt_write_data(WDTO, timeout);
- /* fall through - and return the new timeout */
+ fallthrough; /* and return the new timeout */
case WDIOC_GETTIMEOUT:
return put_user(timeout * 60, p);
wdrtas_interval = i;
else
wdrtas_interval = wdrtas_get_interval(i);
- /* fallthrough */
+ fallthrough;
case WDIOC_GETTIMEOUT:
return put_user(wdrtas_interval, argp);
config XEN_FRONT_PGDIR_SHBUF
tristate
+config XEN_UNPOPULATED_ALLOC
+ bool "Use unpopulated memory ranges for guest mappings"
+ depends on X86 && ZONE_DEVICE
+ default XEN_BACKEND || XEN_GNTDEV || XEN_DOM0
+ help
+ Use unpopulated memory ranges in order to create mappings for guest
+ memory regions, including grant maps and foreign pages. This avoids
+ having to balloon out RAM regions in order to obtain physical memory
+ space to create such mappings.
+
endmenu
xen-gntalloc-y := gntalloc.o
xen-privcmd-y := privcmd.o privcmd-buf.o
obj-$(CONFIG_XEN_FRONT_PGDIR_SHBUF) += xen-front-pgdir-shbuf.o
+obj-$(CONFIG_XEN_UNPOPULATED_ALLOC) += unpopulated-alloc.o
}
EXPORT_SYMBOL(free_xenballooned_pages);
-#ifdef CONFIG_XEN_PV
+#if defined(CONFIG_XEN_PV) && !defined(CONFIG_XEN_UNPOPULATED_ALLOC)
static void __init balloon_add_region(unsigned long start_pfn,
unsigned long pages)
{
register_sysctl_table(xen_root);
#endif
-#ifdef CONFIG_XEN_PV
+#if defined(CONFIG_XEN_PV) && !defined(CONFIG_XEN_UNPOPULATED_ALLOC)
{
int i;
/* Get info for IRQ */
struct irq_info *info_for_irq(unsigned irq)
{
- return irq_get_handler_data(irq);
+ return irq_get_chip_data(irq);
}
/* Constructors for packed IRQ information. */
info->type = IRQT_UNBOUND;
info->refcnt = -1;
- irq_set_handler_data(irq, info);
+ irq_set_chip_data(irq, info);
list_add_tail(&info->list, &xen_irq_list_head);
}
static void xen_free_irq(unsigned irq)
{
- struct irq_info *info = irq_get_handler_data(irq);
+ struct irq_info *info = irq_get_chip_data(irq);
if (WARN_ON(!info))
return;
list_del(&info->list);
- irq_set_handler_data(irq, NULL);
+ irq_set_chip_data(irq, NULL);
WARN_ON(info->refcnt > 0);
static void __unbind_from_irq(unsigned int irq)
{
evtchn_port_t evtchn = evtchn_from_irq(irq);
- struct irq_info *info = irq_get_handler_data(irq);
+ struct irq_info *info = irq_get_chip_data(irq);
if (info->refcnt > 0) {
info->refcnt--;
void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
- struct irq_info *info = irq_get_handler_data(irq);
+ struct irq_info *info = irq_get_chip_data(irq);
if (WARN_ON(!info))
return;
if (irq == -1)
return -ENOENT;
- info = irq_get_handler_data(irq);
+ info = irq_get_chip_data(irq);
if (!info)
return -ENOENT;
if (irq == -1)
goto done;
- info = irq_get_handler_data(irq);
+ info = irq_get_chip_data(irq);
if (!info)
goto done;
{
int ret;
- ret = alloc_xenballooned_pages(nr_pages, pages);
+ ret = xen_alloc_unpopulated_pages(nr_pages, pages);
if (ret < 0)
return ret;
void gnttab_free_pages(int nr_pages, struct page **pages)
{
gnttab_pages_clear_private(nr_pages, pages);
- free_xenballooned_pages(nr_pages, pages);
+ xen_free_unpopulated_pages(nr_pages, pages);
}
EXPORT_SYMBOL_GPL(gnttab_free_pages);
if (pages == NULL)
return -ENOMEM;
- rc = alloc_xenballooned_pages(numpgs, pages);
+ rc = xen_alloc_unpopulated_pages(numpgs, pages);
if (rc != 0) {
pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__,
numpgs, rc);
rc = xen_unmap_domain_gfn_range(vma, numgfns, pages);
if (rc == 0)
- free_xenballooned_pages(numpgs, pages);
+ xen_free_unpopulated_pages(numpgs, pages);
else
pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n",
numpgs, rc);
if (dev->state == XenbusStateClosed)
break;
/* Missed the backend's CLOSING state */
- /* fall through */
+ fallthrough;
case XenbusStateClosing:
xenbus_frontend_closed(dev);
break;
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/errno.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/memremap.h>
+#include <linux/slab.h>
+
+#include <asm/page.h>
+
+#include <xen/page.h>
+#include <xen/xen.h>
+
+static DEFINE_MUTEX(list_lock);
+static LIST_HEAD(page_list);
+static unsigned int list_count;
+
+static int fill_list(unsigned int nr_pages)
+{
+ struct dev_pagemap *pgmap;
+ void *vaddr;
+ unsigned int i, alloc_pages = round_up(nr_pages, PAGES_PER_SECTION);
+ int ret;
+
+ pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL);
+ if (!pgmap)
+ return -ENOMEM;
+
+ pgmap->type = MEMORY_DEVICE_GENERIC;
+ pgmap->res.name = "Xen scratch";
+ pgmap->res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+
+ ret = allocate_resource(&iomem_resource, &pgmap->res,
+ alloc_pages * PAGE_SIZE, 0, -1,
+ PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
+ if (ret < 0) {
+ pr_err("Cannot allocate new IOMEM resource\n");
+ kfree(pgmap);
+ return ret;
+ }
+
+#ifdef CONFIG_XEN_HAVE_PVMMU
+ /*
+	 * memremap will build page tables for the new memory, so the
+	 * p2m must contain invalid entries to ensure that the correct
+	 * non-present PTEs are written.
+ *
+ * If a failure occurs, the original (identity) p2m entries
+ * are not restored since this region is now known not to
+ * conflict with any devices.
+ */
+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+ xen_pfn_t pfn = PFN_DOWN(pgmap->res.start);
+
+ for (i = 0; i < alloc_pages; i++) {
+ if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) {
+ pr_warn("set_phys_to_machine() failed, no memory added\n");
+ release_resource(&pgmap->res);
+ kfree(pgmap);
+ return -ENOMEM;
+ }
+ }
+ }
+#endif
+
+ vaddr = memremap_pages(pgmap, NUMA_NO_NODE);
+ if (IS_ERR(vaddr)) {
+ pr_err("Cannot remap memory range\n");
+ release_resource(&pgmap->res);
+ kfree(pgmap);
+ return PTR_ERR(vaddr);
+ }
+
+ for (i = 0; i < alloc_pages; i++) {
+ struct page *pg = virt_to_page(vaddr + PAGE_SIZE * i);
+
+ BUG_ON(!virt_addr_valid(vaddr + PAGE_SIZE * i));
+ list_add(&pg->lru, &page_list);
+ list_count++;
+ }
+
+ return 0;
+}
+
+/**
+ * xen_alloc_unpopulated_pages - alloc unpopulated pages
+ * @nr_pages: Number of pages
+ * @pages: pages returned
+ * @return 0 on success, error otherwise
+ */
+int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages)
+{
+ unsigned int i;
+ int ret = 0;
+
+ mutex_lock(&list_lock);
+ if (list_count < nr_pages) {
+ ret = fill_list(nr_pages - list_count);
+ if (ret)
+ goto out;
+ }
+
+ for (i = 0; i < nr_pages; i++) {
+ struct page *pg = list_first_entry_or_null(&page_list,
+ struct page,
+ lru);
+
+ BUG_ON(!pg);
+ list_del(&pg->lru);
+ list_count--;
+ pages[i] = pg;
+
+#ifdef CONFIG_XEN_HAVE_PVMMU
+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+ ret = xen_alloc_p2m_entry(page_to_pfn(pg));
+ if (ret < 0) {
+ unsigned int j;
+
+ for (j = 0; j <= i; j++) {
+ list_add(&pages[j]->lru, &page_list);
+ list_count++;
+ }
+ goto out;
+ }
+ }
+#endif
+ }
+
+out:
+ mutex_unlock(&list_lock);
+ return ret;
+}
+EXPORT_SYMBOL(xen_alloc_unpopulated_pages);
+
+/**
+ * xen_free_unpopulated_pages - return unpopulated pages
+ * @nr_pages: Number of pages
+ * @pages: pages to return
+ */
+void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages)
+{
+ unsigned int i;
+
+ mutex_lock(&list_lock);
+ for (i = 0; i < nr_pages; i++) {
+ list_add(&pages[i]->lru, &page_list);
+ list_count++;
+ }
+ mutex_unlock(&list_lock);
+}
+EXPORT_SYMBOL(xen_free_unpopulated_pages);
+
+#ifdef CONFIG_XEN_PV
+static int __init init(void)
+{
+ unsigned int i;
+
+ if (!xen_domain())
+ return -ENODEV;
+
+ if (!xen_pv_domain())
+ return 0;
+
+ /*
+ * Initialize with pages from the extra memory regions (see
+ * arch/x86/xen/setup.c).
+ */
+ for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
+ unsigned int j;
+
+ for (j = 0; j < xen_extra_mem[i].n_pfns; j++) {
+ struct page *pg =
+ pfn_to_page(xen_extra_mem[i].start_pfn + j);
+
+ list_add(&pg->lru, &page_list);
+ list_count++;
+ }
+ }
+
+ return 0;
+}
+subsys_initcall(init);
+#endif
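Stripped of the Xen, p2m and memremap specifics, unpopulated-alloc is a refill-on-demand pool: a mutex-protected free list that fill_list() grows by a section at a time whenever a request exceeds list_count. A runnable sketch of just that pool shape, assuming malloc in place of memremap_pages() and illustrative names throughout:

#include <pthread.h>
#include <stdlib.h>

struct node { struct node *next; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *free_list;
static unsigned int list_count;

/* Grow the pool; the kernel code instead memremaps a new section. */
static int fill_list(unsigned int n)
{
	while (n--) {
		struct node *p = malloc(sizeof(*p));

		if (!p)
			return -1;
		p->next = free_list;
		free_list = p;
		list_count++;
	}
	return 0;
}

static int alloc_items(unsigned int n, struct node **out)
{
	unsigned int i;
	int ret = 0;

	pthread_mutex_lock(&list_lock);
	if (list_count < n)
		ret = fill_list(n - list_count);
	for (i = 0; !ret && i < n; i++) {
		out[i] = free_list;
		free_list = free_list->next;
		list_count--;
	}
	pthread_mutex_unlock(&list_lock);
	return ret;
}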
case ACPI_NOTIFY_BUS_CHECK:
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"\nReceived BUS CHECK notification for device\n"));
- /* Fall Through */
+ fallthrough;
case ACPI_NOTIFY_DEVICE_CHECK:
if (event == ACPI_NOTIFY_DEVICE_CHECK)
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
xenbus_switch_state(xdev, XenbusStateClosed);
if (xenbus_dev_is_online(xdev))
break;
- /* fall through - if not online */
+ fallthrough; /* if not online */
case XenbusStateUnknown:
dev_dbg(&xdev->dev, "frontend is gone! unregister device\n");
device_unregister(&xdev->dev);
xenbus_switch_state(dev, XenbusStateClosed);
if (xenbus_dev_is_online(dev))
break;
- /* fall through - if not online */
+ fallthrough; /* if not online */
case XenbusStateUnknown:
device_unregister(&dev->dev);
break;
int i, j;
for (i = 0; i < nr_pages; i++) {
- err = gnttab_grant_foreign_access(dev->otherend_id,
- virt_to_gfn(vaddr), 0);
+ unsigned long gfn;
+
+ if (is_vmalloc_addr(vaddr))
+ gfn = pfn_to_gfn(vmalloc_to_pfn(vaddr));
+ else
+ gfn = virt_to_gfn(vaddr);
+
+ err = gnttab_grant_foreign_access(dev->otherend_id, gfn, 0);
if (err < 0) {
xenbus_dev_fatal(dev, err,
"granting access to ring page");
bool leaked = false;
unsigned int nr_pages = XENBUS_PAGES(nr_grefs);
- err = alloc_xenballooned_pages(nr_pages, node->hvm.pages);
+ err = xen_alloc_unpopulated_pages(nr_pages, node->hvm.pages);
if (err)
goto out_err;
addr, nr_pages);
out_free_ballooned_pages:
if (!leaked)
- free_xenballooned_pages(nr_pages, node->hvm.pages);
+ xen_free_unpopulated_pages(nr_pages, node->hvm.pages);
out_err:
return err;
}
info.addrs);
if (!rv) {
vunmap(vaddr);
- free_xenballooned_pages(nr_pages, node->hvm.pages);
+ xen_free_unpopulated_pages(nr_pages, node->hvm.pages);
}
else
WARN(1, "Leaking %p, size %u page(s)\n", vaddr, nr_pages);
case XenbusStateConnected:
xenbus_printf(XBT_NIL, fe, "state", "%d", XenbusStateClosing);
xenbus_reset_wait_for_backend(be, XenbusStateClosing);
- /* fall through */
+ fallthrough;
case XenbusStateClosing:
xenbus_printf(XBT_NIL, fe, "state", "%d", XenbusStateClosed);
xenbus_reset_wait_for_backend(be, XenbusStateClosed);
- /* fall through */
+ fallthrough;
case XenbusStateClosed:
xenbus_printf(XBT_NIL, fe, "state", "%d", XenbusStateInitialising);
kfree(pages);
return -ENOMEM;
}
- rc = alloc_xenballooned_pages(nr_pages, pages);
+ rc = xen_alloc_unpopulated_pages(nr_pages, pages);
if (rc) {
pr_warn("%s Couldn't balloon alloc %ld pages rc:%d\n", __func__,
nr_pages, rc);
if (!vaddr) {
pr_warn("%s Couldn't map %ld pages rc:%d\n", __func__,
nr_pages, rc);
- free_xenballooned_pages(nr_pages, pages);
+ xen_free_unpopulated_pages(nr_pages, pages);
kfree(pages);
kfree(pfns);
return -ENOMEM;
break;
default:
WARN_ONCE(1, "unknown lock status code: %d\n", status);
- /* fall through */
+ fallthrough;
case P9_LOCK_ERROR:
case P9_LOCK_GRACE:
res = -ENOLCK;
switch (len) {
case 4: val |= p[3] << 24;
- /* fall through */
+ fallthrough;
case 3: val |= p[2] << 16;
- /* fall through */
+ fallthrough;
case 2: val |= p[1] << 8;
- /* fall through */
+ fallthrough;
default: val |= p[0];
}
return val;
{
switch (len) {
case 4: p[3] = val >> 24;
- /* fall through */
+ fallthrough;
case 3: p[2] = val >> 16;
- /* fall through */
+ fallthrough;
case 2: p[1] = val >> 8;
- /* fall through */
+ fallthrough;
default: p[0] = val;
}
}
u32 prot = AFFS_I(inode)->i_protect;
umode_t mode = inode->i_mode;
+ /*
+ * First, clear all RWED bits for owner, group, other.
+ * Then, recalculate them afresh.
+ *
+ * We'll always clear the delete-inhibit bit for the owner, as that is
+ * the classic single-user mode AmigaOS protection bit and we need to
+ * stay compatible with all scenarios.
+ *
+ * Since multi-user AmigaOS is an extension, we'll only set the
+ * delete-allow bit if any of the other bits in the same user class
+ * (group/other) are used.
+ */
+ prot &= ~(FIBF_NOEXECUTE | FIBF_NOREAD
+ | FIBF_NOWRITE | FIBF_NODELETE
+ | FIBF_GRP_EXECUTE | FIBF_GRP_READ
+ | FIBF_GRP_WRITE | FIBF_GRP_DELETE
+ | FIBF_OTR_EXECUTE | FIBF_OTR_READ
+ | FIBF_OTR_WRITE | FIBF_OTR_DELETE);
+
+ /* Classic single-user AmigaOS flags. These are inverted. */
if (!(mode & 0100))
prot |= FIBF_NOEXECUTE;
if (!(mode & 0400))
prot |= FIBF_NOREAD;
if (!(mode & 0200))
prot |= FIBF_NOWRITE;
+
+ /* Multi-user extended flags. Not inverted. */
if (mode & 0010)
prot |= FIBF_GRP_EXECUTE;
if (mode & 0040)
prot |= FIBF_GRP_READ;
if (mode & 0020)
prot |= FIBF_GRP_WRITE;
+ if (mode & 0070)
+ prot |= FIBF_GRP_DELETE;
+
if (mode & 0001)
prot |= FIBF_OTR_EXECUTE;
if (mode & 0004)
prot |= FIBF_OTR_READ;
if (mode & 0002)
prot |= FIBF_OTR_WRITE;
+ if (mode & 0007)
+ prot |= FIBF_OTR_DELETE;
AFFS_I(inode)->i_protect = prot;
}
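mode_to_prot() above juggles two conventions: the classic owner RWE bits of the AmigaOS protection word are inhibit flags (a set bit denies access), while the multi-user group/other extensions are positive grants. A runnable illustration of the inverted owner half (FIBF values as in the AmigaOS protection word):

#include <stdio.h>

/* AmigaOS owner protection bits are inhibit flags: set means denied. */
#define FIBF_NOEXECUTE	(1 << 1)
#define FIBF_NOWRITE	(1 << 2)
#define FIBF_NOREAD	(1 << 3)

static unsigned int owner_prot(unsigned int mode)
{
	unsigned int prot = 0;

	if (!(mode & 0100))
		prot |= FIBF_NOEXECUTE;
	if (!(mode & 0400))
		prot |= FIBF_NOREAD;
	if (!(mode & 0200))
		prot |= FIBF_NOWRITE;
	return prot;
}

int main(void)
{
	printf("0644 -> %#x\n", owner_prot(0644)); /* FIBF_NOEXECUTE only */
	return 0;
}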
return ret;
}
+static int affs_write_end(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned int len, unsigned int copied,
+ struct page *page, void *fsdata)
+{
+ struct inode *inode = mapping->host;
+ int ret;
+
+ ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
+
+ /* Clear Archived bit on file writes, as AmigaOS would do */
+ if (AFFS_I(inode)->i_protect & FIBF_ARCHIVED) {
+ AFFS_I(inode)->i_protect &= ~FIBF_ARCHIVED;
+ mark_inode_dirty(inode);
+ }
+
+ return ret;
+}
+
static sector_t _affs_bmap(struct address_space *mapping, sector_t block)
{
return generic_block_bmap(mapping,block,affs_get_block);
.readpage = affs_readpage,
.writepage = affs_writepage,
.write_begin = affs_write_begin,
- .write_end = generic_write_end,
+ .write_end = affs_write_end,
.direct_IO = affs_direct_IO,
.bmap = _affs_bmap
};
if (tmp > inode->i_size)
inode->i_size = AFFS_I(inode)->mmu_private = tmp;
+ /* Clear Archived bit on file writes, as AmigaOS would do */
+ if (AFFS_I(inode)->i_protect & FIBF_ARCHIVED) {
+ AFFS_I(inode)->i_protect &= ~FIBF_ARCHIVED;
+ mark_inode_dirty(inode);
+ }
+
err_first_bh:
unlock_page(page);
put_page(page);
case ST_ROOT:
inode->i_uid = sbi->s_uid;
inode->i_gid = sbi->s_gid;
- /* fall through */
+ fallthrough;
case ST_USERDIR:
if (be32_to_cpu(tail->stype) == ST_USERDIR ||
affs_test_opt(sbi->s_flags, SF_SETMODE)) {
case MUFS_INTLFFS:
case MUFS_DCFFS:
affs_set_opt(sbi->s_flags, SF_MUFS);
- /* fall thru */
+ fallthrough;
case FS_INTLFFS:
case FS_DCFFS:
affs_set_opt(sbi->s_flags, SF_INTL);
break;
case MUFS_OFS:
affs_set_opt(sbi->s_flags, SF_MUFS);
- /* fall through */
+ fallthrough;
case FS_OFS:
affs_set_opt(sbi->s_flags, SF_OFS);
sb->s_flags |= SB_NOEXEC;
case MUFS_DCOFS:
case MUFS_INTLOFS:
affs_set_opt(sbi->s_flags, SF_MUFS);
- /* fall through */
+ fallthrough;
case FS_DCOFS:
case FS_INTLOFS:
affs_set_opt(sbi->s_flags, SF_INTL);
call->unmarshall++;
/* extract the FID array and its count in two steps */
- /* fall through */
+ fallthrough;
case 1:
_debug("extract FID count");
ret = afs_extract_data(call, true);
afs_extract_to_buf(call, call->count * 3 * 4);
call->unmarshall++;
- /* Fall through */
+ fallthrough;
case 2:
_debug("extract FID array");
ret = afs_extract_data(call, true);
call->unmarshall++;
/* extract the callback array and its count in two steps */
- /* fall through */
+ fallthrough;
case 3:
_debug("extract CB count");
ret = afs_extract_data(call, true);
iov_iter_discard(&call->def_iter, READ, call->count2 * 3 * 4);
call->unmarshall++;
- /* Fall through */
+ fallthrough;
case 4:
_debug("extract discard %zu/%u",
iov_iter_count(call->iter), call->count2 * 3 * 4);
afs_extract_to_buf(call, 11 * sizeof(__be32));
call->unmarshall++;
- /* Fall through */
+ fallthrough;
case 1:
_debug("extract UUID");
ret = afs_extract_data(call, false);
afs_extract_to_buf(call, 11 * sizeof(__be32));
call->unmarshall++;
- /* Fall through */
+ fallthrough;
case 1:
_debug("extract UUID");
ret = afs_extract_data(call, false);
call->unmarshall++;
/* extract the FID array and its count in two steps */
- /* Fall through */
+ fallthrough;
case 1:
_debug("extract FID count");
ret = afs_extract_data(call, true);
afs_extract_to_buf(call, size);
call->unmarshall++;
- /* Fall through */
+ fallthrough;
case 2:
_debug("extract FID array");
ret = afs_extract_data(call, false);
net->dynroot_sb = NULL;
mutex_unlock(&net->proc_cells_lock);
- inode_lock(root->d_inode);
-
- /* Remove all the pins for dirs created for manually added cells */
- list_for_each_entry_safe(subdir, tmp, &root->d_subdirs, d_child) {
- if (subdir->d_fsdata) {
- subdir->d_fsdata = NULL;
- dput(subdir);
+ if (root) {
+ inode_lock(root->d_inode);
+
+ /* Remove all the pins for dirs created for manually added cells */
+ list_for_each_entry_safe(subdir, tmp, &root->d_subdirs, d_child) {
+ if (subdir->d_fsdata) {
+ subdir->d_fsdata = NULL;
+ dput(subdir);
+ }
}
- }
- inode_unlock(root->d_inode);
+ inode_unlock(root->d_inode);
+ }
}
case -ENOBUFS:
_debug("cache said ENOBUFS");
- /* fall through */
+ fallthrough;
default:
go_on:
req = kzalloc(struct_size(req, array, 1), GFP_KERNEL);
spin_unlock(&vnode->lock);
return;
- /* Fall through */
default:
/* Looks like a lock request was withdrawn. */
spin_unlock(&vnode->lock);
afs_end_cursor(&op->ac);
afs_put_serverlist(op->net, op->server_list);
afs_put_volume(op->net, op->volume, afs_volume_trace_put_put_op);
+ key_put(op->key);
kfree(op);
return ret;
}
}
}
- rtt_us = rxrpc_kernel_get_srtt(call->net->socket, call->rxcall);
- if (rtt_us < server->probe.rtt) {
+ if (rxrpc_kernel_get_srtt(call->net->socket, call->rxcall, &rtt_us) &&
+ rtt_us < server->probe.rtt) {
server->probe.rtt = rtt_us;
server->rtt = rtt_us;
alist->preferred = index;
call->tmp_u = htonl(0);
afs_extract_to_tmp(call);
}
- /* Fall through */
+ fallthrough;
/* extract the returned data length */
case 1:
call->bvec[0].bv_page = req->pages[req->index];
iov_iter_bvec(&call->def_iter, READ, call->bvec, 1, size);
ASSERTCMP(size, <=, PAGE_SIZE);
- /* Fall through */
+ fallthrough;
/* extract the returned data */
case 2:
/* Discard any excess data the server gave us */
afs_extract_discard(call, req->actual_len - req->len);
call->unmarshall = 3;
- /* Fall through */
+ fallthrough;
case 3:
_debug("extract discard %zu/%llu",
no_more_data:
call->unmarshall = 4;
afs_extract_to_buf(call, (21 + 3 + 6) * 4);
- /* Fall through */
+ fallthrough;
/* extract the metadata */
case 4:
case 0:
call->unmarshall++;
afs_extract_to_buf(call, 12 * 4);
- /* Fall through */
+ fallthrough;
/* extract the returned status record */
case 1:
xdr_decode_AFSFetchVolumeStatus(&bp, &op->volstatus.vs);
call->unmarshall++;
afs_extract_to_tmp(call);
- /* Fall through */
+ fallthrough;
/* extract the volume name length */
case 2:
size = (call->count + 3) & ~3; /* It's padded */
afs_extract_to_buf(call, size);
call->unmarshall++;
- /* Fall through */
+ fallthrough;
/* extract the volume name */
case 3:
_debug("volname '%s'", p);
afs_extract_to_tmp(call);
call->unmarshall++;
- /* Fall through */
+ fallthrough;
/* extract the offline message length */
case 4:
size = (call->count + 3) & ~3; /* It's padded */
afs_extract_to_buf(call, size);
call->unmarshall++;
- /* Fall through */
+ fallthrough;
/* extract the offline message */
case 5:
afs_extract_to_tmp(call);
call->unmarshall++;
- /* Fall through */
+ fallthrough;
/* extract the message of the day length */
case 6:
size = (call->count + 3) & ~3; /* It's padded */
afs_extract_to_buf(call, size);
call->unmarshall++;
- /* Fall through */
+ fallthrough;
/* extract the message of the day */
case 7:
case 0:
afs_extract_to_tmp(call);
call->unmarshall++;
- /* Fall through */
+ fallthrough;
/* Extract the capabilities word count */
case 1:
call->count2 = count;
afs_extract_discard(call, count * sizeof(__be32));
call->unmarshall++;
- /* Fall through */
+ fallthrough;
/* Extract capabilities words */
case 2:
case 0:
afs_extract_to_tmp(call);
call->unmarshall++;
- /* Fall through */
+ fallthrough;
/* Extract the file status count and array in two steps */
case 1:
call->unmarshall++;
more_counts:
afs_extract_to_buf(call, 21 * sizeof(__be32));
- /* Fall through */
+ fallthrough;
case 2:
_debug("extract status array %u", call->count);
call->count = 0;
call->unmarshall++;
afs_extract_to_tmp(call);
- /* Fall through */
+ fallthrough;
/* Extract the callback count and array in two steps */
case 3:
call->unmarshall++;
more_cbs:
afs_extract_to_buf(call, 3 * sizeof(__be32));
- /* Fall through */
+ fallthrough;
case 4:
_debug("extract CB array");
afs_extract_to_buf(call, 6 * sizeof(__be32));
call->unmarshall++;
- /* Fall through */
+ fallthrough;
case 5:
ret = afs_extract_data(call, false);
case 0:
afs_extract_to_tmp(call);
call->unmarshall++;
- /* Fall through */
+ fallthrough;
/* extract the returned data length */
case 1:
acl->size = call->count2;
afs_extract_begin(call, acl->data, size);
call->unmarshall++;
- /* Fall through */
+ fallthrough;
/* extract the returned data */
case 2:
afs_extract_to_buf(call, (21 + 6) * 4);
call->unmarshall++;
- /* Fall through */
+ fallthrough;
/* extract the metadata */
case 3:
#define AFS_VLSERVER_FL_PROBED 0 /* The VL server has been probed */
#define AFS_VLSERVER_FL_PROBING 1 /* VL server is being probed */
#define AFS_VLSERVER_FL_IS_YFS 2 /* Server is YFS not AFS */
+#define AFS_VLSERVER_FL_RESPONDING 3 /* VL server is responding */
rwlock_t lock; /* Lock on addresses */
atomic_t usage;
+ unsigned int rtt; /* Server's current RTT in uS */
/* Probe state */
wait_queue_head_t probe_wq;
atomic_t probe_outstanding;
spinlock_t probe_lock;
struct {
- unsigned int rtt; /* RTT as ktime/64 */
+ unsigned int rtt; /* RTT in uS */
u32 abort_code;
short error;
- bool have_result;
- bool responded:1;
- bool is_yfs:1;
- bool not_yfs:1;
- bool local_failure:1;
+ unsigned short flags;
+#define AFS_VLSERVER_PROBE_RESPONDED	0x01 /* At least one response (may be an abort) */
+#define AFS_VLSERVER_PROBE_IS_YFS 0x02 /* The peer appears to be YFS */
+#define AFS_VLSERVER_PROBE_NOT_YFS 0x04 /* The peer appears not to be YFS */
+#define AFS_VLSERVER_PROBE_LOCAL_FAILURE 0x08 /* A local failure prevented a probe */
} probe;
u16 port;
if (e->error == -ETIMEDOUT ||
e->error == -ETIME)
return;
- /* Fall through */
+ fallthrough;
case -ETIMEDOUT:
case -ETIME:
if (e->error == -ENOMEM ||
e->error == -ENONET)
return;
- /* Fall through */
+ fallthrough;
case -ENOMEM:
case -ENONET:
if (e->error == -ERFKILL)
return;
- /* Fall through */
+ fallthrough;
case -ERFKILL:
if (e->error == -EADDRNOTAVAIL)
return;
- /* Fall through */
+ fallthrough;
case -EADDRNOTAVAIL:
if (e->error == -ENETUNREACH)
return;
- /* Fall through */
+ fallthrough;
case -ENETUNREACH:
if (e->error == -EHOSTUNREACH)
return;
- /* Fall through */
+ fallthrough;
case -EHOSTUNREACH:
if (e->error == -EHOSTDOWN)
return;
- /* Fall through */
+ fallthrough;
case -EHOSTDOWN:
if (e->error == -ECONNREFUSED)
return;
- /* Fall through */
+ fallthrough;
case -ECONNREFUSED:
if (e->error == -ECONNRESET)
return;
- /* Fall through */
+ fallthrough;
case -ECONNRESET: /* Responded, but call expired. */
if (e->responded)
return;
alist->preferred == i ? '>' : '-',
&alist->addrs[i].transport);
}
+ seq_printf(m, " info: fl=%lx rtt=%d\n", vlserver->flags, vlserver->rtt);
+ seq_printf(m, " probe: fl=%x e=%d ac=%d out=%d\n",
+ vlserver->probe.flags, vlserver->probe.error,
+ vlserver->probe.abort_code,
+ atomic_read(&vlserver->probe_outstanding));
return 0;
}
case -ETIME:
if (op->error != -EDESTADDRREQ)
goto iterate_address;
- /* Fall through */
+ fallthrough;
case -ERFKILL:
case -EADDRNOTAVAIL:
case -ENETUNREACH:
case -EIO:
pr_err("kAFS: Call %u in bad state %u\n",
call->debug_id, state);
- /* Fall through */
+ fallthrough;
case -ENODATA:
case -EBADMSG:
case -EMSGSIZE:
ret = call->ret0;
call->ret0 = 0;
- /* Fall through */
+ fallthrough;
case -ECONNABORTED:
ac->responded = true;
break;
_debug("oom");
rxrpc_kernel_abort_call(net->socket, call->rxcall,
RX_USER_ABORT, -ENOMEM, "KOO");
- /* Fall through */
+ fallthrough;
default:
_leave(" [error]");
return;
rwlock_init(&vlserver->lock);
init_waitqueue_head(&vlserver->probe_wq);
spin_lock_init(&vlserver->probe_lock);
+ vlserver->rtt = UINT_MAX;
vlserver->name_len = name_len;
vlserver->port = port;
memcpy(vlserver->name, name, name_len);
#include "internal.h"
#include "protocol_yfs.h"
-static bool afs_vl_probe_done(struct afs_vlserver *server)
+
+/*
+ * Handle the completion of a set of probes.
+ */
+static void afs_finished_vl_probe(struct afs_vlserver *server)
{
- if (!atomic_dec_and_test(&server->probe_outstanding))
- return false;
+ if (!(server->probe.flags & AFS_VLSERVER_PROBE_RESPONDED)) {
+ server->rtt = UINT_MAX;
+ clear_bit(AFS_VLSERVER_FL_RESPONDING, &server->flags);
+ }
- wake_up_var(&server->probe_outstanding);
clear_bit_unlock(AFS_VLSERVER_FL_PROBING, &server->flags);
wake_up_bit(&server->flags, AFS_VLSERVER_FL_PROBING);
- return true;
+}
+
+/*
+ * Handle the completion of a probe RPC call.
+ */
+static void afs_done_one_vl_probe(struct afs_vlserver *server, bool wake_up)
+{
+ if (atomic_dec_and_test(&server->probe_outstanding)) {
+ afs_finished_vl_probe(server);
+ wake_up = true;
+ }
+
+ if (wake_up)
+ wake_up_all(&server->probe_wq);
}
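afs_done_one_vl_probe() above implements a last-one-out convention: whichever probe RPC drops probe_outstanding to zero runs the completion and wakes the waiters. The core of that pattern with C11 atomics (finished() is a placeholder for afs_finished_vl_probe()):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int outstanding;

/* Returns true for exactly one caller: the one that retired the last
 * outstanding probe and is therefore responsible for completion. */
static bool done_one(void (*finished)(void))
{
	if (atomic_fetch_sub(&outstanding, 1) == 1) {
		finished();
		return true;
	}
	return false;
}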
/*
server->probe.error = 0;
goto responded;
case -ECONNABORTED:
- if (!server->probe.responded) {
+ if (!(server->probe.flags & AFS_VLSERVER_PROBE_RESPONDED)) {
server->probe.abort_code = call->abort_code;
server->probe.error = ret;
}
goto responded;
case -ENOMEM:
case -ENONET:
- server->probe.local_failure = true;
- afs_io_error(call, afs_io_error_vl_probe_fail);
+ case -EKEYEXPIRED:
+ case -EKEYREVOKED:
+ case -EKEYREJECTED:
+ server->probe.flags |= AFS_VLSERVER_PROBE_LOCAL_FAILURE;
+ if (server->probe.error == 0)
+ server->probe.error = ret;
+ trace_afs_io_error(call->debug_id, ret, afs_io_error_vl_probe_fail);
goto out;
case -ECONNRESET: /* Responded, but call expired. */
case -ERFKILL:
default:
clear_bit(index, &alist->responded);
set_bit(index, &alist->failed);
- if (!server->probe.responded &&
+ if (!(server->probe.flags & AFS_VLSERVER_PROBE_RESPONDED) &&
(server->probe.error == 0 ||
server->probe.error == -ETIMEDOUT ||
server->probe.error == -ETIME))
server->probe.error = ret;
- afs_io_error(call, afs_io_error_vl_probe_fail);
+ trace_afs_io_error(call->debug_id, ret, afs_io_error_vl_probe_fail);
goto out;
}
clear_bit(index, &alist->failed);
if (call->service_id == YFS_VL_SERVICE) {
- server->probe.is_yfs = true;
+ server->probe.flags |= AFS_VLSERVER_PROBE_IS_YFS;
set_bit(AFS_VLSERVER_FL_IS_YFS, &server->flags);
alist->addrs[index].srx_service = call->service_id;
} else {
- server->probe.not_yfs = true;
- if (!server->probe.is_yfs) {
+ server->probe.flags |= AFS_VLSERVER_PROBE_NOT_YFS;
+ if (!(server->probe.flags & AFS_VLSERVER_PROBE_IS_YFS)) {
clear_bit(AFS_VLSERVER_FL_IS_YFS, &server->flags);
alist->addrs[index].srx_service = call->service_id;
}
}
- rtt_us = rxrpc_kernel_get_srtt(call->net->socket, call->rxcall);
- if (rtt_us < server->probe.rtt) {
+ if (rxrpc_kernel_get_srtt(call->net->socket, call->rxcall, &rtt_us) &&
+ rtt_us < server->probe.rtt) {
server->probe.rtt = rtt_us;
+ server->rtt = rtt_us;
alist->preferred = index;
- have_result = true;
}
smp_wmb(); /* Set rtt before responded. */
- server->probe.responded = true;
+ server->probe.flags |= AFS_VLSERVER_PROBE_RESPONDED;
set_bit(AFS_VLSERVER_FL_PROBED, &server->flags);
+ set_bit(AFS_VLSERVER_FL_RESPONDING, &server->flags);
+ have_result = true;
out:
spin_unlock(&server->probe_lock);
_debug("probe [%u][%u] %pISpc rtt=%u ret=%d",
server_index, index, &alist->addrs[index].transport, rtt_us, ret);
- have_result |= afs_vl_probe_done(server);
- if (have_result) {
- server->probe.have_result = true;
- wake_up_var(&server->probe.have_result);
- wake_up_all(&server->probe_wq);
- }
+ afs_done_one_vl_probe(server, have_result);
}
/*
in_progress = true;
} else {
afs_prioritise_error(_e, PTR_ERR(call), ac.abort_code);
+ afs_done_one_vl_probe(server, false);
}
}
- if (!in_progress)
- afs_vl_probe_done(server);
return in_progress;
}
{
struct wait_queue_entry *waits;
struct afs_vlserver *server;
- unsigned int rtt = UINT_MAX;
+ unsigned int rtt = UINT_MAX, rtt_s;
bool have_responders = false;
int pref = -1, i;
server = vllist->servers[i].server;
if (!test_bit(AFS_VLSERVER_FL_PROBING, &server->flags))
__clear_bit(i, &untried);
- if (server->probe.responded)
+ if (server->probe.flags & AFS_VLSERVER_PROBE_RESPONDED)
have_responders = true;
}
}
for (i = 0; i < vllist->nr_servers; i++) {
if (test_bit(i, &untried)) {
server = vllist->servers[i].server;
- if (server->probe.responded)
+ if (server->probe.flags & AFS_VLSERVER_PROBE_RESPONDED)
goto stop;
if (test_bit(AFS_VLSERVER_FL_PROBING, &server->flags))
still_probing = true;
for (i = 0; i < vllist->nr_servers; i++) {
if (test_bit(i, &untried)) {
server = vllist->servers[i].server;
- if (server->probe.responded &&
- server->probe.rtt < rtt) {
+ rtt_s = READ_ONCE(server->rtt);
+ if (test_bit(AFS_VLSERVER_FL_RESPONDING, &server->flags) &&
+ rtt_s < rtt) {
pref = i;
- rtt = server->probe.rtt;
+ rtt = rtt_s;
}
remove_wait_queue(&server->probe_wq, &waits[i]);
for (i = 0; i < vc->server_list->nr_servers; i++) {
struct afs_vlserver *s = vc->server_list->servers[i].server;
- if (!test_bit(i, &vc->untried) || !s->probe.responded)
+ if (!test_bit(i, &vc->untried) ||
+ !test_bit(AFS_VLSERVER_FL_RESPONDING, &s->flags))
continue;
if (s->probe.rtt < rtt) {
vc->index = i;
for (i = 0; i < vc->server_list->nr_servers; i++) {
struct afs_vlserver *s = vc->server_list->servers[i].server;
+ if (test_bit(AFS_VLSERVER_FL_RESPONDING, &s->flags))
+ e.responded = true;
afs_prioritise_error(&e, READ_ONCE(s->probe.error),
s->probe.abort_code);
}
+ error = e.error;
+
failed_set_error:
vc->error = error;
failed:
/* Extract the returned uuid, uniquifier, nentries and
* blkaddrs size */
- /* Fall through */
+ fallthrough;
case 1:
ret = afs_extract_data(call, true);
if (ret < 0)
count = min(call->count, 4U);
afs_extract_to_buf(call, count * sizeof(__be32));
- /* Fall through - and extract entries */
+ fallthrough; /* and extract entries */
case 2:
ret = afs_extract_data(call, call->count > 4);
if (ret < 0)
afs_extract_to_tmp(call);
call->unmarshall++;
- /* Fall through - and extract the capabilities word count */
+ fallthrough; /* and extract the capabilities word count */
case 1:
ret = afs_extract_data(call, true);
if (ret < 0)
call->unmarshall++;
afs_extract_discard(call, count * sizeof(__be32));
- /* Fall through - and extract capabilities words */
+ fallthrough; /* and extract capabilities words */
case 2:
ret = afs_extract_data(call, false);
if (ret < 0)
/* Extract the returned uuid, uniquifier, fsEndpoints count and
* either the first fsEndpoint type or the volEndpoints
* count if there are no fsEndpoints. */
- /* Fall through */
+ fallthrough;
case 1:
ret = afs_extract_data(call, true);
if (ret < 0)
afs_extract_to_buf(call, size);
call->unmarshall = 2;
- /* Fall through - and extract fsEndpoints[] entries */
+ fallthrough; /* and extract fsEndpoints[] entries */
case 2:
ret = afs_extract_data(call, true);
if (ret < 0)
* extract the type of the next endpoint when we extract the
* data of the current one, but this is the first...
*/
- /* Fall through */
+ fallthrough;
case 3:
ret = afs_extract_data(call, true);
if (ret < 0)
afs_extract_to_buf(call, size);
call->unmarshall = 4;
- /* Fall through - and extract volEndpoints[] entries */
+ fallthrough; /* and extract volEndpoints[] entries */
case 4:
ret = afs_extract_data(call, true);
if (ret < 0)
afs_extract_discard(call, 0);
call->unmarshall = 5;
- /* Fall through - Done */
+ fallthrough; /* Done */
case 5:
ret = afs_extract_data(call, false);
if (ret < 0)
afs_extract_to_tmp(call);
call->unmarshall++;
- /* Fall through - and extract the cell name length */
+ fallthrough; /* and extract the cell name length */
case 1:
ret = afs_extract_data(call, true);
if (ret < 0)
afs_extract_begin(call, cell_name, namesz);
call->unmarshall++;
- /* Fall through - and extract cell name */
+ fallthrough; /* and extract cell name */
case 2:
ret = afs_extract_data(call, true);
if (ret < 0)
afs_extract_discard(call, call->count2);
call->unmarshall++;
- /* Fall through - and extract padding */
+ fallthrough; /* and extract padding */
case 3:
ret = afs_extract_data(call, false);
if (ret < 0)
default:
pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret);
- /* Fall through */
+ fallthrough;
case -EACCES:
case -EPERM:
case -ENOKEY:
req->offset = req->pos & (PAGE_SIZE - 1);
afs_extract_to_tmp64(call);
call->unmarshall++;
- /* Fall through */
+ fallthrough;
/* extract the returned data length */
case 1:
call->bvec[0].bv_page = req->pages[req->index];
iov_iter_bvec(&call->def_iter, READ, call->bvec, 1, size);
ASSERTCMP(size, <=, PAGE_SIZE);
- /* Fall through */
+ fallthrough;
/* extract the returned data */
case 2:
/* Discard any excess data the server gave us */
afs_extract_discard(call, req->actual_len - req->len);
call->unmarshall = 3;
- /* Fall through */
+ fallthrough;
case 3:
_debug("extract discard %zu/%llu",
sizeof(struct yfs_xdr_YFSFetchStatus) +
sizeof(struct yfs_xdr_YFSCallBack) +
sizeof(struct yfs_xdr_YFSVolSync));
- /* Fall through */
+ fallthrough;
/* extract the metadata */
case 4:
req->file_size = vp->scb.status.size;
call->unmarshall++;
- /* Fall through */
+ fallthrough;
case 5:
break;
case 0:
call->unmarshall++;
afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSFetchVolumeStatus));
- /* Fall through */
+ fallthrough;
/* extract the returned status record */
case 1:
xdr_decode_YFSFetchVolumeStatus(&bp, &op->volstatus.vs);
call->unmarshall++;
afs_extract_to_tmp(call);
- /* Fall through */
+ fallthrough;
/* extract the volume name length */
case 2:
size = (call->count + 3) & ~3; /* It's padded */
afs_extract_to_buf(call, size);
call->unmarshall++;
- /* Fall through */
+ fallthrough;
/* extract the volume name */
case 3:
_debug("volname '%s'", p);
afs_extract_to_tmp(call);
call->unmarshall++;
- /* Fall through */
+ fallthrough;
/* extract the offline message length */
case 4:
size = (call->count + 3) & ~3; /* It's padded */
afs_extract_to_buf(call, size);
call->unmarshall++;
- /* Fall through */
+ fallthrough;
/* extract the offline message */
case 5:
afs_extract_to_tmp(call);
call->unmarshall++;
- /* Fall through */
+ fallthrough;
/* extract the message of the day length */
case 6:
size = (call->count + 3) & ~3; /* It's padded */
afs_extract_to_buf(call, size);
call->unmarshall++;
- /* Fall through */
+ fallthrough;
/* extract the message of the day */
case 7:
_debug("motd '%s'", p);
call->unmarshall++;
- /* Fall through */
+ fallthrough;
case 8:
break;
case 0:
afs_extract_to_tmp(call);
call->unmarshall++;
- /* Fall through */
+ fallthrough;
/* Extract the file status count and array in two steps */
case 1:
call->unmarshall++;
more_counts:
afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSFetchStatus));
- /* Fall through */
+ fallthrough;
case 2:
_debug("extract status array %u", call->count);
call->count = 0;
call->unmarshall++;
afs_extract_to_tmp(call);
- /* Fall through */
+ fallthrough;
/* Extract the callback count and array in two steps */
case 3:
call->unmarshall++;
more_cbs:
afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSCallBack));
- /* Fall through */
+ fallthrough;
case 4:
_debug("extract CB array");
afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSVolSync));
call->unmarshall++;
- /* Fall through */
+ fallthrough;
case 5:
ret = afs_extract_data(call, false);
xdr_decode_YFSVolSync(&bp, &op->volsync);
call->unmarshall++;
- /* Fall through */
+ fallthrough;
case 6:
break;
case 0:
afs_extract_to_tmp(call);
call->unmarshall++;
- /* Fall through */
+ fallthrough;
/* Extract the file ACL length */
case 1:
afs_extract_discard(call, size);
}
call->unmarshall++;
- /* Fall through */
+ fallthrough;
/* Extract the file ACL */
case 2:
afs_extract_to_tmp(call);
call->unmarshall++;
- /* Fall through */
+ fallthrough;
/* Extract the volume ACL length */
case 3:
afs_extract_discard(call, size);
}
call->unmarshall++;
- /* Fall through */
+ fallthrough;
/* Extract the volume ACL */
case 4:
sizeof(struct yfs_xdr_YFSFetchStatus) +
sizeof(struct yfs_xdr_YFSVolSync));
call->unmarshall++;
- /* Fall through */
+ fallthrough;
/* extract the metadata */
case 5:
xdr_decode_YFSVolSync(&bp, &op->volsync);
call->unmarshall++;
- /* Fall through */
+ fallthrough;
case 6:
break;
* may be already running. Just fail this IO with EINTR.
*/
ret = -EINTR;
- /*FALLTHRU*/
+ fallthrough;
default:
req->ki_complete(req, ret, 0);
}
goto err;
}
- len = data_len + extra;
+ len = data_len + extra + MAX_SHARED_LIBS * sizeof(unsigned long);
len = PAGE_ALIGN(len);
realdatastart = vm_mmap(NULL, 0, len,
PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE, 0);
vm_munmap(textpos, text_len);
goto err;
}
- datapos = ALIGN(realdatastart, FLAT_DATA_ALIGN);
+ datapos = ALIGN(realdatastart +
+ MAX_SHARED_LIBS * sizeof(unsigned long),
+ FLAT_DATA_ALIGN);
pr_debug("Allocated data+bss+stack (%u bytes): %lx\n",
data_len + bss_len + stack_len, datapos);
memp_size = len;
} else {
- len = text_len + data_len + extra;
+ len = text_len + data_len + extra + MAX_SHARED_LIBS * sizeof(u32);
len = PAGE_ALIGN(len);
textpos = vm_mmap(NULL, 0, len,
PROT_READ | PROT_EXEC | PROT_WRITE, MAP_PRIVATE, 0);
}
realdatastart = textpos + ntohl(hdr->data_start);
- datapos = ALIGN(realdatastart, FLAT_DATA_ALIGN);
+ datapos = ALIGN(realdatastart +
+ MAX_SHARED_LIBS * sizeof(u32),
+ FLAT_DATA_ALIGN);
reloc = (__be32 __user *)
(datapos + (ntohl(hdr->reloc_start) - text_len));
(text_len + full_data
- sizeof(struct flat_hdr)),
0);
- if (datapos != realdatastart)
- memmove((void *)datapos, (void *)realdatastart,
- full_data);
+ memmove((void *) datapos, (void *) realdatastart,
+ full_data);
#else
/*
* This is used on MMU systems mainly for testing.
if (IS_ERR_VALUE(result)) {
ret = result;
pr_err("Unable to read code+data+bss, errno %d\n", ret);
- vm_munmap(textpos, text_len + data_len + extra);
+ vm_munmap(textpos, text_len + data_len + extra +
+ MAX_SHARED_LIBS * sizeof(u32));
goto err;
}
}
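The reasoning behind the binfmt_flat hunks above is purely arithmetic: the MAX_SHARED_LIBS pointer slots are now reserved before the data start is aligned, so the slots can no longer push data past the end of the mapping. A toy sketch with invented constants (not the kernel's values):

#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

int main(void)
{
	uintptr_t realdatastart = 0x10004;		/* pretend mmap() result */
	uintptr_t slots = 4 * sizeof(uint32_t);		/* MAX_SHARED_LIBS slots */
	uintptr_t align = 8;				/* FLAT_DATA_ALIGN stand-in */

	/* old: align first, so the slots could spill past the mapping */
	uintptr_t old_datapos = ALIGN_UP(realdatastart, align);
	/* new: reserve the slots first, then align */
	uintptr_t new_datapos = ALIGN_UP(realdatastart + slots, align);

	printf("old datapos %#lx, new datapos %#lx\n",
	       (unsigned long)old_datapos, (unsigned long)new_datapos);
	return 0;
}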
cache->fs_info = fs_info;
cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start);
- set_free_space_tree_thresholds(cache);
cache->discard_index = BTRFS_DISCARD_INDEX_UNUSED;
if (ret < 0)
goto error;
+ set_free_space_tree_thresholds(cache);
+
if (need_clear) {
/*
* When we mount with old space cache, we need to
return -ENOMEM;
cache->length = size;
+ set_free_space_tree_thresholds(cache);
cache->used = bytes_used;
cache->flags = type;
cache->last_byte_to_unpin = (u64)-1;
btrfs_csums[csum_type].name;
}
-size_t __const btrfs_get_num_csums(void)
+size_t __attribute_const__ btrfs_get_num_csums(void)
{
return ARRAY_SIZE(btrfs_csums);
}
btrfs_tree_read_unlock_blocking(eb);
free_extent_buffer(eb);
+ btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb_rewin),
+ eb_rewin, btrfs_header_level(eb_rewin));
btrfs_tree_read_lock(eb_rewin);
__tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
WARN_ON(btrfs_header_nritems(eb_rewin) >
if (!eb)
return NULL;
- btrfs_tree_read_lock(eb);
if (old_root) {
btrfs_set_header_bytenr(eb, eb->start);
btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
btrfs_set_header_level(eb, old_root->level);
btrfs_set_header_generation(eb, old_generation);
}
+ btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb), eb,
+ btrfs_header_level(eb));
+ btrfs_tree_read_lock(eb);
if (tm)
__tree_mod_log_rewind(fs_info, eb, time_seq, tm);
else
int btrfs_super_csum_size(const struct btrfs_super_block *s);
const char *btrfs_super_csum_name(u16 csum_type);
const char *btrfs_super_csum_driver(u16 csum_type);
-size_t __const btrfs_get_num_csums(void);
+size_t __attribute_const__ btrfs_get_num_csums(void);
/*
u64 bytenr, u64 num_bytes);
int btrfs_exclude_logged_extents(struct extent_buffer *eb);
int btrfs_cross_ref_exist(struct btrfs_root *root,
- u64 objectid, u64 offset, u64 bytenr);
+ u64 objectid, u64 offset, u64 bytenr, bool strict);
struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 parent, u64 root_objectid,
u64 start, u64 len);
noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
u64 *orig_start, u64 *orig_block_len,
- u64 *ram_bytes);
+ u64 *ram_bytes, bool strict);
void __btrfs_del_delalloc_inode(struct btrfs_root *root,
struct btrfs_inode *inode);
cache->io_ctl.inode = NULL;
iput(inode);
}
+ ASSERT(cache->io_ctl.pages == NULL);
btrfs_put_block_group(cache);
}
static noinline int check_committed_ref(struct btrfs_root *root,
struct btrfs_path *path,
- u64 objectid, u64 offset, u64 bytenr)
+ u64 objectid, u64 offset, u64 bytenr,
+ bool strict)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_root *extent_root = fs_info->extent_root;
btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
goto out;
- /* If extent created before last snapshot => it's definitely shared */
- if (btrfs_extent_generation(leaf, ei) <=
- btrfs_root_last_snapshot(&root->root_item))
+ /*
+ * If extent created before last snapshot => it's shared unless the
+ * snapshot has been deleted. Use the heuristic if strict is false.
+ */
+ if (!strict &&
+ (btrfs_extent_generation(leaf, ei) <=
+ btrfs_root_last_snapshot(&root->root_item)))
goto out;
iref = (struct btrfs_extent_inline_ref *)(ei + 1);
}
int btrfs_cross_ref_exist(struct btrfs_root *root, u64 objectid, u64 offset,
- u64 bytenr)
+ u64 bytenr, bool strict)
{
struct btrfs_path *path;
int ret;
do {
ret = check_committed_ref(root, path, objectid,
- offset, bytenr);
+ offset, bytenr, strict);
if (ret && ret != -ENOENT)
goto out;
return ERR_PTR(-EUCLEAN);
}
- btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
+ btrfs_set_buffer_lockdep_class(owner, buf, level);
btrfs_tree_lock(buf);
btrfs_clean_tree_block(buf);
clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
}
}
-int read_extent_buffer_to_user(const struct extent_buffer *eb,
- void __user *dstv,
- unsigned long start, unsigned long len)
+int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
+ void __user *dstv,
+ unsigned long start, unsigned long len)
{
size_t cur;
size_t offset;
cur = min(len, (PAGE_SIZE - offset));
kaddr = page_address(page);
- if (copy_to_user(dst, kaddr + offset, cur)) {
+ if (copy_to_user_nofault(dst, kaddr + offset, cur)) {
ret = -EFAULT;
break;
}
void read_extent_buffer(const struct extent_buffer *eb, void *dst,
unsigned long start,
unsigned long len);
-int read_extent_buffer_to_user(const struct extent_buffer *eb,
- void __user *dst, unsigned long start,
- unsigned long len);
+int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
+ void __user *dst, unsigned long start,
+ unsigned long len);
void write_extent_buffer_fsid(const struct extent_buffer *eb, const void *src);
void write_extent_buffer_chunk_tree_uuid(const struct extent_buffer *eb,
const void *src);
}
ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes,
- NULL, NULL, NULL);
+ NULL, NULL, NULL, false);
if (ret <= 0) {
ret = 0;
if (!nowait)
ret = update_cache_item(trans, root, inode, path, offset,
io_ctl->entries, io_ctl->bitmaps);
out:
- io_ctl_free(io_ctl);
if (ret) {
invalidate_inode_pages2(inode->i_mapping);
BTRFS_I(inode)->generation = 0;
* them out later
*/
io_ctl_drop_pages(io_ctl);
+ io_ctl_free(io_ctl);
unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
i_size_read(inode) - 1, &cached_state);
size_t bitmap_size;
u64 num_bitmaps, total_bitmap_size;
+ if (WARN_ON(cache->length == 0))
+ btrfs_warn(cache->fs_info, "block group %llu length is zero",
+ cache->start);
+
/*
* We convert to bitmaps when the disk space required for using extents
* exceeds that required for using bitmaps.
goto out_check;
ret = btrfs_cross_ref_exist(root, ino,
found_key.offset -
- extent_offset, disk_bytenr);
+ extent_offset, disk_bytenr, false);
if (ret) {
/*
* ret could be -EIO if the above fails to read
u64 bio_offset)
{
struct inode *inode = private_data;
- blk_status_t ret = 0;
- ret = btrfs_csum_one_bio(BTRFS_I(inode), bio, 0, 0);
- BUG_ON(ret); /* -ENOMEM */
- return 0;
+ return btrfs_csum_one_bio(BTRFS_I(inode), bio, 0, 0);
}
/*
* @orig_start: (optional) Return the original file offset of the file extent
* @orig_len: (optional) Return the original on-disk length of the file extent
* @ram_bytes: (optional) Return the ram_bytes of the file extent
+ * @strict: if true, omit optimizations that might force us into unnecessary
+ * COW, e.g. don't trust the generation number.
*
* This function will flush ordered extents in the range to ensure proper
* nocow checks for (nowait == false) case.
*/
noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
u64 *orig_start, u64 *orig_block_len,
- u64 *ram_bytes)
+ u64 *ram_bytes, bool strict)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_path *path;
* Do the same check as in btrfs_cross_ref_exist but without the
* unnecessary search.
*/
- if (btrfs_file_extent_generation(leaf, fi) <=
- btrfs_root_last_snapshot(&root->root_item))
+ if (!strict &&
+ (btrfs_file_extent_generation(leaf, fi) <=
+ btrfs_root_last_snapshot(&root->root_item)))
goto out;
backref_offset = btrfs_file_extent_offset(leaf, fi);
*/
ret = btrfs_cross_ref_exist(root, btrfs_ino(BTRFS_I(inode)),
- key.offset - backref_offset, disk_bytenr);
+ key.offset - backref_offset, disk_bytenr,
+ strict);
if (ret) {
ret = 0;
goto out;
block_start = em->block_start + (start - em->start);
if (can_nocow_extent(inode, start, &len, &orig_start,
- &orig_block_len, &ram_bytes) == 1 &&
+ &orig_block_len, &ram_bytes, false) == 1 &&
btrfs_inc_nocow_writers(fs_info, block_start)) {
struct extent_map *em2;
struct bio *bio, u64 offset)
{
struct inode *inode = private_data;
- blk_status_t ret;
- ret = btrfs_csum_one_bio(BTRFS_I(inode), bio, offset, 1);
- BUG_ON(ret); /* -ENOMEM */
- return 0;
+
+ return btrfs_csum_one_bio(BTRFS_I(inode), bio, offset, 1);
}
static void btrfs_end_dio_bio(struct bio *bio)
free_extent_map(em);
em = NULL;
- ret = can_nocow_extent(inode, start, &len, NULL, NULL, NULL);
+ ret = can_nocow_extent(inode, start, &len, NULL, NULL, NULL, true);
if (ret < 0) {
goto out;
} else if (ret) {
sh.len = item_len;
sh.transid = found_transid;
- /* copy search result header */
- if (copy_to_user(ubuf + *sk_offset, &sh, sizeof(sh))) {
- ret = -EFAULT;
+ /*
+ * Copy search result header. If we fault then loop again so we
+ * can fault in the pages and -EFAULT there if there's a
+ * problem. Otherwise we'll fault and then copy the buffer in
+ * properly this next time through
+ */
+ if (copy_to_user_nofault(ubuf + *sk_offset, &sh, sizeof(sh))) {
+ ret = 0;
goto out;
}
if (item_len) {
char __user *up = ubuf + *sk_offset;
- /* copy the item */
- if (read_extent_buffer_to_user(leaf, up,
- item_off, item_len)) {
- ret = -EFAULT;
+ /*
+ * Copy the item, same behavior as above, but reset the
+ * *sk_offset so we copy the full thing again.
+ */
+ if (read_extent_buffer_to_user_nofault(leaf, up,
+ item_off, item_len)) {
+ ret = 0;
+ *sk_offset -= sizeof(sh);
goto out;
}
key.offset = sk->min_offset;
while (1) {
+ ret = fault_in_pages_writeable(ubuf, *buf_size - sk_offset);
+ if (ret)
+ break;
+
ret = btrfs_search_forward(root, &key, path, sk->min_transid);
if (ret != 0) {
if (ret > 0)
return 0;
}
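The comments above describe a copy-then-retry protocol: copy_to_user_nofault() must not fault while tree locks are held, so a faulting copy abandons the attempt, and fault_in_pages_writeable() pre-faults the destination at the top of the loop before anything is locked. A toy userspace model of that control flow (names invented; the forced first failure stands in for a page being reclaimed between pre-fault and copy):

#include <stdio.h>
#include <string.h>

static int copy_attempts;

/* Must not fault in the "locked" region: the first call pretends the
 * destination page was not resident and bails without copying. */
static int copy_nofault(char *dst, const char *src, size_t len)
{
	if (copy_attempts++ == 0)
		return -1;
	memcpy(dst, src, len);
	return 0;
}

/* Stands in for fault_in_pages_writeable(): make the destination
 * resident before any locks are taken. */
static int fault_in_dest(char *dst, size_t len)
{
	memset(dst, 0, len);
	return 0;
}

int main(void)
{
	char dst[16];

	while (1) {
		if (fault_in_dest(dst, sizeof(dst)))
			break;			/* truly unwritable: give up */
		/* ... take locks; the copy below must not fault ... */
		if (copy_nofault(dst, "result", 7) == 0)
			break;			/* copied, done */
		/* ... drop locks and loop to fault pages back in ... */
	}
	printf("%s\n", dst);
	return 0;
}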
+static void scrub_workers_put(struct btrfs_fs_info *fs_info)
+{
+ if (refcount_dec_and_mutex_lock(&fs_info->scrub_workers_refcnt,
+ &fs_info->scrub_lock)) {
+ struct btrfs_workqueue *scrub_workers = NULL;
+ struct btrfs_workqueue *scrub_wr_comp = NULL;
+ struct btrfs_workqueue *scrub_parity = NULL;
+
+ scrub_workers = fs_info->scrub_workers;
+ scrub_wr_comp = fs_info->scrub_wr_completion_workers;
+ scrub_parity = fs_info->scrub_parity_workers;
+
+ fs_info->scrub_workers = NULL;
+ fs_info->scrub_wr_completion_workers = NULL;
+ fs_info->scrub_parity_workers = NULL;
+ mutex_unlock(&fs_info->scrub_lock);
+
+ btrfs_destroy_workqueue(scrub_workers);
+ btrfs_destroy_workqueue(scrub_wr_comp);
+ btrfs_destroy_workqueue(scrub_parity);
+ }
+}
+
/*
* get a reference count on fs_info->scrub_workers. start worker if necessary
*/
static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
int is_dev_replace)
{
+ struct btrfs_workqueue *scrub_workers = NULL;
+ struct btrfs_workqueue *scrub_wr_comp = NULL;
+ struct btrfs_workqueue *scrub_parity = NULL;
unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
int max_active = fs_info->thread_pool_size;
+ int ret = -ENOMEM;
- lockdep_assert_held(&fs_info->scrub_lock);
+ if (refcount_inc_not_zero(&fs_info->scrub_workers_refcnt))
+ return 0;
- if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) {
- ASSERT(fs_info->scrub_workers == NULL);
- fs_info->scrub_workers = btrfs_alloc_workqueue(fs_info, "scrub",
- flags, is_dev_replace ? 1 : max_active, 4);
- if (!fs_info->scrub_workers)
- goto fail_scrub_workers;
-
- ASSERT(fs_info->scrub_wr_completion_workers == NULL);
- fs_info->scrub_wr_completion_workers =
- btrfs_alloc_workqueue(fs_info, "scrubwrc", flags,
- max_active, 2);
- if (!fs_info->scrub_wr_completion_workers)
- goto fail_scrub_wr_completion_workers;
+ scrub_workers = btrfs_alloc_workqueue(fs_info, "scrub", flags,
+ is_dev_replace ? 1 : max_active, 4);
+ if (!scrub_workers)
+ goto fail_scrub_workers;
- ASSERT(fs_info->scrub_parity_workers == NULL);
- fs_info->scrub_parity_workers =
- btrfs_alloc_workqueue(fs_info, "scrubparity", flags,
+ scrub_wr_comp = btrfs_alloc_workqueue(fs_info, "scrubwrc", flags,
max_active, 2);
- if (!fs_info->scrub_parity_workers)
- goto fail_scrub_parity_workers;
+ if (!scrub_wr_comp)
+ goto fail_scrub_wr_completion_workers;
+ scrub_parity = btrfs_alloc_workqueue(fs_info, "scrubparity", flags,
+ max_active, 2);
+ if (!scrub_parity)
+ goto fail_scrub_parity_workers;
+
+ mutex_lock(&fs_info->scrub_lock);
+ if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) {
+ ASSERT(fs_info->scrub_workers == NULL &&
+ fs_info->scrub_wr_completion_workers == NULL &&
+ fs_info->scrub_parity_workers == NULL);
+ fs_info->scrub_workers = scrub_workers;
+ fs_info->scrub_wr_completion_workers = scrub_wr_comp;
+ fs_info->scrub_parity_workers = scrub_parity;
refcount_set(&fs_info->scrub_workers_refcnt, 1);
- } else {
- refcount_inc(&fs_info->scrub_workers_refcnt);
+ mutex_unlock(&fs_info->scrub_lock);
+ return 0;
}
- return 0;
+ /* Other thread raced in and created the workers for us */
+ refcount_inc(&fs_info->scrub_workers_refcnt);
+ mutex_unlock(&fs_info->scrub_lock);
+ ret = 0;
+ btrfs_destroy_workqueue(scrub_parity);
fail_scrub_parity_workers:
- btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
+ btrfs_destroy_workqueue(scrub_wr_comp);
fail_scrub_wr_completion_workers:
- btrfs_destroy_workqueue(fs_info->scrub_workers);
+ btrfs_destroy_workqueue(scrub_workers);
fail_scrub_workers:
- return -ENOMEM;
+ return ret;
}
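scrub_workers_get() above follows a common lock-avoidance shape: try refcount_inc_not_zero() as a lock-free fast path, otherwise allocate the workqueues without holding the lock, install them under the lock only if still first, and destroy the spares when another thread won the race. A toy model with invented names (a pthread mutex stands in for scrub_lock):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int *shared;		/* stands in for the workqueues */
static unsigned int refcnt;	/* stands in for scrub_workers_refcnt */

static int resource_get(void)
{
	int *mine = malloc(sizeof(*mine));	/* allocated lock-free */

	if (!mine)
		return -1;
	*mine = 42;

	pthread_mutex_lock(&lock);
	if (refcnt == 0) {
		shared = mine;			/* we won: install it */
		refcnt = 1;
		pthread_mutex_unlock(&lock);
		return 0;
	}
	refcnt++;				/* raced: reuse the winner's */
	pthread_mutex_unlock(&lock);
	free(mine);				/* discard our spare */
	return 0;
}

int main(void)
{
	resource_get();
	resource_get();
	printf("refcnt=%u value=%d\n", refcnt, *shared);	/* 2, 42 */
	free(shared);
	return 0;
}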
int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
int ret;
struct btrfs_device *dev;
unsigned int nofs_flag;
- struct btrfs_workqueue *scrub_workers = NULL;
- struct btrfs_workqueue *scrub_wr_comp = NULL;
- struct btrfs_workqueue *scrub_parity = NULL;
if (btrfs_fs_closing(fs_info))
return -EAGAIN;
if (IS_ERR(sctx))
return PTR_ERR(sctx);
+ ret = scrub_workers_get(fs_info, is_dev_replace);
+ if (ret)
+ goto out_free_ctx;
+
mutex_lock(&fs_info->fs_devices->device_list_mutex);
dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true);
if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) &&
!is_dev_replace)) {
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
ret = -ENODEV;
- goto out_free_ctx;
+ goto out;
}
if (!is_dev_replace && !readonly &&
btrfs_err_in_rcu(fs_info, "scrub: device %s is not writable",
rcu_str_deref(dev->name));
ret = -EROFS;
- goto out_free_ctx;
+ goto out;
}
mutex_lock(&fs_info->scrub_lock);
mutex_unlock(&fs_info->scrub_lock);
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
ret = -EIO;
- goto out_free_ctx;
+ goto out;
}
down_read(&fs_info->dev_replace.rwsem);
mutex_unlock(&fs_info->scrub_lock);
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
ret = -EINPROGRESS;
- goto out_free_ctx;
+ goto out;
}
up_read(&fs_info->dev_replace.rwsem);
- ret = scrub_workers_get(fs_info, is_dev_replace);
- if (ret) {
- mutex_unlock(&fs_info->scrub_lock);
- mutex_unlock(&fs_info->fs_devices->device_list_mutex);
- goto out_free_ctx;
- }
-
sctx->readonly = readonly;
dev->scrub_ctx = sctx;
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
mutex_lock(&fs_info->scrub_lock);
dev->scrub_ctx = NULL;
- if (refcount_dec_and_test(&fs_info->scrub_workers_refcnt)) {
- scrub_workers = fs_info->scrub_workers;
- scrub_wr_comp = fs_info->scrub_wr_completion_workers;
- scrub_parity = fs_info->scrub_parity_workers;
-
- fs_info->scrub_workers = NULL;
- fs_info->scrub_wr_completion_workers = NULL;
- fs_info->scrub_parity_workers = NULL;
- }
mutex_unlock(&fs_info->scrub_lock);
- btrfs_destroy_workqueue(scrub_workers);
- btrfs_destroy_workqueue(scrub_wr_comp);
- btrfs_destroy_workqueue(scrub_parity);
+ scrub_workers_put(fs_info);
scrub_put_ctx(sctx);
return ret;
-
+out:
+ scrub_workers_put(fs_info);
out_free_ctx:
scrub_free_ctx(sctx);
} else if (strncmp(args[0].from, "lzo", 3) == 0) {
compress_type = "lzo";
info->compress_type = BTRFS_COMPRESS_LZO;
+ info->compress_level = 0;
btrfs_set_opt(info->mount_opt, COMPRESS);
btrfs_clear_opt(info->mount_opt, NODATACOW);
btrfs_clear_opt(info->mount_opt, NODATASUM);
/* Note for ROOT_TREE_DIR_ITEM, mkfs could set its transid 0 */
if (btrfs_inode_transid(leaf, iitem) > super_gen + 1) {
inode_item_err(leaf, slot,
- "invalid inode generation: has %llu expect [0, %llu]",
+ "invalid inode transid: has %llu expect [0, %llu]",
btrfs_inode_transid(leaf, iitem), super_gen + 1);
return -EUCLEAN;
}
btrfs_free_path(path);
out_unlock:
mutex_unlock(&dir->log_mutex);
- if (ret == -ENOSPC) {
+ if (err == -ENOSPC) {
btrfs_set_log_full_commit(trans);
- ret = 0;
- } else if (ret < 0)
- btrfs_abort_transaction(trans, ret);
+ err = 0;
+ } else if (err < 0 && err != -ENOENT) {
+ /* ENOENT can be returned if the entry hasn't been fsynced yet */
+ btrfs_abort_transaction(trans, err);
+ }
btrfs_end_log_trans(root);
goto skip;
}
update_tree:
+ btrfs_release_path(path);
if (!btrfs_is_empty_uuid(root_item.uuid)) {
ret = btrfs_uuid_tree_add(trans, root_item.uuid,
BTRFS_UUID_KEY_SUBVOL,
}
skip:
+ btrfs_release_path(path);
if (trans) {
ret = btrfs_end_transaction(trans);
trans = NULL;
break;
}
- btrfs_release_path(path);
if (key.offset < (u64)-1) {
key.offset++;
} else if (key.type < BTRFS_ROOT_ITEM_KEY) {
*/
set_buffer_new(bh);
set_buffer_unwritten(bh);
- /* FALLTHRU */
+ fallthrough;
case IOMAP_MAPPED:
if ((iomap->flags & IOMAP_F_NEW) ||
offset >= i_size_read(inode))
WARN_ON(atomic_read(&bh->b_count) < 1);
lock_buffer(bh);
if (test_clear_buffer_dirty(bh)) {
+ /*
+ * The bh should be mapped, but it might not be if the
+ * device was hot-removed. Not much we can do but fail the I/O.
+ */
+ if (!buffer_mapped(bh)) {
+ unlock_buffer(bh);
+ return -EIO;
+ }
+
get_bh(bh);
bh->b_end_io = end_buffer_write_sync;
ret = submit_bh(REQ_OP_WRITE, op_flags, bh);
int have = ci->i_snap_caps;
if ((have & mask) == mask) {
- dout("__ceph_caps_issued_mask ino 0x%lx snap issued %s"
- " (mask %s)\n", ci->vfs_inode.i_ino,
+ dout("__ceph_caps_issued_mask ino 0x%llx snap issued %s"
+ " (mask %s)\n", ceph_ino(&ci->vfs_inode),
ceph_cap_string(have),
ceph_cap_string(mask));
return 1;
if (!__cap_is_valid(cap))
continue;
if ((cap->issued & mask) == mask) {
- dout("__ceph_caps_issued_mask ino 0x%lx cap %p issued %s"
- " (mask %s)\n", ci->vfs_inode.i_ino, cap,
+ dout("__ceph_caps_issued_mask ino 0x%llx cap %p issued %s"
+ " (mask %s)\n", ceph_ino(&ci->vfs_inode), cap,
ceph_cap_string(cap->issued),
ceph_cap_string(mask));
if (touch)
/* does a combination of caps satisfy mask? */
have |= cap->issued;
if ((have & mask) == mask) {
- dout("__ceph_caps_issued_mask ino 0x%lx combo issued %s"
- " (mask %s)\n", ci->vfs_inode.i_ino,
+ dout("__ceph_caps_issued_mask ino 0x%llx combo issued %s"
+ " (mask %s)\n", ceph_ino(&ci->vfs_inode),
ceph_cap_string(cap->issued),
ceph_cap_string(mask));
if (touch) {
struct cap_wait cw;
DEFINE_WAIT_FUNC(wait, woken_wake_function);
- cw.ino = inode->i_ino;
+ cw.ino = ceph_ino(inode);
cw.tgid = current->tgid;
cw.need = need;
cw.want = want;
{
struct seq_file *s = p;
- seq_printf(s, "0x%-17lx%-17s%-17s\n", inode->i_ino,
+ seq_printf(s, "0x%-17llx%-17s%-17s\n", ceph_ino(inode),
ceph_cap_string(cap->issued),
ceph_cap_string(cap->implemented));
return 0;
spin_lock(&mdsc->caps_list_lock);
list_for_each_entry(cw, &mdsc->cap_wait_list, list) {
- seq_printf(s, "%-13d0x%-17lx%-17s%-17s\n", cw->tgid, cw->ino,
+ seq_printf(s, "%-13d0x%-17llx%-17s%-17s\n", cw->tgid, cw->ino,
ceph_cap_string(cw->need),
ceph_cap_string(cw->want));
}
dentry, dentry, d_inode(dentry));
ctx->pos = di->offset;
if (!dir_emit(ctx, dentry->d_name.name,
- dentry->d_name.len,
- ceph_translate_ino(dentry->d_sb,
- d_inode(dentry)->i_ino),
+ dentry->d_name.len, ceph_present_inode(d_inode(dentry)),
d_inode(dentry)->i_mode >> 12)) {
dput(dentry);
err = 0;
/* always start with . and .. */
if (ctx->pos == 0) {
dout("readdir off 0 -> '.'\n");
- if (!dir_emit(ctx, ".", 1,
- ceph_translate_ino(inode->i_sb, inode->i_ino),
+ if (!dir_emit(ctx, ".", 1, ceph_present_inode(inode),
inode->i_mode >> 12))
return 0;
ctx->pos = 1;
}
if (ctx->pos == 1) {
- ino_t ino = parent_ino(file->f_path.dentry);
+ u64 ino;
+ struct dentry *dentry = file->f_path.dentry;
+
+ spin_lock(&dentry->d_lock);
+ ino = ceph_present_inode(dentry->d_parent->d_inode);
+ spin_unlock(&dentry->d_lock);
+
dout("readdir off 1 -> '..'\n");
- if (!dir_emit(ctx, "..", 2,
- ceph_translate_ino(inode->i_sb, ino),
- inode->i_mode >> 12))
+ if (!dir_emit(ctx, "..", 2, ino, inode->i_mode >> 12))
return 0;
ctx->pos = 2;
}
}
for (; i < rinfo->dir_nr; i++) {
struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
- struct ceph_vino vino;
- ino_t ino;
- u32 ftype;
BUG_ON(rde->offset < ctx->pos);
rde->name_len, rde->name, &rde->inode.in);
BUG_ON(!rde->inode.in);
- ftype = le32_to_cpu(rde->inode.in->mode) >> 12;
- vino.ino = le64_to_cpu(rde->inode.in->ino);
- vino.snap = le64_to_cpu(rde->inode.in->snapid);
- ino = ceph_vino_to_ino(vino);
if (!dir_emit(ctx, rde->name, rde->name_len,
- ceph_translate_ino(inode->i_sb, ino), ftype)) {
+ ceph_present_ino(inode->i_sb, le64_to_cpu(rde->inode.in->ino)),
+ le32_to_cpu(rde->inode.in->mode) >> 12)) {
dout("filldir stopping us...\n");
return 0;
}
if (try_async && op == CEPH_MDS_OP_UNLINK &&
(req->r_dir_caps = get_caps_for_async_unlink(dir, dentry))) {
- dout("async unlink on %lu/%.*s caps=%s", dir->i_ino,
+ dout("async unlink on %llu/%.*s caps=%s", ceph_ino(dir),
dentry->d_name.len, dentry->d_name.name,
ceph_cap_string(req->r_dir_caps));
set_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags);
case -ENOENT:
if (d_really_is_negative(dentry))
valid = 1;
- /* Fallthrough */
+ fallthrough;
default:
break;
}
case S_IFREG:
ceph_fscache_register_inode_cookie(inode);
ceph_fscache_file_set_cookie(inode, file);
- /* fall through */
+ fallthrough;
case S_IFDIR:
ret = ceph_init_file_info(inode, file, fmode,
S_ISDIR(inode->i_mode));
} else {
struct dentry *dn;
- dout("%s d_adding new inode 0x%llx to 0x%lx/%s\n", __func__,
- vino.ino, dir->i_ino, dentry->d_name.name);
+ dout("%s d_adding new inode 0x%llx to 0x%llx/%s\n", __func__,
+ vino.ino, ceph_ino(dir), dentry->d_name.name);
ceph_dir_clear_ordered(dir);
ceph_init_inode_acls(inode, as_ctx);
if (inode->i_state & I_NEW) {
.mmap = ceph_mmap,
.fsync = ceph_fsync,
.lock = ceph_lock,
+ .setlease = simple_nosetlease,
.flock = ceph_flock,
.splice_read = generic_file_splice_read,
.splice_write = iter_file_splice_write,
*/
static int ceph_set_ino_cb(struct inode *inode, void *data)
{
- ceph_inode(inode)->i_vino = *(struct ceph_vino *)data;
- inode->i_ino = ceph_vino_to_ino(*(struct ceph_vino *)data);
+ struct ceph_inode_info *ci = ceph_inode(inode);
+
+ ci->i_vino = *(struct ceph_vino *)data;
+ inode->i_ino = ceph_vino_to_ino_t(ci->i_vino);
inode_set_iversion_raw(inode, 0);
return 0;
}
struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
{
struct inode *inode;
- ino_t t = ceph_vino_to_ino(vino);
- inode = iget5_locked(sb, t, ceph_ino_compare, ceph_set_ino_cb, &vino);
+ inode = iget5_locked(sb, (unsigned long)vino.ino, ceph_ino_compare,
+ ceph_set_ino_cb, &vino);
if (!inode)
return ERR_PTR(-ENOMEM);
- if (inode->i_state & I_NEW)
- dout("get_inode created new inode %p %llx.%llx ino %llx\n",
- inode, ceph_vinop(inode), (u64)inode->i_ino);
- dout("get_inode on %lu=%llx.%llx got %p\n", inode->i_ino, vino.ino,
- vino.snap, inode);
+ dout("get_inode on %llu=%llx.%llx got %p new %d\n", ceph_present_inode(inode),
+ ceph_vinop(inode), inode, !!(inode->i_state & I_NEW));
return inode;
}
}
generic_fillattr(inode, stat);
- stat->ino = ceph_translate_ino(inode->i_sb, inode->i_ino);
+ stat->ino = ceph_present_inode(inode);
/*
* btime on newly-allocated inodes is 0, so if this is still set to
struct cap_wait {
struct list_head list;
- unsigned long ino;
+ u64 ino;
pid_t tgid;
int need;
int want;
{
struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
struct super_block *sb = mdsc->fsc->sb;
+ struct inode *root = d_inode(sb->s_root);
if (atomic64_read(&mdsc->quotarealms_count) > 0)
return true;
/* if root is the real CephFS root, we don't have quota realms */
- if (sb->s_root->d_inode &&
- (sb->s_root->d_inode->i_ino == CEPH_INO_ROOT))
+ if (root && ceph_ino(root) == CEPH_INO_ROOT)
return false;
/* otherwise, we can't know for sure */
return true;
return ceph_inode(inode)->i_vino;
}
-/*
- * ino_t is <64 bits on many architectures, blech.
- *
- * i_ino (kernel inode) st_ino (userspace)
- * i386 32 32
- * x86_64+ino32 64 32
- * x86_64 64 64
- */
-static inline u32 ceph_ino_to_ino32(__u64 vino)
+static inline u32 ceph_ino_to_ino32(u64 vino)
{
u32 ino = vino & 0xffffffff;
ino ^= vino >> 32;
}
/*
- * kernel i_ino value
+ * Inode numbers in cephfs are 64 bits, but inode->i_ino is 32-bits on
+ * some arches. We generally do not use this value inside the ceph driver, but
+ * we do want to set it to something, so that generic vfs code has an
+ * appropriate value for tracepoints and the like.
*/
-static inline ino_t ceph_vino_to_ino(struct ceph_vino vino)
+static inline ino_t ceph_vino_to_ino_t(struct ceph_vino vino)
{
-#if BITS_PER_LONG == 32
- return ceph_ino_to_ino32(vino.ino);
-#else
+ if (sizeof(ino_t) == sizeof(u32))
+ return ceph_ino_to_ino32(vino.ino);
return (ino_t)vino.ino;
-#endif
-}
-
-/*
- * user-visible ino (stat, filldir)
- */
-#if BITS_PER_LONG == 32
-static inline ino_t ceph_translate_ino(struct super_block *sb, ino_t ino)
-{
- return ino;
-}
-#else
-static inline ino_t ceph_translate_ino(struct super_block *sb, ino_t ino)
-{
- if (ceph_test_mount_opt(ceph_sb_to_client(sb), INO32))
- ino = ceph_ino_to_ino32(ino);
- return ino;
}
-#endif
-
/* for printf-style formatting */
#define ceph_vinop(i) ceph_inode(i)->i_vino.ino, ceph_inode(i)->i_vino.snap
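As a standalone illustration of ceph_ino_to_ino32() above: the 64-bit cephfs inode number is xor-folded into 32 bits. The zero-to-1 step below reflects mainline behavior not visible in this hunk (inode 0 is reserved); the rest is a direct userspace transliteration.

#include <stdint.h>
#include <stdio.h>

static uint32_t ino32_fold(uint64_t vino)
{
	uint32_t ino = (uint32_t)(vino & 0xffffffff);

	ino ^= (uint32_t)(vino >> 32);	/* fold the high word in */
	if (!ino)
		ino = 1;		/* avoid the reserved inode 0 */
	return ino;
}

int main(void)
{
	printf("%#x\n", ino32_fold(0x123456780000002aULL));
	printf("%#x\n", ino32_fold(0x0000002a0000002aULL));	/* folds to 0 -> 1 */
	return 0;
}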
{
return ceph_inode(inode)->i_vino.ino;
}
+
static inline u64 ceph_snap(struct inode *inode)
{
return ceph_inode(inode)->i_vino.snap;
}
+/**
+ * ceph_present_ino - format an inode number for presentation to userland
+ * @sb: superblock where the inode lives
+ * @ino: inode number to (possibly) convert
+ *
+ * If the user mounted with the ino32 option, then the 64-bit value needs
+ * to be converted to something that can fit inside 32 bits. Note that
+ * internal kernel code never uses this value, so this is entirely for
+ * userland consumption.
+ */
+static inline u64 ceph_present_ino(struct super_block *sb, u64 ino)
+{
+ if (unlikely(ceph_test_mount_opt(ceph_sb_to_client(sb), INO32)))
+ return ceph_ino_to_ino32(ino);
+ return ino;
+}
+
+static inline u64 ceph_present_inode(struct inode *inode)
+{
+ return ceph_present_ino(inode->i_sb, ceph_ino(inode));
+}
+
static inline int ceph_ino_compare(struct inode *inode, void *data)
{
struct ceph_vino *pvino = (struct ceph_vino *)data;
ci->i_vino.snap == pvino->snap;
}
+
static inline struct inode *ceph_find_inode(struct super_block *sb,
struct ceph_vino vino)
{
- ino_t t = ceph_vino_to_ino(vino);
- return ilookup5(sb, t, ceph_ino_compare, &vino);
+ /*
+ * NB: The hashval will be run through the fs/inode.c hash function
+ * anyway, so there is no need to squash the inode number down to
+ * 32-bits first. Just use low-order bits on arches with 32-bit long.
+ */
+ return ilookup5(sb, (unsigned long)vino.ino, ceph_ino_compare, &vino);
}
return strcmp(server->vals->version_string, SMB1_VERSION_STRING) == 0;
}
+static inline bool is_tcon_dfs(struct cifs_tcon *tcon)
+{
+ /*
+ * For SMB1, see MS-CIFS 2.4.55 SMB_COM_TREE_CONNECT_ANDX (0x75) and MS-CIFS 3.3.4.4 DFS
+ * Subsystem Notifies That a Share Is a DFS Share.
+ *
+ * For SMB2+, see MS-SMB2 2.2.10 SMB2 TREE_CONNECT Response and MS-SMB2 3.3.4.14 Server
+ * Application Updates a Share.
+ */
+ if (!tcon || !tcon->ses || !tcon->ses->server)
+ return false;
+ return is_smb1_server(tcon->ses->server) ? tcon->Flags & SMB_SHARE_IS_IN_DFS :
+ tcon->share_flags & (SHI1005_FLAGS_DFS | SHI1005_FLAGS_DFS_ROOT);
+}
+
#endif /* _CIFS_GLOB_H */
if (global_secflags &
(CIFSSEC_MAY_KRB5 | CIFSSEC_MAY_NTLMSSP))
return true;
- /* Fallthrough */
+ fallthrough;
default:
return false;
}
return 1;
case Opt_sec_krb5i:
vol->sign = true;
- /* Fallthrough */
+ fallthrough;
case Opt_sec_krb5:
vol->sectype = Kerberos;
break;
case Opt_sec_ntlmsspi:
vol->sign = true;
- /* Fallthrough */
+ fallthrough;
case Opt_sec_ntlmssp:
vol->sectype = RawNTLMSSP;
break;
case Opt_sec_ntlmi:
vol->sign = true;
- /* Fallthrough */
+ fallthrough;
case Opt_ntlm:
vol->sectype = NTLM;
break;
case Opt_sec_ntlmv2i:
vol->sign = true;
- /* Fallthrough */
+ fallthrough;
case Opt_sec_ntlmv2:
vol->sectype = NTLMv2;
break;
vol->password = NULL;
break;
}
- /* Fallthrough - to Opt_pass below.*/
+ fallthrough; /* to Opt_pass below */
case Opt_pass:
/* Obtain the value string */
value = strchr(data, '=');
if (!tcon)
continue;
/* Make sure that requests go through new root servers */
- if (tcon->share_flags & (SHI1005_FLAGS_DFS | SHI1005_FLAGS_DFS_ROOT)) {
+ if (is_tcon_dfs(tcon)) {
put_root_ses(root_ses);
set_root_ses(cifs_sb, ses, &root_ses);
}
if ((server->sec_kerberos || server->sec_mskerberos) &&
(global_secflags & CIFSSEC_MAY_KRB5))
return Kerberos;
- /* Fallthrough */
+ fallthrough;
default:
return Unspecified;
}
default:
break;
}
- /* Fallthrough - to attempt LANMAN authentication next */
+ fallthrough; /* to attempt LANMAN authentication next */
case CIFS_NEGFLAVOR_LANMAN:
switch (requested) {
case LANMAN:
case Unspecified:
if (global_secflags & CIFSSEC_MAY_LANMAN)
return LANMAN;
- /* Fallthrough */
+ fallthrough;
default:
return Unspecified;
}
if ((server->sec_kerberos || server->sec_mskerberos) &&
(global_secflags & CIFSSEC_MAY_KRB5))
return Kerberos;
- /* Fallthrough */
+ fallthrough;
default:
return Unspecified;
}
switch (whence) {
case 1:
offset += file->f_pos;
- /* fall through */
+ fallthrough;
case 0:
if (offset >= 0)
break;
- /* fall through */
+ fallthrough;
default:
return -EINVAL;
}
ret = dax_load_hole(&xas, mapping, &entry, vmf);
goto finish_iomap;
}
- /*FALLTHRU*/
+ fallthrough;
default:
WARN_ON_ONCE(1);
error = -EIO;
break;
case -EAGAIN:
error = 0;
- /* fall through */
+ fallthrough;
default:
__put_lkb(ls, lkb);
goto out;
return z_erofs_extent_lookback(m, m->delta[0]);
case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
map->m_flags &= ~EROFS_MAP_ZIPPED;
- /* fallthrough */
+ fallthrough;
case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
map->m_la = (lcn << lclusterbits) | m->clusterofs;
break;
case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
if (endoff >= m.clusterofs)
map->m_flags &= ~EROFS_MAP_ZIPPED;
- /* fallthrough */
+ fallthrough;
case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
if (endoff >= m.clusterofs) {
map->m_la = (m.lcn << lclusterbits) | m.clusterofs;
end = (m.lcn << lclusterbits) | m.clusterofs;
map->m_flags |= EROFS_MAP_FULL_MAPPED;
m.delta[0] = 1;
- /* fallthrough */
+ fallthrough;
case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
/* get the corresponding first chunk */
err = z_erofs_extent_lookback(&m, m.delta[0]);
* not already there, and calling reverse_path_check()
* during ep_insert().
*/
- if (list_empty(&epi->ffd.file->f_tfile_llink))
- list_add(&epi->ffd.file->f_tfile_llink,
- &tfile_check_list);
+ if (list_empty(&epi->ffd.file->f_tfile_llink)) {
+ if (get_file_rcu(epi->ffd.file))
+ list_add(&epi->ffd.file->f_tfile_llink,
+ &tfile_check_list);
+ }
}
}
mutex_unlock(&ep->mtx);
file = list_first_entry(&tfile_check_list, struct file,
f_tfile_llink);
list_del_init(&file->f_tfile_llink);
+ fput(file);
}
INIT_LIST_HEAD(&tfile_check_list);
}
full_check = 1;
if (is_file_epoll(tf.file)) {
error = -ELOOP;
- if (ep_loop_check(ep, tf.file) != 0) {
- clear_tfile_check_list();
+ if (ep_loop_check(ep, tf.file) != 0)
goto error_tgt_fput;
- }
- } else
+ } else {
+ get_file(tf.file);
list_add(&tf.file->f_tfile_llink,
&tfile_check_list);
+ }
error = epoll_mutex_lock(&ep->mtx, 0, nonblock);
- if (error) {
-out_del:
- list_del(&tf.file->f_tfile_llink);
+ if (error)
goto error_tgt_fput;
- }
if (is_file_epoll(tf.file)) {
tep = tf.file->private_data;
error = epoll_mutex_lock(&tep->mtx, 1, nonblock);
if (error) {
mutex_unlock(&ep->mtx);
- goto out_del;
+ goto error_tgt_fput;
}
}
}
error = ep_insert(ep, epds, tf.file, fd, full_check);
} else
error = -EEXIST;
- if (full_check)
- clear_tfile_check_list();
break;
case EPOLL_CTL_DEL:
if (epi)
mutex_unlock(&ep->mtx);
error_tgt_fput:
- if (full_check)
+ if (full_check) {
+ clear_tfile_check_list();
mutex_unlock(&epmutex);
+ }
fdput(tf);
error_fput:
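The epoll hunks above change list ownership: a file may sit on tfile_check_list only while the list holds its own reference (get_file_rcu()/get_file() on add, fput() on drain), so the list never carries a pointer to a file that has already gone away. A toy refcounting model with invented names:

#include <stdio.h>
#include <stdlib.h>

struct obj {
	int refs;
};

static struct obj *obj_get(struct obj *o) { o->refs++; return o; }

static void obj_put(struct obj *o)
{
	if (--o->refs == 0)
		free(o);
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));
	struct obj *list_slot;		/* stands in for tfile_check_list */

	o->refs = 1;			/* caller's reference */
	list_slot = obj_get(o);		/* the list takes its own reference */
	obj_put(o);			/* caller drops early... */
	printf("still alive: refs=%d\n", list_slot->refs);	/* 1 */
	obj_put(list_slot);		/* drain the list: last ref, freed */
	return 0;
}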
struct inode *inode = file_inode(vmf->vma->vm_file);
struct ext2_inode_info *ei = EXT2_I(inode);
vm_fault_t ret;
+ bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
+ (vmf->vma->vm_flags & VM_SHARED);
- if (vmf->flags & FAULT_FLAG_WRITE) {
+ if (write) {
sb_start_pagefault(inode->i_sb);
file_update_time(vmf->vma->vm_file);
}
ret = dax_iomap_fault(vmf, PE_SIZE_PTE, NULL, NULL, &ext2_iomap_ops);
up_read(&ei->dax_sem);
- if (vmf->flags & FAULT_FLAG_WRITE)
+ if (write)
sb_end_pagefault(inode->i_sb);
return ret;
}
mark_inode_dirty(inode);
ext2_free_branches(inode, &nr, &nr+1, 1);
}
- /* fall through */
+ fallthrough;
case EXT2_IND_BLOCK:
nr = i_data[EXT2_DIND_BLOCK];
if (nr) {
mark_inode_dirty(inode);
ext2_free_branches(inode, &nr, &nr+1, 2);
}
- /* fall through */
+ fallthrough;
case EXT2_DIND_BLOCK:
nr = i_data[EXT2_TIND_BLOCK];
if (nr) {
case Opt_xip:
ext2_msg(sb, KERN_INFO, "use dax instead of xip");
set_opt(opts->s_mount_opt, XIP);
- /* Fall through */
+ fallthrough;
case Opt_dax:
#ifdef CONFIG_FS_DAX
ext2_msg(sb, KERN_WARNING,
This builds the ext4 KUnit tests.
KUnit tests run during boot and output the results to the debug log
- in TAP format (http://testanything.org/). Only useful for kernel devs
+ in TAP format (https://testanything.org/). Only useful for kernel devs
running the KUnit test harness; they are not for inclusion in a production
build.
* Return buffer_head on success or an ERR_PTR in case of failure.
*/
struct buffer_head *
-ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
+ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group,
+ bool ignore_locked)
{
struct ext4_group_desc *desc;
struct ext4_sb_info *sbi = EXT4_SB(sb);
return ERR_PTR(-ENOMEM);
}
+ if (ignore_locked && buffer_locked(bh)) {
+ /* buffer under IO already, return if called for prefetching */
+ put_bh(bh);
+ return NULL;
+ }
+
if (bitmap_uptodate(bh))
goto verify;
* submit the buffer_head for reading
*/
set_buffer_new(bh);
- trace_ext4_read_block_bitmap_load(sb, block_group);
+ trace_ext4_read_block_bitmap_load(sb, block_group, ignore_locked);
bh->b_end_io = ext4_end_bitmap_read;
get_bh(bh);
- submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, bh);
+ submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO |
+ (ignore_locked ? REQ_RAHEAD : 0), bh);
return bh;
verify:
err = ext4_validate_block_bitmap(sb, desc, block_group, bh);
struct buffer_head *bh;
int err;
- bh = ext4_read_block_bitmap_nowait(sb, block_group);
+ bh = ext4_read_block_bitmap_nowait(sb, block_group, false);
if (IS_ERR(bh))
return bh;
err = ext4_wait_block_bitmap(sb, block_group, bh);
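ext4_read_block_bitmap_nowait() above gains an ignore_locked mode: a prefetch caller skips a buffer that is already locked for IO instead of queueing behind it, and tags its reads REQ_RAHEAD, while a demand reader still waits. A toy model where pthread trylock stands in for buffer_locked():

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t buf_lock = PTHREAD_MUTEX_INITIALIZER;

static int read_buffer(int opportunistic)
{
	if (opportunistic) {
		if (pthread_mutex_trylock(&buf_lock) != 0)
			return 0;	/* already under IO: skip, no harm */
	} else {
		pthread_mutex_lock(&buf_lock);	/* demand read: wait */
	}
	/* ... issue / complete the read ... */
	pthread_mutex_unlock(&buf_lock);
	return 1;
}

int main(void)
{
	pthread_mutex_lock(&buf_lock);		/* pretend IO in flight */
	printf("prefetch did work: %d\n", read_buffer(1));	/* 0 */
	pthread_mutex_unlock(&buf_lock);
	printf("demand read did work: %d\n", read_buffer(0));	/* 1 */
	return 0;
}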
struct rb_node node;
ext4_fsblk_t start_blk;
unsigned int count;
+ u32 ino;
};
static struct kmem_cache *ext4_system_zone_cachep;
static inline int can_merge(struct ext4_system_zone *entry1,
struct ext4_system_zone *entry2)
{
- if ((entry1->start_blk + entry1->count) == entry2->start_blk)
+ if ((entry1->start_blk + entry1->count) == entry2->start_blk &&
+ entry1->ino == entry2->ino)
return 1;
return 0;
}
*/
static int add_system_zone(struct ext4_system_blocks *system_blks,
ext4_fsblk_t start_blk,
- unsigned int count)
+ unsigned int count, u32 ino)
{
- struct ext4_system_zone *new_entry = NULL, *entry;
+ struct ext4_system_zone *new_entry, *entry;
struct rb_node **n = &system_blks->root.rb_node, *node;
struct rb_node *parent = NULL, *new_node = NULL;
n = &(*n)->rb_left;
else if (start_blk >= (entry->start_blk + entry->count))
n = &(*n)->rb_right;
- else {
- if (start_blk + count > (entry->start_blk +
- entry->count))
- entry->count = (start_blk + count -
- entry->start_blk);
- new_node = *n;
- new_entry = rb_entry(new_node, struct ext4_system_zone,
- node);
- break;
- }
+ else /* Unexpected overlap of system zones. */
+ return -EFSCORRUPTED;
}
- if (!new_entry) {
- new_entry = kmem_cache_alloc(ext4_system_zone_cachep,
- GFP_KERNEL);
- if (!new_entry)
- return -ENOMEM;
- new_entry->start_blk = start_blk;
- new_entry->count = count;
- new_node = &new_entry->node;
-
- rb_link_node(new_node, parent, n);
- rb_insert_color(new_node, &system_blks->root);
- }
+ new_entry = kmem_cache_alloc(ext4_system_zone_cachep,
+ GFP_KERNEL);
+ if (!new_entry)
+ return -ENOMEM;
+ new_entry->start_blk = start_blk;
+ new_entry->count = count;
+ new_entry->ino = ino;
+ new_node = &new_entry->node;
+
+ rb_link_node(new_node, parent, n);
+ rb_insert_color(new_node, &system_blks->root);
/* Can we merge to the left? */
node = rb_prev(new_node);
printk(KERN_CONT "\n");
}
-/*
- * Returns 1 if the passed-in block region (start_blk,
- * start_blk+count) is valid; 0 if some part of the block region
- * overlaps with filesystem metadata blocks.
- */
-static int ext4_data_block_valid_rcu(struct ext4_sb_info *sbi,
- struct ext4_system_blocks *system_blks,
- ext4_fsblk_t start_blk,
- unsigned int count)
-{
- struct ext4_system_zone *entry;
- struct rb_node *n;
-
- if ((start_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
- (start_blk + count < start_blk) ||
- (start_blk + count > ext4_blocks_count(sbi->s_es)))
- return 0;
-
- if (system_blks == NULL)
- return 1;
-
- n = system_blks->root.rb_node;
- while (n) {
- entry = rb_entry(n, struct ext4_system_zone, node);
- if (start_blk + count - 1 < entry->start_blk)
- n = n->rb_left;
- else if (start_blk >= (entry->start_blk + entry->count))
- n = n->rb_right;
- else
- return 0;
- }
- return 1;
-}
-
static int ext4_protect_reserved_inode(struct super_block *sb,
struct ext4_system_blocks *system_blks,
u32 ino)
if (n == 0) {
i++;
} else {
- if (!ext4_data_block_valid_rcu(sbi, system_blks,
- map.m_pblk, n)) {
- err = -EFSCORRUPTED;
- __ext4_error(sb, __func__, __LINE__, -err,
- map.m_pblk, "blocks %llu-%llu "
- "from inode %u overlap system zone",
- map.m_pblk,
- map.m_pblk + map.m_len - 1, ino);
+ err = add_system_zone(system_blks, map.m_pblk, n, ino);
+ if (err < 0) {
+ if (err == -EFSCORRUPTED) {
+ __ext4_error(sb, __func__, __LINE__,
+ -err, map.m_pblk,
+ "blocks %llu-%llu from inode %u overlap system zone",
+ map.m_pblk,
+ map.m_pblk + map.m_len - 1,
+ ino);
+ }
break;
}
- err = add_system_zone(system_blks, map.m_pblk, n);
- if (err < 0)
- break;
i += n;
}
}
int flex_size = ext4_flex_bg_size(sbi);
int ret;
- if (!test_opt(sb, BLOCK_VALIDITY)) {
- if (sbi->system_blks)
- ext4_release_system_zone(sb);
- return 0;
- }
- if (sbi->system_blks)
- return 0;
-
system_blks = kzalloc(sizeof(*system_blks), GFP_KERNEL);
if (!system_blks)
return -ENOMEM;
for (i=0; i < ngroups; i++) {
cond_resched();
if (ext4_bg_has_super(sb, i) &&
- ((i < 5) || ((i % flex_size) == 0)))
- add_system_zone(system_blks,
+ ((i < 5) || ((i % flex_size) == 0))) {
+ ret = add_system_zone(system_blks,
ext4_group_first_block_no(sb, i),
- ext4_bg_num_gdb(sb, i) + 1);
+ ext4_bg_num_gdb(sb, i) + 1, 0);
+ if (ret)
+ goto err;
+ }
gdp = ext4_get_group_desc(sb, i, NULL);
ret = add_system_zone(system_blks,
- ext4_block_bitmap(sb, gdp), 1);
+ ext4_block_bitmap(sb, gdp), 1, 0);
if (ret)
goto err;
ret = add_system_zone(system_blks,
- ext4_inode_bitmap(sb, gdp), 1);
+ ext4_inode_bitmap(sb, gdp), 1, 0);
if (ret)
goto err;
ret = add_system_zone(system_blks,
ext4_inode_table(sb, gdp),
- sbi->s_itb_per_group);
+ sbi->s_itb_per_group, 0);
if (ret)
goto err;
}
call_rcu(&system_blks->rcu, ext4_destroy_system_zone);
}
-int ext4_data_block_valid(struct ext4_sb_info *sbi, ext4_fsblk_t start_blk,
+/*
+ * Returns 1 if the passed-in block region (start_blk,
+ * start_blk+count) is valid; 0 if some part of the block region
+ * overlaps with some other filesystem metadata blocks.
+ */
+int ext4_inode_block_valid(struct inode *inode, ext4_fsblk_t start_blk,
unsigned int count)
{
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
struct ext4_system_blocks *system_blks;
- int ret;
+ struct ext4_system_zone *entry;
+ struct rb_node *n;
+ int ret = 1;
+
+ if ((start_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
+ (start_blk + count < start_blk) ||
+ (start_blk + count > ext4_blocks_count(sbi->s_es)))
+ return 0;
/*
* Lock the system zone to prevent it being released concurrently
*/
rcu_read_lock();
system_blks = rcu_dereference(sbi->system_blks);
- ret = ext4_data_block_valid_rcu(sbi, system_blks, start_blk,
- count);
+ if (system_blks == NULL)
+ goto out_rcu;
+
+ n = system_blks->root.rb_node;
+ while (n) {
+ entry = rb_entry(n, struct ext4_system_zone, node);
+ if (start_blk + count - 1 < entry->start_blk)
+ n = n->rb_left;
+ else if (start_blk >= (entry->start_blk + entry->count))
+ n = n->rb_right;
+ else {
+ ret = (entry->ino == inode->i_ino);
+ break;
+ }
+ }
+out_rcu:
rcu_read_unlock();
return ret;
}
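ext4_inode_block_valid() above walks the system-zone tree and, thanks to the new entry->ino field, lets a range overlap only a zone that was recorded for the same inode. A toy model using a sorted array in place of the rbtree (all names invented):

#include <stdio.h>

struct zone {
	unsigned long long start;
	unsigned int count;
	unsigned int ino;	/* 0 for fs metadata */
};

/* Zones are disjoint and sorted by start block. A range is valid if it
 * misses every zone, or only hits a zone owned by the same inode. */
static int range_valid(const struct zone *z, int nzones,
		       unsigned long long start, unsigned int count,
		       unsigned int ino)
{
	for (int i = 0; i < nzones; i++) {
		if (start + count - 1 < z[i].start)
			break;			/* sorted: no later hit */
		if (start >= z[i].start + z[i].count)
			continue;
		return z[i].ino == ino;		/* overlap: own zone only */
	}
	return 1;
}

int main(void)
{
	const struct zone zones[] = {
		{ 100, 10, 0 },		/* fs metadata */
		{ 200, 50, 1234 },	/* blocks recorded for inode 1234 */
	};

	printf("%d %d %d\n",
	       range_valid(zones, 2, 50, 10, 1234),	/* 1: no overlap */
	       range_valid(zones, 2, 105, 2, 1234),	/* 0: hits metadata */
	       range_valid(zones, 2, 210, 5, 1234));	/* 1: own zone */
	return 0;
}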
while (bref < p+max) {
blk = le32_to_cpu(*bref++);
if (blk &&
- unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb),
- blk, 1))) {
+ unlikely(!ext4_inode_block_valid(inode, blk, 1))) {
ext4_error_inode(inode, function, line, blk,
"invalid block");
return -EFSCORRUPTED;
#define EXT4_CASEFOLD_FL 0x40000000 /* Casefolded directory */
#define EXT4_RESERVED_FL 0x80000000 /* reserved for ext4 lib */
-#define EXT4_FL_USER_VISIBLE 0x725BDFFF /* User visible flags */
-#define EXT4_FL_USER_MODIFIABLE 0x624BC0FF /* User modifiable flags */
-
-/* Flags we can manipulate with through EXT4_IOC_FSSETXATTR */
+/* User modifiable flags */
+#define EXT4_FL_USER_MODIFIABLE (EXT4_SECRM_FL | \
+ EXT4_UNRM_FL | \
+ EXT4_COMPR_FL | \
+ EXT4_SYNC_FL | \
+ EXT4_IMMUTABLE_FL | \
+ EXT4_APPEND_FL | \
+ EXT4_NODUMP_FL | \
+ EXT4_NOATIME_FL | \
+ EXT4_JOURNAL_DATA_FL | \
+ EXT4_NOTAIL_FL | \
+ EXT4_DIRSYNC_FL | \
+ EXT4_TOPDIR_FL | \
+ EXT4_EXTENTS_FL | \
+ 0x00400000 /* EXT4_EOFBLOCKS_FL */ | \
+ EXT4_DAX_FL | \
+ EXT4_PROJINHERIT_FL | \
+ EXT4_CASEFOLD_FL)
+
+/* User visible flags */
+#define EXT4_FL_USER_VISIBLE (EXT4_FL_USER_MODIFIABLE | \
+ EXT4_DIRTY_FL | \
+ EXT4_COMPRBLK_FL | \
+ EXT4_NOCOMPR_FL | \
+ EXT4_ENCRYPT_FL | \
+ EXT4_INDEX_FL | \
+ EXT4_VERITY_FL | \
+ EXT4_INLINE_DATA_FL)
+
+/* Flags we can manipulate with through FS_IOC_FSSETXATTR */
#define EXT4_FL_XFLAG_VISIBLE (EXT4_SYNC_FL | \
EXT4_IMMUTABLE_FL | \
EXT4_APPEND_FL | \
/*
* ioctl commands
*/
-#define EXT4_IOC_GETFLAGS FS_IOC_GETFLAGS
-#define EXT4_IOC_SETFLAGS FS_IOC_SETFLAGS
#define EXT4_IOC_GETVERSION _IOR('f', 3, long)
#define EXT4_IOC_SETVERSION _IOW('f', 4, long)
#define EXT4_IOC_GETVERSION_OLD FS_IOC_GETVERSION
#define EXT4_IOC_RESIZE_FS _IOW('f', 16, __u64)
#define EXT4_IOC_SWAP_BOOT _IO('f', 17)
#define EXT4_IOC_PRECACHE_EXTENTS _IO('f', 18)
-#define EXT4_IOC_SET_ENCRYPTION_POLICY FS_IOC_SET_ENCRYPTION_POLICY
-#define EXT4_IOC_GET_ENCRYPTION_PWSALT FS_IOC_GET_ENCRYPTION_PWSALT
-#define EXT4_IOC_GET_ENCRYPTION_POLICY FS_IOC_GET_ENCRYPTION_POLICY
/* ioctl codes 19--39 are reserved for fscrypt */
#define EXT4_IOC_CLEAR_ES_CACHE _IO('f', 40)
#define EXT4_IOC_GETSTATE _IOW('f', 41, __u32)
#define EXT4_IOC_GET_ES_CACHE _IOWR('f', 42, struct fiemap)
-#define EXT4_IOC_FSGETXATTR FS_IOC_FSGETXATTR
-#define EXT4_IOC_FSSETXATTR FS_IOC_FSSETXATTR
-
#define EXT4_IOC_SHUTDOWN _IOR ('X', 125, __u32)
/*
/*
* ioctl commands in 32 bit emulation
*/
-#define EXT4_IOC32_GETFLAGS FS_IOC32_GETFLAGS
-#define EXT4_IOC32_SETFLAGS FS_IOC32_SETFLAGS
#define EXT4_IOC32_GETVERSION _IOR('f', 3, int)
#define EXT4_IOC32_SETVERSION _IOW('f', 4, int)
#define EXT4_IOC32_GETRSVSZ _IOR('f', 5, int)
struct timespec64 i_crtime;
/* mballoc */
+ atomic_t i_prealloc_active;
struct list_head i_prealloc_list;
spinlock_t i_prealloc_lock;
#define EXT4_MOUNT_JOURNAL_CHECKSUM 0x800000 /* Journal checksums */
#define EXT4_MOUNT_JOURNAL_ASYNC_COMMIT 0x1000000 /* Journal Async Commit */
#define EXT4_MOUNT_WARN_ON_ERROR 0x2000000 /* Trigger WARN_ON on error */
+#define EXT4_MOUNT_PREFETCH_BLOCK_BITMAPS 0x4000000
#define EXT4_MOUNT_DELALLOC 0x8000000 /* Delalloc support */
#define EXT4_MOUNT_DATA_ERR_ABORT 0x10000000 /* Abort on file data write */
#define EXT4_MOUNT_BLOCK_VALIDITY 0x20000000 /* Block validity checking */
unsigned int s_mb_stats;
unsigned int s_mb_order2_reqs;
unsigned int s_mb_group_prealloc;
+ unsigned int s_mb_max_inode_prealloc;
unsigned int s_max_dir_size_kb;
/* where last allocation was done - for stream allocation */
unsigned long s_mb_last_group;
unsigned long s_mb_last_start;
+ unsigned int s_mb_prefetch;
+ unsigned int s_mb_prefetch_limit;
/* stats for buddy allocator */
atomic_t s_bal_reqs; /* number of reqs with len > 1 */
struct ratelimit_state s_err_ratelimit_state;
struct ratelimit_state s_warning_ratelimit_state;
struct ratelimit_state s_msg_ratelimit_state;
+ atomic_t s_warning_count;
+ atomic_t s_msg_count;
/* Encryption context for '-o test_dummy_encryption' */
struct fscrypt_dummy_context s_dummy_enc_ctx;
#ifdef CONFIG_EXT4_DEBUG
unsigned long s_simulate_fail;
#endif
+ /* Record the errseq of the backing block device */
+ errseq_t s_bdev_wb_err;
+ spinlock_t s_bdev_wb_lock;
};
static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb)
struct mutex li_list_mtx;
};
+enum ext4_li_mode {
+ EXT4_LI_MODE_PREFETCH_BBITMAP,
+ EXT4_LI_MODE_ITABLE,
+};
+
struct ext4_li_request {
struct super_block *lr_super;
- struct ext4_sb_info *lr_sbi;
+ enum ext4_li_mode lr_mode;
+ ext4_group_t lr_first_not_zeroed;
ext4_group_t lr_next_group;
struct list_head lr_request;
unsigned long lr_next_sched;
extern int ext4_should_retry_alloc(struct super_block *sb, int *retries);
extern struct buffer_head *ext4_read_block_bitmap_nowait(struct super_block *sb,
- ext4_group_t block_group);
+ ext4_group_t block_group,
+ bool ignore_locked);
extern int ext4_wait_block_bitmap(struct super_block *sb,
ext4_group_t block_group,
struct buffer_head *bh);
extern ext4_fsblk_t ext4_mb_new_blocks(handle_t *,
struct ext4_allocation_request *, int *);
extern int ext4_mb_reserve_blocks(struct super_block *, int);
-extern void ext4_discard_preallocations(struct inode *);
+extern void ext4_discard_preallocations(struct inode *, unsigned int);
extern int __init ext4_init_mballoc(void);
extern void ext4_exit_mballoc(void);
+extern ext4_group_t ext4_mb_prefetch(struct super_block *sb,
+ ext4_group_t group,
+ unsigned int nr, int *cnt);
+extern void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group,
+ unsigned int nr);
+
extern void ext4_free_blocks(handle_t *handle, struct inode *inode,
struct buffer_head *bh, ext4_fsblk_t block,
unsigned long count, int flags);
struct ext4_filename *fname,
unsigned int offset,
struct ext4_dir_entry_2 **res_dir);
-extern int ext4_generic_delete_entry(handle_t *handle,
- struct inode *dir,
+extern int ext4_generic_delete_entry(struct inode *dir,
struct ext4_dir_entry_2 *de_del,
struct buffer_head *bh,
void *entry_buf,
#endif
-extern int ext4_update_compat_feature(handle_t *handle, struct super_block *sb,
- __u32 compat);
-extern int ext4_update_rocompat_feature(handle_t *handle,
- struct super_block *sb, __u32 rocompat);
-extern int ext4_update_incompat_feature(handle_t *handle,
- struct super_block *sb, __u32 incompat);
extern ext4_fsblk_t ext4_block_bitmap(struct super_block *sb,
struct ext4_group_desc *bg);
extern ext4_fsblk_t ext4_inode_bitmap(struct super_block *sb,
(1 << EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT)
#define EXT4_GROUP_INFO_IBITMAP_CORRUPT \
(1 << EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT)
+#define EXT4_GROUP_INFO_BBITMAP_READ_BIT 4
#define EXT4_MB_GRP_NEED_INIT(grp) \
(test_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &((grp)->bb_state)))
(set_bit(EXT4_GROUP_INFO_WAS_TRIMMED_BIT, &((grp)->bb_state)))
#define EXT4_MB_GRP_CLEAR_TRIMMED(grp) \
(clear_bit(EXT4_GROUP_INFO_WAS_TRIMMED_BIT, &((grp)->bb_state)))
+#define EXT4_MB_GRP_TEST_AND_SET_READ(grp) \
+ (test_and_set_bit(EXT4_GROUP_INFO_BBITMAP_READ_BIT, &((grp)->bb_state)))
#define EXT4_MAX_CONTENTION 8
#define EXT4_CONTENTION_THRESHOLD 2
extern int ext4_setup_system_zone(struct super_block *sb);
extern int __init ext4_init_system_zone(void);
extern void ext4_exit_system_zone(void);
-extern int ext4_data_block_valid(struct ext4_sb_info *sbi,
- ext4_fsblk_t start_blk,
- unsigned int count);
+extern int ext4_inode_block_valid(struct inode *inode,
+ ext4_fsblk_t start_blk,
+ unsigned int count);
extern int ext4_check_blockref(const char *, unsigned int,
struct inode *, __le32 *, unsigned int);
jbd2_journal_abort_handle(handle);
}
+static void ext4_check_bdev_write_error(struct super_block *sb)
+{
+ struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ int err;
+
+ /*
+ * If the block device has write error flag, it may have failed to
+ * async write out metadata buffers in the background. In this case,
+ * we could read old data from disk and write it out again, which
+ * may lead to on-disk filesystem inconsistency.
+ */
+ if (errseq_check(&mapping->wb_err, READ_ONCE(sbi->s_bdev_wb_err))) {
+ spin_lock(&sbi->s_bdev_wb_lock);
+ err = errseq_check_and_advance(&mapping->wb_err, &sbi->s_bdev_wb_err);
+ spin_unlock(&sbi->s_bdev_wb_lock);
+ if (err)
+ ext4_error_err(sb, -err,
+ "Error while async write back metadata");
+ }
+}
+
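
A note on the mechanism above: errseq_check() and errseq_check_and_advance() implement a sample-then-check protocol, where the mount path records the mapping's current error sequence and later callers can tell whether any writeback failure happened since that sample. Below is a minimal userspace sketch of that protocol; the toy_* names are invented for illustration, and a bare counter stands in for the kernel's errseq_t (which also packs the errno and a "seen" bit into one word).

#include <stdio.h>

/* Toy stand-in for errseq_t: a bare failure counter (the real type
 * also encodes the latest errno and a "seen" flag in one word). */
typedef unsigned int toy_errseq_t;

/* Writeback failure path: advance the sequence. */
static void toy_errseq_set(toy_errseq_t *eseq)
{
	(*eseq)++;
}

/* Has anything failed since the caller sampled @since? */
static int toy_errseq_check(const toy_errseq_t *eseq, toy_errseq_t since)
{
	return *eseq != since;
}

/* Consume the error: report it and move the caller's cursor forward. */
static int toy_errseq_check_and_advance(const toy_errseq_t *eseq,
					toy_errseq_t *since)
{
	int failed = toy_errseq_check(eseq, *since);

	*since = *eseq;
	return failed;
}

int main(void)
{
	toy_errseq_t wb_err = 0;
	toy_errseq_t sampled = wb_err;	/* like sampling at mount time */

	toy_errseq_set(&wb_err);	/* async writeback fails later on */

	if (toy_errseq_check(&wb_err, sampled))	/* detected before reuse */
		printf("failure seen: %d\n",
		       toy_errseq_check_and_advance(&wb_err, &sampled));
	printf("clean after advance: %d\n",
	       !toy_errseq_check(&wb_err, sampled));
	return 0;
}
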
int __ext4_journal_get_write_access(const char *where, unsigned int line,
handle_t *handle, struct buffer_head *bh)
{
might_sleep();
+ if (bh->b_bdev->bd_super)
+ ext4_check_bdev_write_error(bh->b_bdev->bd_super);
+
if (ext4_handle_valid(handle)) {
err = jbd2_journal_get_write_access(handle, bh);
if (err)
* i_mutex. So we can safely drop the i_data_sem here.
*/
BUG_ON(EXT4_JOURNAL(inode) == NULL);
- ext4_discard_preallocations(inode);
+ ext4_discard_preallocations(inode, 0);
up_write(&EXT4_I(inode)->i_data_sem);
*dropped = 1;
return 0;
*/
if (lblock + len <= lblock)
return 0;
- return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
+ return ext4_inode_block_valid(inode, block, len);
}
static int ext4_valid_extent_idx(struct inode *inode,
{
ext4_fsblk_t block = ext4_idx_pblock(ext_idx);
- return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1);
+ return ext4_inode_block_valid(inode, block, 1);
}
static int ext4_valid_extent_entries(struct inode *inode,
}
if (buffer_verified(bh) && !(flags & EXT4_EX_FORCE_CACHE))
return bh;
- if (!ext4_has_feature_journal(inode->i_sb) ||
- (inode->i_ino !=
- le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum))) {
- err = __ext4_ext_check(function, line, inode,
- ext_block_hdr(bh), depth, pblk);
- if (err)
- goto errout;
- }
+ err = __ext4_ext_check(function, line, inode,
+ ext_block_hdr(bh), depth, pblk);
+ if (err)
+ goto errout;
set_buffer_verified(bh);
/*
* If this is a leaf block, cache all of its entries
return;
depth = path->p_depth;
for (i = 0; i <= depth; i++, path++) {
- if (path->p_bh) {
- brelse(path->p_bh);
- path->p_bh = NULL;
- }
+ brelse(path->p_bh);
+ path->p_bh = NULL;
}
}
/*
* ext4_ext_insert_extent:
- * tries to merge requsted extent into the existing extent or
+ * tries to merge requested extent into the existing extent or
* inserts requested extent as new one into the tree,
* creating new leaf in the no-space case.
*/
*
*
* Splits extent [a, b] into two extents [a, @split) and [@split, b], states
- * of which are deterimined by split_flag.
+ * of which are determined by split_flag.
*
* There are two cases:
* a> the extent is split into two extents.
eof_block = map->m_lblk + map->m_len;
/*
* It is safe to convert extent to initialized via explicit
- * zeroout only if extent is fully insde i_size or new_size.
+ * zeroout only if extent is fully inside i_size or new_size.
*/
depth = ext_depth(inode);
ex = path[depth].p_ext;
* not a good idea to call discard here directly,
* but otherwise we'd need to call it every free().
*/
- ext4_discard_preallocations(inode);
+ ext4_discard_preallocations(inode, 0);
if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
fb_flags = EXT4_FREE_BLOCKS_NO_QUOT_UPDATE;
ext4_free_blocks(handle, inode, NULL, newblock,
}
/*
- * Round up offset. This is not fallocate, we neet to zero out
+ * Round up offset. This is not fallocate, we need to zero out
* blocks, so convert interior block aligned part of the range to
* unwritten and possibly manually zero out unaligned parts of the
* range.
}
down_write(&EXT4_I(inode)->i_data_sem);
- ext4_discard_preallocations(inode);
+ ext4_discard_preallocations(inode, 0);
ret = ext4_es_remove_extent(inode, punch_start,
EXT_MAX_BLOCKS - punch_start);
up_write(&EXT4_I(inode)->i_data_sem);
goto out_stop;
}
- ext4_discard_preallocations(inode);
+ ext4_discard_preallocations(inode, 0);
ret = ext4_ext_shift_extents(inode, handle, punch_stop,
punch_stop - punch_start, SHIFT_LEFT);
goto out_stop;
down_write(&EXT4_I(inode)->i_data_sem);
- ext4_discard_preallocations(inode);
+ ext4_discard_preallocations(inode, 0);
path = ext4_find_extent(inode, offset_lblk, NULL, 0);
if (IS_ERR(path)) {
}
ex1 = path1[path1->p_depth].p_ext;
ex2 = path2[path2->p_depth].p_ext;
- /* Do we have somthing to swap ? */
+ /* Do we have something to swap ? */
if (unlikely(!ex2 || !ex1))
goto finish;
/* if we are the last writer on the inode, drop the block reservation */
if ((filp->f_mode & FMODE_WRITE) &&
(atomic_read(&inode->i_writecount) == 1) &&
- !EXT4_I(inode)->i_reserved_data_blocks)
- {
+ !EXT4_I(inode)->i_reserved_data_blocks) {
down_write(&EXT4_I(inode)->i_data_sem);
- ext4_discard_preallocations(inode);
+ ext4_discard_preallocations(inode, 0);
up_write(&EXT4_I(inode)->i_data_sem);
}
if (is_dx(inode) && filp->private_data)
*/
if (*ilock_shared && (!IS_NOSEC(inode) || *extend ||
!ext4_overwrite_io(inode, offset, count))) {
+ if (iocb->ki_flags & IOCB_NOWAIT) {
+ ret = -EAGAIN;
+ goto out;
+ }
inode_unlock_shared(inode);
*ilock_shared = false;
inode_lock(inode);
return err;
}
-static int ext4_file_open(struct inode * inode, struct file * filp)
+static int ext4_file_open(struct inode *inode, struct file *filp)
{
int ret;
break;
case DX_HASH_HALF_MD4_UNSIGNED:
str2hashbuf = str2hashbuf_unsigned;
- /* fall through */
+ fallthrough;
case DX_HASH_HALF_MD4:
p = name;
while (len > 0) {
break;
case DX_HASH_TEA_UNSIGNED:
str2hashbuf = str2hashbuf_unsigned;
- /* fall through */
+ fallthrough;
case DX_HASH_TEA:
p = name;
while (len > 0) {
* i_mutex. So we can safely drop the i_data_sem here.
*/
BUG_ON(EXT4_JOURNAL(inode) == NULL);
- ext4_discard_preallocations(inode);
+ ext4_discard_preallocations(inode, 0);
up_write(&EXT4_I(inode)->i_data_sem);
*dropped = 1;
return 0;
else if (ext4_should_journal_data(inode))
flags |= EXT4_FREE_BLOCKS_FORGET;
- if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), block_to_free,
- count)) {
+ if (!ext4_inode_block_valid(inode, block_to_free, count)) {
EXT4_ERROR_INODE(inode, "attempt to clear invalid "
"blocks %llu len %lu",
(unsigned long long) block_to_free, count);
if (!nr)
continue; /* A hole */
- if (!ext4_data_block_valid(EXT4_SB(inode->i_sb),
- nr, 1)) {
+ if (!ext4_inode_block_valid(inode, nr, 1)) {
EXT4_ERROR_INODE(inode,
"invalid indirect mapped "
"block %lu (level %d)",
ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
i_data[EXT4_IND_BLOCK] = 0;
}
- /* fall through */
+ fallthrough;
case EXT4_IND_BLOCK:
nr = i_data[EXT4_DIND_BLOCK];
if (nr) {
ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
i_data[EXT4_DIND_BLOCK] = 0;
}
- /* fall through */
+ fallthrough;
case EXT4_DIND_BLOCK:
nr = i_data[EXT4_TIND_BLOCK];
if (nr) {
ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
i_data[EXT4_TIND_BLOCK] = 0;
}
- /* fall through */
+ fallthrough;
case EXT4_TIND_BLOCK:
;
}
ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
i_data[EXT4_IND_BLOCK] = 0;
}
- /* fall through */
+ fallthrough;
case EXT4_IND_BLOCK:
if (++n >= n2)
break;
ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
i_data[EXT4_DIND_BLOCK] = 0;
}
- /* fall through */
+ fallthrough;
case EXT4_DIND_BLOCK:
if (++n >= n2)
break;
ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
i_data[EXT4_TIND_BLOCK] = 0;
}
- /* fall through */
+ fallthrough;
case EXT4_TIND_BLOCK:
;
}
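
The `/* fall through */` to `fallthrough;` conversions in the surrounding hunks swap a comment the compiler may or may not parse for a real statement-level attribute, so -Wimplicit-fallthrough is enforced reliably. Below is a self-contained sketch of how such a pseudo-keyword can be defined and used; the kernel's actual definition lives in include/linux/compiler_attributes.h, so treat this as an approximation.

#include <stdio.h>

/* Map to the compiler attribute when available, else to a no-op
 * statement; the attribute must sit where a statement is legal. */
#ifdef __has_attribute
# if __has_attribute(__fallthrough__)
#  define fallthrough	__attribute__((__fallthrough__))
# endif
#endif
#ifndef fallthrough
# define fallthrough	do {} while (0)	/* fall through */
#endif

static const char *classify(int c)
{
	switch (c) {
	case 'X':
		c += 'a' - 'A';	/* canonicalize, then share the code */
		fallthrough;
	case 'x':
		return "an x";
	default:
		return "something else";
	}
}

int main(void)
{
	printf("'X' is %s\n", classify('X'));
	return 0;
}
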
len = 0;
}
- /* Insert the the xttr entry. */
+ /* Insert the xattr entry. */
i.value = value;
i.value_len = len;
if (err)
goto out;
- err = ext4_generic_delete_entry(handle, dir, de_del, bh,
+ err = ext4_generic_delete_entry(dir, de_del, bh,
inline_start, inline_size, 0);
if (err)
goto out;
*/
if ((ei->i_reserved_data_blocks == 0) &&
!inode_is_open_for_write(inode))
- ext4_discard_preallocations(inode);
+ ext4_discard_preallocations(inode, 0);
}
static int __check_block_validity(struct inode *inode, const char *func,
(inode->i_ino ==
le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum)))
return 0;
- if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
- map->m_len)) {
+ if (!ext4_inode_block_valid(inode, map->m_pblk, map->m_len)) {
ext4_error_inode(inode, func, line, map->m_pblk,
"lblock %lu mapped to illegal pblock %llu "
"(length %d)", (unsigned long) map->m_lblk,
if (PageChecked(page))
return 0;
if (journal)
- return jbd2_journal_try_to_free_buffers(journal, page, wait);
+ return jbd2_journal_try_to_free_buffers(journal, page);
else
return try_to_free_buffers(page);
}
if (stop_block > first_block) {
down_write(&EXT4_I(inode)->i_data_sem);
- ext4_discard_preallocations(inode);
+ ext4_discard_preallocations(inode, 0);
ret = ext4_es_remove_extent(inode, first_block,
stop_block - first_block);
trace_ext4_truncate_enter(inode);
if (!ext4_can_truncate(inode))
- return 0;
+ goto out_trace;
if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
int has_inline = 1;
err = ext4_inline_data_truncate(inode, &has_inline);
- if (err)
- return err;
- if (has_inline)
- return 0;
+ if (err || has_inline)
+ goto out_trace;
}
/* If we zero-out tail of the page, we have to create jinode for jbd2 */
if (inode->i_size & (inode->i_sb->s_blocksize - 1)) {
if (ext4_inode_attach_jinode(inode) < 0)
- return 0;
+ goto out_trace;
}
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
credits = ext4_blocks_for_truncate(inode);
handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
- if (IS_ERR(handle))
- return PTR_ERR(handle);
+ if (IS_ERR(handle)) {
+ err = PTR_ERR(handle);
+ goto out_trace;
+ }
if (inode->i_size & (inode->i_sb->s_blocksize - 1))
ext4_block_truncate_page(handle, mapping, inode->i_size);
down_write(&EXT4_I(inode)->i_data_sem);
- ext4_discard_preallocations(inode);
+ ext4_discard_preallocations(inode, 0);
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
err = ext4_ext_truncate(handle, inode);
err = err2;
ext4_journal_stop(handle);
+out_trace:
trace_ext4_truncate_exit(inode);
return err;
}
ret = 0;
if (ei->i_file_acl &&
- !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) {
+ !ext4_inode_block_valid(inode, ei->i_file_acl, 1)) {
ext4_error_inode(inode, function, line, 0,
"iget: bad extended attribute block %llu",
ei->i_file_acl);
(inode->i_state & I_DIRTY_TIME)) {
struct ext4_inode_info *ei = EXT4_I(inode);
- inode->i_state &= ~(I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED);
+ inode->i_state &= ~I_DIRTY_TIME;
spin_unlock(&inode->i_lock);
spin_lock(&ei->i_raw_lock);
reset_inode_seed(inode);
reset_inode_seed(inode_bl);
- ext4_discard_preallocations(inode);
+ ext4_discard_preallocations(inode, 0);
err = ext4_mark_inode_dirty(handle, inode);
if (err < 0) {
switch (cmd) {
case FS_IOC_GETFSMAP:
return ext4_ioc_getfsmap(sb, (void __user *)arg);
- case EXT4_IOC_GETFLAGS:
+ case FS_IOC_GETFLAGS:
flags = ei->i_flags & EXT4_FL_USER_VISIBLE;
if (S_ISREG(inode->i_mode))
flags &= ~EXT4_PROJINHERIT_FL;
return put_user(flags, (int __user *) arg);
- case EXT4_IOC_SETFLAGS: {
+ case FS_IOC_SETFLAGS: {
int err;
if (!inode_owner_or_capable(inode))
case EXT4_IOC_PRECACHE_EXTENTS:
return ext4_ext_precache(inode);
- case EXT4_IOC_SET_ENCRYPTION_POLICY:
+ case FS_IOC_SET_ENCRYPTION_POLICY:
if (!ext4_has_feature_encrypt(sb))
return -EOPNOTSUPP;
return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
- case EXT4_IOC_GET_ENCRYPTION_PWSALT: {
+ case FS_IOC_GET_ENCRYPTION_PWSALT: {
#ifdef CONFIG_FS_ENCRYPTION
int err, err2;
struct ext4_sb_info *sbi = EXT4_SB(sb);
return -EOPNOTSUPP;
#endif
}
- case EXT4_IOC_GET_ENCRYPTION_POLICY:
+ case FS_IOC_GET_ENCRYPTION_POLICY:
if (!ext4_has_feature_encrypt(sb))
return -EOPNOTSUPP;
return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
case EXT4_IOC_GET_ES_CACHE:
return ext4_ioctl_get_es_cache(filp, arg);
- case EXT4_IOC_FSGETXATTR:
+ case FS_IOC_FSGETXATTR:
{
struct fsxattr fa;
return -EFAULT;
return 0;
}
- case EXT4_IOC_FSSETXATTR:
+ case FS_IOC_FSSETXATTR:
{
struct fsxattr fa, old_fa;
int err;
{
/* These are just misnamed, they actually get/put from/to user an int */
switch (cmd) {
- case EXT4_IOC32_GETFLAGS:
- cmd = EXT4_IOC_GETFLAGS;
+ case FS_IOC32_GETFLAGS:
+ cmd = FS_IOC_GETFLAGS;
break;
- case EXT4_IOC32_SETFLAGS:
- cmd = EXT4_IOC_SETFLAGS;
+ case FS_IOC32_SETFLAGS:
+ cmd = FS_IOC_SETFLAGS;
break;
case EXT4_IOC32_GETVERSION:
cmd = EXT4_IOC_GETVERSION;
case EXT4_IOC_RESIZE_FS:
case FITRIM:
case EXT4_IOC_PRECACHE_EXTENTS:
- case EXT4_IOC_SET_ENCRYPTION_POLICY:
- case EXT4_IOC_GET_ENCRYPTION_PWSALT:
- case EXT4_IOC_GET_ENCRYPTION_POLICY:
+ case FS_IOC_SET_ENCRYPTION_POLICY:
+ case FS_IOC_GET_ENCRYPTION_PWSALT:
+ case FS_IOC_GET_ENCRYPTION_POLICY:
case FS_IOC_GET_ENCRYPTION_POLICY_EX:
case FS_IOC_ADD_ENCRYPTION_KEY:
case FS_IOC_REMOVE_ENCRYPTION_KEY:
case EXT4_IOC_CLEAR_ES_CACHE:
case EXT4_IOC_GETSTATE:
case EXT4_IOC_GET_ES_CACHE:
- case EXT4_IOC_FSGETXATTR:
- case EXT4_IOC_FSSETXATTR:
+ case FS_IOC_FSGETXATTR:
+ case FS_IOC_FSSETXATTR:
break;
default:
return -ENOIOCTLCMD;
bh[i] = NULL;
continue;
}
- bh[i] = ext4_read_block_bitmap_nowait(sb, group);
+ bh[i] = ext4_read_block_bitmap_nowait(sb, group, false);
if (IS_ERR(bh[i])) {
err = PTR_ERR(bh[i]);
bh[i] = NULL;
e4b->bd_buddy_page = page;
e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
- BUG_ON(e4b->bd_bitmap_page == NULL);
- BUG_ON(e4b->bd_buddy_page == NULL);
-
return 0;
err:
}
-/*
- * regular allocator, for general purposes allocation
- */
-
static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
struct ext4_buddy *e4b,
int finish_group)
BUG_ON(cr < 0 || cr >= 4);
- free = grp->bb_free;
- if (free == 0)
- return false;
- if (cr <= 2 && free < ac->ac_g_ex.fe_len)
+ if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
return false;
- if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
+ free = grp->bb_free;
+ if (free == 0)
return false;
fragments = grp->bb_fragments;
((group % flex_size) == 0))
return false;
- if ((ac->ac_2order > ac->ac_sb->s_blocksize_bits+1) ||
- (free / fragments) >= ac->ac_g_ex.fe_len)
+ if (free < ac->ac_g_ex.fe_len)
+ return false;
+
+ if (ac->ac_2order > ac->ac_sb->s_blocksize_bits+1)
return true;
if (grp->bb_largest_free_order < ac->ac_2order)
{
struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
struct super_block *sb = ac->ac_sb;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
bool should_lock = ac->ac_flags & EXT4_MB_STRICT_CHECK;
ext4_grpblk_t free;
int ret = 0;
/* We only do this if the grp has never been initialized */
if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
- ret = ext4_mb_init_group(ac->ac_sb, group, GFP_NOFS);
+ struct ext4_group_desc *gdp =
+ ext4_get_group_desc(sb, group, NULL);
+ int ret;
+
+ /* cr=0/1 is a very optimistic search to find large
+ * good chunks almost for free. If buddy data is not
+ * ready, then this optimization makes no sense. But
+ * we never skip the first block group in a flex_bg,
+ * since this gets used for metadata block allocation,
+ * and we want to make sure we locate metadata blocks
+ * in the first block group in the flex_bg if possible.
+ */
+ if (cr < 2 &&
+ (!sbi->s_log_groups_per_flex ||
+ ((group & ((1 << sbi->s_log_groups_per_flex) - 1)) != 0)) &&
+ !(ext4_has_group_desc_csum(sb) &&
+ (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))))
+ return 0;
+ ret = ext4_mb_init_group(sb, group, GFP_NOFS);
if (ret)
return ret;
}
return ret;
}
+/*
+ * Start prefetching @nr block bitmaps starting at @group.
+ * Return the next group which needs to be prefetched.
+ */
+ext4_group_t ext4_mb_prefetch(struct super_block *sb, ext4_group_t group,
+ unsigned int nr, int *cnt)
+{
+ ext4_group_t ngroups = ext4_get_groups_count(sb);
+ struct buffer_head *bh;
+ struct blk_plug plug;
+
+ blk_start_plug(&plug);
+ while (nr-- > 0) {
+ struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group,
+ NULL);
+ struct ext4_group_info *grp = ext4_get_group_info(sb, group);
+
+ /*
+ * Prefetch block groups with free blocks; but don't
+ * bother if it is marked uninitialized on disk, since
+ * it won't require I/O to read. Also only try to
+ * prefetch once, so we avoid the getblk() call, which can
+ * be expensive.
+ */
+ if (!EXT4_MB_GRP_TEST_AND_SET_READ(grp) &&
+ EXT4_MB_GRP_NEED_INIT(grp) &&
+ ext4_free_group_clusters(sb, gdp) > 0 &&
+ !(ext4_has_group_desc_csum(sb) &&
+ (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) {
+ bh = ext4_read_block_bitmap_nowait(sb, group, true);
+ if (bh && !IS_ERR(bh)) {
+ if (!buffer_uptodate(bh) && cnt)
+ (*cnt)++;
+ brelse(bh);
+ }
+ }
+ if (++group >= ngroups)
+ group = 0;
+ }
+ blk_finish_plug(&plug);
+ return group;
+}
+
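
ext4_mb_prefetch() above walks @nr groups from @group, wrapping to group 0 at the end of the filesystem, and returns the group at which the next batch should resume. Here is the cursor arithmetic in isolation, as a userspace sketch with an illustrative visit callback standing in for the bitmap read.

#include <stdio.h>

typedef unsigned int group_t;

/* Visit @nr groups starting at @group, wrapping past @ngroups;
 * return the group where the next batch should resume. */
static group_t prefetch_batch(group_t group, unsigned int nr,
			      group_t ngroups,
			      void (*visit)(group_t))
{
	while (nr-- > 0) {
		visit(group);
		if (++group >= ngroups)
			group = 0;
	}
	return group;
}

static void show(group_t g)
{
	printf("%u ", g);
}

int main(void)
{
	group_t next = prefetch_batch(6, 5, 8, show);	/* 6 7 0 1 2 */

	printf("-> resume at %u\n", next);		/* 3 */
	return 0;
}
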
+/*
+ * Prefetching reads the block bitmap into the buffer cache; but we
+ * need to make sure that the buddy bitmap in the page cache has been
+ * initialized. Note that ext4_mb_init_group() will block if the I/O
+ * is not yet completed, or indeed if ext4_mb_prefetch never
+ * started the I/O in the first place.
+ *
+ * TODO: We should actually kick off the buddy bitmap setup in a work
+ * queue when the buffer I/O is completed, so that we don't block
+ * waiting for the block allocation bitmap read to finish when
+ * ext4_mb_prefetch_fini is called from ext4_mb_regular_allocator().
+ */
+void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group,
+ unsigned int nr)
+{
+ while (nr-- > 0) {
+ struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group,
+ NULL);
+ struct ext4_group_info *grp = ext4_get_group_info(sb, group);
+
+ if (!group)
+ group = ext4_get_groups_count(sb);
+ group--;
+ grp = ext4_get_group_info(sb, group);
+
+ if (EXT4_MB_GRP_NEED_INIT(grp) &&
+ ext4_free_group_clusters(sb, gdp) > 0 &&
+ !(ext4_has_group_desc_csum(sb) &&
+ (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) {
+ if (ext4_mb_init_group(sb, group, GFP_NOFS))
+ break;
+ }
+ }
+}
+
static noinline_for_stack int
ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
{
- ext4_group_t ngroups, group, i;
+ ext4_group_t prefetch_grp = 0, ngroups, group, i;
int cr = -1;
int err = 0, first_err = 0;
+ unsigned int nr = 0, prefetch_ios = 0;
struct ext4_sb_info *sbi;
struct super_block *sb;
struct ext4_buddy e4b;
+ int lost;
sb = ac->ac_sb;
sbi = EXT4_SB(sb);
goto out;
/*
- * ac->ac2_order is set only if the fe_len is a power of 2
- * if ac2_order is set we also set criteria to 0 so that we
+ * ac->ac_2order is set only if the fe_len is a power of 2
+ * if ac->ac_2order is set we also set criteria to 0 so that we
* try exact allocation using buddy.
*/
i = fls(ac->ac_g_ex.fe_len);
* from the goal value specified
*/
group = ac->ac_g_ex.fe_group;
+ prefetch_grp = group;
for (i = 0; i < ngroups; group++, i++) {
int ret = 0;
if (group >= ngroups)
group = 0;
+ /*
+ * Batch reads of the block allocation bitmaps
+ * to get multiple READs in flight; limit
+ * prefetching at cr=0/1, otherwise mballoc can
+ * spend a lot of time loading imperfect groups
+ */
+ if ((prefetch_grp == group) &&
+ (cr > 1 ||
+ prefetch_ios < sbi->s_mb_prefetch_limit)) {
+ unsigned int curr_ios = prefetch_ios;
+
+ nr = sbi->s_mb_prefetch;
+ if (ext4_has_feature_flex_bg(sb)) {
+ nr = (group / sbi->s_mb_prefetch) *
+ sbi->s_mb_prefetch;
+ nr = nr + sbi->s_mb_prefetch - group;
+ }
+ prefetch_grp = ext4_mb_prefetch(sb, group,
+ nr, &prefetch_ios);
+ if (prefetch_ios == curr_ios)
+ nr = 0;
+ }
+
/* This now checks without needing the buddy page */
ret = ext4_mb_good_group_nolock(ac, group, cr);
if (ret <= 0) {
* We've been searching too long. Let's try to allocate
* the best chunk we've found so far
*/
-
ext4_mb_try_best_found(ac, &e4b);
if (ac->ac_status != AC_STATUS_FOUND) {
/*
* Someone more lucky has already allocated it.
* The only thing we can do is just take first
* found block(s)
- printk(KERN_DEBUG "EXT4-fs: someone won our chunk\n");
*/
+ lost = atomic_inc_return(&sbi->s_mb_lost_chunks);
+ mb_debug(sb, "lost chunk, group: %u, start: %d, len: %d, lost: %d\n",
+ ac->ac_b_ex.fe_group, ac->ac_b_ex.fe_start,
+ ac->ac_b_ex.fe_len, lost);
+
ac->ac_b_ex.fe_group = 0;
ac->ac_b_ex.fe_start = 0;
ac->ac_b_ex.fe_len = 0;
ac->ac_status = AC_STATUS_CONTINUE;
ac->ac_flags |= EXT4_MB_HINT_FIRST;
cr = 3;
- atomic_inc(&sbi->s_mb_lost_chunks);
goto repeat;
}
}
mb_debug(sb, "Best len %d, origin len %d, ac_status %u, ac_flags 0x%x, cr %d ret %d\n",
ac->ac_b_ex.fe_len, ac->ac_o_ex.fe_len, ac->ac_status,
ac->ac_flags, cr, err);
+
+ if (nr)
+ ext4_mb_prefetch_fini(sb, prefetch_grp, nr);
+
return err;
}
for (i = 0; i <= 13; i++)
seq_printf(seq, " %-5u", i <= blocksize_bits + 1 ?
sg.info.bb_counters[i] : 0);
- seq_printf(seq, " ]\n");
+ seq_puts(seq, " ]\n");
return 0;
}
goto err_freebuddy;
}
+ if (ext4_has_feature_flex_bg(sb)) {
+ /* a single flex group is supposed to be read by a single IO */
+ sbi->s_mb_prefetch = 1 << sbi->s_es->s_log_groups_per_flex;
+ sbi->s_mb_prefetch *= 8; /* 8 prefetch IOs in flight at most */
+ } else {
+ sbi->s_mb_prefetch = 32;
+ }
+ if (sbi->s_mb_prefetch > ext4_get_groups_count(sb))
+ sbi->s_mb_prefetch = ext4_get_groups_count(sb);
+ /* How many real IOs to prefetch within a single allocation at cr=0.
+ * Given that cr=0 is a CPU-related optimization we shouldn't try to
+ * load too many groups; at some point we should start to use what
+ * we've got in memory.
+ * With an average random access time of 5ms, it'd take a second to
+ * get 200 groups (* N with flex_bg), so let's make this limit 4
+ */
+ sbi->s_mb_prefetch_limit = sbi->s_mb_prefetch * 4;
+ if (sbi->s_mb_prefetch_limit > ext4_get_groups_count(sb))
+ sbi->s_mb_prefetch_limit = ext4_get_groups_count(sb);
+
return 0;
err_freebuddy:
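
Worked through, the sizing above means: with s_log_groups_per_flex = 4, a flex group is 16 block groups, so s_mb_prefetch becomes 16 * 8 = 128 and s_mb_prefetch_limit 512, each clamped to the group count. A hedged userspace restatement of that arithmetic follows; the struct and function names are invented for the sketch.

#include <stdio.h>

struct prefetch_tuning {
	unsigned int prefetch;		/* groups per prefetch batch */
	unsigned int prefetch_limit;	/* max prefetch IOs per allocation */
};

static struct prefetch_tuning
mb_prefetch_tuning(int has_flex_bg, unsigned int log_groups_per_flex,
		   unsigned int ngroups)
{
	struct prefetch_tuning t;

	if (has_flex_bg)	/* one flex group per IO, 8 IOs in flight */
		t.prefetch = (1U << log_groups_per_flex) * 8;
	else
		t.prefetch = 32;
	if (t.prefetch > ngroups)
		t.prefetch = ngroups;

	t.prefetch_limit = t.prefetch * 4;	/* ~4 batches per allocation */
	if (t.prefetch_limit > ngroups)
		t.prefetch_limit = ngroups;
	return t;
}

int main(void)
{
	struct prefetch_tuning t = mb_prefetch_tuning(1, 4, 4096);

	printf("prefetch=%u limit=%u\n", t.prefetch, t.prefetch_limit);
	return 0;	/* prefetch=128 limit=512 */
}
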
sbi->s_mb_stats = MB_DEFAULT_STATS;
sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
+ sbi->s_mb_max_inode_prealloc = MB_DEFAULT_MAX_INODE_PREALLOC;
/*
* The default group preallocation is 512, which for 4k block
* sizes translates to 2 megabytes. However for bigalloc file
block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
- if (!ext4_data_block_valid(sbi, block, len)) {
+ if (!ext4_inode_block_valid(ac->ac_inode, block, len)) {
ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
"fs metadata", block, block+len);
/* File system mounted not to panic on error
mb_debug(sb, "preallocated %d for group %u\n", preallocated, group);
}
+static void ext4_mb_mark_pa_deleted(struct super_block *sb,
+ struct ext4_prealloc_space *pa)
+{
+ struct ext4_inode_info *ei;
+
+ if (pa->pa_deleted) {
+ ext4_warning(sb, "deleted pa, type:%d, pblk:%llu, lblk:%u, len:%d\n",
+ pa->pa_type, pa->pa_pstart, pa->pa_lstart,
+ pa->pa_len);
+ return;
+ }
+
+ pa->pa_deleted = 1;
+
+ if (pa->pa_type == MB_INODE_PA) {
+ ei = EXT4_I(pa->pa_inode);
+ atomic_dec(&ei->i_prealloc_active);
+ }
+}
+
static void ext4_mb_pa_callback(struct rcu_head *head)
{
struct ext4_prealloc_space *pa;
return;
}
- pa->pa_deleted = 1;
+ ext4_mb_mark_pa_deleted(sb, pa);
spin_unlock(&pa->pa_lock);
grp_blk = pa->pa_pstart;
spin_lock(pa->pa_obj_lock);
list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list);
spin_unlock(pa->pa_obj_lock);
+ atomic_inc(&ei->i_prealloc_active);
}
/*
}
/* seems this one can be freed ... */
- pa->pa_deleted = 1;
+ ext4_mb_mark_pa_deleted(sb, pa);
/* we can trust pa_free ... */
free += pa->pa_free;
*
* FIXME!! Make sure it is valid at all the call sites
*/
-void ext4_discard_preallocations(struct inode *inode)
+void ext4_discard_preallocations(struct inode *inode, unsigned int needed)
{
struct ext4_inode_info *ei = EXT4_I(inode);
struct super_block *sb = inode->i_sb;
mb_debug(sb, "discard preallocation for inode %lu\n",
inode->i_ino);
- trace_ext4_discard_preallocations(inode);
+ trace_ext4_discard_preallocations(inode,
+ atomic_read(&ei->i_prealloc_active), needed);
INIT_LIST_HEAD(&list);
+ if (needed == 0)
+ needed = UINT_MAX;
+
repeat:
/* first, collect all pa's in the inode */
spin_lock(&ei->i_prealloc_lock);
- while (!list_empty(&ei->i_prealloc_list)) {
- pa = list_entry(ei->i_prealloc_list.next,
+ while (!list_empty(&ei->i_prealloc_list) && needed) {
+ pa = list_entry(ei->i_prealloc_list.prev,
struct ext4_prealloc_space, pa_inode_list);
BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock);
spin_lock(&pa->pa_lock);
}
if (pa->pa_deleted == 0) {
- pa->pa_deleted = 1;
+ ext4_mb_mark_pa_deleted(sb, pa);
spin_unlock(&pa->pa_lock);
list_del_rcu(&pa->pa_inode_list);
list_add(&pa->u.pa_tmp_list, &list);
+ needed--;
continue;
}
ac->ac_g_ex = ac->ac_o_ex;
ac->ac_flags = ar->flags;
- /* we have to define context: we'll we work with a file or
+ /* we have to define context: we'll work with a file or
* locality group. this is a policy, actually */
ext4_mb_group_or_file(ac);
BUG_ON(pa->pa_type != MB_GROUP_PA);
/* seems this one can be freed ... */
- pa->pa_deleted = 1;
+ ext4_mb_mark_pa_deleted(sb, pa);
spin_unlock(&pa->pa_lock);
list_del_rcu(&pa->pa_inode_list);
}
/*
+ * If the per-inode prealloc list is too long, trim some PAs.
+ */
+static void ext4_mb_trim_inode_pa(struct inode *inode)
+{
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ int count, delta;
+
+ count = atomic_read(&ei->i_prealloc_active);
+ delta = (sbi->s_mb_max_inode_prealloc >> 2) + 1;
+ if (count > sbi->s_mb_max_inode_prealloc + delta) {
+ count -= sbi->s_mb_max_inode_prealloc;
+ ext4_discard_preallocations(inode, count);
+ }
+}
+
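
The trim helper only fires once the list overshoots the cap by delta = max/4 + 1, and then trims just the overshoot back down to the cap, so a list hovering near the limit is not trimmed on every release. Here is that hysteresis in isolation, as a small sketch.

#include <stdio.h>

/* Return how many preallocations to discard for a list of @count
 * entries with a soft cap of @max: nothing until count exceeds
 * max by more than max/4 + 1, then trim back down to @max. */
static int trim_excess(int count, int max)
{
	int delta = (max >> 2) + 1;

	if (count > max + delta)
		return count - max;
	return 0;
}

int main(void)
{
	printf("%d\n", trim_excess(512, 512));	/* 0: at the cap */
	printf("%d\n", trim_excess(600, 512));	/* 0: inside hysteresis */
	printf("%d\n", trim_excess(700, 512));	/* 188: overshoot trimmed */
	return 0;
}
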
+/*
* release all resource we used in allocation
*/
static int ext4_mb_release_context(struct ext4_allocation_context *ac)
{
+ struct inode *inode = ac->ac_inode;
+ struct ext4_inode_info *ei = EXT4_I(inode);
struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
struct ext4_prealloc_space *pa = ac->ac_pa;
if (pa) {
pa->pa_free -= ac->ac_b_ex.fe_len;
pa->pa_len -= ac->ac_b_ex.fe_len;
spin_unlock(&pa->pa_lock);
+
+ /*
+ * We want to add the pa to the right bucket.
+ * Remove it from the list and while adding
+ * make sure the list to which we are adding
+ * doesn't grow big.
+ */
+ if (likely(pa->pa_free)) {
+ spin_lock(pa->pa_obj_lock);
+ list_del_rcu(&pa->pa_inode_list);
+ spin_unlock(pa->pa_obj_lock);
+ ext4_mb_add_n_trim(ac);
+ }
}
- }
- if (pa) {
- /*
- * We want to add the pa to the right bucket.
- * Remove it from the list and while adding
- * make sure the list to which we are adding
- * doesn't grow big.
- */
- if ((pa->pa_type == MB_GROUP_PA) && likely(pa->pa_free)) {
+
+ if (pa->pa_type == MB_INODE_PA) {
+ /*
+ * treat the per-inode prealloc list as an LRU list, then try
+ * to trim the least recently used PA.
+ */
spin_lock(pa->pa_obj_lock);
- list_del_rcu(&pa->pa_inode_list);
+ list_move(&pa->pa_inode_list, &ei->i_prealloc_list);
spin_unlock(pa->pa_obj_lock);
- ext4_mb_add_n_trim(ac);
}
+
ext4_mb_put_pa(ac, ac->ac_sb, pa);
}
if (ac->ac_bitmap_page)
if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
mutex_unlock(&ac->ac_lg->lg_mutex);
ext4_mb_collect_stats(ac);
+ ext4_mb_trim_inode_pa(inode);
return 0;
}
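
Treating i_prealloc_list as an LRU relies on list_move() re-linking a node at the head in O(1) while ext4_discard_preallocations() now walks from the tail (i_prealloc_list.prev). The same idiom is shown below with a minimal hand-rolled intrusive list in userspace, purely for illustration.

#include <stdio.h>

struct node { struct node *prev, *next; int id; };

static void list_init(struct node *h)
{
	h->prev = h->next = h;
}

static void list_del(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

static void list_add_head(struct node *n, struct node *h)
{
	n->next = h->next;
	n->prev = h;
	h->next->prev = n;
	h->next = n;
}

/* The LRU "touch": unlink and re-insert at the head. */
static void list_move(struct node *n, struct node *h)
{
	list_del(n);
	list_add_head(n, h);
}

int main(void)
{
	struct node head, a = {0, 0, 1}, b = {0, 0, 2}, c = {0, 0, 3}, *p;

	list_init(&head);
	list_add_head(&c, &head);
	list_add_head(&b, &head);
	list_add_head(&a, &head);	/* head: 1 2 3 */
	list_move(&c, &head);		/* reuse 3 -> head: 3 1 2 */
	for (p = head.next; p != &head; p = p->next)
		printf("%d ", p->id);
	printf("(tail %d is least recently used)\n", head.prev->id);
	return 0;
}
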
sbi = EXT4_SB(sb);
if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
- !ext4_data_block_valid(sbi, block, count)) {
+ !ext4_inode_block_valid(inode, block, count)) {
ext4_error(sb, "Freeing blocks not in datazone - "
"block = %llu, count = %lu", block, count);
goto error_return;
*/
#define MB_DEFAULT_GROUP_PREALLOC 512
+/*
+ * maximum length of inode prealloc list
+ */
+#define MB_DEFAULT_MAX_INODE_PREALLOC 512
struct ext4_free_data {
/* this links the free block information from sb_info */
out:
if (*moved_len) {
- ext4_discard_preallocations(orig_inode);
- ext4_discard_preallocations(donor_inode);
+ ext4_discard_preallocations(orig_inode, 0);
+ ext4_discard_preallocations(donor_inode, 0);
}
ext4_ext_drop_refs(path);
ext4_match(dir, fname, de)) {
/* found a match - just to be sure, do
* a full check */
- if (ext4_check_dir_entry(dir, NULL, de, bh, bh->b_data,
- bh->b_size, offset))
+ if (ext4_check_dir_entry(dir, NULL, de, bh, search_buf,
+ buf_size, offset))
return -1;
*res_dir = de;
return 1;
blocksize, hinfo, map);
map -= count;
dx_sort_map(map, count);
- /* Split the existing block in the middle, size-wise */
+ /* Ensure that neither split block is over half full */
size = 0;
move = 0;
for (i = count-1; i >= 0; i--) {
size += map[i].size;
move++;
}
- /* map index at which we will split */
- split = count - move;
+ /*
+ * map index at which we will split
+ *
+ * If the sum of active entries didn't exceed half the block size, just
+ * split it in half by count; each resulting block will have at least
+ * half the space free.
+ */
+ if (i > 0)
+ split = count - move;
+ else
+ split = count/2;
+
hash2 = map[split].hash;
continued = hash2 == map[split - 1].hash;
dxtrace(printk(KERN_INFO "Split block %lu at %x, %i/%i\n",
* ext4_generic_delete_entry deletes a directory entry by merging it
* with the previous entry
*/
-int ext4_generic_delete_entry(handle_t *handle,
- struct inode *dir,
+int ext4_generic_delete_entry(struct inode *dir,
struct ext4_dir_entry_2 *de_del,
struct buffer_head *bh,
void *entry_buf,
de = (struct ext4_dir_entry_2 *)entry_buf;
while (i < buf_size - csum_size) {
if (ext4_check_dir_entry(dir, NULL, de, bh,
- bh->b_data, bh->b_size, i))
+ entry_buf, buf_size, i))
return -EFSCORRUPTED;
if (de == de_del) {
if (pde)
if (unlikely(err))
goto out;
- err = ext4_generic_delete_entry(handle, dir, de_del,
- bh, bh->b_data,
+ err = ext4_generic_delete_entry(dir, de_del, bh, bh->b_data,
dir->i_sb->s_blocksize, csum_size);
if (err)
goto out;
* in separate transaction */
retval = dquot_initialize(dir);
if (retval)
- return retval;
+ goto out_trace;
retval = dquot_initialize(d_inode(dentry));
if (retval)
- return retval;
+ goto out_trace;
- retval = -ENOENT;
bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
- if (IS_ERR(bh))
- return PTR_ERR(bh);
- if (!bh)
- goto end_unlink;
+ if (IS_ERR(bh)) {
+ retval = PTR_ERR(bh);
+ goto out_trace;
+ }
+ if (!bh) {
+ retval = -ENOENT;
+ goto out_trace;
+ }
inode = d_inode(dentry);
- retval = -EFSCORRUPTED;
- if (le32_to_cpu(de->inode) != inode->i_ino)
- goto end_unlink;
+ if (le32_to_cpu(de->inode) != inode->i_ino) {
+ retval = -EFSCORRUPTED;
+ goto out_bh;
+ }
handle = ext4_journal_start(dir, EXT4_HT_DIR,
EXT4_DATA_TRANS_BLOCKS(dir->i_sb));
if (IS_ERR(handle)) {
retval = PTR_ERR(handle);
- handle = NULL;
- goto end_unlink;
+ goto out_bh;
}
if (IS_DIRSYNC(dir))
retval = ext4_delete_entry(handle, dir, de, bh);
if (retval)
- goto end_unlink;
+ goto out_handle;
dir->i_ctime = dir->i_mtime = current_time(dir);
ext4_update_dx_flag(dir);
retval = ext4_mark_inode_dirty(handle, dir);
if (retval)
- goto end_unlink;
+ goto out_handle;
if (inode->i_nlink == 0)
ext4_warning_inode(inode, "Deleting file '%.*s' with no links",
dentry->d_name.len, dentry->d_name.name);
d_invalidate(dentry);
#endif
-end_unlink:
+out_handle:
+ ext4_journal_stop(handle);
+out_bh:
brelse(bh);
- if (handle)
- ext4_journal_stop(handle);
+out_trace:
trace_ext4_unlink_exit(dentry, retval);
return retval;
}
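
The reworked ext4_unlink() exit paths follow the usual kernel ladder of one label per held resource, unwound in reverse acquisition order, which is what lets the handle = NULL sentinel go away. A compact userspace illustration of the pattern follows; the resource names are stand-ins.

#include <stdio.h>
#include <stdlib.h>

/* One goto label per held resource, unwound in reverse order of
 * acquisition; the success path falls through the same labels. */
static int unlink_like_op(int simulate_failure)
{
	int retval = 0;
	char *bh, *handle;

	bh = malloc(16);		/* like ext4_find_entry() */
	if (!bh) {
		retval = -1;
		goto out_trace;		/* nothing else held yet */
	}
	handle = malloc(16);		/* like ext4_journal_start() */
	if (!handle) {
		retval = -2;
		goto out_bh;		/* only bh to release */
	}
	if (simulate_failure) {
		retval = -3;		/* like ext4_delete_entry() failing */
		goto out_handle;	/* both held: unwind in reverse */
	}
out_handle:
	free(handle);			/* like ext4_journal_stop() */
out_bh:
	free(bh);			/* like brelse() */
out_trace:
	printf("trace exit, retval=%d\n", retval);
	return retval;
}

int main(void)
{
	unlink_like_op(0);
	unlink_like_op(1);
	return 0;
}
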
return;
}
ctx->cur_step++;
- /* fall-through */
+ fallthrough;
case STEP_VERITY:
if (ctx->enabled_steps & (1 << STEP_VERITY)) {
INIT_WORK(&ctx->work, verity_work);
return;
}
ctx->cur_step++;
- /* fall-through */
+ fallthrough;
default:
__read_end_io(ctx->bio);
}
unsigned long journal_devnum);
static int ext4_show_options(struct seq_file *seq, struct dentry *root);
static int ext4_commit_super(struct super_block *sb, int sync);
-static void ext4_mark_recovery_complete(struct super_block *sb,
+static int ext4_mark_recovery_complete(struct super_block *sb,
struct ext4_super_block *es);
-static void ext4_clear_journal_err(struct super_block *sb,
- struct ext4_super_block *es);
+static int ext4_clear_journal_err(struct super_block *sb,
+ struct ext4_super_block *es);
static int ext4_sync_fs(struct super_block *sb, int wait);
static int ext4_remount(struct super_block *sb, int *flags, char *data);
static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf);
struct va_format vaf;
va_list args;
+ atomic_inc(&EXT4_SB(sb)->s_msg_count);
if (!___ratelimit(&(EXT4_SB(sb)->s_msg_ratelimit_state), "EXT4-fs"))
return;
va_end(args);
}
-#define ext4_warning_ratelimit(sb) \
- ___ratelimit(&(EXT4_SB(sb)->s_warning_ratelimit_state), \
- "EXT4-fs warning")
+static int ext4_warning_ratelimit(struct super_block *sb)
+{
+ atomic_inc(&EXT4_SB(sb)->s_warning_count);
+ return ___ratelimit(&(EXT4_SB(sb)->s_warning_ratelimit_state),
+ "EXT4-fs warning");
+}
void __ext4_warning(struct super_block *sb, const char *function,
unsigned int line, const char *fmt, ...)
inode_set_iversion(&ei->vfs_inode, 1);
spin_lock_init(&ei->i_raw_lock);
INIT_LIST_HEAD(&ei->i_prealloc_list);
+ atomic_set(&ei->i_prealloc_active, 0);
spin_lock_init(&ei->i_prealloc_lock);
ext4_es_init_tree(&ei->i_es_tree);
rwlock_init(&ei->i_es_lock);
{
invalidate_inode_buffers(inode);
clear_inode(inode);
- ext4_discard_preallocations(inode);
+ ext4_discard_preallocations(inode, 0);
ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
dquot_drop(inode);
if (EXT4_I(inode)->jinode) {
if (!page_has_buffers(page))
return 0;
if (journal)
- return jbd2_journal_try_to_free_buffers(journal, page,
- wait & ~__GFP_DIRECT_RECLAIM);
+ return jbd2_journal_try_to_free_buffers(journal, page);
+
return try_to_free_buffers(page);
}
Opt_dioread_nolock, Opt_dioread_lock,
Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
Opt_max_dir_size_kb, Opt_nojournal_checksum, Opt_nombcache,
+ Opt_prefetch_block_bitmaps,
};
static const match_table_t tokens = {
{Opt_inlinecrypt, "inlinecrypt"},
{Opt_nombcache, "nombcache"},
{Opt_nombcache, "no_mbcache"}, /* for backward compatibility */
+ {Opt_prefetch_block_bitmaps, "prefetch_block_bitmaps"},
{Opt_removed, "check=none"}, /* mount option from ext2/3 */
{Opt_removed, "nocheck"}, /* mount option from ext2/3 */
{Opt_removed, "reservation"}, /* mount option from ext2/3 */
{Opt_max_dir_size_kb, 0, MOPT_GTE0},
{Opt_test_dummy_encryption, 0, MOPT_STRING},
{Opt_nombcache, EXT4_MOUNT_NO_MBCACHE, MOPT_SET},
+ {Opt_prefetch_block_bitmaps, EXT4_MOUNT_PREFETCH_BLOCK_BITMAPS,
+ MOPT_SET},
{Opt_err, 0, 0}
};
static int ext4_run_li_request(struct ext4_li_request *elr)
{
struct ext4_group_desc *gdp = NULL;
- ext4_group_t group, ngroups;
- struct super_block *sb;
+ struct super_block *sb = elr->lr_super;
+ ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
+ ext4_group_t group = elr->lr_next_group;
unsigned long timeout = 0;
+ unsigned int prefetch_ios = 0;
int ret = 0;
- sb = elr->lr_super;
- ngroups = EXT4_SB(sb)->s_groups_count;
+ if (elr->lr_mode == EXT4_LI_MODE_PREFETCH_BBITMAP) {
+ elr->lr_next_group = ext4_mb_prefetch(sb, group,
+ EXT4_SB(sb)->s_mb_prefetch, &prefetch_ios);
+ if (prefetch_ios)
+ ext4_mb_prefetch_fini(sb, elr->lr_next_group,
+ prefetch_ios);
+ trace_ext4_prefetch_bitmaps(sb, group, elr->lr_next_group,
+ prefetch_ios);
+ if (group >= elr->lr_next_group) {
+ ret = 1;
+ if (elr->lr_first_not_zeroed != ngroups &&
+ !sb_rdonly(sb) && test_opt(sb, INIT_INODE_TABLE)) {
+ elr->lr_next_group = elr->lr_first_not_zeroed;
+ elr->lr_mode = EXT4_LI_MODE_ITABLE;
+ ret = 0;
+ }
+ }
+ return ret;
+ }
- for (group = elr->lr_next_group; group < ngroups; group++) {
+ for (; group < ngroups; group++) {
gdp = ext4_get_group_desc(sb, group, NULL);
if (!gdp) {
ret = 1;
timeout = jiffies;
ret = ext4_init_inode_table(sb, group,
elr->lr_timeout ? 0 : 1);
+ trace_ext4_lazy_itable_init(sb, group);
if (elr->lr_timeout == 0) {
timeout = (jiffies - timeout) *
- elr->lr_sbi->s_li_wait_mult;
+ EXT4_SB(elr->lr_super)->s_li_wait_mult;
elr->lr_timeout = timeout;
}
elr->lr_next_sched = jiffies + elr->lr_timeout;
*/
static void ext4_remove_li_request(struct ext4_li_request *elr)
{
- struct ext4_sb_info *sbi;
-
if (!elr)
return;
- sbi = elr->lr_sbi;
-
list_del(&elr->lr_request);
- sbi->s_li_request = NULL;
+ EXT4_SB(elr->lr_super)->s_li_request = NULL;
kfree(elr);
}
static struct ext4_li_request *ext4_li_request_new(struct super_block *sb,
ext4_group_t start)
{
- struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_li_request *elr;
elr = kzalloc(sizeof(*elr), GFP_KERNEL);
return NULL;
elr->lr_super = sb;
- elr->lr_sbi = sbi;
- elr->lr_next_group = start;
+ elr->lr_first_not_zeroed = start;
+ if (test_opt(sb, PREFETCH_BLOCK_BITMAPS))
+ elr->lr_mode = EXT4_LI_MODE_PREFETCH_BBITMAP;
+ else {
+ elr->lr_mode = EXT4_LI_MODE_ITABLE;
+ elr->lr_next_group = start;
+ }
/*
* Randomize first schedule time of the request to
goto out;
}
- if (first_not_zeroed == ngroups || sb_rdonly(sb) ||
- !test_opt(sb, INIT_INODE_TABLE))
+ if (!test_opt(sb, PREFETCH_BLOCK_BITMAPS) &&
+ (first_not_zeroed == ngroups || sb_rdonly(sb) ||
+ !test_opt(sb, INIT_INODE_TABLE)))
goto out;
elr = ext4_li_request_new(sb, first_not_zeroed);
ext4_set_resv_clusters(sb);
- err = ext4_setup_system_zone(sb);
- if (err) {
- ext4_msg(sb, KERN_ERR, "failed to initialize system "
- "zone (%d)", err);
- goto failed_mount4a;
+ if (test_opt(sb, BLOCK_VALIDITY)) {
+ err = ext4_setup_system_zone(sb);
+ if (err) {
+ ext4_msg(sb, KERN_ERR, "failed to initialize system "
+ "zone (%d)", err);
+ goto failed_mount4a;
+ }
}
ext4_ext_init(sb);
}
#endif /* CONFIG_QUOTA */
+ /*
+ * Save the original bdev mapping's wb_err value which could be
+ * used to detect the metadata async write error.
+ */
+ spin_lock_init(&sbi->s_bdev_wb_lock);
+ if (!sb_rdonly(sb))
+ errseq_check_and_advance(&sb->s_bdev->bd_inode->i_mapping->wb_err,
+ &sbi->s_bdev_wb_err);
+ sb->s_bdev->bd_super = sb;
EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS;
ext4_orphan_cleanup(sb, es);
EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS;
if (needs_recovery) {
ext4_msg(sb, KERN_INFO, "recovery complete");
- ext4_mark_recovery_complete(sb, es);
+ err = ext4_mark_recovery_complete(sb, es);
+ if (err)
+ goto failed_mount8;
}
if (EXT4_SB(sb)->s_journal) {
if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
ratelimit_state_init(&sbi->s_err_ratelimit_state, 5 * HZ, 10);
ratelimit_state_init(&sbi->s_warning_ratelimit_state, 5 * HZ, 10);
ratelimit_state_init(&sbi->s_msg_ratelimit_state, 5 * HZ, 10);
+ atomic_set(&sbi->s_warning_count, 0);
+ atomic_set(&sbi->s_msg_count, 0);
kfree(orig_data);
return 0;
ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem");
goto failed_mount;
-#ifdef CONFIG_QUOTA
failed_mount8:
ext4_unregister_sysfs(sb);
-#endif
failed_mount7:
ext4_unregister_li_request(sb);
failed_mount6:
struct inode *journal_inode;
journal_t *journal;
- BUG_ON(!ext4_has_feature_journal(sb));
+ if (WARN_ON_ONCE(!ext4_has_feature_journal(sb)))
+ return NULL;
journal_inode = ext4_get_journal_inode(sb, journal_inum);
if (!journal_inode)
struct ext4_super_block *es;
struct block_device *bdev;
- BUG_ON(!ext4_has_feature_journal(sb));
+ if (WARN_ON_ONCE(!ext4_has_feature_journal(sb)))
+ return NULL;
bdev = ext4_blkdev_get(j_dev, sb);
if (bdev == NULL)
dev_t journal_dev;
int err = 0;
int really_read_only;
+ int journal_dev_ro;
- BUG_ON(!ext4_has_feature_journal(sb));
+ if (WARN_ON_ONCE(!ext4_has_feature_journal(sb)))
+ return -EFSCORRUPTED;
if (journal_devnum &&
journal_devnum != le32_to_cpu(es->s_journal_dev)) {
} else
journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev));
- really_read_only = bdev_read_only(sb->s_bdev);
+ if (journal_inum && journal_dev) {
+ ext4_msg(sb, KERN_ERR,
+ "filesystem has both journal inode and journal device!");
+ return -EINVAL;
+ }
+
+ if (journal_inum) {
+ journal = ext4_get_journal(sb, journal_inum);
+ if (!journal)
+ return -EINVAL;
+ } else {
+ journal = ext4_get_dev_journal(sb, journal_dev);
+ if (!journal)
+ return -EINVAL;
+ }
+
+ journal_dev_ro = bdev_read_only(journal->j_dev);
+ really_read_only = bdev_read_only(sb->s_bdev) | journal_dev_ro;
+
+ if (journal_dev_ro && !sb_rdonly(sb)) {
+ ext4_msg(sb, KERN_ERR,
+ "journal device read-only, try mounting with '-o ro'");
+ err = -EROFS;
+ goto err_out;
+ }
/*
* Are we loading a blank journal or performing recovery after a
ext4_msg(sb, KERN_ERR, "write access "
"unavailable, cannot proceed "
"(try mounting with noload)");
- return -EROFS;
+ err = -EROFS;
+ goto err_out;
}
ext4_msg(sb, KERN_INFO, "write access will "
"be enabled during recovery");
}
}
- if (journal_inum && journal_dev) {
- ext4_msg(sb, KERN_ERR, "filesystem has both journal "
- "and inode journals!");
- return -EINVAL;
- }
-
- if (journal_inum) {
- if (!(journal = ext4_get_journal(sb, journal_inum)))
- return -EINVAL;
- } else {
- if (!(journal = ext4_get_dev_journal(sb, journal_dev)))
- return -EINVAL;
- }
-
if (!(journal->j_flags & JBD2_BARRIER))
ext4_msg(sb, KERN_INFO, "barriers disabled");
if (err) {
ext4_msg(sb, KERN_ERR, "error loading journal");
- jbd2_journal_destroy(journal);
- return err;
+ goto err_out;
}
EXT4_SB(sb)->s_journal = journal;
- ext4_clear_journal_err(sb, es);
+ err = ext4_clear_journal_err(sb, es);
+ if (err) {
+ EXT4_SB(sb)->s_journal = NULL;
+ jbd2_journal_destroy(journal);
+ return err;
+ }
if (!really_read_only && journal_devnum &&
journal_devnum != le32_to_cpu(es->s_journal_dev)) {
}
return 0;
+
+err_out:
+ jbd2_journal_destroy(journal);
+ return err;
}
static int ext4_commit_super(struct super_block *sb, int sync)
return error;
/*
- * The superblock bh should be mapped, but it might not be if the
- * device was hot-removed. Not much we can do but fail the I/O.
- */
- if (!buffer_mapped(sbh))
- return error;
-
- /*
* If the file system is mounted read-only, don't update the
* superblock write time. This avoids updating the superblock
* write time when we are mounting the root file system
* remounting) the filesystem readonly, then we will end up with a
* consistent fs on disk. Record that fact.
*/
-static void ext4_mark_recovery_complete(struct super_block *sb,
- struct ext4_super_block *es)
+static int ext4_mark_recovery_complete(struct super_block *sb,
+ struct ext4_super_block *es)
{
+ int err;
journal_t *journal = EXT4_SB(sb)->s_journal;
if (!ext4_has_feature_journal(sb)) {
- BUG_ON(journal != NULL);
- return;
+ if (journal != NULL) {
+ ext4_error(sb, "Journal got removed while the fs was "
+ "mounted!");
+ return -EFSCORRUPTED;
+ }
+ return 0;
}
jbd2_journal_lock_updates(journal);
- if (jbd2_journal_flush(journal) < 0)
+ err = jbd2_journal_flush(journal);
+ if (err < 0)
goto out;
if (ext4_has_feature_journal_needs_recovery(sb) && sb_rdonly(sb)) {
ext4_clear_feature_journal_needs_recovery(sb);
ext4_commit_super(sb, 1);
}
-
out:
jbd2_journal_unlock_updates(journal);
+ return err;
}
/*
* has recorded an error from a previous lifetime, move that error to the
* main filesystem now.
*/
-static void ext4_clear_journal_err(struct super_block *sb,
+static int ext4_clear_journal_err(struct super_block *sb,
struct ext4_super_block *es)
{
journal_t *journal;
int j_errno;
const char *errstr;
- BUG_ON(!ext4_has_feature_journal(sb));
+ if (!ext4_has_feature_journal(sb)) {
+ ext4_error(sb, "Journal got removed while the fs was mounted!");
+ return -EFSCORRUPTED;
+ }
journal = EXT4_SB(sb)->s_journal;
jbd2_journal_clear_err(journal);
jbd2_journal_update_sb_errno(journal);
}
+ return 0;
}
/*
{
struct ext4_super_block *es;
struct ext4_sb_info *sbi = EXT4_SB(sb);
- unsigned long old_sb_flags;
+ unsigned long old_sb_flags, vfs_flags;
struct ext4_mount_options old_opts;
int enable_quota = 0;
ext4_group_t g;
if (sbi->s_journal && sbi->s_journal->j_task->io_context)
journal_ioprio = sbi->s_journal->j_task->io_context->ioprio;
+ /*
+ * Some options can be enabled by ext4 and/or by VFS mount flags;
+ * either way, we need to make sure they match in both *flags and
+ * s_flags. Copy the selected flags from *flags to s_flags.
+ */
+ vfs_flags = SB_LAZYTIME | SB_I_VERSION;
+ sb->s_flags = (sb->s_flags & ~vfs_flags) | (*flags & vfs_flags);
+
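
The copy line above is the standard merge-under-mask idiom: clear the selected bits in the destination, then OR in exactly those bits from the source. In isolation, with invented flag names:

#include <stdio.h>

#define FLAG_LAZYTIME	(1U << 0)	/* stand-ins for SB_* bits */
#define FLAG_I_VERSION	(1U << 1)
#define FLAG_RDONLY	(1U << 2)

/* Copy only the bits in @mask from @src into @dst. */
static unsigned int merge_flags(unsigned int dst, unsigned int src,
				unsigned int mask)
{
	return (dst & ~mask) | (src & mask);
}

int main(void)
{
	unsigned int s_flags = FLAG_RDONLY;		/* destination */
	unsigned int mnt_flags = FLAG_LAZYTIME;		/* source */
	unsigned int mask = FLAG_LAZYTIME | FLAG_I_VERSION;

	s_flags = merge_flags(s_flags, mnt_flags, mask);
	printf("lazytime=%u i_version=%u rdonly=%u\n",
	       !!(s_flags & FLAG_LAZYTIME),
	       !!(s_flags & FLAG_I_VERSION),
	       !!(s_flags & FLAG_RDONLY));	/* 1 0 1 */
	return 0;
}
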
if (!parse_options(data, sb, NULL, &journal_ioprio, 1)) {
err = -EINVAL;
goto restore_opts;
set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
}
- if (*flags & SB_LAZYTIME)
- sb->s_flags |= SB_LAZYTIME;
-
if ((bool)(*flags & SB_RDONLY) != sb_rdonly(sb)) {
if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) {
err = -EROFS;
(sbi->s_mount_state & EXT4_VALID_FS))
es->s_state = cpu_to_le16(sbi->s_mount_state);
- if (sbi->s_journal)
+ if (sbi->s_journal) {
+ /*
+ * We let remount-ro finish even if marking fs
+ * as clean failed...
+ */
ext4_mark_recovery_complete(sb, es);
+ }
if (sbi->s_mmp_tsk)
kthread_stop(sbi->s_mmp_tsk);
} else {
}
/*
+ * Update the original bdev mapping's wb_err value
+ * which could be used to detect the metadata async
+ * write error.
+ */
+ errseq_check_and_advance(&sb->s_bdev->bd_inode->i_mapping->wb_err,
+ &sbi->s_bdev_wb_err);
+
+ /*
* Mounting a RDONLY partition read-write, so reread
* and store the current valid flag. (It may have
* been changed by e2fsck since we originally mounted
* the partition.)
*/
- if (sbi->s_journal)
- ext4_clear_journal_err(sb, es);
+ if (sbi->s_journal) {
+ err = ext4_clear_journal_err(sb, es);
+ if (err)
+ goto restore_opts;
+ }
sbi->s_mount_state = le16_to_cpu(es->s_state);
err = ext4_setup_super(sb, es, 0);
ext4_register_li_request(sb, first_not_zeroed);
}
- ext4_setup_system_zone(sb);
+ /*
+ * Handle creation of system zone data early because it can fail.
+ * Releasing existing data is done once we are sure the remount will
+ * succeed.
+ */
+ if (test_opt(sb, BLOCK_VALIDITY) && !sbi->system_blks) {
+ err = ext4_setup_system_zone(sb);
+ if (err)
+ goto restore_opts;
+ }
+
if (sbi->s_journal == NULL && !(old_sb_flags & SB_RDONLY)) {
err = ext4_commit_super(sb, 1);
if (err)
}
}
#endif
+ if (!test_opt(sb, BLOCK_VALIDITY) && sbi->system_blks)
+ ext4_release_system_zone(sb);
+
+ /*
+ * Some options can be enabled by ext4 and/or by VFS mount flags;
+ * either way, we need to make sure they match in both *flags and
+ * s_flags. Copy the selected flags from s_flags to *flags.
+ */
+ *flags = (*flags & ~vfs_flags) | (sb->s_flags & vfs_flags);
- *flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME);
ext4_msg(sb, KERN_INFO, "re-mounted. Opts: %s", orig_data);
kfree(orig_data);
return 0;
sbi->s_commit_interval = old_opts.s_commit_interval;
sbi->s_min_batch_time = old_opts.s_min_batch_time;
sbi->s_max_batch_time = old_opts.s_max_batch_time;
+ if (!test_opt(sb, BLOCK_VALIDITY) && sbi->system_blks)
+ ext4_release_system_zone(sb);
#ifdef CONFIG_QUOTA
sbi->s_jquota_fmt = old_opts.s_jquota_fmt;
for (i = 0; i < EXT4_MAXQUOTAS; i++) {
#define EXT4_RW_ATTR_SBI_UL(_name,_elname) \
EXT4_ATTR_OFFSET(_name, 0644, pointer_ul, ext4_sb_info, _elname)
+#define EXT4_RO_ATTR_SBI_ATOMIC(_name,_elname) \
+ EXT4_ATTR_OFFSET(_name, 0444, pointer_atomic, ext4_sb_info, _elname)
+
#define EXT4_ATTR_PTR(_name,_mode,_id,_ptr) \
static struct ext4_attr ext4_attr_##_name = { \
.attr = {.name = __stringify(_name), .mode = _mode }, \
EXT4_RW_ATTR_SBI_UI(mb_order2_req, s_mb_order2_reqs);
EXT4_RW_ATTR_SBI_UI(mb_stream_req, s_mb_stream_request);
EXT4_RW_ATTR_SBI_UI(mb_group_prealloc, s_mb_group_prealloc);
+EXT4_RW_ATTR_SBI_UI(mb_max_inode_prealloc, s_mb_max_inode_prealloc);
EXT4_RW_ATTR_SBI_UI(extent_max_zeroout_kb, s_extent_max_zeroout_kb);
EXT4_ATTR(trigger_fs_error, 0200, trigger_test_error);
EXT4_RW_ATTR_SBI_UI(err_ratelimit_interval_ms, s_err_ratelimit_state.interval);
#ifdef CONFIG_EXT4_DEBUG
EXT4_RW_ATTR_SBI_UL(simulate_fail, s_simulate_fail);
#endif
+EXT4_RO_ATTR_SBI_ATOMIC(warning_count, s_warning_count);
+EXT4_RO_ATTR_SBI_ATOMIC(msg_count, s_msg_count);
EXT4_RO_ATTR_ES_UI(errors_count, s_error_count);
EXT4_RO_ATTR_ES_U8(first_error_errcode, s_first_error_errcode);
EXT4_RO_ATTR_ES_U8(last_error_errcode, s_last_error_errcode);
EXT4_ATTR(first_error_time, 0444, first_error_time);
EXT4_ATTR(last_error_time, 0444, last_error_time);
EXT4_ATTR(journal_task, 0444, journal_task);
+EXT4_RW_ATTR_SBI_UI(mb_prefetch, s_mb_prefetch);
+EXT4_RW_ATTR_SBI_UI(mb_prefetch_limit, s_mb_prefetch_limit);
static unsigned int old_bump_val = 128;
EXT4_ATTR_PTR(max_writeback_mb_bump, 0444, pointer_ui, &old_bump_val);
ATTR_LIST(mb_order2_req),
ATTR_LIST(mb_stream_req),
ATTR_LIST(mb_group_prealloc),
+ ATTR_LIST(mb_max_inode_prealloc),
ATTR_LIST(max_writeback_mb_bump),
ATTR_LIST(extent_max_zeroout_kb),
ATTR_LIST(trigger_fs_error),
ATTR_LIST(msg_ratelimit_interval_ms),
ATTR_LIST(msg_ratelimit_burst),
ATTR_LIST(errors_count),
+ ATTR_LIST(warning_count),
+ ATTR_LIST(msg_count),
ATTR_LIST(first_error_ino),
ATTR_LIST(last_error_ino),
ATTR_LIST(first_error_block),
#ifdef CONFIG_EXT4_DEBUG
ATTR_LIST(simulate_fail),
#endif
+ ATTR_LIST(mb_prefetch),
+ ATTR_LIST(mb_prefetch_limit),
NULL,
};
ATTRIBUTE_GROUPS(ext4);
block = 0;
while (wsize < bufsize) {
- if (bh != NULL)
- brelse(bh);
+ brelse(bh);
csize = (bufsize - wsize) > blocksize ? blocksize :
bufsize - wsize;
bh = ext4_getblk(handle, ea_inode, block, 0);
case FI_NEW_INODE:
if (set)
return;
- /* fall through */
+ fallthrough;
case FI_DATA_EXIST:
case FI_INLINE_DOTS:
case FI_PIN_FILE:
switch (dn->max_level) {
case 3:
base += 2 * indirect_blks;
- /* fall through */
+ fallthrough;
case 2:
base += 2 * direct_blks;
- /* fall through */
+ fallthrough;
case 1:
base += direct_index;
break;
case F_OFD_SETLK:
case F_OFD_SETLKW:
#endif
- /* Fallthrough */
+ fallthrough;
case F_SETLK:
case F_SETLKW:
if (copy_from_user(&flock, argp, sizeof(flock)))
if (!do_send_sig_info(signum, &si, p, type))
break;
}
- /* fall-through - fall back on the old plain SIGIO signal */
+ fallthrough; /* fall back on the old plain SIGIO signal */
case 0:
do_send_sig_info(SIGIO, SEND_SIG_PRIV, p, type);
}
struct wb_writeback_work {
long nr_pages;
struct super_block *sb;
- unsigned long *older_than_this;
enum writeback_sync_modes sync_mode;
unsigned int tagged_writepages:1;
unsigned int for_kupdate:1;
struct bdi_writeback *wb)
{
assert_spin_locked(&wb->list_lock);
+ assert_spin_locked(&inode->i_lock);
+ inode->i_state &= ~I_SYNC_QUEUED;
list_del_init(&inode->i_io_list);
wb_io_lists_depopulated(wb);
}
struct bdi_writeback *wb;
wb = inode_to_wb_and_lock_list(inode);
+ spin_lock(&inode->i_lock);
inode_io_list_del_locked(inode, wb);
+ spin_unlock(&inode->i_lock);
spin_unlock(&wb->list_lock);
}
EXPORT_SYMBOL(inode_io_list_del);
* the case then the inode must have been redirtied while it was being written
* out and we don't reset its dirtied_when.
*/
-static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
+static void redirty_tail_locked(struct inode *inode, struct bdi_writeback *wb)
{
+ assert_spin_locked(&inode->i_lock);
+
if (!list_empty(&wb->b_dirty)) {
struct inode *tail;
inode->dirtied_when = jiffies;
}
inode_io_list_move_locked(inode, wb, &wb->b_dirty);
+ inode->i_state &= ~I_SYNC_QUEUED;
+}
+
+static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
+{
+ spin_lock(&inode->i_lock);
+ redirty_tail_locked(inode, wb);
+ spin_unlock(&inode->i_lock);
}
/*
#define EXPIRE_DIRTY_ATIME 0x0001
/*
- * Move expired (dirtied before work->older_than_this) dirty inodes from
+ * Move expired (dirtied before dirtied_before) dirty inodes from
* @delaying_queue to @dispatch_queue.
*/
static int move_expired_inodes(struct list_head *delaying_queue,
struct list_head *dispatch_queue,
- int flags,
- struct wb_writeback_work *work)
+ unsigned long dirtied_before)
{
- unsigned long *older_than_this = NULL;
- unsigned long expire_time;
LIST_HEAD(tmp);
struct list_head *pos, *node;
struct super_block *sb = NULL;
int do_sb_sort = 0;
int moved = 0;
- if ((flags & EXPIRE_DIRTY_ATIME) == 0)
- older_than_this = work->older_than_this;
- else if (!work->for_sync) {
- expire_time = jiffies - (dirtytime_expire_interval * HZ);
- older_than_this = &expire_time;
- }
while (!list_empty(delaying_queue)) {
inode = wb_inode(delaying_queue->prev);
- if (older_than_this &&
- inode_dirtied_after(inode, *older_than_this))
+ if (inode_dirtied_after(inode, dirtied_before))
break;
list_move(&inode->i_io_list, &tmp);
moved++;
- if (flags & EXPIRE_DIRTY_ATIME)
- set_bit(__I_DIRTY_TIME_EXPIRED, &inode->i_state);
+ spin_lock(&inode->i_lock);
+ inode->i_state |= I_SYNC_QUEUED;
+ spin_unlock(&inode->i_lock);
if (sb_is_blkdev_sb(inode->i_sb))
continue;
if (sb && sb != inode->i_sb)
* |
* +--> dequeue for IO
*/
-static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work)
+static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work,
+ unsigned long dirtied_before)
{
int moved;
+ unsigned long time_expire_jif = dirtied_before;
assert_spin_locked(&wb->list_lock);
list_splice_init(&wb->b_more_io, &wb->b_io);
- moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, 0, work);
+ moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, dirtied_before);
+ if (!work->for_sync)
+ time_expire_jif = jiffies - dirtytime_expire_interval * HZ;
moved += move_expired_inodes(&wb->b_dirty_time, &wb->b_io,
- EXPIRE_DIRTY_ATIME, work);
+ time_expire_jif);
if (moved)
wb_io_lists_populated(wb);
- trace_writeback_queue_io(wb, work, moved);
+ trace_writeback_queue_io(wb, work, dirtied_before, moved);
}
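
Both cutoffs fed to move_expired_inodes() are jiffies values, so the inode_dirtied_after() comparison has to stay correct across counter wrap, which the kernel does by comparing a signed difference (the time_after() family) rather than raw magnitudes. A userspace sketch of the wrap-safe comparison:

#include <stdio.h>

typedef unsigned long jiffies_t;

/* Wrap-safe "a is later than b": true iff a is within half the
 * counter range ahead of b, mirroring the kernel's time_after(). */
static int toy_time_after(jiffies_t a, jiffies_t b)
{
	return (long)(b - a) < 0;
}

int main(void)
{
	jiffies_t before_wrap = (jiffies_t)-10;	/* just before wrap */
	jiffies_t after_wrap = 5;		/* just after wrap */

	printf("naive: %d\n", after_wrap > before_wrap);	/* 0: wrong */
	printf("wrap-safe: %d\n",
	       toy_time_after(after_wrap, before_wrap));	/* 1 */
	return 0;
}
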
static int write_inode(struct inode *inode, struct writeback_control *wbc)
* writeback is not making progress due to locked
* buffers. Skip this inode for now.
*/
- redirty_tail(inode, wb);
+ redirty_tail_locked(inode, wb);
return;
}
* retrying writeback of the dirty page/inode
* that cannot be performed immediately.
*/
- redirty_tail(inode, wb);
+ redirty_tail_locked(inode, wb);
}
} else if (inode->i_state & I_DIRTY) {
/*
* such as delayed allocation during submission or metadata
* updates after data IO completion.
*/
- redirty_tail(inode, wb);
+ redirty_tail_locked(inode, wb);
} else if (inode->i_state & I_DIRTY_TIME) {
inode->dirtied_when = jiffies;
inode_io_list_move_locked(inode, wb, &wb->b_dirty_time);
+ inode->i_state &= ~I_SYNC_QUEUED;
} else {
/* The inode is clean. Remove from writeback lists. */
inode_io_list_del_locked(inode, wb);
spin_lock(&inode->i_lock);
dirty = inode->i_state & I_DIRTY;
- if (inode->i_state & I_DIRTY_TIME) {
- if ((dirty & I_DIRTY_INODE) ||
- wbc->sync_mode == WB_SYNC_ALL ||
- unlikely(inode->i_state & I_DIRTY_TIME_EXPIRED) ||
- unlikely(time_after(jiffies,
- (inode->dirtied_time_when +
- dirtytime_expire_interval * HZ)))) {
- dirty |= I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED;
- trace_writeback_lazytime(inode);
- }
- } else
- inode->i_state &= ~I_DIRTY_TIME_EXPIRED;
+ if ((inode->i_state & I_DIRTY_TIME) &&
+ ((dirty & I_DIRTY_INODE) ||
+ wbc->sync_mode == WB_SYNC_ALL || wbc->for_sync ||
+ time_after(jiffies, inode->dirtied_time_when +
+ dirtytime_expire_interval * HZ))) {
+ dirty |= I_DIRTY_TIME;
+ trace_writeback_lazytime(inode);
+ }
inode->i_state &= ~dirty;
/*
*/
spin_lock(&inode->i_lock);
if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
+ redirty_tail_locked(inode, wb);
spin_unlock(&inode->i_lock);
- redirty_tail(inode, wb);
continue;
}
if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {
blk_start_plug(&plug);
spin_lock(&wb->list_lock);
if (list_empty(&wb->b_io))
- queue_io(wb, &work);
+ queue_io(wb, &work, jiffies);
__writeback_inodes_wb(wb, &work);
spin_unlock(&wb->list_lock);
blk_finish_plug(&plug);
* takes longer than a dirty_writeback_interval interval, then leave a
* one-second gap.
*
- * older_than_this takes precedence over nr_to_write. So we'll only write back
+ * dirtied_before takes precedence over nr_to_write. So we'll only write back
* all dirty pages if they are all attached to "old" mappings.
*/
static long wb_writeback(struct bdi_writeback *wb,
{
unsigned long wb_start = jiffies;
long nr_pages = work->nr_pages;
- unsigned long oldest_jif;
+ unsigned long dirtied_before = jiffies;
struct inode *inode;
long progress;
struct blk_plug plug;
- oldest_jif = jiffies;
- work->older_than_this = &oldest_jif;
-
blk_start_plug(&plug);
spin_lock(&wb->list_lock);
for (;;) {
* safe.
*/
if (work->for_kupdate) {
- oldest_jif = jiffies -
+ dirtied_before = jiffies -
msecs_to_jiffies(dirty_expire_interval * 10);
} else if (work->for_background)
- oldest_jif = jiffies;
+ dirtied_before = jiffies;
trace_writeback_start(wb, work);
if (list_empty(&wb->b_io))
- queue_io(wb, work);
+ queue_io(wb, work, dirtied_before);
if (work->sb)
progress = writeback_sb_inodes(work->sb, wb, work);
else
inode->i_state |= flags;
/*
- * If the inode is being synced, just update its dirty state.
- * The unlocker will place the inode on the appropriate
- * superblock list, based upon its state.
+ * If the inode is queued for writeback by the flush worker, just
+ * update its dirty state. Once the flush worker is done with
+ * the inode it will place it on the appropriate superblock
+ * list, based upon its state.
*/
- if (inode->i_state & I_SYNC)
+ if (inode->i_state & I_SYNC_QUEUED)
goto out_unlock_inode;
/*
switch (param->type) {
case fs_value_is_string:
len = 1 + param->size;
- /* Fall through */
+ fallthrough;
case fs_value_is_flag:
len += strlen(param->key);
break;
break;
case FSCONFIG_SET_PATH_EMPTY:
lookup_flags = LOOKUP_EMPTY;
- /* fallthru */
+ fallthrough;
case FSCONFIG_SET_PATH:
param.type = fs_value_is_filename;
param.name = getname_flags(_value, lookup_flags, NULL);
}
if (n == 0)
break;
- /* fall through - To branching from existing tree */
+ fallthrough; /* To branching from existing tree */
case ALLOC_GROW_DEPTH:
if (i > 1 && i < mp->mp_fheight)
gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[i-1]);
state = ALLOC_DATA;
if (n == 0)
break;
- /* fall through - To tree complete, adding data blocks */
+ fallthrough; /* To tree complete, adding data blocks */
case ALLOC_DATA:
BUG_ON(n > dblks);
BUG_ON(mp->mp_bh[end_of_metadata] == NULL);
}
/**
+ * trans_drain - drain the buf and databuf queue for a failed transaction
+ * @tr: the transaction to drain
+ *
+ * When this is called, we're taking an error exit for a log write that
+ * failed; since we bypassed the after_commit functions, we need to remove
+ * the items from the buf and databuf queues.
+ */
+static void trans_drain(struct gfs2_trans *tr)
+{
+ struct gfs2_bufdata *bd;
+ struct list_head *head;
+
+ if (!tr)
+ return;
+
+ head = &tr->tr_buf;
+ while (!list_empty(head)) {
+ bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
+ list_del_init(&bd->bd_list);
+ kmem_cache_free(gfs2_bufdata_cachep, bd);
+ }
+ head = &tr->tr_databuf;
+ while (!list_empty(head)) {
+ bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
+ list_del_init(&bd->bd_list);
+ kmem_cache_free(gfs2_bufdata_cachep, bd);
+ }
+}
+
+/**
* gfs2_log_flush - flush incore transaction(s)
* @sdp: the filesystem
* @gl: The glock structure to flush. If NULL, flush the whole incore log
out:
if (gfs2_withdrawn(sdp)) {
+ trans_drain(tr);
/**
* If the tr_list is empty, we're withdrawing during a log
* flush that targets a transaction, but the transaction was
case GFS2_QUOTA_ON:
state->s_state[USRQUOTA].flags |= QCI_LIMITS_ENFORCED;
state->s_state[GRPQUOTA].flags |= QCI_LIMITS_ENFORCED;
- /*FALLTHRU*/
+ fallthrough;
case GFS2_QUOTA_ACCOUNT:
state->s_state[USRQUOTA].flags |= QCI_ACCT_ENABLED |
QCI_SYSFILE;
tr->tr_reserved += gfs2_struct2blk(sdp, revokes);
INIT_LIST_HEAD(&tr->tr_databuf);
INIT_LIST_HEAD(&tr->tr_buf);
+ INIT_LIST_HEAD(&tr->tr_list);
INIT_LIST_HEAD(&tr->tr_ail1_list);
INIT_LIST_HEAD(&tr->tr_ail2_list);
switch (sbi->s_vhdr->signature) {
case cpu_to_be16(HFSPLUS_VOLHEAD_SIGX):
set_bit(HFSPLUS_SB_HFSX, &sbi->flags);
- /*FALLTHRU*/
+ fallthrough;
case cpu_to_be16(HFSPLUS_VOLHEAD_SIG):
break;
case cpu_to_be16(HFSP_WRAP_MAGIC):
return match->nr_running && !match->cancel_all;
}
+static inline void io_wqe_remove_pending(struct io_wqe *wqe,
+ struct io_wq_work *work,
+ struct io_wq_work_node *prev)
+{
+ unsigned int hash = io_get_work_hash(work);
+ struct io_wq_work *prev_work = NULL;
+
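+	/*
+	 * If the work being removed is the tail of a hash chain, pull the
+	 * tail pointer back to the previous entry when it hashes the same;
+	 * otherwise the chain is now empty and the tail must be cleared.
+	 */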
+ if (io_wq_is_hashed(work) && work == wqe->hash_tail[hash]) {
+ if (prev)
+ prev_work = container_of(prev, struct io_wq_work, list);
+ if (prev_work && io_get_work_hash(prev_work) == hash)
+ wqe->hash_tail[hash] = prev_work;
+ else
+ wqe->hash_tail[hash] = NULL;
+ }
+ wq_list_del(&wqe->work_list, &work->list, prev);
+}
+
static void io_wqe_cancel_pending_work(struct io_wqe *wqe,
struct io_cb_cancel_data *match)
{
work = container_of(node, struct io_wq_work, list);
if (!match->fn(work, match->data))
continue;
-
- wq_list_del(&wqe->work_list, node, prev);
+ io_wqe_remove_pending(wqe, work, prev);
spin_unlock_irqrestore(&wqe->lock, flags);
io_run_cancel(work, wqe);
match->nr_pending++;
REQ_F_ISREG_BIT,
REQ_F_COMP_LOCKED_BIT,
REQ_F_NEED_CLEANUP_BIT,
- REQ_F_OVERFLOW_BIT,
REQ_F_POLLED_BIT,
REQ_F_BUFFER_SELECTED_BIT,
REQ_F_NO_FILE_TABLE_BIT,
REQ_F_COMP_LOCKED = BIT(REQ_F_COMP_LOCKED_BIT),
/* needs cleanup */
REQ_F_NEED_CLEANUP = BIT(REQ_F_NEED_CLEANUP_BIT),
- /* in overflow list */
- REQ_F_OVERFLOW = BIT(REQ_F_OVERFLOW_BIT),
/* already went through poll handler */
REQ_F_POLLED = BIT(REQ_F_POLLED_BIT),
/* buffer already selected */
static inline void io_clean_op(struct io_kiocb *req)
{
- if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED))
+ if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED |
+ REQ_F_INFLIGHT))
__io_clean_op(req);
}
io_req_init_async(req);
if (req->flags & REQ_F_ISREG) {
- if (def->hash_reg_file)
+ if (def->hash_reg_file || (req->ctx->flags & IORING_SETUP_IOPOLL))
io_wq_hash_work(&req->work, file_inode(req->file));
} else {
if (def->unbound_nonreg_file)
req = list_first_entry(&ctx->cq_overflow_list, struct io_kiocb,
compl.list);
list_move(&req->compl.list, &list);
- req->flags &= ~REQ_F_OVERFLOW;
if (cqe) {
WRITE_ONCE(cqe->user_data, req->user_data);
WRITE_ONCE(cqe->res, req->result);
ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW;
}
io_clean_op(req);
- req->flags |= REQ_F_OVERFLOW;
req->result = res;
req->compl.cflags = cflags;
refcount_inc(&req->refs);
if (req->file)
io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE));
- if (req->flags & REQ_F_INFLIGHT) {
- struct io_ring_ctx *ctx = req->ctx;
- unsigned long flags;
-
- spin_lock_irqsave(&ctx->inflight_lock, flags);
- list_del(&req->inflight_entry);
- if (waitqueue_active(&ctx->inflight_wait))
- wake_up(&ctx->inflight_wait);
- spin_unlock_irqrestore(&ctx->inflight_lock, flags);
- }
-
return io_req_clean_work(req);
}
return __io_req_find_next(req);
}
-static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb)
+static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb,
+ bool twa_signal_ok)
{
struct task_struct *tsk = req->task;
struct io_ring_ctx *ctx = req->ctx;
* will do the job.
*/
notify = 0;
- if (!(ctx->flags & IORING_SETUP_SQPOLL))
+ if (!(ctx->flags & IORING_SETUP_SQPOLL) && twa_signal_ok)
notify = TWA_SIGNAL;
ret = task_work_add(tsk, cb, notify);
init_task_work(&req->task_work, io_req_task_submit);
percpu_ref_get(&req->ctx->refs);
- ret = io_req_task_work_add(req, &req->task_work);
+ ret = io_req_task_work_add(req, &req->task_work, true);
if (unlikely(ret)) {
struct task_struct *tsk;
req = list_first_entry(done, struct io_kiocb, inflight_entry);
if (READ_ONCE(req->result) == -EAGAIN) {
+ req->result = 0;
req->iopoll_completed = 0;
list_move_tail(&req->inflight_entry, &again);
continue;
io_req_complete(req, ret);
return false;
}
-
-static void io_rw_resubmit(struct callback_head *cb)
-{
- struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
- struct io_ring_ctx *ctx = req->ctx;
- int err;
-
- err = io_sq_thread_acquire_mm(ctx, req);
-
- if (io_resubmit_prep(req, err)) {
- refcount_inc(&req->refs);
- io_queue_async_work(req);
- }
-
- percpu_ref_put(&ctx->refs);
-}
#endif
static bool io_rw_reissue(struct io_kiocb *req, long res)
{
#ifdef CONFIG_BLOCK
+ umode_t mode = file_inode(req->file)->i_mode;
int ret;
+ if (!S_ISBLK(mode) && !S_ISREG(mode))
+ return false;
if ((res != -EAGAIN && res != -EOPNOTSUPP) || io_wq_current_is_worker())
return false;
- init_task_work(&req->task_work, io_rw_resubmit);
- percpu_ref_get(&req->ctx->refs);
+ ret = io_sq_thread_acquire_mm(req->ctx, req);
- ret = io_req_task_work_add(req, &req->task_work);
- if (!ret)
+ if (io_resubmit_prep(req, ret)) {
+ refcount_inc(&req->refs);
+ io_queue_async_work(req);
return true;
+ }
+
#endif
return false;
}
* IO with EINTR.
*/
ret = -EINTR;
- /* fall through */
+ fallthrough;
default:
kiocb->ki_complete(kiocb, ret, 0);
}
return __io_iov_buffer_select(req, iov, needs_lock);
}
-static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
- struct iovec **iovec, struct iov_iter *iter,
- bool needs_lock)
+static ssize_t __io_import_iovec(int rw, struct io_kiocb *req,
+ struct iovec **iovec, struct iov_iter *iter,
+ bool needs_lock)
{
void __user *buf = u64_to_user_ptr(req->rw.addr);
size_t sqe_len = req->rw.len;
ssize_t ret;
u8 opcode;
- if (req->io) {
- struct io_async_rw *iorw = &req->io->rw;
-
- *iovec = NULL;
- return iov_iter_count(&iorw->iter);
- }
-
opcode = req->opcode;
if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
*iovec = NULL;
if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
if (req->flags & REQ_F_BUFFER_SELECT) {
buf = io_rw_buffer_select(req, &sqe_len, needs_lock);
- if (IS_ERR(buf)) {
- *iovec = NULL;
+ if (IS_ERR(buf))
return PTR_ERR(buf);
- }
req->rw.len = sqe_len;
}
return import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter);
}
+static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
+ struct iovec **iovec, struct iov_iter *iter,
+ bool needs_lock)
+{
+ if (!req->io)
+ return __io_import_iovec(rw, req, iovec, iter, needs_lock);
+ *iovec = NULL;
+ return iov_iter_count(&req->io->rw.iter);
+}
+
+static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
+{
+ return kiocb->ki_filp->f_mode & FMODE_STREAM ? NULL : &kiocb->ki_pos;
+}
+
/*
* For files that don't have ->read_iter() and ->write_iter(), handle them
* by looping over ->read() or ->write() manually.
if (rw == READ) {
nr = file->f_op->read(file, iovec.iov_base,
- iovec.iov_len, &kiocb->ki_pos);
+ iovec.iov_len, io_kiocb_ppos(kiocb));
} else {
nr = file->f_op->write(file, iovec.iov_base,
- iovec.iov_len, &kiocb->ki_pos);
+ iovec.iov_len, io_kiocb_ppos(kiocb));
}
if (iov_iter_is_bvec(iter))
bool force_nonblock)
{
struct io_async_rw *iorw = &req->io->rw;
+ struct iovec *iov;
ssize_t ret;
- iorw->iter.iov = iorw->fast_iov;
- /* reset ->io around the iovec import, we don't want to use it */
- req->io = NULL;
- ret = io_import_iovec(rw, req, (struct iovec **) &iorw->iter.iov,
- &iorw->iter, !force_nonblock);
- req->io = container_of(iorw, struct io_async_ctx, rw);
+ iorw->iter.iov = iov = iorw->fast_iov;
+ ret = __io_import_iovec(rw, req, &iov, &iorw->iter, !force_nonblock);
if (unlikely(ret < 0))
return ret;
+ iorw->iter.iov = iov;
io_req_map_rw(req, iorw->iter.iov, iorw->fast_iov, &iorw->iter);
return 0;
}
/* submit ref gets dropped, acquire a new one */
refcount_inc(&req->refs);
- ret = io_req_task_work_add(req, &req->task_work);
+ ret = io_req_task_work_add(req, &req->task_work, true);
if (unlikely(ret)) {
struct task_struct *tsk;
return 1;
}
-static inline int kiocb_wait_page_queue_init(struct kiocb *kiocb,
- struct wait_page_queue *wait,
- wait_queue_func_t func,
- void *data)
-{
- /* Can't support async wakeup with polled IO */
- if (kiocb->ki_flags & IOCB_HIPRI)
- return -EINVAL;
- if (kiocb->ki_filp->f_mode & FMODE_BUF_RASYNC) {
- wait->wait.func = func;
- wait->wait.private = data;
- wait->wait.flags = 0;
- INIT_LIST_HEAD(&wait->wait.entry);
- kiocb->ki_flags |= IOCB_WAITQ;
- kiocb->ki_waitq = wait;
- return 0;
- }
-
- return -EOPNOTSUPP;
-}
-
/*
* This controls whether a given IO request should be armed for async page
* based retry. If we return false here, the request is handed to the async
*/
static bool io_rw_should_retry(struct io_kiocb *req)
{
+ struct wait_page_queue *wait = &req->io->rw.wpq;
struct kiocb *kiocb = &req->rw.kiocb;
- int ret;
/* never retry for NOWAIT, we just complete with -EAGAIN */
if (req->flags & REQ_F_NOWAIT)
return false;
/* Only for buffered IO */
- if (kiocb->ki_flags & IOCB_DIRECT)
+ if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
return false;
+
/*
* just use poll if we can, and don't attempt if the fs doesn't
* support callback based unlocks
if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
return false;
- ret = kiocb_wait_page_queue_init(kiocb, &req->io->rw.wpq,
- io_async_buf_func, req);
- if (!ret) {
- io_get_req_task(req);
- return true;
- }
+ wait->wait.func = io_async_buf_func;
+ wait->wait.private = req;
+ wait->wait.flags = 0;
+ INIT_LIST_HEAD(&wait->wait.entry);
+ kiocb->ki_flags |= IOCB_WAITQ;
+ kiocb->ki_waitq = wait;
- return false;
+ io_get_req_task(req);
+ return true;
}
static int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter)
ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock);
if (ret < 0)
return ret;
+ iov_count = iov_iter_count(iter);
io_size = ret;
req->result = io_size;
ret = 0;
if (force_nonblock && !io_file_supports_async(req->file, READ))
goto copy_iov;
- iov_count = iov_iter_count(iter);
- ret = rw_verify_area(READ, req->file, &kiocb->ki_pos, iov_count);
+ ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), iov_count);
if (unlikely(ret))
goto out_free;
ret = 0;
goto out_free;
} else if (ret == -EAGAIN) {
- if (!force_nonblock)
+ /* IOPOLL retry should happen for io-wq threads */
+ if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
goto done;
+ /* no retry on NONBLOCK marked file */
+ if (req->file->f_flags & O_NONBLOCK)
+ goto done;
+ /* some cases will consume bytes even on error returns */
+ iov_iter_revert(iter, iov_count - iov_iter_count(iter));
ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
if (ret)
goto out_free;
return -EAGAIN;
} else if (ret < 0) {
- goto out_free;
+ /* make sure -ERESTARTSYS -> -EINTR is done */
+ goto done;
}
/* read it all, or we did blocking attempt. no retry. */
kiocb_done(kiocb, ret, cs);
ret = 0;
out_free:
+ /* it's reportedly faster than delegating the null check to kfree() */
if (iovec)
kfree(iovec);
return ret;
ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock);
if (ret < 0)
return ret;
+ iov_count = iov_iter_count(iter);
io_size = ret;
req->result = io_size;
(req->flags & REQ_F_ISREG))
goto copy_iov;
- iov_count = iov_iter_count(iter);
- ret = rw_verify_area(WRITE, req->file, &kiocb->ki_pos, iov_count);
+ ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), iov_count);
if (unlikely(ret))
goto out_free;
*/
if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
ret2 = -EAGAIN;
+ /* no retry on NONBLOCK marked file */
+ if (ret2 == -EAGAIN && (req->file->f_flags & O_NONBLOCK))
+ goto done;
if (!force_nonblock || ret2 != -EAGAIN) {
+ /* IOPOLL retry should happen for io-wq threads */
+ if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN)
+ goto copy_iov;
+done:
kiocb_done(kiocb, ret2, cs);
} else {
copy_iov:
+ /* some cases will consume bytes even on error returns */
+ iov_iter_revert(iter, iov_count - iov_iter_count(iter));
ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
if (!ret)
return -EAGAIN;
}
out_free:
+ /* it's reportedly faster than delegating the null check to kfree() */
if (iovec)
kfree(iovec);
return ret;
static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
__poll_t mask, task_work_func_t func)
{
+ bool twa_signal_ok;
int ret;
/* for instances that support it check for an event match first: */
percpu_ref_get(&req->ctx->refs);
/*
+	 * If we are using the signalfd wait_queue_head for this wakeup, then
+ * it's not safe to use TWA_SIGNAL as we could be recursing on the
+ * tsk->sighand->siglock on doing the wakeup. Should not be needed
+ * either, as the normal wakeup will suffice.
+ */
+ twa_signal_ok = (poll->head != &req->task->sighand->signalfd_wqh);
+
+ /*
* If this fails, then the task is exiting. When a task exits, the
* work gets canceled, so just cancel this request as well instead
* of executing it. We can't safely execute it anyway, as we may not
	 * have the state needed for it anyway.
*/
- ret = io_req_task_work_add(req, &req->task_work);
+ ret = io_req_task_work_add(req, &req->task_work, twa_signal_ok);
if (unlikely(ret)) {
struct task_struct *tsk;
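A minimal sketch of the notify-mode selection this parameter threads through, mirroring the io_req_task_work_add() hunk earlier in this section (the helper name here is hypothetical):

	static int queue_req_task_work(struct io_kiocb *req, bool twa_signal_ok)
	{
		struct task_struct *tsk = req->task;
		int notify = 0;

		/* TWA_SIGNAL takes tsk->sighand->siglock; avoid it when the
		 * wakeup already comes from the signalfd wait queue head. */
		if (!(req->ctx->flags & IORING_SETUP_SQPOLL) && twa_signal_ok)
			notify = TWA_SIGNAL;

		return task_work_add(tsk, &req->task_work, notify);
	}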
struct async_poll *apoll;
struct io_poll_table ipt;
__poll_t mask, ret;
+ int rw;
if (!req->file || !file_can_poll(req->file))
return false;
if (req->flags & REQ_F_POLLED)
return false;
- if (!def->pollin && !def->pollout)
+ if (def->pollin)
+ rw = READ;
+ else if (def->pollout)
+ rw = WRITE;
+ else
+ return false;
+ /* if we can't nonblock try, then no point in arming a poll handler */
+ if (!io_file_supports_async(req->file, rw))
return false;
apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
}
req->flags &= ~REQ_F_NEED_CLEANUP;
}
+
+ if (req->flags & REQ_F_INFLIGHT) {
+ struct io_ring_ctx *ctx = req->ctx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->inflight_lock, flags);
+ list_del(&req->inflight_entry);
+ if (waitqueue_active(&ctx->inflight_wait))
+ wake_up(&ctx->inflight_wait);
+ spin_unlock_irqrestore(&ctx->inflight_lock, flags);
+ req->flags &= ~REQ_F_INFLIGHT;
+ }
}
static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
index = i & IORING_FILE_TABLE_MASK;
if (table->files[index]) {
- file = io_file_from_index(ctx, index);
+ file = table->files[index];
err = io_queue_file_removal(data, file);
if (err)
break;
table->files[index] = file;
err = io_sqe_file_register(ctx, file, i);
if (err) {
+ table->files[index] = NULL;
fput(file);
break;
}
{
int ret;
- mmgrab(current->mm);
- ctx->sqo_mm = current->mm;
-
if (ctx->flags & IORING_SETUP_SQPOLL) {
ret = -EPERM;
if (!capable(CAP_SYS_ADMIN))
return 0;
err:
io_finish_async(ctx);
- if (ctx->sqo_mm) {
- mmdrop(ctx->sqo_mm);
- ctx->sqo_mm = NULL;
- }
return ret;
}
ACCT_LOCKED);
INIT_WORK(&ctx->exit_work, io_ring_exit_work);
- queue_work(system_wq, &ctx->exit_work);
+ /*
+ * Use system_unbound_wq to avoid spawning tons of event kworkers
+ * if we're exiting a ton of rings at the same time. It just adds
+	 * noise and overhead; there's no discernible change in runtime
+ * over using system_wq.
+ */
+ queue_work(system_unbound_wq, &ctx->exit_work);
}
static int io_uring_release(struct inode *inode, struct file *file)
return false;
}
+static inline bool io_match_files(struct io_kiocb *req,
+ struct files_struct *files)
+{
+ return (req->flags & REQ_F_WORK_INITIALIZED) && req->work.files == files;
+}
+
+static bool io_match_link_files(struct io_kiocb *req,
+ struct files_struct *files)
+{
+ struct io_kiocb *link;
+
+ if (io_match_files(req, files))
+ return true;
+ if (req->flags & REQ_F_LINK_HEAD) {
+ list_for_each_entry(link, &req->link_list, link_list) {
+ if (io_match_files(link, files))
+ return true;
+ }
+ }
+ return false;
+}
+
/*
* We're looking to cancel 'req' because it's holding on to our files, but
* 'req' could be a link to another request. See if it is, and cancel that
return found;
}
+static bool io_cancel_link_cb(struct io_wq_work *work, void *data)
+{
+ return io_match_link(container_of(work, struct io_kiocb, work), data);
+}
+
+static void io_attempt_cancel(struct io_ring_ctx *ctx, struct io_kiocb *req)
+{
+ enum io_wq_cancel cret;
+
+ /* cancel this particular work, if it's running */
+ cret = io_wq_cancel_work(ctx->io_wq, &req->work);
+ if (cret != IO_WQ_CANCEL_NOTFOUND)
+ return;
+
+ /* find links that hold this pending, cancel those */
+ cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_link_cb, req, true);
+ if (cret != IO_WQ_CANCEL_NOTFOUND)
+ return;
+
+ /* if we have a poll link holding this pending, cancel that */
+ if (io_poll_remove_link(ctx, req))
+ return;
+
+ /* final option, timeout link is holding this req pending */
+ io_timeout_remove_link(ctx, req);
+}
+
+static void io_cancel_defer_files(struct io_ring_ctx *ctx,
+ struct files_struct *files)
+{
+ struct io_defer_entry *de = NULL;
+ LIST_HEAD(list);
+
+ spin_lock_irq(&ctx->completion_lock);
+ list_for_each_entry_reverse(de, &ctx->defer_list, list) {
+ if (io_match_link_files(de->req, files)) {
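+			/*
+			 * list_cut_position() moves the leading run of
+			 * ctx->defer_list, up to and including de, onto the
+			 * local list; iterating in reverse means de is the
+			 * last matching entry, so everything queued before
+			 * it is cancelled along with it.
+			 */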
+ list_cut_position(&list, &ctx->defer_list, &de->list);
+ break;
+ }
+ }
+ spin_unlock_irq(&ctx->completion_lock);
+
+ while (!list_empty(&list)) {
+ de = list_first_entry(&list, struct io_defer_entry, list);
+ list_del_init(&de->list);
+ req_set_fail_links(de->req);
+ io_put_req(de->req);
+ io_req_complete(de->req, -ECANCELED);
+ kfree(de);
+ }
+}
+
static void io_uring_cancel_files(struct io_ring_ctx *ctx,
struct files_struct *files)
{
if (list_empty_careful(&ctx->inflight_list))
return;
+ io_cancel_defer_files(ctx, files);
/* cancel all at once, should be faster than doing it one by one*/
io_wq_cancel_cb(ctx->io_wq, io_wq_files_match, files, true);
/* We need to keep going until we don't find a matching req */
if (!cancel_req)
break;
-
- if (cancel_req->flags & REQ_F_OVERFLOW) {
- spin_lock_irq(&ctx->completion_lock);
- list_del(&cancel_req->compl.list);
- cancel_req->flags &= ~REQ_F_OVERFLOW;
-
- io_cqring_mark_overflow(ctx);
- WRITE_ONCE(ctx->rings->cq_overflow,
- atomic_inc_return(&ctx->cached_cq_overflow));
- io_commit_cqring(ctx);
- spin_unlock_irq(&ctx->completion_lock);
-
- /*
- * Put inflight ref and overflow ref. If that's
- * all we had, then we're done with this request.
- */
- if (refcount_sub_and_test(2, &cancel_req->refs)) {
- io_free_req(cancel_req);
- finish_wait(&ctx->inflight_wait, &wait);
- continue;
- }
- } else {
- io_wq_cancel_work(ctx->io_wq, &cancel_req->work);
- /* could be a link, check and remove if it is */
- if (!io_poll_remove_link(ctx, cancel_req))
- io_timeout_remove_link(ctx, cancel_req);
- io_put_req(cancel_req);
- }
-
+ /* cancel this request, or head link requests */
+ io_attempt_cancel(ctx, cancel_req);
+ io_put_req(cancel_req);
schedule();
finish_wait(&ctx->inflight_wait, &wait);
}
ctx->user = user;
ctx->creds = get_current_cred();
+ mmgrab(current->mm);
+ ctx->sqo_mm = current->mm;
+
/*
* Account memory _before_ installing the file descriptor. Once
* the descriptor is installed, it can get closed at any time. Also
SEEK_HOLE);
if (offset < 0)
return length;
- /* fall through */
+ fallthrough;
case IOMAP_HOLE:
*(loff_t *)data = offset;
return 0;
SEEK_DATA);
if (offset < 0)
return length;
- /*FALLTHRU*/
+ fallthrough;
default:
*(loff_t *)data = offset;
return 0;
* superblock as being NULL to prevent the journal destroy from writing
* back a bogus superblock.
*/
-static void journal_fail_superblock (journal_t *journal)
+static void journal_fail_superblock(journal_t *journal)
{
struct buffer_head *bh = journal->j_sb_buffer;
brelse(bh);
int ret;
/* Buffer got discarded which means block device got invalidated */
- if (!buffer_mapped(bh))
+ if (!buffer_mapped(bh)) {
+ unlock_buffer(bh);
return -EIO;
+ }
trace_jbd2_write_superblock(journal, write_flags);
if (!(journal->j_flags & JBD2_BARRIER))
/**
- * int jbd2_journal_check_used_features () - Check if features specified are used.
+ * int jbd2_journal_check_used_features() - Check if features specified are used.
* @journal: Journal to check.
* @compat: bitmask of compatible features
* @ro: bitmask of features that force read-only mount
* features. Return true (non-zero) if it does.
**/
-int jbd2_journal_check_used_features (journal_t *journal, unsigned long compat,
+int jbd2_journal_check_used_features(journal_t *journal, unsigned long compat,
unsigned long ro, unsigned long incompat)
{
journal_superblock_t *sb;
* all of a given set of features on this journal. Return true
* (non-zero) if it can. */
-int jbd2_journal_check_available_features (journal_t *journal, unsigned long compat,
+int jbd2_journal_check_available_features(journal_t *journal, unsigned long compat,
unsigned long ro, unsigned long incompat)
{
if (!compat && !ro && !incompat)
}
/**
- * int jbd2_journal_set_features () - Mark a given journal feature in the superblock
+ * int jbd2_journal_set_features() - Mark a given journal feature in the superblock
* @journal: Journal to act on.
* @compat: bitmask of compatible features
* @ro: bitmask of features that force read-only mount
*
*/
-int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
+int jbd2_journal_set_features(journal_t *journal, unsigned long compat,
unsigned long ro, unsigned long incompat)
{
#define INCOMPAT_FEATURE_ON(f) \
* number. */
if (pass == PASS_SCAN &&
jbd2_has_feature_checksum(journal)) {
- int chksum_err, chksum_seen;
struct commit_header *cbh =
(struct commit_header *)bh->b_data;
unsigned found_chksum =
be32_to_cpu(cbh->h_chksum[0]);
- chksum_err = chksum_seen = 0;
-
if (info->end_transaction) {
journal->j_failed_commit =
info->end_transaction;
break;
}
- if (crc32_sum == found_chksum &&
- cbh->h_chksum_type == JBD2_CRC32_CHKSUM &&
- cbh->h_chksum_size ==
- JBD2_CRC32_CHKSUM_SIZE)
- chksum_seen = 1;
- else if (!(cbh->h_chksum_type == 0 &&
- cbh->h_chksum_size == 0 &&
- found_chksum == 0 &&
- !chksum_seen))
- /*
- * If fs is mounted using an old kernel and then
- * kernel with journal_chksum is used then we
- * get a situation where the journal flag has
- * checksum flag set but checksums are not
- * present i.e chksum = 0, in the individual
- * commit blocks.
- * Hence to avoid checksum failures, in this
- * situation, this extra check is added.
- */
- chksum_err = 1;
-
- if (chksum_err) {
- info->end_transaction = next_commit_ID;
-
- if (!jbd2_has_feature_async_commit(journal)) {
- journal->j_failed_commit =
- next_commit_ID;
- brelse(bh);
- break;
- }
- }
+ /* Neither checksum match nor unused? */
+ if (!((crc32_sum == found_chksum &&
+ cbh->h_chksum_type ==
+ JBD2_CRC32_CHKSUM &&
+ cbh->h_chksum_size ==
+ JBD2_CRC32_CHKSUM_SIZE) ||
+ (cbh->h_chksum_type == 0 &&
+ cbh->h_chksum_size == 0 &&
+ found_chksum == 0)))
+ goto chksum_error;
+
crc32_sum = ~0;
}
if (pass == PASS_SCAN &&
!jbd2_commit_block_csum_verify(journal,
bh->b_data)) {
+ chksum_error:
info->end_transaction = next_commit_ID;
if (!jbd2_has_feature_async_commit(journal)) {
*/
static void __jbd2_journal_unfile_buffer(struct journal_head *jh)
{
+ J_ASSERT_JH(jh, jh->b_transaction != NULL);
+ J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
+
__jbd2_journal_temp_unlink_buffer(jh);
jh->b_transaction = NULL;
}
* int jbd2_journal_try_to_free_buffers() - try to free page buffers.
* @journal: journal for operation
* @page: to try and free
- * @gfp_mask: we use the mask to detect how hard should we try to release
- * buffers. If __GFP_DIRECT_RECLAIM and __GFP_FS is set, we wait for commit
- * code to release the buffers.
- *
*
* For all the buffers on this page,
* if they are fully written out ordered data, move them onto BUF_CLEAN
*
* Return 0 on failure, 1 on success
*/
-int jbd2_journal_try_to_free_buffers(journal_t *journal,
- struct page *page, gfp_t gfp_mask)
+int jbd2_journal_try_to_free_buffers(journal_t *journal, struct page *page)
{
struct buffer_head *head;
struct buffer_head *bh;
+ bool has_write_io_error = false;
int ret = 0;
J_ASSERT(PageLocked(page));
jbd2_journal_put_journal_head(jh);
if (buffer_jbd(bh))
goto busy;
+
+ /*
+ * If we free a metadata buffer which has been failed to
+ * write out, the jbd2 checkpoint procedure will not detect
+ * this failure and may lead to filesystem inconsistency
+ * after cleanup journal tail.
+ */
+ if (buffer_write_io_error(bh)) {
+ pr_err("JBD2: Error while async write back metadata bh %llu.",
+ (unsigned long long)bh->b_blocknr);
+ has_write_io_error = true;
+ }
} while ((bh = bh->b_this_page) != head);
ret = try_to_free_buffers(page);
busy:
+ if (has_write_io_error)
+ jbd2_journal_abort(journal, -EIO);
+
return ret;
}
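A hedged caller sketch for the new signature (a hypothetical filesystem hook; EXAMPLE_JOURNAL stands in for the fs's journal lookup). The gfp mask argument is gone because, per the removed kernel-doc above, it only selected whether to wait on the commit code:

	static int example_releasepage(struct page *page, gfp_t wait)
	{
		journal_t *journal = EXAMPLE_JOURNAL(page);	/* hypothetical */

		if (journal)
			return jbd2_journal_try_to_free_buffers(journal, page);
		return try_to_free_buffers(page);
	}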
was_dirty = test_clear_buffer_jbddirty(bh);
__jbd2_journal_temp_unlink_buffer(jh);
+
+ /*
+ * b_transaction must be set, otherwise the new b_transaction won't
+ * be holding jh reference
+ */
+ J_ASSERT_JH(jh, jh->b_transaction != NULL);
+
/*
* We set b_transaction here because b_next_transaction will inherit
* our jh reference and thus __jbd2_journal_file_buffer() must not
rdev = old_decode_dev(je16_to_cpu(jdev.old_id));
else
rdev = new_decode_dev(je32_to_cpu(jdev.new_id));
- /* fall through */
+ fallthrough;
case S_IFSOCK:
case S_IFIFO:
dbg_readinode("symlink's target '%s' cached\n", f->target);
}
- /* fall through... */
+ fallthrough;
case S_IFBLK:
case S_IFCHR:
switch (whence) {
case 1:
offset += file->f_pos;
- /* fall through */
+ fallthrough;
case 0:
if (offset >= 0)
break;
- /* fall through */
+ fallthrough;
default:
return -EINVAL;
}
switch (arg) {
case F_UNLCK:
fl->fl_flags &= ~FL_UNLOCK_PENDING;
- /* fall through */
+ fallthrough;
case F_RDLCK:
fl->fl_flags &= ~FL_DOWNGRADE_PENDING;
}
cmd = F_SETLKW;
file_lock->fl_flags |= FL_OFDLCK;
file_lock->fl_owner = filp;
- /* Fallthrough */
+ fallthrough;
case F_SETLKW:
file_lock->fl_flags |= FL_SLEEP;
}
cmd = F_SETLKW64;
file_lock->fl_flags |= FL_OFDLCK;
file_lock->fl_owner = filp;
- /* Fallthrough */
+ fallthrough;
case F_SETLKW64:
file_lock->fl_flags |= FL_SLEEP;
}
case -ENODEV:
/* Our extent block devices are unavailable */
set_bit(NFS_LSEG_UNAVAILABLE, &lseg->pls_flags);
- /* Fall through */
+ fallthrough;
case 0:
return lseg;
default:
/* A NFSv4 OPEN will revalidate later */
if (server->caps & NFS_CAP_ATOMIC_OPEN)
goto out;
- /* Fallthrough */
+ fallthrough;
case S_IFDIR:
if (server->flags & NFS_MOUNT_NOCTO)
break;
pnfs_error_mark_layout_for_return(inode, lseg);
pnfs_set_lo_fail(lseg);
rpc_wake_up(&tbl->slot_tbl_waitq);
- /* fall through */
+ fallthrough;
default:
reset:
dprintk("%s Retry through MDS. Error %d\n", __func__,
nfs4_delete_deviceid(devid->ld, devid->nfs_client,
&devid->deviceid);
rpc_wake_up(&tbl->slot_tbl_waitq);
- /* fall through */
+ fallthrough;
default:
if (ff_layout_avoid_mds_available_ds(lseg))
return -NFS4ERR_RESET_TO_PNFS;
*/
if (opnum == OP_READ)
break;
- /* Fallthrough */
+ fallthrough;
default:
pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode,
lseg);
switch (lookup_constant(nfs_xprt_protocol_tokens, param->string, -1)) {
case Opt_xprt_udp6:
protofamily = AF_INET6;
- /* fall through */
+ fallthrough;
case Opt_xprt_udp:
ctx->flags &= ~NFS_MOUNT_TCP;
ctx->nfs_server.protocol = XPRT_TRANSPORT_UDP;
break;
case Opt_xprt_tcp6:
protofamily = AF_INET6;
- /* fall through */
+ fallthrough;
case Opt_xprt_tcp:
ctx->flags |= NFS_MOUNT_TCP;
ctx->nfs_server.protocol = XPRT_TRANSPORT_TCP;
break;
case Opt_xprt_rdma6:
protofamily = AF_INET6;
- /* fall through */
+ fallthrough;
case Opt_xprt_rdma:
/* vector side protocols to TCP */
ctx->flags |= NFS_MOUNT_TCP;
switch (lookup_constant(nfs_xprt_protocol_tokens, param->string, -1)) {
case Opt_xprt_udp6:
mountfamily = AF_INET6;
- /* fall through */
+ fallthrough;
case Opt_xprt_udp:
ctx->mount_server.protocol = XPRT_TRANSPORT_UDP;
break;
case Opt_xprt_tcp6:
mountfamily = AF_INET6;
- /* fall through */
+ fallthrough;
case Opt_xprt_tcp:
ctx->mount_server.protocol = XPRT_TRANSPORT_TCP;
break;
ctx->version = NFS_DEFAULT_VERSION;
switch (data->version) {
case 1:
- data->namlen = 0; /* fall through */
+ data->namlen = 0;
+ fallthrough;
case 2:
- data->bsize = 0; /* fall through */
+ data->bsize = 0;
+ fallthrough;
case 3:
if (data->flags & NFS_MOUNT_VER3)
goto out_no_v3;
memcpy(data->root.data, data->old_root.data, NFS2_FHSIZE);
/* Turn off security negotiation */
extra_flags |= NFS_MOUNT_SECFLAVOUR;
- /* fall through */
+ fallthrough;
case 4:
if (data->flags & NFS_MOUNT_SECFLAVOUR)
goto out_no_sec;
- /* fall through */
+ fallthrough;
case 5:
memset(data->context, 0, sizeof(data->context));
- /* fall through */
+ fallthrough;
case 6:
if (data->flags & NFS_MOUNT_VER3) {
if (data->root.size > NFS3_FHSIZE || data->root.size == 0)
case -EPROTONOSUPPORT:
dprintk("NFS_V3_ACL extension not supported; disabling\n");
server->caps &= ~NFS_CAP_ACLS;
- /* fall through */
+ fallthrough;
case -ENOTSUPP:
status = -EOPNOTSUPP;
default:
dprintk("NFS_V3_ACL SETACL RPC not supported"
"(will not retry)\n");
server->caps &= ~NFS_CAP_ACLS;
- /* fall through */
+ fallthrough;
case -ENOTSUPP:
status = -EOPNOTSUPP;
}
ret = nfs42_proc_llseek(filep, offset, whence);
if (ret != -ENOTSUPP)
return ret;
- /* Fall through */
+ fallthrough;
default:
return nfs_file_llseek(filep, offset, whence);
}
switch (token) {
case Opt_find_uid:
im->im_type = IDMAP_TYPE_USER;
- /* Fall through */
+ fallthrough;
case Opt_find_gid:
im->im_conv = IDMAP_CONV_NAMETOID;
ret = match_strlcpy(im->im_name, &substr, IDMAP_NAMESZ);
case Opt_find_user:
im->im_type = IDMAP_TYPE_USER;
- /* Fall through */
+ fallthrough;
case Opt_find_group:
im->im_conv = IDMAP_CONV_IDTONAME;
ret = match_int(&substr, &im->im_id);
stateid);
goto wait_on_recovery;
}
- /* Fall through */
+ fallthrough;
case -NFS4ERR_OPENMODE:
if (inode) {
int err;
ret = -EBUSY;
break;
}
- /* Fall through */
+ fallthrough;
case -NFS4ERR_DELAY:
nfs_inc_server_stats(server, NFSIOS_DELAY);
- /* Fall through */
+ fallthrough;
case -NFS4ERR_GRACE:
case -NFS4ERR_LAYOUTTRYLATER:
case -NFS4ERR_RECALLCONFLICT:
case NFS4_OPEN_CLAIM_PREVIOUS:
if (!test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
break;
- /* Fall through */
+ fallthrough;
default:
return 0;
}
case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0];
- /* Fall through */
+ fallthrough;
case NFS4_OPEN_CLAIM_FH:
task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
}
nfs4_free_revoked_stateid(server,
&calldata->arg.stateid,
task->tk_msg.rpc_cred);
- /* Fallthrough */
+ fallthrough;
case -NFS4ERR_BAD_STATEID:
if (calldata->arg.fmode == 0)
break;
- /* Fallthrough */
+ fallthrough;
default:
task->tk_status = nfs4_async_handle_exception(task,
server, task->tk_status, &exception);
nfs4_free_revoked_stateid(data->res.server,
data->args.stateid,
task->tk_msg.rpc_cred);
- /* Fallthrough */
+ fallthrough;
case -NFS4ERR_BAD_STATEID:
case -NFS4ERR_STALE_STATEID:
case -ETIMEDOUT:
data->res.fattr = NULL;
goto out_restart;
}
- /* Fallthrough */
+ fallthrough;
default:
task->tk_status = nfs4_async_handle_exception(task,
data->res.server, task->tk_status,
if (nfs4_update_lock_stateid(calldata->lsp,
&calldata->res.stateid))
break;
- /* Fall through */
+ fallthrough;
case -NFS4ERR_ADMIN_REVOKED:
case -NFS4ERR_EXPIRED:
nfs4_free_revoked_stateid(calldata->server,
&calldata->arg.stateid,
task->tk_msg.rpc_cred);
- /* Fall through */
+ fallthrough;
case -NFS4ERR_BAD_STATEID:
case -NFS4ERR_STALE_STATEID:
if (nfs4_sync_lock_stateid(&calldata->arg.stateid,
dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status);
rpc_delay(task, NFS4_POLL_RETRY_MIN);
task->tk_status = 0;
- /* fall through */
+ fallthrough;
case -NFS4ERR_RETRY_UNCACHED_REP:
rpc_restart_call_prepare(task);
return;
switch(task->tk_status) {
case 0:
wake_up_all(&clp->cl_lock_waitq);
- /* Fallthrough */
+ fallthrough;
case -NFS4ERR_COMPLETE_ALREADY:
case -NFS4ERR_WRONG_CRED: /* What to do here? */
break;
case -NFS4ERR_DELAY:
rpc_delay(task, NFS4_POLL_RETRY_MAX);
- /* fall through */
+ fallthrough;
case -NFS4ERR_RETRY_UNCACHED_REP:
return -EAGAIN;
case -NFS4ERR_BADSESSION:
&lrp->args.range,
lrp->args.inode))
goto out_restart;
- /* Fallthrough */
+ fallthrough;
default:
task->tk_status = 0;
- /* Fallthrough */
+ fallthrough;
case 0:
break;
case -NFS4ERR_DELAY:
default:
pr_err("NFS: %s: unhandled error %d\n",
__func__, status);
- /* Fall through */
+ fallthrough;
case -ENOMEM:
case -NFS4ERR_DENIED:
case -NFS4ERR_RECLAIM_BAD:
break;
}
printk(KERN_ERR "NFS: %s: unhandled error %d\n", __func__, status);
- /* Fall through */
+ fallthrough;
case -ENOENT:
case -ENOMEM:
case -EACCES:
set_bit(ops->state_flag_bit, &state->flags);
break;
}
- /* Fall through */
+ fallthrough;
case -NFS4ERR_ADMIN_REVOKED:
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_OLD_STATEID:
case -NFS4ERR_EXPIRED:
case -NFS4ERR_NO_GRACE:
nfs4_state_mark_reclaim_nograce(sp->so_server->nfs_client, state);
- /* Fall through */
+ fallthrough;
case -NFS4ERR_STALE_CLIENTID:
case -NFS4ERR_BADSESSION:
case -NFS4ERR_BADSLOT:
case -ETIMEDOUT:
if (clnt->cl_softrtry)
break;
- /* Fall through */
+ fallthrough;
case -NFS4ERR_DELAY:
case -EAGAIN:
ssleep(1);
- /* Fall through */
+ fallthrough;
case -NFS4ERR_STALE_CLIENTID:
dprintk("NFS: %s after status %d, retrying\n",
__func__, status);
}
if (clnt->cl_auth->au_flavor == RPC_AUTH_UNIX)
break;
- /* Fall through */
+ fallthrough;
case -NFS4ERR_CLID_INUSE:
case -NFS4ERR_WRONGSEC:
/* No point in retrying if we already used RPC_AUTH_UNIX */
case FLUSH_COND_STABLE:
if (nfs_reqs_to_commit(cinfo))
break;
- /* fall through */
+ fallthrough;
default:
hdr->args.stable = NFS_FILE_SYNC;
}
case 0:
if (res->lrs_present)
res_stateid = &res->stateid;
- /* Fallthrough */
+ fallthrough;
default:
arg_stateid = &args->stateid;
}
break;
case ACL_MASK:
mask = pa;
- /* fall through */
+ fallthrough;
case ACL_OTHER:
break;
}
bex->soff = iomap.addr;
break;
}
- /*FALLTHRU*/
+ fallthrough;
case IOMAP_HOLE:
if (seg->iomode == IOMODE_READ) {
bex->es = PNFS_BLOCK_NONE_DATA;
break;
}
- /*FALLTHRU*/
+ fallthrough;
case IOMAP_DELALLOC:
default:
WARN(1, "pnfsd: filesystem returned %d extent\n", iomap.type);
break;
case -ESERVERFAULT:
++session->se_cb_seq_nr;
- /* Fall through */
+ fallthrough;
case 1:
case -NFS4ERR_BADSESSION:
nfsd4_mark_cb_fault(cb->cb_clp, cb->cb_seq_status);
		rpc_delay(task, HZ/100); /* 10 milliseconds */
return 0;
}
- /* Fallthrough */
+ fallthrough;
default:
/*
* Unknown error or non-responding client, we'll need to fence.
goto out;
open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
reclaim = true;
- /* fall through */
+ fallthrough;
case NFS4_OPEN_CLAIM_FH:
case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
status = do_open_fhandle(rqstp, cstate, open);
break;
default: /* checked by xdr code */
WARN_ON_ONCE(1);
- /* fall through */
+ fallthrough;
case SP4_SSV:
status = nfserr_encr_alg_unsupp;
goto out_nolock;
rpc_delay(task, 2 * HZ);
return 0;
}
- /*FALLTHRU*/
+ fallthrough;
default:
return 1;
}
if (!i_am_nfsd())
return NULL;
rqst = kthread_data(current);
+ if (!rqst->rq_lease_breaker)
+ return NULL;
clp = *(rqst->rq_lease_breaker);
return dl->dl_stid.sc_client == clp;
}
break;
default:
printk("unknown stateid type %x\n", s->sc_type);
- /* Fallthrough */
+ fallthrough;
case NFS4_CLOSED_STID:
case NFS4_CLOSED_DELEG_STID:
status = nfserr_bad_stateid;
case NFS4_READW_LT:
if (nfsd4_has_session(cstate))
fl_flags |= FL_SLEEP;
- /* Fallthrough */
+ fallthrough;
case NFS4_READ_LT:
spin_lock(&fp->fi_lock);
nf = find_readable_file_locked(fp);
case NFS4_WRITEW_LT:
if (nfsd4_has_session(cstate))
fl_flags |= FL_SLEEP;
- /* Fallthrough */
+ fallthrough;
case NFS4_WRITE_LT:
spin_lock(&fp->fi_lock);
nf = find_writeable_file_locked(fp);
break;
case FILE_LOCK_DEFERRED:
nbl = NULL;
- /* Fallthrough */
+ fallthrough;
case -EAGAIN: /* conflock holds conflicting lock */
status = nfserr_denied;
dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
case FSID_DEV:
if (!old_valid_dev(exp_sb(exp)->s_dev))
return false;
- /* FALL THROUGH */
+ fallthrough;
case FSID_MAJOR_MINOR:
case FSID_ENCODE_DEV:
return exp_sb(exp)->s_type->fs_flags & FS_REQUIRES_DEV;
case FSID_UUID16:
if (!is_root_export(exp))
return false;
- /* fall through */
+ fallthrough;
case FSID_UUID4_INUM:
case FSID_UUID16_INUM:
return exp->ex_uuid != NULL;
rdev = inode->i_rdev;
attr->ia_valid |= ATTR_SIZE;
- /* FALLTHROUGH */
+ fallthrough;
case S_IFIFO:
/* this is probably a permission check..
* at least IRIX implements perm checking on
case NFSD_TEST:
if (nn->nfsd_versions)
return nn->nfsd_versions[vers];
- /* Fallthrough */
+ fallthrough;
case NFSD_AVAIL:
return nfsd_support_version(vers);
}
*created = true;
break;
}
- /* fall through */
+ fallthrough;
case NFS4_CREATE_EXCLUSIVE4_1:
if ( d_inode(dchild)->i_mtime.tv_sec == v_mtime
&& d_inode(dchild)->i_atime.tv_sec == v_atime
*created = true;
goto set_attr;
}
- /* fall through */
+ fallthrough;
case NFS3_CREATE_GUARDED:
err = nfserr_exist;
}
break;
case NILFS_IFILE_INO:
lockdep_set_class(&bmap->b_sem, &nilfs_bmap_mdt_lock_key);
- /* Fall through */
+ fallthrough;
default:
bmap->b_ptr_type = NILFS_BMAP_PTR_VM;
bmap->b_last_allocated_key = 0;
!(flags & NILFS_SS_SYNDT))
goto try_next_pseg;
state = RF_DSYNC_ST;
- /* Fall through */
+ fallthrough;
case RF_DSYNC_ST:
if (!(flags & NILFS_SS_SYNDT))
goto confused;
nilfs_sc_cstage_set(sci, NILFS_ST_DAT);
goto dat_stage;
}
- nilfs_sc_cstage_inc(sci); /* Fall through */
+ nilfs_sc_cstage_inc(sci);
+ fallthrough;
case NILFS_ST_GC:
if (nilfs_doing_gc()) {
head = &sci->sc_gc_inodes;
}
sci->sc_stage.gc_inode_ptr = NULL;
}
- nilfs_sc_cstage_inc(sci); /* Fall through */
+ nilfs_sc_cstage_inc(sci);
+ fallthrough;
case NILFS_ST_FILE:
head = &sci->sc_dirty_files;
ii = list_prepare_entry(sci->sc_stage.dirty_file_ptr, head,
}
nilfs_sc_cstage_inc(sci);
sci->sc_stage.flags |= NILFS_CF_IFILE_STARTED;
- /* Fall through */
+ fallthrough;
case NILFS_ST_IFILE:
err = nilfs_segctor_scan_file(sci, sci->sc_root->ifile,
&nilfs_sc_file_ops);
err = nilfs_segctor_create_checkpoint(sci);
if (unlikely(err))
break;
- /* Fall through */
+ fallthrough;
case NILFS_ST_CPFILE:
err = nilfs_segctor_scan_file(sci, nilfs->ns_cpfile,
&nilfs_sc_file_ops);
if (unlikely(err))
break;
- nilfs_sc_cstage_inc(sci); /* Fall through */
+ nilfs_sc_cstage_inc(sci);
+ fallthrough;
case NILFS_ST_SUFILE:
err = nilfs_sufile_freev(nilfs->ns_sufile, sci->sc_freesegs,
sci->sc_nfreesegs, &ndone);
&nilfs_sc_file_ops);
if (unlikely(err))
break;
- nilfs_sc_cstage_inc(sci); /* Fall through */
+ nilfs_sc_cstage_inc(sci);
+ fallthrough;
case NILFS_ST_DAT:
dat_stage:
err = nilfs_segctor_scan_file(sci, nilfs->ns_dat,
nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
return 0;
}
- nilfs_sc_cstage_inc(sci); /* Fall through */
+ nilfs_sc_cstage_inc(sci);
+ fallthrough;
case NILFS_ST_SR:
if (mode == SC_LSEG_SR) {
/* Appending a super root */
}
switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
- case FAN_MARK_ADD: /* fallthrough */
+ case FAN_MARK_ADD:
case FAN_MARK_REMOVE:
if (!mask)
return -EINVAL;
default:
WARN_ON(o2nm_single_cluster->cl_fence_method >=
O2NM_FENCE_METHODS);
- /* fall through */
+ fallthrough;
case O2NM_FENCE_RESET:
printk(KERN_ERR "*** ocfs2 is very sorry to be fencing this "
"system by restarting ***\n");
readop = psz_ftrace_read;
break;
case PSTORE_TYPE_CONSOLE:
- fallthrough;
case PSTORE_TYPE_PMSG:
readop = psz_record_read;
break;
if ((type == USRQUOTA && uid_eq(current_euid(), make_kuid(current_user_ns(), id))) ||
(type == GRPQUOTA && in_egroup_p(make_kgid(current_user_ns(), id))))
break;
- /*FALLTHROUGH*/
+ fallthrough;
default:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
size_t limit;
limit = romfs_maxsize(sb);
- if (pos >= limit)
+ if (pos >= limit || buflen > limit - pos)
return -EIO;
- if (buflen > limit - pos)
- buflen = limit - pos;
#ifdef CONFIG_ROMFS_ON_MTD
if (sb->s_mtd)
switch (whence) {
case SEEK_CUR:
offset += file->f_pos;
- /* fall through */
+ fallthrough;
case SEEK_SET:
if (offset < 0)
break;
if (!nonblock)
break;
ret = -EAGAIN;
- /* fall through */
+ fallthrough;
default:
spin_unlock_irq(¤t->sighand->siglock);
return ret;
int error, i;
struct bio *bio;
- bio = bio_alloc(GFP_NOIO, page_count);
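+	/* bio_alloc() cannot back more than BIO_MAX_PAGES vecs; fall back
+	 * to a kmalloc'ed bio for larger page counts. */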
+ if (page_count <= BIO_MAX_PAGES)
+ bio = bio_alloc(GFP_NOIO, page_count);
+ else
+ bio = bio_kmalloc(GFP_NOIO, page_count);
+
if (!bio)
return -ENOMEM;
break;
/* No more room on heap so make it un-categorized */
cat = LPROPS_UNCAT;
- /* Fall through */
+ fallthrough;
case LPROPS_UNCAT:
list_add(&lprops->list, &c->uncat_list);
break;
case LPROPS_FREEABLE:
c->freeable_cnt -= 1;
ubifs_assert(c, c->freeable_cnt >= 0);
- /* Fall through */
+ fallthrough;
case LPROPS_UNCAT:
case LPROPS_EMPTY:
case LPROPS_FRDI_IDX:
elen += pc->lengthComponentIdent;
break;
}
- /* Fall through */
+ fallthrough;
case 2:
if (tolen == 0)
return -ENAMETOOLONG;
case UFS_ST_SUNOS:
if (fs32_to_cpu(sb, usb3->fs_postblformat) == UFS_42POSTBLFMT)
return fs32_to_cpu(sb, usb1->fs_u0.fs_sun.fs_state);
- /* Fall Through - to UFS_ST_SUN */
+ fallthrough; /* to UFS_ST_SUN */
case UFS_ST_SUN:
return fs32_to_cpu(sb, usb3->fs_un2.fs_sun.fs_state);
case UFS_ST_SUNx86:
usb1->fs_u0.fs_sun.fs_state = cpu_to_fs32(sb, value);
break;
}
- /* Fall Through - to UFS_ST_SUN */
+ fallthrough; /* to UFS_ST_SUN */
case UFS_ST_SUN:
usb3->fs_un2.fs_sun.fs_state = cpu_to_fs32(sb, value);
break;
case UFS_UID_EFT:
if (inode->ui_u1.oldids.ui_suid == 0xFFFF)
return fs32_to_cpu(sb, inode->ui_u3.ui_sun.ui_uid);
- /* Fall through */
+ fallthrough;
default:
return fs16_to_cpu(sb, inode->ui_u1.oldids.ui_suid);
}
inode->ui_u3.ui_sun.ui_uid = cpu_to_fs32(sb, value);
if (value > 0xFFFF)
value = 0xFFFF;
- /* Fall through */
+ fallthrough;
default:
inode->ui_u1.oldids.ui_suid = cpu_to_fs16(sb, value);
break;
case UFS_UID_EFT:
if (inode->ui_u1.oldids.ui_sgid == 0xFFFF)
return fs32_to_cpu(sb, inode->ui_u3.ui_sun.ui_gid);
- /* Fall through */
+ fallthrough;
default:
return fs16_to_cpu(sb, inode->ui_u1.oldids.ui_sgid);
}
inode->ui_u3.ui_sun.ui_gid = cpu_to_fs32(sb, value);
if (value > 0xFFFF)
value = 0xFFFF;
- /* Fall through */
+ fallthrough;
default:
inode->ui_u1.oldids.ui_sgid = cpu_to_fs16(sb, value);
break;
break;
case AT_STATX_FORCE_SYNC:
sf_i->force_restat = 1;
- /* fall-through */
+ fallthrough;
default:
err = vboxsf_inode_revalidate(dentry);
}
ASSERT(ifp->if_flags & XFS_IFINLINE);
}
xfs_idata_realloc(dp, sizeof(*hdr), XFS_ATTR_FORK);
- hdr = (xfs_attr_sf_hdr_t *)ifp->if_u1.if_data;
- hdr->count = 0;
+ hdr = (struct xfs_attr_sf_hdr *)ifp->if_u1.if_data;
+ memset(hdr, 0, sizeof(*hdr));
hdr->totsize = cpu_to_be16(sizeof(*hdr));
xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_ADATA);
}
* struct xfs_attr_sf_entry has a variable length.
* Check the fixed-offset parts of the structure are
* within the data buffer.
+ * xfs_attr_sf_entry is defined with a 1-byte variable
+ * array at the end, so we must subtract that off.
*/
- if (((char *)sfep + sizeof(*sfep)) >= endp)
+ if (((char *)sfep + sizeof(*sfep) - 1) >= endp)
return __this_address;
/* Don't allow names with known bad length. */
isrt = XFS_IS_REALTIME_INODE(ip);
endfsb = irec->br_startblock + irec->br_blockcount - 1;
- if (isrt) {
+ if (isrt && whichfork == XFS_DATA_FORK) {
if (!xfs_verify_rtbno(mp, irec->br_startblock))
return __this_address;
if (!xfs_verify_rtbno(mp, endfsb))
args.minalignslop = igeo->cluster_align - 1;
/* Allow space for the inode btree to split. */
- args.minleft = igeo->inobt_maxlevels - 1;
+ args.minleft = igeo->inobt_maxlevels;
if ((error = xfs_alloc_vextent(&args)))
return error;
/*
* Allow space for the inode btree to split.
*/
- args.minleft = igeo->inobt_maxlevels - 1;
+ args.minleft = igeo->inobt_maxlevels;
if ((error = xfs_alloc_vextent(&args)))
return error;
}
* to log the timestamps, or will clear already cleared fields in the
* worst case.
*/
- if (inode->i_state & (I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED)) {
+ if (inode->i_state & I_DIRTY_TIME) {
spin_lock(&inode->i_lock);
- inode->i_state &= ~(I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED);
+ inode->i_state &= ~I_DIRTY_TIME;
spin_unlock(&inode->i_lock);
}
#define XFS_IALLOC_SPACE_RES(mp) \
(M_IGEO(mp)->ialloc_blks + \
((xfs_sb_version_hasfinobt(&mp->m_sb) ? 2 : 1) * \
- (M_IGEO(mp)->inobt_maxlevels - 1)))
+ M_IGEO(mp)->inobt_maxlevels))
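Worked example with illustrative numbers (not from the source): with ialloc_blks == 16 and inobt_maxlevels == 3 on a filesystem that has the free inode btree, the reservation is now 16 + 2 * 3 = 22 blocks. The old "inobt_maxlevels - 1" term under-reserved by one block per inode btree, matching the minleft fixes in the allocator hunks above.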
/*
* Space reservation values for various transactions.
goto out_trans_cancel;
do {
- error = xfs_trans_roll_inode(&tp, ip);
+ error = xfs_defer_finish(&tp);
if (error)
goto out_trans_cancel;
return ret;
}
+static inline bool
+xfs_is_write_fault(
+ struct vm_fault *vmf)
+{
+ return (vmf->flags & FAULT_FLAG_WRITE) &&
+ (vmf->vma->vm_flags & VM_SHARED);
+}
+
static vm_fault_t
xfs_filemap_fault(
struct vm_fault *vmf)
/* DAX can shortcut the normal fault path on write faults! */
return __xfs_filemap_fault(vmf, PE_SIZE_PTE,
IS_DAX(file_inode(vmf->vma->vm_file)) &&
- (vmf->flags & FAULT_FLAG_WRITE));
+ xfs_is_write_fault(vmf));
}
static vm_fault_t
/* DAX can shortcut the normal fault path on write faults! */
return __xfs_filemap_fault(vmf, pe_size,
- (vmf->flags & FAULT_FLAG_WRITE));
+ xfs_is_write_fault(vmf));
}
static vm_fault_t
/* Slave address for the HDCP registers in the receiver */
#define DRM_HDCP_DDC_ADDR 0x3A
+/* Value to use at the end of the SHA-1 bytestream used for repeaters */
+#define DRM_HDCP_SHA1_TERMINATOR 0x80
+
/* HDCP register offsets for HDMI/DVI devices */
#define DRM_HDCP_DDC_BKSV 0x00
#define DRM_HDCP_DDC_RI_PRIME 0x08
* is 0, so no error checking is necessary
*/
#define DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, flags, ret) \
+ if (!drm_drv_uses_atomic_modeset(dev)) \
+ mutex_lock(&dev->mode_config.mutex); \
drm_modeset_acquire_init(&ctx, flags); \
modeset_lock_retry: \
ret = drm_modeset_lock_all_ctx(dev, &ctx); \
/**
* DRM_MODESET_LOCK_ALL_END - Helper to release and cleanup modeset locks
+ * @dev: drm device
* @ctx: local modeset acquire context, will be dereferenced
* @ret: local ret/err/etc variable to track error status
*
* to that failure. In both of these cases the code between BEGIN/END will not
* be run, so the failure will reflect the inability to grab the locks.
*/
-#define DRM_MODESET_LOCK_ALL_END(ctx, ret) \
+#define DRM_MODESET_LOCK_ALL_END(dev, ctx, ret) \
modeset_lock_fail: \
if (ret == -EDEADLK) { \
ret = drm_modeset_backoff(&ctx); \
goto modeset_lock_retry; \
} \
drm_modeset_drop_locks(&ctx); \
- drm_modeset_acquire_fini(&ctx);
+ drm_modeset_acquire_fini(&ctx); \
+ if (!drm_drv_uses_atomic_modeset(dev)) \
+ mutex_unlock(&dev->mode_config.mutex);
#endif /* DRM_MODESET_LOCK_H_ */
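A usage sketch for the updated macro pair (caller-side names are illustrative); note that DRM_MODESET_LOCK_ALL_END now takes the same dev as BEGIN so that non-atomic drivers can drop mode_config.mutex on the way out:

	struct drm_modeset_acquire_ctx ctx;
	int ret;

	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
	/* ... modeset state touched under every CRTC/plane lock ... */
	DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
	return ret;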
return true;
}
+static inline void bvec_iter_skip_zero_bvec(struct bvec_iter *iter)
+{
+ iter->bi_bvec_done = 0;
+ iter->bi_idx++;
+}
+
#define for_each_bvec(bvl, bio_vec, iter, start) \
for (iter = (start); \
(iter).bi_size && \
((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \
- bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len))
+ (bvl).bv_len ? (void)bvec_iter_advance((bio_vec), &(iter), \
+ (bvl).bv_len) : bvec_iter_skip_zero_bvec(&(iter)))
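A minimal consumer sketch under the updated macro (bvecs, start and consume() are hypothetical); the change matters because bvec_iter_advance() by zero bytes makes no progress, so a zero-length entry previously wedged the loop:

	struct bio_vec bv;
	struct bvec_iter iter;

	for_each_bvec(bv, bvecs, iter, start) {
		if (!bv.bv_len)
			continue;	/* empty entry is now stepped over */
		consume(bv.bv_page, bv.bv_offset, bv.bv_len);
	}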
/* for iterating one bio from start to end */
#define BVEC_ITER_ALL_INIT (struct bvec_iter) \
#define CEPH_FEATURE_INCARNATION_2 (1ull<<57) // CEPH_FEATURE_SERVER_JEWEL
#define DEFINE_CEPH_FEATURE(bit, incarnation, name) \
- static const uint64_t CEPH_FEATURE_##name = (1ULL<<bit); \
- static const uint64_t CEPH_FEATUREMASK_##name = \
+ static const uint64_t __maybe_unused CEPH_FEATURE_##name = (1ULL<<bit); \
+ static const uint64_t __maybe_unused CEPH_FEATUREMASK_##name = \
(1ULL<<bit | CEPH_FEATURE_INCARNATION_##incarnation);
/* this bit is ignored but still advertised by release *when* */
#define DEFINE_CEPH_FEATURE_DEPRECATED(bit, incarnation, name, when) \
- static const uint64_t DEPRECATED_CEPH_FEATURE_##name = (1ULL<<bit); \
- static const uint64_t DEPRECATED_CEPH_FEATUREMASK_##name = \
+ static const uint64_t __maybe_unused DEPRECATED_CEPH_FEATURE_##name = (1ULL<<bit); \
+ static const uint64_t __maybe_unused DEPRECATED_CEPH_FEATUREMASK_##name = \
(1ULL<<bit | CEPH_FEATURE_INCARNATION_##incarnation);
/*
compat_sigset_t v;
switch (_NSIG_WORDS) {
case 4: v.sig[7] = (set->sig[3] >> 32); v.sig[6] = set->sig[3];
- /* fall through */
+ fallthrough;
case 3: v.sig[5] = (set->sig[2] >> 32); v.sig[4] = set->sig[2];
- /* fall through */
+ fallthrough;
case 2: v.sig[3] = (set->sig[1] >> 32); v.sig[2] = set->sig[1];
- /* fall through */
+ fallthrough;
case 1: v.sig[1] = (set->sig[0] >> 32); v.sig[0] = set->sig[0];
}
return copy_to_user(compat, &v, size) ? -EFAULT : 0;
/*
* __has_attribute is supported on gcc >= 5, clang >= 2.9 and icc >= 17.
- * In the meantime, to support 4.6 <= gcc < 5, we implement __has_attribute
+ * In the meantime, to support gcc < 5, we implement __has_attribute
* by hand.
- *
- * sparse does not support __has_attribute (yet) and defines __GNUC_MINOR__
- * depending on the compiler used to build it; however, these attributes have
- * no semantic effects for sparse, so it does not matter. Also note that,
- * in order to avoid sparse's warnings, even the unsupported ones must be
- * defined to 0.
*/
#ifndef __has_attribute
# define __has_attribute(x) __GCC4_has_attribute_##x
# define __iomem __attribute__((noderef, address_space(__iomem)))
# define __percpu __attribute__((noderef, address_space(__percpu)))
# define __rcu __attribute__((noderef, address_space(__rcu)))
-extern void __chk_user_ptr(const volatile void __user *);
-extern void __chk_io_ptr(const volatile void __iomem *);
+static inline void __chk_user_ptr(const volatile void __user *ptr) { }
+static inline void __chk_io_ptr(const volatile void __iomem *ptr) { }
/* context/locking */
# define __must_hold(x) __attribute__((context(x,1,1)))
# define __acquires(x) __attribute__((context(x,0,1)))
case CPUFREQ_RELATION_C:
return cpufreq_table_find_index_c(policy, target_freq);
default:
- pr_err("%s: Invalid relation: %d\n", __func__, relation);
- return -EINVAL;
+ WARN_ON_ONCE(1);
+ return 0;
}
}
CPUHP_AP_MIPS_GIC_TIMER_STARTING,
CPUHP_AP_ARC_TIMER_STARTING,
CPUHP_AP_RISCV_TIMER_STARTING,
+ CPUHP_AP_CLINT_TIMER_STARTING,
CPUHP_AP_CSKY_TIMER_STARTING,
CPUHP_AP_HYPERV_TIMER_STARTING,
CPUHP_AP_KVM_STARTING,
};
/* Idle State Flags */
-#define CPUIDLE_FLAG_NONE (0x00)
-#define CPUIDLE_FLAG_POLLING BIT(0) /* polling state */
-#define CPUIDLE_FLAG_COUPLED BIT(1) /* state applies to multiple cpus */
-#define CPUIDLE_FLAG_TIMER_STOP BIT(2) /* timer is stopped on this state */
-#define CPUIDLE_FLAG_UNUSABLE BIT(3) /* avoid using this state */
-#define CPUIDLE_FLAG_OFF BIT(4) /* disable this state by default */
+#define CPUIDLE_FLAG_NONE (0x00)
+#define CPUIDLE_FLAG_POLLING BIT(0) /* polling state */
+#define CPUIDLE_FLAG_COUPLED BIT(1) /* state applies to multiple cpus */
+#define CPUIDLE_FLAG_TIMER_STOP BIT(2) /* timer is stopped on this state */
+#define CPUIDLE_FLAG_UNUSABLE BIT(3) /* avoid using this state */
+#define CPUIDLE_FLAG_OFF BIT(4) /* disable this state by default */
+#define CPUIDLE_FLAG_TLB_FLUSHED BIT(5) /* idle-state flushes TLBs */
struct cpuidle_device_kobj;
struct cpuidle_state_kobj;
}
u64 dma_direct_get_required_mask(struct device *dev);
-gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
- u64 *phys_mask);
-bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size);
void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp, unsigned long attrs);
void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
pgprot_t prot, const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size);
-void *dma_alloc_from_pool(struct device *dev, size_t size,
- struct page **ret_page, gfp_t flags);
+struct page *dma_alloc_from_pool(struct device *dev, size_t size,
+ void **cpu_addr, gfp_t flags,
+ bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t));
bool dma_free_from_pool(struct device *dev, void *start, size_t size);
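A hedged caller sketch for the new signature: the page is returned and the kernel address comes back through cpu_addr, while the predicate lets the allocator skip pool pages the device cannot address (a dma_coherent_ok()-style check is assumed visible to the caller):

	struct page *page;
	void *vaddr;

	page = dma_alloc_from_pool(dev, size, &vaddr, GFP_ATOMIC,
				   dma_coherent_ok);
	if (!page)
		return NULL;
	*dma_handle = phys_to_dma(dev, page_to_phys(page));	/* assumption */
	return vaddr;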
int
#endif
/**
- * syscall_enter_from_user_mode - Check and handle work before invoking
- * a syscall
+ * syscall_enter_from_user_mode_prepare - Establish state and enable interrupts
 * @regs:	Pointer to current's pt_regs
- * @syscall: The syscall number
*
* Invoked from architecture specific syscall entry code with interrupts
* disabled. The calling code has to be non-instrumentable. When the
- * function returns all state is correct and the subsequent functions can be
- * instrumented.
+ * function returns all state is correct, interrupts are enabled and the
+ * subsequent functions can be instrumented.
+ *
+ * This handles lockdep, RCU (context tracking) and tracing state.
+ *
+ * This is invoked when there is extra architecture specific functionality
+ * to be done between establishing state and handling user mode entry work.
+ */
+void syscall_enter_from_user_mode_prepare(struct pt_regs *regs);
+
+/**
+ * syscall_enter_from_user_mode_work - Check and handle work before invoking
+ * a syscall
+ * @regs:	Pointer to current's pt_regs
+ * @syscall: The syscall number
+ *
+ * Invoked from architecture specific syscall entry code with interrupts
+ * enabled after invoking syscall_enter_from_user_mode_prepare() and extra
+ * architecture specific work.
*
* Returns: The original or a modified syscall number
*
* syscall_set_return_value() first. If neither of those are called and -1
* is returned, then the syscall will fail with ENOSYS.
*
- * The following functionality is handled here:
+ * It handles the following work items:
*
- * 1) Establish state (lockdep, RCU (context tracking), tracing)
- * 2) TIF flag dependent invocations of arch_syscall_enter_tracehook(),
+ * 1) TIF flag dependent invocations of arch_syscall_enter_tracehook(),
* __secure_computing(), trace_sys_enter()
- * 3) Invocation of audit_syscall_entry()
+ * 2) Invocation of audit_syscall_entry()
+ */
+long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall);
+
+/**
+ * syscall_enter_from_user_mode - Establish state and check and handle work
+ * before invoking a syscall
+ * @regs:	Pointer to current's pt_regs
+ * @syscall: The syscall number
+ *
+ * Invoked from architecture specific syscall entry code with interrupts
+ * disabled. The calling code has to be non-instrumentable. When the
+ * function returns all state is correct, interrupts are enabled and the
+ * subsequent functions can be instrumented.
+ *
+ * This is a combination of syscall_enter_from_user_mode_prepare() and
+ * syscall_enter_from_user_mode_work().
+ *
+ * Returns: The original or a modified syscall number. See
+ * syscall_enter_from_user_mode_work() for further explanation.
*/
long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall);
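A hedged sketch of an architecture entry path using the split API documented above (arch_do_extra_entry_work() is hypothetical):

	static void arch_syscall_entry(struct pt_regs *regs, long nr)
	{
		long syscall;

		/* interrupts disabled, non-instrumentable up to here */
		syscall_enter_from_user_mode_prepare(regs);
		arch_do_extra_entry_work(regs);		/* hypothetical */
		syscall = syscall_enter_from_user_mode_work(regs, nr);
		/* ... dispatch syscall, then exit work ... */
	}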
BPF_ANCILLARY(RANDOM);
BPF_ANCILLARY(VLAN_TPID);
}
- /* Fallthrough. */
+ fallthrough;
default:
return ftest->code;
}
*
* I_DONTCACHE Evict inode as soon as it is not used anymore.
*
+ * I_SYNC_QUEUED Inode is queued in b_io or b_more_io writeback lists.
+ * Used to detect that mark_inode_dirty() should not move
+ * inode between dirty lists.
+ *
* Q: What is the difference between I_WILL_FREE and I_FREEING?
*/
#define I_DIRTY_SYNC (1 << 0)
#define I_DIO_WAKEUP (1 << __I_DIO_WAKEUP)
#define I_LINKABLE (1 << 10)
#define I_DIRTY_TIME (1 << 11)
-#define __I_DIRTY_TIME_EXPIRED 12
-#define I_DIRTY_TIME_EXPIRED (1 << __I_DIRTY_TIME_EXPIRED)
#define I_WB_SWITCH (1 << 13)
#define I_OVL_INUSE (1 << 14)
#define I_CREATING (1 << 15)
#define I_DONTCACHE (1 << 16)
+#define I_SYNC_QUEUED (1 << 17)
#define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC)
#define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES)
* @max: maximal valid usage->code to consider later (out parameter)
* @type: input event type (EV_KEY, EV_REL, ...)
* @c: code which corresponds to this usage and type
+ *
+ * The value pointed to by @bit will be set to NULL if either @type is
+ * an unhandled event type or @c is out of range for @type. This can be
+ * used as an error condition.
*/
static inline void hid_map_usage(struct hid_input *hidinput,
struct hid_usage *usage, unsigned long **bit, int *max,
- __u8 type, __u16 c)
+ __u8 type, unsigned int c)
{
struct input_dev *input = hidinput->input;
-
- usage->type = type;
- usage->code = c;
+ unsigned long *bmap = NULL;
+ unsigned int limit = 0;
switch (type) {
case EV_ABS:
- *bit = input->absbit;
- *max = ABS_MAX;
+ bmap = input->absbit;
+ limit = ABS_MAX;
break;
case EV_REL:
- *bit = input->relbit;
- *max = REL_MAX;
+ bmap = input->relbit;
+ limit = REL_MAX;
break;
case EV_KEY:
- *bit = input->keybit;
- *max = KEY_MAX;
+ bmap = input->keybit;
+ limit = KEY_MAX;
break;
case EV_LED:
- *bit = input->ledbit;
- *max = LED_MAX;
+ bmap = input->ledbit;
+ limit = LED_MAX;
break;
}
+
+ if (unlikely(c > limit || !bmap)) {
+ pr_warn_ratelimited("%s: Invalid code %d type %d\n",
+ input->name, c, type);
+ *bit = NULL;
+ return;
+ }
+
+ usage->type = type;
+ usage->code = c;
+ *max = limit;
+ *bit = bmap;
}
/**
__u8 type, __u16 c)
{
hid_map_usage(hidinput, usage, bit, max, type, c);
- clear_bit(c, *bit);
+ if (*bit)
+ clear_bit(usage->code, *bit);
}
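/*
 * Editor's sketch, not part of the patch: with the reworked
 * hid_map_usage(), callers must treat *bit == NULL as "mapping
 * rejected". A hypothetical driver mapping a key usage would do:
 */
static void example_map_key(struct hid_input *hidinput,
			    struct hid_usage *usage,
			    unsigned long **bit, int *max)
{
	hid_map_usage(hidinput, usage, bit, max, EV_KEY, KEY_PLAYPAUSE);
	if (!*bit)
		return;	/* unhandled type or out-of-range code */
	/* usage->type and usage->code are only valid on success */
}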
/**
DECLARE_PER_CPU(int, hardirqs_enabled);
DECLARE_PER_CPU(int, hardirq_context);
- extern void trace_hardirqs_on_prepare(void);
- extern void trace_hardirqs_off_finish(void);
- extern void trace_hardirqs_on(void);
- extern void trace_hardirqs_off(void);
-# define lockdep_hardirq_context() (this_cpu_read(hardirq_context))
+extern void trace_hardirqs_on_prepare(void);
+extern void trace_hardirqs_off_finish(void);
+extern void trace_hardirqs_on(void);
+extern void trace_hardirqs_off(void);
+
+# define lockdep_hardirq_context() (raw_cpu_read(hardirq_context))
# define lockdep_softirq_context(p) ((p)->softirq_context)
# define lockdep_hardirqs_enabled() (this_cpu_read(hardirqs_enabled))
# define lockdep_softirqs_enabled(p) ((p)->softirqs_enabled)
# define lockdep_hardirq_enter() \
do { \
- if (this_cpu_inc_return(hardirq_context) == 1) \
+ if (__this_cpu_inc_return(hardirq_context) == 1)\
current->hardirq_threaded = 0; \
} while (0)
# define lockdep_hardirq_threaded() \
} while (0)
# define lockdep_hardirq_exit() \
do { \
- this_cpu_dec(hardirq_context); \
+ __this_cpu_dec(hardirq_context); \
} while (0)
# define lockdep_softirq_enter() \
do { \
#else
# define trace_hardirqs_on_prepare() do { } while (0)
# define trace_hardirqs_off_finish() do { } while (0)
-# define trace_hardirqs_on() do { } while (0)
-# define trace_hardirqs_off() do { } while (0)
-# define lockdep_hardirq_context() 0
-# define lockdep_softirq_context(p) 0
-# define lockdep_hardirqs_enabled() 0
-# define lockdep_softirqs_enabled(p) 0
-# define lockdep_hardirq_enter() do { } while (0)
-# define lockdep_hardirq_threaded() do { } while (0)
-# define lockdep_hardirq_exit() do { } while (0)
-# define lockdep_softirq_enter() do { } while (0)
-# define lockdep_softirq_exit() do { } while (0)
+# define trace_hardirqs_on() do { } while (0)
+# define trace_hardirqs_off() do { } while (0)
+# define lockdep_hardirq_context() 0
+# define lockdep_softirq_context(p) 0
+# define lockdep_hardirqs_enabled() 0
+# define lockdep_softirqs_enabled(p) 0
+# define lockdep_hardirq_enter() do { } while (0)
+# define lockdep_hardirq_threaded() do { } while (0)
+# define lockdep_hardirq_exit() do { } while (0)
+# define lockdep_softirq_enter() do { } while (0)
+# define lockdep_softirq_exit() do { } while (0)
# define lockdep_hrtimer_enter(__hrtimer) false
# define lockdep_hrtimer_exit(__context) do { } while (0)
# define lockdep_posixtimer_enter() do { } while (0)
* if !TRACE_IRQFLAGS.
*/
#ifdef CONFIG_TRACE_IRQFLAGS
-#define local_irq_enable() \
- do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0)
-#define local_irq_disable() \
- do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0)
+
+#define local_irq_enable() \
+ do { \
+ trace_hardirqs_on(); \
+ raw_local_irq_enable(); \
+ } while (0)
+
+#define local_irq_disable() \
+ do { \
+ bool was_disabled = raw_irqs_disabled();\
+ raw_local_irq_disable(); \
+ if (!was_disabled) \
+ trace_hardirqs_off(); \
+ } while (0)
+
#define local_irq_save(flags) \
do { \
raw_local_irq_save(flags); \
- trace_hardirqs_off(); \
+ if (!raw_irqs_disabled_flags(flags)) \
+ trace_hardirqs_off(); \
} while (0)
-
#define local_irq_restore(flags) \
do { \
- if (raw_irqs_disabled_flags(flags)) { \
- raw_local_irq_restore(flags); \
- trace_hardirqs_off(); \
- } else { \
+ if (!raw_irqs_disabled_flags(flags)) \
trace_hardirqs_on(); \
- raw_local_irq_restore(flags); \
- } \
+ raw_local_irq_restore(flags); \
} while (0)
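/*
 * Editor's example, not from the patch: effect of the conditional
 * tracing above on a nested save/restore pair. Previously the inner
 * pair emitted redundant off/off transitions; now only real state
 * changes are traced.
 */
void nested_irq_save_example(void)
{
	unsigned long outer, inner;

	local_irq_save(outer);		/* traces hardirqs off: was enabled */
	local_irq_save(inner);		/* no trace: already disabled */
	local_irq_restore(inner);	/* no trace: stays disabled */
	local_irq_restore(outer);	/* traces hardirqs on */
}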
#define safe_halt() \
#define local_irq_enable() do { raw_local_irq_enable(); } while (0)
#define local_irq_disable() do { raw_local_irq_disable(); } while (0)
-#define local_irq_save(flags) \
- do { \
- raw_local_irq_save(flags); \
- } while (0)
+#define local_irq_save(flags) do { raw_local_irq_save(flags); } while (0)
#define local_irq_restore(flags) do { raw_local_irq_restore(flags); } while (0)
#define safe_halt() do { raw_safe_halt(); } while (0)
extern int jbd2_journal_forget (handle_t *, struct buffer_head *);
extern int jbd2_journal_invalidatepage(journal_t *,
struct page *, unsigned int, unsigned int);
-extern int jbd2_journal_try_to_free_buffers(journal_t *, struct page *, gfp_t);
+extern int jbd2_journal_try_to_free_buffers(journal_t *journal, struct page *page);
extern int jbd2_journal_stop(handle_t *);
extern int jbd2_journal_flush (journal_t *);
extern void jbd2_journal_lock_updates (journal_t *);
}
/* Last block: affect all 32 bits of (c) */
switch (length) {
- case 12: c += (u32)k[11]<<24; /* fall through */
- case 11: c += (u32)k[10]<<16; /* fall through */
- case 10: c += (u32)k[9]<<8; /* fall through */
- case 9: c += k[8]; /* fall through */
- case 8: b += (u32)k[7]<<24; /* fall through */
- case 7: b += (u32)k[6]<<16; /* fall through */
- case 6: b += (u32)k[5]<<8; /* fall through */
- case 5: b += k[4]; /* fall through */
- case 4: a += (u32)k[3]<<24; /* fall through */
- case 3: a += (u32)k[2]<<16; /* fall through */
- case 2: a += (u32)k[1]<<8; /* fall through */
+ case 12: c += (u32)k[11]<<24; fallthrough;
+ case 11: c += (u32)k[10]<<16; fallthrough;
+ case 10: c += (u32)k[9]<<8; fallthrough;
+ case 9: c += k[8]; fallthrough;
+ case 8: b += (u32)k[7]<<24; fallthrough;
+ case 7: b += (u32)k[6]<<16; fallthrough;
+ case 6: b += (u32)k[5]<<8; fallthrough;
+ case 5: b += k[4]; fallthrough;
+ case 4: a += (u32)k[3]<<24; fallthrough;
+ case 3: a += (u32)k[2]<<16; fallthrough;
+ case 2: a += (u32)k[1]<<8; fallthrough;
case 1: a += k[0];
__jhash_final(a, b, c);
case 0: /* Nothing left to add */
/* Handle the last 3 u32's */
switch (length) {
- case 3: c += k[2]; /* fall through */
- case 2: b += k[1]; /* fall through */
+ case 3: c += k[2]; fallthrough;
+ case 2: b += k[1]; fallthrough;
case 1: a += k[0];
__jhash_final(a, b, c);
case 0: /* Nothing left to add */
* lower_32_bits - return bits 0-31 of a number
* @n: the number we're accessing
*/
-#define lower_32_bits(n) ((u32)(n))
+#define lower_32_bits(n) ((u32)((n) & 0xffffffff))
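/*
 * Editor's note, not from the patch: both forms of lower_32_bits()
 * truncate identically; the explicit mask documents the intent and
 * avoids implicit-truncation warnings from some checkers. Sketch:
 */
static inline u32 lower_32_bits_example(void)
{
	u64 v = 0x123456789abcdef0ULL;

	return lower_32_bits(v);	/* yields 0x9abcdef0 */
}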
struct completion;
struct pt_regs;
void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
void ksm_migrate_page(struct page *newpage, struct page *oldpage);
-bool reuse_ksm_page(struct page *page,
- struct vm_area_struct *vma, unsigned long address);
#else /* !CONFIG_KSM */
static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
{
}
-static inline bool reuse_ksm_page(struct page *page,
- struct vm_area_struct *vma, unsigned long address)
-{
- return false;
-}
#endif /* CONFIG_MMU */
#endif /* !CONFIG_KSM */
ATA_HORKAGE_NO_DMA_LOG = (1 << 23), /* don't use DMA for log read */
ATA_HORKAGE_NOTRIM = (1 << 24), /* don't use TRIM */
ATA_HORKAGE_MAX_SEC_1024 = (1 << 25), /* Limit max sects to 1024 */
+ ATA_HORKAGE_MAX_TRIM_128M = (1 << 26), /* Limit max trim size to 128M */
/* DMA mask for user DMA control: User visible values; DO NOT
renumber */
DECLARE_PER_CPU(int, hardirqs_enabled);
DECLARE_PER_CPU(int, hardirq_context);
+/*
+ * The below lockdep_assert_*() macros use raw_cpu_read() to access the above
+ * per-cpu variables. This is required because this_cpu_read() will potentially
+ * call into preempt/irq-disable and that obviously isn't right. This is also
+ * correct because when IRQs are enabled, it doesn't matter if we accidentally
+ * read the value from our previous CPU.
+ */
+
#define lockdep_assert_irqs_enabled() \
do { \
- WARN_ON_ONCE(debug_locks && !this_cpu_read(hardirqs_enabled)); \
+ WARN_ON_ONCE(debug_locks && !raw_cpu_read(hardirqs_enabled)); \
} while (0)
#define lockdep_assert_irqs_disabled() \
do { \
- WARN_ON_ONCE(debug_locks && this_cpu_read(hardirqs_enabled)); \
+ WARN_ON_ONCE(debug_locks && raw_cpu_read(hardirqs_enabled)); \
} while (0)
#define lockdep_assert_in_irq() \
do { \
- WARN_ON_ONCE(debug_locks && !this_cpu_read(hardirq_context)); \
+ WARN_ON_ONCE(debug_locks && !raw_cpu_read(hardirq_context)); \
} while (0)
#define lockdep_assert_preemption_enabled() \
WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT) && \
debug_locks && \
(preempt_count() != 0 || \
- !this_cpu_read(hardirqs_enabled))); \
+ !raw_cpu_read(hardirqs_enabled))); \
} while (0)
#define lockdep_assert_preemption_disabled() \
WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT) && \
debug_locks && \
(preempt_count() == 0 && \
- this_cpu_read(hardirqs_enabled))); \
+ raw_cpu_read(hardirqs_enabled))); \
} while (0)
#else
#define roundup_pow_of_two(n) \
( \
__builtin_constant_p(n) ? ( \
- (n == 1) ? 1 : \
+ ((n) == 1) ? 1 : \
(1UL << (ilog2((n) - 1) + 1)) \
) : \
__roundup_pow_of_two(n) \
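/*
 * Editor's note, not from the patch: the added parentheses matter for
 * arguments built from low-precedence operators. Without them,
 *
 *	roundup_pow_of_two(x & 6)
 *
 * expanded to "(x & 6 == 1) ? ...", which parses as "x & (6 == 1)"
 * because == binds tighter than &. With ((n) == 1) the comparison
 * applies to the whole argument, as intended.
 */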
* wakeup is used to coordinate physical address space management (ex:
* fs truncate/hole punch) vs pinned pages (ex: device dma).
*
- * MEMORY_DEVICE_DEVDAX:
+ * MEMORY_DEVICE_GENERIC:
* Host memory that has similar access semantics as System RAM i.e. DMA
- * coherent and supports page pinning. In contrast to
- * MEMORY_DEVICE_FS_DAX, this memory is access via a device-dax
- * character device.
+ * coherent and supports page pinning. This is for example used by DAX devices
+ * that expose memory using a character device.
*
* MEMORY_DEVICE_PCI_P2PDMA:
* Device memory residing in a PCI BAR intended for use with Peer-to-Peer
/* 0 is reserved to catch uninitialized type fields */
MEMORY_DEVICE_PRIVATE = 1,
MEMORY_DEVICE_FS_DAX,
- MEMORY_DEVICE_DEVDAX,
+ MEMORY_DEVICE_GENERIC,
MEMORY_DEVICE_PCI_P2PDMA,
};
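/*
 * Editor's sketch, not part of the patch: a driver exposing DMA
 * coherent device memory through a character device would now tag its
 * pagemap with the renamed type. Resource setup is elided; treat this
 * as illustrative only.
 */
static int example_register_devmem(struct device *dev,
				   struct dev_pagemap *pgmap)
{
	pgmap->type = MEMORY_DEVICE_GENERIC;	/* was MEMORY_DEVICE_DEVDAX */
	return PTR_ERR_OR_ZERO(devm_memremap_pages(dev, pgmap));
}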
switch (sizeof(struct page)) {
case 80:
- _pp[9] = 0; /* fallthrough */
+ _pp[9] = 0;
+ fallthrough;
case 72:
- _pp[8] = 0; /* fallthrough */
+ _pp[8] = 0;
+ fallthrough;
case 64:
- _pp[7] = 0; /* fallthrough */
+ _pp[7] = 0;
+ fallthrough;
case 56:
_pp[6] = 0;
_pp[5] = 0;
#if defined(CONFIG_X86)
# define VM_PAT VM_ARCH_1 /* PAT reserves whole VMA at once (x86) */
+#elif defined(CONFIG_PPC)
+# define VM_SAO VM_ARCH_1 /* Strong Access Ordering (powerpc) */
#elif defined(CONFIG_PARISC)
# define VM_GROWSUP VM_ARCH_1
#elif defined(CONFIG_IA64)
#define _LINUX_MMU_CONTEXT_H
#include <asm/mmu_context.h>
+#include <asm/mmu.h>
/* Architectures that care about IRQ state in switch_mm can override this. */
#ifndef switch_mm_irqs_off
# define switch_mm_irqs_off switch_mm
#endif
+#ifndef leave_mm
+static inline void leave_mm(int cpu) { }
+#endif
+
#endif
enum sctp_conntrack state;
__be32 vtag[IP_CT_DIR_MAX];
+ u8 last_dir;
+ u8 flags;
};
#endif /* _NF_CONNTRACK_SCTP_H */
int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid,
unsigned int group, int echo, gfp_t flags);
int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error);
-int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid,
- int flags);
+int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid);
static inline u16 nfnl_msg_type(u8 subsys, u8 msg_type)
{
int (*output)(struct net *, struct sock *, struct sk_buff *));
int (*reroute)(struct sk_buff *skb, const struct nf_queue_entry *entry);
#if IS_MODULE(CONFIG_IPV6)
- int (*br_defrag)(struct net *net, struct sk_buff *skb, u32 user);
int (*br_fragment)(struct net *net, struct sock *sk,
struct sk_buff *skb,
struct nf_bridge_frag_data *data,
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
-static inline int nf_ipv6_br_defrag(struct net *net, struct sk_buff *skb,
- u32 user)
-{
-#if IS_MODULE(CONFIG_IPV6)
- const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops();
-
- if (!v6_ops)
- return 1;
-
- return v6_ops->br_defrag(net, skb, user);
-#elif IS_BUILTIN(CONFIG_IPV6)
- return nf_ct_frag6_gather(net, skb, user);
-#else
- return 1;
-#endif
-}
-
int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
struct nf_bridge_frag_data *data,
int (*output)(struct net *, struct sock *sk,
* a shortcut which implies the use of the kernel's pgd, instead
* of a process's
*/
+#ifndef pgd_offset_k
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
+#endif
/*
* In many cases it is known that a virtual address is mapped at PMD or PTE
* For most 10GBASE-R, there is no advertisement.
*/
int pcs_config(struct phylink_pcs *pcs, unsigned int mode,
- phy_interface_t interface, const unsigned long *advertising);
+ phy_interface_t interface, const unsigned long *advertising,
+ bool permit_pause_to_mac);
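/*
 * Editor's sketch (hypothetical PCS driver, not from the patch): an
 * implementation simply gains the new flag telling it whether pause
 * frames may be forwarded to the MAC.
 */
static int example_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
			      phy_interface_t interface,
			      const unsigned long *advertising,
			      bool permit_pause_to_mac)
{
	/* program the PCS; honour permit_pause_to_mac when configuring
	 * pause advertisement and forwarding */
	return 0;
}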
/**
* pcs_an_restart() - restart 802.3z BaseX autonegotiation
*
* Return: 1 if @p is an idle task. 0 otherwise.
*/
-static inline bool is_idle_task(const struct task_struct *p)
+static __always_inline bool is_idle_task(const struct task_struct *p)
{
return !!(p->flags & PF_IDLE);
}
defined(CONFIG_NET) || defined(CONFIG_IO_URING)
atomic_long_t locked_vm;
#endif
+#ifdef CONFIG_WATCH_QUEUE
+ atomic_t nr_watches; /* The number of watches this user currently has */
+#endif
/* Miscellaneous per-user rate limit */
struct ratelimit_state ratelimit;
b3 = b->sig[3]; b2 = b->sig[2]; \
r->sig[3] = op(a3, b3); \
r->sig[2] = op(a2, b2); \
- /* fall through */ \
+ fallthrough; \
case 2: \
a1 = a->sig[1]; b1 = b->sig[1]; \
r->sig[1] = op(a1, b1); \
- /* fall through */ \
+ fallthrough; \
case 1: \
a0 = a->sig[0]; b0 = b->sig[0]; \
r->sig[0] = op(a0, b0); \
switch (_NSIG_WORDS) { \
case 4: set->sig[3] = op(set->sig[3]); \
set->sig[2] = op(set->sig[2]); \
- /* fall through */ \
+ fallthrough; \
case 2: set->sig[1] = op(set->sig[1]); \
- /* fall through */ \
+ fallthrough; \
case 1: set->sig[0] = op(set->sig[0]); \
break; \
default: \
memset(set, 0, sizeof(sigset_t));
break;
case 2: set->sig[1] = 0;
- /* fall through */
+ fallthrough;
case 1: set->sig[0] = 0;
break;
}
memset(set, -1, sizeof(sigset_t));
break;
case 2: set->sig[1] = -1;
- /* fall through */
+ fallthrough;
case 1: set->sig[0] = -1;
break;
}
* NETIF_F_IPV6_CSUM - Driver (device) is only able to checksum plain
* TCP or UDP packets over IPv6. These are specifically
* unencapsulated packets of the form IPv6|TCP or
- * IPv4|UDP where the Next Header field in the IPv6
+ * IPv6|UDP where the Next Header field in the IPv6
* header is either TCP or UDP. IPv6 extension headers
* are not supported with this feature. This feature
* cannot be set in features for a device with
void kfree_skb_list(struct sk_buff *segs);
void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt);
void skb_tx_error(struct sk_buff *skb);
+
+#ifdef CONFIG_TRACEPOINTS
void consume_skb(struct sk_buff *skb);
+#else
+static inline void consume_skb(struct sk_buff *skb)
+{
+ return kfree_skb(skb);
+}
+#endif
+
void __consume_stateless_skb(struct sk_buff *skb);
void __kfree_skb(struct sk_buff *skb);
extern struct kmem_cache *skbuff_head_cache;
*
* Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS)
* to reduce average number of cache lines per packet.
- * get_rps_cpus() for example only access one 64 bytes aligned block :
+ * get_rps_cpu() for example only accesses one 64-byte aligned block:
* NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
*/
#ifndef NET_SKB_PAD
#define __it(x, op) (x -= sizeof(u##op))
#define __it_diff(a, b, op) (*(u##op *)__it(a, op)) ^ (*(u##op *)__it(b, op))
case 32: diffs |= __it_diff(a, b, 64);
- /* fall through */
+ fallthrough;
case 24: diffs |= __it_diff(a, b, 64);
- /* fall through */
+ fallthrough;
case 16: diffs |= __it_diff(a, b, 64);
- /* fall through */
+ fallthrough;
case 8: diffs |= __it_diff(a, b, 64);
break;
case 28: diffs |= __it_diff(a, b, 64);
- /* fall through */
+ fallthrough;
case 20: diffs |= __it_diff(a, b, 64);
- /* fall through */
+ fallthrough;
case 12: diffs |= __it_diff(a, b, 64);
- /* fall through */
+ fallthrough;
case 4: diffs |= __it_diff(a, b, 32);
break;
}
u16 *range_start, u16 *range_num);
};
+#define TI_SCI_RESASG_SUBTYPE_IR_OUTPUT 0
+#define TI_SCI_RESASG_SUBTYPE_IA_VINT 0xa
+#define TI_SCI_RESASG_SUBTYPE_GLOBAL_EVENT_SEVT 0xd
/**
* struct ti_sci_rm_irq_ops: IRQ management operations
* @set_irq: Set an IRQ route between the requested source
struct ti_sci_resource *
devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
struct device *dev, u32 dev_id, char *of_prop);
+struct ti_sci_resource *
+devm_ti_sci_get_resource(const struct ti_sci_handle *handle, struct device *dev,
+ u32 dev_id, u32 sub_type);
#else /* CONFIG_TI_SCI_PROTOCOL */
{
return ERR_PTR(-EINVAL);
}
+
+static inline struct ti_sci_resource *
+devm_ti_sci_get_resource(const struct ti_sci_handle *handle, struct device *dev,
+			 u32 dev_id, u32 sub_type)
+{
+ return ERR_PTR(-EINVAL);
+}
#endif /* CONFIG_TI_SCI_PROTOCOL */
#endif /* __TISCI_PROTOCOL_H */
PGFAULT, PGMAJFAULT,
PGLAZYFREED,
PGREFILL,
+ PGREUSE,
PGSTEAL_KSWAPD,
PGSTEAL_DIRECT,
PGSCAN_KSWAPD,
\
case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_ZERO): \
R##_e = X##_e; \
- /* Fall through */ \
+ fallthrough; \
case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_NORMAL): \
case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_INF): \
case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_ZERO): \
\
case _FP_CLS_COMBINE(FP_CLS_ZERO,FP_CLS_NORMAL): \
R##_e = Y##_e; \
- /* Fall through */ \
+ fallthrough; \
case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_NAN): \
case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NAN): \
case _FP_CLS_COMBINE(FP_CLS_ZERO,FP_CLS_NAN): \
case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_INF): \
case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_ZERO): \
R##_s = X##_s; \
- /* Fall through */ \
+ fallthrough; \
\
case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_INF): \
case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NORMAL): \
case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NAN): \
case _FP_CLS_COMBINE(FP_CLS_ZERO,FP_CLS_NAN): \
R##_s = Y##_s; \
- /* Fall through */ \
+ fallthrough; \
\
case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_INF): \
case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_ZERO): \
\
case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_ZERO): \
FP_SET_EXCEPTION(FP_EX_DIVZERO); \
- /* Fall through */ \
+ fallthrough; \
case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_ZERO): \
case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NORMAL): \
R##_c = FP_CLS_INF; \
int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev);
-struct net_device *ipv6_dev_find(struct net *net, const struct in6_addr *addr);
+struct net_device *ipv6_dev_find(struct net *net, const struct in6_addr *addr,
+ struct net_device *dev);
struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net,
const struct in6_addr *addr,
void rxrpc_kernel_end_call(struct socket *, struct rxrpc_call *);
void rxrpc_kernel_get_peer(struct socket *, struct rxrpc_call *,
struct sockaddr_rxrpc *);
-u32 rxrpc_kernel_get_srtt(struct socket *, struct rxrpc_call *);
+bool rxrpc_kernel_get_srtt(struct socket *, struct rxrpc_call *, u32 *);
int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t,
rxrpc_user_attach_call_t, unsigned long, gfp_t,
unsigned int);
#ifdef CONFIG_SYSCTL
int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos);
+ void *buffer, size_t *lenp, loff_t *ppos);
int ndisc_ifinfo_sysctl_strategy(struct ctl_table *ctl,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen);
static inline void nft_data_copy(u32 *dst, const struct nft_data *src,
unsigned int len)
{
+ if (len % NFT_REG32_SIZE)
+ dst[len / NFT_REG32_SIZE] = 0;
memcpy(dst, src, len);
}
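/*
 * Editor's note, not from the patch: the zeroing above matters when
 * len is not a multiple of NFT_REG32_SIZE (4). Example: len == 6
 * writes all of dst[0] but only two bytes of dst[1]; clearing dst[1]
 * first prevents stale register bytes from leaking into the result.
 */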
#define CTOP_INST_MOV2B_FLIP_R3_B1_B2_INST 0x5B60
#define CTOP_INST_MOV2B_FLIP_R3_B1_B2_LIMM 0x00010422
+#ifndef AUX_IENABLE
+#define AUX_IENABLE 0x40c
+#endif
+
+#define CTOP_AUX_IACK (0xFFFFF800 + 0x088)
+
#ifndef __ASSEMBLY__
/* In order to increase compilation test coverage */
);
TRACE_EVENT(ext4_discard_preallocations,
- TP_PROTO(struct inode *inode),
+ TP_PROTO(struct inode *inode, unsigned int len, unsigned int needed),
- TP_ARGS(inode),
+ TP_ARGS(inode, len, needed),
TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ __field( unsigned int, len )
+ __field( unsigned int, needed )
),
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
+ __entry->len = len;
+ __entry->needed = needed;
),
- TP_printk("dev %d,%d ino %lu",
+ TP_printk("dev %d,%d ino %lu len: %u needed %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino)
+ (unsigned long) __entry->ino, __entry->len,
+ __entry->needed)
);
TRACE_EVENT(ext4_mb_discard_preallocations,
TP_ARGS(sb, group)
);
-DEFINE_EVENT(ext4__bitmap_load, ext4_read_block_bitmap_load,
+DEFINE_EVENT(ext4__bitmap_load, ext4_load_inode_bitmap,
TP_PROTO(struct super_block *sb, unsigned long group),
TP_ARGS(sb, group)
);
-DEFINE_EVENT(ext4__bitmap_load, ext4_load_inode_bitmap,
+TRACE_EVENT(ext4_read_block_bitmap_load,
-	TP_PROTO(struct super_block *sb, unsigned long group),
+	TP_PROTO(struct super_block *sb, unsigned long group, bool prefetch),
-	TP_ARGS(sb, group)
+	TP_ARGS(sb, group, prefetch),
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( __u32, group )
+ __field( bool, prefetch )
+
+ ),
+
+ TP_fast_assign(
+ __entry->dev = sb->s_dev;
+ __entry->group = group;
+ __entry->prefetch = prefetch;
+ ),
+
+ TP_printk("dev %d,%d group %u prefetch %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->group, __entry->prefetch)
);
TRACE_EVENT(ext4_direct_IO_enter,
__entry->function, __entry->line)
);
+TRACE_EVENT(ext4_prefetch_bitmaps,
+ TP_PROTO(struct super_block *sb, ext4_group_t group,
+ ext4_group_t next, unsigned int prefetch_ios),
+
+ TP_ARGS(sb, group, next, prefetch_ios),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( __u32, group )
+ __field( __u32, next )
+ __field( __u32, ios )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = sb->s_dev;
+ __entry->group = group;
+ __entry->next = next;
+ __entry->ios = prefetch_ios;
+ ),
+
+ TP_printk("dev %d,%d group %u next %u ios %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->group, __entry->next, __entry->ios)
+);
+
+TRACE_EVENT(ext4_lazy_itable_init,
+ TP_PROTO(struct super_block *sb, ext4_group_t group),
+
+ TP_ARGS(sb, group),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( __u32, group )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = sb->s_dev;
+ __entry->group = group;
+ ),
+
+ TP_printk("dev %d,%d group %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->group)
+);
+
#endif /* _TRACE_EXT4_H */
/* This part must be outside protection */
#if defined(CONFIG_X86)
#define __VM_ARCH_SPECIFIC_1 {VM_PAT, "pat" }
+#elif defined(CONFIG_PPC)
+#define __VM_ARCH_SPECIFIC_1 {VM_SAO, "sao" }
#elif defined(CONFIG_PARISC) || defined(CONFIG_IA64)
#define __VM_ARCH_SPECIFIC_1 {VM_GROWSUP, "growsup" }
#elif !defined(CONFIG_MMU)
};
enum rxrpc_rtt_tx_trace {
+ rxrpc_rtt_tx_cancel,
rxrpc_rtt_tx_data,
+ rxrpc_rtt_tx_no_slot,
rxrpc_rtt_tx_ping,
};
enum rxrpc_rtt_rx_trace {
+ rxrpc_rtt_rx_cancel,
+ rxrpc_rtt_rx_lost,
+ rxrpc_rtt_rx_obsolete,
rxrpc_rtt_rx_ping_response,
rxrpc_rtt_rx_requested_ack,
};
E_(rxrpc_recvmsg_wait, "WAIT")
#define rxrpc_rtt_tx_traces \
+ EM(rxrpc_rtt_tx_cancel, "CNCE") \
EM(rxrpc_rtt_tx_data, "DATA") \
+ EM(rxrpc_rtt_tx_no_slot, "FULL") \
E_(rxrpc_rtt_tx_ping, "PING")
#define rxrpc_rtt_rx_traces \
+ EM(rxrpc_rtt_rx_cancel, "CNCL") \
+ EM(rxrpc_rtt_rx_obsolete, "OBSL") \
+ EM(rxrpc_rtt_rx_lost, "LOST") \
EM(rxrpc_rtt_rx_ping_response, "PONG") \
E_(rxrpc_rtt_rx_requested_ack, "RACK")
TRACE_EVENT(rxrpc_rtt_tx,
TP_PROTO(struct rxrpc_call *call, enum rxrpc_rtt_tx_trace why,
- rxrpc_serial_t send_serial),
+ int slot, rxrpc_serial_t send_serial),
- TP_ARGS(call, why, send_serial),
+ TP_ARGS(call, why, slot, send_serial),
TP_STRUCT__entry(
__field(unsigned int, call )
__field(enum rxrpc_rtt_tx_trace, why )
+ __field(int, slot )
__field(rxrpc_serial_t, send_serial )
),
TP_fast_assign(
__entry->call = call->debug_id;
__entry->why = why;
+ __entry->slot = slot;
__entry->send_serial = send_serial;
),
- TP_printk("c=%08x %s sr=%08x",
+ TP_printk("c=%08x [%d] %s sr=%08x",
__entry->call,
+ __entry->slot,
__print_symbolic(__entry->why, rxrpc_rtt_tx_traces),
__entry->send_serial)
);
TRACE_EVENT(rxrpc_rtt_rx,
TP_PROTO(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
+ int slot,
rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial,
u32 rtt, u32 rto),
- TP_ARGS(call, why, send_serial, resp_serial, rtt, rto),
+ TP_ARGS(call, why, slot, send_serial, resp_serial, rtt, rto),
TP_STRUCT__entry(
__field(unsigned int, call )
__field(enum rxrpc_rtt_rx_trace, why )
+ __field(int, slot )
__field(rxrpc_serial_t, send_serial )
__field(rxrpc_serial_t, resp_serial )
__field(u32, rtt )
TP_fast_assign(
__entry->call = call->debug_id;
__entry->why = why;
+ __entry->slot = slot;
__entry->send_serial = send_serial;
__entry->resp_serial = resp_serial;
__entry->rtt = rtt;
__entry->rto = rto;
),
- TP_printk("c=%08x %s sr=%08x rr=%08x rtt=%u rto=%u",
+ TP_printk("c=%08x [%d] %s sr=%08x rr=%08x rtt=%u rto=%u",
__entry->call,
+ __entry->slot,
__print_symbolic(__entry->why, rxrpc_rtt_rx_traces),
__entry->send_serial,
__entry->resp_serial,
{I_CLEAR, "I_CLEAR"}, \
{I_SYNC, "I_SYNC"}, \
{I_DIRTY_TIME, "I_DIRTY_TIME"}, \
- {I_DIRTY_TIME_EXPIRED, "I_DIRTY_TIME_EXPIRED"}, \
{I_REFERENCED, "I_REFERENCED"} \
)
TRACE_EVENT(writeback_queue_io,
TP_PROTO(struct bdi_writeback *wb,
struct wb_writeback_work *work,
+ unsigned long dirtied_before,
int moved),
- TP_ARGS(wb, work, moved),
+ TP_ARGS(wb, work, dirtied_before, moved),
TP_STRUCT__entry(
__array(char, name, 32)
__field(unsigned long, older)
__field(ino_t, cgroup_ino)
),
TP_fast_assign(
- unsigned long *older_than_this = work->older_than_this;
strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
- __entry->older = older_than_this ? *older_than_this : 0;
- __entry->age = older_than_this ?
- (jiffies - *older_than_this) * 1000 / HZ : -1;
+ __entry->older = dirtied_before;
+ __entry->age = (jiffies - dirtied_before) * 1000 / HZ;
__entry->moved = moved;
__entry->reason = work->reason;
__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
),
TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup_ino=%lu",
__entry->name,
- __entry->older, /* older_than_this in jiffies */
- __entry->age, /* older_than_this in relative milliseconds */
+ __entry->older, /* dirtied_before in jiffies */
+ __entry->age, /* dirtied_before in relative milliseconds */
__entry->moved,
__print_symbolic(__entry->reason, WB_WORK_REASON),
(unsigned long)__entry->cgroup_ino
*
* Also, note that **bpf_trace_printk**\ () is slow, and should
* only be used for debugging purposes. For this reason, a notice
- * bloc (spanning several lines) is printed to kernel logs and
+ * block (spanning several lines) is printed to kernel logs and
* states that the helper should not be used "for production use"
* the first time this helper is used (or more precisely, when
* **trace_printk**\ () buffers are allocated). For passing values
*
* int ret;
* struct bpf_tunnel_key key = {};
- *
+ *
* ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
* if (ret < 0)
* return TC_ACT_SHOT; // drop packet
- *
+ *
* if (key.remote_ipv4 != 0x0a000001)
* return TC_ACT_SHOT; // drop packet
- *
+ *
* return TC_ACT_OK; // accept packet
*
* This interface can also be used with all encapsulation devices
* Description
* Retrieve the realm or the route, that is to say the
* **tclassid** field of the destination for the *skb*. The
- * indentifier retrieved is a user-provided tag, similar to the
+ * identifier retrieved is a user-provided tag, similar to the
* one used with the net_cls cgroup (see description for
* **bpf_get_cgroup_classid**\ () helper), but here this tag is
* held by a route (a destination entry), not by a task.
* this socket to prevent accepting spoofed ones.
*/
#define IP_PMTUDISC_INTERFACE 4
-/* weaker version of IP_PMTUDISC_INTERFACE, which allos packets to get
+/* weaker version of IP_PMTUDISC_INTERFACE, which allows packets to get
 * fragmented if they exceed the interface mtu
*/
#define IP_PMTUDISC_OMIT 5
* @NFTA_LIST_ELEM: list element (NLA_NESTED)
*/
enum nft_list_attributes {
- NFTA_LIST_UNPEC,
+ NFTA_LIST_UNSPEC,
NFTA_LIST_ELEM,
__NFTA_LIST_MAX
};
#define bfn_to_local_pfn(bfn) bfn_to_pfn(bfn)
/* VIRT <-> GUEST conversion */
-#define virt_to_gfn(v) (pfn_to_gfn(virt_to_phys(v) >> XEN_PAGE_SHIFT))
+#define virt_to_gfn(v) \
+ ({ \
+ WARN_ON_ONCE(!virt_addr_valid(v)); \
+ pfn_to_gfn(virt_to_phys(v) >> XEN_PAGE_SHIFT); \
+ })
#define gfn_to_virt(m) (__va(gfn_to_pfn(m) << XEN_PAGE_SHIFT))
/* Only used in PV code. But ARM guests are always HVM. */
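/*
 * Editor's example, not from the patch: the new WARN_ON_ONCE() catches
 * callers passing addresses for which virt_to_phys() is undefined,
 * e.g. vmalloc space:
 */
static unsigned long example_bad_gfn(void)
{
	void *p = vmalloc(PAGE_SIZE);		/* needs <linux/vmalloc.h> */
	unsigned long gfn = virt_to_gfn(p);	/* warns: !virt_addr_valid(p) */

	vfree(p);
	return gfn;
}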
/******************************************************************************
* Xen balloon functionality
*/
+#ifndef _XEN_BALLOON_H
+#define _XEN_BALLOON_H
#define RETRY_UNLIMITED 0
{
}
#endif
+
+#endif /* _XEN_BALLOON_H */
extern u64 xen_saved_max_mem_size;
#endif
+#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
+int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages);
+void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages);
+#else
+#define xen_alloc_unpopulated_pages alloc_xenballooned_pages
+#define xen_free_unpopulated_pages free_xenballooned_pages
+#include <xen/balloon.h>
+#endif
+
#endif /* _XEN_XEN_H */
{
struct kstat st;
- if (init_stat(path, &st, AT_SYMLINK_NOFOLLOW) &&
+ if (!init_stat(path, &st, AT_SYMLINK_NOFOLLOW) &&
(st.mode ^ fmode) & S_IFMT) {
if (S_ISDIR(st.mode))
init_rmdir(path);
}
static int proc_ipc_sem_dointvec(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void *buffer, size_t *lenp, loff_t *ppos)
{
int ret, semmni;
struct ipc_namespace *ns = current->nsproxy->ipc_ns;
case IPC_SET:
if (copy_semid_from_user(&semid64, p, version))
return -EFAULT;
- /* fall through */
+ fallthrough;
case IPC_RMID:
return semctl_down(ns, semid, cmd, &semid64);
default:
case IPC_SET:
if (copy_compat_semid_from_user(&semid64, p, version))
return -EFAULT;
- /* fallthru */
+ fallthrough;
case IPC_RMID:
return semctl_down(ns, semid, cmd, &semid64);
default:
case IPC_SET:
if (copy_shmid_from_user(&sem64, buf, version))
return -EFAULT;
- /* fallthru */
+ fallthrough;
case IPC_RMID:
return shmctl_down(ns, shmid, cmd, &sem64);
case SHM_LOCK:
case IPC_SET:
if (copy_compat_shmid_from_user(&sem64, uptr, version))
return -EFAULT;
- /* fallthru */
+ fallthrough;
case IPC_RMID:
return shmctl_down(ns, shmid, cmd, &sem64);
case SHM_LOCK:
data->values[i] = AUDIT_UID_UNSET;
break;
}
- /* fall through - if set */
+ fallthrough; /* if set */
default:
data->values[i] = f->val;
}
iter_priv->done_stop = true;
}
+/* maximum visited objects before bailing out */
+#define MAX_ITER_OBJECTS 1000000
+
/* bpf_seq_read, a customized and simpler version for bpf iterator.
* no_llseek is assumed for this file.
* The following are differences from seq_read():
{
struct seq_file *seq = file->private_data;
size_t n, offs, copied = 0;
- int err = 0;
+ int err = 0, num_objs = 0;
void *p;
mutex_lock(&seq->lock);
while (1) {
loff_t pos = seq->index;
+ num_objs++;
offs = seq->count;
p = seq->op->next(seq, p, &seq->index);
if (pos == seq->index) {
if (seq->count >= size)
break;
+ if (num_objs >= MAX_ITER_OBJECTS) {
+ if (offs == 0) {
+ err = -EAGAIN;
+ seq->op->stop(seq, p);
+ goto done;
+ }
+ break;
+ }
+
err = seq->op->show(seq, p);
if (err > 0) {
bpf_iter_dec_seq_num(seq);
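/*
 * Editor's note, not from the patch: the MAX_ITER_OBJECTS bailout above
 * distinguishes two cases. If some output was already buffered
 * (offs != 0) the read returns it and the next read() resumes the walk;
 * if nothing was produced yet, returning -EAGAIN lets user space retry
 * instead of the kernel looping over millions of objects in one call.
 */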
return prog->expected_attach_type ==
BPF_CGROUP_GETSOCKOPT;
case offsetof(struct bpf_sockopt, optname):
- /* fallthrough */
+ fallthrough;
case offsetof(struct bpf_sockopt, level):
if (size != size_default)
return false;
break;
default:
bpf_warn_invalid_xdp_action(act);
- /* fallthrough */
+ fallthrough;
case XDP_DROP:
xdp_return_frame(xdpf);
stats->drop++;
phdr = (Elf32_Phdr *)(page_addr + sizeof(Elf32_Ehdr));
- for (i = 0; i < ehdr->e_phnum; ++i)
- if (phdr[i].p_type == PT_NOTE)
- return stack_map_parse_build_id(page_addr, build_id,
- page_addr + phdr[i].p_offset,
- phdr[i].p_filesz);
+ for (i = 0; i < ehdr->e_phnum; ++i) {
+ if (phdr[i].p_type == PT_NOTE &&
+ !stack_map_parse_build_id(page_addr, build_id,
+ page_addr + phdr[i].p_offset,
+ phdr[i].p_filesz))
+ return 0;
+ }
return -EINVAL;
}
phdr = (Elf64_Phdr *)(page_addr + sizeof(Elf64_Ehdr));
- for (i = 0; i < ehdr->e_phnum; ++i)
- if (phdr[i].p_type == PT_NOTE)
- return stack_map_parse_build_id(page_addr, build_id,
- page_addr + phdr[i].p_offset,
- phdr[i].p_filesz);
+ for (i = 0; i < ehdr->e_phnum; ++i) {
+ if (phdr[i].p_type == PT_NOTE &&
+ !stack_map_parse_build_id(page_addr, build_id,
+ page_addr + phdr[i].p_offset,
+ phdr[i].p_filesz))
+ return 0;
+ }
return -EINVAL;
}
case BPF_PROG_TYPE_EXT:
if (expected_attach_type)
return -EINVAL;
- /* fallthrough */
+ fallthrough;
default:
return 0;
}
u32 ulen = info->raw_tracepoint.tp_name_len;
size_t tp_len = strlen(tp_name);
- if (ulen && !ubuf)
+ if (!ulen ^ !ubuf)
return -EINVAL;
info->raw_tracepoint.tp_name_len = tp_len + 1;
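/*
 * Editor's note, not from the patch: "!ulen ^ !ubuf" rejects exactly
 * the inconsistent caller states:
 *
 *	ulen == 0 && ubuf == NULL   ->  OK (size query)
 *	ulen != 0 && ubuf != NULL   ->  OK (copy out the name)
 *	one set without the other   ->  -EINVAL
 */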
rcu_read_lock();
retry:
- pid = idr_get_next(&ns->idr, tid);
+ pid = find_ge_pid(*tid, ns);
if (pid) {
+ *tid = pid_nr_ns(pid, ns);
task = get_pid_task(pid, PIDTYPE_PID);
if (!task) {
++*tid;
f = fcheck_files(curr_files, curr_fd);
if (!f)
continue;
+ if (!get_file_rcu(f))
+ continue;
/* set info->fd */
info->fd = curr_fd;
- get_file(f);
rcu_read_unlock();
return f;
}
off_reg == dst_reg ? dst : src);
return -EACCES;
}
- /* fall-through */
+ fallthrough;
default:
break;
}
default:
if (!prog_extension)
return -EINVAL;
- /* fallthrough */
+ fallthrough;
case BPF_MODIFY_RETURN:
case BPF_LSM_MAC:
case BPF_TRACE_FENTRY:
break;
case _LINUX_CAPABILITY_VERSION_2:
warn_deprecated_v2();
- /* fall through - v3 is otherwise equivalent to v2. */
+ fallthrough; /* v3 is otherwise equivalent to v2 */
case _LINUX_CAPABILITY_VERSION_3:
*tocopy = _LINUX_CAPABILITY_U32S_3;
break;
return -EFAULT;
switch (_NSIG_WORDS) {
case 4: set->sig[3] = v.sig[6] | (((long)v.sig[7]) << 32 );
- /* fall through */
+ fallthrough;
case 3: set->sig[2] = v.sig[4] | (((long)v.sig[5]) << 32 );
- /* fall through */
+ fallthrough;
case 2: set->sig[1] = v.sig[2] | (((long)v.sig[3]) << 32 );
- /* fall through */
+ fallthrough;
case 1: set->sig[0] = v.sig[0] | (((long)v.sig[1]) << 32 );
}
#else
return DBG_PASS_EVENT;
}
#endif
- /* Fall through */
+ fallthrough;
case 'C': /* Exception passing */
tmp = gdb_cmd_exception_pass(ks);
if (tmp > 0)
goto default_handle;
if (tmp == 0)
break;
- /* Fall through - on tmp < 0 */
+ fallthrough; /* on tmp < 0 */
case 'c': /* Continue packet */
case 's': /* Single step packet */
if (kgdb_contthread && kgdb_contthread != current) {
break;
}
dbg_activate_sw_breakpoints();
- /* Fall through - to default processing */
+ fallthrough; /* to default processing */
default:
default_handle:
error = kgdb_arch_handle_exception(ks->ex_vector,
case KT_LATIN:
if (isprint(keychar))
break; /* printable characters */
- /* fall through */
+ fallthrough;
case KT_SPEC:
if (keychar == K_ENTER)
break;
- /* fall through */
+ fallthrough;
default:
return -1; /* ignore unprintables */
}
*word = w8;
break;
}
- /* fall through */
+ fallthrough;
default:
diag = KDB_BADWIDTH;
kdb_printf("kdb_getphysword: bad width %ld\n", (long) size);
*word = w8;
break;
}
- /* fall through */
+ fallthrough;
default:
diag = KDB_BADWIDTH;
kdb_printf("kdb_getword: bad width %ld\n", (long) size);
diag = kdb_putarea(addr, w8);
break;
}
- /* fall through */
+ fallthrough;
default:
diag = KDB_BADWIDTH;
kdb_printf("kdb_putword: bad width %ld\n", (long) size);
return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
}
-gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
+static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
u64 *phys_limit)
{
u64 dma_limit = min_not_zero(dma_mask, dev->bus_dma_limit);
return 0;
}
-bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
+static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
return phys_to_dma_direct(dev, phys) + size - 1 <=
min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
size = PAGE_ALIGN(size);
if (dma_should_alloc_from_pool(dev, gfp, attrs)) {
- ret = dma_alloc_from_pool(dev, size, &page, gfp);
- if (!ret)
+ u64 phys_mask;
+
+ gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
+ &phys_mask);
+ page = dma_alloc_from_pool(dev, size, &ret, gfp,
+ dma_coherent_ok);
+ if (!page)
return NULL;
goto done;
}
* Copyright (C) 2012 ARM Ltd.
* Copyright (C) 2020 Google LLC
*/
+#include <linux/cma.h>
#include <linux/debugfs.h>
+#include <linux/dma-contiguous.h>
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/init.h>
pool_size_kernel += size;
}
+static bool cma_in_zone(gfp_t gfp)
+{
+ unsigned long size;
+ phys_addr_t end;
+ struct cma *cma;
+
+ cma = dev_get_cma_area(NULL);
+ if (!cma)
+ return false;
+
+ size = cma_get_size(cma);
+ if (!size)
+ return false;
+
+ /* CMA can't cross zone boundaries, see cma_activate_area() */
+ end = cma_get_base(cma) + size - 1;
+ if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp & GFP_DMA))
+ return end <= DMA_BIT_MASK(zone_dma_bits);
+ if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32))
+ return end <= DMA_BIT_MASK(32);
+ return true;
+}
+
static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size,
gfp_t gfp)
{
unsigned int order;
- struct page *page;
+ struct page *page = NULL;
void *addr;
int ret = -ENOMEM;
do {
pool_size = 1 << (PAGE_SHIFT + order);
- page = alloc_pages(gfp, order);
+ if (cma_in_zone(gfp))
+ page = dma_alloc_from_contiguous(NULL, 1 << order,
+ order, false);
+ if (!page)
+ page = alloc_pages(gfp, order);
} while (!page && order-- > 0);
if (!page)
goto out;
}
postcore_initcall(dma_atomic_pool_init);
-static inline struct gen_pool *dma_guess_pool_from_device(struct device *dev)
+static inline struct gen_pool *dma_guess_pool(struct gen_pool *prev, gfp_t gfp)
{
- u64 phys_mask;
- gfp_t gfp;
-
- gfp = dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
- &phys_mask);
- if (IS_ENABLED(CONFIG_ZONE_DMA) && gfp == GFP_DMA)
+ if (prev == NULL) {
+ if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32))
+ return atomic_pool_dma32;
+ if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp & GFP_DMA))
+ return atomic_pool_dma;
+ return atomic_pool_kernel;
+ }
+ if (prev == atomic_pool_kernel)
+ return atomic_pool_dma32 ? atomic_pool_dma32 : atomic_pool_dma;
+ if (prev == atomic_pool_dma32)
return atomic_pool_dma;
- if (IS_ENABLED(CONFIG_ZONE_DMA32) && gfp == GFP_DMA32)
- return atomic_pool_dma32;
- return atomic_pool_kernel;
+ return NULL;
}
-static inline struct gen_pool *dma_get_safer_pool(struct gen_pool *bad_pool)
+static struct page *__dma_alloc_from_pool(struct device *dev, size_t size,
+ struct gen_pool *pool, void **cpu_addr,
+ bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t))
{
- if (bad_pool == atomic_pool_kernel)
- return atomic_pool_dma32 ? : atomic_pool_dma;
+ unsigned long addr;
+ phys_addr_t phys;
- if (bad_pool == atomic_pool_dma32)
- return atomic_pool_dma;
+ addr = gen_pool_alloc(pool, size);
+ if (!addr)
+ return NULL;
- return NULL;
-}
+ phys = gen_pool_virt_to_phys(pool, addr);
+ if (phys_addr_ok && !phys_addr_ok(dev, phys, size)) {
+ gen_pool_free(pool, addr, size);
+ return NULL;
+ }
-static inline struct gen_pool *dma_guess_pool(struct device *dev,
- struct gen_pool *bad_pool)
-{
- if (bad_pool)
- return dma_get_safer_pool(bad_pool);
+ if (gen_pool_avail(pool) < atomic_pool_size)
+ schedule_work(&atomic_pool_work);
- return dma_guess_pool_from_device(dev);
+ *cpu_addr = (void *)addr;
+ memset(*cpu_addr, 0, size);
+ return pfn_to_page(__phys_to_pfn(phys));
}
-void *dma_alloc_from_pool(struct device *dev, size_t size,
- struct page **ret_page, gfp_t flags)
+struct page *dma_alloc_from_pool(struct device *dev, size_t size,
+ void **cpu_addr, gfp_t gfp,
+ bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t))
{
struct gen_pool *pool = NULL;
- unsigned long val = 0;
- void *ptr = NULL;
- phys_addr_t phys;
-
- while (1) {
- pool = dma_guess_pool(dev, pool);
- if (!pool) {
- WARN(1, "Failed to get suitable pool for %s\n",
- dev_name(dev));
- break;
- }
-
- val = gen_pool_alloc(pool, size);
- if (!val)
- continue;
-
- phys = gen_pool_virt_to_phys(pool, val);
- if (dma_coherent_ok(dev, phys, size))
- break;
-
- gen_pool_free(pool, val, size);
- val = 0;
- }
-
-
- if (val) {
- *ret_page = pfn_to_page(__phys_to_pfn(phys));
- ptr = (void *)val;
- memset(ptr, 0, size);
+ struct page *page;
- if (gen_pool_avail(pool) < atomic_pool_size)
- schedule_work(&atomic_pool_work);
+ while ((pool = dma_guess_pool(pool, gfp))) {
+ page = __dma_alloc_from_pool(dev, size, pool, cpu_addr,
+ phys_addr_ok);
+ if (page)
+ return page;
}
- return ptr;
+ WARN(1, "Failed to get suitable pool for %s\n", dev_name(dev));
+ return NULL;
}
bool dma_free_from_pool(struct device *dev, void *start, size_t size)
{
struct gen_pool *pool = NULL;
- while (1) {
- pool = dma_guess_pool(dev, pool);
- if (!pool)
- return false;
-
- if (gen_pool_has_addr(pool, (unsigned long)start, size)) {
- gen_pool_free(pool, (unsigned long)start, size);
- return true;
- }
+ while ((pool = dma_guess_pool(pool, 0))) {
+ if (!gen_pool_has_addr(pool, (unsigned long)start, size))
+ continue;
+ gen_pool_free(pool, (unsigned long)start, size);
+ return true;
}
+
+ return false;
}
syscall_enter_audit(regs, syscall);
- return ret ? : syscall;
+ /* The above might have changed the syscall number */
+ return ret ? : syscall_get_nr(current, regs);
}
-noinstr long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall)
+static __always_inline long
+__syscall_enter_from_user_work(struct pt_regs *regs, long syscall)
{
unsigned long ti_work;
- enter_from_user_mode(regs);
- instrumentation_begin();
-
- local_irq_enable();
ti_work = READ_ONCE(current_thread_info()->flags);
if (ti_work & SYSCALL_ENTER_WORK)
syscall = syscall_trace_enter(regs, syscall, ti_work);
- instrumentation_end();
return syscall;
}
+long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall)
+{
+ return __syscall_enter_from_user_work(regs, syscall);
+}
+
+noinstr long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall)
+{
+ long ret;
+
+ enter_from_user_mode(regs);
+
+ instrumentation_begin();
+ local_irq_enable();
+ ret = __syscall_enter_from_user_work(regs, syscall);
+ instrumentation_end();
+
+ return ret;
+}
+
+noinstr void syscall_enter_from_user_mode_prepare(struct pt_regs *regs)
+{
+ enter_from_user_mode(regs);
+ instrumentation_begin();
+ local_irq_enable();
+ instrumentation_end();
+}
+
/**
* exit_to_user_mode - Fixup state when exiting to user mode
*
case IF_SRC_KERNELADDR:
case IF_SRC_KERNEL:
kernel = 1;
- /* fall through */
+ fallthrough;
case IF_SRC_FILEADDR:
case IF_SRC_FILE:
try_to_free_swap(old_page);
page_vma_mapped_walk_done(&pvmw);
- if (vma->vm_flags & VM_LOCKED)
+ if ((vma->vm_flags & VM_LOCKED) && !PageCompound(old_page))
munlock_vma_page(old_page);
put_page(old_page);
}
int sysctl_max_threads(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void *buffer, size_t *lenp, loff_t *ppos)
{
struct ctl_table t;
int ret;
config GCOV_KERNEL
bool "Enable gcov-based kernel profiling"
depends on DEBUG_FS
+ depends on !CC_IS_GCC || GCC_VERSION < 100000
select CONSTRUCTORS if !UML
default n
help
__irq_wake_thread(desc, action);
- /* Fall through - to add to randomness */
+ fallthrough; /* to add to randomness */
case IRQ_HANDLED:
*flags |= action->flags;
break;
case IRQ_SET_MASK_OK:
case IRQ_SET_MASK_OK_DONE:
cpumask_copy(desc->irq_common_data.affinity, mask);
- /* fall through */
+ fallthrough;
case IRQ_SET_MASK_OK_NOCOPY:
irq_validate_effective_affinity(data);
irq_set_thread_affinity(desc);
case IRQ_SET_MASK_OK_DONE:
irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
irqd_set(&desc->irq_data, flags);
- /* fall through */
+ fallthrough;
case IRQ_SET_MASK_OK_NOCOPY:
flags = irqd_get_trigger_type(&desc->irq_data);
unsigned int cpu, bit;
struct cpumap *cm;
+ /*
+ * Not required in theory, but matrix_find_best_cpu() uses
+	 * for_each_cpu() which ignores the cpumask on UP.
+ */
+ if (cpumask_empty(msk))
+ return -EINVAL;
+
cpu = matrix_find_best_cpu(m, msk);
if (cpu == UINT_MAX)
return -ENOSPC;
case 0:
if (kallsyms_for_perf())
return true;
- /* fallthrough */
+ fallthrough;
case 1:
if (security_capable(cred, &init_user_ns, CAP_SYSLOG,
CAP_OPT_NOAUDIT) == 0)
return true;
- /* fallthrough */
+ fallthrough;
default:
return false;
}
skip_checks:
/* we'll do an OFF -> ON transition: */
- this_cpu_write(hardirqs_enabled, 1);
+ __this_cpu_write(hardirqs_enabled, 1);
trace->hardirq_enable_ip = ip;
trace->hardirq_enable_event = ++trace->irq_events;
debug_atomic_inc(hardirqs_on_events);
/*
* We have done an ON -> OFF transition:
*/
- this_cpu_write(hardirqs_enabled, 0);
+ __this_cpu_write(hardirqs_enabled, 0);
trace->hardirq_disable_ip = ip;
trace->hardirq_disable_event = ++trace->irq_events;
debug_atomic_inc(hardirqs_off_events);
{
unsigned long flags;
+ trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
+
if (unlikely(current->lockdep_recursion)) {
/* XXX allow trylock from NMI ?!? */
if (lockdep_nmi() && !trylock) {
check_flags(flags);
current->lockdep_recursion++;
- trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
__lock_acquire(lock, subclass, trylock, read, check,
irqs_disabled_flags(flags), nest_lock, ip, 0, 0);
lockdep_recursion_finish();
{
unsigned long flags;
+ trace_lock_release(lock, ip);
+
if (unlikely(current->lockdep_recursion))
return;
raw_local_irq_save(flags);
check_flags(flags);
+
current->lockdep_recursion++;
- trace_lock_release(lock, ip);
if (__lock_release(lock, ip))
check_chain_key(current);
lockdep_recursion_finish();
hlock->holdtime_stamp = now;
}
- trace_lock_acquired(lock, ip);
-
stats = get_lock_stats(hlock_class(hlock));
if (waittime) {
if (hlock->read)
{
unsigned long flags;
+ trace_lock_acquired(lock, ip);
+
if (unlikely(!lock_stat || !debug_locks))
return;
raw_local_irq_save(flags);
check_flags(flags);
current->lockdep_recursion++;
- trace_lock_contended(lock, ip);
__lock_contended(lock, ip);
lockdep_recursion_finish();
raw_local_irq_restore(flags);
{
unsigned long flags;
+ trace_lock_contended(lock, ip);
+
if (unlikely(!lock_stat || !debug_locks))
return;
break;
case HIBERNATION_PLATFORM:
hibernation_platform_enter();
- /* Fall through */
+ fallthrough;
case HIBERNATION_SHUTDOWN:
if (pm_power_off)
kernel_power_off();
* and add, then see if the aggregate has changed.
*/
plist_del(node, &c->list);
- /* fall through */
+ fallthrough;
case PM_QOS_ADD_REQ:
plist_node_init(node, new_value);
plist_add(node, &c->list);
break;
case PM_QOS_UPDATE_REQ:
pm_qos_flags_remove_req(pqf, req);
- /* fall through */
+ fallthrough;
case PM_QOS_ADD_REQ:
req->flags = val;
INIT_LIST_HEAD(&req->node);
static void relay_destroy_channel(struct kref *kref)
{
struct rchan *chan = container_of(kref, struct rchan, kref);
+ free_percpu(chan->buf);
kfree(chan);
}
state = possible;
break;
}
- /* Fall-through */
+ fallthrough;
case possible:
do_set_cpus_allowed(p, cpu_possible_mask);
state = fail;
static noinline int __cpuidle cpu_idle_poll(void)
{
+ trace_cpu_idle(0, smp_processor_id());
+ stop_critical_timings();
rcu_idle_enter();
- trace_cpu_idle_rcuidle(0, smp_processor_id());
local_irq_enable();
- stop_critical_timings();
while (!tif_need_resched() &&
- (cpu_idle_force_poll || tick_check_broadcast_expired()))
+ (cpu_idle_force_poll || tick_check_broadcast_expired()))
cpu_relax();
- start_critical_timings();
- trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
+
rcu_idle_exit();
+ start_critical_timings();
+ trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
return 1;
}
if (current_clr_polling_and_test()) {
local_irq_enable();
} else {
+
+ trace_cpu_idle(1, smp_processor_id());
stop_critical_timings();
+ rcu_idle_enter();
arch_cpu_idle();
+ rcu_idle_exit();
start_critical_timings();
+ trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
}
}
if (cpuidle_not_available(drv, dev)) {
tick_nohz_idle_stop_tick();
- rcu_idle_enter();
default_idle_call();
goto exit_idle;
u64 max_latency_ns;
if (idle_should_enter_s2idle()) {
- rcu_idle_enter();
entered_state = call_cpuidle_s2idle(drv, dev);
if (entered_state > 0)
goto exit_idle;
- rcu_idle_exit();
-
max_latency_ns = U64_MAX;
} else {
max_latency_ns = dev->forced_idle_latency_limit_ns;
}
tick_nohz_idle_stop_tick();
- rcu_idle_enter();
next_state = cpuidle_find_deepest_state(drv, dev, max_latency_ns);
call_cpuidle(drv, dev, next_state);
else
tick_nohz_idle_retain_tick();
- rcu_idle_enter();
-
entered_state = call_cpuidle(drv, dev, next_state);
/*
* Give the governor an opportunity to reflect on the outcome
*/
if (WARN_ON_ONCE(irqs_disabled()))
local_irq_enable();
-
- rcu_idle_exit();
}
/*
case sa_rootdomain:
if (!atomic_read(&d->rd->refcount))
free_rootdomain(&d->rd->rcu);
- /* Fall through */
+ fallthrough;
case sa_sd:
free_percpu(d->sd);
- /* Fall through */
+ fallthrough;
case sa_sd_storage:
__sdt_free(cpu_map);
- /* Fall through */
+ fallthrough;
case sa_none:
break;
}
*/
if (!sid || sid == task_session(current))
break;
- /* fall through */
+ fallthrough;
default:
return -EPERM;
}
if (who == RUSAGE_CHILDREN)
break;
- /* fall through */
+ fallthrough;
case RUSAGE_SELF:
thread_group_cputime_adjusted(p, &tgutime, &tgstime);
#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_SYSCTL)
static int bpf_stats_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos)
+ void *buffer, size_t *lenp, loff_t *ppos)
{
struct static_key *key = (struct static_key *)table->data;
static int saved_val;
switch (state) {
case ODEBUG_STATE_ACTIVE:
WARN_ON(1);
- /* fall through */
+ fallthrough;
default:
return false;
}
rtn = pid_task(pid, PIDTYPE_PID);
if (!rtn || !same_thread_group(rtn, current))
return NULL;
- /* FALLTHRU */
+ fallthrough;
case SIGEV_SIGNAL:
case SIGEV_THREAD:
if (event->sigev_signo <= 0 || event->sigev_signo > SIGRTMAX)
return NULL;
- /* FALLTHRU */
+ fallthrough;
case SIGEV_NONE:
return pid;
default:
switch (mode) {
case TICK_BROADCAST_FORCE:
tick_broadcast_forced = 1;
- /* fall through */
+ fallthrough;
case TICK_BROADCAST_ON:
cpumask_set_cpu(cpu, tick_broadcast_on);
if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {
case ODEBUG_STATE_ACTIVE:
WARN_ON(1);
- /* fall through */
+ fallthrough;
default:
return false;
}
#endif
case BLKTRACESTART:
start = 1;
- /* fall through */
+ fallthrough;
case BLKTRACESTOP:
ret = __blk_trace_startstop(q, start);
break;
ptr++;
break;
}
- /* fall through */
+ fallthrough;
default:
parse_error(pe, FILT_ERR_TOO_MANY_PREDS,
next - str);
switch (op) {
case OP_NE:
pred->not = 1;
- /* Fall through */
+ fallthrough;
case OP_GLOB:
case OP_EQ:
break;
struct watch *watch = container_of(rcu, struct watch, rcu);
put_watch_queue(rcu_access_pointer(watch->queue));
+ atomic_dec(&watch->cred->user->nr_watches);
put_cred(watch->cred);
}
watch->cred = get_current_cred();
rcu_assign_pointer(watch->watch_list, wlist);
+ if (atomic_inc_return(&watch->cred->user->nr_watches) >
+ task_rlimit(current, RLIMIT_NOFILE)) {
+ atomic_dec(&watch->cred->user->nr_watches);
+ put_cred(watch->cred);
+ return -EAGAIN;
+ }
+
spin_lock_bh(&wqueue->lock);
kref_get(&wqueue->usage);
kref_get(&watch->usage);
KCOV_INSTRUMENT_dynamic_debug.o := n
KCOV_INSTRUMENT_fault-inject.o := n
+# string.o implements standard library functions like memset/memcpy etc.
+# Use -ffreestanding to ensure that the compiler does not try to "optimize"
+# them into calls to themselves.
+CFLAGS_string.o := -ffreestanding
+
# Early boot use of cmdline, don't instrument it
ifdef CONFIG_AMD_MEM_ENCRYPT
KASAN_SANITIZE_string.o := n
-CFLAGS_string.o := -fno-stack-protector
+CFLAGS_string.o += -fno-stack-protector
endif
# Used by KCSAN while enabled, avoid recursion.
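/*
 * Editor's illustration in C, not part of the patch: the failure mode
 * the -ffreestanding flag above prevents. In a hosted build the
 * compiler may recognise the loop below as memset() and replace it
 * with a call to memset(), that is, a call to itself, recursing until
 * the stack overflows.
 */
#include <stddef.h>

void *memset(void *s, int c, size_t n)
{
	char *p = s;

	while (n--)
		*p++ = (char)c;
	return s;
}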
q - 2);
break;
}
- /* Fall through */
+ /* fall through */
case '=':
ret = xbc_parse_kv(&p, q, c);
break;
break;
case '\\':
d = *pat++;
- /*FALLTHROUGH*/
+ /* fall through */
default: /* Literal character */
literal:
if (c == d) {
switch (*(++fmt)) {
case 'L':
- uc = true; /* fall-through */
+ uc = true;
+ /* fall through */
case 'l':
index = guid_index;
break;
case 'S':
case 's':
ptr = dereference_symbol_descriptor(ptr);
- /* Fallthrough */
+ /* fall through */
case 'B':
return symbol_string(buf, end, ptr, spec, fmt);
case 'R':
* utility, treat it as any other invalid or
* unsupported format specifier.
*/
- /* Fall-through */
+ /* fall through */
default:
WARN_ONCE(1, "Please remove unsupported %%%c in format string\n", *fmt);
s->lzma2.sequence = SEQ_LZMA_PREPARE;
- /* Fall through */
+ /* fall through */
case SEQ_LZMA_PREPARE:
if (s->lzma2.compressed < RC_INIT_BYTES)
s->lzma2.compressed -= RC_INIT_BYTES;
s->lzma2.sequence = SEQ_LZMA_RUN;
- /* Fall through */
+ /* fall through */
case SEQ_LZMA_RUN:
/*
if (ret != XZ_OK)
return ret;
- /* Fall through */
+ /* fall through */
case SEQ_BLOCK_START:
/* We need one byte of input to continue. */
s->temp.pos = 0;
s->sequence = SEQ_BLOCK_HEADER;
- /* Fall through */
+ /* fall through */
case SEQ_BLOCK_HEADER:
if (!fill_temp(s, b))
s->sequence = SEQ_BLOCK_UNCOMPRESS;
- /* Fall through */
+ /* fall through */
case SEQ_BLOCK_UNCOMPRESS:
ret = dec_block(s, b);
s->sequence = SEQ_BLOCK_PADDING;
- /* Fall through */
+ /* fall through */
case SEQ_BLOCK_PADDING:
/*
s->sequence = SEQ_BLOCK_CHECK;
- /* Fall through */
+ /* fall through */
case SEQ_BLOCK_CHECK:
if (s->check_type == XZ_CHECK_CRC32) {
s->sequence = SEQ_INDEX_PADDING;
- /* Fall through */
+ /* fall through */
case SEQ_INDEX_PADDING:
while ((s->index.size + (b->in_pos - s->in_start))
s->sequence = SEQ_INDEX_CRC32;
- /* Fall through */
+ /* fall through */
case SEQ_INDEX_CRC32:
ret = crc32_validate(s, b);
s->temp.size = STREAM_HEADER_SIZE;
s->sequence = SEQ_STREAM_FOOTER;
- /* Fall through */
+ /* fall through */
case SEQ_STREAM_FOOTER:
if (!fill_temp(s, b))
case set_repeat:
if (dctx->litEntropy == 0)
return ERROR(dictionary_corrupted);
- /* fall-through */
+ /* fall through */
case set_compressed:
if (srcSize < 5)
return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3 */
switch (zds->stage) {
case zdss_init:
ZSTD_resetDStream(zds); /* transparent reset on starting decoding a new frame */
- /* fall-through */
+ /* fall through */
case zdss_loadHeader: {
size_t const hSize = ZSTD_getFrameParams(&zds->fParams, zds->headerBuffer, zds->lhSize);
}
zds->stage = zdss_read;
}
- /* fall through */
+ /* fall through */
case zdss_read: {
size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds->dctx);
zds->stage = zdss_load;
/* pass-through */
}
- /* fall through */
+ /* fall through */
case zdss_load: {
size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds->dctx);
/* pass-through */
}
}
- /* fall through */
+ /* fall through */
case zdss_flush: {
size_t const toFlushSize = zds->outEnd - zds->outStart;
}
/*
- * FOLL_FORCE or a forced COW break can write even to unwritable pte's,
- * but only after we've gone through a COW cycle and they are dirty.
+ * FOLL_FORCE can write to even unwritable pte's, but only
+ * after we've gone through a COW cycle and they are dirty.
*/
static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
{
- return pte_write(pte) || ((flags & FOLL_COW) && pte_dirty(pte));
-}
-
-/*
- * A (separate) COW fault might break the page the other way and
- * get_user_pages() would return the page from what is now the wrong
- * VM. So we need to force a COW break at GUP time even for reads.
- */
-static inline bool should_force_cow_break(struct vm_area_struct *vma, unsigned int flags)
-{
- return is_cow_mapping(vma->vm_flags) && (flags & (FOLL_GET | FOLL_PIN));
+ return pte_write(pte) ||
+ ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
}
static struct page *follow_page_pte(struct vm_area_struct *vma,
goto unmap;
*page = pte_page(*pte);
}
- if (unlikely(!try_get_page(*page))) {
+ if (unlikely(!try_grab_page(*page, gup_flags))) {
ret = -ENOMEM;
goto unmap;
}
goto out;
}
if (is_vm_hugetlb_page(vma)) {
- if (should_force_cow_break(vma, foll_flags))
- foll_flags |= FOLL_WRITE;
i = follow_hugetlb_page(mm, vma, pages, vmas,
&start, &nr_pages, i,
- foll_flags, locked);
+ gup_flags, locked);
if (locked && *locked == 0) {
/*
* We've got a VM_FAULT_RETRY
continue;
}
}
-
- if (should_force_cow_break(vma, foll_flags))
- foll_flags |= FOLL_WRITE;
-
retry:
/*
* If we have a pending SIGKILL, don't keep faulting pages and
return -EFAULT;
/*
- * The FAST_GUP case requires FOLL_WRITE even for pure reads,
- * because get_user_pages() may need to cause an early COW in
- * order to avoid confusing the normal COW routines. So only
- * targets that are already writable are safe to do by just
- * looking at the page tables.
- *
- * NOTE! With FOLL_FAST_ONLY we allow read-only gup_fast() here,
- * because there is no slow path to fall back on. But you'd
- * better be careful about possible COW pages - you'll get _a_
- * COW page, but not necessarily the one you intended to get
- * depending on what COW event happens after this. COW may break
- * the page copy in a random direction.
- *
* Disable interrupts. The nested form is used, in order to allow
* full, general purpose use of this routine.
*
*/
if (IS_ENABLED(CONFIG_HAVE_FAST_GUP) && gup_fast_permitted(start, end)) {
unsigned long fast_flags = gup_flags;
- if (!(gup_flags & FOLL_FAST_ONLY))
- fast_flags |= FOLL_WRITE;
local_irq_save(flags);
gup_pgd_range(addr, end, fast_flags, pages, &nr_pinned);
}
/*
- * FOLL_FORCE or a forced COW break can write even to unwritable pmd's,
- * but only after we've gone through a COW cycle and they are dirty.
+ * FOLL_FORCE can write to even unwritable pmd's, but only
+ * after we've gone through a COW cycle and they are dirty.
*/
static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
{
- return pmd_write(pmd) || ((flags & FOLL_COW) && pmd_dirty(pmd));
+ return pmd_write(pmd) ||
+ ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
}
struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
int nid, nodemask_t *nodemask)
{
unsigned long nr_pages = 1UL << huge_page_order(h);
+ if (nid == NUMA_NO_NODE)
+ nid = numa_mem_id();
#ifdef CONFIG_CMA
{
struct page *page;
int node;
- for_each_node_mask(node, *nodemask) {
- if (!hugetlb_cma[node])
- continue;
-
- page = cma_alloc(hugetlb_cma[node], nr_pages,
- huge_page_order(h), true);
+ if (hugetlb_cma[nid]) {
+ page = cma_alloc(hugetlb_cma[nid], nr_pages,
+ huge_page_order(h), true);
if (page)
return page;
}
+
+ if (!(gfp_mask & __GFP_THISNODE)) {
+ for_each_node_mask(node, *nodemask) {
+ if (node == nid || !hugetlb_cma[node])
+ continue;
+
+ page = cma_alloc(hugetlb_cma[node], nr_pages,
+ huge_page_order(h), true);
+ if (page)
+ return page;
+ }
+ }
}
#endif
}
#ifdef CONFIG_SYSCTL
+static int proc_hugetlb_doulongvec_minmax(struct ctl_table *table, int write,
+ void *buffer, size_t *length,
+ loff_t *ppos, unsigned long *out)
+{
+ struct ctl_table dup_table;
+
+ /*
+ * In order to avoid races with __do_proc_doulongvec_minmax(), we
+ * duplicate @table and alter only the duplicate.
+ */
+ dup_table = *table;
+ dup_table.data = out;
+
+ return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos);
+}
+
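
The helper above exists so that sysctl handlers stop mutating the shared ctl_table in place. A userspace sketch of the same idea, with a hypothetical 'struct table' standing in for ctl_table and a made-up handle() routine; only the stack copy's ->data is ever pointed at the caller's output:

#include <stdio.h>

struct table {
	const char *name;
	void *data;			/* shared; never scribbled on */
};

static int handle(const struct table *shared, unsigned long *out)
{
	struct table dup = *shared;	/* private stack copy */

	dup.data = out;			/* alter only the duplicate */
	/* ... pass &dup to the generic min/max parser ... */
	return 0;
}

int main(void)
{
	struct table shared = { .name = "nr_hugepages", .data = NULL };
	unsigned long tmp = 0;

	handle(&shared, &tmp);
	printf("%s: data still %p\n", shared.name, shared.data);
	return 0;
}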
static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
struct ctl_table *table, int write,
void *buffer, size_t *length, loff_t *ppos)
if (!hugepages_supported())
return -EOPNOTSUPP;
- table->data = &tmp;
- table->maxlen = sizeof(unsigned long);
- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
+ ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
+ &tmp);
if (ret)
goto out;
if (write && hstate_is_gigantic(h))
return -EINVAL;
- table->data = &tmp;
- table->maxlen = sizeof(unsigned long);
- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
+ ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
+ &tmp);
if (ret)
goto out;
snprintf(cft->name, MAX_CFTYPE_NAME, "%s.events", buf);
cft->private = MEMFILE_PRIVATE(idx, 0);
cft->seq_show = hugetlb_events_show;
- cft->file_offset = offsetof(struct hugetlb_cgroup, events_file[idx]),
+ cft->file_offset = offsetof(struct hugetlb_cgroup, events_file[idx]);
cft->flags = CFTYPE_NOT_ON_ROOT;
/* Add the events.local file */
cft->private = MEMFILE_PRIVATE(idx, 0);
cft->seq_show = hugetlb_events_local_show;
cft->file_offset = offsetof(struct hugetlb_cgroup,
- events_local_file[idx]),
+ events_local_file[idx]);
cft->flags = CFTYPE_NOT_ON_ROOT;
/* NULL terminate the last cft */
return -ENOMEM;
/* __khugepaged_exit() must not run from under us */
- VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
+ VM_BUG_ON_MM(atomic_read(&mm->mm_users) == 0, mm);
if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
free_mm_slot(mm_slot);
return 0;
xas_unlock_irq(&xas);
page_cache_sync_readahead(mapping, &file->f_ra,
file, index,
- PAGE_SIZE);
+ end - index);
/* drain pagevecs to help isolate_lru_page() */
lru_add_drain();
page = find_lock_page(mapping, index);
if (vma_is_dax(vma))
return 0;
+#ifdef VM_SAO
+ if (*vm_flags & VM_SAO)
+ return 0;
+#endif
#ifdef VM_SPARC_ADI
if (*vm_flags & VM_SPARC_ADI)
return 0;
goto again;
}
-bool reuse_ksm_page(struct page *page,
- struct vm_area_struct *vma,
- unsigned long address)
-{
-#ifdef CONFIG_DEBUG_VM
- if (WARN_ON(is_zero_pfn(page_to_pfn(page))) ||
- WARN_ON(!page_mapped(page)) ||
- WARN_ON(!PageLocked(page))) {
- dump_page(page, "reuse_ksm_page");
- return false;
- }
-#endif
-
- if (PageSwapCache(page) || !page_stable_node(page))
- return false;
- /* Prohibit parallel get_ksm_page() */
- if (!page_ref_freeze(page, 1))
- return false;
-
- page_move_anon_rmap(page, vma);
- page->index = linear_page_index(vma, address);
- page_ref_unfreeze(page, 1);
-
- return true;
-}
#ifdef CONFIG_MIGRATION
void ksm_migrate_page(struct page *newpage, struct page *oldpage)
{
*/
*prev = NULL; /* tell sys_madvise we drop mmap_lock */
get_file(file);
- mmap_read_unlock(current->mm);
offset = (loff_t)(start - vma->vm_start)
+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
+ mmap_read_unlock(current->mm);
vfs_fadvise(file, offset, end - start, POSIX_FADV_WILLNEED);
fput(file);
mmap_read_lock(current->mm);
__this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_pages);
memcg_check_events(ug->memcg, ug->dummy_page);
local_irq_restore(flags);
+
+ /* drop reference from uncharge_page */
+ css_put(&ug->memcg->css);
}
static void uncharge_page(struct page *page, struct uncharge_gather *ug)
uncharge_gather_clear(ug);
}
ug->memcg = page->mem_cgroup;
+
+ /* pairs with css_put in uncharge_batch */
+ css_get(&ug->memcg->css);
}
nr_pages = compound_nr(page);
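
The css_get()/css_put() pair above keeps the cached memcg alive for as long as the gather struct holds a pointer to it. A toy refcount sketch of that pairing, with hypothetical get()/put() helpers and batch types; the reference taken when the pointer is stashed is dropped only when the batch is flushed:

#include <assert.h>
#include <stddef.h>

struct obj { int refs; };

static void get(struct obj *o) { o->refs++; }
static void put(struct obj *o) { o->refs--; }

struct batch { struct obj *cached; };

static void batch_add(struct batch *b, struct obj *o)
{
	if (!b->cached) {
		b->cached = o;
		get(o);			/* pairs with put() in batch_flush */
	}
}

static void batch_flush(struct batch *b)
{
	put(b->cached);			/* drop the reference taken above */
	b->cached = NULL;
}

int main(void)
{
	struct obj o = { .refs = 1 };
	struct batch b = { NULL };

	batch_add(&b, &o);
	assert(o.refs == 2);		/* pinned while queued */
	batch_flush(&b);
	assert(o.refs == 1);
	return 0;
}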
#include <linux/numa.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
+#include <linux/vmalloc.h>
#include <trace/events/kmem.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
+#include "pgalloc-track.h"
#include "internal.h"
#if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST)
static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
unsigned long addr, unsigned long end,
- pte_fn_t fn, void *data, bool create)
+ pte_fn_t fn, void *data, bool create,
+ pgtbl_mod_mask *mask)
{
pte_t *pte;
int err = 0;
if (create) {
pte = (mm == &init_mm) ?
- pte_alloc_kernel(pmd, addr) :
+ pte_alloc_kernel_track(pmd, addr, mask) :
pte_alloc_map_lock(mm, pmd, addr, &ptl);
if (!pte)
return -ENOMEM;
break;
}
} while (addr += PAGE_SIZE, addr != end);
+ *mask |= PGTBL_PTE_MODIFIED;
arch_leave_lazy_mmu_mode();
static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
unsigned long addr, unsigned long end,
- pte_fn_t fn, void *data, bool create)
+ pte_fn_t fn, void *data, bool create,
+ pgtbl_mod_mask *mask)
{
pmd_t *pmd;
unsigned long next;
BUG_ON(pud_huge(*pud));
if (create) {
- pmd = pmd_alloc(mm, pud, addr);
+ pmd = pmd_alloc_track(mm, pud, addr, mask);
if (!pmd)
return -ENOMEM;
} else {
next = pmd_addr_end(addr, end);
if (create || !pmd_none_or_clear_bad(pmd)) {
err = apply_to_pte_range(mm, pmd, addr, next, fn, data,
- create);
+ create, mask);
if (err)
break;
}
static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
unsigned long addr, unsigned long end,
- pte_fn_t fn, void *data, bool create)
+ pte_fn_t fn, void *data, bool create,
+ pgtbl_mod_mask *mask)
{
pud_t *pud;
unsigned long next;
int err = 0;
if (create) {
- pud = pud_alloc(mm, p4d, addr);
+ pud = pud_alloc_track(mm, p4d, addr, mask);
if (!pud)
return -ENOMEM;
} else {
next = pud_addr_end(addr, end);
if (create || !pud_none_or_clear_bad(pud)) {
err = apply_to_pmd_range(mm, pud, addr, next, fn, data,
- create);
+ create, mask);
if (err)
break;
}
static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd,
unsigned long addr, unsigned long end,
- pte_fn_t fn, void *data, bool create)
+ pte_fn_t fn, void *data, bool create,
+ pgtbl_mod_mask *mask)
{
p4d_t *p4d;
unsigned long next;
int err = 0;
if (create) {
- p4d = p4d_alloc(mm, pgd, addr);
+ p4d = p4d_alloc_track(mm, pgd, addr, mask);
if (!p4d)
return -ENOMEM;
} else {
next = p4d_addr_end(addr, end);
if (create || !p4d_none_or_clear_bad(p4d)) {
err = apply_to_pud_range(mm, p4d, addr, next, fn, data,
- create);
+ create, mask);
if (err)
break;
}
void *data, bool create)
{
pgd_t *pgd;
- unsigned long next;
+ unsigned long start = addr, next;
unsigned long end = addr + size;
+ pgtbl_mod_mask mask = 0;
int err = 0;
if (WARN_ON(addr >= end))
next = pgd_addr_end(addr, end);
if (!create && pgd_none_or_clear_bad(pgd))
continue;
- err = apply_to_p4d_range(mm, pgd, addr, next, fn, data, create);
+ err = apply_to_p4d_range(mm, pgd, addr, next, fn, data, create, &mask);
if (err)
break;
} while (pgd++, addr = next, addr != end);
+ if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
+ arch_sync_kernel_mappings(start, start + size);
+
return err;
}
if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
update_mmu_cache(vma, vmf->address, vmf->pte);
pte_unmap_unlock(vmf->pte, vmf->ptl);
+ count_vm_event(PGREUSE);
}
/*
* not dirty accountable.
*/
if (PageAnon(vmf->page)) {
- int total_map_swapcount;
- if (PageKsm(vmf->page) && (PageSwapCache(vmf->page) ||
- page_count(vmf->page) != 1))
+ struct page *page = vmf->page;
+
+ /* PageKsm() doesn't necessarily raise the page refcount */
+ if (PageKsm(page) || page_count(page) != 1)
+ goto copy;
+ if (!trylock_page(page))
+ goto copy;
+ if (PageKsm(page) || page_mapcount(page) != 1 || page_count(page) != 1) {
+ unlock_page(page);
goto copy;
- if (!trylock_page(vmf->page)) {
- get_page(vmf->page);
- pte_unmap_unlock(vmf->pte, vmf->ptl);
- lock_page(vmf->page);
- vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
- vmf->address, &vmf->ptl);
- if (!pte_same(*vmf->pte, vmf->orig_pte)) {
- update_mmu_tlb(vma, vmf->address, vmf->pte);
- unlock_page(vmf->page);
- pte_unmap_unlock(vmf->pte, vmf->ptl);
- put_page(vmf->page);
- return 0;
- }
- put_page(vmf->page);
- }
- if (PageKsm(vmf->page)) {
- bool reused = reuse_ksm_page(vmf->page, vmf->vma,
- vmf->address);
- unlock_page(vmf->page);
- if (!reused)
- goto copy;
- wp_page_reuse(vmf);
- return VM_FAULT_WRITE;
- }
- if (reuse_swap_page(vmf->page, &total_map_swapcount)) {
- if (total_map_swapcount == 1) {
- /*
- * The page is all ours. Move it to
- * our anon_vma so the rmap code will
- * not search our parent or siblings.
- * Protected against the rmap code by
- * the page lock.
- */
- page_move_anon_rmap(vmf->page, vma);
- }
- unlock_page(vmf->page);
- wp_page_reuse(vmf);
- return VM_FAULT_WRITE;
}
- unlock_page(vmf->page);
+ /*
+ * Ok, we've got the only map reference, and the only
+ * page count reference, and the page is locked,
+ * it's dark out, and we're wearing sunglasses. Hit it.
+ */
+ wp_page_reuse(vmf);
+ unlock_page(page);
+ return VM_FAULT_WRITE;
} else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
(VM_WRITE|VM_SHARED))) {
return wp_page_shared(vmf);
vmf->flags & FAULT_FLAG_WRITE)) {
update_mmu_cache(vmf->vma, vmf->address, vmf->pte);
} else {
+ /* Skip spurious TLB flush for retried page fault */
+ if (vmf->flags & FAULT_FLAG_TRIED)
+ goto unlock;
/*
* This is needed only for protection faults but the arch code
* is not yet telling us if this is a protection fault or not.
return ERR_PTR(-EINVAL);
}
break;
- case MEMORY_DEVICE_DEVDAX:
+ case MEMORY_DEVICE_GENERIC:
need_devmap_managed = false;
break;
case MEMORY_DEVICE_PCI_P2PDMA:
else if (pte_swp_uffd_wp(*pvmw.pte))
pte = pte_mkuffd_wp(pte);
- if (unlikely(is_zone_device_page(new))) {
- if (is_device_private_page(new)) {
- entry = make_device_private_entry(new, pte_write(pte));
- pte = swp_entry_to_pte(entry);
- if (pte_swp_uffd_wp(*pvmw.pte))
- pte = pte_mkuffd_wp(pte);
- }
+ if (unlikely(is_device_private_page(new))) {
+ entry = make_device_private_entry(new, pte_write(pte));
+ pte = swp_entry_to_pte(entry);
+ if (pte_swp_soft_dirty(*pvmw.pte))
+ pte = pte_swp_mksoft_dirty(pte);
+ if (pte_swp_uffd_wp(*pvmw.pte))
+ pte = pte_swp_mkuffd_wp(pte);
}
#ifdef CONFIG_HUGETLB_PAGE
entry = make_migration_entry(page, mpfn &
MIGRATE_PFN_WRITE);
swp_pte = swp_entry_to_pte(entry);
- if (pte_soft_dirty(pte))
- swp_pte = pte_swp_mksoft_dirty(swp_pte);
- if (pte_uffd_wp(pte))
- swp_pte = pte_swp_mkuffd_wp(swp_pte);
+ if (pte_present(pte)) {
+ if (pte_soft_dirty(pte))
+ swp_pte = pte_swp_mksoft_dirty(swp_pte);
+ if (pte_uffd_wp(pte))
+ swp_pte = pte_swp_mkuffd_wp(swp_pte);
+ } else {
+ if (pte_swp_soft_dirty(pte))
+ swp_pte = pte_swp_mksoft_dirty(swp_pte);
+ if (pte_swp_uffd_wp(pte))
+ swp_pte = pte_swp_mkuffd_wp(swp_pte);
+ }
set_pte_at(mm, addr, ptep, swp_pte);
/*
struct page *page, *tmp;
LIST_HEAD(head);
+ /*
+ * Ensure a proper count is passed, which otherwise would get stuck
+ * in the while (list_empty(list)) loop below.
+ */
+ count = min(pcp->count, count);
while (count) {
struct list_head *list;
return 0;
}
-core_initcall(init_per_zone_wmark_min)
+postcore_initcall(init_per_zone_wmark_min)
/*
* min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
*/
entry = make_migration_entry(page, 0);
swp_pte = swp_entry_to_pte(entry);
- if (pte_soft_dirty(pteval))
+
+ /*
+ * pteval maps a zone device page and is therefore
+ * a swap pte.
+ */
+ if (pte_swp_soft_dirty(pteval))
swp_pte = pte_swp_mksoft_dirty(swp_pte);
- if (pte_uffd_wp(pteval))
+ if (pte_swp_uffd_wp(pteval))
swp_pte = pte_swp_mkuffd_wp(swp_pte);
set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
/*
*/
#define pr_fmt(fmt) "rodata_test: " fmt
+#include <linux/rodata_test.h>
#include <linux/uaccess.h>
#include <asm/sections.h>
}
static bool freelist_corrupted(struct kmem_cache *s, struct page *page,
- void *freelist, void *nextfree)
+ void **freelist, void *nextfree)
{
if ((s->flags & SLAB_CONSISTENCY_CHECKS) &&
- !check_valid_pointer(s, page, nextfree)) {
- object_err(s, page, freelist, "Freechain corrupt");
- freelist = NULL;
+ !check_valid_pointer(s, page, nextfree) && freelist) {
+ object_err(s, page, *freelist, "Freechain corrupt");
+ *freelist = NULL;
slab_fix(s, "Isolate corrupted freechain");
return true;
}
int objects) {}
static bool freelist_corrupted(struct kmem_cache *s, struct page *page,
- void *freelist, void *nextfree)
+ void **freelist, void *nextfree)
{
return false;
}
* 'freelist' is already corrupted. So isolate all objects
* starting at 'freelist'.
*/
- if (freelist_corrupted(s, page, freelist, nextfree))
+ if (freelist_corrupted(s, page, &freelist, nextfree))
break;
do {
if (pmd_none_or_clear_bad(pmd))
continue;
vunmap_pte_range(pmd, addr, next, mask);
+
+ cond_resched();
} while (pmd++, addr = next, addr != end);
}
unsigned long reclaimed;
unsigned long scanned;
+ /*
+ * This loop can become CPU-bound when target memcgs
+ * aren't eligible for reclaim - either because they
+ * don't have any reclaimable pages, or because their
+ * memory is explicitly protected. Avoid soft lockups.
+ */
+ cond_resched();
+
mem_cgroup_calculate_protection(target_memcg, memcg);
if (mem_cgroup_below_min(memcg)) {
"pglazyfreed",
"pgrefill",
+ "pgreuse",
"pgsteal_kswapd",
"pgsteal_direct",
"pgscan_kswapd",
case SIOCSHWTSTAMP:
if (!net_eq(dev_net(dev), &init_net))
break;
- /* fall through */
+ fallthrough;
case SIOCGMIIPHY:
case SIOCGMIIREG:
case SIOCSMIIREG:
case XenbusStateClosed:
if (dev->state == XenbusStateClosed)
break;
- /* fall through - Missed the backend's CLOSING state */
+ fallthrough; /* Missed the backend's CLOSING state */
case XenbusStateClosing:
xenbus_frontend_closed(dev);
break;
break;
default:
pr_warn("AAL problems ... (%d)\n", aal);
- /* fall through */
+ fallthrough;
case ATM_AAL5:
max_sdu = ATM_MAX_AAL5_PDU;
}
case ATM_NO_AAL:
/* ATM_AAL5 is also used in the "0 for default" case */
vcc->qos.aal = ATM_AAL5;
- /* fall through */
+ fallthrough;
case ATM_AAL5:
error = atm_init_aal5(vcc);
vcc->stats = &dev->stats.aal5;
if (mesg->content.normal.no_source_le_narp)
break;
- /* FALL THROUGH */
+ fallthrough;
case l_arp_update:
lec_arp_update(priv, mesg->content.normal.mac_addr,
mesg->content.normal.atm_addr,
goto done;
}
}
- /* fall through */
+ fallthrough;
case ATM_SETESIF:
{
unsigned char esi[ESI_LEN];
error = -EPERM;
goto done;
}
- /* fall through */
+ fallthrough;
case ATM_GETSTAT:
size = sizeof(struct atm_dev_stats);
error = fetch_stats(dev, buf, cmd == ATM_GETSTATZ);
error = -EINVAL;
goto done;
}
- /* fall through */
+ fallthrough;
case ATM_SETCIRANGE:
case SONET_GETSTATZ:
case SONET_SETDIAG:
error = -EPERM;
goto done;
}
- /* fall through */
+ fallthrough;
default:
if (IS_ENABLED(CONFIG_COMPAT) && compat) {
#ifdef CONFIG_COMPAT
ntohl(ogm_packet->seqno), ogm_throughput, ogm_packet->ttl,
ogm_packet->version, ntohs(ogm_packet->tvlv_len));
+ if (batadv_is_my_mac(bat_priv, ogm_packet->orig)) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Drop packet: originator packet from ourself\n");
+ return;
+ }
+
/* If the throughput metric is 0, immediately drop the packet. No need
* to create orig_node / neigh_node for an unusable route.
*/
if (batadv_is_my_mac(bat_priv, ethhdr->h_source))
goto free_skb;
- ogm_packet = (struct batadv_ogm2_packet *)skb->data;
-
- if (batadv_is_my_mac(bat_priv, ogm_packet->orig))
- goto free_skb;
-
batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_RX);
batadv_add_counter(bat_priv, BATADV_CNT_MGMT_RX_BYTES,
skb->len + ETH_HLEN);
batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
skb->len + ETH_HLEN);
- netif_rx(skb);
+ if (in_interrupt())
+ netif_rx(skb);
+ else
+ netif_rx_ni(skb);
out:
if (primary_if)
batadv_hardif_put(primary_if);
chaddr_offset = *header_len + BATADV_DHCP_CHADDR_OFFSET;
/* store the client address if the message is going to a client */
- if (ret == BATADV_DHCP_TO_CLIENT &&
- pskb_may_pull(skb, chaddr_offset + ETH_ALEN)) {
+ if (ret == BATADV_DHCP_TO_CLIENT) {
+ if (!pskb_may_pull(skb, chaddr_offset + ETH_ALEN))
+ return BATADV_DHCP_NO;
+
/* check if the DHCP packet carries an Ethernet DHCP */
p = skb->data + *header_len + BATADV_DHCP_HTYPE_OFFSET;
if (*p != BATADV_DHCP_HTYPE_ETHERNET)
case BPF_PROG_TYPE_SCHED_CLS:
case BPF_PROG_TYPE_SCHED_ACT:
is_l2 = true;
- /* fall through */
+ fallthrough;
case BPF_PROG_TYPE_LWT_IN:
case BPF_PROG_TYPE_LWT_OUT:
case BPF_PROG_TYPE_LWT_XMIT:
struct ebt_table *t;
struct net *net = sock_net(sk);
+ if ((cmd == EBT_SO_GET_INFO || cmd == EBT_SO_GET_INIT_INFO) &&
+ *len != sizeof(struct compat_ebt_replace))
+ return -EINVAL;
+
if (copy_from_user(&tmp, user, sizeof(tmp)))
return -EFAULT;
static unsigned int nf_ct_br_defrag6(struct sk_buff *skb,
const struct nf_hook_state *state)
{
+#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
u16 zone_id = NF_CT_DEFAULT_ZONE_ID;
enum ip_conntrack_info ctinfo;
struct br_input_skb_cb cb;
br_skb_cb_save(skb, &cb, sizeof(struct inet6_skb_parm));
- err = nf_ipv6_br_defrag(state->net, skb,
- IP_DEFRAG_CONNTRACK_BRIDGE_IN + zone_id);
+ err = nf_ct_frag6_gather(state->net, skb,
+ IP_DEFRAG_CONNTRACK_BRIDGE_IN + zone_id);
/* queued */
if (err == -EINPROGRESS)
return NF_STOLEN;
br_skb_cb_restore(skb, &cb, IP6CB(skb)->frag_max_size);
return err == 0 ? NF_ACCEPT : NF_DROP;
+#else
+ return NF_ACCEPT;
+#endif
}
static int nf_ct_br_ip_check(const struct sk_buff *skb)
if (segmented) {
if (rfml->incomplete_frm == NULL) {
/* Initial Segment */
- if (cfpkt_peek_head(pkt, rfml->seghead, 6) < 0)
+ if (cfpkt_peek_head(pkt, rfml->seghead, 6) != 0)
goto out;
rfml->pdu_size = get_unaligned_le16(rfml->seghead+4);
if (cfpkt_getlen(pkt) > rfml->fragment_size + RFM_HEAD_SIZE)
err = cfpkt_peek_head(pkt, head, 6);
- if (err < 0)
+ if (err != 0)
goto out;
while (cfpkt_getlen(frontpkt) > rfml->fragment_size + RFM_HEAD_SIZE) {
spin_lock_init(&jsk->sk_session_queue_lock);
INIT_LIST_HEAD(&jsk->sk_session_queue);
sk->sk_destruct = j1939_sk_sock_destruct;
+ sk->sk_protocol = CAN_J1939;
return 0;
}
goto out_release_sock;
}
+ if (!ndev->ml_priv) {
+ netdev_warn_once(ndev,
+ "No CAN mid layer private allocated, please fix your driver and use alloc_candev()!\n");
+ dev_put(ndev);
+ ret = -ENODEV;
+ goto out_release_sock;
+ }
+
priv = j1939_netdev_start(ndev);
dev_put(ndev);
if (IS_ERR(priv)) {
static void j1939_sk_sock2sockaddr_can(struct sockaddr_can *addr,
const struct j1939_sock *jsk, int peer)
{
+ /* There are two holes (2 bytes and 3 bytes) to clear to avoid
+ * leaking kernel information to user space.
+ */
+ memset(addr, 0, J1939_MIN_NAMELEN);
+
addr->can_family = AF_CAN;
addr->can_ifindex = jsk->ifindex;
addr->can_addr.j1939.pgn = jsk->addr.pgn;
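
The memset() above clears compiler-inserted padding before the struct is copied out to user space. A compilable sketch of the hole problem, with a hypothetical demo_addr struct in place of sockaddr_can; without the memset, the padding bytes keep stale stack contents that member-by-member assignment never touches:

#include <stdio.h>
#include <string.h>

struct demo_addr {
	short family;	/* 2 bytes, then a 2-byte hole */
	int ifindex;
	char name;	/* 1 byte, then a 3-byte tail hole */
};

static void fill(struct demo_addr *a)
{
	memset(a, 0, sizeof(*a));	/* clear the holes first */
	a->family = 29;
	a->ifindex = 2;
	a->name = 'j';
}

int main(void)
{
	struct demo_addr a;
	unsigned char *p = (unsigned char *)&a;
	size_t i;

	fill(&a);
	for (i = 0; i < sizeof(a); i++)	/* every byte is now defined */
		printf("%02x ", p[i]);
	printf("\n");
	return 0;
}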
break;
case -ERESTARTSYS:
ret = -EINTR;
- /* fall through */
+ fallthrough;
case -EAGAIN: /* OK */
if (todo_size != size)
ret = size - todo_size;
skb_queue_tail(&session->skb_queue, skb);
}
-static struct sk_buff *j1939_session_skb_find(struct j1939_session *session)
+static struct sk_buff *
+j1939_session_skb_find_by_offset(struct j1939_session *session,
+ unsigned int offset_start)
{
struct j1939_priv *priv = session->priv;
+ struct j1939_sk_buff_cb *do_skcb;
struct sk_buff *skb = NULL;
struct sk_buff *do_skb;
- struct j1939_sk_buff_cb *do_skcb;
- unsigned int offset_start;
unsigned long flags;
- offset_start = session->pkt.dpo * 7;
-
spin_lock_irqsave(&session->skb_queue.lock, flags);
skb_queue_walk(&session->skb_queue, do_skb) {
do_skcb = j1939_skb_to_cb(do_skb);
return skb;
}
+static struct sk_buff *j1939_session_skb_find(struct j1939_session *session)
+{
+ unsigned int offset_start;
+
+ offset_start = session->pkt.dpo * 7;
+ return j1939_session_skb_find_by_offset(session, offset_start);
+}
+
/* see if we are receiver
* returns 0 for broadcasts, although we will receive them
*/
return ret;
session->last_txcmd = dat[0];
- if (dat[0] == J1939_TP_CMD_BAM)
+ if (dat[0] == J1939_TP_CMD_BAM) {
j1939_tp_schedule_txtimer(session, 50);
-
- j1939_tp_set_rxtimeout(session, 1250);
+ j1939_tp_set_rxtimeout(session, 250);
+ } else {
+ j1939_tp_set_rxtimeout(session, 1250);
+ }
netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session);
int ret = 0;
u8 dat[8];
- se_skb = j1939_session_skb_find(session);
+ se_skb = j1939_session_skb_find_by_offset(session, session->pkt.tx * 7);
if (!se_skb)
return -ENOBUFS;
if (len > 7)
len = 7;
+ if (offset + len > se_skb->len) {
+ netdev_err_once(priv->ndev,
+ "%s: 0x%p: requested data outside of queued buffer: offset %i, len %i, pkt.tx: %i\n",
+ __func__, session, skcb->offset, se_skb->len, session->pkt.tx);
+ return -EOVERFLOW;
+ }
+
+ if (!len) {
+ ret = -ENOBUFS;
+ break;
+ }
+
memcpy(&dat[1], &tpdat[offset], len);
ret = j1939_tp_tx_dat(session, dat, len + 1);
if (ret < 0) {
return ret;
}
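
The new guard rejects transfers whose requested window falls outside the queued skb before any data is copied. A freestanding sketch of that bounds check, with a hypothetical queued buffer; the overflow-safe comparison fails the transfer instead of reading past the end:

#include <stdio.h>
#include <string.h>

static int copy_dat(unsigned char *dst, const unsigned char *buf,
		    size_t buf_len, size_t offset, size_t len)
{
	if (len > buf_len || offset > buf_len - len)
		return -1;		/* would read outside the queue */
	memcpy(dst, buf + offset, len);
	return 0;
}

int main(void)
{
	unsigned char frame[7];
	unsigned char queued[16] = { 0 };

	if (copy_dat(frame, queued, sizeof(queued), 14, 7))
		puts("rejected: request ends past the queued buffer");
	return 0;
}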
- /* fall through */
+ fallthrough;
case J1939_TP_CMD_CTS:
case 0xff: /* did some data */
case J1939_ETP_CMD_DPO:
lockdep_assert_held(&session->priv->active_session_list_lock);
session->err = j1939_xtp_abort_to_errno(priv, err);
+ session->state = J1939_SESSION_WAITING_ABORT;
/* do not send aborts on incoming broadcasts */
if (!j1939_cb_is_broadcast(&session->skcb)) {
- session->state = J1939_SESSION_WAITING_ABORT;
j1939_xtp_tx_abort(priv, &session->skcb,
!session->transmission,
err, session->skcb.addr.pgn);
* cleanup including propagation of the error to user space.
*/
break;
+ case -EOVERFLOW:
+ j1939_session_cancel(session, J1939_XTP_ABORT_ECTS_TOO_BIG);
+ break;
case 0:
session->tx_retry = 0;
break;
return;
}
session = j1939_xtp_rx_rts_session_new(priv, skb);
- if (!session)
+ if (!session) {
+ if (cmd == J1939_TP_CMD_BAM && j1939_sk_recv_match(priv, skcb))
+ netdev_info(priv->ndev, "%s: failed to create TP BAM session\n",
+ __func__);
return;
+ }
} else {
if (j1939_xtp_rx_rts_session_active(session, skb)) {
j1939_session_put(session);
}
session->last_cmd = cmd;
- j1939_tp_set_rxtimeout(session, 1250);
-
- if (cmd != J1939_TP_CMD_BAM && !session->transmission) {
- j1939_session_txtimer_cancel(session);
- j1939_tp_schedule_txtimer(session, 0);
+ if (cmd == J1939_TP_CMD_BAM) {
+ if (!session->transmission)
+ j1939_tp_set_rxtimeout(session, 750);
+ } else {
+ if (!session->transmission) {
+ j1939_session_txtimer_cancel(session);
+ j1939_tp_schedule_txtimer(session, 0);
+ }
+ j1939_tp_set_rxtimeout(session, 1250);
}
j1939_session_put(session);
int offset;
int nbytes;
bool final = false;
+ bool remain = false;
bool do_cts_eoma = false;
int packet;
case J1939_ETP_CMD_DPO:
if (skcb->addr.type == J1939_ETP)
break;
- /* fall through */
- case J1939_TP_CMD_BAM: /* fall through */
+ fallthrough;
+ case J1939_TP_CMD_BAM:
case J1939_TP_CMD_CTS: /* fall through */
if (skcb->addr.type != J1939_ETP)
break;
- /* fall through */
+ fallthrough;
default:
netdev_info(priv->ndev, "%s: 0x%p: last %02x\n", __func__,
session, session->last_cmd);
__func__, session);
goto out_session_cancel;
}
- se_skb = j1939_session_skb_find(session);
+
+ se_skb = j1939_session_skb_find_by_offset(session, packet * 7);
if (!se_skb) {
netdev_warn(priv->ndev, "%s: 0x%p: no skb found\n", __func__,
session);
}
tpdat = se_skb->data;
- memcpy(&tpdat[offset], &dat[1], nbytes);
+ if (!session->transmission) {
+ memcpy(&tpdat[offset], &dat[1], nbytes);
+ } else {
+ int err;
+
+ err = memcmp(&tpdat[offset], &dat[1], nbytes);
+ if (err)
+ netdev_err_once(priv->ndev,
+ "%s: 0x%p: Data of RX-looped back packet (%*ph) doesn't match TX data (%*ph)!\n",
+ __func__, session,
+ nbytes, &dat[1],
+ nbytes, &tpdat[offset]);
+ }
+
if (packet == session->pkt.rx)
session->pkt.rx++;
j1939_cb_is_broadcast(&session->skcb)) {
if (session->pkt.rx >= session->pkt.total)
final = true;
+ else
+ remain = true;
} else {
/* never final, an EOMA must follow */
if (session->pkt.rx >= session->pkt.last)
}
if (final) {
+ j1939_session_timers_cancel(session);
j1939_session_completed(session);
+ } else if (remain) {
+ if (!session->transmission)
+ j1939_tp_set_rxtimeout(session, 750);
} else if (do_cts_eoma) {
j1939_tp_set_rxtimeout(session, 1250);
if (!session->transmission)
else
j1939_xtp_rx_dat_one(session, skb);
}
+
+ if (j1939_cb_is_broadcast(skcb)) {
+ session = j1939_session_get_by_addr(priv, &skcb->addr, false,
+ false);
+ if (session)
+ j1939_xtp_rx_dat_one(session, skb);
+ }
}
/* j1939 main intf */
switch (cmd) {
case J1939_ETP_CMD_RTS:
extd = J1939_ETP;
- /* fall through */
- case J1939_TP_CMD_BAM: /* fall through */
+ fallthrough;
+ case J1939_TP_CMD_BAM:
case J1939_TP_CMD_RTS: /* fall through */
if (skcb->addr.type != extd)
return;
if (j1939_tp_im_transmitter(skcb))
j1939_xtp_rx_rts(priv, skb, true);
- if (j1939_tp_im_receiver(skcb))
+ if (j1939_tp_im_receiver(skcb) || j1939_cb_is_broadcast(skcb))
j1939_xtp_rx_rts(priv, skb, false);
break;
case J1939_ETP_CMD_CTS:
extd = J1939_ETP;
- /* fall through */
+ fallthrough;
case J1939_TP_CMD_CTS:
if (skcb->addr.type != extd)
return;
case J1939_ETP_CMD_EOMA:
extd = J1939_ETP;
- /* fall through */
+ fallthrough;
case J1939_TP_CMD_EOMA:
if (skcb->addr.type != extd)
return;
{
struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
- if (!j1939_tp_im_involved_anydir(skcb))
+ if (!j1939_tp_im_involved_anydir(skcb) && !j1939_cb_is_broadcast(skcb))
return 0;
switch (skcb->addr.pgn) {
case J1939_ETP_PGN_DAT:
skcb->addr.type = J1939_ETP;
- /* fall through */
+ fallthrough;
case J1939_TP_PGN_DAT:
j1939_xtp_rx_dat(priv, skb);
break;
case J1939_ETP_PGN_CTL:
skcb->addr.type = J1939_ETP;
- /* fall through */
+ fallthrough;
case J1939_TP_PGN_CTL:
if (skb->len < 8)
return 0; /* Don't care. Nothing to extract here */
if (!skb->sk)
return;
+ if (skb->sk->sk_family != AF_CAN ||
+ skb->sk->sk_protocol != CAN_J1939)
+ return;
+
j1939_session_list_lock(priv);
session = j1939_session_get_simple(priv, skb);
j1939_session_list_unlock(priv);
switch (len) {
case 11:
c = c + ((__u32)k[10] << 24);
- /* fall through */
+ fallthrough;
case 10:
c = c + ((__u32)k[9] << 16);
- /* fall through */
+ fallthrough;
case 9:
c = c + ((__u32)k[8] << 8);
/* the first byte of c is reserved for the length */
- /* fall through */
+ fallthrough;
case 8:
b = b + ((__u32)k[7] << 24);
- /* fall through */
+ fallthrough;
case 7:
b = b + ((__u32)k[6] << 16);
- /* fall through */
+ fallthrough;
case 6:
b = b + ((__u32)k[5] << 8);
- /* fall through */
+ fallthrough;
case 5:
b = b + k[4];
- /* fall through */
+ fallthrough;
case 4:
a = a + ((__u32)k[3] << 24);
- /* fall through */
+ fallthrough;
case 3:
a = a + ((__u32)k[2] << 16);
- /* fall through */
+ fallthrough;
case 2:
a = a + ((__u32)k[1] << 8);
- /* fall through */
+ fallthrough;
case 1:
a = a + k[0];
/* case 0: nothing left to add */
case CRUSH_RULE_CHOOSELEAF_FIRSTN:
case CRUSH_RULE_CHOOSE_FIRSTN:
firstn = 1;
- /* fall through */
+ fallthrough;
case CRUSH_RULE_CHOOSELEAF_INDEP:
case CRUSH_RULE_CHOOSE_INDEP:
if (wsize == 0)
switch (sk->sk_state) {
case TCP_CLOSE:
dout("%s TCP_CLOSE\n", __func__);
- /* fall through */
+ fallthrough;
case TCP_CLOSE_WAIT:
dout("%s TCP_CLOSE_WAIT\n", __func__);
con_sock_state_closing(con);
switch (ret) {
case -EBADMSG:
con->error_msg = "bad crc/signature";
- /* fall through */
+ fallthrough;
case -EBADE:
ret = -EIO;
break;
* request had a non-zero tid. Work around this weirdness
* by allocating a new message.
*/
- /* fall through */
+ fallthrough;
case CEPH_MSG_MON_MAP:
case CEPH_MSG_MDS_MAP:
case CEPH_MSG_OSD_MAP:
if (!force_resend && !force_resend_writes)
break;
- /* fall through */
+ fallthrough;
case CALC_TARGET_NEED_RESEND:
cancel_linger_map_check(lreq);
/*
!force_resend_writes))
break;
- /* fall through */
+ fallthrough;
case CALC_TARGET_NEED_RESEND:
cancel_map_check(req);
unlink_request(osd, req);
break;
default:
bpf_warn_invalid_xdp_action(act);
- /* fall through */
+ fallthrough;
case XDP_ABORTED:
trace_xdp_exception(skb->dev, xdp_prog, act);
- /* fall through */
+ fallthrough;
case XDP_DROP:
do_drop:
kfree_skb(skb);
netdev_err_once(dev, "%s() called with weight %d\n", __func__,
weight);
napi->weight = weight;
- list_add(&napi->dev_list, &dev->napi_list);
napi->dev = dev;
#ifdef CONFIG_NETPOLL
napi->poll_owner = -1;
#endif
set_bit(NAPI_STATE_SCHED, &napi->state);
+ set_bit(NAPI_STATE_NPSVC, &napi->state);
+ list_add_rcu(&napi->dev_list, &dev->napi_list);
napi_hash_add(napi);
}
EXPORT_SYMBOL(netif_napi_add);
int flags;
};
-static enum bpf_xdp_mode dev_xdp_mode(u32 flags)
+static enum bpf_xdp_mode dev_xdp_mode(struct net_device *dev, u32 flags)
{
if (flags & XDP_FLAGS_HW_MODE)
return XDP_MODE_HW;
if (flags & XDP_FLAGS_DRV_MODE)
return XDP_MODE_DRV;
- return XDP_MODE_SKB;
+ if (flags & XDP_FLAGS_SKB_MODE)
+ return XDP_MODE_SKB;
+ return dev->netdev_ops->ndo_bpf ? XDP_MODE_DRV : XDP_MODE_SKB;
}
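
dev_xdp_mode() now derives the default from the device itself: native mode when the driver implements ndo_bpf, generic otherwise, with explicit flags taking precedence. A freestanding sketch of that selection order; the FORCE_* flag values and 'struct dev' are hypothetical stand-ins for the XDP_FLAGS_* bits and net_device:

#include <stdio.h>
#include <stdbool.h>

enum mode { MODE_SKB, MODE_DRV, MODE_HW };

#define FORCE_HW  0x1	/* hypothetical flag values */
#define FORCE_DRV 0x2
#define FORCE_SKB 0x4

struct dev { bool has_ndo_bpf; };

static enum mode pick_mode(const struct dev *d, unsigned int flags)
{
	if (flags & FORCE_HW)
		return MODE_HW;
	if (flags & FORCE_DRV)
		return MODE_DRV;
	if (flags & FORCE_SKB)
		return MODE_SKB;
	/* no explicit request: prefer native if the driver supports it */
	return d->has_ndo_bpf ? MODE_DRV : MODE_SKB;
}

int main(void)
{
	struct dev capable = { .has_ndo_bpf = true };
	struct dev plain = { .has_ndo_bpf = false };

	printf("%d %d\n", pick_mode(&capable, 0), pick_mode(&plain, 0));
	return 0;
}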
static bpf_op_t dev_xdp_bpf_op(struct net_device *dev, enum bpf_xdp_mode mode)
return -EINVAL;
}
- mode = dev_xdp_mode(flags);
+ mode = dev_xdp_mode(dev, flags);
/* can't replace attached link */
if (dev_xdp_link(dev, mode)) {
NL_SET_ERR_MSG(extack, "Can't replace active BPF XDP link");
NL_SET_ERR_MSG(extack, "Active program does not match expected");
return -EEXIST;
}
- if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && cur_prog) {
- NL_SET_ERR_MSG(extack, "XDP program already attached");
- return -EBUSY;
- }
/* put effective new program into new_prog */
if (link)
enum bpf_xdp_mode other_mode = mode == XDP_MODE_SKB
? XDP_MODE_DRV : XDP_MODE_SKB;
+ if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && cur_prog) {
+ NL_SET_ERR_MSG(extack, "XDP program already attached");
+ return -EBUSY;
+ }
if (!offload && dev_xdp_prog(dev, other_mode)) {
NL_SET_ERR_MSG(extack, "Native and generic XDP can't be active at the same time");
return -EEXIST;
ASSERT_RTNL();
- mode = dev_xdp_mode(link->flags);
+ mode = dev_xdp_mode(dev, link->flags);
if (dev_xdp_link(dev, mode) != link)
return -EINVAL;
goto out_unlock;
}
- mode = dev_xdp_mode(xdp_link->flags);
+ mode = dev_xdp_mode(xdp_link->dev, xdp_link->flags);
bpf_op = dev_xdp_bpf_op(xdp_link->dev, mode);
err = dev_xdp_install(xdp_link->dev, mode, bpf_op, NULL,
xdp_link->flags, new_prog);
int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
int fd, int expected_fd, u32 flags)
{
- enum bpf_xdp_mode mode = dev_xdp_mode(flags);
+ enum bpf_xdp_mode mode = dev_xdp_mode(dev, flags);
struct bpf_prog *new_prog = NULL, *old_prog = NULL;
int err;
err = net_hwtstamp_validate(ifr);
if (err)
return err;
- /* fall through */
+ fallthrough;
/*
* Unknown or private ioctl
case SIOCSIFTXQLEN:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- /* fall through */
+ fallthrough;
/*
* These ioctl calls:
* - require local superuser power.
case SIOCSHWTSTAMP:
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
return -EPERM;
- /* fall through */
+ fallthrough;
case SIOCBONDSLAVEINFOQUERY:
case SIOCBONDINFOQUERY:
dev_load(net, ifr->ifr_name);
{
lockdep_assert_held(&devlink->lock);
- if (WARN_ON(xa_load(&devlink->snapshot_ids, id)))
+ if (xa_load(&devlink->snapshot_ids, id))
return -EEXIST;
return xa_err(xa_store(&devlink->snapshot_ids, id, xa_mk_value(0),
val = nla_get_u8(info->attrs[DEVLINK_ATTR_TRAP_ACTION]);
switch (val) {
- case DEVLINK_TRAP_ACTION_DROP: /* fall-through */
- case DEVLINK_TRAP_ACTION_TRAP: /* fall-through */
+ case DEVLINK_TRAP_ACTION_DROP:
+ case DEVLINK_TRAP_ACTION_TRAP:
case DEVLINK_TRAP_ACTION_MIRROR:
*p_trap_action = val;
break;
val = nla_get_u8(info->attrs[NET_DM_ATTR_ALERT_MODE]);
switch (val) {
- case NET_DM_ALERT_MODE_SUMMARY: /* fall-through */
+ case NET_DM_ALERT_MODE_SUMMARY:
case NET_DM_ALERT_MODE_PACKET:
*p_alert_mode = val;
break;
/* Helper macro for adding read access to tcp_sock or sock fields. */
#define SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ) \
do { \
+ int fullsock_reg = si->dst_reg, reg = BPF_REG_9, jmp = 2; \
BUILD_BUG_ON(sizeof_field(OBJ, OBJ_FIELD) > \
sizeof_field(struct bpf_sock_ops, BPF_FIELD)); \
+ if (si->dst_reg == reg || si->src_reg == reg) \
+ reg--; \
+ if (si->dst_reg == reg || si->src_reg == reg) \
+ reg--; \
+ if (si->dst_reg == si->src_reg) { \
+ *insn++ = BPF_STX_MEM(BPF_DW, si->src_reg, reg, \
+ offsetof(struct bpf_sock_ops_kern, \
+ temp)); \
+ fullsock_reg = reg; \
+ jmp += 2; \
+ } \
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
struct bpf_sock_ops_kern, \
is_fullsock), \
- si->dst_reg, si->src_reg, \
+ fullsock_reg, si->src_reg, \
offsetof(struct bpf_sock_ops_kern, \
is_fullsock)); \
- *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 2); \
+ *insn++ = BPF_JMP_IMM(BPF_JEQ, fullsock_reg, 0, jmp); \
+ if (si->dst_reg == si->src_reg) \
+ *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \
+ offsetof(struct bpf_sock_ops_kern, \
+ temp)); \
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
struct bpf_sock_ops_kern, sk),\
si->dst_reg, si->src_reg, \
OBJ_FIELD), \
si->dst_reg, si->dst_reg, \
offsetof(OBJ, OBJ_FIELD)); \
+ if (si->dst_reg == si->src_reg) { \
+ *insn++ = BPF_JMP_A(1); \
+ *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \
+ offsetof(struct bpf_sock_ops_kern, \
+ temp)); \
+ } \
+ } while (0)
+
+#define SOCK_OPS_GET_SK() \
+ do { \
+ int fullsock_reg = si->dst_reg, reg = BPF_REG_9, jmp = 1; \
+ if (si->dst_reg == reg || si->src_reg == reg) \
+ reg--; \
+ if (si->dst_reg == reg || si->src_reg == reg) \
+ reg--; \
+ if (si->dst_reg == si->src_reg) { \
+ *insn++ = BPF_STX_MEM(BPF_DW, si->src_reg, reg, \
+ offsetof(struct bpf_sock_ops_kern, \
+ temp)); \
+ fullsock_reg = reg; \
+ jmp += 2; \
+ } \
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
+ struct bpf_sock_ops_kern, \
+ is_fullsock), \
+ fullsock_reg, si->src_reg, \
+ offsetof(struct bpf_sock_ops_kern, \
+ is_fullsock)); \
+ *insn++ = BPF_JMP_IMM(BPF_JEQ, fullsock_reg, 0, jmp); \
+ if (si->dst_reg == si->src_reg) \
+ *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \
+ offsetof(struct bpf_sock_ops_kern, \
+ temp)); \
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
+ struct bpf_sock_ops_kern, sk),\
+ si->dst_reg, si->src_reg, \
+ offsetof(struct bpf_sock_ops_kern, sk));\
+ if (si->dst_reg == si->src_reg) { \
+ *insn++ = BPF_JMP_A(1); \
+ *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \
+ offsetof(struct bpf_sock_ops_kern, \
+ temp)); \
+ } \
} while (0)
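
The macros above need a scratch BPF register that collides with neither si->src_reg nor si->dst_reg; they start at BPF_REG_9 and decrement past any collision, and two checks suffice to dodge two registers. A freestanding sketch of that selection (register numbers are plain ints here for illustration):

#include <stdio.h>

static int pick_scratch(int src, int dst)
{
	int reg = 9;			/* top candidate, as with BPF_REG_9 */

	if (reg == src || reg == dst)
		reg--;
	if (reg == src || reg == dst)
		reg--;
	return reg;			/* now distinct from src and dst */
}

int main(void)
{
	printf("%d %d %d\n",
	       pick_scratch(1, 2),	/* 9 */
	       pick_scratch(9, 2),	/* 8 */
	       pick_scratch(9, 8));	/* 7 */
	return 0;
}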
#define SOCK_OPS_GET_TCP_SOCK_FIELD(FIELD) \
SOCK_OPS_GET_TCP_SOCK_FIELD(bytes_acked);
break;
case offsetof(struct bpf_sock_ops, sk):
- *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
- struct bpf_sock_ops_kern,
- is_fullsock),
- si->dst_reg, si->src_reg,
- offsetof(struct bpf_sock_ops_kern,
- is_fullsock));
- *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
- *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
- struct bpf_sock_ops_kern, sk),
- si->dst_reg, si->src_reg,
- offsetof(struct bpf_sock_ops_kern, sk));
+ SOCK_OPS_GET_SK();
break;
}
return insn - insn_buf;
case bpf_ctx_range(struct sk_reuseport_md, eth_protocol):
if (size < sizeof_field(struct sk_buff, protocol))
return false;
- /* fall through */
+ fallthrough;
case bpf_ctx_range(struct sk_reuseport_md, ip_protocol):
case bpf_ctx_range(struct sk_reuseport_md, bind_inany):
case bpf_ctx_range(struct sk_reuseport_md, len):
struct napi_struct *napi;
int cpu = smp_processor_id();
- list_for_each_entry(napi, &dev->napi_list, dev_list) {
+ list_for_each_entry_rcu(napi, &dev->napi_list, dev_list) {
if (cmpxchg(&napi->poll_owner, -1, cpu) == -1) {
poll_one_napi(napi);
smp_store_release(&napi->poll_owner, -1);
net_info_ratelimited("%s xmit error: %d\n",
pkt_dev->odevname, ret);
pkt_dev->errors++;
- /* fall through */
+ fallthrough;
case NETDEV_TX_BUSY:
/* Retry it next time */
refcount_dec(&(pkt_dev->skb->users));
cpu_to_node(cpu),
"kpktgend_%d", cpu);
if (IS_ERR(p)) {
- pr_err("kernel_thread() failed for cpu %d\n", t->cpu);
+ pr_err("kthread_create_on_node() failed for cpu %d\n", t->cpu);
list_del(&t->th_list);
kfree(t);
return PTR_ERR(p);
}
EXPORT_SYMBOL(skb_tx_error);
+#ifdef CONFIG_TRACEPOINTS
/**
* consume_skb - free an skbuff
* @skb: buffer to free
__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);
+#endif
/**
* consume_stateless_skb - free an skbuff, assuming it is stateless
skb = skb_share_check(skb, GFP_ATOMIC);
if (unlikely(!skb))
goto err_free;
-
- if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
+ /* We may access the two bytes after vlan_hdr in vlan_set_encap_proto(). */
+ if (unlikely(!pskb_may_pull(skb, VLAN_HLEN + sizeof(unsigned short))))
goto err_free;
vhdr = (struct vlan_hdr *)skb->data;
if (skb_has_frag_list(skb))
skb_clone_fraglist(skb);
- if (k == 0) {
- /* split line is in frag list */
- pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask);
+ /* split line is in frag list */
+ if (k == 0 && pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask)) {
+ /* skb_frag_unref() is not needed here as shinfo->nr_frags = 0. */
+ if (skb_has_frag_list(skb))
+ kfree_skb_list(skb_shinfo(skb)->frag_list);
+ kfree(data);
+ return -ENOMEM;
}
skb_release_data(skb);
sk_psock_skb_redirect(skb);
break;
case __SK_DROP:
- /* fall-through */
default:
out_free:
kfree_skb(skb);
break;
case SO_TIMESTAMPING_NEW:
sock_set_flag(sk, SOCK_TSTAMP_NEW);
- /* fall through */
+ fallthrough;
case SO_TIMESTAMPING_OLD:
if (val & ~SOF_TIMESTAMPING_MASK) {
ret = -EINVAL;
sk->sk_prot->destroy(sk);
/*
- * Observation: when sock_common_release is called, processes have
+ * Observation: when sk_common_release is called, processes have
* no access to socket. But net still has.
* Step one, detach it from networking:
*
*/
if (hc->rx_x_recv > 0)
break;
- /* fall through */
+ fallthrough;
case CCID3_FBACK_PERIODIC:
delta = ktime_us_delta(now, hc->rx_tstamp_last_feedback);
if (delta <= 0)
* Negotiation during connection setup
*/
case DCCP_LISTEN:
- server = true; /* fall through */
+ server = true;
+ fallthrough;
case DCCP_REQUESTING:
switch (opt) {
case DCCPO_CHANGE_L:
*/
if (dccp_sk(sk)->dccps_role != DCCP_ROLE_CLIENT)
break;
- /* fall through */
+ fallthrough;
case DCCP_REQUESTING:
case DCCP_ACTIVE_CLOSEREQ:
dccp_send_reset(sk, DCCP_RESET_CODE_CLOSED);
queued = 1;
dccp_fin(sk, skb);
dccp_set_state(sk, DCCP_PASSIVE_CLOSE);
- /* fall through */
+ fallthrough;
case DCCP_PASSIVE_CLOSE:
/*
* Retransmitted Close: we have already enqueued the first one.
queued = 1;
dccp_fin(sk, skb);
dccp_set_state(sk, DCCP_PASSIVE_CLOSEREQ);
- /* fall through */
+ fallthrough;
case DCCP_PASSIVE_CLOSEREQ:
sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
}
case DCCP_PKT_DATA:
if (sk->sk_state == DCCP_RESPOND)
break;
- /* fall through */
+ fallthrough;
case DCCP_PKT_DATAACK:
case DCCP_PKT_ACK:
/*
/* Step 8: if using Ack Vectors, mark packet acknowledgeable */
dccp_handle_ackvec_processing(sk, skb);
dccp_deliver_input_to_ccids(sk, skb);
- /* fall through */
+ fallthrough;
case DCCP_RESPOND:
queued = dccp_rcv_respond_partopen_state_process(sk, skb,
dh, len);
* interested. The RX CCID need not parse Ack Vectors,
* since it is only interested in clearing old state.
*/
- /* fall through */
+ fallthrough;
case DCCPO_MIN_TX_CCID_SPECIFIC ... DCCPO_MAX_TX_CCID_SPECIFIC:
if (ccid_hc_tx_parse_options(dp->dccps_hc_tx_ccid, sk,
pkt_type, opt, value, len))
switch (dcb->dccpd_type) {
case DCCP_PKT_DATA:
set_ack = 0;
- /* fall through */
+ fallthrough;
case DCCP_PKT_DATAACK:
case DCCP_PKT_RESET:
break;
/* Use ISS on the first (non-retransmitted) Request. */
if (icsk->icsk_retransmits == 0)
dcb->dccpd_seq = dp->dccps_iss;
- /* fall through */
+ fallthrough;
case DCCP_PKT_SYNC:
case DCCP_PKT_SYNCACK:
ackno = dcb->dccpd_ack_seq;
- /* fall through */
+ fallthrough;
default:
/*
* Set owner/destructor: some skbs are allocated via
case DCCP_RESET_CODE_PACKET_ERROR:
dhr->dccph_reset_data[0] = rxdh->dccph_type;
break;
- case DCCP_RESET_CODE_OPTION_ERROR: /* fall through */
+ case DCCP_RESET_CODE_OPTION_ERROR:
case DCCP_RESET_CODE_MANDATORY_ERROR:
memcpy(dhr->dccph_reset_data, dcb->dccpd_reset_data, 3);
break;
if (inet_csk(sk)->icsk_bind_hash != NULL &&
!(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
inet_put_port(sk);
- /* fall through */
+ fallthrough;
default:
if (oldstate == DCCP_OPEN)
DCCP_DEC_STATS(DCCP_MIB_CURRESTAB);
case DCCP_PKT_CLOSEREQ:
if (!(flags & MSG_PEEK))
dccp_finish_passive_close(sk);
- /* fall through */
+ fallthrough;
case DCCP_PKT_RESET:
dccp_pr_debug("found fin (%s) ok!\n",
dccp_packet_name(dh->dccph_type));
case DCCP_PARTOPEN:
dccp_pr_debug("Stop PARTOPEN timer (%p)\n", sk);
inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
- /* fall through */
+ fallthrough;
case DCCP_OPEN:
dccp_send_close(sk, 1);
next_state = DCCP_ACTIVE_CLOSEREQ;
else
next_state = DCCP_CLOSING;
- /* fall through */
+ fallthrough;
default:
dccp_set_state(sk, next_state);
}
goto disc_reject;
case DN_RUN:
scp->state = DN_DI;
- /* fall through */
+ fallthrough;
case DN_DI:
case DN_DR:
disc_reject:
dn_nsp_send_disc(sk, NSP_DISCINIT, 0, sk->sk_allocation);
- /* fall through */
+ fallthrough;
case DN_NC:
case DN_NR:
case DN_RJ:
break;
default:
printk(KERN_DEBUG "DECnet: dn_destroy_sock passed socket in invalid state\n");
- /* fall through */
+ fallthrough;
case DN_O:
dn_stop_slow_timer(sk);
break;
case DN_RUN:
sk->sk_shutdown |= SHUTDOWN_MASK;
- /* fall through */
+ fallthrough;
case DN_CC:
scp->state = DN_CN;
}
default:
printk(KERN_DEBUG "DECnet: dn_rehash_zone: BUG! %d\n",
old_divisor);
- /* fall through */
+ fallthrough;
case 256:
new_divisor = 1024;
new_hashmask = 0x3FF;
case '\r':
case ':':
*str = 0;
- /* Fallthrough */
+ fallthrough;
case 0:
return;
}
switchdev_work->event = event;
switch (event) {
- case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
case SWITCHDEV_FDB_DEL_TO_DEVICE:
if (dsa_slave_switchdev_fdb_work_init(switchdev_work, ptr))
goto err_fdb_work_init;
DECLARE_BITMAP(wanted_diff_mask, NETDEV_FEATURE_COUNT);
DECLARE_BITMAP(active_diff_mask, NETDEV_FEATURE_COUNT);
DECLARE_BITMAP(old_active, NETDEV_FEATURE_COUNT);
+ DECLARE_BITMAP(old_wanted, NETDEV_FEATURE_COUNT);
DECLARE_BITMAP(new_active, NETDEV_FEATURE_COUNT);
+ DECLARE_BITMAP(new_wanted, NETDEV_FEATURE_COUNT);
DECLARE_BITMAP(req_wanted, NETDEV_FEATURE_COUNT);
DECLARE_BITMAP(req_mask, NETDEV_FEATURE_COUNT);
struct nlattr *tb[ETHTOOL_A_FEATURES_MAX + 1];
rtnl_lock();
ethnl_features_to_bitmap(old_active, dev->features);
+ ethnl_features_to_bitmap(old_wanted, dev->wanted_features);
ret = ethnl_parse_bitset(req_wanted, req_mask, NETDEV_FEATURE_COUNT,
tb[ETHTOOL_A_FEATURES_WANTED],
netdev_features_strings, info->extack);
goto out_rtnl;
}
- /* set req_wanted bits not in req_mask from old_active */
+ /* set req_wanted bits not in req_mask from old_wanted */
bitmap_and(req_wanted, req_wanted, req_mask, NETDEV_FEATURE_COUNT);
- bitmap_andnot(new_active, old_active, req_mask, NETDEV_FEATURE_COUNT);
- bitmap_or(req_wanted, new_active, req_wanted, NETDEV_FEATURE_COUNT);
- if (bitmap_equal(req_wanted, old_active, NETDEV_FEATURE_COUNT)) {
- ret = 0;
- goto out_rtnl;
+ bitmap_andnot(new_wanted, old_wanted, req_mask, NETDEV_FEATURE_COUNT);
+ bitmap_or(req_wanted, new_wanted, req_wanted, NETDEV_FEATURE_COUNT);
+ if (!bitmap_equal(req_wanted, old_wanted, NETDEV_FEATURE_COUNT)) {
+ dev->wanted_features &= ~dev->hw_features;
+ dev->wanted_features |= ethnl_bitmap_to_features(req_wanted) & dev->hw_features;
+ __netdev_update_features(dev);
}
-
- dev->wanted_features = ethnl_bitmap_to_features(req_wanted);
- __netdev_update_features(dev);
ethnl_features_to_bitmap(new_active, dev->features);
mod = !bitmap_equal(old_active, new_active, NETDEV_FEATURE_COUNT);
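
The reworked logic merges the request into the previously wanted set rather than the active set: bits covered by req_mask come from the request, all others keep their old wanted value. The same merge expressed with plain 8-bit masks (values made up for illustration):

#include <stdio.h>

int main(void)
{
	unsigned char old_wanted = 0xF0;
	unsigned char req_wanted = 0x0A;
	unsigned char req_mask   = 0x0F;	/* only low bits requested */
	unsigned char merged;

	merged = (old_wanted & ~req_mask) | (req_wanted & req_mask);
	printf("0x%02X\n", merged);		/* prints 0xFA */
	return 0;
}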
net_warn_ratelimited("%s: received unknown dispatch\n",
__func__);
- /* fall-through */
+ fallthrough;
default:
/* all others failure */
return NET_RX_DROP;
net_warn_ratelimited("%s: received unknown dispatch\n",
__func__);
- /* fall-through */
+ fallthrough;
case RX_DROP_UNUSABLE:
kfree_skb(skb);
- /* fall-through */
+ fallthrough;
case RX_DROP:
return NET_RX_DROP;
case RX_QUEUED:
BBR (Bottleneck Bandwidth and RTT) TCP congestion control aims to
maximize network utilization and minimize queues. It builds an explicit
- model of the the bottleneck delivery rate and path round-trip
- propagation delay. It tolerates packet loss and delay unrelated to
- congestion. It can operate over LAN, WAN, cellular, wifi, or cable
- modem links. It can coexist with flows that use loss-based congestion
- control, and can operate with shallow buffers, deep buffers,
- bufferbloat, policers, or AQM schemes that do not provide a delay
- signal. It requires the fq ("Fair Queue") pacing packet scheduler.
+ model of the bottleneck delivery rate and path round-trip propagation
+ delay. It tolerates packet loss and delay unrelated to congestion. It
+ can operate over LAN, WAN, cellular, wifi, or cable modem links. It can
+ coexist with flows that use loss-based congestion control, and can
+ operate with shallow buffers, deep buffers, bufferbloat, policers, or
+ AQM schemes that do not provide a delay signal. It requires the fq
+ ("Fair Queue") pacing packet scheduler.
choice
prompt "Default TCP congestion control"
struct hlist_head *head = &net->ipv4.fib_table_hash[h];
struct fib_table *tb;
- hlist_for_each_entry_rcu(tb, head, tb_hlist)
+ hlist_for_each_entry_rcu(tb, head, tb_hlist,
+ lockdep_rtnl_is_held())
__fib_info_notify_update(net, tb, info);
}
}
* nf_nat_pptp.c
*
* NAT support for PPTP (Point to Point Tunneling Protocol).
- * PPTP is a a protocol for creating virtual private networks.
+ * PPTP is a protocol for creating virtual private networks.
* It is a specification defined by Microsoft and some vendors
* working with Microsoft. PPTP is built on top of a modified
* version of the Internet Generic Routing Encapsulation Protocol.
unsigned int i, j;
u8 nhg_fdb = 0;
- if (len & (sizeof(struct nexthop_grp) - 1)) {
+ if (!len || len & (sizeof(struct nexthop_grp) - 1)) {
NL_SET_ERR_MSG(extack,
"Invalid length for nexthop group attribute");
return -EINVAL;
struct nexthop *nh;
int i;
+ if (WARN_ON(!num_nh))
+ return ERR_PTR(-EINVAL);
+
nh = nexthop_alloc();
if (!nh)
return ERR_PTR(-ENOMEM);
} else if (!ipc.oif) {
ipc.oif = inet->uc_index;
} else if (ipv4_is_lbcast(daddr) && inet->uc_index) {
- /* oif is set, packet is to local broadcast and
+ /* oif is set, packet is to local broadcast
* and uc_index is set. oif is most likely set
* by sk_bound_dev_if. If uc_index != oif check if the
* oif is an L3 master and uc_index is an L3 slave.
* 2. does the address exist on the specific device
* (skip_dev_check = false)
*/
-int ipv6_chk_addr_and_flags(struct net *net, const struct in6_addr *addr,
- const struct net_device *dev, bool skip_dev_check,
- int strict, u32 banned_flags)
+static struct net_device *
+__ipv6_chk_addr_and_flags(struct net *net, const struct in6_addr *addr,
+ const struct net_device *dev, bool skip_dev_check,
+ int strict, u32 banned_flags)
{
unsigned int hash = inet6_addr_hash(net, addr);
- const struct net_device *l3mdev;
+ struct net_device *l3mdev, *ndev;
struct inet6_ifaddr *ifp;
u32 ifp_flags;
dev = NULL;
hlist_for_each_entry_rcu(ifp, &inet6_addr_lst[hash], addr_lst) {
- if (!net_eq(dev_net(ifp->idev->dev), net))
+ ndev = ifp->idev->dev;
+ if (!net_eq(dev_net(ndev), net))
continue;
- if (l3mdev_master_dev_rcu(ifp->idev->dev) != l3mdev)
+ if (l3mdev_master_dev_rcu(ndev) != l3mdev)
continue;
/* Decouple optimistic from tentative for evaluation here.
: ifp->flags;
if (ipv6_addr_equal(&ifp->addr, addr) &&
!(ifp_flags&banned_flags) &&
- (!dev || ifp->idev->dev == dev ||
+ (!dev || ndev == dev ||
!(ifp->scope&(IFA_LINK|IFA_HOST) || strict))) {
rcu_read_unlock();
- return 1;
+ return ndev;
}
}
rcu_read_unlock();
- return 0;
+ return NULL;
+}
+
+int ipv6_chk_addr_and_flags(struct net *net, const struct in6_addr *addr,
+ const struct net_device *dev, bool skip_dev_check,
+ int strict, u32 banned_flags)
+{
+ return __ipv6_chk_addr_and_flags(net, addr, dev, skip_dev_check,
+ strict, banned_flags) ? 1 : 0;
}
EXPORT_SYMBOL(ipv6_chk_addr_and_flags);
*
* The caller should be protected by RCU, or RTNL.
*/
-struct net_device *ipv6_dev_find(struct net *net, const struct in6_addr *addr)
+struct net_device *ipv6_dev_find(struct net *net, const struct in6_addr *addr,
+ struct net_device *dev)
{
- unsigned int hash = inet6_addr_hash(net, addr);
- struct inet6_ifaddr *ifp, *result = NULL;
- struct net_device *dev = NULL;
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(ifp, &inet6_addr_lst[hash], addr_lst) {
- if (net_eq(dev_net(ifp->idev->dev), net) &&
- ipv6_addr_equal(&ifp->addr, addr)) {
- result = ifp;
- break;
- }
- }
-
- if (!result) {
- struct rt6_info *rt;
-
- rt = rt6_lookup(net, addr, NULL, 0, NULL, 0);
- if (rt) {
- dev = rt->dst.dev;
- ip6_rt_put(rt);
- }
- } else {
- dev = result->idev->dev;
- }
- rcu_read_unlock();
-
- return dev;
+ return __ipv6_chk_addr_and_flags(net, addr, dev, !dev, 1,
+ IFA_F_TENTATIVE);
}
EXPORT_SYMBOL(ipv6_dev_find);
struct metadata_dst *tun_dst,
bool log_ecn_err)
{
- return __ip6_tnl_rcv(t, skb, tpi, tun_dst, ip6ip6_dscp_ecn_decapsulate,
+ int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
+ const struct ipv6hdr *ipv6h,
+ struct sk_buff *skb);
+
+ dscp_ecn_decapsulate = ip6ip6_dscp_ecn_decapsulate;
+ if (tpi->proto == htons(ETH_P_IP))
+ dscp_ecn_decapsulate = ip4ip6_dscp_ecn_decapsulate;
+
+ return __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate,
log_ecn_err);
}
EXPORT_SYMBOL(ip6_tnl_rcv);
.route_input = ip6_route_input,
.fragment = ip6_fragment,
.reroute = nf_ip6_reroute,
-#if IS_MODULE(CONFIG_IPV6) && IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
- .br_defrag = nf_ct_frag6_gather,
-#endif
#if IS_MODULE(CONFIG_IPV6)
.br_fragment = br_ip6_fragment,
#endif
#include <net/calipso.h>
#endif
+static int two = 2;
static int flowlabel_reflect_max = 0x7;
static int auto_flowlabels_min;
static int auto_flowlabels_max = IP6_AUTO_FLOW_LABEL_MAX;
.mode = 0644,
.proc_handler = proc_rt6_multipath_hash_policy,
.extra1 = SYSCTL_ZERO,
- .extra2 = SYSCTL_ONE,
+ .extra2 = &two,
},
{
.procname = "seg6_flowlabel",
sk->sk_state = IUCV_DISCONN;
sk->sk_state_change(sk);
}
- /* fall through */
+ fallthrough;
case IUCV_DISCONN:
sk->sk_state = IUCV_CLOSING;
iucv_sock_in_state(sk, IUCV_CLOSED, 0),
timeo);
}
- /* fall through */
+ fallthrough;
case IUCV_CLOSING:
sk->sk_state = IUCV_CLOSED;
skb_queue_purge(&iucv->send_skb_q);
skb_queue_purge(&iucv->backlog_skb_q);
- /* fall through */
+ fallthrough;
default:
iucv_sever_path(sk, 1);
kfree_skb(skb);
break;
}
- /* fall through - and receive non-zero length data */
+ fallthrough; /* and receive non-zero length data */
case (AF_IUCV_FLAG_SHT):
/* shutdown request */
- /* fall through - and receive zero length data */
+ fallthrough; /* and receive zero length data */
case 0:
/* plain data frame */
IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class;
EXPORT_SYMBOL_GPL(l3mdev_master_upper_ifindex_by_index_rcu);
/**
- * l3mdev_fib_table - get FIB table id associated with an L3
+ * l3mdev_fib_table_rcu - get FIB table id associated with an L3
* master interface
* @dev: targeted interface
*/
return duration;
}
-u32 ieee80211_calc_rx_airtime(struct ieee80211_hw *hw,
- struct ieee80211_rx_status *status,
- int len)
+static u32 ieee80211_get_rate_duration(struct ieee80211_hw *hw,
+ struct ieee80211_rx_status *status,
+ u32 *overhead)
{
- struct ieee80211_supported_band *sband;
- const struct ieee80211_rate *rate;
bool sgi = status->enc_flags & RX_ENC_FLAG_SHORT_GI;
- bool sp = status->enc_flags & RX_ENC_FLAG_SHORTPRE;
int bw, streams;
int group, idx;
u32 duration;
- bool cck;
switch (status->bw) {
case RATE_INFO_BW_20:
}
switch (status->encoding) {
- case RX_ENC_LEGACY:
- if (WARN_ON_ONCE(status->band > NL80211_BAND_5GHZ))
- return 0;
-
- sband = hw->wiphy->bands[status->band];
- if (!sband || status->rate_idx >= sband->n_bitrates)
- return 0;
-
- rate = &sband->bitrates[status->rate_idx];
- cck = rate->flags & IEEE80211_RATE_MANDATORY_B;
-
- return ieee80211_calc_legacy_rate_duration(rate->bitrate, sp,
- cck, len);
-
case RX_ENC_VHT:
streams = status->nss;
idx = status->rate_idx;
duration = airtime_mcs_groups[group].duration[idx];
duration <<= airtime_mcs_groups[group].shift;
+ *overhead = 36 + (streams << 2);
+
+ return duration;
+}
+
+u32 ieee80211_calc_rx_airtime(struct ieee80211_hw *hw,
+ struct ieee80211_rx_status *status,
+ int len)
+{
+ struct ieee80211_supported_band *sband;
+ u32 duration, overhead = 0;
+
+ if (status->encoding == RX_ENC_LEGACY) {
+ const struct ieee80211_rate *rate;
+ bool sp = status->enc_flags & RX_ENC_FLAG_SHORTPRE;
+ bool cck;
+
+ if (WARN_ON_ONCE(status->band > NL80211_BAND_5GHZ))
+ return 0;
+
+ sband = hw->wiphy->bands[status->band];
+ if (!sband || status->rate_idx >= sband->n_bitrates)
+ return 0;
+
+ rate = &sband->bitrates[status->rate_idx];
+ cck = rate->flags & IEEE80211_RATE_MANDATORY_B;
+
+ return ieee80211_calc_legacy_rate_duration(rate->bitrate, sp,
+ cck, len);
+ }
+
+ duration = ieee80211_get_rate_duration(hw, status, &overhead);
+ if (!duration)
+ return 0;
+
duration *= len;
duration /= AVG_PKT_SIZE;
duration /= 1024;
- duration += 36 + (streams << 2);
-
- return duration;
+ return duration + overhead;
}
EXPORT_SYMBOL_GPL(ieee80211_calc_rx_airtime);
-static u32 ieee80211_calc_tx_airtime_rate(struct ieee80211_hw *hw,
- struct ieee80211_tx_rate *rate,
- u8 band, int len)
+static bool ieee80211_fill_rate_info(struct ieee80211_hw *hw,
+ struct ieee80211_rx_status *stat, u8 band,
+ struct rate_info *ri)
{
- struct ieee80211_rx_status stat = {
- .band = band,
- };
+ struct ieee80211_supported_band *sband = hw->wiphy->bands[band];
+ int i;
- if (rate->idx < 0 || !rate->count)
+ if (!ri || !sband)
+ return false;
+
+ stat->bw = ri->bw;
+ stat->nss = ri->nss;
+ stat->rate_idx = ri->mcs;
+
+ if (ri->flags & RATE_INFO_FLAGS_HE_MCS)
+ stat->encoding = RX_ENC_HE;
+ else if (ri->flags & RATE_INFO_FLAGS_VHT_MCS)
+ stat->encoding = RX_ENC_VHT;
+ else if (ri->flags & RATE_INFO_FLAGS_MCS)
+ stat->encoding = RX_ENC_HT;
+ else
+ stat->encoding = RX_ENC_LEGACY;
+
+ if (ri->flags & RATE_INFO_FLAGS_SHORT_GI)
+ stat->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+
+ stat->he_gi = ri->he_gi;
+
+ if (stat->encoding != RX_ENC_LEGACY)
+ return true;
+
+ stat->rate_idx = 0;
+ for (i = 0; i < sband->n_bitrates; i++) {
+ if (ri->legacy != sband->bitrates[i].bitrate)
+ continue;
+
+ stat->rate_idx = i;
+ return true;
+ }
+
+ return false;
+}
+
+static int ieee80211_fill_rx_status(struct ieee80211_rx_status *stat,
+ struct ieee80211_hw *hw,
+ struct ieee80211_tx_rate *rate,
+ struct rate_info *ri, u8 band, int len)
+{
+ memset(stat, 0, sizeof(*stat));
+ stat->band = band;
+
+ if (ieee80211_fill_rate_info(hw, stat, band, ri))
return 0;
+ if (rate->idx < 0 || !rate->count)
+ return -1;
+
if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
- stat.bw = RATE_INFO_BW_80;
+ stat->bw = RATE_INFO_BW_80;
else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
- stat.bw = RATE_INFO_BW_40;
+ stat->bw = RATE_INFO_BW_40;
else
- stat.bw = RATE_INFO_BW_20;
+ stat->bw = RATE_INFO_BW_20;
- stat.enc_flags = 0;
+ stat->enc_flags = 0;
if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
- stat.enc_flags |= RX_ENC_FLAG_SHORTPRE;
+ stat->enc_flags |= RX_ENC_FLAG_SHORTPRE;
if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
- stat.enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ stat->enc_flags |= RX_ENC_FLAG_SHORT_GI;
- stat.rate_idx = rate->idx;
+ stat->rate_idx = rate->idx;
if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
- stat.encoding = RX_ENC_VHT;
- stat.rate_idx = ieee80211_rate_get_vht_mcs(rate);
- stat.nss = ieee80211_rate_get_vht_nss(rate);
+ stat->encoding = RX_ENC_VHT;
+ stat->rate_idx = ieee80211_rate_get_vht_mcs(rate);
+ stat->nss = ieee80211_rate_get_vht_nss(rate);
} else if (rate->flags & IEEE80211_TX_RC_MCS) {
- stat.encoding = RX_ENC_HT;
+ stat->encoding = RX_ENC_HT;
} else {
- stat.encoding = RX_ENC_LEGACY;
+ stat->encoding = RX_ENC_LEGACY;
}
+ return 0;
+}
+
+static u32 ieee80211_calc_tx_airtime_rate(struct ieee80211_hw *hw,
+ struct ieee80211_tx_rate *rate,
+ struct rate_info *ri,
+ u8 band, int len)
+{
+ struct ieee80211_rx_status stat;
+
+ if (ieee80211_fill_rx_status(&stat, hw, rate, ri, band, len))
+ return 0;
+
return ieee80211_calc_rx_airtime(hw, &stat, len);
}
struct ieee80211_tx_rate *rate = &info->status.rates[i];
u32 cur_duration;
- cur_duration = ieee80211_calc_tx_airtime_rate(hw, rate,
+ cur_duration = ieee80211_calc_tx_airtime_rate(hw, rate, NULL,
info->band, len);
if (!cur_duration)
break;
if (pubsta) {
struct sta_info *sta = container_of(pubsta, struct sta_info,
sta);
+ struct ieee80211_rx_status stat;
struct ieee80211_tx_rate *rate = &sta->tx_stats.last_rate;
- u32 airtime;
+ struct rate_info *ri = &sta->tx_stats.last_rate_info;
+ u32 duration, overhead;
+ u8 agg_shift;
- if (!(rate->flags & (IEEE80211_TX_RC_VHT_MCS |
- IEEE80211_TX_RC_MCS)))
- ampdu = false;
+ if (ieee80211_fill_rx_status(&stat, hw, rate, ri, band, len))
+ return 0;
+ if (stat.encoding == RX_ENC_LEGACY || !ampdu)
+ return ieee80211_calc_rx_airtime(hw, &stat, len);
+
+ duration = ieee80211_get_rate_duration(hw, &stat, &overhead);
/*
* Assume that HT/VHT transmission on any AC except VO will
* use aggregation. Since we don't have reliable reporting
- * of aggregation length, assume an average of 16.
+ * of aggregation length, assume an average size based on the
+ * tx rate.
* This will not be very accurate, but much better than simply
- * assuming un-aggregated tx.
+ * assuming un-aggregated tx in all cases.
*/
- airtime = ieee80211_calc_tx_airtime_rate(hw, rate, band,
- ampdu ? len * 16 : len);
- if (ampdu)
- airtime /= 16;
-
- return airtime;
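+	/* Pick an A-MPDU size estimate from the rate: the longer the
+	 * per-packet duration, the smaller the assumed aggregate, so the
+	 * per-PPDU overhead below is divided by 2^agg_shift.
+	 */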
+ if (duration > 400) /* <= VHT20 MCS2 1S */
+ agg_shift = 1;
+ else if (duration > 250) /* <= VHT20 MCS3 1S or MCS1 2S */
+ agg_shift = 2;
+ else if (duration > 150) /* <= VHT20 MCS5 1S or MCS3 2S */
+ agg_shift = 3;
+ else
+ agg_shift = 4;
+
+ duration *= len;
+ duration /= AVG_PKT_SIZE;
+ duration /= 1024;
+
+ return duration + (overhead >> agg_shift);
}
if (!conf)
* @status_stats.retry_failed: # of frames that failed after retry
* @status_stats.retry_count: # of retries attempted
* @status_stats.lost_packets: # of lost packets
- * @status_stats.last_tdls_pkt_time: timestamp of last TDLS packet
+ * @status_stats.last_pkt_time: timestamp of last ACKed packet
* @status_stats.msdu_retries: # of MSDU retries
* @status_stats.msdu_failed: # of failed MSDUs
* @status_stats.last_ack: last ack timestamp (jiffies)
unsigned long filtered;
unsigned long retry_failed, retry_count;
unsigned int lost_packets;
- unsigned long last_tdls_pkt_time;
+ unsigned long last_pkt_time;
u64 msdu_retries[IEEE80211_NUM_TIDS + 1];
u64 msdu_failed[IEEE80211_NUM_TIDS + 1];
unsigned long last_ack;
u64 packets[IEEE80211_NUM_ACS];
u64 bytes[IEEE80211_NUM_ACS];
struct ieee80211_tx_rate last_rate;
+ struct rate_info last_rate_info;
u64 msdu[IEEE80211_NUM_TIDS + 1];
} tx_stats;
u16 tid_seq[IEEE80211_QOS_CTL_TID_MASK + 1];
* - current throughput (higher value for higher tpt)?
*/
#define STA_LOST_PKT_THRESHOLD 50
+#define STA_LOST_PKT_TIME HZ /* 1 sec since last ACK */
#define STA_LOST_TDLS_PKT_THRESHOLD 10
#define STA_LOST_TDLS_PKT_TIME (10*HZ) /* 10secs since last ACK */
static void ieee80211_lost_packet(struct sta_info *sta,
struct ieee80211_tx_info *info)
{
+ unsigned long pkt_time = STA_LOST_PKT_TIME;
+ unsigned int pkt_thr = STA_LOST_PKT_THRESHOLD;
+
/* If driver relies on its own algorithm for station kickout, skip
* mac80211 packet loss mechanism.
*/
return;
sta->status_stats.lost_packets++;
- if (!sta->sta.tdls &&
- sta->status_stats.lost_packets < STA_LOST_PKT_THRESHOLD)
- return;
+ if (sta->sta.tdls) {
+ pkt_time = STA_LOST_TDLS_PKT_TIME;
+ pkt_thr = STA_LOST_PKT_THRESHOLD;
+ }
/*
* If we're in TDLS mode, make sure that all STA_LOST_TDLS_PKT_THRESHOLD
* of the last packets were lost, and that no ACK was received in the
* last STA_LOST_TDLS_PKT_TIME ms, before triggering the CQM packet-loss
* mechanism.
+ * For non-TDLS, use STA_LOST_PKT_THRESHOLD and STA_LOST_PKT_TIME
*/
- if (sta->sta.tdls &&
- (sta->status_stats.lost_packets < STA_LOST_TDLS_PKT_THRESHOLD ||
- time_before(jiffies,
- sta->status_stats.last_tdls_pkt_time +
- STA_LOST_TDLS_PKT_TIME)))
+ if (sta->status_stats.lost_packets < pkt_thr ||
+ !time_after(jiffies, sta->status_stats.last_pkt_time + pkt_time))
return;
cfg80211_cqm_pktloss_notify(sta->sdata->dev, sta->sta.addr,
sta->status_stats.lost_packets = 0;
/* Track when last TDLS packet was ACKed */
- if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH))
- sta->status_stats.last_tdls_pkt_time =
- jiffies;
+ sta->status_stats.last_pkt_time = jiffies;
} else if (noack_success) {
/* nothing to do here, do not account as lost */
} else {
struct ieee80211_tx_info *info = status->info;
struct ieee80211_sta *pubsta = status->sta;
struct ieee80211_supported_band *sband;
+ struct sta_info *sta;
int retry_count;
bool acked, noack_success;
+ if (pubsta) {
+ sta = container_of(pubsta, struct sta_info, sta);
+
+ if (status->rate)
+ sta->tx_stats.last_rate_info = *status->rate;
+ }
+
if (status->skb)
return __ieee80211_tx_status(hw, status);
noack_success = !!(info->flags & IEEE80211_TX_STAT_NOACK_TRANSMITTED);
if (pubsta) {
- struct sta_info *sta;
-
- sta = container_of(pubsta, struct sta_info, sta);
-
if (!acked && !noack_success)
sta->status_stats.retry_failed++;
sta->status_stats.retry_count += retry_count;
if (sta->status_stats.lost_packets)
sta->status_stats.lost_packets = 0;
- /* Track when last TDLS packet was ACKed */
- if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH))
- sta->status_stats.last_tdls_pkt_time = jiffies;
+ /* Track when last packet was ACKed */
+ sta->status_stats.last_pkt_time = jiffies;
} else if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
return;
} else if (noack_success) {
if (sta->status_stats.lost_packets)
sta->status_stats.lost_packets = 0;
- if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH))
- sta->status_stats.last_tdls_pkt_time = jiffies;
+ sta->status_stats.last_pkt_time = jiffies;
} else {
ieee80211_lost_packet(sta, info);
}
case NETDEV_DOWN:
case NETDEV_UNREGISTER:
nh_flags |= RTNH_F_DEAD;
- /* fall through */
+ fallthrough;
case NETDEV_CHANGE:
nh_flags |= RTNH_F_LINKDOWN;
break;
sk->sk_state_change(sk);
break;
case TCP_CLOSING:
- fallthrough;
case TCP_LAST_ACK:
inet_sk_state_store(sk, TCP_CLOSE);
sk->sk_state_change(sk);
if (!psize)
return -EINVAL;
- if (!sk_wmem_schedule(sk, psize + dfrag->overhead))
+ if (!sk_wmem_schedule(sk, psize + dfrag->overhead)) {
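+			/* rewind the iterator so a later retry sees the same data */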
+ iov_iter_revert(&msg->msg_iter, psize);
return -ENOMEM;
+ }
} else {
offset = dfrag->offset;
psize = min_t(size_t, dfrag->data_len, avail_size);
*/
ret = do_tcp_sendpages(ssk, page, offset, psize,
msg->msg_flags | MSG_SENDPAGE_NOTLAST | MSG_DONTWAIT);
- if (ret <= 0)
+ if (ret <= 0) {
+ if (!retransmission)
+ iov_iter_revert(&msg->msg_iter, psize);
return ret;
+ }
frag_truesize += ret;
if (!retransmission) {
goto out;
}
-wait_for_sndbuf:
__mptcp_flush_join_list(msk);
ssk = mptcp_subflow_get_send(msk);
while (!sk_stream_memory_free(sk) ||
*/
mptcp_set_timeout(sk, ssk);
release_sock(ssk);
- goto wait_for_sndbuf;
+ goto restart;
}
}
}
struct mptcp_data_frag *dfrag;
u64 orig_write_seq;
size_t copied = 0;
- struct msghdr msg;
+ struct msghdr msg = {
+ .msg_flags = MSG_DONTWAIT,
+ };
long timeo = 0;
lock_sock(sk);
lock_sock(ssk);
- msg.msg_flags = MSG_DONTWAIT;
orig_len = dfrag->data_len;
orig_offset = dfrag->offset;
orig_write_seq = dfrag->data_seq;
case TCP_LISTEN:
if (!(how & RCV_SHUTDOWN))
break;
- /* fall through */
+ fallthrough;
case TCP_SYN_SENT:
tcp_disconnect(ssk, O_NONBLOCK);
break;
switch (nd->state) {
case ncsi_dev_state_suspend:
nd->state = ncsi_dev_state_suspend_select;
- /* Fall through */
+ fallthrough;
case ncsi_dev_state_suspend_select:
ndp->pending_req_num = 1;
switch (nd->state) {
case ncsi_dev_state_probe:
nd->state = ncsi_dev_state_probe_deselect;
- /* Fall through */
+ fallthrough;
case ncsi_dev_state_probe_deselect:
ndp->pending_req_num = 8;
switch (skb->ip_summed) {
case CHECKSUM_NONE:
skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0);
- /* fall through */
+ fallthrough;
case CHECKSUM_COMPLETE:
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6) {
case CHECKSUM_NONE:
skb->csum = skb_checksum(skb, udphoff,
skb->len - udphoff, 0);
- /* fall through */
+ fallthrough;
case CHECKSUM_COMPLETE:
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6) {
// SPDX-License-Identifier: GPL-2.0-only
/*
* Connection tracking support for PPTP (Point to Point Tunneling Protocol).
- * PPTP is a a protocol for creating virtual private networks.
+ * PPTP is a protocol for creating virtual private networks.
* It is a specification defined by Microsoft and some vendors
* working with Microsoft. PPTP is built on top of a modified
* version of the Internet Generic Routing Encapsulation Protocol.
[SCTP_CONNTRACK_HEARTBEAT_ACKED] = 210 SECS,
};
+#define SCTP_FLAG_HEARTBEAT_VTAG_FAILED 1
+
#define sNO SCTP_CONNTRACK_NONE
#define sCL SCTP_CONNTRACK_CLOSED
#define sCW SCTP_CONNTRACK_COOKIE_WAIT
u_int32_t offset, count;
unsigned int *timeouts;
unsigned long map[256 / sizeof(unsigned long)] = { 0 };
+ bool ignore = false;
if (sctp_error(skb, dataoff, state))
return -NF_ACCEPT;
/* Sec 8.5.1 (D) */
if (sh->vtag != ct->proto.sctp.vtag[dir])
goto out_unlock;
- } else if (sch->type == SCTP_CID_HEARTBEAT ||
- sch->type == SCTP_CID_HEARTBEAT_ACK) {
+ } else if (sch->type == SCTP_CID_HEARTBEAT) {
+ if (ct->proto.sctp.vtag[dir] == 0) {
+ pr_debug("Setting %d vtag %x for dir %d\n", sch->type, sh->vtag, dir);
+ ct->proto.sctp.vtag[dir] = sh->vtag;
+ } else if (sh->vtag != ct->proto.sctp.vtag[dir]) {
+ if (test_bit(SCTP_CID_DATA, map) || ignore)
+ goto out_unlock;
+
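+			/* vtag mismatch on a bare heartbeat: flag the direction
+			 * and let a matching HEARTBEAT_ACK re-sync the tags
+			 */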
+ ct->proto.sctp.flags |= SCTP_FLAG_HEARTBEAT_VTAG_FAILED;
+ ct->proto.sctp.last_dir = dir;
+ ignore = true;
+ continue;
+ } else if (ct->proto.sctp.flags & SCTP_FLAG_HEARTBEAT_VTAG_FAILED) {
+ ct->proto.sctp.flags &= ~SCTP_FLAG_HEARTBEAT_VTAG_FAILED;
+ }
+ } else if (sch->type == SCTP_CID_HEARTBEAT_ACK) {
if (ct->proto.sctp.vtag[dir] == 0) {
pr_debug("Setting vtag %x for dir %d\n",
sh->vtag, dir);
ct->proto.sctp.vtag[dir] = sh->vtag;
} else if (sh->vtag != ct->proto.sctp.vtag[dir]) {
- pr_debug("Verification tag check failed\n");
- goto out_unlock;
+ if (test_bit(SCTP_CID_DATA, map) || ignore)
+ goto out_unlock;
+
+ if ((ct->proto.sctp.flags & SCTP_FLAG_HEARTBEAT_VTAG_FAILED) == 0 ||
+ ct->proto.sctp.last_dir == dir)
+ goto out_unlock;
+
+ ct->proto.sctp.flags &= ~SCTP_FLAG_HEARTBEAT_VTAG_FAILED;
+ ct->proto.sctp.vtag[dir] = sh->vtag;
+ ct->proto.sctp.vtag[!dir] = 0;
+ } else if (ct->proto.sctp.flags & SCTP_FLAG_HEARTBEAT_VTAG_FAILED) {
+ ct->proto.sctp.flags &= ~SCTP_FLAG_HEARTBEAT_VTAG_FAILED;
}
}
}
spin_unlock_bh(&ct->lock);
+ /* allow but do not refresh timeout */
+ if (ignore)
+ return NF_ACCEPT;
+
timeouts = nf_ct_timeout_lookup(ct);
if (!timeouts)
timeouts = nf_sctp_pernet(nf_ct_net(ct))->timeouts;
&& (old_state == TCP_CONNTRACK_SYN_RECV
|| old_state == TCP_CONNTRACK_ESTABLISHED)
&& new_state == TCP_CONNTRACK_ESTABLISHED) {
- /* Set ASSURED if we see see valid ack in ESTABLISHED
+ /* Set ASSURED if we see valid ack in ESTABLISHED
after SYN_RECV or a valid answer for a picked up
connection. */
set_bit(IPS_ASSURED_BIT, &ct->status);
return false;
}
-static void nf_conntrack_udp_refresh_unreplied(struct nf_conn *ct,
- struct sk_buff *skb,
- enum ip_conntrack_info ctinfo,
- u32 extra_jiffies)
-{
- if (unlikely(ctinfo == IP_CT_ESTABLISHED_REPLY &&
- ct->status & IPS_NAT_CLASH))
- nf_ct_kill(ct);
- else
- nf_ct_refresh_acct(ct, ctinfo, skb, extra_jiffies);
-}
-
/* Returns verdict for packet, and may modify conntracktype */
int nf_conntrack_udp_packet(struct nf_conn *ct,
struct sk_buff *skb,
nf_ct_refresh_acct(ct, ctinfo, skb, extra);
+ /* never set ASSURED for IPS_NAT_CLASH, they time out soon */
+ if (unlikely((ct->status & IPS_NAT_CLASH)))
+ return NF_ACCEPT;
+
/* Also, more likely to be important, and not a probe */
if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
nf_conntrack_event_cache(IPCT_ASSURED, ct);
} else {
- nf_conntrack_udp_refresh_unreplied(ct, skb, ctinfo,
- timeouts[UDP_CT_UNREPLIED]);
+ nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[UDP_CT_UNREPLIED]);
}
return NF_ACCEPT;
}
if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
nf_ct_refresh_acct(ct, ctinfo, skb,
timeouts[UDP_CT_REPLIED]);
+
+ if (unlikely((ct->status & IPS_NAT_CLASH)))
+ return NF_ACCEPT;
+
/* Also, more likely to be important, and not a probe */
if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
nf_conntrack_event_cache(IPCT_ASSURED, ct);
} else {
- nf_conntrack_udp_refresh_unreplied(ct, skb, ctinfo,
- timeouts[UDP_CT_UNREPLIED]);
+ nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[UDP_CT_UNREPLIED]);
}
return NF_ACCEPT;
}
nlh->nlmsg_seq, NFT_MSG_NEWTABLE, 0,
family, table);
if (err < 0)
- goto err;
+ goto err_fill_table_info;
- return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
+ return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
-err:
+err_fill_table_info:
kfree_skb(skb2);
return err;
}
nlh->nlmsg_seq, NFT_MSG_NEWCHAIN, 0,
family, table, chain);
if (err < 0)
- goto err;
+ goto err_fill_chain_info;
- return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
+ return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
-err:
+err_fill_chain_info:
kfree_skb(skb2);
return err;
}
if (nla[NFTA_CHAIN_NAME]) {
chain->name = nla_strdup(nla[NFTA_CHAIN_NAME], GFP_KERNEL);
} else {
- if (!(flags & NFT_CHAIN_BINDING))
- return -EINVAL;
+ if (!(flags & NFT_CHAIN_BINDING)) {
+ err = -EINVAL;
+ goto err1;
+ }
snprintf(name, sizeof(name), "__chain%llu", ++chain_id);
chain->name = kstrdup(name, GFP_KERNEL);
nlh->nlmsg_seq, NFT_MSG_NEWRULE, 0,
family, table, chain, rule, NULL);
if (err < 0)
- goto err;
+ goto err_fill_rule_info;
- return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
+ return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
-err:
+err_fill_rule_info:
kfree_skb(skb2);
return err;
}
goto nla_put_failure;
}
- if (nla_put(skb, NFTA_SET_USERDATA, set->udlen, set->udata))
+ if (set->udata &&
+ nla_put(skb, NFTA_SET_USERDATA, set->udlen, set->udata))
goto nla_put_failure;
nest = nla_nest_start_noflag(skb, NFTA_SET_DESC);
err = nf_tables_fill_set(skb2, &ctx, set, NFT_MSG_NEWSET, 0);
if (err < 0)
- goto err;
+ goto err_fill_set_info;
- return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
+ return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
-err:
+err_fill_set_info:
kfree_skb(skb2);
return err;
}
err = -ENOMEM;
skb = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
if (skb == NULL)
- goto err1;
+ return err;
err = nf_tables_fill_setelem_info(skb, ctx, ctx->seq, ctx->portid,
NFT_MSG_NEWSETELEM, 0, set, &elem);
if (err < 0)
- goto err2;
+ goto err_fill_setelem;
- err = nfnetlink_unicast(skb, ctx->net, ctx->portid, MSG_DONTWAIT);
- /* This avoids a loop in nfnetlink. */
- if (err < 0)
- goto err1;
+ return nfnetlink_unicast(skb, ctx->net, ctx->portid);
- return 0;
-err2:
+err_fill_setelem:
kfree_skb(skb);
-err1:
- /* this avoids a loop in nfnetlink. */
- return err == -EAGAIN ? -ENOBUFS : err;
+ return err;
}
/* called with rcu_read_lock held */
nlh->nlmsg_seq, NFT_MSG_NEWOBJ, 0,
family, table, obj, reset);
if (err < 0)
- goto err;
+ goto err_fill_obj_info;
- return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
-err:
+ return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
+
+err_fill_obj_info:
kfree_skb(skb2);
return err;
}
NFT_MSG_NEWFLOWTABLE, 0, family,
flowtable, &flowtable->hook_list);
if (err < 0)
- goto err;
+ goto err_fill_flowtable_info;
- return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
-err:
+ return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
+
+err_fill_flowtable_info:
kfree_skb(skb2);
return err;
}
err = nf_tables_fill_gen_info(skb2, net, NETLINK_CB(skb).portid,
nlh->nlmsg_seq);
if (err < 0)
- goto err;
+ goto err_fill_gen_info;
- return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
-err:
+ return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
+
+err_fill_gen_info:
kfree_skb(skb2);
return err;
}
}
EXPORT_SYMBOL_GPL(nfnetlink_set_err);
-int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid,
- int flags)
+int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid)
{
- return netlink_unicast(net->nfnl, skb, portid, flags);
+ int err;
+
+ err = nlmsg_unicast(net->nfnl, skb, portid);
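+	/* -EAGAIN would cause a loop in nfnetlink; report -ENOBUFS instead */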
+ if (err == -EAGAIN)
+ err = -ENOBUFS;
+
+ return err;
}
EXPORT_SYMBOL_GPL(nfnetlink_unicast);
goto out;
}
}
- nfnetlink_unicast(inst->skb, inst->net, inst->peer_portid,
- MSG_DONTWAIT);
+ nfnetlink_unicast(inst->skb, inst->net, inst->peer_portid);
out:
inst->qlen = 0;
inst->skb = NULL;
*packet_id_ptr = htonl(entry->id);
/* nfnetlink_unicast will either free the nskb or add it to a socket */
- err = nfnetlink_unicast(nskb, net, queue->peer_portid, MSG_DONTWAIT);
+ err = nfnetlink_unicast(nskb, net, queue->peer_portid);
if (err < 0) {
if (queue->flags & NFQA_CFG_F_FAIL_OPEN) {
failopen = 1;
void *info;
};
-static refcount_t nft_compat_pending_destroy = REFCOUNT_INIT(1);
-
static int nft_compat_chain_validate_dependency(const struct nft_ctx *ctx,
const char *tablename)
{
return 0;
}
+static void nft_compat_wait_for_destructors(void)
+{
+ /* xtables matches or targets can have side effects, e.g.
+ * creation/destruction of /proc files.
+ * The xt ->destroy functions are run asynchronously from
+ * work queue. If we have pending invocations we thus
+ * need to wait for those to finish.
+ */
+ nf_tables_trans_destroy_flush_work();
+}
+
static int
nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
const struct nlattr * const tb[])
nft_target_set_tgchk_param(&par, ctx, target, info, &e, proto, inv);
- /* xtables matches or targets can have side effects, e.g.
- * creation/destruction of /proc files.
- * The xt ->destroy functions are run asynchronously from
- * work queue. If we have pending invocations we thus
- * need to wait for those to finish.
- */
- if (refcount_read(&nft_compat_pending_destroy) > 1)
- nf_tables_trans_destroy_flush_work();
+ nft_compat_wait_for_destructors();
ret = xt_check_target(&par, size, proto, inv);
if (ret < 0)
static void __nft_mt_tg_destroy(struct module *me, const struct nft_expr *expr)
{
- refcount_dec(&nft_compat_pending_destroy);
module_put(me);
kfree(expr->ops);
}
nft_match_set_mtchk_param(&par, ctx, match, info, &e, proto, inv);
+ nft_compat_wait_for_destructors();
+
return xt_check_match(&par, size, proto, inv);
}
static struct nft_expr_type nft_match_type;
-static void nft_mt_tg_deactivate(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- enum nft_trans_phase phase)
-{
- if (phase == NFT_TRANS_COMMIT)
- refcount_inc(&nft_compat_pending_destroy);
-}
-
static const struct nft_expr_ops *
nft_match_select_ops(const struct nft_ctx *ctx,
const struct nlattr * const tb[])
ops->type = &nft_match_type;
ops->eval = nft_match_eval;
ops->init = nft_match_init;
- ops->deactivate = nft_mt_tg_deactivate,
ops->destroy = nft_match_destroy;
ops->dump = nft_match_dump;
ops->validate = nft_match_validate;
ops->size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize));
ops->init = nft_target_init;
ops->destroy = nft_target_destroy;
- ops->deactivate = nft_mt_tg_deactivate,
ops->dump = nft_target_dump;
ops->validate = nft_target_validate;
ops->data = target;
nfnetlink_subsys_unregister(&nfnl_compat_subsys);
nft_unregister_expr(&nft_target_type);
nft_unregister_expr(&nft_match_type);
-
- WARN_ON_ONCE(refcount_read(&nft_compat_pending_destroy) != 1);
}
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_NFT_COMPAT);
err = ipv6_find_hdr(pkt->skb, &offset, priv->type, NULL, NULL);
if (priv->flags & NFT_EXTHDR_F_PRESENT) {
- *dest = (err >= 0);
+ nft_reg_store8(dest, err >= 0);
return;
} else if (err < 0) {
goto err;
err = ipv4_find_option(nft_net(pkt), skb, &offset, priv->type);
if (priv->flags & NFT_EXTHDR_F_PRESENT) {
- *dest = (err >= 0);
+ nft_reg_store8(dest, err >= 0);
return;
} else if (err < 0) {
goto err;
}
if (nf_ct_ext_exist(ct, NF_CT_EXT_HELPER) ||
- ct->status & IPS_SEQ_ADJUST)
+ ct->status & (IPS_SEQ_ADJUST | IPS_NAT_CLASH))
goto out;
if (!nf_ct_is_confirmed(ct))
 u32 *dest = &regs->data[priv->dreg];
int offset;
- dest[priv->len / NFT_REG32_SIZE] = 0;
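+	/* zero-pad the last register only for non-word-sized loads;
+	 * a full-word load fills it completely anyway
+	 */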
+ if (priv->len % NFT_REG32_SIZE)
+ dest[priv->len / NFT_REG32_SIZE] = 0;
+
switch (priv->base) {
case NFT_PAYLOAD_LL_HEADER:
if (!skb_mac_header_was_set(skb))
struct nft_rbtree_elem *new,
struct nft_set_ext **ext)
{
+ bool overlap = false, dup_end_left = false, dup_end_right = false;
struct nft_rbtree *priv = nft_set_priv(set);
u8 genmask = nft_genmask_next(net);
struct nft_rbtree_elem *rbe;
struct rb_node *parent, **p;
- bool overlap = false;
int d;
/* Detect overlaps as we descend the tree. Set the flag in these cases:
*
* b1. _ _ __>| !_ _ __| (insert end before existing start)
* b2. _ _ ___| !_ _ _>| (insert end after existing start)
- * b3. _ _ ___! >|_ _ __| (insert start after existing end)
+ * b3. _ _ ___! >|_ _ __| (insert start after existing end, as a leaf)
+ * '--' no nodes falling in this range
+ * b4. >|_ _ ! (insert start before existing start)
*
* Case a3. resolves to b3.:
* - if the inserted start element is the leftmost, because the '0'
* element in the tree serves as end element
- * - otherwise, if an existing end is found. Note that end elements are
- * always inserted after corresponding start elements.
+ * - otherwise, if an existing end is found immediately to the left. If
+ * there are existing nodes in between, we need to further descend the
+ * tree before we can conclude the new start isn't causing an overlap
+ *
+ * or to b4., which, preceded by a3., means we already traversed one or
+ * more existing intervals entirely, from the right.
*
* For a new, rightmost pair of elements, we'll hit cases b3. and b2.,
* in that order.
*
* The flag is also cleared in two special cases:
*
- * b4. |__ _ _!|<_ _ _ (insert start right before existing end)
- * b5. |__ _ >|!__ _ _ (insert end right after existing start)
+ * b5. |__ _ _!|<_ _ _ (insert start right before existing end)
+ * b6. |__ _ >|!__ _ _ (insert end right after existing start)
*
* which always happen as last step and imply that no further
* overlapping is possible.
+ *
+ * Another special case comes from the fact that start elements matching
+ * an already existing start element are allowed: insertion is not
+ * performed but we return -EEXIST in that case, and the error will be
+ * cleared by the caller if NLM_F_EXCL is not present in the request.
+ * This way, request for insertion of an exact overlap isn't reported as
+ * error to userspace if not desired.
+ *
+ * However, if the existing start matches a pre-existing start, but the
+ * end element doesn't match the corresponding pre-existing end element,
+ * we need to report a partial overlap. This is a local condition that
+ * can be noticed without need for a tracking flag, by checking for a
+ * local duplicated end for a corresponding start, from left and right,
+ * separately.
*/
parent = NULL;
if (nft_rbtree_interval_start(new)) {
if (nft_rbtree_interval_end(rbe) &&
nft_set_elem_active(&rbe->ext, genmask) &&
- !nft_set_elem_expired(&rbe->ext))
+ !nft_set_elem_expired(&rbe->ext) && !*p)
overlap = false;
} else {
+ if (dup_end_left && !*p)
+ return -ENOTEMPTY;
+
overlap = nft_rbtree_interval_end(rbe) &&
nft_set_elem_active(&rbe->ext,
genmask) &&
!nft_set_elem_expired(&rbe->ext);
+
+ if (overlap) {
+ dup_end_right = true;
+ continue;
+ }
}
} else if (d > 0) {
p = &parent->rb_right;
if (nft_rbtree_interval_end(new)) {
+ if (dup_end_right && !*p)
+ return -ENOTEMPTY;
+
overlap = nft_rbtree_interval_end(rbe) &&
nft_set_elem_active(&rbe->ext,
genmask) &&
!nft_set_elem_expired(&rbe->ext);
- } else if (nft_rbtree_interval_end(rbe) &&
- nft_set_elem_active(&rbe->ext, genmask) &&
+
+ if (overlap) {
+ dup_end_left = true;
+ continue;
+ }
+ } else if (nft_set_elem_active(&rbe->ext, genmask) &&
!nft_set_elem_expired(&rbe->ext)) {
- overlap = true;
+ overlap = nft_rbtree_interval_end(rbe);
}
} else {
if (nft_rbtree_interval_end(rbe) &&
p = &parent->rb_left;
}
}
+
+ dup_end_left = dup_end_right = false;
}
if (overlap)
struct recent_table *t;
/* recent_net_exit() is called before recent_mt_destroy(). Make sure
- * that the parent xt_recent proc entry is is empty before trying to
+ * that the parent xt_recent proc entry is empty before trying to
* remove it.
*/
spin_lock_bh(&recent_lock);
kfree(netlbl_domhsh_addr6_entry(iter6));
}
#endif /* IPv6 */
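+		/* the address selector itself must be freed too, not only its lists */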
+ kfree(ptr->def.addrsel);
}
kfree(ptr->domain);
kfree(ptr);
goto add_return;
}
#endif /* IPv6 */
+ /* cleanup the new entry since we've moved everything over */
+ netlbl_domhsh_free_entry(&entry->rcu);
} else
ret_val = -EINVAL;
{
int ret_val = 0;
struct audit_buffer *audit_buf;
+ struct netlbl_af4list *iter4;
+ struct netlbl_domaddr4_map *map4;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct netlbl_af6list *iter6;
+ struct netlbl_domaddr6_map *map6;
+#endif /* IPv6 */
if (entry == NULL)
return -ENOENT;
ret_val = -ENOENT;
spin_unlock(&netlbl_domhsh_lock);
+ if (ret_val)
+ return ret_val;
+
audit_buf = netlbl_audit_start_common(AUDIT_MAC_MAP_DEL, audit_info);
if (audit_buf != NULL) {
audit_log_format(audit_buf,
audit_log_end(audit_buf);
}
- if (ret_val == 0) {
- struct netlbl_af4list *iter4;
- struct netlbl_domaddr4_map *map4;
-#if IS_ENABLED(CONFIG_IPV6)
- struct netlbl_af6list *iter6;
- struct netlbl_domaddr6_map *map6;
-#endif /* IPv6 */
-
- switch (entry->def.type) {
- case NETLBL_NLTYPE_ADDRSELECT:
- netlbl_af4list_foreach_rcu(iter4,
- &entry->def.addrsel->list4) {
- map4 = netlbl_domhsh_addr4_entry(iter4);
- cipso_v4_doi_putdef(map4->def.cipso);
- }
+ switch (entry->def.type) {
+ case NETLBL_NLTYPE_ADDRSELECT:
+ netlbl_af4list_foreach_rcu(iter4, &entry->def.addrsel->list4) {
+ map4 = netlbl_domhsh_addr4_entry(iter4);
+ cipso_v4_doi_putdef(map4->def.cipso);
+ }
#if IS_ENABLED(CONFIG_IPV6)
- netlbl_af6list_foreach_rcu(iter6,
- &entry->def.addrsel->list6) {
- map6 = netlbl_domhsh_addr6_entry(iter6);
- calipso_doi_putdef(map6->def.calipso);
- }
+ netlbl_af6list_foreach_rcu(iter6, &entry->def.addrsel->list6) {
+ map6 = netlbl_domhsh_addr6_entry(iter6);
+ calipso_doi_putdef(map6->def.calipso);
+ }
#endif /* IPv6 */
- break;
- case NETLBL_NLTYPE_CIPSOV4:
- cipso_v4_doi_putdef(entry->def.cipso);
- break;
+ break;
+ case NETLBL_NLTYPE_CIPSOV4:
+ cipso_v4_doi_putdef(entry->def.cipso);
+ break;
#if IS_ENABLED(CONFIG_IPV6)
- case NETLBL_NLTYPE_CALIPSO:
- calipso_doi_putdef(entry->def.calipso);
- break;
+ case NETLBL_NLTYPE_CALIPSO:
+ calipso_doi_putdef(entry->def.calipso);
+ break;
#endif /* IPv6 */
- }
- call_rcu(&entry->rcu, netlbl_domhsh_free_entry);
}
+ call_rcu(&entry->rcu, netlbl_domhsh_free_entry);
return ret_val;
}
{
struct netlink_sock *nlk = nlk_sk(sk);
- if (skb_queue_empty(&sk->sk_receive_queue))
+ if (skb_queue_empty_lockless(&sk->sk_receive_queue))
clear_bit(NETLINK_S_CONGESTED, &nlk->state);
if (!test_bit(NETLINK_S_CONGESTED, &nlk->state))
wake_up_interruptible(&nlk->wait);
if (!state)
return -ENOMEM;
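+	/* zero the newly grown tail so unused policy slots aren't garbage */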
+ memset(&state->policies[state->n_alloc], 0,
+ flex_array_size(state, policies, n_alloc - state->n_alloc));
+
state->policies[state->n_alloc].policy = policy;
state->policies[state->n_alloc].maxtype = maxtype;
state->n_alloc = n_alloc;
goto next;
case NLA_NESTED:
type = NL_ATTR_TYPE_NESTED;
- /* fall through */
+ fallthrough;
case NLA_NESTED_ARRAY:
if (pt->type == NLA_NESTED_ARRAY)
type = NL_ATTR_TYPE_NESTED_ARRAY;
case NR_DISCREQ:
nr_write_internal(sk, NR_DISCACK);
- /* fall through */
+ fallthrough;
case NR_DISCACK:
nr_disconnect(sk, 0);
break;
case 3:
re_sort_routes(nr_node, 0, 1);
re_sort_routes(nr_node, 1, 2);
- /* fall through */
+ fallthrough;
case 2:
re_sort_routes(nr_node, 0, 1);
case 1:
switch (i) {
case 0:
nr_node->routes[0] = nr_node->routes[1];
- /* fall through */
+ fallthrough;
case 1:
nr_node->routes[1] = nr_node->routes[2];
case 2:
switch (i) {
case 0:
s->routes[0] = s->routes[1];
- /* Fallthrough */
+ fallthrough;
case 1:
s->routes[1] = s->routes[2];
case 2:
switch (i) {
case 0:
t->routes[0] = t->routes[1];
- /* fall through */
+ fallthrough;
case 1:
t->routes[1] = t->routes[2];
case 2:
}
}
/* Non-ICMP, fall thru to initialize if needed. */
- /* fall through */
+ fallthrough;
case IP_CT_NEW:
/* Seen it before? This can happen for loopback, retrans,
* or local packets.
switch (type) {
case OVS_CT_ATTR_FORCE_COMMIT:
info->force = true;
- /* fall through. */
+ fallthrough;
case OVS_CT_ATTR_COMMIT:
info->commit = true;
break;
case -EINVAL:
memset(&key->ip, 0, sizeof(key->ip));
memset(&key->ipv6.addr, 0, sizeof(key->ipv6.addr));
- /* fall-through */
+ fallthrough;
case -EPROTO:
skb->transport_header = skb->network_header;
error = 0;
int skb_len = skb->len;
unsigned int snaplen, res;
unsigned long status = TP_STATUS_USER;
- unsigned short macoff, netoff, hdrlen;
+ unsigned short macoff, hdrlen;
+ unsigned int netoff;
struct sk_buff *copy_skb = NULL;
struct timespec64 ts;
__u32 ts_status;
}
macoff = netoff - maclen;
}
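+	/* netoff must still fit the u16 tp_net/tp_mac ring header fields */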
+ if (netoff > USHRT_MAX) {
+ atomic_inc(&po->tp_drops);
+ goto drop_n_restore;
+ }
if (po->tp_version <= TPACKET_V2) {
if (macoff + snaplen > po->rx_ring.frame_size) {
if (po->copy_thresh &&
case NETDEV_UNREGISTER:
if (po->mclist)
packet_dev_mclist_delete(dev, &po->mclist);
- /* fallthrough */
+ fallthrough;
case NETDEV_DOWN:
if (dev->ifindex == po->ifindex) {
err = -EINVAL;
goto out;
}
- /* fall through */
+ fallthrough;
case PNS_PEP_DISABLE_REQ:
atomic_set(&pn->tx_credits, 0);
pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
case PNS_PIPE_ALIGNED_DATA:
__skb_pull(skb, 1);
- /* fall through */
+ fallthrough;
case PNS_PIPE_DATA:
__skb_pull(skb, 3); /* Pipe data header */
if (!pn_flow_safe(pn->rx_fc)) {
err = pipe_rcv_created(sk, skb);
if (err)
break;
- /* fall through */
+ fallthrough;
case PNS_PIPE_RESET_IND:
if (!pn->init_enable)
break;
- /* fall through */
+ fallthrough;
case PNS_PIPE_ENABLED_IND:
if (!pn_flow_safe(pn->tx_fc)) {
atomic_set(&pn->tx_credits, 1);
switch (hdr->message_id) {
case PNS_PIPE_ALIGNED_DATA:
__skb_pull(skb, 1);
- /* fall through */
+ fallthrough;
case PNS_PIPE_DATA:
__skb_pull(skb, 3); /* Pipe data header */
if (!pn_flow_safe(pn->rx_fc)) {
*/
static int qrtr_port_assign(struct qrtr_sock *ipc, int *port)
{
+ u32 min_port;
int rc;
mutex_lock(&qrtr_port_lock);
if (!*port) {
- rc = idr_alloc(&qrtr_ports, ipc,
- QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET + 1,
- GFP_ATOMIC);
- if (rc >= 0)
- *port = rc;
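+		/* idr_alloc_u32() returns 0 on success and stores the chosen ID in min_port */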
+ min_port = QRTR_MIN_EPH_SOCKET;
+ rc = idr_alloc_u32(&qrtr_ports, ipc, &min_port, QRTR_MAX_EPH_SOCKET, GFP_ATOMIC);
+ if (!rc)
+ *port = min_port;
} else if (*port < QRTR_MIN_EPH_SOCKET && !capable(CAP_NET_ADMIN)) {
rc = -EACCES;
} else if (*port == QRTR_PORT_CTRL) {
- rc = idr_alloc(&qrtr_ports, ipc, 0, 1, GFP_ATOMIC);
+ min_port = 0;
+ rc = idr_alloc_u32(&qrtr_ports, ipc, &min_port, 0, GFP_ATOMIC);
} else {
- rc = idr_alloc(&qrtr_ports, ipc, *port, *port + 1, GFP_ATOMIC);
- if (rc >= 0)
- *port = rc;
+ min_port = *port;
+ rc = idr_alloc_u32(&qrtr_ports, ipc, &min_port, *port, GFP_ATOMIC);
+ if (!rc)
+ *port = min_port;
}
mutex_unlock(&qrtr_port_lock);
case RDS_CMSG_ZCOPY_COOKIE:
zcopy_cookie = true;
- /* fall through */
+ fallthrough;
case RDS_CMSG_RDMA_DEST:
case RDS_CMSG_RDMA_MAP:
switch (frametype) {
case ROSE_RESET_REQUEST:
rose_write_internal(sk, ROSE_RESET_CONFIRMATION);
- /* fall through */
+ fallthrough;
case ROSE_RESET_CONFIRMATION:
rose_stop_timer(sk);
rose_start_idletimer(sk);
case 0:
rose_node->neighbour[0] =
rose_node->neighbour[1];
- /* fall through */
+ fallthrough;
case 1:
rose_node->neighbour[1] =
rose_node->neighbour[2];
switch (i) {
case 0:
t->neighbour[0] = t->neighbour[1];
- /* fall through */
+ fallthrough;
case 1:
t->neighbour[1] = t->neighbour[2];
case 2:
ret = 0;
break;
}
- /* Fall through */
+ fallthrough;
default:
ret = -EBUSY;
break;
rx->local = local;
rx->sk.sk_state = RXRPC_CLIENT_BOUND;
- /* Fall through */
+ fallthrough;
case RXRPC_CLIENT_BOUND:
if (!m->msg_name &&
m->msg_name = &rx->connect_srx;
m->msg_namelen = sizeof(rx->connect_srx);
}
- /* Fall through */
+ fallthrough;
case RXRPC_SERVER_BOUND:
case RXRPC_SERVER_LISTENING:
ret = rxrpc_do_sendmsg(rx, m, len);
RXRPC_CALL_RX_LAST, /* Received the last packet (at rxtx_top) */
RXRPC_CALL_TX_LAST, /* Last packet in Tx buffer (at rxtx_top) */
RXRPC_CALL_SEND_PING, /* A ping will need to be sent */
- RXRPC_CALL_PINGING, /* Ping in process */
RXRPC_CALL_RETRANS_TIMEOUT, /* Retransmission due to timeout occurred */
RXRPC_CALL_BEGAN_RX_TIMER, /* We began the expect_rx_by timer */
RXRPC_CALL_RX_HEARD, /* The peer responded at least once to this call */
rxrpc_seq_t ackr_consumed; /* Highest packet shown consumed */
rxrpc_seq_t ackr_seen; /* Highest packet shown seen */
- /* ping management */
- rxrpc_serial_t ping_serial; /* Last ping sent */
- ktime_t ping_time; /* Time last ping sent */
+ /* RTT management */
+ rxrpc_serial_t rtt_serial[4]; /* Serial number of DATA or PING sent */
+ ktime_t rtt_sent_at[4]; /* Time packet sent */
+ unsigned long rtt_avail; /* Mask of available slots in bits 0-3,
+ * Mask of pending samples in 8-11 */
+#define RXRPC_CALL_RTT_AVAIL_MASK 0xf
+#define RXRPC_CALL_RTT_PEND_SHIFT 8
/* transmission-phase ACK management */
ktime_t acks_latest_ts; /* Timestamp of latest ACK received */
/*
* rtt.c
*/
-void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace,
+void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace, int,
rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t);
unsigned long rxrpc_get_rto_backoff(struct rxrpc_peer *, bool);
void rxrpc_peer_init_rtt(struct rxrpc_peer *);
case RXRPC_CALL_SERVER_ACCEPTING:
__rxrpc_abort_call("REJ", call, 1, RX_USER_ABORT, -ECONNABORTED);
abort = true;
- /* fall through */
+ fallthrough;
case RXRPC_CALL_COMPLETE:
ret = call->error;
goto out_discard;
call->cong_ssthresh = RXRPC_RXTX_BUFF_SIZE - 1;
call->rxnet = rxnet;
+ call->rtt_avail = RXRPC_CALL_RTT_AVAIL_MASK;
atomic_inc(&rxnet->nr_calls);
return call;
conn->cache_state = RXRPC_CONN_CLIENT_ACTIVE;
rxrpc_activate_channels_locked(conn);
}
- /* fall through */
+ fallthrough;
case RXRPC_CONN_CLIENT_ACTIVE:
if (list_empty(&conn->waiting_calls)) {
rxrpc_deactivate_one_channel(conn, channel);
}
/*
- * Process a requested ACK.
+ * See if there's a cached RTT probe to complete.
*/
-static void rxrpc_input_requested_ack(struct rxrpc_call *call,
- ktime_t resp_time,
- rxrpc_serial_t orig_serial,
- rxrpc_serial_t ack_serial)
+static void rxrpc_complete_rtt_probe(struct rxrpc_call *call,
+ ktime_t resp_time,
+ rxrpc_serial_t acked_serial,
+ rxrpc_serial_t ack_serial,
+ enum rxrpc_rtt_rx_trace type)
{
- struct rxrpc_skb_priv *sp;
- struct sk_buff *skb;
+ rxrpc_serial_t orig_serial;
+ unsigned long avail;
ktime_t sent_at;
- int ix;
+ bool matched = false;
+ int i;
- for (ix = 0; ix < RXRPC_RXTX_BUFF_SIZE; ix++) {
- skb = call->rxtx_buffer[ix];
- if (!skb)
- continue;
+ avail = READ_ONCE(call->rtt_avail);
+ smp_rmb(); /* Read avail bits before accessing data. */
- sent_at = skb->tstamp;
- smp_rmb(); /* Read timestamp before serial. */
- sp = rxrpc_skb(skb);
- if (sp->hdr.serial != orig_serial)
+ for (i = 0; i < ARRAY_SIZE(call->rtt_serial); i++) {
+ if (!test_bit(i + RXRPC_CALL_RTT_PEND_SHIFT, &avail))
continue;
- goto found;
- }
- return;
+ sent_at = call->rtt_sent_at[i];
+ orig_serial = call->rtt_serial[i];
+
+ if (orig_serial == acked_serial) {
+ clear_bit(i + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);
+ smp_mb(); /* Read data before setting avail bit */
+ set_bit(i, &call->rtt_avail);
+ if (type != rxrpc_rtt_rx_cancel)
+ rxrpc_peer_add_rtt(call, type, i, acked_serial, ack_serial,
+ sent_at, resp_time);
+ else
+ trace_rxrpc_rtt_rx(call, rxrpc_rtt_rx_cancel, i,
+ orig_serial, acked_serial, 0, 0);
+ matched = true;
+ }
+
+ /* If a later serial is being acked, then mark this slot as
+ * being available.
+ */
+ if (after(acked_serial, orig_serial)) {
+ trace_rxrpc_rtt_rx(call, rxrpc_rtt_rx_obsolete, i,
+ orig_serial, acked_serial, 0, 0);
+ clear_bit(i + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);
+ smp_wmb();
+ set_bit(i, &call->rtt_avail);
+ }
+ }
-found:
- rxrpc_peer_add_rtt(call, rxrpc_rtt_rx_requested_ack,
- orig_serial, ack_serial, sent_at, resp_time);
+ if (!matched)
+ trace_rxrpc_rtt_rx(call, rxrpc_rtt_rx_lost, 9, 0, acked_serial, 0, 0);
}
/*
*/
static void rxrpc_input_ping_response(struct rxrpc_call *call,
ktime_t resp_time,
- rxrpc_serial_t orig_serial,
+ rxrpc_serial_t acked_serial,
rxrpc_serial_t ack_serial)
{
- rxrpc_serial_t ping_serial;
- ktime_t ping_time;
-
- ping_time = call->ping_time;
- smp_rmb();
- ping_serial = READ_ONCE(call->ping_serial);
-
- if (orig_serial == call->acks_lost_ping)
+ if (acked_serial == call->acks_lost_ping)
rxrpc_input_check_for_lost_ack(call);
-
- if (before(orig_serial, ping_serial) ||
- !test_and_clear_bit(RXRPC_CALL_PINGING, &call->flags))
- return;
- if (after(orig_serial, ping_serial))
- return;
-
- rxrpc_peer_add_rtt(call, rxrpc_rtt_rx_ping_response,
- orig_serial, ack_serial, ping_time, resp_time);
}
/*
struct rxrpc_ackinfo info;
u8 acks[RXRPC_MAXACKS];
} buf;
- rxrpc_serial_t acked_serial;
+ rxrpc_serial_t ack_serial, acked_serial;
rxrpc_seq_t first_soft_ack, hard_ack, prev_pkt;
int nr_acks, offset, ioffset;
}
offset += sizeof(buf.ack);
+ ack_serial = sp->hdr.serial;
acked_serial = ntohl(buf.ack.serial);
first_soft_ack = ntohl(buf.ack.firstPacket);
prev_pkt = ntohl(buf.ack.previousPacket);
summary.ack_reason = (buf.ack.reason < RXRPC_ACK__INVALID ?
buf.ack.reason : RXRPC_ACK__INVALID);
- trace_rxrpc_rx_ack(call, sp->hdr.serial, acked_serial,
+ trace_rxrpc_rx_ack(call, ack_serial, acked_serial,
first_soft_ack, prev_pkt,
summary.ack_reason, nr_acks);
- if (buf.ack.reason == RXRPC_ACK_PING_RESPONSE)
+ switch (buf.ack.reason) {
+ case RXRPC_ACK_PING_RESPONSE:
rxrpc_input_ping_response(call, skb->tstamp, acked_serial,
- sp->hdr.serial);
- if (buf.ack.reason == RXRPC_ACK_REQUESTED)
- rxrpc_input_requested_ack(call, skb->tstamp, acked_serial,
- sp->hdr.serial);
+ ack_serial);
+ rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
+ rxrpc_rtt_rx_ping_response);
+ break;
+ case RXRPC_ACK_REQUESTED:
+ rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
+ rxrpc_rtt_rx_requested_ack);
+ break;
+ default:
+ if (acked_serial != 0)
+ rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
+ rxrpc_rtt_rx_cancel);
+ break;
+ }
if (buf.ack.reason == RXRPC_ACK_PING) {
- _proto("Rx ACK %%%u PING Request", sp->hdr.serial);
+ _proto("Rx ACK %%%u PING Request", ack_serial);
rxrpc_propose_ACK(call, RXRPC_ACK_PING_RESPONSE,
- sp->hdr.serial, true, true,
+ ack_serial, true, true,
rxrpc_propose_ack_respond_to_ping);
} else if (sp->hdr.flags & RXRPC_REQUEST_ACK) {
rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED,
- sp->hdr.serial, true, true,
+ ack_serial, true, true,
rxrpc_propose_ack_respond_to_ack);
}
/* Discard any out-of-order or duplicate ACKs (outside lock). */
if (!rxrpc_is_ack_valid(call, first_soft_ack, prev_pkt)) {
- trace_rxrpc_rx_discard_ack(call->debug_id, sp->hdr.serial,
+ trace_rxrpc_rx_discard_ack(call->debug_id, ack_serial,
first_soft_ack, call->ackr_first_seq,
prev_pkt, call->ackr_prev_seq);
return;
/* Discard any out-of-order or duplicate ACKs (inside lock). */
if (!rxrpc_is_ack_valid(call, first_soft_ack, prev_pkt)) {
- trace_rxrpc_rx_discard_ack(call->debug_id, sp->hdr.serial,
+ trace_rxrpc_rx_discard_ack(call->debug_id, ack_serial,
first_soft_ack, call->ackr_first_seq,
prev_pkt, call->ackr_prev_seq);
goto out;
RXRPC_TX_ANNO_LAST &&
summary.nr_acks == call->tx_top - hard_ack &&
rxrpc_is_client_call(call))
- rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
+ rxrpc_propose_ACK(call, RXRPC_ACK_PING, ack_serial,
false, true,
rxrpc_propose_ack_ping_for_lost_reply);
switch (READ_ONCE(call->state)) {
case RXRPC_CALL_SERVER_AWAIT_ACK:
rxrpc_call_completed(call);
- /* Fall through */
+ fallthrough;
case RXRPC_CALL_COMPLETE:
break;
default:
case RXRPC_PACKET_TYPE_BUSY:
if (rxrpc_to_server(sp))
goto discard;
- /* Fall through */
+ fallthrough;
case RXRPC_PACKET_TYPE_ACK:
case RXRPC_PACKET_TYPE_ACKALL:
if (sp->hdr.callNumber == 0)
goto bad_message;
- /* Fall through */
+ fallthrough;
case RXRPC_PACKET_TYPE_ABORT:
break;
/* Fall through and set IPv4 options too otherwise we don't get
* errors from IPv4 packets sent through the IPv6 socket.
*/
- /* Fall through */
+ fallthrough;
case AF_INET:
/* we want to receive ICMP errors */
ip_sock_set_recverr(local->socket->sk);
}
/*
+ * Record the beginning of an RTT probe.
+ */
+static int rxrpc_begin_rtt_probe(struct rxrpc_call *call, rxrpc_serial_t serial,
+ enum rxrpc_rtt_tx_trace why)
+{
+ unsigned long avail = call->rtt_avail;
+ int rtt_slot = 9;
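+
+	/* 9 is never a real slot; it only marks "no slot" in the tracepoint */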
+
+ if (!(avail & RXRPC_CALL_RTT_AVAIL_MASK))
+ goto no_slot;
+
+ rtt_slot = __ffs(avail & RXRPC_CALL_RTT_AVAIL_MASK);
+ if (!test_and_clear_bit(rtt_slot, &call->rtt_avail))
+ goto no_slot;
+
+ call->rtt_serial[rtt_slot] = serial;
+ call->rtt_sent_at[rtt_slot] = ktime_get_real();
+ smp_wmb(); /* Write data before avail bit */
+ set_bit(rtt_slot + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);
+
+ trace_rxrpc_rtt_tx(call, why, rtt_slot, serial);
+ return rtt_slot;
+
+no_slot:
+ trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_no_slot, rtt_slot, serial);
+ return -1;
+}
+
+/*
+ * Cancel an RTT probe.
+ */
+static void rxrpc_cancel_rtt_probe(struct rxrpc_call *call,
+ rxrpc_serial_t serial, int rtt_slot)
+{
+ if (rtt_slot != -1) {
+ clear_bit(rtt_slot + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);
+ smp_wmb(); /* Clear pending bit before setting slot */
+ set_bit(rtt_slot, &call->rtt_avail);
+ trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_cancel, rtt_slot, serial);
+ }
+}
+
+/*
* Send an ACK call packet.
*/
int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
rxrpc_serial_t serial;
rxrpc_seq_t hard_ack, top;
size_t len, n;
- int ret;
+ int ret, rtt_slot = -1;
u8 reason;
if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
if (_serial)
*_serial = serial;
- if (ping) {
- call->ping_serial = serial;
- smp_wmb();
- /* We need to stick a time in before we send the packet in case
- * the reply gets back before kernel_sendmsg() completes - but
- * asking UDP to send the packet can take a relatively long
- * time.
- */
- call->ping_time = ktime_get_real();
- set_bit(RXRPC_CALL_PINGING, &call->flags);
- trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_ping, serial);
- }
+ if (ping)
+ rtt_slot = rxrpc_begin_rtt_probe(call, serial, rxrpc_rtt_tx_ping);
ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
conn->params.peer->last_tx_at = ktime_get_seconds();
if (call->state < RXRPC_CALL_COMPLETE) {
if (ret < 0) {
- if (ping)
- clear_bit(RXRPC_CALL_PINGING, &call->flags);
+ rxrpc_cancel_rtt_probe(call, serial, rtt_slot);
rxrpc_propose_ACK(call, pkt->ack.reason,
ntohl(pkt->ack.serial),
false, true,
struct kvec iov[2];
rxrpc_serial_t serial;
size_t len;
- int ret;
+ int ret, rtt_slot = -1;
_enter(",{%d}", skb->len);
sp->hdr.serial = serial;
smp_wmb(); /* Set serial before timestamp */
skb->tstamp = ktime_get_real();
+ if (whdr.flags & RXRPC_REQUEST_ACK)
+ rtt_slot = rxrpc_begin_rtt_probe(call, serial, rxrpc_rtt_tx_data);
/* send the packet by UDP
* - returns -EMSGSIZE if UDP would have to fragment the packet
conn->params.peer->last_tx_at = ktime_get_seconds();
up_read(&conn->params.local->defrag_sem);
- if (ret < 0)
+ if (ret < 0) {
+ rxrpc_cancel_rtt_probe(call, serial, rtt_slot);
trace_rxrpc_tx_fail(call->debug_id, serial, ret,
rxrpc_tx_point_call_data_nofrag);
- else
+ } else {
trace_rxrpc_tx_packet(call->debug_id, &whdr,
rxrpc_tx_point_call_data_nofrag);
+ }
+
rxrpc_tx_backoff(call, ret);
if (ret == -EMSGSIZE)
goto send_fragmentable;
if (ret >= 0) {
if (whdr.flags & RXRPC_REQUEST_ACK) {
call->peer->rtt_last_req = skb->tstamp;
- trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_data, serial);
if (call->peer->rtt_count > 1) {
unsigned long nowj = jiffies, ack_lost_at;
sp->hdr.serial = serial;
smp_wmb(); /* Set serial before timestamp */
skb->tstamp = ktime_get_real();
+ if (whdr.flags & RXRPC_REQUEST_ACK)
+ rtt_slot = rxrpc_begin_rtt_probe(call, serial, rxrpc_rtt_tx_data);
switch (conn->params.local->srx.transport.family) {
case AF_INET6:
BUG();
}
- if (ret < 0)
+ if (ret < 0) {
+ rxrpc_cancel_rtt_probe(call, serial, rtt_slot);
trace_rxrpc_tx_fail(call->debug_id, serial, ret,
rxrpc_tx_point_call_data_frag);
- else
+ } else {
trace_rxrpc_tx_packet(call->debug_id, &whdr,
rxrpc_tx_point_call_data_frag);
+ }
rxrpc_tx_backoff(call, ret);
up_write(&conn->params.local->defrag_sem);
case SO_EE_ORIGIN_ICMP6:
if (err == EACCES)
err = EHOSTUNREACH;
- /* Fall through */
+ fallthrough;
default:
_proto("Rx Received error report { orig=%u }", ee->ee_origin);
break;
* rxrpc_kernel_get_srtt - Get a call's peer smoothed RTT
* @sock: The socket on which the call is in progress.
* @call: The call to query
+ * @_srtt: Where to store the SRTT value.
*
- * Get the call's peer smoothed RTT.
+ * Get the call's peer smoothed RTT in uS.
*/
-u32 rxrpc_kernel_get_srtt(struct socket *sock, struct rxrpc_call *call)
+bool rxrpc_kernel_get_srtt(struct socket *sock, struct rxrpc_call *call,
+ u32 *_srtt)
{
- return call->peer->srtt_us >> 3;
+ struct rxrpc_peer *peer = call->peer;
+
+ if (peer->rtt_count == 0) {
+ *_srtt = 1000000; /* 1S */
+ return false;
+ }
+
+ *_srtt = call->peer->srtt_us >> 3;
+ return true;
}
EXPORT_SYMBOL(rxrpc_kernel_get_srtt);
case RXRPC_ACK_DELAY:
if (ret != -EAGAIN)
break;
- /* Fall through */
+ fallthrough;
default:
rxrpc_send_ack_packet(call, false, NULL);
}
* exclusive access to the peer RTT data.
*/
void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
+ int rtt_slot,
rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial,
ktime_t send_time, ktime_t resp_time)
{
peer->rtt_count++;
spin_unlock(&peer->rtt_input_lock);
- trace_rxrpc_rtt_rx(call, why, send_serial, resp_serial,
+ trace_rxrpc_rtt_rx(call, why, rtt_slot, send_serial, resp_serial,
peer->srtt_us >> 3, peer->rto_j);
}
ret = -ENOMEM;
ticket = kmalloc(ticket_len, GFP_NOFS);
if (!ticket)
- goto temporary_error;
+ goto temporary_error_free_resp;
eproto = tracepoint_string("rxkad_tkt_short");
abort_code = RXKADPACKETSHORT;
temporary_error_free_ticket:
kfree(ticket);
+temporary_error_free_resp:
kfree(response);
temporary_error:
/* Ignore the response packet if we got a temporary error such as
trace_rxrpc_timer(call, rxrpc_timer_init_for_send_reply, now);
if (!last)
break;
- /* Fall through */
+ fallthrough;
case RXRPC_CALL_SERVER_SEND_REPLY:
call->state = RXRPC_CALL_SERVER_AWAIT_ACK;
rxrpc_notify_end_tx(rx, call, notify_end_tx);
if (p.call.timeouts.normal > 0 && j == 0)
j = 1;
WRITE_ONCE(call->next_rx_timo, j);
- /* Fall through */
+ fallthrough;
case 2:
j = msecs_to_jiffies(p.call.timeouts.idle);
if (p.call.timeouts.idle > 0 && j == 0)
j = 1;
WRITE_ONCE(call->next_req_timo, j);
- /* Fall through */
+ fallthrough;
case 1:
if (p.call.timeouts.hard > 0) {
j = msecs_to_jiffies(p.call.timeouts.hard);
err = ip_defrag(net, skb, user);
local_bh_enable();
if (err && err != -EINPROGRESS)
- goto out_free;
+ return err;
if (!err) {
*defrag = true;
case TC_ACT_QUEUED:
case TC_ACT_TRAP:
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
- /* fall through */
+ fallthrough;
case TC_ACT_SHOT:
return 0;
}
FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
tb[TCA_RED_EARLY_DROP_BLOCK], extack);
if (err)
- goto err_early_drop_init;
-
- err = tcf_qevent_init(&q->qe_mark, sch,
- FLOW_BLOCK_BINDER_TYPE_RED_MARK,
- tb[TCA_RED_MARK_BLOCK], extack);
- if (err)
- goto err_mark_init;
-
- return 0;
+ return err;
-err_mark_init:
- tcf_qevent_destroy(&q->qe_early_drop, sch);
-err_early_drop_init:
- del_timer_sync(&q->adapt_timer);
- red_offload(sch, false);
- qdisc_put(q->qdisc);
- return err;
+ return tcf_qevent_init(&q->qe_mark, sch,
+ FLOW_BLOCK_BINDER_TYPE_RED_MARK,
+ tb[TCA_RED_MARK_BLOCK], extack);
}
static int red_change(struct Qdisc *sch, struct nlattr *opt,
spin_unlock(&q->current_entry_lock);
}
-static void taprio_sched_to_offload(struct taprio_sched *q,
+static u32 tc_map_to_queue_mask(struct net_device *dev, u32 tc_mask)
+{
+ u32 i, queue_mask = 0;
+
+ for (i = 0; i < dev->num_tc; i++) {
+ u32 offset, count;
+
+ if (!(tc_mask & BIT(i)))
+ continue;
+
+ offset = dev->tc_to_txq[i].offset;
+ count = dev->tc_to_txq[i].count;
+
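+		/* e.g. offset 2, count 2 -> queues 2-3 -> mask 0xc */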
+ queue_mask |= GENMASK(offset + count - 1, offset);
+ }
+
+ return queue_mask;
+}
+
+static void taprio_sched_to_offload(struct net_device *dev,
struct sched_gate_list *sched,
- const struct tc_mqprio_qopt *mqprio,
struct tc_taprio_qopt_offload *offload)
{
struct sched_entry *entry;
e->command = entry->command;
e->interval = entry->interval;
- e->gate_mask = entry->gate_mask;
+ e->gate_mask = tc_map_to_queue_mask(dev, entry->gate_mask);
+
i++;
}
}
static int taprio_enable_offload(struct net_device *dev,
- struct tc_mqprio_qopt *mqprio,
struct taprio_sched *q,
struct sched_gate_list *sched,
struct netlink_ext_ack *extack)
return -ENOMEM;
}
offload->enable = 1;
- taprio_sched_to_offload(q, sched, mqprio, offload);
+ taprio_sched_to_offload(dev, sched, offload);
err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
if (err < 0) {
}
if (FULL_OFFLOAD_IS_ENABLED(q->flags))
- err = taprio_enable_offload(dev, mqprio, q, new_admin, extack);
+ err = taprio_enable_offload(dev, q, new_admin, extack);
else
err = taprio_disable_offload(dev, q, extack);
if (err)
case AF_INET:
if (!__ipv6_only_sock(sctp_opt2sk(sp)))
return 1;
- /* fallthru */
+ fallthrough;
default:
return 0;
}
case SCTP_CID_ABORT:
if (sctp_test_T_bit(chunk))
ctx->packet->vtag = ctx->asoc->c.my_vtag;
- /* fallthru */
+ fallthrough;
/* The following chunks are "response" chunks, i.e.
* they are generated in response to something we
case SCTP_CID_ECN_CWR:
case SCTP_CID_ASCONF_ACK:
one_packet = 1;
- /* Fall through */
+ fallthrough;
case SCTP_CID_SACK:
case SCTP_CID_HEARTBEAT:
if (!ctx->packet || !ctx->packet->has_cookie_echo)
return;
- /* fall through */
+ fallthrough;
case SCTP_STATE_ESTABLISHED:
case SCTP_STATE_SHUTDOWN_PENDING:
case SCTP_STATE_SHUTDOWN_RECEIVED:
break;
case SCTP_PARAM_ACTION_DISCARD_ERR:
retval = SCTP_IERROR_ERROR;
- /* Fall through */
+ fallthrough;
case SCTP_PARAM_ACTION_SKIP_ERR:
/* Make an ERROR chunk, preparing enough room for
* returning multiple unknown parameters.
if (timer_pending(timer))
break;
- /* fall through */
+ fallthrough;
case SCTP_CMD_TIMER_START:
timer = &asoc->timers[cmd->obj.to];
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(err_chunk));
}
- /* Fall Through */
+ fallthrough;
case SCTP_IERROR_AUTH_BAD_KEYID:
case SCTP_IERROR_BAD_SIG:
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
pr_debug("%s: begins, snum:%d\n", __func__, snum);
- local_bh_disable();
-
if (snum == 0) {
/* Search for an available port. */
int low, high, remaining, index;
continue;
index = sctp_phashfn(net, rover);
head = &sctp_port_hashtable[index];
- spin_lock(&head->lock);
+ spin_lock_bh(&head->lock);
sctp_for_each_hentry(pp, &head->chain)
if ((pp->port == rover) &&
net_eq(net, pp->net))
goto next;
break;
next:
- spin_unlock(&head->lock);
+ spin_unlock_bh(&head->lock);
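+			/* give the scheduler a chance during a long port-range scan */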
+ cond_resched();
} while (--remaining > 0);
/* Exhausted local port range during search? */
ret = 1;
if (remaining <= 0)
- goto fail;
+ return ret;
/* OK, here is the one we will use. HEAD (the port
 * hash table list entry) is non-NULL and we hold its
* port iterator, pp being NULL.
*/
head = &sctp_port_hashtable[sctp_phashfn(net, snum)];
- spin_lock(&head->lock);
+ spin_lock_bh(&head->lock);
sctp_for_each_hentry(pp, &head->chain) {
if ((pp->port == snum) && net_eq(pp->net, net))
goto pp_found;
ret = 0;
fail_unlock:
- spin_unlock(&head->lock);
-
-fail:
- local_bh_enable();
+ spin_unlock_bh(&head->lock);
return ret;
}
int ret;
if (outcnt <= stream->outcnt)
- return 0;
+ goto out;
ret = genradix_prealloc(&stream->out, outcnt, gfp);
if (ret)
return ret;
+out:
stream->outcnt = outcnt;
return 0;
}
int ret;
if (incnt <= stream->incnt)
- return 0;
+ goto out;
ret = genradix_prealloc(&stream->in, incnt, gfp);
if (ret)
return ret;
+out:
stream->incnt = incnt;
return 0;
}
cancel_work_sync(&smc->conn.close_work);
cancel_delayed_work_sync(&smc->conn.tx_work);
lock_sock(sk);
- sk->sk_state = SMC_CLOSED;
}
/* terminate smc socket abnormally - active abort
}
switch (sk->sk_state) {
case SMC_ACTIVE:
- sk->sk_state = SMC_PEERABORTWAIT;
- smc_close_cancel_work(smc);
- sk->sk_state = SMC_CLOSED;
- sock_put(sk); /* passive closing */
- break;
case SMC_APPCLOSEWAIT1:
case SMC_APPCLOSEWAIT2:
+ sk->sk_state = SMC_PEERABORTWAIT;
smc_close_cancel_work(smc);
+ if (sk->sk_state != SMC_PEERABORTWAIT)
+ break;
sk->sk_state = SMC_CLOSED;
- sock_put(sk); /* postponed passive closing */
+ sock_put(sk); /* (postponed) passive closing */
break;
case SMC_PEERCLOSEWAIT1:
case SMC_PEERCLOSEWAIT2:
case SMC_PEERFINCLOSEWAIT:
sk->sk_state = SMC_PEERABORTWAIT;
smc_close_cancel_work(smc);
+ if (sk->sk_state != SMC_PEERABORTWAIT)
+ break;
sk->sk_state = SMC_CLOSED;
smc_conn_free(&smc->conn);
release_clcsock = true;
case SMC_APPFINCLOSEWAIT:
sk->sk_state = SMC_PEERABORTWAIT;
smc_close_cancel_work(smc);
+ if (sk->sk_state != SMC_PEERABORTWAIT)
+ break;
sk->sk_state = SMC_CLOSED;
smc_conn_free(&smc->conn);
release_clcsock = true;
case SMC_PEERCLOSEWAIT1:
if (rxflags->peer_done_writing)
sk->sk_state = SMC_PEERCLOSEWAIT2;
- /* fall through */
+ fallthrough;
/* to check for closing */
case SMC_PEERCLOSEWAIT2:
if (!smc_cdc_rxed_any_close(conn))
if (ini->is_smcd) {
conn->rx_off = sizeof(struct smcd_cdc_msg);
smcd_cdc_rx_init(conn); /* init tasklet for this conn */
+ } else {
+ conn->rx_off = 0;
}
#ifndef KERNEL_HAS_ATOMIC64
spin_lock_init(&conn->acurs_lock);
list_del(&smc->conn.sndbuf_desc->list);
mutex_unlock(&smc->conn.lgr->sndbufs_lock);
smc_buf_free(smc->conn.lgr, false, smc->conn.sndbuf_desc);
+ smc->conn.sndbuf_desc = NULL;
}
return rc;
}
(req->diag_ext & (1 << (SMC_DIAG_DMBINFO - 1))) &&
!list_empty(&smc->conn.lgr->list)) {
struct smc_connection *conn = &smc->conn;
- struct smcd_diag_dmbinfo dinfo = {
- .linkid = *((u32 *)conn->lgr->id),
- .peer_gid = conn->lgr->peer_gid,
- .my_gid = conn->lgr->smcd->local_gid,
- .token = conn->rmb_desc->token,
- .peer_token = conn->peer_token
- };
+ struct smcd_diag_dmbinfo dinfo;
+
+ memset(&dinfo, 0, sizeof(dinfo));
+
+ dinfo.linkid = *((u32 *)conn->lgr->id);
+ dinfo.peer_gid = conn->lgr->peer_gid;
+ dinfo.my_gid = conn->lgr->smcd->local_gid;
+ dinfo.token = conn->rmb_desc->token;
+ dinfo.peer_token = conn->peer_token;
if (nla_put(skb, SMC_DIAG_DMBINFO, sizeof(dinfo), &dinfo) < 0)
goto errout;
struct smc_init_info ini;
int lnk_idx, rc = 0;
+ if (!llc->qp_mtu)
+ goto out_reject;
+
ini.vlan_id = lgr->vlan_id;
smc_pnet_find_alt_roce(lgr, &ini, link->smcibdev);
if (!memcmp(llc->sender_gid, link->peer_gid, SMC_GID_SIZE) &&
kfree(qentry);
}
+static bool smc_llc_is_empty_llc_message(union smc_llc_msg *llc)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(llc->raw.data); i++)
+ if (llc->raw.data[i])
+ return false;
+ return true;
+}
+
static bool smc_llc_is_local_add_link(union smc_llc_msg *llc)
{
if (llc->raw.hdr.common.type == SMC_LLC_ADD_LINK &&
- !llc->add_link.qp_mtu && !llc->add_link.link_num)
+ smc_llc_is_empty_llc_message(llc))
return true;
return false;
}
EXPORT_SYMBOL(kernel_getsockname);
/**
- * kernel_peername - get the address which the socket is connected (kernel space)
+ * kernel_getpeername - get the address which the socket is connected (kernel space)
* @sock: socket
* @addr: address holder
*
EXPORT_SYMBOL(kernel_sendpage_locked);
/**
- * kernel_shutdown - shut down part of a full-duplex connection (kernel space)
+ * kernel_sock_shutdown - shut down part of a full-duplex connection (kernel space)
* @sock: socket
* @how: connection part
*
switch (conflen) {
case 16:
*q++ = i++;
- /* fall through */
+ fallthrough;
case 8:
*q++ = i++;
break;
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/auth_gss.h>
#include <linux/sunrpc/gss_err.h>
-#include <linux/sunrpc/auth_gss.h>
#define CREATE_TRACE_POINTS
#include <trace/events/rpcgss.h>
switch (status) {
case -ENOMEM:
rpc_delay(task, HZ >> 2);
- /* fall through */
+ fallthrough;
case -EAGAIN: /* woken up; retry */
task->tk_action = call_retry_reserve;
return;
/* Use rate-limiting and a max number of retries if refresh
* had status 0 but failed to update the cred.
*/
- /* fall through */
+ fallthrough;
case -ETIMEDOUT:
rpc_delay(task, 3*HZ);
- /* fall through */
+ fallthrough;
case -EAGAIN:
status = -EACCES;
- /* fall through */
+ fallthrough;
case -EKEYEXPIRED:
if (!task->tk_cred_retry)
break;
rpc_force_rebind(clnt);
goto out_retry;
}
- /* fall through */
+ fallthrough;
case -ECONNRESET:
case -ECONNABORTED:
case -ENETDOWN:
break;
/* retry with existing socket, after a delay */
rpc_delay(task, 3*HZ);
- /* fall through */
+ fallthrough;
case -EADDRINUSE:
case -ENOTCONN:
case -EAGAIN:
*/
case -ENOBUFS:
rpc_delay(task, HZ>>2);
- /* fall through */
+ fallthrough;
case -EBADSLT:
case -EAGAIN:
task->tk_action = call_transmit;
rpc_call_rpcerror(task, task->tk_status);
return;
}
- /* fall through */
+ fallthrough;
case -ECONNRESET:
case -ECONNABORTED:
case -EADDRINUSE:
break;
case -ENOBUFS:
rpc_delay(task, HZ>>2);
- /* fall through */
+ fallthrough;
case -EBADSLT:
case -EAGAIN:
task->tk_status = 0;
* were a timeout.
*/
rpc_delay(task, 3*HZ);
- /* fall through */
+ fallthrough;
case -ETIMEDOUT:
break;
case -ECONNREFUSED:
break;
case -EADDRINUSE:
rpc_delay(task, 3*HZ);
- /* fall through */
+ fallthrough;
case -EPIPE:
case -EAGAIN:
break;
case -EAGAIN:
xprt_add_backlog(xprt, task);
dprintk("RPC: waiting for request slot\n");
- /* fall through */
+ fallthrough;
default:
task->tk_status = -EAGAIN;
}
case RDMA_CM_EVENT_DEVICE_REMOVAL:
pr_info("rpcrdma: removing device %s for %pISpc\n",
ep->re_id->device->name, sap);
- /* fall through */
+ fallthrough;
case RDMA_CM_EVENT_ADDR_CHANGE:
ep->re_connect_status = -ENODEV;
goto disconnected;
default:
dprintk("RPC: sendmsg returned unrecognized error %d\n",
-status);
- /* fall through */
+ fallthrough;
case -EPIPE:
xs_close(xprt);
status = -ENOTCONN;
xprt->connect_cookie++;
clear_bit(XPRT_CONNECTED, &xprt->state);
xs_run_error_worker(transport, XPRT_SOCK_WAKE_DISCONNECT);
- /* fall through */
+ fallthrough;
case TCP_CLOSING:
/*
* If the server closed down the connection, make sure that
switch (ret) {
case 0:
xs_set_srcport(transport, sock);
- /* fall through */
+ fallthrough;
case -EINPROGRESS:
/* SYN_SENT! */
if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
default:
printk("%s: connect returned unhandled error %d\n",
__func__, status);
- /* fall through */
+ fallthrough;
case -EADDRNOTAVAIL:
/* We're probably in TIME_WAIT. Get rid of existing socket,
* and retry
menuconfig TIPC
tristate "The TIPC Protocol"
depends on INET
+ depends on IPV6 || IPV6=n
help
The Transparent Inter Process Communication (TIPC) protocol is
specially designed for intra cluster communication. This protocol
test_and_set_bit_lock(0, &b->up);
break;
}
- /* fall through */
+ fallthrough;
case NETDEV_GOING_DOWN:
clear_bit_unlock(0, &b->up);
tipc_reset_bearer(net, b);
if (aead->cloned) {
tipc_aead_put(aead->cloned);
} else {
- head = *this_cpu_ptr(aead->tfm_entry);
+ head = *get_cpu_ptr(aead->tfm_entry);
+ put_cpu_ptr(aead->tfm_entry);
list_for_each_entry_safe(tfm_entry, tmp, &head->list, list) {
crypto_free_aead(tfm_entry->tfm);
list_del(&tfm_entry->list);
*/
static struct crypto_aead *tipc_aead_tfm_next(struct tipc_aead *aead)
{
- struct tipc_tfm **tfm_entry = this_cpu_ptr(aead->tfm_entry);
+ struct tipc_tfm **tfm_entry;
+ struct crypto_aead *tfm;
+ tfm_entry = get_cpu_ptr(aead->tfm_entry);
*tfm_entry = list_next_entry(*tfm_entry, list);
- return (*tfm_entry)->tfm;
+ tfm = (*tfm_entry)->tfm;
+ put_cpu_ptr(tfm_entry);
+
+ return tfm;
}
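For context on the get_cpu_ptr()/put_cpu_ptr() conversions above: this_cpu_ptr() does not disable preemption, so in a preemptible section the task can migrate to another CPU between fetching its per-CPU slot and updating it, corrupting a different CPU's cursor. get_cpu_ptr() disables preemption and put_cpu_ptr() re-enables it, pinning the task for the whole read-modify sequence. The pattern, reduced to a kernel-context sketch (struct item and slot are hypothetical stand-ins for tipc_tfm and aead->tfm_entry):

/* Sketch: preemption-safe update of a per-CPU cursor. Kernel context. */
#include <linux/percpu.h>
#include <linux/list.h>

struct item {
	struct list_head list;
};

static struct item * __percpu *slot;	/* hypothetical per-CPU cursor */

static struct item *advance_cursor(void)
{
	struct item **entry;
	struct item *it;

	entry = get_cpu_ptr(slot);	/* disables preemption */
	*entry = list_next_entry(*entry, list);
	it = *entry;
	put_cpu_ptr(slot);		/* re-enables preemption */

	return it;
}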
/**
switch (err) {
case 0:
this_cpu_inc(tx->stats->stat[STAT_ASYNC_OK]);
+ rcu_read_lock();
if (likely(test_bit(0, &b->up)))
b->media->send_msg(net, skb, b, &tx_ctx->dst);
else
kfree_skb(skb);
+ rcu_read_unlock();
break;
case -EINPROGRESS:
return;
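The rcu_read_lock()/rcu_read_unlock() pair added above reflects that this TX-complete callback runs asynchronously, outside any RCU read-side section the original sender held, while the bearer it dereferences is presumably RCU-managed and can be freed concurrently. A kernel-context sketch of the protection pattern (struct bearer and the_bearer are illustrative, not TIPC definitions):

/* Sketch: RCU read-side pinning around an RCU-managed pointer deref. */
#include <linux/rcupdate.h>

struct bearer {
	void (*send)(void);
};

static struct bearer __rcu *the_bearer;	/* hypothetical RCU pointer */

static void send_one(void)
{
	struct bearer *b;

	rcu_read_lock();		/* pin b against concurrent free */
	b = rcu_dereference(the_bearer);
	if (b)
		b->send();
	rcu_read_unlock();
}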
update = true;
deliver = false;
}
- /* Fall thru */
+ fallthrough;
case TIPC_GRP_BCAST_MSG:
m->bc_rcv_nxt++;
ack = msg_grp_bc_ack_req(hdr);
skb_queue_tail(mc_inputq, skb);
return true;
}
- /* fall through */
+ fallthrough;
case CONN_MANAGER:
skb_queue_tail(inputq, skb);
return true;
static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
struct tipc_nl_compat_msg *msg)
{
- int err;
+ struct nlmsghdr *nlh;
struct sk_buff *arg;
+ int err;
if (msg->req_type && (!msg->req_size ||
!TLV_CHECK_TYPE(msg->req, msg->req_type)))
return -ENOMEM;
}
+ nlh = nlmsg_put(arg, 0, 0, tipc_genl_family.id, 0, NLM_F_MULTI);
+ if (!nlh) {
+ kfree_skb(arg);
+ kfree_skb(msg->rep);
+ msg->rep = NULL;
+ return -EMSGSIZE;
+ }
+ nlmsg_end(arg, nlh);
+
err = __tipc_nl_compat_dumpit(cmd, msg, arg);
if (err) {
kfree_skb(msg->rep);
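As I read the hunk above, the dummy NLM_F_MULTI header primed into arg gives the compat dump path a valid netlink message to parse later, instead of reading uninitialized bytes from a freshly allocated skb. The priming pattern on its own (kernel context; family_id is an illustrative parameter):

/* Sketch: write a valid netlink header into an skb before it is parsed. */
#include <net/netlink.h>

static int prime_skb(struct sk_buff *skb, int family_id)
{
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, 0, 0, family_id, 0, NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;
	nlmsg_end(skb, nlh);	/* finalize nlmsg_len */
	return 0;
}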
case TIPC_ESTABLISHED:
if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
revents |= EPOLLOUT;
- /* fall through */
+ fallthrough;
case TIPC_LISTEN:
case TIPC_CONNECTING:
if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
* case is EINPROGRESS, rather than EALREADY.
*/
res = -EINPROGRESS;
- /* fall through */
+ fallthrough;
case TIPC_CONNECTING:
if (!timeout) {
if (previous == TIPC_CONNECTING)
trace_tipc_sk_shutdown(sk, NULL, TIPC_DUMP_ALL, " ");
__tipc_shutdown(sock, TIPC_CONN_SHUTDOWN);
- sk->sk_shutdown = SEND_SHUTDOWN;
+ if (tipc_sk_type_connectionless(sk))
+ sk->sk_shutdown = SHUTDOWN_MASK;
+ else
+ sk->sk_shutdown = SEND_SHUTDOWN;
if (sk->sk_state == TIPC_DISCONNECTING) {
/* Discard any unreceived messages */
__skb_queue_purge(&sk->sk_receive_queue);
- /* Wake up anyone sleeping in poll */
- sk->sk_state_change(sk);
res = 0;
} else {
res = -ENOTCONN;
}
+ /* Wake up anyone sleeping in poll. */
+ sk->sk_state_change(sk);
release_sock(sk);
return res;
struct udp_tunnel_sock_cfg tuncfg = {NULL};
struct nlattr *opts[TIPC_NLA_UDP_MAX + 1];
u8 node_id[NODE_ID_LEN] = {0,};
+ struct net_device *dev;
int rmcast = 0;
ub = kzalloc(sizeof(*ub), GFP_ATOMIC);
rcu_assign_pointer(ub->bearer, b);
tipc_udp_media_addr_set(&b->addr, &local);
if (local.proto == htons(ETH_P_IP)) {
- struct net_device *dev;
-
dev = __ip_dev_find(net, local.ipv4.s_addr, false);
if (!dev) {
err = -ENODEV;
b->mtu = b->media->mtu;
#if IS_ENABLED(CONFIG_IPV6)
} else if (local.proto == htons(ETH_P_IPV6)) {
- struct net_device *dev;
-
- dev = ipv6_dev_find(net, &local.ipv6);
+ dev = ub->ifindex ? __dev_get_by_index(net, ub->ifindex) : NULL;
+ dev = ipv6_dev_find(net, &local.ipv6, dev);
if (!dev) {
err = -ENODEV;
goto err;
*/
case SOCK_RAW:
sock->type = SOCK_DGRAM;
- /* fall through */
+ fallthrough;
case SOCK_DGRAM:
sock->ops = &unix_dgram_ops;
break;
*/
#include <linux/export.h>
+#include <linux/bitfield.h>
#include <net/cfg80211.h>
#include "core.h"
#include "rdev-ops.h"
struct ieee80211_sta_vht_cap *vht_cap;
struct ieee80211_edmg *edmg_cap;
u32 width, control_freq, cap;
+ bool support_80_80 = false;
if (WARN_ON(!cfg80211_chandef_valid(chandef)))
return false;
if (!ht_cap->ht_supported &&
chandef->chan->band != NL80211_BAND_6GHZ)
return false;
- /* fall through */
+ fallthrough;
case NL80211_CHAN_WIDTH_20_NOHT:
prohibited_flags |= IEEE80211_CHAN_NO_20MHZ;
width = 20;
return false;
break;
case NL80211_CHAN_WIDTH_80P80:
- cap = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
- if (chandef->chan->band != NL80211_BAND_6GHZ &&
- cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ)
+ cap = vht_cap->cap;
+ support_80_80 =
+ (cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ) ||
+ (cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ &&
+ cap & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK) ||
+ u32_get_bits(cap, IEEE80211_VHT_CAP_EXT_NSS_BW_MASK) > 1;
+ if (chandef->chan->band != NL80211_BAND_6GHZ && !support_80_80)
return false;
- /* fall through */
+ fallthrough;
case NL80211_CHAN_WIDTH_80:
prohibited_flags |= IEEE80211_CHAN_NO_80MHZ;
width = 80;
return false;
cap = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
if (cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ &&
- cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ)
+ cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ &&
+ !(vht_cap->cap & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK))
return false;
break;
default:
sizeof(struct cfg80211_chan_def));
queue_work(cfg80211_wq, &rdev->propagate_cac_done_wk);
cfg80211_sched_dfs_chan_update(rdev);
- /* fall through */
+ fallthrough;
case NL80211_RADAR_CAC_ABORTED:
wdev->cac_started = false;
break;
state->split_start++;
if (state->split)
break;
- /* fall through */
+ fallthrough;
case 1:
if (nla_put(msg, NL80211_ATTR_CIPHER_SUITES,
sizeof(u32) * rdev->wiphy.n_cipher_suites,
state->split_start++;
if (state->split)
break;
- /* fall through */
+ fallthrough;
case 2:
if (nl80211_put_iftypes(msg, NL80211_ATTR_SUPPORTED_IFTYPES,
rdev->wiphy.interface_modes))
state->split_start++;
if (state->split)
break;
- /* fall through */
+ fallthrough;
case 3:
nl_bands = nla_nest_start_noflag(msg,
NL80211_ATTR_WIPHY_BANDS);
state->chan_start++;
if (state->split)
break;
- /* fall through */
+ fallthrough;
default:
/* add frequencies */
nl_freqs = nla_nest_start_noflag(msg,
state->split_start++;
if (state->split)
break;
- /* fall through */
+ fallthrough;
case 4:
nl_cmds = nla_nest_start_noflag(msg,
NL80211_ATTR_SUPPORTED_COMMANDS);
state->split_start++;
if (state->split)
break;
- /* fall through */
+ fallthrough;
case 5:
if (rdev->ops->remain_on_channel &&
(rdev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL) &&
state->split_start++;
if (state->split)
break;
- /* fall through */
+ fallthrough;
case 6:
#ifdef CONFIG_PM
if (nl80211_send_wowlan(msg, rdev, state->split))
#else
state->split_start++;
#endif
- /* fall through */
+ fallthrough;
case 7:
if (nl80211_put_iftypes(msg, NL80211_ATTR_SOFTWARE_IFTYPES,
rdev->wiphy.software_iftypes))
state->split_start++;
if (state->split)
break;
- /* fall through */
+ fallthrough;
case 8:
if ((rdev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME) &&
nla_put_u32(msg, NL80211_ATTR_DEVICE_AP_SME,
break;
default:
WARN_ON(1);
- /* fall through */
+ fallthrough;
case RATE_INFO_BW_20:
rate_flg = 0;
break;
if (info->attrs[NL80211_ATTR_HE_6GHZ_CAPABILITY])
params.he_6ghz_capa =
- nla_data(info->attrs[NL80211_ATTR_HE_CAPABILITY]);
+ nla_data(info->attrs[NL80211_ATTR_HE_6GHZ_CAPABILITY]);
if (info->attrs[NL80211_ATTR_AIRTIME_WEIGHT])
params.airtime_weight =
if (WARN_ON(!alpha2))
return -EINVAL;
+ if (!is_world_regdom(alpha2) && !is_an_alpha2(alpha2))
+ return -EINVAL;
+
request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL);
if (!request)
return -ENOMEM;
switch (ftype) {
case CFG80211_BSS_FTYPE_BEACON:
ies->from_beacon = true;
- /* fall through */
+ fallthrough;
case CFG80211_BSS_FTYPE_UNKNOWN:
rcu_assign_pointer(tmp.pub.beacon_ies, ies);
break;
return err;
case CFG80211_CONN_ASSOC_FAILED_TIMEOUT:
*treason = NL80211_TIMEOUT_ASSOC;
- /* fall through */
+ fallthrough;
case CFG80211_CONN_ASSOC_FAILED:
cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid,
NULL, 0,
cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid,
NULL, 0,
WLAN_REASON_DEAUTH_LEAVING, false);
- /* fall through */
+ fallthrough;
case CFG80211_CONN_ABANDON:
/* free directly, disconnected event already sent */
cfg80211_sme_free(wdev);
return (freq - 2407) / 5;
else if (freq >= 4910 && freq <= 4980)
return (freq - 4000) / 5;
- else if (freq < 5945)
+ else if (freq < 5925)
return (freq - 5000) / 5;
+ else if (freq == 5935)
+ return 2;
else if (freq <= 45000) /* DMG band lower limit */
- /* see 802.11ax D4.1 27.3.22.2 */
- return (freq - 5940) / 5;
+ /* see 802.11ax D6.1 27.3.22.2 */
+ return (freq - 5950) / 5;
else if (freq >= 58320 && freq <= 70200)
return (freq - 56160) / 2160;
else
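The channel-math hunk above tracks the final 802.11ax (D6.1) 6 GHz channelization: the 5 GHz formula now stops at 5925 MHz, the 6 GHz band starts at 5950 MHz instead of 5940, and 5935 MHz becomes the dedicated 20 MHz-only channel 2. A standalone sketch with spot checks (it mirrors the kernel helper; the 2484 MHz special case is filled in from ieee80211_frequency_to_channel as I recall it, and the function name is illustrative):

/* Sketch: updated frequency (MHz) -> channel mapping, with spot checks. */
#include <stdio.h>

static int freq_to_chan(int freq)
{
	if (freq == 2484)
		return 14;
	else if (freq < 2484)
		return (freq - 2407) / 5;
	else if (freq >= 4910 && freq <= 4980)
		return (freq - 4000) / 5;
	else if (freq < 5925)
		return (freq - 5000) / 5;
	else if (freq == 5935)
		return 2;			/* 6 GHz-only 20 MHz channel */
	else if (freq <= 45000)
		return (freq - 5950) / 5;	/* 802.11ax D6.1 27.3.22.2 */
	else if (freq >= 58320 && freq <= 70200)
		return (freq - 56160) / 2160;
	return 0;
}

int main(void)
{
	printf("5180 MHz -> %d\n", freq_to_chan(5180));	/* 36, unchanged */
	printf("5935 MHz -> %d\n", freq_to_chan(5935));	/* 2 */
	printf("5955 MHz -> %d\n", freq_to_chan(5955));	/* 1, first 6 GHz channel */
	return 0;
}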
sband->bitrates[i].flags |=
IEEE80211_RATE_MANDATORY_G;
want--;
- /* fall through */
+ fallthrough;
default:
sband->bitrates[i].flags |=
IEEE80211_RATE_ERP_G;
case NL80211_IFTYPE_STATION:
if (dev->ieee80211_ptr->use_4addr)
break;
- /* fall through */
+ fallthrough;
case NL80211_IFTYPE_OCB:
case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_ADHOC:
wstats.qual.qual = sig + 110;
break;
}
- /* fall through */
+ fallthrough;
case CFG80211_SIGNAL_TYPE_UNSPEC:
if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_SIGNAL)) {
wstats.qual.updated |= IW_QUAL_LEVEL_UPDATED;
wstats.qual.qual = sinfo.signal;
break;
}
- /* fall through */
+ fallthrough;
default:
wstats.qual.updated |= IW_QUAL_LEVEL_INVALID;
wstats.qual.updated |= IW_QUAL_QUAL_INVALID;
*vc_fac_mask |= X25_MASK_REVERSE;
break;
}
- /*fall through */
+ fallthrough;
case X25_FAC_THROUGHPUT:
facilities->throughput = p[1];
*vc_fac_mask |= X25_MASK_THROUGHPUT;
case X25_RESET_REQUEST:
x25_write_internal(sk, X25_RESET_CONFIRMATION);
- /* fall through */
+ fallthrough;
case X25_RESET_CONFIRMATION: {
x25_stop_timer(sk);
x25->condition = 0x00;
switch (nexthdr) {
case NEXTHDR_FRAGMENT:
onlyproto = 1;
- /* fall through */
+ fallthrough;
case NEXTHDR_ROUTING:
case NEXTHDR_HOP:
case NEXTHDR_DEST:
"Option -%c requires an argument.\n\n",
optopt);
case 'h':
- // fallthrough
+ fallthrough;
default:
Usage();
return 0;
KBUILD_CFLAGS += -Wshadow
KBUILD_CFLAGS += $(call cc-option, -Wlogical-op)
KBUILD_CFLAGS += -Wmissing-field-initializers
-KBUILD_CFLAGS += -Wsign-compare
KBUILD_CFLAGS += -Wtype-limits
KBUILD_CFLAGS += $(call cc-option, -Wmaybe-uninitialized)
KBUILD_CFLAGS += $(call cc-option, -Wunused-macros)
KBUILD_CFLAGS += -Wpadded
KBUILD_CFLAGS += -Wpointer-arith
KBUILD_CFLAGS += -Wredundant-decls
+KBUILD_CFLAGS += -Wsign-compare
KBUILD_CFLAGS += -Wswitch-default
KBUILD_CFLAGS += $(call cc-option, -Wpacked-bitfield-compat)
# Check if the commit log has what seems like a diff which can confuse patch
if ($in_commit_log && !$commit_log_has_diff &&
- (($line =~ m@^\s+diff\b.*a/[\w/]+@ &&
- $line =~ m@^\s+diff\b.*a/([\w/]+)\s+b/$1\b@) ||
+ (($line =~ m@^\s+diff\b.*a/([\w/]+)@ &&
+ $line =~ m@^\s+diff\b.*a/[\w/]+\s+b/$1\b@) ||
$line =~ m@^\s*(?:\-\-\-\s+a/|\+\+\+\s+b/)@ ||
$line =~ m/^\s*\@\@ \-\d+,\d+ \+\d+,\d+ \@\@/)) {
ERROR("DIFF_IN_COMMIT_MSG",
static const char *key_pass;
static BIO *wb;
static char *cert_dst;
-int kbuild_verbose;
+static int kbuild_verbose;
static void write_cert(X509 *x509)
{
{ "__int128_t", BUILTIN_INT_KEYW },
{ "__uint128_t", BUILTIN_INT_KEYW },
- // According to rth, c99 defines "_Bool", __restrict", __restrict__", "restrict". KAO
+ // According to rth, c99 defines "_Bool", "__restrict", "__restrict__", "restrict". KAO
{ "_Bool", BOOL_KEYW },
- { "_restrict", RESTRICT_KEYW },
+ { "__restrict", RESTRICT_KEYW },
{ "__restrict__", RESTRICT_KEYW },
{ "restrict", RESTRICT_KEYW },
{ "asm", ASM_KEYW },
switch (ptype) {
case P_MENU:
child_count++;
- prompt = prompt;
if (single_menu_mode) {
item_make(menu, 'm',
"%s%*c%s",
connect(action, SIGNAL(toggled(bool)),
parent(), SLOT(setShowName(bool)));
connect(parent(), SIGNAL(showNameChanged(bool)),
- action, SLOT(setOn(bool)));
+ action, SLOT(setChecked(bool)));
action->setChecked(showName);
headerPopup->addAction(action);
connect(action, SIGNAL(toggled(bool)),
parent(), SLOT(setShowRange(bool)));
connect(parent(), SIGNAL(showRangeChanged(bool)),
- action, SLOT(setOn(bool)));
+ action, SLOT(setChecked(bool)));
action->setChecked(showRange);
headerPopup->addAction(action);
connect(action, SIGNAL(toggled(bool)),
parent(), SLOT(setShowData(bool)));
connect(parent(), SIGNAL(showDataChanged(bool)),
- action, SLOT(setOn(bool)));
+ action, SLOT(setChecked(bool)));
action->setChecked(showData);
headerPopup->addAction(action);
}
configSettings->endGroup();
connect(configApp, SIGNAL(aboutToQuit()), SLOT(saveSettings()));
}
+
+ contextMenu = createStandardContextMenu();
+ QAction *action = new QAction("Show Debug Info", contextMenu);
+
+ action->setCheckable(true);
+ connect(action, SIGNAL(toggled(bool)), SLOT(setShowDebug(bool)));
+ connect(this, SIGNAL(showDebugChanged(bool)), action, SLOT(setChecked(bool)));
+ action->setChecked(showDebug());
+ contextMenu->addSeparator();
+ contextMenu->addAction(action);
}
void ConfigInfoView::saveSettings(void)
void ConfigInfoView::menuInfo(void)
{
struct symbol* sym;
- QString head, debug, help;
+ QString info;
+ QTextStream stream(&info);
sym = _menu->sym;
if (sym) {
if (_menu->prompt) {
- head += "<big><b>";
- head += print_filter(_menu->prompt->text);
- head += "</b></big>";
+ stream << "<big><b>";
+ stream << print_filter(_menu->prompt->text);
+ stream << "</b></big>";
if (sym->name) {
- head += " (";
+ stream << " (";
if (showDebug())
- head += QString().sprintf("<a href=\"s%s\">", sym->name);
- head += print_filter(sym->name);
+ stream << "<a href=\"s" << sym->name << "\">";
+ stream << print_filter(sym->name);
if (showDebug())
- head += "</a>";
- head += ")";
+ stream << "</a>";
+ stream << ")";
}
} else if (sym->name) {
- head += "<big><b>";
+ stream << "<big><b>";
if (showDebug())
- head += QString().sprintf("<a href=\"s%s\">", sym->name);
- head += print_filter(sym->name);
+ stream << "<a href=\"s" << sym->name << "\">";
+ stream << print_filter(sym->name);
if (showDebug())
- head += "</a>";
- head += "</b></big>";
+ stream << "</a>";
+ stream << "</b></big>";
}
- head += "<br><br>";
+ stream << "<br><br>";
if (showDebug())
- debug = debug_info(sym);
+ stream << debug_info(sym);
- struct gstr help_gstr = str_new();
- menu_get_ext_help(_menu, &help_gstr);
- help = print_filter(str_get(&help_gstr));
- str_free(&help_gstr);
} else if (_menu->prompt) {
- head += "<big><b>";
- head += print_filter(_menu->prompt->text);
- head += "</b></big><br><br>";
+ stream << "<big><b>";
+ stream << print_filter(_menu->prompt->text);
+ stream << "</b></big><br><br>";
if (showDebug()) {
if (_menu->prompt->visible.expr) {
- debug += " dep: ";
- expr_print(_menu->prompt->visible.expr, expr_print_help, &debug, E_NONE);
- debug += "<br><br>";
+ stream << " dep: ";
+ expr_print(_menu->prompt->visible.expr,
+ expr_print_help, &stream, E_NONE);
+ stream << "<br><br>";
}
}
}
if (showDebug())
- debug += QString().sprintf("defined at %s:%d<br><br>", _menu->file->name, _menu->lineno);
+ stream << "defined at " << _menu->file->name << ":"
+ << _menu->lineno << "<br><br>";
- setText(head + debug + help);
+ setText(info);
}
QString ConfigInfoView::debug_info(struct symbol *sym)
{
QString debug;
+ QTextStream stream(&debug);
- debug += "type: ";
- debug += print_filter(sym_type_name(sym->type));
+ stream << "type: ";
+ stream << print_filter(sym_type_name(sym->type));
if (sym_is_choice(sym))
- debug += " (choice)";
+ stream << " (choice)";
-	debug += "<br>";
+	stream << "<br>";
if (sym->rev_dep.expr) {
- debug += "reverse dep: ";
- expr_print(sym->rev_dep.expr, expr_print_help, &debug, E_NONE);
- debug += "<br>";
+ stream << "reverse dep: ";
+ expr_print(sym->rev_dep.expr, expr_print_help, &stream, E_NONE);
+ stream << "<br>";
}
for (struct property *prop = sym->prop; prop; prop = prop->next) {
switch (prop->type) {
case P_PROMPT:
case P_MENU:
- debug += QString().sprintf("prompt: <a href=\"m%s\">", sym->name);
- debug += print_filter(prop->text);
- debug += "</a><br>";
+ stream << "prompt: <a href=\"m" << sym->name << "\">";
+ stream << print_filter(prop->text);
+ stream << "</a><br>";
break;
case P_DEFAULT:
case P_SELECT:
case P_COMMENT:
case P_IMPLY:
case P_SYMBOL:
- debug += prop_get_type_name(prop->type);
- debug += ": ";
- expr_print(prop->expr, expr_print_help, &debug, E_NONE);
- debug += "<br>";
+ stream << prop_get_type_name(prop->type);
+ stream << ": ";
+ expr_print(prop->expr, expr_print_help,
+ &stream, E_NONE);
+ stream << "<br>";
break;
case P_CHOICE:
if (sym_is_choice(sym)) {
- debug += "choice: ";
- expr_print(prop->expr, expr_print_help, &debug, E_NONE);
- debug += "<br>";
+ stream << "choice: ";
+ expr_print(prop->expr, expr_print_help,
+ &stream, E_NONE);
+ stream << "<br>";
}
break;
default:
- debug += "unknown property: ";
- debug += prop_get_type_name(prop->type);
- debug += "<br>";
+ stream << "unknown property: ";
+ stream << prop_get_type_name(prop->type);
+ stream << "<br>";
}
if (prop->visible.expr) {
- debug += " dep: ";
- expr_print(prop->visible.expr, expr_print_help, &debug, E_NONE);
- debug += "<br>";
+ stream << " dep: ";
+ expr_print(prop->visible.expr, expr_print_help,
+ &stream, E_NONE);
+ stream << "<br>";
}
}
- debug += "<br>";
+ stream << "<br>";
return debug;
}
void ConfigInfoView::expr_print_help(void *data, struct symbol *sym, const char *str)
{
- QString* text = reinterpret_cast<QString*>(data);
- QString str2 = print_filter(str);
+ QTextStream *stream = reinterpret_cast<QTextStream *>(data);
if (sym && sym->name && !(sym->flags & SYMBOL_CONST)) {
- *text += QString().sprintf("<a href=\"s%s\">", sym->name);
- *text += str2;
- *text += "</a>";
- } else
- *text += str2;
+ *stream << "<a href=\"s" << sym->name << "\">";
+ *stream << print_filter(str);
+ *stream << "</a>";
+ } else {
+ *stream << print_filter(str);
+ }
}
void ConfigInfoView::clicked(const QUrl &url)
struct menu *m = NULL;
if (count < 1) {
- qInfo() << "Clicked link is empty";
delete[] data;
return;
}
strcat(data, "$");
result = sym_re_search(data);
if (!result) {
- qInfo() << "Clicked symbol is invalid:" << data;
delete[] data;
return;
}
-	delete data;
+	delete[] data;
}
-QMenu* ConfigInfoView::createStandardContextMenu(const QPoint & pos)
-{
- QMenu* popup = Parent::createStandardContextMenu(pos);
- QAction* action = new QAction("Show Debug Info", popup);
-
- action->setCheckable(true);
- connect(action, SIGNAL(toggled(bool)), SLOT(setShowDebug(bool)));
- connect(this, SIGNAL(showDebugChanged(bool)), action, SLOT(setOn(bool)));
- action->setChecked(showDebug());
- popup->addSeparator();
- popup->addAction(action);
- return popup;
-}
-
-void ConfigInfoView::contextMenuEvent(QContextMenuEvent *e)
+void ConfigInfoView::contextMenuEvent(QContextMenuEvent *event)
{
- Parent::contextMenuEvent(e);
+ contextMenu->popup(event->globalPos());
+ event->accept();
}
ConfigSearchWindow::ConfigSearchWindow(ConfigMainWindow *parent)
};
enum colIdx {
- promptColIdx, nameColIdx, noColIdx, modColIdx, yesColIdx, dataColIdx, colNr
+ promptColIdx, nameColIdx, noColIdx, modColIdx, yesColIdx, dataColIdx
};
enum listMode {
singleMode, menuMode, symbolMode, fullMode, listMode
class ConfigInfoView : public QTextBrowser {
Q_OBJECT
typedef class QTextBrowser Parent;
+ QMenu *contextMenu;
public:
ConfigInfoView(QWidget* parent, const char *name = 0);
bool showDebug(void) const { return _showDebug; }
QString debug_info(struct symbol *sym);
static QString print_filter(const QString &str);
static void expr_print_help(void *data, struct symbol *sym, const char *str);
- QMenu *createStandardContextMenu(const QPoint & pos);
- void contextMenuEvent(QContextMenuEvent *e);
+ void contextMenuEvent(QContextMenuEvent *event);
struct symbol *sym;
struct menu *_menu;
}
my %setconfigs;
-my @preserved_kconfigs = split(/:/,$ENV{LMC_KEEP});
+my @preserved_kconfigs;
+if (defined($ENV{'LMC_KEEP'})) {
+ @preserved_kconfigs = split(/:/,$ENV{LMC_KEEP});
+}
sub in_preserved_kconfigs {
my $kconfig = $config2kfile{$_[0]};
stack = NULL;
break;
}
- /* fall through - to X_NAME */
+ fallthrough; /* to X_NAME */
case AA_X_NAME:
if (xindex & AA_X_CHILD)
/* released by caller */
switch (AUDIT_MODE(profile)) {
case AUDIT_ALL:
perms->audit = ALL_PERMS_MASK;
- /* fall through */
+ fallthrough;
case AUDIT_NOQUIET:
perms->quiet = 0;
break;
case AUDIT_QUIET:
perms->audit = 0;
- /* fall through */
+ fallthrough;
case AUDIT_QUIET_DENIED:
perms->quiet = ALL_PERMS_MASK;
break;
case IMA_XATTR_DIGEST_NG:
/* first byte contains algorithm id */
hash_start = 1;
- /* fall through */
+ fallthrough;
case IMA_XATTR_DIGEST:
if (iint->flags & IMA_DIGSIG_REQUIRED) {
*cause = "IMA-signature-required";
/* It's fine not to have xattrs when using a modsig. */
if (try_modsig)
break;
- /* fall through */
+ fallthrough;
case INTEGRITY_NOLABEL: /* No security.evm xattr. */
cause = "missing-HMAC";
goto out;
case Opt_uid_gt:
case Opt_euid_gt:
entry->uid_op = &uid_gt;
- /* fall through */
+ fallthrough;
case Opt_uid_lt:
case Opt_euid_lt:
if ((token == Opt_uid_lt) || (token == Opt_euid_lt))
entry->uid_op = &uid_lt;
- /* fall through */
+ fallthrough;
case Opt_uid_eq:
case Opt_euid_eq:
uid_token = (token == Opt_uid_eq) ||
break;
case Opt_fowner_gt:
entry->fowner_op = &uid_gt;
- /* fall through */
+ fallthrough;
case Opt_fowner_lt:
if (token == Opt_fowner_lt)
entry->fowner_op = &uid_lt;
- /* fall through */
+ fallthrough;
case Opt_fowner_eq:
ima_log_string_op(ab, "fowner", args[0].from,
entry->fowner_op);
/* skip ':' and '\0' */
buf_ptr += 2;
buflen -= buf_ptr - field_data->data;
- /* fall through */
+ fallthrough;
case DATA_FMT_DIGEST:
case DATA_FMT_HEX:
if (!buflen)
case -EAGAIN: /* no key */
if (ret)
break;
- /* fall through */
+ fallthrough;
case -ENOKEY: /* negative key */
ret = key_ref;
break;
case -EAGAIN: /* no key */
if (ret)
break;
- /* fall through */
+ fallthrough;
case -ENOKEY: /* negative key */
ret = key_ref;
break;
case -EAGAIN: /* no key */
if (ret)
break;
- /* fall through */
+ fallthrough;
case -ENOKEY: /* negative key */
ret = key_ref;
break;
}
}
- /* fall through */
+ fallthrough;
case KEY_REQKEY_DEFL_THREAD_KEYRING:
dest_keyring = key_get(cred->thread_keyring);
if (dest_keyring)
break;
- /* fall through */
+ fallthrough;
case KEY_REQKEY_DEFL_PROCESS_KEYRING:
dest_keyring = key_get(cred->process_keyring);
if (dest_keyring)
break;
- /* fall through */
+ fallthrough;
case KEY_REQKEY_DEFL_SESSION_KEYRING:
dest_keyring = key_get(cred->session_keyring);
if (dest_keyring)
break;
- /* fall through */
+ fallthrough;
case KEY_REQKEY_DEFL_USER_SESSION_KEYRING:
ret = look_up_user_keyrings(NULL, &dest_keyring);
if (ret < 0)
switch (cmd) {
case FIONREAD:
- /* fall through */
case FIBMAP:
- /* fall through */
case FIGETBSZ:
- /* fall through */
case FS_IOC_GETFLAGS:
- /* fall through */
case FS_IOC_GETVERSION:
error = file_has_perm(cred, file, FILE__GETATTR);
break;
case FS_IOC_SETFLAGS:
- /* fall through */
case FS_IOC_SETVERSION:
error = file_has_perm(cred, file, FILE__SETATTR);
break;
/* sys_ioctl() checks */
case FIONBIO:
- /* fall through */
case FIOASYNC:
error = file_has_perm(cred, file, 0);
break;
err = file_has_perm(cred, file, FILE__WRITE);
break;
}
- /* fall through */
+ fallthrough;
case F_SETOWN:
case F_SETSIG:
case F_GETFL:
scontext, tcontext);
}
- /* Fallthrough */
+ fallthrough;
case AVTAB_CHANGE:
if ((tclass == p->process_class) || sock)
/* Use the process MLS attributes. */
case AVTAB_MEMBER:
/* Use the process effective MLS attributes. */
return mls_context_cpy_low(newcontext, scontext);
-
- /* fall through */
}
return -EINVAL;
}
* to set mount options simulate setting the
* superblock default.
*/
- /* Fall through */
+ fallthrough;
default:
/*
* This isn't an understood special case.
tomoyo_set_space(head);
tomoyo_set_string(head, cond->transit->name);
}
- /* fall through */
+ fallthrough;
case 1:
{
const u16 condc = cond->condc;
}
}
head->r.cond_step++;
- /* fall through */
+ fallthrough;
case 2:
if (!tomoyo_flush(head))
break;
head->r.cond_step++;
- /* fall through */
+ fallthrough;
case 3:
if (cond->grant_log != TOMOYO_GRANTLOG_AUTO)
tomoyo_io_printf(head, " grant_log=%s",
tomoyo_set_string(head, tomoyo_dif[i]);
head->r.index = 0;
head->r.step++;
- /* fall through */
+ fallthrough;
case 1:
while (head->r.index < TOMOYO_MAX_ACL_GROUPS) {
i = head->r.index++;
head->r.index = 0;
head->r.step++;
tomoyo_set_lf(head);
- /* fall through */
+ fallthrough;
case 2:
if (!tomoyo_read_domain2(head, &domain->acl_info_list))
return;
head->r.step++;
if (!tomoyo_set_lf(head))
return;
- /* fall through */
+ fallthrough;
case 3:
head->r.step = 0;
if (head->r.print_this_domain_only)
/* Check max_learning_entry parameter. */
if (tomoyo_domain_quota_is_ok(r))
break;
- /* fall through */
+ fallthrough;
default:
return 0;
}
case TOMOYO_DOMAINPOLICY:
if (tomoyo_select_domain(head, cp0))
continue;
- /* fall through */
+ fallthrough;
case TOMOYO_EXCEPTIONPOLICY:
if (!strcmp(cp0, "select transition_only")) {
head->r.print_transition_related_only = true;
continue;
}
- /* fall through */
+ fallthrough;
default:
if (!tomoyo_manager()) {
error = -EPERM;
case TOMOYO_TYPE_LINK:
if (!d_is_dir(path1->dentry))
break;
- /* fall through */
+ fallthrough;
case TOMOYO_TYPE_PIVOT_ROOT:
tomoyo_add_slash(&buf1);
tomoyo_add_slash(&buf2);
snd_BUG();
return -EINVAL;
}
- if (snd_BUG_ON(!snd_pcm_format_linear(format->format)))
- return -ENXIO;
+ if (!snd_pcm_format_linear(format->format))
+ return -EINVAL;
err = snd_pcm_plugin_build(plug, "Mu-Law<->linear conversion",
src_format, dst_format,
* timer tasklet
*
*/
-static void snd_timer_tasklet(unsigned long arg)
+static void snd_timer_tasklet(struct tasklet_struct *t)
{
- struct snd_timer *timer = (struct snd_timer *) arg;
+ struct snd_timer *timer = from_tasklet(timer, t, task_queue);
unsigned long flags;
if (timer->card && timer->card->shutdown) {
INIT_LIST_HEAD(&timer->ack_list_head);
INIT_LIST_HEAD(&timer->sack_list_head);
spin_lock_init(&timer->lock);
- tasklet_init(&timer->task_queue, snd_timer_tasklet,
- (unsigned long)timer);
+ tasklet_setup(&timer->task_queue, snd_timer_tasklet);
timer->max_instances = 1000; /* default limit per timer */
if (card != NULL) {
timer->module = card->module;
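Every tasklet hunk in this series follows the same mechanical conversion introduced with the 5.9 tasklet API: tasklet_init() took a void (*)(unsigned long) callback plus a casted data word, while tasklet_setup() passes the struct tasklet_struct itself and the callback recovers its container via from_tasklet(), a type-safe container_of() wrapper. The shape of the conversion, as a kernel-context sketch (mydev and its fields are illustrative):

/* Sketch: tasklet_init() -> tasklet_setup() conversion pattern. */
#include <linux/interrupt.h>

struct mydev {
	struct tasklet_struct task;
	int pending;
};

/* Old style, for comparison:
 *	static void mydev_task(unsigned long data)
 *	{
 *		struct mydev *dev = (struct mydev *)data;
 *		...
 *	}
 *	tasklet_init(&dev->task, mydev_task, (unsigned long)dev);
 */

/* New style: recover the owning structure from the tasklet pointer. */
static void mydev_task(struct tasklet_struct *t)
{
	struct mydev *dev = from_tasklet(dev, t, task);

	dev->pending = 0;
}

static void mydev_setup(struct mydev *dev)
{
	tasklet_setup(&dev->task, mydev_task);
}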
#define IT_PKT_HEADER_SIZE_CIP 8 // For 2 CIP header.
#define IT_PKT_HEADER_SIZE_NO_CIP 0 // Nothing.
-static void pcm_period_tasklet(unsigned long data);
+static void pcm_period_tasklet(struct tasklet_struct *t);
/**
* amdtp_stream_init - initialize an AMDTP stream structure
s->flags = flags;
s->context = ERR_PTR(-1);
mutex_init(&s->mutex);
- tasklet_init(&s->period_tasklet, pcm_period_tasklet, (unsigned long)s);
+ tasklet_setup(&s->period_tasklet, pcm_period_tasklet);
s->packet_index = 0;
init_waitqueue_head(&s->callback_wait);
}
}
-static void pcm_period_tasklet(unsigned long data)
+static void pcm_period_tasklet(struct tasklet_struct *t)
{
- struct amdtp_stream *s = (void *)data;
+ struct amdtp_stream *s = from_tasklet(s, t, period_tasklet);
struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);
if (pcm)
#define VENDOR_DIGIDESIGN 0x00a07e
#define MODEL_CONSOLE 0x000001
#define MODEL_RACK 0x000002
+#define SPEC_VERSION 0x000001
static int name_card(struct snd_dg00x *dg00x)
{
/* Both of 002/003 use the same ID. */
{
.match_flags = IEEE1394_MATCH_VENDOR_ID |
+ IEEE1394_MATCH_VERSION |
IEEE1394_MATCH_MODEL_ID,
.vendor_id = VENDOR_DIGIDESIGN,
+ .version = SPEC_VERSION,
.model_id = MODEL_CONSOLE,
},
{
.match_flags = IEEE1394_MATCH_VENDOR_ID |
+ IEEE1394_MATCH_VERSION |
IEEE1394_MATCH_MODEL_ID,
.vendor_id = VENDOR_DIGIDESIGN,
+ .version = SPEC_VERSION,
.model_id = MODEL_RACK,
},
{}
.midi_capture_ports = 2,
.midi_playback_ports = 4,
},
- // This kernel module doesn't support FE-8 because the most of features
- // can be implemented in userspace without any specific support of this
- // module.
};
static int identify_model(struct snd_tscm *tscm)
}
static const struct ieee1394_device_id snd_tscm_id_table[] = {
+ // Tascam, FW-1884.
+ {
+ .match_flags = IEEE1394_MATCH_VENDOR_ID |
+ IEEE1394_MATCH_SPECIFIER_ID |
+ IEEE1394_MATCH_VERSION,
+ .vendor_id = 0x00022e,
+ .specifier_id = 0x00022e,
+ .version = 0x800000,
+ },
+ // Tascam, FE-8 (.version = 0x800001)
+	// This kernel module doesn't support FE-8 because most of the features
+	// can be implemented in userspace without any specific support from this
+ // module.
+ //
+ // .version = 0x800002 is unknown.
+ //
+ // Tascam, FW-1082.
+ {
+ .match_flags = IEEE1394_MATCH_VENDOR_ID |
+ IEEE1394_MATCH_SPECIFIER_ID |
+ IEEE1394_MATCH_VERSION,
+ .vendor_id = 0x00022e,
+ .specifier_id = 0x00022e,
+ .version = 0x800003,
+ },
+ // Tascam, FW-1804.
{
.match_flags = IEEE1394_MATCH_VENDOR_ID |
- IEEE1394_MATCH_SPECIFIER_ID,
+ IEEE1394_MATCH_SPECIFIER_ID |
+ IEEE1394_MATCH_VERSION,
.vendor_id = 0x00022e,
.specifier_id = 0x00022e,
+ .version = 0x800004,
},
{}
};
INIT_LIST_HEAD(&bus->hlink_list);
init_waitqueue_head(&bus->rirb_wq);
bus->irq = -1;
+
+ /*
+ * Default value of '8' is as per the HD audio specification (Rev 1.0a).
+ * Following relation is used to derive STRIPE control value.
+ * For sample rate <= 48K:
+ * { ((num_channels * bits_per_sample) / number of SDOs) >= 8 }
+ * For sample rate > 48K:
+ * { ((num_channels * bits_per_sample * rate/48000) /
+ * number of SDOs) >= 8 }
+ */
+ bus->sdo_limit = 8;
+
return 0;
}
EXPORT_SYMBOL_GPL(snd_hdac_bus_init);
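Making the comment's relation concrete: with the default sdo_limit of 8, a 2-channel, 16-bit stream at 48 kHz gives 2 * 16 = 32 bits per frame, so 32 / nSDO >= 8 allows striping across up to four SDO lines, while a mono 8-bit stream must stay on one. The real decision lives in the controller helper (snd_hdac_get_stream_stripe_ctl() in mainline); the sketch below only illustrates the inequality, with simplified rounding and a hypothetical function name:

/* Sketch: largest SDO count satisfying the HD-audio stripe relation. */
#include <stdio.h>

static int max_sdo(int channels, int bits, int rate, int sdo_limit)
{
	int value = channels * bits;
	int nsdo;

	if (rate > 48000)
		value = value * rate / 48000;

	for (nsdo = 4; nsdo > 1; nsdo /= 2)
		if (value / nsdo >= sdo_limit)
			break;
	return nsdo;
}

int main(void)
{
	printf("2ch/16bit/48kHz  -> %d SDO(s)\n", max_sdo(2, 16, 48000, 8));
	printf("2ch/16bit/192kHz -> %d SDO(s)\n", max_sdo(2, 16, 192000, 8));
	printf("1ch/8bit/48kHz   -> %d SDO(s)\n", max_sdo(1, 8, 48000, 8));
	return 0;
}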
bus->chip_init = true;
- /*
- * Default value of '8' is as per the HD audio specification (Rev 1.0a).
- * Following relation is used to derive STRIPE control value.
- * For sample rate <= 48K:
- * { ((num_channels * bits_per_sample) / number of SDOs) >= 8 }
- * For sample rate > 48K:
- * { ((num_channels * bits_per_sample * rate/48000) /
- * number of SDOs) >= 8 }
- */
- bus->sdo_limit = 8;
-
return true;
}
EXPORT_SYMBOL_GPL(snd_hdac_bus_init_chip);
void snd_hdac_device_exit(struct hdac_device *codec)
{
pm_runtime_put_noidle(&codec->dev);
+ /* keep balance of runtime PM child_count in parent device */
+ pm_runtime_set_suspended(&codec->dev);
snd_hdac_bus_remove_device(codec->bus, codec);
kfree(codec->vendor_name);
kfree(codec->chip_name);
#endif
/*
* Apollolake (Broxton-P)
- * the legacy HDaudio driver is used except on Up Squared (SOF) and
+ * the legacy HDAudio driver is used except on Up Squared (SOF) and
* Chromebooks (SST)
*/
#if IS_ENABLED(CONFIG_SND_SOC_SOF_APOLLOLAKE)
},
#endif
/*
- * Skylake and Kabylake use legacy HDaudio driver except for Google
+ * Skylake and Kabylake use legacy HDAudio driver except for Google
* Chromebooks (SST)
*/
#endif
/*
- * Geminilake uses legacy HDaudio driver except for Google
+ * Geminilake uses legacy HDAudio driver except for Google
* Chromebooks
*/
/* Geminilake */
/*
* CoffeeLake, CannonLake, CometLake, IceLake, TigerLake use legacy
- * HDaudio driver except for Google Chromebooks and when DMICs are
+ * HDAudio driver except for Google Chromebooks and when DMICs are
* present. Two cases are required since Coreboot does not expose NHLT
* tables.
*
if (pci->class == 0x040300)
return SND_INTEL_DSP_DRIVER_LEGACY;
if (pci->class != 0x040100 && pci->class != 0x040380) {
- dev_err(&pci->dev, "Unknown PCI class/subclass/prog-if information (0x%06x) found, selecting HDA legacy driver\n", pci->class);
+ dev_err(&pci->dev, "Unknown PCI class/subclass/prog-if information (0x%06x) found, selecting HDAudio legacy driver\n", pci->class);
return SND_INTEL_DSP_DRIVER_LEGACY;
}
}
/*
- * This is apparently the standard way to initailise an MPU-401
+ * This is apparently the standard way to initialise an MPU-401
*/
static inline void initialise_mpu401(const struct snd_mpu401 *mpu)
{
}
/*
- * Tell the SoundScape to begin a DMA tranfer using the given channel.
+ * Tell the SoundScape to begin a DMA transfer using the given channel.
* All locking issues are left to the caller.
*/
static void sscape_start_dma_unsafe(unsigned io_base, enum GA_REG reg)
}
/*
- * Initialse an MPU-401 subdevice for MIDI support on the SoundScape.
+ * Initialise an MPU-401 subdevice for MIDI support on the SoundScape.
*/
static int create_mpu401(struct snd_card *card, int devnum,
unsigned long port, int irq)
add_timer(&dpcm->timer);
}
-static void snd_card_asihpi_int_task(unsigned long data)
+static void snd_card_asihpi_int_task(struct tasklet_struct *t)
{
- struct hpi_adapter *a = (struct hpi_adapter *)data;
- struct snd_card_asihpi *asihpi;
+ struct snd_card_asihpi *asihpi = from_tasklet(asihpi, t, t);
+ struct hpi_adapter *a = asihpi->hpi;
WARN_ON(!a || !a->snd_card || !a->snd_card->private_data);
asihpi = (struct snd_card_asihpi *)a->snd_card->private_data;
if (hpi->interrupt_mode) {
asihpi->pcm_start = snd_card_asihpi_pcm_int_start;
asihpi->pcm_stop = snd_card_asihpi_pcm_int_stop;
- tasklet_init(&asihpi->t, snd_card_asihpi_int_task,
- (unsigned long)hpi);
+ tasklet_setup(&asihpi->t, snd_card_asihpi_int_task);
hpi->interrupt_callback = snd_card_asihpi_isr;
} else {
asihpi->pcm_start = snd_card_asihpi_pcm_timer_start;
else
/* Power down */
chip->spi_dac_reg[reg] |= bit;
- return snd_ca0106_spi_write(chip, chip->spi_dac_reg[reg]);
+ if (snd_ca0106_spi_write(chip, chip->spi_dac_reg[reg]) != 0)
+ return -ENXIO;
}
return 0;
}
*/
if (dmic_detect) {
err = snd_intel_dsp_driver_probe(pci);
- if (err != SND_INTEL_DSP_DRIVER_ANY &&
- err != SND_INTEL_DSP_DRIVER_LEGACY)
+ if (err != SND_INTEL_DSP_DRIVER_ANY && err != SND_INTEL_DSP_DRIVER_LEGACY) {
+ dev_dbg(&pci->dev, "HDAudio driver not selected, aborting probe\n");
return -ENODEV;
+ }
} else {
dev_warn(&pci->dev, "dmic_detect option is deprecated, pass snd-intel-dspcfg.dsp_driver=1 option instead\n");
}
.driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_HDMI },
/* Zhaoxin */
{ PCI_DEVICE(0x1d17, 0x3288), .driver_data = AZX_DRIVER_ZHAOXIN },
- /* Loongson */
- { PCI_DEVICE(0x0014, 0x7a07), .driver_data = AZX_DRIVER_GENERIC },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, azx_ids);
struct hda_tegra *hda = container_of(chip, struct hda_tegra, chip);
if (chip && chip->running) {
+ /* enable controller wake up event */
+ azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) |
+ STATESTS_INT_MASK);
+
azx_stop_chip(chip);
azx_enter_link_reset(chip);
}
if (chip && chip->running) {
hda_tegra_init(hda);
azx_init_chip(chip, 1);
+		/* disable controller wake up event */
+ azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) &
+ ~STATESTS_INT_MASK);
}
return 0;
hda_nid_t cvt_nid)
{
if (per_pin) {
+ haswell_verify_D0(codec, per_pin->cvt_nid, per_pin->pin_nid);
snd_hda_set_dev_select(codec, per_pin->pin_nid,
per_pin->dev_id);
intel_verify_pin_cvt_connect(codec, per_pin);
static int patch_tegra_hdmi(struct hda_codec *codec)
{
+ struct hdmi_spec *spec;
int err;
err = patch_generic_hdmi(codec);
return err;
codec->patch_ops.build_pcms = tegra_hdmi_build_pcms;
+ spec = codec->spec;
+ spec->chmap.ops.chmap_cea_alloc_validate_get_type =
+ nvhdmi_chmap_cea_alloc_validate_get_type;
+ spec->chmap.ops.chmap_validate = nvhdmi_chmap_validate;
return 0;
}
HDA_CODEC_ENTRY(0x8086280d, "Geminilake HDMI", patch_i915_glk_hdmi),
HDA_CODEC_ENTRY(0x8086280f, "Icelake HDMI", patch_i915_icl_hdmi),
HDA_CODEC_ENTRY(0x80862812, "Tigerlake HDMI", patch_i915_tgl_hdmi),
+HDA_CODEC_ENTRY(0x80862816, "Rocketlake HDMI", patch_i915_tgl_hdmi),
HDA_CODEC_ENTRY(0x8086281a, "Jasperlake HDMI", patch_i915_icl_hdmi),
HDA_CODEC_ENTRY(0x8086281b, "Elkhartlake HDMI", patch_i915_icl_hdmi),
HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI", patch_generic_hdmi),
SND_PCI_QUIRK(0x1462, 0x1276, "MSI-GL73", ALC1220_FIXUP_CLEVO_P950),
SND_PCI_QUIRK(0x1462, 0x1293, "MSI-GP65", ALC1220_FIXUP_CLEVO_P950),
SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
+ SND_PCI_QUIRK(0x1462, 0x9c37, "MSI X570-A PRO", ALC1220_FIXUP_CLEVO_P950),
SND_PCI_QUIRK(0x1462, 0xda57, "MSI Z270-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX),
}
}
+/* Quirk for Thinkpad X1 7th and 8th Gen
+ * The following fixed routing is needed:
+ * DAC1 (NID 0x02) -> Speaker (NID 0x14); some eq applied secretly
+ * DAC2 (NID 0x03) -> Bass (NID 0x17) & Headphone (NID 0x21); sharing a DAC
+ * DAC3 (NID 0x06) -> Unused, due to the lack of volume amp
+ */
+static void alc285_fixup_thinkpad_x1_gen7(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ static const hda_nid_t conn[] = { 0x02, 0x03 }; /* exclude 0x06 */
+ static const hda_nid_t preferred_pairs[] = {
+ 0x14, 0x02, 0x17, 0x03, 0x21, 0x03, 0
+ };
+ struct alc_spec *spec = codec->spec;
+
+ switch (action) {
+ case HDA_FIXUP_ACT_PRE_PROBE:
+ snd_hda_override_conn_list(codec, 0x17, ARRAY_SIZE(conn), conn);
+ spec->gen.preferred_dacs = preferred_pairs;
+ break;
+ case HDA_FIXUP_ACT_BUILD:
+ /* The generic parser creates somewhat unintuitive volume ctls
+ * with the fixed routing above, and the shared DAC2 may be
+ * confusing for PA.
+ * Rename those to unique names so that PA doesn't touch them
+ * and use only Master volume.
+ */
+ rename_ctl(codec, "Front Playback Volume", "DAC1 Playback Volume");
+ rename_ctl(codec, "Bass Speaker Playback Volume", "DAC2 Playback Volume");
+ break;
+ }
+}
+
static void alc233_alc662_fixup_lenovo_dual_codecs(struct hda_codec *codec,
const struct hda_fixup *fix,
int action)
ALC289_FIXUP_DUAL_SPK,
ALC294_FIXUP_SPK2_TO_DAC1,
ALC294_FIXUP_ASUS_DUAL_SPK,
+ ALC285_FIXUP_THINKPAD_X1_GEN7,
ALC285_FIXUP_THINKPAD_HEADSET_JACK,
ALC294_FIXUP_ASUS_HPE,
ALC294_FIXUP_ASUS_COEF_1B,
.chained = true,
.chain_id = ALC294_FIXUP_SPK2_TO_DAC1
},
+ [ALC285_FIXUP_THINKPAD_X1_GEN7] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc285_fixup_thinkpad_x1_gen7,
+ .chained = true,
+ .chain_id = ALC269_FIXUP_THINKPAD_ACPI
+ },
[ALC285_FIXUP_THINKPAD_HEADSET_JACK] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc_fixup_headset_jack,
.chained = true,
- .chain_id = ALC285_FIXUP_SPEAKER2_TO_DAC1
+ .chain_id = ALC285_FIXUP_THINKPAD_X1_GEN7
},
[ALC294_FIXUP_ASUS_HPE] = {
.type = HDA_FIXUP_VERBS,
SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC),
SND_PCI_QUIRK(0x144d, 0xc169, "Samsung Notebook 9 Pen (NP930SBE-K01US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
SND_PCI_QUIRK(0x144d, 0xc176, "Samsung Notebook 9 Pro (NP930MBE-K04US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
+ SND_PCI_QUIRK(0x144d, 0xc189, "Samsung Galaxy Flex Book (NT950QCG-X716)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
+ SND_PCI_QUIRK(0x144d, 0xc18a, "Samsung Galaxy Book Ion (NP930XCJ-K01US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
+ SND_PCI_QUIRK(0x144d, 0xc830, "Samsung Galaxy Book Ion (NT950XCJ-X716A)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
SND_PCI_QUIRK(0x144d, 0xc740, "Samsung Ativ book 8 (NP870Z5G)", ALC269_FIXUP_ATIV_BOOK_8),
SND_PCI_QUIRK(0x144d, 0xc812, "Samsung Notebook Pen S (NT950SBE-X58)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
{.id = ALC299_FIXUP_PREDATOR_SPK, .name = "predator-spk"},
{.id = ALC298_FIXUP_HUAWEI_MBX_STEREO, .name = "huawei-mbx-stereo"},
{.id = ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE, .name = "alc256-medion-headset"},
+ {.id = ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET, .name = "alc298-samsung-headphone"},
{}
};
#define ALC225_STANDARD_PINS \
return 0;
}
-static void riptide_handleirq(unsigned long dev_id)
+static void riptide_handleirq(struct tasklet_struct *t)
{
- struct snd_riptide *chip = (void *)dev_id;
+ struct snd_riptide *chip = from_tasklet(chip, t, riptide_tq);
struct cmdif *cif = chip->cif;
struct snd_pcm_substream *substream[PLAYBACK_SUBSTREAMS + 1];
struct snd_pcm_runtime *runtime;
chip->received_irqs = 0;
chip->handled_irqs = 0;
chip->cif = NULL;
- tasklet_init(&chip->riptide_tq, riptide_handleirq, (unsigned long)chip);
+ tasklet_setup(&chip->riptide_tq, riptide_handleirq);
if ((chip->res_port =
request_region(chip->port, 64, "RIPTIDE")) == NULL) {
return 0;
}
-static void hdsp_midi_tasklet(unsigned long arg)
+static void hdsp_midi_tasklet(struct tasklet_struct *t)
{
- struct hdsp *hdsp = (struct hdsp *)arg;
+ struct hdsp *hdsp = from_tasklet(hdsp, t, midi_tasklet);
if (hdsp->midi[0].pending)
snd_hdsp_midi_input_read (&hdsp->midi[0]);
spin_lock_init(&hdsp->lock);
- tasklet_init(&hdsp->midi_tasklet, hdsp_midi_tasklet, (unsigned long)hdsp);
+ tasklet_setup(&hdsp->midi_tasklet, hdsp_midi_tasklet);
pci_read_config_word(hdsp->pci, PCI_CLASS_REVISION, &hdsp->firmware_rev);
hdsp->firmware_rev &= 0xff;
}
-static void hdspm_midi_tasklet(unsigned long arg)
+static void hdspm_midi_tasklet(struct tasklet_struct *t)
{
- struct hdspm *hdspm = (struct hdspm *)arg;
+ struct hdspm *hdspm = from_tasklet(hdspm, t, midi_tasklet);
int i = 0;
while (i < hdspm->midiPorts) {
}
- tasklet_init(&hdspm->midi_tasklet,
- hdspm_midi_tasklet, (unsigned long) hdspm);
+ tasklet_setup(&hdspm->midi_tasklet, hdspm_midi_tasklet);
if (hdspm->io_type != MADIface) {
switch (filltype) {
case SND_PS3_DMA_FILLTYPE_SILENT_FIRSTFILL:
silent = 1;
- /* intentionally fall thru */
+ fallthrough;
case SND_PS3_DMA_FILLTYPE_FIRSTFILL:
ch0_kick_event = PS3_AUDIO_KICK_EVENT_ALWAYS;
break;
case SND_PS3_DMA_FILLTYPE_SILENT_RUNNING:
silent = 1;
- /* intentionally fall thru */
+ fallthrough;
case SND_PS3_DMA_FILLTYPE_RUNNING:
ch0_kick_event = PS3_AUDIO_KICK_EVENT_SERIALOUT0_EMPTY;
break;
srate = params_rate(params);
for_each_rtd_codec_dais(rtd, i, codec_dai) {
- if (strcmp(codec_dai->component->name, "rt1015-aif"))
+ if (strcmp(codec_dai->name, "rt1015-aif"))
continue;
ret = snd_soc_dai_set_bclk_ratio(codec_dai, 64);
if (ret < 0)
return 0;
}
-static int acp_pdm_dai_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params,
- struct snd_soc_dai *dai)
+static int acp_pdm_dai_trigger(struct snd_pcm_substream *substream,
+ int cmd, struct snd_soc_dai *dai)
{
struct pdm_stream_instance *rtd;
+ int ret;
+ bool pdm_status;
unsigned int ch_mask;
rtd = substream->runtime->private_data;
- switch (params_channels(params)) {
+ ret = 0;
+ switch (substream->runtime->channels) {
case TWO_CH:
ch_mask = 0x00;
break;
default:
return -EINVAL;
}
- rn_writel(ch_mask, rtd->acp_base + ACP_WOV_PDM_NO_OF_CHANNELS);
- rn_writel(PDM_DECIMATION_FACTOR, rtd->acp_base +
- ACP_WOV_PDM_DECIMATION_FACTOR);
- return 0;
-}
-
-static int acp_pdm_dai_trigger(struct snd_pcm_substream *substream,
- int cmd, struct snd_soc_dai *dai)
-{
- struct pdm_stream_instance *rtd;
- int ret;
- bool pdm_status;
-
- rtd = substream->runtime->private_data;
- ret = 0;
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ rn_writel(ch_mask, rtd->acp_base + ACP_WOV_PDM_NO_OF_CHANNELS);
+ rn_writel(PDM_DECIMATION_FACTOR, rtd->acp_base +
+ ACP_WOV_PDM_DECIMATION_FACTOR);
rtd->bytescount = acp_pdm_get_byte_count(rtd,
substream->stream);
pdm_status = check_pdm_dma_status(rtd->acp_base);
}
static struct snd_soc_dai_ops acp_pdm_dai_ops = {
- .hw_params = acp_pdm_dai_hw_params,
.trigger = acp_pdm_dai_trigger,
};
/* cpu is BCLK master */
mrb |= MCHP_I2SMCC_MRB_CLKSEL_INT;
set_divs = 1;
- /* fall through */
+ fallthrough;
case SND_SOC_DAIFMT_CBM_CFM:
/* cpu is slave */
mra |= MCHP_I2SMCC_MRA_MODE_SLAVE;
REG_CR_VIC_SB_SLEEP, REG_CR_VIC_SB_SLEEP);
regmap_update_bits(regmap, JZ4770_CODEC_REG_CR_VIC,
REG_CR_VIC_SB, REG_CR_VIC_SB);
- /* fall-through */
+ fallthrough;
default:
break;
}
#define CDC_D_REVISION1 (0xf000)
#define CDC_D_PERPH_SUBTYPE (0xf005)
-#define CDC_D_INT_EN_SET (0x015)
-#define CDC_D_INT_EN_CLR (0x016)
+#define CDC_D_INT_EN_SET (0xf015)
+#define CDC_D_INT_EN_CLR (0xf016)
#define MBHC_SWITCH_INT BIT(7)
#define MBHC_MIC_ELECTRICAL_INS_REM_DET BIT(6)
#define MBHC_BUTTON_PRESS_DET BIT(5)
break;
case SND_SOC_DAIFMT_DSP_A:
priv->tdm_offset += 1;
- /* fall through */
+ fallthrough;
/* DSP_A uses the same basic config as DSP_B
* except we need to shift the TDM output by one BCK cycle
*/
struct snd_kcontrol *kcontrol, int event)
{
struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
+ struct wm8994 *control = dev_get_drvdata(component->dev->parent);
int i;
+ if (control->type != WM8958)
+ return 0;
+
switch (event) {
case SND_SOC_DAPM_POST_PMU:
case SND_SOC_DAPM_PRE_PMU:
{ 40, 0x0000 }, /* R40 - SPKOUTL volume */
{ 41, 0x0000 }, /* R41 - SPKOUTR volume */
- { 48, 0x0000 }, /* R48 - Additional control(4) */
{ 49, 0x0010 }, /* R49 - Class D Control 1 */
{ 51, 0x0003 }, /* R51 - Class D Control 2 */
case WM8962_SPKOUTL_VOLUME:
case WM8962_SPKOUTR_VOLUME:
case WM8962_THERMAL_SHUTDOWN_STATUS:
+ case WM8962_ADDITIONAL_CONTROL_4:
case WM8962_CLASS_D_CONTROL_1:
case WM8962_CLASS_D_CONTROL_2:
case WM8962_CLOCKING_4:
#define WM8994_NUM_DRC 3
#define WM8994_NUM_EQ 3
-static struct {
+struct wm8994_reg_mask {
unsigned int reg;
unsigned int mask;
-} wm8994_vu_bits[] = {
+};
+
+static struct wm8994_reg_mask wm8994_vu_bits[] = {
{ WM8994_LEFT_LINE_INPUT_1_2_VOLUME, WM8994_IN1_VU },
{ WM8994_RIGHT_LINE_INPUT_1_2_VOLUME, WM8994_IN1_VU },
{ WM8994_LEFT_LINE_INPUT_3_4_VOLUME, WM8994_IN2_VU },
{ WM8994_AIF1_DAC1_LEFT_VOLUME, WM8994_AIF1DAC1_VU },
{ WM8994_AIF1_DAC1_RIGHT_VOLUME, WM8994_AIF1DAC1_VU },
- { WM8994_AIF1_DAC2_LEFT_VOLUME, WM8994_AIF1DAC2_VU },
- { WM8994_AIF1_DAC2_RIGHT_VOLUME, WM8994_AIF1DAC2_VU },
{ WM8994_AIF2_DAC_LEFT_VOLUME, WM8994_AIF2DAC_VU },
{ WM8994_AIF2_DAC_RIGHT_VOLUME, WM8994_AIF2DAC_VU },
{ WM8994_AIF1_ADC1_LEFT_VOLUME, WM8994_AIF1ADC1_VU },
{ WM8994_AIF1_ADC1_RIGHT_VOLUME, WM8994_AIF1ADC1_VU },
- { WM8994_AIF1_ADC2_LEFT_VOLUME, WM8994_AIF1ADC2_VU },
- { WM8994_AIF1_ADC2_RIGHT_VOLUME, WM8994_AIF1ADC2_VU },
{ WM8994_AIF2_ADC_LEFT_VOLUME, WM8994_AIF2ADC_VU },
{ WM8994_AIF2_ADC_RIGHT_VOLUME, WM8994_AIF1ADC2_VU },
{ WM8994_DAC1_LEFT_VOLUME, WM8994_DAC1_VU },
{ WM8994_DAC2_RIGHT_VOLUME, WM8994_DAC2_VU },
};
+/* VU bitfields for ADC2 and DAC2 are not available on WM1811 */
+static struct wm8994_reg_mask wm8994_adc2_dac2_vu_bits[] = {
+ { WM8994_AIF1_DAC2_LEFT_VOLUME, WM8994_AIF1DAC2_VU },
+ { WM8994_AIF1_DAC2_RIGHT_VOLUME, WM8994_AIF1DAC2_VU },
+ { WM8994_AIF1_ADC2_LEFT_VOLUME, WM8994_AIF1ADC2_VU },
+ { WM8994_AIF1_ADC2_RIGHT_VOLUME, WM8994_AIF1ADC2_VU },
+};
+
static int wm8994_drc_base[] = {
WM8994_AIF1_DRC1_1,
WM8994_AIF1_DRC2_1,
return true;
}
+static void wm8994_update_vu_bits(struct snd_soc_component *component)
+{
+ struct wm8994_priv *wm8994 = snd_soc_component_get_drvdata(component);
+ struct wm8994 *control = wm8994->wm8994;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(wm8994_vu_bits); i++)
+ snd_soc_component_write(component, wm8994_vu_bits[i].reg,
+ snd_soc_component_read(component,
+ wm8994_vu_bits[i].reg));
+ if (control->type == WM1811)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(wm8994_adc2_dac2_vu_bits); i++)
+ snd_soc_component_write(component,
+ wm8994_adc2_dac2_vu_bits[i].reg,
+ snd_soc_component_read(component,
+ wm8994_adc2_dac2_vu_bits[i].reg));
+}
+
static int aif_mclk_set(struct snd_soc_component *component, int aif, bool enable)
{
struct wm8994_priv *wm8994 = snd_soc_component_get_drvdata(component);
struct wm8994_priv *wm8994 = snd_soc_component_get_drvdata(component);
struct wm8994 *control = wm8994->wm8994;
int mask = WM8994_AIF1DAC1L_ENA | WM8994_AIF1DAC1R_ENA;
- int ret, i;
+ int ret;
int dac;
int adc;
int val;
break;
case SND_SOC_DAPM_POST_PMU:
- for (i = 0; i < ARRAY_SIZE(wm8994_vu_bits); i++)
- snd_soc_component_write(component, wm8994_vu_bits[i].reg,
- snd_soc_component_read(component,
- wm8994_vu_bits[i].reg));
+ wm8994_update_vu_bits(component);
break;
case SND_SOC_DAPM_PRE_PMD:
struct snd_kcontrol *kcontrol, int event)
{
struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
- int ret, i;
+ int ret;
int dac;
int adc;
int val;
break;
case SND_SOC_DAPM_POST_PMU:
- for (i = 0; i < ARRAY_SIZE(wm8994_vu_bits); i++)
- snd_soc_component_write(component, wm8994_vu_bits[i].reg,
- snd_soc_component_read(component,
- wm8994_vu_bits[i].reg));
+ wm8994_update_vu_bits(component);
break;
case SND_SOC_DAPM_PRE_PMD:
wm8994_vu_bits[i].mask,
wm8994_vu_bits[i].mask);
+ if (control->type != WM1811) {
+ for (i = 0; i < ARRAY_SIZE(wm8994_adc2_dac2_vu_bits); i++)
+ snd_soc_component_update_bits(component,
+ wm8994_adc2_dac2_vu_bits[i].reg,
+ wm8994_adc2_dac2_vu_bits[i].mask,
+ wm8994_adc2_dac2_vu_bits[i].mask);
+ }
+
/* Set the low bit of the 3D stereo depth so TLV matches */
snd_soc_component_update_bits(component, WM8994_AIF1_DAC1_FILTERS_2,
1 << WM8994_AIF1DAC1_3D_GAIN_SHIFT,
* @codec_priv: CODEC private data
* @cpu_priv: CPU private data
* @card: ASoC card structure
+ * @streams: Mask of currently active streams
* @sample_rate: Current sample rate
* @sample_format: Current sample format
* @asrc_rate: ASRC sample rate used by Back-Ends
struct codec_priv codec_priv;
struct cpu_priv cpu_priv;
struct snd_soc_card card;
+ u8 streams;
u32 sample_rate;
snd_pcm_format_t sample_format;
u32 asrc_rate;
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
struct fsl_asoc_card_priv *priv = snd_soc_card_get_drvdata(rtd->card);
bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
+ struct codec_priv *codec_priv = &priv->codec_priv;
struct cpu_priv *cpu_priv = &priv->cpu_priv;
struct device *dev = rtd->card->dev;
+ unsigned int pll_out;
int ret;
priv->sample_rate = params_rate(params);
priv->sample_format = params_format(params);
+ priv->streams |= BIT(substream->stream);
- /*
- * If codec-dai is DAI Master and all configurations are already in the
- * set_bias_level(), bypass the remaining settings in hw_params().
- * Note: (dai_fmt & CBM_CFM) includes CBM_CFM and CBM_CFS.
- */
- if ((priv->card.set_bias_level &&
- priv->dai_fmt & SND_SOC_DAIFMT_CBM_CFM) ||
- fsl_asoc_card_is_ac97(priv))
+ if (fsl_asoc_card_is_ac97(priv))
return 0;
/* Specific configurations of DAIs starts from here */
cpu_priv->sysclk_dir[tx]);
if (ret && ret != -ENOTSUPP) {
dev_err(dev, "failed to set sysclk for cpu dai\n");
- return ret;
+ goto fail;
}
if (cpu_priv->slot_width) {
cpu_priv->slot_width);
if (ret && ret != -ENOTSUPP) {
dev_err(dev, "failed to set TDM slot for cpu dai\n");
+ goto fail;
+ }
+ }
+
+ /* Specific configuration for PLL */
+ if (codec_priv->pll_id && codec_priv->fll_id) {
+ if (priv->sample_format == SNDRV_PCM_FORMAT_S24_LE)
+ pll_out = priv->sample_rate * 384;
+ else
+ pll_out = priv->sample_rate * 256;
+
+ ret = snd_soc_dai_set_pll(asoc_rtd_to_codec(rtd, 0),
+ codec_priv->pll_id,
+ codec_priv->mclk_id,
+ codec_priv->mclk_freq, pll_out);
+ if (ret) {
+ dev_err(dev, "failed to start FLL: %d\n", ret);
+ goto fail;
+ }
+
+ ret = snd_soc_dai_set_sysclk(asoc_rtd_to_codec(rtd, 0),
+ codec_priv->fll_id,
+ pll_out, SND_SOC_CLOCK_IN);
+
+ if (ret && ret != -ENOTSUPP) {
+ dev_err(dev, "failed to set SYSCLK: %d\n", ret);
+ goto fail;
+ }
+ }
+
+ return 0;
+
+fail:
+ priv->streams &= ~BIT(substream->stream);
+ return ret;
+}
+
+static int fsl_asoc_card_hw_free(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct fsl_asoc_card_priv *priv = snd_soc_card_get_drvdata(rtd->card);
+ struct codec_priv *codec_priv = &priv->codec_priv;
+ struct device *dev = rtd->card->dev;
+ int ret;
+
+ priv->streams &= ~BIT(substream->stream);
+
+ if (!priv->streams && codec_priv->pll_id && codec_priv->fll_id) {
+ /* Force freq to be 0 to avoid error message in codec */
+ ret = snd_soc_dai_set_sysclk(asoc_rtd_to_codec(rtd, 0),
+ codec_priv->mclk_id,
+ 0,
+ SND_SOC_CLOCK_IN);
+ if (ret) {
+ dev_err(dev, "failed to switch away from FLL: %d\n", ret);
+ return ret;
+ }
+
+ ret = snd_soc_dai_set_pll(asoc_rtd_to_codec(rtd, 0),
+ codec_priv->pll_id, 0, 0, 0);
+ if (ret && ret != -ENOTSUPP) {
+ dev_err(dev, "failed to stop FLL: %d\n", ret);
return ret;
}
}
static const struct snd_soc_ops fsl_asoc_card_ops = {
.hw_params = fsl_asoc_card_hw_params,
+ .hw_free = fsl_asoc_card_hw_free,
};
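The wider refactor visible above and below replaces the card-level set_bias_level() callback (deleted further down) with stream-scoped clock management: hw_params() sets one bit per direction in priv->streams and programs the PLL, and hw_free() clears its bit and only tears the PLL down once both playback and capture are idle. The bookkeeping pattern on its own, as a sketch (names are illustrative):

/* Sketch: per-direction refcounting with a bitmask, like priv->streams. */
#include <stdio.h>

enum { DIR_PLAYBACK, DIR_CAPTURE };

static unsigned int streams;	/* one bit per active direction */

static void hw_params(int dir)
{
	streams |= 1u << dir;
	printf("stream %d active: (re)program PLL\n", dir);
}

static void hw_free(int dir)
{
	streams &= ~(1u << dir);
	if (!streams)
		printf("both directions idle: stop PLL\n");
}

int main(void)
{
	hw_params(DIR_PLAYBACK);
	hw_params(DIR_CAPTURE);
	hw_free(DIR_PLAYBACK);	/* PLL stays up */
	hw_free(DIR_CAPTURE);	/* now stopped */
	return 0;
}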
static int be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
},
};
-static int fsl_asoc_card_set_bias_level(struct snd_soc_card *card,
- struct snd_soc_dapm_context *dapm,
- enum snd_soc_bias_level level)
-{
- struct fsl_asoc_card_priv *priv = snd_soc_card_get_drvdata(card);
- struct snd_soc_pcm_runtime *rtd;
- struct snd_soc_dai *codec_dai;
- struct codec_priv *codec_priv = &priv->codec_priv;
- struct device *dev = card->dev;
- unsigned int pll_out;
- int ret;
-
- rtd = snd_soc_get_pcm_runtime(card, &card->dai_link[0]);
- codec_dai = asoc_rtd_to_codec(rtd, 0);
- if (dapm->dev != codec_dai->dev)
- return 0;
-
- switch (level) {
- case SND_SOC_BIAS_PREPARE:
- if (dapm->bias_level != SND_SOC_BIAS_STANDBY)
- break;
-
- if (priv->sample_format == SNDRV_PCM_FORMAT_S24_LE)
- pll_out = priv->sample_rate * 384;
- else
- pll_out = priv->sample_rate * 256;
-
- ret = snd_soc_dai_set_pll(codec_dai, codec_priv->pll_id,
- codec_priv->mclk_id,
- codec_priv->mclk_freq, pll_out);
- if (ret) {
- dev_err(dev, "failed to start FLL: %d\n", ret);
- return ret;
- }
-
- ret = snd_soc_dai_set_sysclk(codec_dai, codec_priv->fll_id,
- pll_out, SND_SOC_CLOCK_IN);
- if (ret && ret != -ENOTSUPP) {
- dev_err(dev, "failed to set SYSCLK: %d\n", ret);
- return ret;
- }
- break;
-
- case SND_SOC_BIAS_STANDBY:
- if (dapm->bias_level != SND_SOC_BIAS_PREPARE)
- break;
-
- ret = snd_soc_dai_set_sysclk(codec_dai, codec_priv->mclk_id,
- codec_priv->mclk_freq,
- SND_SOC_CLOCK_IN);
- if (ret && ret != -ENOTSUPP) {
- dev_err(dev, "failed to switch away from FLL: %d\n", ret);
- return ret;
- }
-
- ret = snd_soc_dai_set_pll(codec_dai, codec_priv->pll_id, 0, 0, 0);
- if (ret) {
- dev_err(dev, "failed to stop FLL: %d\n", ret);
- return ret;
- }
- break;
-
- default:
- break;
- }
-
- return 0;
-}
-
static int fsl_asoc_card_audmux_init(struct device_node *np,
struct fsl_asoc_card_priv *priv)
{
/* Diversify the card configurations */
if (of_device_is_compatible(np, "fsl,imx-audio-cs42888")) {
codec_dai_name = "cs42888";
- priv->card.set_bias_level = NULL;
priv->cpu_priv.sysclk_freq[TX] = priv->codec_priv.mclk_freq;
priv->cpu_priv.sysclk_freq[RX] = priv->codec_priv.mclk_freq;
priv->cpu_priv.sysclk_dir[TX] = SND_SOC_CLOCK_OUT;
priv->dai_fmt |= SND_SOC_DAIFMT_CBM_CFM;
} else if (of_device_is_compatible(np, "fsl,imx-audio-wm8962")) {
codec_dai_name = "wm8962";
- priv->card.set_bias_level = fsl_asoc_card_set_bias_level;
priv->codec_priv.mclk_id = WM8962_SYSCLK_MCLK;
priv->codec_priv.fll_id = WM8962_SYSCLK_FLL;
priv->codec_priv.pll_id = WM8962_FLL;
priv->dai_fmt |= SND_SOC_DAIFMT_CBM_CFM;
} else if (of_device_is_compatible(np, "fsl,imx-audio-wm8960")) {
codec_dai_name = "wm8960-hifi";
- priv->card.set_bias_level = fsl_asoc_card_set_bias_level;
priv->codec_priv.fll_id = WM8960_SYSCLK_AUTO;
priv->codec_priv.pll_id = WM8960_SYSCLK_AUTO;
priv->dai_fmt |= SND_SOC_DAIFMT_CBM_CFM;
} else if (of_device_is_compatible(np, "fsl,imx-audio-ac97")) {
codec_dai_name = "ac97-hifi";
- priv->card.set_bias_level = NULL;
priv->dai_fmt = SND_SOC_DAIFMT_AC97;
priv->card.dapm_routes = audio_map_ac97;
priv->card.num_dapm_routes = ARRAY_SIZE(audio_map_ac97);
} else if (of_device_is_compatible(np, "fsl,imx-audio-mqs")) {
codec_dai_name = "fsl-mqs-dai";
- priv->card.set_bias_level = NULL;
priv->dai_fmt = SND_SOC_DAIFMT_LEFT_J |
SND_SOC_DAIFMT_CBS_CFS |
SND_SOC_DAIFMT_NB_NF;
priv->card.num_dapm_routes = ARRAY_SIZE(audio_map_tx);
} else if (of_device_is_compatible(np, "fsl,imx-audio-wm8524")) {
codec_dai_name = "wm8524-hifi";
- priv->card.set_bias_level = NULL;
priv->dai_fmt |= SND_SOC_DAIFMT_CBS_CFS;
priv->dai_link[1].dpcm_capture = 0;
priv->dai_link[2].dpcm_capture = 0;
ESAI_xFCR_xFR, 0);
}
-static void fsl_esai_hw_reset(unsigned long arg)
+static void fsl_esai_hw_reset(struct tasklet_struct *t)
{
- struct fsl_esai *esai_priv = (struct fsl_esai *)arg;
+ struct fsl_esai *esai_priv = from_tasklet(esai_priv, t, task);
bool tx = true, rx = false, enabled[2];
unsigned long lock_flags;
u32 tfcr, rfcr;
return ret;
}
- tasklet_init(&esai_priv->task, fsl_esai_hw_reset,
- (unsigned long)esai_priv);
+ tasklet_setup(&esai_priv->task, fsl_esai_hw_reset);
pm_runtime_enable(&pdev->dev);
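The ESAI hunks above are part of the tree-wide move from tasklet_init(), which smuggled context through an unsigned long, to tasklet_setup() with a typed callback. A minimal sketch of the pattern, using an illustrative fsl_foo struct (the names are not from the patch):

#include <linux/interrupt.h>

struct fsl_foo {
	struct tasklet_struct task;
	bool pending;
};

/* New-style callback: it receives the tasklet_struct, not a cookie. */
static void fsl_foo_reset(struct tasklet_struct *t)
{
	/* from_tasklet() is container_of() in disguise: recover the
	 * enclosing object from its embedded tasklet_struct member.
	 */
	struct fsl_foo *foo = from_tasklet(foo, t, task);

	foo->pending = false;
}

static void fsl_foo_init(struct fsl_foo *foo)
{
	/* No more (unsigned long) cast of the context pointer. */
	tasklet_setup(&foo->task, fsl_foo_reset);
}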
"missing baudclk for master mode\n");
return -EINVAL;
}
- /* fall through */
+ fallthrough;
case SND_SOC_DAIFMT_CBM_CFS:
ssi->i2s_net |= SSI_SCR_I2S_MODE_MASTER;
break;
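All of the /* fall through */ comment replacements in this series use the fallthrough pseudo-keyword from <linux/compiler_attributes.h>, which expands to __attribute__((__fallthrough__)) on compilers that support it, so -Wimplicit-fallthrough can distinguish a deliberate fall-through from a forgotten break. The PDM channel hunk further down shows the idiom at its clearest; condensed here for reference:

switch (params_channels(params)) {
case 8:
	val |= PDM_PATH3_EN;
	fallthrough;	/* higher channel counts also enable all lower paths */
case 6:
	val |= PDM_PATH2_EN;
	fallthrough;
case 4:
	val |= PDM_PATH1_EN;
	fallthrough;
case 2:
	val |= PDM_PATH0_EN;
	break;
}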
static void psc_dma_free(struct snd_soc_component *component,
struct snd_pcm *pcm)
{
- struct snd_soc_pcm_runtime *rtd = pcm->private_data;
struct snd_pcm_substream *substream;
int stream;
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_U16_LE:
signed_data = HII2S_I2S_CFG__S2_CODEC_DATA_FORMAT;
- /* fall through */
+ fallthrough;
case SNDRV_PCM_FORMAT_S16_LE:
bits = HII2S_BITS_16;
break;
case SNDRV_PCM_FORMAT_U24_LE:
signed_data = HII2S_I2S_CFG__S2_CODEC_DATA_FORMAT;
- /* fall through */
+ fallthrough;
case SNDRV_PCM_FORMAT_S24_LE:
bits = HII2S_BITS_24;
break;
ret_val = power_up_sst(stream);
if (ret_val < 0)
- return ret_val;
+ goto out_power_up;
/* Make sure, that the period size is always even */
snd_pcm_hw_constraint_step(substream->runtime, 0,
return snd_pcm_hw_constraint_integer(runtime,
SNDRV_PCM_HW_PARAM_PERIODS);
out_ops:
- kfree(stream);
mutex_unlock(&sst_lock);
+out_power_up:
+ kfree(stream);
return ret_val;
}
break;
case SNDRV_PCM_TRIGGER_SUSPEND:
pdata->restore_stream = false;
- /* fallthrough */
+ fallthrough;
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
sst_byt_stream_pause(byt, pcm_data->stream);
break;
break;
default:
dev_err(dev, "get speaker GPIO failed: %d\n", ret);
- /* fall through */
+ fallthrough;
case -EPROBE_DEFER:
return ret;
}
default:
dev_err(&pdev->dev, "Failed to get ext-amp-enable GPIO: %d\n",
ret_val);
- /* fall through */
+ fallthrough;
case -EPROBE_DEFER:
put_device(codec_dev);
return ret_val;
default:
dev_err(&pdev->dev, "Failed to get hp-detect GPIO: %d\n",
ret_val);
- /* fall through */
+ fallthrough;
case -EPROBE_DEFER:
put_device(codec_dev);
return ret_val;
stream->lpib);
snd_hdac_ext_stream_set_lpib(stream, stream->lpib);
}
- /* fall through */
+ fallthrough;
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
switch (slot_width) {
case 0:
slot_width = 32;
- /* Fall-through */
+ fallthrough;
case 32:
fmt |= SNDRV_PCM_FMTBIT_S32_LE;
- /* Fall-through */
+ fallthrough;
case 24:
fmt |= SNDRV_PCM_FMTBIT_S24_LE;
fmt |= SNDRV_PCM_FMTBIT_S20_LE;
- /* Fall-through */
+ fallthrough;
case 16:
fmt |= SNDRV_PCM_FMTBIT_S16_LE;
- /* Fall-through */
+ fallthrough;
case 8:
fmt |= SNDRV_PCM_FMTBIT_S8;
break;
case SND_SOC_DAIFMT_CBS_CFM:
case SND_SOC_DAIFMT_CBM_CFS:
dev_err(dai->dev, "only CBS_CFS and CBM_CFM are supported\n");
- /* Fall-through */
+ fallthrough;
default:
return -EINVAL;
}
case SND_SOC_DAIFMT_DSP_A:
sspsp |= SSPSP_FSRT;
- /* fall through */
+ fallthrough;
case SND_SOC_DAIFMT_DSP_B:
sscr0 |= SSCR0_MOD | SSCR0_PSP;
sscr1 |= SSCR1_TRAIL | SSCR1_RWOT;
}
static const struct snd_soc_dapm_widget q6afe_dai_widgets[] = {
- SND_SOC_DAPM_AIF_IN("HDMI_RX", NULL, 0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("SLIMBUS_0_RX", NULL, 0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("SLIMBUS_1_RX", NULL, 0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("SLIMBUS_2_RX", NULL, 0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("SLIMBUS_3_RX", NULL, 0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("SLIMBUS_4_RX", NULL, 0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("SLIMBUS_5_RX", NULL, 0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("SLIMBUS_6_RX", NULL, 0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("SLIMBUS_0_TX", NULL, 0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("SLIMBUS_1_TX", NULL, 0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("SLIMBUS_2_TX", NULL, 0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("SLIMBUS_3_TX", NULL, 0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("SLIMBUS_4_TX", NULL, 0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("SLIMBUS_5_TX", NULL, 0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("SLIMBUS_6_TX", NULL, 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("HDMI_RX", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SLIMBUS_0_RX", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SLIMBUS_1_RX", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SLIMBUS_2_RX", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SLIMBUS_3_RX", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SLIMBUS_4_RX", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SLIMBUS_5_RX", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SLIMBUS_6_RX", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SLIMBUS_0_TX", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SLIMBUS_1_TX", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SLIMBUS_2_TX", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SLIMBUS_3_TX", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SLIMBUS_4_TX", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SLIMBUS_5_TX", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SLIMBUS_6_TX", NULL, 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_IN("QUAT_MI2S_RX", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_OUT("QUAT_MI2S_TX", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_IN("TERT_MI2S_RX", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_OUT("TERT_MI2S_TX", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_IN("SEC_MI2S_RX", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_OUT("SEC_MI2S_TX", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_IN("SEC_MI2S_RX_SD1",
"Secondary MI2S Playback SD1",
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_IN("PRI_MI2S_RX", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_OUT("PRI_MI2S_TX", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_0", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_1", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_2", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_3", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_4", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_5", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_6", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_7", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_0", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_1", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_2", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_3", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_4", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_5", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_6", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_7", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_0", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_1", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_2", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_3", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_4", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_5", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_6", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_7", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_0", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_1", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_2", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_3", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_4", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_5", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_6", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_7", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_0", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_1", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_2", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_3", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_4", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_5", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_6", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_7", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_0", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_1", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_2", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_3", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_4", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_5", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_6", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_7", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_0", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_1", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_2", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_3", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_4", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_5", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_6", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_7", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_0", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_1", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_2", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_3", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_4", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_5", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_6", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_7", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_0", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_1", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_2", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_3", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_4", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_5", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_6", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_7", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_0", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_1", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_2", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_3", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_4", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_5", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_6", NULL,
- 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_7", NULL,
- 0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("DISPLAY_PORT_RX", "NULL", 0, 0, 0, 0),
+ 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("DISPLAY_PORT_RX", "NULL", 0, SND_SOC_NOPM, 0, 0),
};
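The SND_SOC_NOPM conversions above fix a real bug class: SND_SOC_DAPM_AIF_IN()/_OUT() take (name, stream, slot, reg, shift, invert), and 0 in the reg slot is a valid register address, so DAPM was reading and writing register 0 on every power transition. SND_SOC_NOPM (-1) is the sentinel for widgets with no power-control register. Side by side, with an illustrative widget name:

/* Before: DAPM toggles bit 0 of register 0x0 on power transitions. */
SND_SOC_DAPM_AIF_IN("EXAMPLE_RX", NULL, 0, 0, 0, 0),

/* After: no register I/O; DAPM only tracks the widget in the graph. */
SND_SOC_DAPM_AIF_IN("EXAMPLE_RX", NULL, 0, SND_SOC_NOPM, 0, 0),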
static const struct snd_soc_component_driver q6afe_dai_component = {
return 0;
}
+static unsigned int q6routing_reg_read(struct snd_soc_component *component,
+ unsigned int reg)
+{
+	/* no backing hardware registers; reads return a default value */
+ return 0;
+}
+
+static int q6routing_reg_write(struct snd_soc_component *component,
+ unsigned int reg, unsigned int val)
+{
+	/* dummy write: accept and discard, there are no real registers */
+ return 0;
+}
+
static const struct snd_soc_component_driver msm_soc_routing_component = {
.probe = msm_routing_probe,
.name = DRV_NAME,
.num_dapm_widgets = ARRAY_SIZE(msm_qdsp6_widgets),
.dapm_routes = intercon,
.num_dapm_routes = ARRAY_SIZE(intercon),
+ .read = q6routing_reg_read,
+ .write = q6routing_reg_write,
};
static int q6pcm_routing_probe(struct platform_device *pdev)
switch (params_channels(params)) {
case 8:
val |= PDM_PATH3_EN;
- /* fallthrough */
+ fallthrough;
case 6:
val |= PDM_PATH2_EN;
- /* fallthrough */
+ fallthrough;
case 4:
val |= PDM_PATH1_EN;
- /* fallthrough */
+ fallthrough;
case 2:
val |= PDM_PATH0_EN;
break;
switch (params_channels(params)) {
case 6:
val |= MOD_DC2_EN;
- /* Fall through */
+ fallthrough;
case 4:
val |= MOD_DC1_EN;
break;
return 0;
}
-static void siu_io_tasklet(unsigned long data)
+static void siu_io_tasklet(struct tasklet_struct *t)
{
- struct siu_stream *siu_stream = (struct siu_stream *)data;
+ struct siu_stream *siu_stream = from_tasklet(siu_stream, t, tasklet);
struct snd_pcm_substream *substream = siu_stream->substream;
struct device *dev = substream->pcm->card->dev;
struct snd_pcm_runtime *rt = substream->runtime;
(*port_info)->pcm = pcm;
/* IO tasklets */
- tasklet_init(&(*port_info)->playback.tasklet, siu_io_tasklet,
- (unsigned long)&(*port_info)->playback);
- tasklet_init(&(*port_info)->capture.tasklet, siu_io_tasklet,
- (unsigned long)&(*port_info)->capture);
+ tasklet_setup(&(*port_info)->playback.tasklet, siu_io_tasklet);
+ tasklet_setup(&(*port_info)->capture.tasklet, siu_io_tasklet);
}
dev_info(card->dev, "SuperH SIU driver initialized.\n");
ret = -EIO;
if (ret < 0)
- soc_component_ret(component, ret);
+ return soc_component_ret(component, ret);
return val;
}
"ASoC: idle_bias_off CODEC on over suspend\n");
break;
}
- /* fall through */
+ fallthrough;
case SND_SOC_BIAS_OFF:
snd_soc_component_suspend(component);
ec->hdr.name);
goto err_denum;
}
- /* fall through */
+ fallthrough;
case SND_SOC_TPLG_CTL_ENUM:
case SND_SOC_TPLG_DAPM_CTL_ENUM_DOUBLE:
case SND_SOC_TPLG_DAPM_CTL_ENUM_VIRT:
ec->hdr.name);
goto err_se;
}
- /* fall through */
+ fallthrough;
case SND_SOC_TPLG_CTL_ENUM:
case SND_SOC_TPLG_DAPM_CTL_ENUM_DOUBLE:
case SND_SOC_TPLG_DAPM_CTL_ENUM_VIRT:
return ret;
}
- /* fallthrough */
+ fallthrough;
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
snd_hdac_ext_link_stream_start(link_dev);
link_dev->link_prepared = 0;
- /* fallthrough */
+ fallthrough;
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
snd_hdac_ext_link_stream_clear(link_dev);
break;
return ret;
}
- /* fallthrough */
+ fallthrough;
case SNDRV_PCM_TRIGGER_START:
if (spcm->stream[substream->stream].suspend_ignored) {
/*
spcm->stream[substream->stream].suspend_ignored = true;
return 0;
}
- /* fallthrough */
+ fallthrough;
case SNDRV_PCM_TRIGGER_STOP:
stream.hdr.cmd |= SOF_IPC_STREAM_TRIG_STOP;
ipc_first = true;
return 0;
}
-static int tegra186_dspk_runtime_suspend(struct device *dev)
+static int __maybe_unused tegra186_dspk_runtime_suspend(struct device *dev)
{
struct tegra186_dspk *dspk = dev_get_drvdata(dev);
return 0;
}
-static int tegra186_dspk_runtime_resume(struct device *dev)
+static int __maybe_unused tegra186_dspk_runtime_resume(struct device *dev)
{
struct tegra186_dspk *dspk = dev_get_drvdata(dev);
int err;
.cache_type = REGCACHE_FLAT,
};
-static int tegra_admaif_runtime_suspend(struct device *dev)
+static int __maybe_unused tegra_admaif_runtime_suspend(struct device *dev)
{
struct tegra_admaif *admaif = dev_get_drvdata(dev);
return 0;
}
-static int tegra_admaif_runtime_resume(struct device *dev)
+static int __maybe_unused tegra_admaif_runtime_resume(struct device *dev)
{
struct tegra_admaif *admaif = dev_get_drvdata(dev);
};
MODULE_DEVICE_TABLE(of, tegra_ahub_of_match);
-static int tegra_ahub_runtime_suspend(struct device *dev)
+static int __maybe_unused tegra_ahub_runtime_suspend(struct device *dev)
{
struct tegra_ahub *ahub = dev_get_drvdata(dev);
return 0;
}
-static int tegra_ahub_runtime_resume(struct device *dev)
+static int __maybe_unused tegra_ahub_runtime_resume(struct device *dev)
{
struct tegra_ahub *ahub = dev_get_drvdata(dev);
int err;
{ TEGRA210_DMIC_LP_BIQUAD_1_COEF_4, 0x0 },
};
-static int tegra210_dmic_runtime_suspend(struct device *dev)
+static int __maybe_unused tegra210_dmic_runtime_suspend(struct device *dev)
{
struct tegra210_dmic *dmic = dev_get_drvdata(dev);
return 0;
}
-static int tegra210_dmic_runtime_resume(struct device *dev)
+static int __maybe_unused tegra210_dmic_runtime_resume(struct device *dev)
{
struct tegra210_dmic *dmic = dev_get_drvdata(dev);
int err;
return tegra210_i2s_sw_reset(compnt, is_playback);
}
-static int tegra210_i2s_runtime_suspend(struct device *dev)
+static int __maybe_unused tegra210_i2s_runtime_suspend(struct device *dev)
{
struct tegra210_i2s *i2s = dev_get_drvdata(dev);
return 0;
}
-static int tegra210_i2s_runtime_resume(struct device *dev)
+static int __maybe_unused tegra210_i2s_runtime_resume(struct device *dev)
{
struct tegra210_i2s *i2s = dev_get_drvdata(dev);
int err;
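Each of the Tegra runtime-PM hunks above adds __maybe_unused for the same reason: when CONFIG_PM is disabled, SET_RUNTIME_PM_OPS() expands to nothing, the callbacks end up unreferenced, and -Wunused-function fires. The attribute silences the warning without #ifdef CONFIG_PM guards around the functions. A minimal sketch of the full pattern (the foo_* names are illustrative, not from the patch):

#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int __maybe_unused foo_runtime_suspend(struct device *dev)
{
	/* gate clocks, drop regulators, ... */
	return 0;
}

static int __maybe_unused foo_runtime_resume(struct device *dev)
{
	/* re-enable clocks and restore register state */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	/* Expands to empty initializers when CONFIG_PM=n, which is
	 * what would leave the two callbacks above unreferenced.
	 */
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};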
* rate is lowered.
*/
inv_fs = true;
- /* fall through */
+ fallthrough;
case SND_SOC_DAIFMT_DSP_A:
dev->mode = MOD_DSP_A;
break;
switch (n810_jack_func) {
case N810_JACK_HS:
line1l = 1;
- /* fall through */
+ fallthrough;
case N810_JACK_HP:
hp = 1;
break;
switch (channels) {
case 6:
dmic->ch_enabled |= OMAP_DMIC_UP3_ENABLE;
- /* fall through */
+ fallthrough;
case 4:
dmic->ch_enabled |= OMAP_DMIC_UP2_ENABLE;
- /* fall through */
+ fallthrough;
case 2:
dmic->ch_enabled |= OMAP_DMIC_UP1_ENABLE;
break;
/* up to 3 channels for capture */
return -EINVAL;
link_mask |= 1 << 4;
- /* fall through */
+ fallthrough;
case 4:
if (stream == SNDRV_PCM_STREAM_CAPTURE)
/* up to 3 channels for capture */
return -EINVAL;
link_mask |= 1 << 3;
- /* fall through */
+ fallthrough;
case 3:
link_mask |= 1 << 2;
- /* fall through */
+ fallthrough;
case 2:
link_mask |= 1 << 1;
- /* fall through */
+ fallthrough;
case 1:
link_mask |= 1 << 0;
break;
break;
case RX51_JACK_HS:
hs = 1;
- /* fall through */
+ fallthrough;
case RX51_JACK_HP:
hp = 1;
break;
#define NR_DMA_CHAIN 2
-static void txx9aclc_dma_tasklet(unsigned long data)
+static void txx9aclc_dma_tasklet(struct tasklet_struct *t)
{
- struct txx9aclc_dmadata *dmadata = (struct txx9aclc_dmadata *)data;
+ struct txx9aclc_dmadata *dmadata = from_tasklet(dmadata, t, tasklet);
struct dma_chan *chan = dmadata->dma_chan;
struct dma_async_tx_descriptor *desc;
struct snd_pcm_substream *substream = dmadata->substream;
"playback" : "capture");
return -EBUSY;
}
- tasklet_init(&dmadata->tasklet, txx9aclc_dma_tasklet,
- (unsigned long)dmadata);
+ tasklet_setup(&dmadata->tasklet, txx9aclc_dma_tasklet);
return 0;
}
zx_i2s_rx_dma_en(zx_i2s->reg_base, true);
else
zx_i2s_tx_dma_en(zx_i2s->reg_base, true);
- /* fall thru */
+ fallthrough;
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
if (capture)
zx_i2s_rx_dma_en(zx_i2s->reg_base, false);
else
zx_i2s_tx_dma_en(zx_i2s->reg_base, false);
- /* fall thru */
+ fallthrough;
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
if (capture)
val = readl_relaxed(zx_spdif->reg_base + ZX_FIFOCTRL);
val |= ZX_FIFOCTRL_TX_FIFO_RST;
writel_relaxed(val, zx_spdif->reg_base + ZX_FIFOCTRL);
- /* fall thru */
+ fallthrough;
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
zx_spdif_cfg_tx(zx_spdif->reg_base, true);
spin_unlock_irqrestore(&ep->buffer_lock, flags);
}
-static void snd_usbmidi_out_tasklet(unsigned long data)
+static void snd_usbmidi_out_tasklet(struct tasklet_struct *t)
{
- struct snd_usb_midi_out_endpoint *ep =
- (struct snd_usb_midi_out_endpoint *) data;
+ struct snd_usb_midi_out_endpoint *ep = from_tasklet(ep, t, tasklet);
snd_usbmidi_do_output(ep);
}
}
spin_lock_init(&ep->buffer_lock);
- tasklet_init(&ep->tasklet, snd_usbmidi_out_tasklet, (unsigned long)ep);
+ tasklet_setup(&ep->tasklet, snd_usbmidi_out_tasklet);
init_waitqueue_head(&ep->drain_wait);
for (i = 0; i < 0x10; ++i)
*value -= ua->playback.queue_length;
}
-static void playback_tasklet(unsigned long data)
+static void playback_tasklet(struct tasklet_struct *t)
{
- struct ua101 *ua = (void *)data;
+ struct ua101 *ua = from_tasklet(ua, t, playback_tasklet);
unsigned long flags;
unsigned int frames;
struct ua101_urb *urb;
spin_lock_init(&ua->lock);
mutex_init(&ua->mutex);
INIT_LIST_HEAD(&ua->ready_playback_urbs);
- tasklet_init(&ua->playback_tasklet,
- playback_tasklet, (unsigned long)ua);
+ tasklet_setup(&ua->playback_tasklet, playback_tasklet);
init_waitqueue_head(&ua->alsa_capture_wait);
init_waitqueue_head(&ua->rate_feedback_wait);
init_waitqueue_head(&ua->alsa_playback_wait);
int num_ins;
struct usb_mixer_elem_info *cval;
struct snd_kcontrol *kctl;
- int i, err, nameid, type, len;
+ int i, err, nameid, type, len, val;
const struct procunit_info *info;
const struct procunit_value_info *valinfo;
const struct usbmix_name_map *map;
break;
}
+ err = get_cur_ctl_value(cval, cval->control << 8, &val);
+ if (err < 0) {
+ usb_mixer_elem_info_free(cval);
+ return -EINVAL;
+ }
+
kctl = snd_ctl_new1(&mixer_procunit_ctl, cval);
if (!kctl) {
usb_mixer_elem_info_free(cval);
case USB_ID(0x07fd, 0x0008): /* MOTU M Series */
case USB_ID(0x31e9, 0x0001): /* Solid State Logic SSL2 */
case USB_ID(0x31e9, 0x0002): /* Solid State Logic SSL2+ */
+ case USB_ID(0x0499, 0x172f): /* Steinberg UR22C */
case USB_ID(0x0d9a, 0x00df): /* RTX6001 */
ep = 0x81;
ifnum = 2;
goto add_sync_ep_from_ifnum;
case USB_ID(0x2b73, 0x000a): /* Pioneer DJ DJM-900NXS2 */
+ case USB_ID(0x2b73, 0x0017): /* Pioneer DJ DJM-250MK2 */
ep = 0x82;
ifnum = 0;
goto add_sync_ep_from_ifnum;
.data = (const struct snd_usb_audio_quirk[]) {
{
.ifnum = 0,
+ .type = QUIRK_AUDIO_STANDARD_MIXER,
+ },
+ {
+ .ifnum = 0,
.type = QUIRK_AUDIO_FIXED_ENDPOINT,
.data = &(const struct audioformat) {
.formats = SNDRV_PCM_FMTBIT_S24_3LE,
.attributes = UAC_EP_CS_ATTR_SAMPLE_RATE,
.endpoint = 0x01,
.ep_attr = USB_ENDPOINT_XFER_ISOC,
+ .datainterval = 1,
+ .maxpacksize = 0x024c,
+ .rates = SNDRV_PCM_RATE_44100 |
+ SNDRV_PCM_RATE_48000,
+ .rate_min = 44100,
+ .rate_max = 48000,
+ .nr_rates = 2,
+ .rate_table = (unsigned int[]) {
+ 44100, 48000
+ }
+ }
+ },
+ {
+ .ifnum = 0,
+ .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+ .data = &(const struct audioformat) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .channels = 2,
+ .iface = 0,
+ .altsetting = 1,
+ .altset_idx = 1,
+ .attributes = 0,
+ .endpoint = 0x82,
+ .ep_attr = USB_ENDPOINT_XFER_ISOC,
+ .datainterval = 1,
+ .maxpacksize = 0x0126,
.rates = SNDRV_PCM_RATE_44100 |
SNDRV_PCM_RATE_48000,
.rate_min = 44100,
/* Lenovo ThinkStation P620 Rear Line-in, Line-out and Microphone */
{
USB_DEVICE(0x17aa, 0x1046),
- QUIRK_DEVICE_PROFILE("Lenovo", "ThinkStation P620 Rear",
- "Lenovo-ThinkStation-P620-Rear"),
+	.driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+ .vendor_name = "Lenovo",
+ .product_name = "ThinkStation P620 Rear",
+ .profile_name = "Lenovo-ThinkStation-P620-Rear",
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_SETUP_DISABLE_AUTOSUSPEND
+ }
},
/* Lenovo ThinkStation P620 Internal Speaker + Front Headset */
{
USB_DEVICE(0x17aa, 0x104d),
- QUIRK_DEVICE_PROFILE("Lenovo", "ThinkStation P620 Main",
- "Lenovo-ThinkStation-P620-Main"),
+	.driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+ .vendor_name = "Lenovo",
+ .product_name = "ThinkStation P620 Main",
+ .profile_name = "Lenovo-ThinkStation-P620-Main",
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_SETUP_DISABLE_AUTOSUSPEND
+ }
},
/* Native Instruments MK2 series */
{
/*
* Pioneer DJ DJM-250MK2
- * PCM is 8 channels out @ 48 fixed (endpoints 0x01).
- * The output from computer to the mixer is usable.
+ * PCM is 8 channels out @ 48 fixed (endpoint 0x01)
+ * and 8 channels in @ 48 fixed (endpoint 0x82).
+ *
+ * Both playback and recording work, even simultaneously.
+ *
+ * Playback channels can be mapped to:
+ * - CH1
+ * - CH2
+ * - AUX
*
- * The input (phono or line to computer) is not working.
- * It should be at endpoint 0x82 and probably also 8 channels,
- * but it seems that it works only with Pioneer proprietary software.
- * Even on officially supported OS, the Audacity was unable to record
- * and Mixxx to recognize the control vinyls.
+ * Recording channels can be mapped to:
+ * - Post CH1 Fader
+ * - Post CH2 Fader
+ * - Cross Fader A
+ * - Cross Fader B
+ * - MIC
+ * - AUX
+ * - REC OUT
+ *
+ * There is a remaining problem with recording directly from PHONO/LINE.
+ * If we map a channel to:
+ * - CH1 Control Tone PHONO
+ * - CH1 Control Tone LINE
+ * - CH2 Control Tone PHONO
+ * - CH2 Control Tone LINE
+ * it is silent.
+ * There is no signal even on other operating systems with official drivers.
+ * The signal appears only when a supported application is started.
+ * This still needs to be investigated
+ * (there is quite a lot of communication on the USB bus in both directions).
+ *
+ * In the current version this mixer can be used for playback
+ * and for recording from vinyl (through the Post CH* faders),
+ * but not for DVS (Digital Vinyl Systems) such as Mixxx.
*/
USB_DEVICE_VENDOR_SPEC(0x2b73, 0x0017),
.driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
.rate_max = 48000,
.nr_rates = 1,
.rate_table = (unsigned int[]) { 48000 }
+ }
+ },
+ {
+ .ifnum = 0,
+ .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+ .data = &(const struct audioformat) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .channels = 8, // inputs
+ .iface = 0,
+ .altsetting = 1,
+ .altset_idx = 1,
+ .endpoint = 0x82,
+ .ep_attr = USB_ENDPOINT_XFER_ISOC|
+ USB_ENDPOINT_SYNC_ASYNC|
+ USB_ENDPOINT_USAGE_IMPLICIT_FB,
+ .rates = SNDRV_PCM_RATE_48000,
+ .rate_min = 48000,
+ .rate_max = 48000,
+ .nr_rates = 1,
+ .rate_table = (unsigned int[]) { 48000 }
}
},
{
* they pretend to be 96kHz mono as a workaround for stereo being broken
* by that...
*
- * They also have swapped L-R channels, but that's for userspace to deal
- * with.
+ * They also have an issue with initial stream alignment that causes the
+ * channels to be swapped and out of phase, which is dealt with in quirks.c.
*/
{
.match_flags = USB_DEVICE_ID_MATCH_DEVICE |
return 1; /* Continue with creating streams and mixer */
}
+static int setup_disable_autosuspend(struct snd_usb_audio *chip,
+ struct usb_interface *iface,
+ struct usb_driver *driver,
+ const struct snd_usb_audio_quirk *quirk)
+{
+ driver->supports_autosuspend = 0;
+ return 1; /* Continue with creating streams and mixer */
+}
+
/*
* audio-interface quirks
*
[QUIRK_AUDIO_ALIGN_TRANSFER] = create_align_transfer_quirk,
[QUIRK_AUDIO_STANDARD_MIXER] = create_standard_mixer_quirk,
[QUIRK_SETUP_FMT_AFTER_RESUME] = setup_fmt_after_resume_quirk,
+ [QUIRK_SETUP_DISABLE_AUTOSUSPEND] = setup_disable_autosuspend,
};
if (quirk->type < QUIRK_TYPE_COUNT) {
set_format_emu_quirk(subs, fmt);
break;
case USB_ID(0x2b73, 0x000a): /* Pioneer DJ DJM-900NXS2 */
+ case USB_ID(0x2b73, 0x0017): /* Pioneer DJ DJM-250MK2 */
pioneer_djm_set_format_quirk(subs);
break;
case USB_ID(0x534d, 0x2109): /* MacroSilicon MS2109 */
QUIRK_AUDIO_ALIGN_TRANSFER,
QUIRK_AUDIO_STANDARD_MIXER,
QUIRK_SETUP_FMT_AFTER_RESUME,
+ QUIRK_SETUP_DISABLE_AUTOSUSPEND,
QUIRK_TYPE_COUNT
};
if SND_X86
config HDMI_LPE_AUDIO
- tristate "HDMI audio without HDaudio on Intel Atom platforms"
+ tristate "HDMI audio without HDAudio on Intel Atom platforms"
depends on DRM_I915
select SND_PCM
help
if (!info->btf_id || !info->nr_func_info ||
btf__get_from_id(info->btf_id, &prog_btf))
goto print;
- finfo = (struct bpf_func_info *)info->func_info;
+ finfo = u64_to_ptr(info->func_info);
func_type = btf__type_by_id(prog_btf, finfo->type_id);
if (!func_type || !btf_is_func(func_type))
goto print;
var_name, align);
return -EINVAL;
}
+		/* Assume a 32-bit architecture when generating the data
+		 * section struct memory layout. Since bpftool can't know
+		 * which target architecture it's emitting a skeleton for,
+		 * we have to be conservative and assume 32-bit, so that
+		 * enough padding bytes are generated for pointer and long
+		 * types. This still works correctly for 64-bit
+		 * architectures: in the worst case we generate an
+		 * unnecessary padding field, one that natural 8-byte
+		 * alignment would have covered anyway. The layout stays
+		 * correct with respect to the offsets recorded in BTF.
+		 */
+ if (align > 4)
+ align = 4;
align_off = (off + align - 1) / align * align;
if (align_off != need_off) {
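To see why the cap matters, take a pointer member that BTF records at offset 4, as a 32-bit target lays it out. The round-up on the line above gives the recorded offset only with the capped alignment; an 8-byte pointer assumption would overshoot. A tiny standalone check with those illustrative numbers:

#include <stdio.h>

int main(void)
{
	int off = 4, need_off = 4;

	/* capped: matches the 32-bit layout recorded in BTF */
	int align_off_32 = (off + 4 - 1) / 4 * 4;	/* 4 == need_off */
	/* uncapped 8-byte pointer alignment overshoots */
	int align_off_64 = (off + 8 - 1) / 8 * 8;	/* 8 != need_off */

	printf("%d %d (need %d)\n", align_off_32, align_off_64, need_off);
	return 0;
}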
{ \n\
struct %1$s *obj; \n\
\n\
- obj = (typeof(obj))calloc(1, sizeof(*obj)); \n\
+ obj = (struct %1$s *)calloc(1, sizeof(*obj)); \n\
if (!obj) \n\
return NULL; \n\
if (%1$s__create_skeleton(obj)) \n\
{ \n\
struct bpf_object_skeleton *s; \n\
\n\
- s = (typeof(s))calloc(1, sizeof(*s)); \n\
+ s = (struct bpf_object_skeleton *)calloc(1, sizeof(*s));\n\
if (!s) \n\
return -1; \n\
obj->skeleton = s; \n\
/* maps */ \n\
s->map_cnt = %zu; \n\
s->map_skel_sz = sizeof(*s->maps); \n\
- s->maps = (typeof(s->maps))calloc(s->map_cnt, s->map_skel_sz);\n\
+ s->maps = (struct bpf_map_skeleton *)calloc(s->map_cnt, s->map_skel_sz);\n\
if (!s->maps) \n\
goto err; \n\
",
/* programs */ \n\
s->prog_cnt = %zu; \n\
s->prog_skel_sz = sizeof(*s->progs); \n\
- s->progs = (typeof(s->progs))calloc(s->prog_cnt, s->prog_skel_sz);\n\
+ s->progs = (struct bpf_prog_skeleton *)calloc(s->prog_cnt, s->prog_skel_sz);\n\
if (!s->progs) \n\
goto err; \n\
",
switch (info->type) {
case BPF_LINK_TYPE_RAW_TRACEPOINT:
jsonw_string_field(json_wtr, "tp_name",
- (const char *)info->raw_tracepoint.tp_name);
+ u64_to_ptr(info->raw_tracepoint.tp_name));
break;
case BPF_LINK_TYPE_TRACING:
err = get_prog_info(info->prog_id, &prog_info);
switch (info->type) {
case BPF_LINK_TYPE_RAW_TRACEPOINT:
printf("\n\ttp '%s' ",
- (const char *)info->raw_tracepoint.tp_name);
+ (const char *)u64_to_ptr(info->raw_tracepoint.tp_name));
break;
case BPF_LINK_TYPE_TRACING:
err = get_prog_info(info->prog_id, &prog_info);
/* Make sure we do not use kernel-only integer typedefs */
#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
-#define ptr_to_u64(ptr) ((__u64)(unsigned long)(ptr))
+static inline __u64 ptr_to_u64(const void *ptr)
+{
+ return (__u64)(unsigned long)ptr;
+}
+
+static inline void *u64_to_ptr(__u64 ptr)
+{
+ return (void *)(unsigned long)ptr;
+}
#define NEXT_ARG() ({ argc--; argv++; if (argc < 0) usage(); })
#define NEXT_ARGP() ({ (*argc)--; (*argv)++; if (*argc < 0) usage(); })
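The u64_to_ptr() cleanups throughout this patch exist because bpf_prog_info and friends carry user pointers as __u64 fields regardless of the host word size; making the old ptr_to_u64 macro an inline function (and adding its inverse) buys type checking and removes the scattered double casts. A usage sketch, assuming the helpers above and a valid prog_fd:

#include <bpf/bpf.h>

static void read_jited_ksyms(int prog_fd)
{
	struct bpf_prog_info info = {};
	__u32 len = sizeof(info);
	__u64 ksym_buf[16];

	/* Kernel-bound: user pointers travel as __u64 fields. */
	info.jited_ksyms = ptr_to_u64(ksym_buf);
	info.nr_jited_ksyms = 16;

	if (bpf_obj_get_info_by_fd(prog_fd, &info, &len))
		return;

	/* ...and come back the same way; u64_to_ptr() hides the cast. */
	__u64 *ksyms = u64_to_ptr(info.jited_ksyms);
	(void)ksyms;
}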
while (true) {
ret = read(fd, buf, sizeof(buf));
if (ret < 0) {
+ if (errno == EAGAIN)
+ continue;
err = -errno;
p_err("failed to read PID iterator output: %d", err);
goto out;
p_info("no instructions returned");
return -1;
}
- buf = (unsigned char *)(info->jited_prog_insns);
+ buf = u64_to_ptr(info->jited_prog_insns);
member_len = info->jited_prog_len;
} else { /* DUMP_XLATED */
if (info->xlated_prog_len == 0 || !info->xlated_prog_insns) {
p_err("error retrieving insn dump: kernel.kptr_restrict set?");
return -1;
}
- buf = (unsigned char *)info->xlated_prog_insns;
+ buf = u64_to_ptr(info->xlated_prog_insns);
member_len = info->xlated_prog_len;
}
return -1;
}
- func_info = (void *)info->func_info;
+ func_info = u64_to_ptr(info->func_info);
if (info->nr_line_info) {
prog_linfo = bpf_prog_linfo__new(info);
n = write(fd, buf, member_len);
close(fd);
- if (n != member_len) {
+ if (n != (ssize_t)member_len) {
p_err("error writing output file: %s",
n < 0 ? strerror(errno) : "short write");
return -1;
__u32 i;
if (info->nr_jited_ksyms) {
kernel_syms_load(&dd);
- ksyms = (__u64 *) info->jited_ksyms;
+ ksyms = u64_to_ptr(info->jited_ksyms);
}
if (json_output)
jsonw_start_array(json_wtr);
- lens = (__u32 *) info->jited_func_lens;
+ lens = u64_to_ptr(info->jited_func_lens);
for (i = 0; i < info->nr_jited_func_lens; i++) {
if (ksyms) {
sym = kernel_syms_search(&dd, ksyms[i]);
} else {
kernel_syms_load(&dd);
dd.nr_jited_ksyms = info->nr_jited_ksyms;
- dd.jited_ksyms = (__u64 *) info->jited_ksyms;
+ dd.jited_ksyms = u64_to_ptr(info->jited_ksyms);
dd.btf = btf;
dd.func_info = func_info;
dd.finfo_rec_size = info->func_info_rec_size;
goto out;
}
- func_info = (struct bpf_func_info *)(info_linear->info.func_info);
+ func_info = u64_to_ptr(info_linear->info.func_info);
t = btf__type_by_id(btf, func_info[0].type_id);
if (!t) {
p_err("btf %d doesn't have type %d",
return btf_id__add(root, id, false);
}
+/*
+ * The data of a compressed section should be aligned to 4
+ * (for 32-bit) or 8 (for 64-bit) bytes. The binutils ld
+ * sets sh_addralign to 1, which makes libelf fail with a
+ * misaligned-section error during the update:
+ *   FAILED elf_update(WRITE): invalid section alignment
+ *
+ * Until ld is fixed, correct the sh_addralign value of
+ * compressed sections manually.
+ */
+static int compressed_section_fix(Elf *elf, Elf_Scn *scn, GElf_Shdr *sh)
+{
+ int expected = gelf_getclass(elf) == ELFCLASS32 ? 4 : 8;
+
+ if (!(sh->sh_flags & SHF_COMPRESSED))
+ return 0;
+
+ if (sh->sh_addralign == expected)
+ return 0;
+
+ pr_debug2(" - fixing wrong alignment sh_addralign %u, expected %u\n",
+ sh->sh_addralign, expected);
+
+ sh->sh_addralign = expected;
+
+ if (gelf_update_shdr(scn, sh) == 0) {
+ printf("FAILED cannot update section header: %s\n",
+ elf_errmsg(-1));
+ return -1;
+ }
+ return 0;
+}
+
static int elf_collect(struct object *obj)
{
Elf_Scn *scn = NULL;
obj->efile.idlist_shndx = idx;
obj->efile.idlist_addr = sh.sh_addr;
}
+
+ if (compressed_section_fix(elf, scn, &sh))
+ return -1;
}
return 0;
*
* Also, note that **bpf_trace_printk**\ () is slow, and should
* only be used for debugging purposes. For this reason, a notice
- * bloc (spanning several lines) is printed to kernel logs and
+ * block (spanning several lines) is printed to kernel logs and
* states that the helper should not be used "for production use"
* the first time this helper is used (or more precisely, when
* **trace_printk**\ () buffers are allocated). For passing values
*
* int ret;
* struct bpf_tunnel_key key = {};
- *
+ *
* ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
* if (ret < 0)
* return TC_ACT_SHOT; // drop packet
- *
+ *
* if (key.remote_ipv4 != 0x0a000001)
* return TC_ACT_SHOT; // drop packet
- *
+ *
* return TC_ACT_OK; // accept packet
*
* This interface can also be used with all encapsulation devices
* Description
* Retrieve the realm or the route, that is to say the
* **tclassid** field of the destination for the *skb*. The
- * indentifier retrieved is a user-provided tag, similar to the
+ * identifier retrieved is a user-provided tag, similar to the
* one used with the net_cls cgroup (see description for
* **bpf_get_cgroup_classid**\ () helper), but here this tag is
* held by a route (a destination entry), not by a task.
#define PERF_MEM_SNOOPX_FWD 0x01 /* forward */
/* 1 free */
-#define PERF_MEM_SNOOPX_SHIFT 37
+#define PERF_MEM_SNOOPX_SHIFT 38
/* locked instruction */
#define PERF_MEM_LOCK_NA 0x01 /* not available */
* Helper macro to manipulate data structures
*/
#ifndef offsetof
-#define offsetof(TYPE, MEMBER) __builtin_offsetof(TYPE, MEMBER)
+#define offsetof(TYPE, MEMBER) ((unsigned long)&((TYPE *)0)->MEMBER)
#endif
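The offsetof change above swaps the compiler builtin for the classic null-pointer formulation, which compilers used for BPF targets can still fold at compile time even without __builtin_offsetof. A small standalone check of what it computes (struct and offset are illustrative; common ABIs assumed):

#include <stdio.h>

struct example {
	int  a;		/* bytes 0..3 */
	char b;		/* byte 4 */
};

/* same shape as the fallback above, renamed to avoid <stddef.h> */
#define my_offsetof(TYPE, MEMBER) ((unsigned long)&((TYPE *)0)->MEMBER)

int main(void)
{
	/* The member address measured from a zero base is exactly its
	 * byte offset within the struct: prints 4 here.
	 */
	printf("%lu\n", my_offsetof(struct example, b));
	return 0;
}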
#ifndef container_of
#define container_of(ptr, type, member) \
__u32 types_size;
__u32 data_size;
int fd;
+ int ptr_sz;
};
static inline __u64 ptr_to_u64(const void *ptr)
return btf->types[type_id];
}
+static int determine_ptr_size(const struct btf *btf)
+{
+ const struct btf_type *t;
+ const char *name;
+ int i;
+
+ for (i = 1; i <= btf->nr_types; i++) {
+ t = btf__type_by_id(btf, i);
+ if (!btf_is_int(t))
+ continue;
+
+ name = btf__name_by_offset(btf, t->name_off);
+ if (!name)
+ continue;
+
+ if (strcmp(name, "long int") == 0 ||
+ strcmp(name, "long unsigned int") == 0) {
+ if (t->size != 4 && t->size != 8)
+ continue;
+ return t->size;
+ }
+ }
+
+ return -1;
+}
+
+static size_t btf_ptr_sz(const struct btf *btf)
+{
+ if (!btf->ptr_sz)
+ ((struct btf *)btf)->ptr_sz = determine_ptr_size(btf);
+ return btf->ptr_sz < 0 ? sizeof(void *) : btf->ptr_sz;
+}
+
+/* Return the pointer size this BTF instance assumes. The size is
+ * heuristically determined by looking for a 'long int' or 'long unsigned int'
+ * integer type and recording its size in bytes. If the BTF type info has no
+ * such type, this function returns 0. In that case, callers inside libbpf
+ * fall back to the native architecture's pointer size, i.e. 4 or 8 depending
+ * on what libbpf was compiled for. The guessed value can be overridden with
+ * the btf__set_pointer_size() API.
+ */
+size_t btf__pointer_size(const struct btf *btf)
+{
+ if (!btf->ptr_sz)
+ ((struct btf *)btf)->ptr_sz = determine_ptr_size(btf);
+
+ if (btf->ptr_sz < 0)
+ /* not enough BTF type info to guess */
+ return 0;
+
+ return btf->ptr_sz;
+}
+
+/* Override or set pointer size in bytes. Only values of 4 and 8 are
+ * supported.
+ */
+int btf__set_pointer_size(struct btf *btf, size_t ptr_sz)
+{
+ if (ptr_sz != 4 && ptr_sz != 8)
+ return -EINVAL;
+ btf->ptr_sz = ptr_sz;
+ return 0;
+}
+
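Together, the new accessors let a caller pin down the pointer width when the heuristic comes up empty. A usage sketch, assuming a BPF object file path (BPF-targeted BTF is always 64-bit, hence the 8):

#include <bpf/btf.h>
#include <bpf/libbpf.h>

static struct btf *load_btf_for_bpf(const char *path)
{
	struct btf *btf = btf__parse_elf(path, NULL);

	if (libbpf_get_error(btf))
		return NULL;

	/* 0 means no "long" type was found to guess from */
	if (btf__pointer_size(btf) == 0)
		btf__set_pointer_size(btf, 8);	/* BPF target is 64-bit */

	return btf;
}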
static bool btf_type_is_void(const struct btf_type *t)
{
return t == &btf_void || btf_is_fwd(t);
size = t->size;
goto done;
case BTF_KIND_PTR:
- size = sizeof(void *);
+ size = btf_ptr_sz(btf);
goto done;
case BTF_KIND_TYPEDEF:
case BTF_KIND_VOLATILE:
switch (kind) {
case BTF_KIND_INT:
case BTF_KIND_ENUM:
- return min(sizeof(void *), (size_t)t->size);
+ return min(btf_ptr_sz(btf), (size_t)t->size);
case BTF_KIND_PTR:
- return sizeof(void *);
+ return btf_ptr_sz(btf);
case BTF_KIND_TYPEDEF:
case BTF_KIND_VOLATILE:
case BTF_KIND_CONST:
if (IS_ERR(btf))
goto done;
+ switch (gelf_getclass(elf)) {
+ case ELFCLASS32:
+ btf__set_pointer_size(btf, 4);
+ break;
+ case ELFCLASS64:
+ btf__set_pointer_size(btf, 8);
+ break;
+ default:
+ pr_warn("failed to get ELF class (bitness) for %s\n", path);
+ break;
+ }
+
if (btf_ext && btf_ext_data) {
*btf_ext = btf_ext__new(btf_ext_data->d_buf,
btf_ext_data->d_size);
LIBBPF_API __u32 btf__get_nr_types(const struct btf *btf);
LIBBPF_API const struct btf_type *btf__type_by_id(const struct btf *btf,
__u32 id);
+LIBBPF_API size_t btf__pointer_size(const struct btf *btf);
+LIBBPF_API int btf__set_pointer_size(struct btf *btf, size_t ptr_sz);
LIBBPF_API __s64 btf__resolve_size(const struct btf *btf, __u32 type_id);
LIBBPF_API int btf__resolve_type(const struct btf *btf, __u32 type_id);
LIBBPF_API int btf__align_of(const struct btf *btf, __u32 id);
#include <errno.h>
#include <linux/err.h>
#include <linux/btf.h>
+#include <linux/kernel.h>
#include "btf.h"
#include "hashmap.h"
#include "libbpf.h"
const struct btf_ext *btf_ext;
btf_dump_printf_fn_t printf_fn;
struct btf_dump_opts opts;
+ int ptr_sz;
bool strip_mods;
/* per-type auxiliary state */
d->btf_ext = btf_ext;
d->printf_fn = printf_fn;
d->opts.ctx = opts ? opts->ctx : NULL;
+ d->ptr_sz = btf__pointer_size(btf) ? : sizeof(void *);
d->type_names = hashmap__new(str_hash_fn, str_equal_fn, NULL);
if (IS_ERR(d->type_names)) {
}
}
+static void btf_dump_emit_missing_aliases(struct btf_dump *d, __u32 id,
+ const struct btf_type *t);
+
static void btf_dump_emit_struct_fwd(struct btf_dump *d, __u32 id,
const struct btf_type *t);
static void btf_dump_emit_struct_def(struct btf_dump *d, __u32 id,
switch (kind) {
case BTF_KIND_INT:
+ /* Emit type alias definitions if necessary */
+ btf_dump_emit_missing_aliases(d, id, t);
+
tstate->emit_state = EMITTED;
break;
case BTF_KIND_ENUM:
int align, int lvl)
{
int off_diff = m_off - cur_off;
- int ptr_bits = sizeof(void *) * 8;
+ int ptr_bits = d->ptr_sz * 8;
if (off_diff <= 0)
/* no gap */
btf_dump_printf(d, ": %d", m_sz);
off = m_off + m_sz;
} else {
- m_sz = max(0, btf__resolve_size(d->btf, m->type));
+ m_sz = max((__s64)0, btf__resolve_size(d->btf, m->type));
off = m_off + m_sz * 8;
}
btf_dump_printf(d, ";");
btf_dump_printf(d, " __attribute__((packed))");
}
+static const char *missing_base_types[][2] = {
+ /*
+ * GCC emits typedefs to its internal __PolyX_t types when compiling Arm
+ * SIMD intrinsics. Alias them to standard base types.
+ */
+ { "__Poly8_t", "unsigned char" },
+ { "__Poly16_t", "unsigned short" },
+ { "__Poly64_t", "unsigned long long" },
+ { "__Poly128_t", "unsigned __int128" },
+};
+
+static void btf_dump_emit_missing_aliases(struct btf_dump *d, __u32 id,
+ const struct btf_type *t)
+{
+ const char *name = btf_dump_type_name(d, id);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(missing_base_types); i++) {
+ if (strcmp(name, missing_base_types[i][0]) == 0) {
+ btf_dump_printf(d, "typedef %s %s;\n\n",
+ missing_base_types[i][1], name);
+ break;
+ }
+ }
+}
+
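With the alias table above in place, a dumped header that references one of GCC's internal Arm SIMD polynomial types now begins with an ordinary typedef instead of an undefined name. For __Poly8_t, the emitted C is simply:

typedef unsigned char __Poly8_t;
/* ...followed by the struct/union definitions that use it */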
static void btf_dump_emit_enum_fwd(struct btf_dump *d, __u32 id,
const struct btf_type *t)
{
data = elf_getdata(scn, NULL);
if (!scn || !data) {
pr_warn("failed to get Elf_Data from map section %d (%s)\n",
- obj->efile.maps_shndx, MAPS_ELF_SEC);
+ obj->efile.btf_maps_shndx, MAPS_ELF_SEC);
return -EINVAL;
}
BTF_ELF_SEC, err);
goto out;
}
+ /* enforce 8-byte pointers for BPF-targeted BTFs */
+ btf__set_pointer_size(obj->btf, 8);
err = 0;
}
if (btf_ext_data) {
if (IS_ERR(kern_btf))
return PTR_ERR(kern_btf);
+ /* enforce 8-byte pointers for BPF-targeted BTFs */
+ btf__set_pointer_size(obj->btf, 8);
bpf_object__sanitize_btf(obj, kern_btf);
}
map = bpf_create_map_xattr(&map_attr);
if (map < 0) {
- cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
+ ret = -errno;
+ cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
- __func__, cp, errno);
- return -errno;
+ __func__, cp, -ret);
+ return ret;
}
insns[0].imm = map;
static int bpf_object__collect_map_relos(struct bpf_object *obj,
GElf_Shdr *shdr, Elf_Data *data)
{
- int i, j, nrels, new_sz, ptr_sz = sizeof(void *);
+ const int bpf_ptr_sz = 8, host_ptr_sz = sizeof(void *);
+ int i, j, nrels, new_sz;
const struct btf_var_secinfo *vi = NULL;
const struct btf_type *sec, *var, *def;
const struct btf_member *member;
vi = btf_var_secinfos(sec) + map->btf_var_idx;
if (vi->offset <= rel.r_offset &&
- rel.r_offset + sizeof(void *) <= vi->offset + vi->size)
+ rel.r_offset + bpf_ptr_sz <= vi->offset + vi->size)
break;
}
if (j == obj->nr_maps) {
return -EINVAL;
moff = rel.r_offset - vi->offset - moff;
- if (moff % ptr_sz)
+		/* Use the BPF pointer size here, which is always 64-bit,
+		 * since we are parsing an ELF built for the BPF target.
+		 */
+ if (moff % bpf_ptr_sz)
return -EINVAL;
- moff /= ptr_sz;
+ moff /= bpf_ptr_sz;
if (moff >= map->init_slots_sz) {
new_sz = moff + 1;
- tmp = realloc(map->init_slots, new_sz * ptr_sz);
+ tmp = realloc(map->init_slots, new_sz * host_ptr_sz);
if (!tmp)
return -ENOMEM;
map->init_slots = tmp;
memset(map->init_slots + map->init_slots_sz, 0,
- (new_sz - map->init_slots_sz) * ptr_sz);
+ (new_sz - map->init_slots_sz) * host_ptr_sz);
map->init_slots_sz = new_sz;
}
map->init_slots[moff] = targ_map;
}
if (bpf_obj_pin(prog->instances.fds[instance], path)) {
- cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
+ err = -errno;
+ cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
pr_warn("failed to pin program: %s\n", cp);
- return -errno;
+ return err;
}
pr_debug("pinned program '%s'\n", path);
bpf_program__set_sk_lookup;
btf__parse;
btf__parse_raw;
+ btf__pointer_size;
btf__set_fd;
+ btf__set_pointer_size;
} LIBBPF_0.0.9;
default:
ret = 0;
val = eval_num_arg(data, size, event, arg);
- trace_seq_printf(s, "%p", (void *)val);
+ trace_seq_printf(s, "%p", (void *)(intptr_t)val);
break;
}
- a raw PMU event (eventsel+umask) in the form of rNNN where NNN is a
hexadecimal event descriptor.
+ - a symbolic or raw PMU event followed by an optional colon
+ and a list of event modifiers, e.g., cpu-cycles:p. See the
+ linkperf:perf-list[1] man page for details on event modifiers.
+
- a symbolically formed PMU event like 'pmu/param1=0x3,param2/' where
'param1', 'param2', etc are defined as formats for the PMU in
/sys/bus/event_source/devices/<pmu>/format/*.
- a raw PMU event (eventsel+umask) in the form of rNNN where NNN is a
hexadecimal event descriptor.
+ - a symbolic or raw PMU event followed by an optional colon
+ and a list of event modifiers, e.g., cpu-cycles:p. See the
+ linkperf:perf-list[1] man page for details on event modifiers.
+
- a symbolically formed event like 'pmu/param1=0x3,param2/' where
param1 and param2 are defined as formats for the PMU in
/sys/bus/event_source/devices/<pmu>/format/*
hardware thread. This is essentially a replacement for the any bit and
convenient for post processing.
+--summary::
+Print summary for interval mode (-I).
+
EXAMPLES
--------
init_stats(&event_stats);
for (i = 0; i < multi_iterations; i++) {
session = perf_session__new(NULL, false, NULL);
- if (!session)
- return -ENOMEM;
+ if (IS_ERR(session))
+ return PTR_ERR(session);
atomic_set(&event_count, 0);
gettimeofday(&start, NULL);
OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
"synthesize non-sample events at the end of output"),
OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
- OPT_BOOLEAN(0, "no-bpf-event", &record.opts.no_bpf_event, "record bpf events"),
+ OPT_BOOLEAN(0, "no-bpf-event", &record.opts.no_bpf_event, "do not record bpf events"),
OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq,
"Fail if the specified frequency can't be used"),
OPT_CALLBACK('F', "freq", &record.opts, "freq or 'max'",
if (report.mmaps_mode)
report.tasks_mode = true;
+ if (dump_trace)
+ report.tool.ordered_events = false;
+
if (quiet)
perf_quiet_option();
}
if (!sched->idle_hist || thread->tid == 0) {
- timehist_update_runtime_stats(tr, t, tprev);
+ if (!cpu_list || test_bit(sample->cpu, cpu_bitmap))
+ timehist_update_runtime_stats(tr, t, tprev);
if (sched->idle_hist) {
struct idle_thread_runtime *itr = (void *)tr;
printf("\nIdle stats:\n");
for (i = 0; i < idle_max_cpu; ++i) {
+ if (cpu_list && !test_bit(i, cpu_bitmap))
+ continue;
+
t = idle_threads[i];
if (!t)
continue;
{
struct evsel *counter;
- if (!stat_config.summary && (read_affinity_counters(rs) < 0))
+ if (!stat_config.stop_read_counter && (read_affinity_counters(rs) < 0))
return;
evlist__for_each_entry(evsel_list, counter) {
if (stat_config.walltime_run_table)
stat_config.walltime_run[run_idx] = t1 - t0;
- if (interval) {
+ if (interval && stat_config.summary) {
stat_config.interval = 0;
- stat_config.summary = true;
+ stat_config.stop_read_counter = true;
init_stats(&walltime_nsecs_stats);
update_stats(&walltime_nsecs_stats, t1 - t0);
"Use with 'percore' event qualifier to show the event "
"counts of one hardware thread by sum up total hardware "
"threads of same physical core"),
+ OPT_BOOLEAN(0, "summary", &stat_config.summary,
+ "print summary for interval mode"),
#ifdef HAVE_LIBPFM
OPT_CALLBACK(0, "pfm-events", &evsel_list, "event",
"libpfm4 event selector. use 'perf list' to list available events",
goto out_delete_evlist;
}
+#ifdef HAVE_LIBBPF_SUPPORT
if (!top.record_opts.no_bpf_event) {
top.sb_evlist = evlist__new();
goto out_delete_evlist;
}
}
+#endif
if (perf_evlist__start_sb_thread(top.sb_evlist, target)) {
pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
return s;
/* allocate space for a new string */
- fixed = (char *) malloc(len + 1);
+ fixed = (char *) malloc(len + esc_count + 1);
if (!fixed)
return NULL;
perf_mmap__read_done(&md->core);
}
- if (count != expect) {
+ if (count != expect * evlist->core.nr_entries) {
pr_debug("BPF filter result incorrect, expected %d, got %d samples\n", expect, count);
goto out_delete_evlist;
}
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel));
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel));
{
.metric_expr = "1/m3",
.metric_name = "M3",
+},
+{
+ .name = NULL,
}
};
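The added all-NULL entry terminates the test metric table, so code walking it can stop at the sentinel instead of carrying a separate element count. A minimal sketch of that iteration pattern, assuming a pmu_event-style table with the fields abbreviated:

struct pmu_event {
	const char *name;
	const char *metric_expr;
	const char *metric_name;
};

static void walk(const struct pmu_event *table)
{
	const struct pmu_event *pe;

	/* stop at the zeroed sentinel entry */
	for (pe = table; pe->name || pe->metric_name; pe++) {
		/* process pe ... */
	}
}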
{
int nr_entries = evlist->core.nr_entries;
-single_entry:
if (perf_evlist__single_entry(evlist)) {
+single_entry: {
struct evsel *first = evlist__first(evlist);
return perf_evsel__hists_browse(first, nr_entries, help,
env, warn_lost_event,
annotation_opts);
}
+ }
if (symbol_conf.event_group) {
struct evsel *pos;
if (payload & BIT(EV_TLB_ACCESS))
decoder->record.type |= ARM_SPE_TLB_ACCESS;
- if ((idx == 1 || idx == 2 || idx == 3) &&
+ if ((idx == 2 || idx == 4 || idx == 8) &&
(payload & BIT(EV_LLC_MISS)))
decoder->record.type |= ARM_SPE_LLC_MISS;
- if ((idx == 1 || idx == 2 || idx == 3) &&
+ if ((idx == 2 || idx == 4 || idx == 8) &&
(payload & BIT(EV_LLC_ACCESS)))
decoder->record.type |= ARM_SPE_LLC_ACCESS;
- if ((idx == 1 || idx == 2 || idx == 3) &&
+ if ((idx == 2 || idx == 4 || idx == 8) &&
(payload & BIT(EV_REMOTE_ACCESS)))
decoder->record.type |= ARM_SPE_REMOTE_ACCESS;
attr.sample_type &= ~(u64)PERF_SAMPLE_ADDR;
}
- if (etm->synth_opts.last_branch)
+ if (etm->synth_opts.last_branch) {
attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
+ /*
+ * We don't use the hardware index, but the sample generation
+ * code uses the new format branch_stack with this field,
+ * so the event attributes must indicate that it's present.
+ */
+ attr.branch_sample_type |= PERF_SAMPLE_BRANCH_HW_INDEX;
+ }
if (etm->synth_opts.instructions) {
attr.config = PERF_COUNT_HW_INSTRUCTIONS;
if (pt->synth_opts.callchain)
attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
- if (pt->synth_opts.last_branch)
+ if (pt->synth_opts.last_branch) {
attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
+ /*
+ * We don't use the hardware index, but the sample generation
+ * code uses the new format branch_stack with this field,
+ * so the event attributes must indicate that it's present.
+ */
+ attr.branch_sample_type |= PERF_SAMPLE_BRANCH_HW_INDEX;
+ }
if (pt->synth_opts.instructions) {
attr.config = PERF_COUNT_HW_INSTRUCTIONS;
return 0;
}
-static int is_bpf_image(const char *name)
-{
- return strncmp(name, "bpf_trampoline_", sizeof("bpf_trampoline_") - 1) == 0 ||
- strncmp(name, "bpf_dispatcher_", sizeof("bpf_dispatcher_") - 1) == 0;
-}
-
static int machine__process_ksymbol_register(struct machine *machine,
union perf_event *event,
struct perf_sample *sample __maybe_unused)
return name && (strstr(name, "bpf_prog_") == name);
}
+bool __map__is_bpf_image(const struct map *map)
+{
+ const char *name;
+
+ if (map->dso->binary_type == DSO_BINARY_TYPE__BPF_IMAGE)
+ return true;
+
+ /*
+	 * If PERF_RECORD_KSYMBOL was not recorded, the dso will not have
+	 * the DSO_BINARY_TYPE__BPF_IMAGE type. In that case, guess the
+	 * type from the symbol name.
+ */
+ name = map->dso->short_name;
+ return name && is_bpf_image(name);
+}
+
bool __map__is_ool(const struct map *map)
{
return map->dso && map->dso->binary_type == DSO_BINARY_TYPE__OOL;
bool __map__is_kernel(const struct map *map);
bool __map__is_extra_kernel_map(const struct map *map);
bool __map__is_bpf_prog(const struct map *map);
+bool __map__is_bpf_image(const struct map *map);
bool __map__is_ool(const struct map *map);
static inline bool __map__is_kmodule(const struct map *map)
{
return !__map__is_kernel(map) && !__map__is_extra_kernel_map(map) &&
- !__map__is_bpf_prog(map) && !__map__is_ool(map);
+ !__map__is_bpf_prog(map) && !__map__is_ool(map) &&
+ !__map__is_bpf_image(map);
}
bool map__has_symbols(const struct map *map);
return !strcmp(name, ENTRY_TRAMPOLINE_NAME);
}
+static inline bool is_bpf_image(const char *name)
+{
+ return strncmp(name, "bpf_trampoline_", sizeof("bpf_trampoline_") - 1) == 0 ||
+ strncmp(name, "bpf_dispatcher_", sizeof("bpf_dispatcher_") - 1) == 0;
+}
#endif /* __PERF_MAP_H */
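With is_bpf_image() now in the header, the ksymbol-registration path in machine.c and the new __map__is_bpf_image() fallback share one name test. For reference, what the prefix check matches (names illustrative):

/* BPF trampoline and dispatcher images: */
is_bpf_image("bpf_trampoline_12345");	/* true  */
is_bpf_image("bpf_dispatcher_xdp");	/* true  */
is_bpf_image("bpf_prog_deadbeef_F");	/* false: programs are matched by __map__is_bpf_prog() */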
#include "util/evsel_config.h"
#include "util/event.h"
#include "util/pfm.h"
+#include "perf.h"
#define MAX_NAME_LEN 100
evsel = __add_event(list, &parse_state->idx, &attr, true,
get_config_name(head_config), pmu,
&config_terms, auto_merge_stats, NULL);
- if (evsel) {
- evsel->unit = info.unit;
- evsel->scale = info.scale;
- evsel->per_pkg = info.per_pkg;
- evsel->snapshot = info.snapshot;
- evsel->metric_expr = info.metric_expr;
- evsel->metric_name = info.metric_name;
- evsel->pmu_name = name ? strdup(name) : NULL;
- evsel->use_uncore_alias = use_uncore_alias;
- evsel->percore = config_term_percore(&evsel->config_terms);
- }
+ if (!evsel)
+ return -ENOMEM;
+
+ evsel->pmu_name = name ? strdup(name) : NULL;
+ evsel->use_uncore_alias = use_uncore_alias;
+ evsel->percore = config_term_percore(&evsel->config_terms);
- return evsel ? 0 : -ENOMEM;
+ if (parse_state->fake_pmu)
+ return 0;
+
+ evsel->unit = info.unit;
+ evsel->scale = info.scale;
+ evsel->per_pkg = info.per_pkg;
+ evsel->snapshot = info.snapshot;
+ evsel->metric_expr = info.metric_expr;
+ evsel->metric_name = info.metric_name;
+ return 0;
}
int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
if (*str == 'u') {
if (!exclude)
exclude = eu = ek = eh = 1;
+ if (!exclude_GH && !perf_guest)
+ eG = 1;
eu = 0;
} else if (*str == 'k') {
if (!exclude)
list = alloc_list();
ABORT_ON(!list);
err = parse_events_add_breakpoint(list, &parse_state->idx,
- (void *) $2, $6, $4);
+ (void *)(uintptr_t) $2, $6, $4);
free($6);
if (err) {
free(list);
list = alloc_list();
ABORT_ON(!list);
if (parse_events_add_breakpoint(list, &parse_state->idx,
- (void *) $2, NULL, $4)) {
+ (void *)(uintptr_t) $2, NULL, $4)) {
free(list);
YYABORT;
}
list = alloc_list();
ABORT_ON(!list);
err = parse_events_add_breakpoint(list, &parse_state->idx,
- (void *) $2, $4, 0);
+ (void *)(uintptr_t) $2, $4, 0);
free($4);
if (err) {
free(list);
list = alloc_list();
ABORT_ON(!list);
if (parse_events_add_breakpoint(list, &parse_state->idx,
- (void *) $2, NULL, 0)) {
+ (void *)(uintptr_t) $2, NULL, 0)) {
free(list);
YYABORT;
}
session->decomp_last = decomp;
}
- pr_debug("decomp (B): %ld to %ld\n", src_size, decomp_size);
+ pr_debug("decomp (B): %zd to %zd\n", src_size, decomp_size);
return 0;
}
cpu_map__id_to_die(id),
config->csv_output ? 0 : -3,
cpu_map__id_to_cpu(id), config->csv_sep);
- } else {
+ } else if (id > -1) {
fprintf(config->output, "CPU%*d%s",
config->csv_output ? 0 : -7,
evsel__cpus(evsel)->map[id],
bool summary;
bool metric_no_group;
bool metric_no_merge;
+ bool stop_read_counter;
FILE *output;
unsigned int interval;
unsigned int timeout;
"exit_idle",
"mwait_idle",
"mwait_idle_with_hints",
+ "mwait_idle_with_hints.constprop.0",
"poll_idle",
"ppc64_runlatch_off",
"pseries_dedicated_idle_sleep",
while (input.pos < input.size) {
ret = ZSTD_decompressStream(data->dstream, &output, &input);
if (ZSTD_isError(ret)) {
- pr_err("failed to decompress (B): %ld -> %ld, dst_size %ld : %s\n",
+ pr_err("failed to decompress (B): %zd -> %zd, dst_size %zd : %s\n",
src_size, output.size, dst_size, ZSTD_getErrorName(ret));
break;
}
test_tag
FEATURE-DUMP.libbpf
fixdep
-test_align
test_dev_cgroup
/test_progs*
test_tcpbpf_user
# Order correspond to 'make run_tests' order
TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \
- test_align test_verifier_log test_dev_cgroup test_tcpbpf_user \
+ test_verifier_log test_dev_cgroup test_tcpbpf_user \
test_sock test_btf test_sockmap get_cgroup_id_user test_socket_cookie \
test_cgroup_storage \
test_netcnt test_tcpnotify_user test_sock_fields test_sysctl \
/* Check getting link info */
info_len = sizeof(struct bpf_link_info) * 2;
bzero(&link_infos[i], info_len);
- link_infos[i].raw_tracepoint.tp_name = (__u64)&tp_name;
+ link_infos[i].raw_tracepoint.tp_name = ptr_to_u64(&tp_name);
link_infos[i].raw_tracepoint.tp_name_len = sizeof(tp_name);
err = bpf_obj_get_info_by_fd(bpf_link__fd(links[i]),
&link_infos[i], &info_len);
if (CHECK(err ||
link_infos[i].type != BPF_LINK_TYPE_RAW_TRACEPOINT ||
link_infos[i].prog_id != prog_infos[i].id ||
- link_infos[i].raw_tracepoint.tp_name != (__u64)&tp_name ||
- strcmp((char *)link_infos[i].raw_tracepoint.tp_name,
+ link_infos[i].raw_tracepoint.tp_name != ptr_to_u64(&tp_name) ||
+ strcmp(u64_to_ptr(link_infos[i].raw_tracepoint.tp_name),
"sys_enter") ||
info_len != sizeof(struct bpf_link_info),
"get-link-info(fd)",
link_infos[i].type, BPF_LINK_TYPE_RAW_TRACEPOINT,
link_infos[i].id,
link_infos[i].prog_id, prog_infos[i].id,
- (char *)link_infos[i].raw_tracepoint.tp_name,
+ (const char *)u64_to_ptr(link_infos[i].raw_tracepoint.tp_name),
"sys_enter"))
goto done;
static struct btf_dump_test_case {
const char *name;
const char *file;
+ bool known_ptr_sz;
struct btf_dump_opts opts;
} btf_dump_test_cases[] = {
- {"btf_dump: syntax", "btf_dump_test_case_syntax", {}},
- {"btf_dump: ordering", "btf_dump_test_case_ordering", {}},
- {"btf_dump: padding", "btf_dump_test_case_padding", {}},
- {"btf_dump: packing", "btf_dump_test_case_packing", {}},
- {"btf_dump: bitfields", "btf_dump_test_case_bitfields", {}},
- {"btf_dump: multidim", "btf_dump_test_case_multidim", {}},
- {"btf_dump: namespacing", "btf_dump_test_case_namespacing", {}},
+ {"btf_dump: syntax", "btf_dump_test_case_syntax", true, {}},
+ {"btf_dump: ordering", "btf_dump_test_case_ordering", false, {}},
+ {"btf_dump: padding", "btf_dump_test_case_padding", true, {}},
+ {"btf_dump: packing", "btf_dump_test_case_packing", true, {}},
+ {"btf_dump: bitfields", "btf_dump_test_case_bitfields", true, {}},
+ {"btf_dump: multidim", "btf_dump_test_case_multidim", false, {}},
+ {"btf_dump: namespacing", "btf_dump_test_case_namespacing", false, {}},
};
static int btf_dump_all_types(const struct btf *btf,
goto done;
}
+	/* Tests without t->known_ptr_sz have no "long" or "unsigned long"
+	 * types, so the correct pointer size can't be inferred; force it
+	 * to 8. Tests that do have them should infer 8 regardless of host
+	 * architecture, because the BPF target is always 64-bit.
+	 */
+ if (!t->known_ptr_sz) {
+ btf__set_pointer_size(btf, 8);
+ } else {
+ CHECK(btf__pointer_size(btf) != 8, "ptr_sz", "exp %d, got %zu\n",
+ 8, btf__pointer_size(btf));
+ }
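+	/*
+	 * Hedged usage sketch of the libbpf calls exercised above: when the
+	 * BTF describes a BPF object rather than the host, force the 64-bit
+	 * pointer size before dumping, e.g.:
+	 *
+	 *   struct btf *btf = btf__parse_elf("prog.bpf.o", NULL); // hypothetical file
+	 *   if (btf__pointer_size(btf) != 8)
+	 *           btf__set_pointer_size(btf, 8); // BPF target is 64-bit
+	 */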
+
snprintf(out_file, sizeof(out_file), "/tmp/%s.output.XXXXXX", t->file);
fd = mkstemp(out_file);
if (CHECK(fd < 0, "create_tmp", "failed to create file: %d\n", fd)) {
exp = (uint64_t *)&t->data;
for (j = 0; j < n; j++) {
CHECK(got[j] != exp[j], "check_res",
- "result #%d: expected %lx, but got %lx\n",
- j, exp[j], got[j]);
+ "result #%d: expected %llx, but got %llx\n",
+ j, (__u64)exp[j], (__u64)got[j]);
}
cleanup:
test_core_extern__destroy(skel);
.union_sz = sizeof(((type *)0)->union_field), \
.arr_sz = sizeof(((type *)0)->arr_field), \
.arr_elem_sz = sizeof(((type *)0)->arr_field[0]), \
- .ptr_sz = sizeof(((type *)0)->ptr_field), \
- .enum_sz = sizeof(((type *)0)->enum_field), \
+ .ptr_sz = 8, /* always 8-byte pointer for BPF */ \
+ .enum_sz = sizeof(((type *)0)->enum_field), \
}
#define SIZE_CASE(name) { \
.sb4 = -1,
.sb20 = -0x17654321,
.u32 = 0xBEEF,
- .s32 = -0x3FEDCBA987654321,
+ .s32 = -0x3FEDCBA987654321LL,
}),
BITFIELDS_CASE(bitfields___bitfield_vs_int, {
- .ub1 = 0xFEDCBA9876543210,
+ .ub1 = 0xFEDCBA9876543210LL,
.ub2 = 0xA6,
- .ub7 = -0x7EDCBA987654321,
- .sb4 = -0x6123456789ABCDE,
- .sb20 = 0xD00D,
+ .ub7 = -0x7EDCBA987654321LL,
+ .sb4 = -0x6123456789ABCDELL,
+ .sb20 = 0xD00DLL,
.u32 = -0x76543,
- .s32 = 0x0ADEADBEEFBADB0B,
+ .s32 = 0x0ADEADBEEFBADB0BLL,
}),
BITFIELDS_CASE(bitfields___just_big_enough, {
- .ub1 = 0xF,
- .ub2 = 0x0812345678FEDCBA,
+ .ub1 = 0xFLL,
+ .ub2 = 0x0812345678FEDCBALL,
}),
BITFIELDS_ERR_CASE(bitfields___err_too_big_bitfield),
__u32 duration = 0, retval;
struct bpf_map *data_map;
const int zero = 0;
- u64 *result = NULL;
+ __u64 *result = NULL;
err = bpf_prog_load(target_obj_file, BPF_PROG_TYPE_UNSPEC,
&pkt_obj, &pkt_fd);
link = calloc(sizeof(struct bpf_link *), prog_cnt);
prog = calloc(sizeof(struct bpf_program *), prog_cnt);
- result = malloc((prog_cnt + 32 /* spare */) * sizeof(u64));
+ result = malloc((prog_cnt + 32 /* spare */) * sizeof(__u64));
if (CHECK(!link || !prog || !result, "alloc_memory",
"failed to alloc memory"))
goto close_prog;
goto close_prog;
for (i = 0; i < prog_cnt; i++)
- if (CHECK(result[i] != 1, "result", "fexit_bpf2bpf failed err %ld\n",
+ if (CHECK(result[i] != 1, "result", "fexit_bpf2bpf failed err %llu\n",
result[i]))
goto close_prog;
CHECK_ATTR(tattr.data_size_out != sizeof(flow_keys) ||
err || tattr.retval != 1,
tests[i].name,
- "err %d errno %d retval %d duration %d size %u/%lu\n",
+ "err %d errno %d retval %d duration %d size %u/%zu\n",
err, errno, tattr.retval, tattr.duration,
tattr.data_size_out, sizeof(flow_keys));
CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);
static void test_global_data_number(struct bpf_object *obj, __u32 duration)
{
int i, err, map_fd;
- uint64_t num;
+ __u64 num;
map_fd = bpf_find_map(__func__, obj, "result_number");
if (CHECK_FAIL(map_fd < 0))
struct {
char *name;
uint32_t key;
- uint64_t num;
+ __u64 num;
} tests[] = {
{ "relocate .bss reference", 0, 0 },
{ "relocate .data reference", 1, 42 },
for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
err = bpf_map_lookup_elem(map_fd, &tests[i].key, &num);
CHECK(err || num != tests[i].num, tests[i].name,
- "err %d result %lx expected %lx\n",
+ "err %d result %llx expected %llx\n",
err, num, tests[i].num);
}
}
const long page_size = sysconf(_SC_PAGE_SIZE);
int err, duration = 0, i, data_map_fd, data_map_id, tmp_fd, rdmap_fd;
struct bpf_map *data_map, *bss_map;
- void *bss_mmaped = NULL, *map_mmaped = NULL, *tmp1, *tmp2;
+ void *bss_mmaped = NULL, *map_mmaped = NULL, *tmp0, *tmp1, *tmp2;
struct test_mmap__bss *bss_data;
struct bpf_map_info map_info;
__u32 map_info_sz = sizeof(map_info);
/* check some more advanced mmap() manipulations */
+ tmp0 = mmap(NULL, 4 * page_size, PROT_READ, MAP_SHARED | MAP_ANONYMOUS,
+ -1, 0);
+ if (CHECK(tmp0 == MAP_FAILED, "adv_mmap0", "errno %d\n", errno))
+ goto cleanup;
+
/* map all but last page: pages 1-3 mapped */
- tmp1 = mmap(NULL, 3 * page_size, PROT_READ, MAP_SHARED,
+ tmp1 = mmap(tmp0, 3 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED,
data_map_fd, 0);
- if (CHECK(tmp1 == MAP_FAILED, "adv_mmap1", "errno %d\n", errno))
+ if (CHECK(tmp0 != tmp1, "adv_mmap1", "tmp0: %p, tmp1: %p\n", tmp0, tmp1)) {
+ munmap(tmp0, 4 * page_size);
goto cleanup;
+ }
/* unmap second page: pages 1, 3 mapped */
err = munmap(tmp1 + page_size, page_size);
if (CHECK(err, "adv_mmap2", "errno %d\n", errno)) {
- munmap(tmp1, map_sz);
+ munmap(tmp1, 4 * page_size);
goto cleanup;
}
MAP_SHARED | MAP_FIXED, data_map_fd, 0);
if (CHECK(tmp2 == MAP_FAILED, "adv_mmap3", "errno %d\n", errno)) {
munmap(tmp1, page_size);
- munmap(tmp1 + 2*page_size, page_size);
+ munmap(tmp1 + 2*page_size, 2 * page_size);
goto cleanup;
}
CHECK(tmp1 + page_size != tmp2, "adv_mmap4",
tmp2 = mmap(tmp1, 4 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED,
data_map_fd, 0);
if (CHECK(tmp2 == MAP_FAILED, "adv_mmap5", "errno %d\n", errno)) {
- munmap(tmp1, 3 * page_size); /* unmap page 1 */
+ munmap(tmp1, 4 * page_size); /* unmap page 1 */
goto cleanup;
}
CHECK(tmp1 != tmp2, "adv_mmap6", "tmp1: %p, tmp2: %p\n", tmp1, tmp2);
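/*
 * The reserve-then-MAP_FIXED pattern exercised above, as a standalone
 * sketch (editor's illustration; map_fd and page_size are assumed):
 *
 *   // reserve a contiguous range with a throwaway anonymous mapping
 *   void *base = mmap(NULL, 4 * page_size, PROT_READ,
 *                     MAP_SHARED | MAP_ANONYMOUS, -1, 0);
 *   // overlay the real mapping; MAP_FIXED atomically replaces the
 *   // reservation, so later mmap(base + N * page_size, ..., MAP_FIXED, ...)
 *   // calls cannot collide with unrelated allocations
 *   void *p = mmap(base, 3 * page_size, PROT_READ,
 *                  MAP_SHARED | MAP_FIXED, map_fd, 0);
 */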
"err %d errno %d retval %d\n", err, errno, tattr.retval);
CHECK_ATTR(tattr.data_size_out != sizeof(pkt_v4), "data_size_out",
- "incorrect output size, want %lu have %u\n",
+ "incorrect output size, want %zu have %u\n",
sizeof(pkt_v4), tattr.data_size_out);
CHECK_ATTR(buf[5] != 0, "overflow",
v6->sin6_addr.s6_addr[10] = 0xff;
v6->sin6_addr.s6_addr[11] = 0xff;
memcpy(&v6->sin6_addr.s6_addr[12], &v4.sin_addr.s_addr, 4);
+ memset(&v6->sin6_addr.s6_addr[0], 0, 10);
}
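/*
 * Editor's note: the code above builds an IPv4-mapped IPv6 address
 * (::ffff:a.b.c.d, RFC 4291): bytes 0-9 zero, bytes 10-11 0xff, bytes
 * 12-15 the IPv4 address. The added memset() zeroes the leading 80 bits
 * explicitly, presumably because the sockaddr may hold stale data.
 */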
static int udp_recv_send(int server_fd)
CHECK_ATTR(tattr.ctx_size_out != sizeof(skb),
"ctx_size_out",
- "incorrect output size, want %lu have %u\n",
+ "incorrect output size, want %zu have %u\n",
sizeof(skb), tattr.ctx_size_out);
for (i = 0; i < 5; i++)
log_buf = va_arg(args, char *);
if (!log_buf)
goto out;
- if (strstr(log_buf, err_str) == 0)
+ if (err_str && strstr(log_buf, err_str) == 0)
found = true;
out:
printf(format, log_buf);
CHECK_VAL(bss->payload1_len2, size2);
CHECK_VAL(bss->total1, size1 + size2);
CHECK(memcmp(bss->payload1, exp_str, size1 + size2), "content_check",
- "doesn't match!");
+ "doesn't match!\n");
CHECK_VAL(data->payload2_len1, size1);
CHECK_VAL(data->payload2_len2, size2);
CHECK_VAL(data->total2, size1 + size2);
CHECK(memcmp(data->payload2, exp_str, size1 + size2), "content_check",
- "doesn't match!");
+ "doesn't match!\n");
CHECK_VAL(data->payload3_len1, size1);
CHECK_VAL(data->payload3_len2, size2);
CHECK_VAL(data->total3, size1 + size2);
CHECK(memcmp(data->payload3, exp_str, size1 + size2), "content_check",
- "doesn't match!");
+ "doesn't match!\n");
CHECK_VAL(data->payload4_len1, size1);
CHECK_VAL(data->payload4_len2, size2);
CHECK_VAL(data->total4, size1 + size2);
CHECK(memcmp(data->payload4, exp_str, size1 + size2), "content_check",
- "doesn't match!");
+ "doesn't match!\n");
cleanup:
test_varlen__destroy(skel);
}
#include <stdint.h>
#include <stdbool.h>
+
+void preserce_ptr_sz_fn(long x) {}
+
+#define __bpf_aligned __attribute__((aligned(8)))
+
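+/*
+ * Editor's note (inferred from this patch): these mirrors are compiled
+ * for the host, and 32-bit hosts may align pointers and 64-bit fields to
+ * 4 bytes while the BPF target is always 64-bit, so field offsets would
+ * diverge. Forcing 8-byte alignment keeps the host BTF layout matching
+ * BPF, e.g.:
+ *
+ *   struct s { char a; void *d __bpf_aligned; };
+ *   // offsetof(struct s, d) == 8 on 32- and 64-bit hosts alike
+ */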
/*
* KERNEL
*/
char a;
int b;
enum core_reloc_primitives_enum c;
- void *d;
- int (*f)(const char *);
+ void *d __bpf_aligned;
+ int (*f)(const char *) __bpf_aligned;
};
struct core_reloc_primitives___diff_enum_def {
char a;
int b;
- void *d;
- int (*f)(const char *);
+ void *d __bpf_aligned;
+ int (*f)(const char *) __bpf_aligned;
enum {
X = 100,
Y = 200,
- } c; /* inline enum def with differing set of values */
+ } c __bpf_aligned; /* inline enum def with differing set of values */
};
struct core_reloc_primitives___diff_func_proto {
- void (*f)(int); /* incompatible function prototype */
- void *d;
- enum core_reloc_primitives_enum c;
+ void (*f)(int) __bpf_aligned; /* incompatible function prototype */
+ void *d __bpf_aligned;
+ enum core_reloc_primitives_enum c __bpf_aligned;
int b;
char a;
};
struct core_reloc_primitives___diff_ptr_type {
- const char * const d; /* different pointee type + modifiers */
- char a;
+ const char * const d __bpf_aligned; /* different pointee type + modifiers */
+ char a __bpf_aligned;
int b;
enum core_reloc_primitives_enum c;
- int (*f)(const char *);
+ int (*f)(const char *) __bpf_aligned;
};
struct core_reloc_primitives___err_non_enum {
char a[1];
int b;
int c; /* int instead of enum */
- void *d;
- int (*f)(const char *);
+ void *d __bpf_aligned;
+ int (*f)(const char *) __bpf_aligned;
};
struct core_reloc_primitives___err_non_int {
char a[1];
- int *b; /* ptr instead of int */
- enum core_reloc_primitives_enum c;
- void *d;
- int (*f)(const char *);
+ int *b __bpf_aligned; /* ptr instead of int */
+ enum core_reloc_primitives_enum c __bpf_aligned;
+ void *d __bpf_aligned;
+ int (*f)(const char *) __bpf_aligned;
};
struct core_reloc_primitives___err_non_ptr {
int b;
enum core_reloc_primitives_enum c;
int d; /* int instead of ptr */
- int (*f)(const char *);
+ int (*f)(const char *) __bpf_aligned;
};
/*
};
typedef const int int_t;
-typedef const char *char_ptr_t;
+typedef const char *char_ptr_t __bpf_aligned;
typedef const int arr_t[7];
struct core_reloc_mods_substruct {
struct core_reloc_mods {
int a;
int_t b;
- char *c;
+ char *c __bpf_aligned;
char_ptr_t d;
- int e[3];
+ int e[3] __bpf_aligned;
arr_t f;
struct core_reloc_mods_substruct g;
core_reloc_mods_substruct_t h;
struct core_reloc_mods___mod_swap {
int b;
int_t a;
- char *d;
+ char *d __bpf_aligned;
char_ptr_t c;
- int f[3];
+ int f[3] __bpf_aligned;
arr_t e;
struct {
int y;
typedef arr2_t arr3_t;
typedef arr3_t arr4_t;
-typedef const char * const volatile fancy_char_ptr_t;
+typedef const char * const volatile fancy_char_ptr_t __bpf_aligned;
typedef core_reloc_mods_substruct_t core_reloc_mods_substruct_tt;
arr4_t e;
fancy_char_ptr_t d;
fancy_char_ptr_t c;
- int3_t b;
+ int3_t b __bpf_aligned;
int3_t a;
};
int8_t sb4: 1; /* 4 -> 1 */
int32_t sb20: 30; /* 20 -> 30 */
/* non-bitfields */
- uint16_t u32; /* 32 -> 16 */
- int64_t s32; /* 32 -> 64 */
+ uint16_t u32; /* 32 -> 16 */
+ int64_t s32 __bpf_aligned; /* 32 -> 64 */
};
/* turn bitfield into non-bitfield and vice versa */
struct core_reloc_bitfields___bitfield_vs_int {
uint64_t ub1; /* 3 -> 64 non-bitfield */
uint8_t ub2; /* 20 -> 8 non-bitfield */
- int64_t ub7; /* 7 -> 64 non-bitfield signed */
- int64_t sb4; /* 4 -> 64 non-bitfield signed */
- uint64_t sb20; /* 20 -> 16 non-bitfield unsigned */
- int32_t u32: 20; /* 32 non-bitfield -> 20 bitfield */
- uint64_t s32: 60; /* 32 non-bitfield -> 60 bitfield */
+ int64_t ub7 __bpf_aligned; /* 7 -> 64 non-bitfield signed */
+ int64_t sb4 __bpf_aligned; /* 4 -> 64 non-bitfield signed */
+ uint64_t sb20 __bpf_aligned; /* 20 -> 16 non-bitfield unsigned */
+ int32_t u32: 20; /* 32 non-bitfield -> 20 bitfield */
+ uint64_t s32: 60 __bpf_aligned; /* 32 non-bitfield -> 60 bitfield */
};
struct core_reloc_bitfields___just_big_enough {
int bpf_testcb(struct bpf_sock_ops *skops)
{
char header[sizeof(struct ipv6hdr) + sizeof(struct tcphdr)];
+ struct bpf_sock_ops *reuse = skops;
struct tcphdr *thdr;
int good_call_rv = 0;
int bad_call_rv = 0;
int v = 0;
int op;
+	/* Test reading fields in bpf_sock_ops using single- and two-register accesses */
+ asm volatile (
+ "%[reuse] = *(u32 *)(%[reuse] +96)"
+ : [reuse] "+r"(reuse)
+ :);
+
+ asm volatile (
+ "%[op] = *(u32 *)(%[skops] +96)"
+ : [op] "+r"(op)
+ : [skops] "r"(skops)
+ :);
+
+ asm volatile (
+ "r9 = %[skops];\n"
+ "r8 = *(u32 *)(r9 +164);\n"
+ "*(u32 *)(r9 +164) = r8;\n"
+ :: [skops] "r"(skops)
+ : "r9", "r8");
+
+ asm volatile (
+ "r1 = %[skops];\n"
+ "r1 = *(u64 *)(r1 +184);\n"
+ "if r1 == 0 goto +1;\n"
+ "r1 = *(u32 *)(r1 +4);\n"
+ :: [skops] "r"(skops):"r1");
+
+ asm volatile (
+ "r9 = %[skops];\n"
+ "r9 = *(u64 *)(r9 +184);\n"
+ "if r9 == 0 goto +1;\n"
+ "r9 = *(u32 *)(r9 +4);\n"
+ :: [skops] "r"(skops):"r9");
+
+ asm volatile (
+ "r1 = %[skops];\n"
+ "r2 = *(u64 *)(r1 +184);\n"
+ "if r2 == 0 goto +1;\n"
+ "r2 = *(u32 *)(r2 +4);\n"
+ :: [skops] "r"(skops):"r1", "r2");
+
op = (int) skops->op;
update_event_map(op);
bool capture = false;
/* .bss */
-long payload1_len1 = 0;
-long payload1_len2 = 0;
-long total1 = 0;
+__u64 payload1_len1 = 0;
+__u64 payload1_len2 = 0;
+__u64 total1 = 0;
char payload1[MAX_LEN + MAX_LEN] = {};
/* .data */
info_garbage.garbage = 0;
err = bpf_obj_get_info_by_fd(btf_fd, info, &info_len);
if (CHECK(err || info_len != sizeof(*info),
- "err:%d errno:%d info_len:%u sizeof(*info):%lu",
+ "err:%d errno:%d info_len:%u sizeof(*info):%zu",
err, errno, info_len, sizeof(*info))) {
err = -1;
goto done;
if (CHECK(err || !info.id || info_len != sizeof(info) ||
info.btf_size != raw_btf_size ||
(ret = memcmp(raw_btf, user_btf, expected_nbytes)),
- "err:%d errno:%d info.id:%u info_len:%u sizeof(info):%lu raw_btf_size:%u info.btf_size:%u expected_nbytes:%u memcmp:%d",
+ "err:%d errno:%d info.id:%u info_len:%u sizeof(info):%zu raw_btf_size:%u info.btf_size:%u expected_nbytes:%u memcmp:%d",
err, errno, info.id, info_len, sizeof(info),
raw_btf_size, info.btf_size, expected_nbytes, ret)) {
err = -1;
nexpected_line = snprintf(expected_line, line_size,
"%s%u: {%u,0,%d,0x%x,0x%x,0x%x,"
- "{%lu|[%u,%u,%u,%u,%u,%u,%u,%u]},%s,"
+ "{%llu|[%u,%u,%u,%u,%u,%u,%u,%u]},%s,"
"%u,0x%x,[[%d,%d],[%d,%d]]}\n",
percpu_map ? "\tcpu" : "",
percpu_map ? cpu : next_key,
v->unused_bits2a,
v->bits28,
v->unused_bits2b,
- v->ui64,
+ (__u64)v->ui64,
v->ui8a[0], v->ui8a[1],
v->ui8a[2], v->ui8a[3],
v->ui8a[4], v->ui8a[5],
pid_t pid[tasks];
int i;
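+	/*
+	 * Editor's note: flushing stdio before fork() keeps buffered output
+	 * from being replayed in every child, a classic fork()+stdio
+	 * pitfall.
+	 */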
+ fflush(stdout);
+
for (i = 0; i < tasks; i++) {
pid[i] = fork();
if (pid[i] == 0) {
if (!flavor)
return 0;
flavor++;
- fprintf(stdout, "Switching to flavor '%s' subdirectory...\n", flavor);
+ if (env.verbosity > VERBOSE_NONE)
+ fprintf(stdout, "Switching to flavor '%s' subdirectory...\n", flavor);
+
return chdir(flavor);
}
return (__u64) (unsigned long) ptr;
}
+static inline void *u64_to_ptr(__u64 ptr)
+{
+ return (void *) (unsigned long) ptr;
+}
+
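+/*
+ * Round-trip sketch (editor's illustration): kernel UAPI structs carry
+ * user pointers as __u64, so tests convert in both directions:
+ *
+ *   info.raw_tracepoint.tp_name = ptr_to_u64(tp_name);
+ *   const char *name = u64_to_ptr(info.raw_tracepoint.tp_name);
+ *
+ * The intermediate (unsigned long) cast matches the integer width to the
+ * pointer width, avoiding conversion warnings on 32-bit builds.
+ */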
int bpf_find_map(const char *test, struct bpf_object *obj, const char *name);
int compare_map_keys(int map1_fd, int map2_fd);
int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len);
/* Single step test, covers 2 basic instructions and 2 emulated */
asm volatile("ss_start: "
- "xor %%rax,%%rax\n\t"
+ "xor %%eax,%%eax\n\t"
"cpuid\n\t"
"movl $0x1a0,%%ecx\n\t"
"rdmsr\n\t"
- : : : "rax", "ecx");
+ : : : "eax", "ebx", "ecx", "edx");
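+	/*
+	 * Editor's note: CPUID writes all of EAX/EBX/ECX/EDX, so all four
+	 * must appear in the clobber list; "xor %eax,%eax" zeroes RAX via
+	 * the implicit 32-bit zero-extension and is the idiomatic form.
+	 */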
/* DR6.BD test */
asm volatile("bd_start: mov %%dr0, %%rax" : : : "rax");
;;
r[12]) ip netns exec $ns sysctl -q -w net.ipv4.ip_forward=1
ip netns exec $ns sysctl -q -w net.ipv4.conf.all.send_redirects=1
+ ip netns exec $ns sysctl -q -w net.ipv4.conf.default.rp_filter=0
+ ip netns exec $ns sysctl -q -w net.ipv4.conf.all.rp_filter=0
ip netns exec $ns sysctl -q -w net.ipv6.conf.all.forwarding=1
ip netns exec $ns sysctl -q -w net.ipv6.route.mtu_expires=10
# SPDX-License-Identifier: GPL-2.0
#
# This tests basic flowtable functionality.
-# Creates following topology:
+# Creates the following default topology:
#
# Originator (MTU 9000) <-Router1-> MTU 1500 <-Router2-> Responder (MTU 2000)
# Router1 is the one doing flow offloading, Router2 has no special
# purpose other than having a link that is smaller than either the
# Originator's or the Responder's, i.e. the announced TCPMSS values are
# too large and will still result in fragmentation and/or PMTU discovery.
+#
+# You can test with different Originator/Link/Responder MTUs, e.g.:
+# nft_flowtable.sh -o8000 -l1500 -r2000
+#
+
# Kselftest framework requirement - SKIP code is 4.
ksft_skip=4
log_netns=$(sysctl -n net.netfilter.nf_log_all_netns)
-nft --version > /dev/null 2>&1
-if [ $? -ne 0 ];then
- echo "SKIP: Could not run test without nft tool"
- exit $ksft_skip
-fi
-
-ip -Version > /dev/null 2>&1
-if [ $? -ne 0 ];then
- echo "SKIP: Could not run test without ip tool"
- exit $ksft_skip
-fi
-
-which nc > /dev/null 2>&1
-if [ $? -ne 0 ];then
- echo "SKIP: Could not run test without nc (netcat)"
- exit $ksft_skip
-fi
+checktool() {
+ if ! $1 > /dev/null 2>&1; then
+ echo "SKIP: Could not $2"
+ exit $ksft_skip
+ fi
+}
-ip netns add nsr1
-if [ $? -ne 0 ];then
- echo "SKIP: Could not create net namespace"
- exit $ksft_skip
-fi
+checktool "nft --version" "run test without nft tool"
+checktool "ip -Version" "run test without ip tool"
+checktool "which nc" "run test without nc (netcat)"
+checktool "ip netns add nsr1" "create net namespace"
ip netns add ns1
ip netns add ns2
# ns2 is going via nsr2 with a smaller mtu, so that TCPMSS announced by both peers
# is NOT the lowest link mtu.
-ip -net nsr1 link set veth0 mtu 9000
-ip -net ns1 link set eth0 mtu 9000
+omtu=9000
+lmtu=1500
+rmtu=2000
+
+usage(){
+ echo "nft_flowtable.sh [OPTIONS]"
+ echo
+ echo "MTU options"
+ echo " -o originator"
+ echo " -l link"
+ echo " -r responder"
+ exit 1
+}
+
+while getopts "o:l:r:" o
+do
+ case $o in
+ o) omtu=$OPTARG;;
+ l) lmtu=$OPTARG;;
+ r) rmtu=$OPTARG;;
+ *) usage;;
+ esac
+done
+
+if ! ip -net nsr1 link set veth0 mtu $omtu; then
+ exit 1
+fi
+
+ip -net ns1 link set eth0 mtu $omtu
+
+if ! ip -net nsr2 link set veth1 mtu $rmtu; then
+ exit 1
+fi
-ip -net nsr2 link set veth1 mtu 2000
-ip -net ns2 link set eth0 mtu 2000
+ip -net ns2 link set eth0 mtu $rmtu
# transfer-net between nsr1 and nsr2.
# these addresses are not used for connections.
ip -net ns$i route add default via 10.0.$i.1
ip -net ns$i addr add dead:$i::99/64 dev eth0
ip -net ns$i route add default via dead:$i::1
- ip netns exec ns$i sysctl net.ipv4.tcp_no_metrics_save=1 > /dev/null
+ if ! ip netns exec ns$i sysctl net.ipv4.tcp_no_metrics_save=1 > /dev/null; then
+ echo "ERROR: Check Originator/Responder values (problem during address addition)"
+ exit 1
+ fi
# don't set ip DF bit for first two tests
ip netns exec ns$i sysctl net.ipv4.ip_no_pmtu_disc=1 > /dev/null
# as PMTUd is off.
# This rule is deleted for the last test, when we expect PMTUd
# to kick in and ensure all packets meet mtu requirements.
- meta length gt 1500 accept comment something-to-grep-for
+ meta length gt $lmtu accept comment something-to-grep-for
# next line blocks connection w.o. working offload.
# we only do this for reverse dir, because we expect packets to
fi
# test basic connectivity
-ip netns exec ns1 ping -c 1 -q 10.0.2.99 > /dev/null
-if [ $? -ne 0 ];then
+if ! ip netns exec ns1 ping -c 1 -q 10.0.2.99 > /dev/null; then
echo "ERROR: ns1 cannot reach ns2" 1>&2
bash
exit 1
fi
-ip netns exec ns2 ping -c 1 -q 10.0.1.99 > /dev/null
-if [ $? -ne 0 ];then
+if ! ip netns exec ns2 ping -c 1 -q 10.0.1.99 > /dev/null; then
echo "ERROR: ns2 cannot reach ns1" 1>&2
exit 1
fi
make_file()
{
name=$1
- who=$2
SIZE=$((RANDOM % (1024 * 8)))
TSIZE=$((SIZE * 1024))
out=$2
what=$3
- cmp "$in" "$out" > /dev/null 2>&1
- if [ $? -ne 0 ] ;then
+ if ! cmp "$in" "$out" > /dev/null 2>&1; then
echo "FAIL: file mismatch for $what" 1>&2
ls -l "$in"
ls -l "$out"
sleep 3
- kill $lpid
- kill $cpid
+	if ps -p $lpid > /dev/null; then
+		kill $lpid
+	fi
+
+	if ps -p $cpid > /dev/null; then
+		kill $cpid
+	fi
+
wait
- check_transfer "$ns1in" "$ns2out" "ns1 -> ns2"
- if [ $? -ne 0 ];then
+ if ! check_transfer "$ns1in" "$ns2out" "ns1 -> ns2"; then
lret=1
fi
- check_transfer "$ns2in" "$ns1out" "ns1 <- ns2"
- if [ $? -ne 0 ];then
+ if ! check_transfer "$ns2in" "$ns1out" "ns1 <- ns2"; then
lret=1
fi
return $lret
}
-make_file "$ns1in" "ns1"
-make_file "$ns2in" "ns2"
+make_file "$ns1in"
+make_file "$ns2in"
# First test:
# No PMTU discovery, nsr1 is expected to fragment packets from ns1 to ns2 as needed.
-test_tcp_forwarding ns1 ns2
-if [ $? -eq 0 ] ;then
+if test_tcp_forwarding ns1 ns2; then
echo "PASS: flow offloaded for ns1/ns2"
else
echo "FAIL: flow offload for ns1/ns2:" 1>&2
}
EOF
-test_tcp_forwarding_nat ns1 ns2
-
-if [ $? -eq 0 ] ;then
+if test_tcp_forwarding_nat ns1 ns2; then
echo "PASS: flow offloaded for ns1/ns2 with NAT"
else
echo "FAIL: flow offload for ns1/ns2 with NAT" 1>&2
# Same as second test, but with PMTU discovery enabled.
handle=$(ip netns exec nsr1 nft -a list table inet filter | grep something-to-grep-for | cut -d \# -f 2)
-ip netns exec nsr1 nft delete rule inet filter forward $handle
-if [ $? -ne 0 ] ;then
+if ! ip netns exec nsr1 nft delete rule inet filter forward $handle; then
echo "FAIL: Could not delete large-packet accept rule"
exit 1
fi
ip netns exec ns1 sysctl net.ipv4.ip_no_pmtu_disc=0 > /dev/null
ip netns exec ns2 sysctl net.ipv4.ip_no_pmtu_disc=0 > /dev/null
-test_tcp_forwarding_nat ns1 ns2
-if [ $? -eq 0 ] ;then
+if test_tcp_forwarding_nat ns1 ns2; then
echo "PASS: flow offloaded for ns1/ns2 with NAT and pmtu discovery"
else
echo "FAIL: flow offload for ns1/ns2 with NAT and pmtu discovery" 1>&2
ip -net ns2 route add default via 10.0.2.1
ip -net ns2 route add default via dead:2::1
-test_tcp_forwarding ns1 ns2
-if [ $? -eq 0 ] ;then
+if test_tcp_forwarding ns1 ns2; then
echo "PASS: ipsec tunnel mode for ns1/ns2"
else
echo "FAIL: ipsec tunnel mode for ns1/ns2"
hugetlb_vs_thp_test
subpage_prot
tempfile
+prot_sao
segv_errors
wild_bctr
large_vm_fork_separation
noarg:
$(MAKE) -C ../
-TEST_GEN_PROGS := hugetlb_vs_thp_test subpage_prot segv_errors wild_bctr \
+TEST_GEN_PROGS := hugetlb_vs_thp_test subpage_prot prot_sao segv_errors wild_bctr \
large_vm_fork_separation bad_accesses pkey_exec_prot \
pkey_siginfo stack_expansion_signal stack_expansion_ldst
$(TEST_GEN_PROGS): ../harness.c ../utils.c
+$(OUTPUT)/prot_sao: ../utils.c
+
$(OUTPUT)/wild_bctr: CFLAGS += -m64
$(OUTPUT)/large_vm_fork_separation: CFLAGS += -m64
$(OUTPUT)/bad_accesses: CFLAGS += -m64
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2016, Michael Ellerman, IBM Corp.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+
+#include <asm/cputable.h>
+
+#include "utils.h"
+
+#define SIZE (64 * 1024)
+
+int test_prot_sao(void)
+{
+ char *p;
+
+	/* SAO was introduced in PowerISA 2.06 and removed in 3.1 */
+ SKIP_IF(!have_hwcap(PPC_FEATURE_ARCH_2_06) ||
+ have_hwcap2(PPC_FEATURE2_ARCH_3_1));
+
+ /*
+ * Ensure we can ask for PROT_SAO.
+ * We can't really verify that it does the right thing, but at least we
+ * confirm the kernel will accept it.
+ */
+ p = mmap(NULL, SIZE, PROT_READ | PROT_WRITE | PROT_SAO,
+ MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+ FAIL_IF(p == MAP_FAILED);
+
+ /* Write to the mapping, to at least cause a fault */
+ memset(p, 0xaa, SIZE);
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(test_prot_sao, "prot-sao");
+}
return 0;
}
+/*
+ * Debuggers expect ptrace() to be able to peek at the vsyscall page.
+ * Use process_vm_readv() as a proxy for ptrace() to test this. We
+ * want it to work in the vsyscall=emulate case and to fail in the
+ * vsyscall=xonly case.
+ *
+ * It's worth noting that this ABI is a bit nutty. write(2) can't
+ * read from the vsyscall page on any kernel version or mode. The
+ * fact that ptrace() ever worked was a nice courtesy of old kernels,
+ * but the code to support it is fairly gross.
+ */
static int test_process_vm_readv(void)
{
#ifdef __x86_64__
remote.iov_len = 4096;
ret = process_vm_readv(getpid(), &local, 1, &remote, 1, 0);
if (ret != 4096) {
- printf("[OK]\tprocess_vm_readv() failed (ret = %d, errno = %d)\n", ret, errno);
- return 0;
+ /*
+ * We expect process_vm_readv() to work if and only if the
+ * vsyscall page is readable.
+ */
+ printf("[%s]\tprocess_vm_readv() failed (ret = %d, errno = %d)\n", vsyscall_map_r ? "FAIL" : "OK", ret, errno);
+ return vsyscall_map_r ? 1 : 0;
}
if (vsyscall_map_r) {
printf("[FAIL]\tIt worked but returned incorrect data\n");
return 1;
}
+ } else {
+		printf("[FAIL]\tprocess_vm_readv() succeeded, but it should have failed in this configuration\n");
+ return 1;
}
#endif
--- /dev/null
+testusb-y += testusb.o
+ffs-test-y += ffs-test.o
# SPDX-License-Identifier: GPL-2.0
# Makefile for USB tools
+include ../scripts/Makefile.include
-PTHREAD_LIBS = -lpthread
-WARNINGS = -Wall -Wextra
-CFLAGS = $(WARNINGS) -g -I../include
-LDFLAGS = $(PTHREAD_LIBS)
+bindir ?= /usr/bin
-all: testusb ffs-test
-%: %.c
- $(CC) $(CFLAGS) -o $@ $^ $(LDFLAGS)
+ifeq ($(srctree),)
+srctree := $(patsubst %/,%,$(dir $(CURDIR)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+endif
+
+# Do not use make's built-in rules
+# (this improves performance and avoids hard-to-debug behaviour);
+MAKEFLAGS += -r
+
+override CFLAGS += -O2 -Wall -Wextra -g -D_GNU_SOURCE -I$(OUTPUT)include -I$(srctree)/tools/include
+override LDFLAGS += -lpthread
+
+ALL_TARGETS := testusb ffs-test
+ALL_PROGRAMS := $(patsubst %,$(OUTPUT)%,$(ALL_TARGETS))
+
+all: $(ALL_PROGRAMS)
+
+export srctree OUTPUT CC LD CFLAGS
+include $(srctree)/tools/build/Makefile.include
+
+TESTUSB_IN := $(OUTPUT)testusb-in.o
+$(TESTUSB_IN): FORCE
+ $(Q)$(MAKE) $(build)=testusb
+$(OUTPUT)testusb: $(TESTUSB_IN)
+ $(QUIET_LINK)$(CC) $(CFLAGS) $< -o $@ $(LDFLAGS)
+
+FFS_TEST_IN := $(OUTPUT)ffs-test-in.o
+$(FFS_TEST_IN): FORCE
+ $(Q)$(MAKE) $(build)=ffs-test
+$(OUTPUT)ffs-test: $(FFS_TEST_IN)
+ $(QUIET_LINK)$(CC) $(CFLAGS) $< -o $@ $(LDFLAGS)
clean:
- $(RM) testusb ffs-test
+ rm -f $(ALL_PROGRAMS)
+ find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete -o -name '\.*.o.cmd' -delete
+
+install: $(ALL_PROGRAMS)
+ install -d -m 755 $(DESTDIR)$(bindir); \
+ for program in $(ALL_PROGRAMS); do \
+ install $$program $(DESTDIR)$(bindir); \
+ done
+
+FORCE:
+
+.PHONY: all install clean FORCE prepare
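+
+# Editor's note (hedged): with the tools build framework wired in, the
+# usual out-of-tree invocations should apply, e.g.:
+#
+#   make -C tools/usb OUTPUT=/tmp/usb-build/
+#   make -C tools/usb DESTDIR=/usr/local install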
* count is also read inside the mmu_lock critical section.
*/
kvm->mmu_notifier_count++;
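	/*
	 * Editor's note (inferred from this hunk): range->flags forwards the
	 * mmu_notifier flags (e.g. MMU_NOTIFIER_RANGE_BLOCKABLE) so each
	 * arch's kvm_unmap_hva_range() can tell whether it may block during
	 * the invalidation.
	 */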
- need_tlb_flush = kvm_unmap_hva_range(kvm, range->start, range->end);
+ need_tlb_flush = kvm_unmap_hva_range(kvm, range->start, range->end,
+ range->flags);
need_tlb_flush |= kvm->tlbs_dirty;
/* we've to flush the tlb before the pages can be freed */
if (need_tlb_flush)