Aaron Durbin <adurbin@google.com>
Abel Vesa <abelvesa@kernel.org> <abel.vesa@nxp.com>
Abel Vesa <abelvesa@kernel.org> <abelvesa@gmail.com>
+Abhijeet Dharmapurikar <quic_adharmap@quicinc.com> <adharmap@codeaurora.org>
Abhinav Kumar <quic_abhinavk@quicinc.com> <abhinavk@codeaurora.org>
+Ahmad Masri <quic_amasri@quicinc.com> <amasri@codeaurora.org>
Adam Oldham <oldhamca@gmail.com>
Adam Radford <aradford@gmail.com>
Adriana Reus <adi.reus@gmail.com> <adriana.reus@intel.com>
Alexander Mikhalitsyn <alexander@mihalicyn.com> <aleksandr.mikhalitsyn@canonical.com>
Alexandre Belloni <alexandre.belloni@bootlin.com> <alexandre.belloni@free-electrons.com>
Alexandre Ghiti <alex@ghiti.fr> <alexandre.ghiti@canonical.com>
+Alexei Avshalom Lazar <quic_ailizaro@quicinc.com> <ailizaro@codeaurora.org>
Alexei Starovoitov <ast@kernel.org> <alexei.starovoitov@gmail.com>
Alexei Starovoitov <ast@kernel.org> <ast@fb.com>
Alexei Starovoitov <ast@kernel.org> <ast@plumgrid.com>
Alex Shi <alexs@kernel.org> <alex.shi@intel.com>
Alex Shi <alexs@kernel.org> <alex.shi@linaro.org>
Alex Shi <alexs@kernel.org> <alex.shi@linux.alibaba.com>
+Aloka Dixit <quic_alokad@quicinc.com> <alokad@codeaurora.org>
Al Viro <viro@ftp.linux.org.uk>
Al Viro <viro@zenIV.linux.org.uk>
+Amit Blay <quic_ablay@quicinc.com> <ablay@codeaurora.org>
+Amit Nischal <quic_anischal@quicinc.com> <anischal@codeaurora.org>
Andi Kleen <ak@linux.intel.com> <ak@suse.de>
Andi Shyti <andi@etezian.org> <andi.shyti@samsung.com>
Andreas Herrmann <aherrman@de.ibm.com>
Andrzej Hajda <andrzej.hajda@intel.com> <a.hajda@samsung.com>
André Almeida <andrealmeid@igalia.com> <andrealmeid@collabora.com>
Andy Adamson <andros@citi.umich.edu>
+Anilkumar Kolli <quic_akolli@quicinc.com> <akolli@codeaurora.org>
+Anirudh Ghayal <quic_aghayal@quicinc.com> <aghayal@codeaurora.org>
Antoine Tenart <atenart@kernel.org> <antoine.tenart@bootlin.com>
Antoine Tenart <atenart@kernel.org> <antoine.tenart@free-electrons.com>
Antonio Ospite <ao2@ao2.it> <ao2@amarulasolutions.com>
Ard Biesheuvel <ardb@kernel.org> <ard.biesheuvel@linaro.org>
Arnaud Patard <arnaud.patard@rtp-net.org>
Arnd Bergmann <arnd@arndb.de>
+Arun Kumar Neelakantam <quic_aneela@quicinc.com> <aneela@codeaurora.org>
+Ashok Raj Nagarajan <quic_arnagara@quicinc.com> <arnagara@codeaurora.org>
+Ashwin Chaugule <quic_ashwinc@quicinc.com> <ashwinc@codeaurora.org>
+Asutosh Das <quic_asutoshd@quicinc.com> <asutoshd@codeaurora.org>
Atish Patra <atishp@atishpatra.org> <atish.patra@wdc.com>
+Avaneesh Kumar Dwivedi <quic_akdwived@quicinc.com> <akdwived@codeaurora.org>
Axel Dyks <xl@xlsigned.net>
Axel Lin <axel.lin@gmail.com>
+Balakrishna Godavarthi <quic_bgodavar@quicinc.com> <bgodavar@codeaurora.org>
+Banajit Goswami <quic_bgoswami@quicinc.com> <bgoswami@codeaurora.org>
+Baochen Qiang <quic_bqiang@quicinc.com> <bqiang@codeaurora.org>
Baolin Wang <baolin.wang@linux.alibaba.com> <baolin.wang@linaro.org>
Baolin Wang <baolin.wang@linux.alibaba.com> <baolin.wang@spreadtrum.com>
Baolin Wang <baolin.wang@linux.alibaba.com> <baolin.wang@unisoc.com>
Brian King <brking@us.ibm.com>
Brian Silverman <bsilver16384@gmail.com> <brian.silverman@bluerivertech.com>
Cai Huoqing <cai.huoqing@linux.dev> <caihuoqing@baidu.com>
+Can Guo <quic_cang@quicinc.com> <cang@codeaurora.org>
+Carl Huang <quic_cjhuang@quicinc.com> <cjhuang@codeaurora.org>
Changbin Du <changbin.du@intel.com> <changbin.du@gmail.com>
Changbin Du <changbin.du@intel.com> <changbin.du@intel.com>
Chao Yu <chao@kernel.org> <chao2.yu@samsung.com>
Chao Yu <chao@kernel.org> <yuchao0@huawei.com>
Chris Chiu <chris.chiu@canonical.com> <chiu@endlessm.com>
Chris Chiu <chris.chiu@canonical.com> <chiu@endlessos.org>
+Chris Lew <quic_clew@quicinc.com> <clew@codeaurora.org>
Christian Borntraeger <borntraeger@linux.ibm.com> <borntraeger@de.ibm.com>
Christian Borntraeger <borntraeger@linux.ibm.com> <cborntra@de.ibm.com>
Christian Borntraeger <borntraeger@linux.ibm.com> <borntrae@de.ibm.com>
Daniel Borkmann <daniel@iogearbox.net> <dborkman@redhat.com>
Daniel Borkmann <daniel@iogearbox.net> <dxchgb@gmail.com>
David Brownell <david-b@pacbell.net>
+David Collins <quic_collinsd@quicinc.com> <collinsd@codeaurora.org>
David Woodhouse <dwmw2@shinybook.infradead.org>
+Dedy Lansky <quic_dlansky@quicinc.com> <dlansky@codeaurora.org>
+Deepak Kumar Singh <quic_deesin@quicinc.com> <deesin@codeaurora.org>
Dengcheng Zhu <dzhu@wavecomp.com> <dczhu@mips.com>
Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@gmail.com>
Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@imgtec.com>
Domen Puncer <domen@coderock.org>
Douglas Gilbert <dougg@torque.net>
Ed L. Cashin <ecashin@coraid.com>
+Elliot Berman <quic_eberman@quicinc.com> <eberman@codeaurora.org>
Enric Balletbo i Serra <eballetbo@kernel.org> <enric.balletbo@collabora.com>
Enric Balletbo i Serra <eballetbo@kernel.org> <eballetbo@iseebcn.com>
Erik Kaneda <erik.kaneda@intel.com> <erik.schmauss@intel.com>
Felipe W Damasio <felipewd@terra.com.br>
Felix Kuhling <fxkuehl@gmx.de>
Felix Moeller <felix@derklecks.de>
+Fenglin Wu <quic_fenglinw@quicinc.com> <fenglinw@codeaurora.org>
Filipe Lautert <filipe@icewall.org>
Finn Thain <fthain@linux-m68k.org> <fthain@telegraphics.com.au>
Franck Bui-Huu <vagabon.xyz@gmail.com>
Gregory CLEMENT <gregory.clement@bootlin.com> <gregory.clement@free-electrons.com>
Guilherme G. Piccoli <kernel@gpiccoli.net> <gpiccoli@linux.vnet.ibm.com>
Guilherme G. Piccoli <kernel@gpiccoli.net> <gpiccoli@canonical.com>
+Gokul Sriram Palanisamy <quic_gokulsri@quicinc.com> <gokulsri@codeaurora.org>
+Govindaraj Saminathan <quic_gsamin@quicinc.com> <gsamin@codeaurora.org>
Guo Ren <guoren@kernel.org> <guoren@linux.alibaba.com>
Guo Ren <guoren@kernel.org> <ren_guo@c-sky.com>
+Guru Das Srinagesh <quic_gurus@quicinc.com> <gurus@codeaurora.org>
Gustavo Padovan <gustavo@las.ic.unicamp.br>
Gustavo Padovan <padovan@profusion.mobi>
Hanjun Guo <guohanjun@huawei.com> <hanjun.guo@linaro.org>
J. Bruce Fields <bfields@fieldses.org> <bfields@redhat.com>
J. Bruce Fields <bfields@fieldses.org> <bfields@citi.umich.edu>
Jacob Shin <Jacob.Shin@amd.com>
+Jack Pham <quic_jackp@quicinc.com> <jackp@codeaurora.org>
Jaegeuk Kim <jaegeuk@kernel.org> <jaegeuk@google.com>
Jaegeuk Kim <jaegeuk@kernel.org> <jaegeuk.kim@samsung.com>
Jaegeuk Kim <jaegeuk@kernel.org> <jaegeuk@motorola.com>
Jayachandran C <c.jayachandran@gmail.com> <jnair@caviumnetworks.com>
<jean-philippe@linaro.org> <jean-philippe.brucker@arm.com>
Jean Tourrilhes <jt@hpl.hp.com>
+Jeevan Shriram <quic_jshriram@quicinc.com> <jshriram@codeaurora.org>
Jeff Garzik <jgarzik@pretzel.yyz.us>
Jeff Layton <jlayton@kernel.org> <jlayton@poochiereds.net>
Jeff Layton <jlayton@kernel.org> <jlayton@primarydata.com>
Jeff Layton <jlayton@kernel.org> <jlayton@redhat.com>
+Jeffrey Hugo <quic_jhugo@quicinc.com> <jhugo@codeaurora.org>
Jens Axboe <axboe@kernel.dk> <axboe@suse.de>
Jens Axboe <axboe@kernel.dk> <jens.axboe@oracle.com>
Jens Axboe <axboe@kernel.dk> <axboe@fb.com>
Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
Jernej Skrabec <jernej.skrabec@gmail.com> <jernej.skrabec@siol.net>
Jessica Zhang <quic_jesszhan@quicinc.com> <jesszhan@codeaurora.org>
+Jilai Wang <quic_jilaiw@quicinc.com> <jilaiw@codeaurora.org>
Jiri Pirko <jiri@resnulli.us> <jiri@nvidia.com>
Jiri Pirko <jiri@resnulli.us> <jiri@mellanox.com>
Jiri Pirko <jiri@resnulli.us> <jpirko@redhat.com>
Jiri Slaby <jirislaby@kernel.org> <xslaby@fi.muni.cz>
Jisheng Zhang <jszhang@kernel.org> <jszhang@marvell.com>
Jisheng Zhang <jszhang@kernel.org> <Jisheng.Zhang@synaptics.com>
+Jishnu Prakash <quic_jprakash@quicinc.com> <jprakash@codeaurora.org>
Johan Hovold <johan@kernel.org> <jhovold@gmail.com>
Johan Hovold <johan@kernel.org> <johan@hovoldconsulting.com>
John Crispin <john@phrozen.org> <blogic@openwrt.org>
<josh@joshtriplett.org> <josht@vnet.ibm.com>
Josh Poimboeuf <jpoimboe@kernel.org> <jpoimboe@redhat.com>
Josh Poimboeuf <jpoimboe@kernel.org> <jpoimboe@us.ibm.com>
+Jouni Malinen <quic_jouni@quicinc.com> <jouni@codeaurora.org>
Juha Yrjola <at solidboot.com>
Juha Yrjola <juha.yrjola@nokia.com>
Juha Yrjola <juha.yrjola@solidboot.com>
Iskren Chernev <me@iskren.info> <iskren.chernev@gmail.com>
Kalle Valo <kvalo@kernel.org> <kvalo@codeaurora.org>
Kalyan Thota <quic_kalyant@quicinc.com> <kalyan_t@codeaurora.org>
+Karthikeyan Periyasamy <quic_periyasa@quicinc.com> <periyasa@codeaurora.org>
+Kathiravan T <quic_kathirav@quicinc.com> <kathirav@codeaurora.org>
Kay Sievers <kay.sievers@vrfy.org>
Kees Cook <keescook@chromium.org> <kees.cook@canonical.com>
Kees Cook <keescook@chromium.org> <keescook@google.com>
Keith Busch <kbusch@kernel.org> <keith.busch@intel.com>
Keith Busch <kbusch@kernel.org> <keith.busch@linux.intel.com>
Kenneth W Chen <kenneth.w.chen@intel.com>
+Kenneth Westfield <quic_kwestfie@quicinc.com> <kwestfie@codeaurora.org>
+Kiran Gunda <quic_kgunda@quicinc.com> <kgunda@codeaurora.org>
Kirill Tkhai <tkhai@ya.ru> <ktkhai@virtuozzo.com>
Konstantin Khlebnikov <koct9i@gmail.com> <khlebnikov@yandex-team.ru>
Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com>
Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski.k@gmail.com>
Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski@samsung.com>
Krzysztof Kozlowski <krzk@kernel.org> <krzysztof.kozlowski@canonical.com>
+Kshitiz Godara <quic_kgodara@quicinc.com> <kgodara@codeaurora.org>
Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
Kuogee Hsieh <quic_khsieh@quicinc.com> <khsieh@codeaurora.org>
Lee Jones <lee@kernel.org> <joneslee@google.com>
Leon Romanovsky <leon@kernel.org> <leon@leon.nu>
Leon Romanovsky <leon@kernel.org> <leonro@mellanox.com>
Leon Romanovsky <leon@kernel.org> <leonro@nvidia.com>
+Liam Mark <quic_lmark@quicinc.com> <lmark@codeaurora.org>
Linas Vepstas <linas@austin.ibm.com>
Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@ascom.ch>
Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@web.de>
<linux-hardening@vger.kernel.org> <kernel-hardening@lists.openwall.com>
Li Yang <leoyang.li@nxp.com> <leoli@freescale.com>
Li Yang <leoyang.li@nxp.com> <leo@zh-kernel.org>
+Lior David <quic_liord@quicinc.com> <liord@codeaurora.org>
Lorenzo Pieralisi <lpieralisi@kernel.org> <lorenzo.pieralisi@arm.com>
Luca Ceresoli <luca.ceresoli@bootlin.com> <luca@lucaceresoli.net>
Lukasz Luba <lukasz.luba@arm.com> <l.luba@partner.samsung.com>
+Luo Jie <quic_luoj@quicinc.com> <luoj@codeaurora.org>
Maciej W. Rozycki <macro@mips.com> <macro@imgtec.com>
Maciej W. Rozycki <macro@orcam.me.uk> <macro@linux-mips.org>
+Maharaja Kennadyrajan <quic_mkenna@quicinc.com> <mkenna@codeaurora.org>
+Maheshwar Ajja <quic_majja@quicinc.com> <majja@codeaurora.org>
+Malathi Gottam <quic_mgottam@quicinc.com> <mgottam@codeaurora.org>
+Manikanta Pubbisetty <quic_mpubbise@quicinc.com> <mpubbise@codeaurora.org>
Manivannan Sadhasivam <mani@kernel.org> <manivannanece23@gmail.com>
Manivannan Sadhasivam <mani@kernel.org> <manivannan.sadhasivam@linaro.org>
+Manoj Basapathi <quic_manojbm@quicinc.com> <manojbm@codeaurora.org>
Marcin Nowakowski <marcin.nowakowski@mips.com> <marcin.nowakowski@imgtec.com>
Marc Zyngier <maz@kernel.org> <marc.zyngier@arm.com>
Marek Behún <kabel@kernel.org> <marek.behun@nic.cz>
Matt Ranostay <mranostay@gmail.com> Matthew Ranostay <mranostay@embeddedalley.com>
Matt Ranostay <mranostay@gmail.com> <matt.ranostay@intel.com>
Matt Redfearn <matt.redfearn@mips.com> <matt.redfearn@imgtec.com>
+Maulik Shah <quic_mkshah@quicinc.com> <mkshah@codeaurora.org>
Mauro Carvalho Chehab <mchehab@kernel.org> <maurochehab@gmail.com>
Mauro Carvalho Chehab <mchehab@kernel.org> <mchehab@brturbo.com.br>
Mauro Carvalho Chehab <mchehab@kernel.org> <mchehab@infradead.org>
Maxime Ripard <mripard@kernel.org> <maxime@cerno.tech>
Maxime Ripard <mripard@kernel.org> <maxime.ripard@bootlin.com>
Maxime Ripard <mripard@kernel.org> <maxime.ripard@free-electrons.com>
+Maya Erez <quic_merez@quicinc.com> <merez@codeaurora.org>
Mayuresh Janorkar <mayur@ti.com>
+Md Sadre Alam <quic_mdalam@quicinc.com> <mdalam@codeaurora.org>
+Miaoqing Pan <quic_miaoqing@quicinc.com> <miaoqing@codeaurora.org>
Michael Buesch <m@bues.ch>
Michal Simek <michal.simek@amd.com> <michal.simek@xilinx.com>
Michel Dänzer <michel@tungstengraphics.com>
Mike Rapoport <rppt@kernel.org> <mike@compulab.co.il>
Mike Rapoport <rppt@kernel.org> <mike.rapoport@gmail.com>
Mike Rapoport <rppt@kernel.org> <rppt@linux.ibm.com>
+Mike Tipton <quic_mdtipton@quicinc.com> <mdtipton@codeaurora.org>
Miodrag Dinic <miodrag.dinic@mips.com> <miodrag.dinic@imgtec.com>
Miquel Raynal <miquel.raynal@bootlin.com> <miquel.raynal@free-electrons.com>
Mitesh shah <mshah@teja.com>
Morten Welinder <welinder@anemone.rentec.com>
Morten Welinder <welinder@darter.rentec.com>
Morten Welinder <welinder@troll.com>
+Mukesh Ojha <quic_mojha@quicinc.com> <mojha@codeaurora.org>
+Muna Sinada <quic_msinada@quicinc.com> <msinada@codeaurora.org>
+Murali Nalajala <quic_mnalajal@quicinc.com> <mnalajal@codeaurora.org>
Mythri P K <mythripk@ti.com>
Nadia Yvette Chambers <nyc@holomorphy.com> William Lee Irwin III <wli@holomorphy.com>
Nathan Chancellor <nathan@kernel.org> <natechancellor@gmail.com>
+Neeraj Upadhyay <quic_neeraju@quicinc.com> <neeraju@codeaurora.org>
Neil Armstrong <neil.armstrong@linaro.org> <narmstrong@baylibre.com>
Nguyen Anh Quynh <aquynh@gmail.com>
Nicholas Piggin <npiggin@gmail.com> <npiggen@suse.de>
Nikolay Aleksandrov <razor@blackwall.org> <nikolay@cumulusnetworks.com>
Nikolay Aleksandrov <razor@blackwall.org> <nikolay@nvidia.com>
Nikolay Aleksandrov <razor@blackwall.org> <nikolay@isovalent.com>
+Odelu Kukatla <quic_okukatla@quicinc.com> <okukatla@codeaurora.org>
Oleksandr Natalenko <oleksandr@natalenko.name> <oleksandr@redhat.com>
Oleksij Rempel <linux@rempel-privat.de> <bug-track@fisher-privat.net>
Oleksij Rempel <linux@rempel-privat.de> <external.Oleksij.Rempel@de.bosch.com>
Oleksij Rempel <linux@rempel-privat.de> <o.rempel@pengutronix.de>
Oleksij Rempel <linux@rempel-privat.de> <ore@pengutronix.de>
Oliver Upton <oliver.upton@linux.dev> <oupton@google.com>
+Oza Pawandeep <quic_poza@quicinc.com> <poza@codeaurora.org>
Pali Rohár <pali@kernel.org> <pali.rohar@gmail.com>
Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
Patrick Mochel <mochel@digitalimplant.org>
Paul E. McKenney <paulmck@kernel.org> <paulmck@us.ibm.com>
Paul Mackerras <paulus@ozlabs.org> <paulus@samba.org>
Paul Mackerras <paulus@ozlabs.org> <paulus@au1.ibm.com>
+Pavankumar Kondeti <quic_pkondeti@quicinc.com> <pkondeti@codeaurora.org>
Peter A Jonsson <pj@ludd.ltu.se>
Peter Oruba <peter.oruba@amd.com>
Peter Oruba <peter@oruba.de>
Pratyush Anand <pratyush.anand@gmail.com> <pratyush.anand@st.com>
Praveen BP <praveenbp@ti.com>
+Pradeep Kumar Chitrapu <quic_pradeepc@quicinc.com> <pradeepc@codeaurora.org>
+Prasad Sodagudi <quic_psodagud@quicinc.com> <psodagud@codeaurora.org>
Punit Agrawal <punitagrawal@gmail.com> <punit.agrawal@arm.com>
Qais Yousef <qyousef@layalina.io> <qais.yousef@imgtec.com>
Qais Yousef <qyousef@layalina.io> <qais.yousef@arm.com>
Rafael J. Wysocki <rjw@rjwysocki.net> <rjw@sisk.pl>
Rajeev Nandan <quic_rajeevny@quicinc.com> <rajeevny@codeaurora.org>
Rajendra Nayak <quic_rjendra@quicinc.com> <rnayak@codeaurora.org>
+Rajeshwari Ravindra Kamble <quic_rkambl@quicinc.com> <rkambl@codeaurora.org>
+Raju P.L.S.S.S.N <quic_rplsssn@quicinc.com> <rplsssn@codeaurora.org>
Rajesh Shah <rajesh.shah@intel.com>
+Rakesh Pillai <quic_pillair@quicinc.com> <pillair@codeaurora.org>
Ralf Baechle <ralf@linux-mips.org>
Ralf Wildenhues <Ralf.Wildenhues@gmx.de>
+Ram Chandra Jangir <quic_rjangir@quicinc.com> <rjangir@codeaurora.org>
Randy Dunlap <rdunlap@infradead.org> <rdunlap@xenotime.net>
+Ravi Kumar Bokka <quic_rbokka@quicinc.com> <rbokka@codeaurora.org>
+Ravi Kumar Siddojigari <quic_rsiddoji@quicinc.com> <rsiddoji@codeaurora.org>
Rémi Denis-Courmont <rdenis@simphalempin.com>
Ricardo Ribalda <ribalda@kernel.org> <ricardo@ribalda.com>
Ricardo Ribalda <ribalda@kernel.org> Ricardo Ribalda Delgado <ribalda@kernel.org>
Richard Leitner <richard.leitner@linux.dev> <me@g0hl1n.net>
Richard Leitner <richard.leitner@linux.dev> <richard.leitner@skidata.com>
Robert Foss <rfoss@kernel.org> <robert.foss@linaro.org>
+Rocky Liao <quic_rjliao@quicinc.com> <rjliao@codeaurora.org>
Roman Gushchin <roman.gushchin@linux.dev> <guro@fb.com>
Roman Gushchin <roman.gushchin@linux.dev> <guroan@gmail.com>
Roman Gushchin <roman.gushchin@linux.dev> <klamm@yandex-team.ru>
Santosh Shilimkar <ssantosh@kernel.org>
Sarangdhar Joshi <spjoshi@codeaurora.org>
Sascha Hauer <s.hauer@pengutronix.de>
+Sahitya Tummala <quic_stummala@quicinc.com> <stummala@codeaurora.org>
+Sathishkumar Muruganandam <quic_murugana@quicinc.com> <murugana@codeaurora.org>
Satya Priya <quic_c_skakit@quicinc.com> <skakit@codeaurora.org>
S.Çağlar Onur <caglar@pardus.org.tr>
+Sayali Lokhande <quic_sayalil@quicinc.com> <sayalil@codeaurora.org>
Sean Christopherson <seanjc@google.com> <sean.j.christopherson@intel.com>
Sean Nyekjaer <sean@geanix.com> <sean.nyekjaer@prevas.dk>
+Sean Tranchetti <quic_stranche@quicinc.com> <stranche@codeaurora.org>
Sebastian Reichel <sre@kernel.org> <sebastian.reichel@collabora.co.uk>
Sebastian Reichel <sre@kernel.org> <sre@debian.org>
Sedat Dilek <sedat.dilek@gmail.com> <sedat.dilek@credativ.de>
+Senthilkumar N L <quic_snlakshm@quicinc.com> <snlakshm@codeaurora.org>
Seth Forshee <sforshee@kernel.org> <seth.forshee@canonical.com>
Shannon Nelson <shannon.nelson@amd.com> <snelson@pensando.io>
Shannon Nelson <shannon.nelson@amd.com> <shannon.nelson@intel.com>
Shannon Nelson <shannon.nelson@amd.com> <shannon.nelson@oracle.com>
+Sharath Chandra Vurukala <quic_sharathv@quicinc.com> <sharathv@codeaurora.org>
Shiraz Hashim <shiraz.linux.kernel@gmail.com> <shiraz.hashim@st.com>
Shuah Khan <shuah@kernel.org> <shuahkhan@gmail.com>
Shuah Khan <shuah@kernel.org> <shuah.khan@hp.com>
Shuah Khan <shuah@kernel.org> <shuahkh@osg.samsung.com>
Shuah Khan <shuah@kernel.org> <shuah.kh@samsung.com>
+Sibi Sankar <quic_sibis@quicinc.com> <sibis@codeaurora.org>
+Sid Manning <quic_sidneym@quicinc.com> <sidneym@codeaurora.org>
Simon Arlott <simon@octiron.net> <simon@fire.lp0.eu>
Simon Kelley <simon@thekelleys.org.uk>
+Sricharan Ramabadhran <quic_srichara@quicinc.com> <sricharan@codeaurora.org>
+Srinivas Ramana <quic_sramana@quicinc.com> <sramana@codeaurora.org>
+Sriram R <quic_srirrama@quicinc.com> <srirrama@codeaurora.org>
Stéphane Witzmann <stephane.witzmann@ubpmes.univ-bpclermont.fr>
Stephen Hemminger <stephen@networkplumber.org> <shemminger@linux-foundation.org>
Stephen Hemminger <stephen@networkplumber.org> <shemminger@osdl.org>
Stephen Hemminger <stephen@networkplumber.org> <sthemmin@vyatta.com>
Steve Wise <larrystevenwise@gmail.com> <swise@chelsio.com>
Steve Wise <larrystevenwise@gmail.com> <swise@opengridcomputing.com>
-Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
+Subash Abhinov Kasiviswanathan <quic_subashab@quicinc.com> <subashab@codeaurora.org>
+Subbaraman Narayanamurthy <quic_subbaram@quicinc.com> <subbaram@codeaurora.org>
Subhash Jadavani <subhashj@codeaurora.org>
+Sudarshan Rajagopalan <quic_sudaraja@quicinc.com> <sudaraja@codeaurora.org>
Sudeep Holla <sudeep.holla@arm.com> Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
Sumit Semwal <sumit.semwal@ti.com>
+Surabhi Vishnoi <quic_svishnoi@quicinc.com> <svishnoi@codeaurora.org>
Takashi YOSHII <takashi.yoshii.zj@renesas.com>
+Tamizh Chelvam Raja <quic_tamizhr@quicinc.com> <tamizhr@codeaurora.org>
+Taniya Das <quic_tdas@quicinc.com> <tdas@codeaurora.org>
Tejun Heo <htejun@gmail.com>
Thomas Graf <tgraf@suug.ch>
Thomas Körper <socketcan@esd.eu> <thomas.koerper@esd.eu>
Thomas Pedersen <twp@codeaurora.org>
Tiezhu Yang <yangtiezhu@loongson.cn> <kernelpatch@126.com>
+Tingwei Zhang <quic_tingwei@quicinc.com> <tingwei@codeaurora.org>
+Tirupathi Reddy <quic_tirupath@quicinc.com> <tirupath@codeaurora.org>
Tobias Klauser <tklauser@distanz.ch> <tobias.klauser@gmail.com>
Tobias Klauser <tklauser@distanz.ch> <klto@zhaw.ch>
Tobias Klauser <tklauser@distanz.ch> <tklauser@nuerscht.ch>
Tobias Klauser <tklauser@distanz.ch> <tklauser@xenon.tklauser.home>
Todor Tomov <todor.too@gmail.com> <todor.tomov@linaro.org>
Tony Luck <tony.luck@intel.com>
+Trilok Soni <quic_tsoni@quicinc.com> <tsoni@codeaurora.org>
TripleX Chung <xxx.phy@gmail.com> <triplex@zh-kernel.org>
TripleX Chung <xxx.phy@gmail.com> <zhongyu@18mail.cn>
Tsuneo Yoshioka <Tsuneo.Yoshioka@f-secure.com>
Uwe Kleine-König <ukl@pengutronix.de>
Uwe Kleine-König <Uwe.Kleine-Koenig@digi.com>
Valdis Kletnieks <Valdis.Kletnieks@vt.edu>
+Vara Reddy <quic_varar@quicinc.com> <varar@codeaurora.org>
+Varadarajan Narayanan <quic_varada@quicinc.com> <varada@codeaurora.org>
+Vasanthakumar Thiagarajan <quic_vthiagar@quicinc.com> <vthiagar@codeaurora.org>
Vasily Averin <vasily.averin@linux.dev> <vvs@virtuozzo.com>
Vasily Averin <vasily.averin@linux.dev> <vvs@openvz.org>
Vasily Averin <vasily.averin@linux.dev> <vvs@parallels.com>
Vasily Averin <vasily.averin@linux.dev> <vvs@sw.ru>
Valentin Schneider <vschneid@redhat.com> <valentin.schneider@arm.com>
+Veera Sundaram Sankaran <quic_veeras@quicinc.com> <veeras@codeaurora.org>
+Veerabhadrarao Badiganti <quic_vbadigan@quicinc.com> <vbadigan@codeaurora.org>
+Venkateswara Naralasetty <quic_vnaralas@quicinc.com> <vnaralas@codeaurora.org>
Vikash Garodia <quic_vgarodia@quicinc.com> <vgarodia@codeaurora.org>
Vinod Koul <vkoul@kernel.org> <vinod.koul@intel.com>
Vinod Koul <vkoul@kernel.org> <vinod.koul@linux.intel.com>
Viresh Kumar <vireshk@kernel.org> <viresh.linux@gmail.com>
Viresh Kumar <viresh.kumar@linaro.org> <viresh.kumar@linaro.org>
Viresh Kumar <viresh.kumar@linaro.org> <viresh.kumar@linaro.com>
+Vivek Aknurwar <quic_viveka@quicinc.com> <viveka@codeaurora.org>
Vivien Didelot <vivien.didelot@gmail.com> <vivien.didelot@savoirfairelinux.com>
Vlad Dogaru <ddvlad@gmail.com> <vlad.dogaru@intel.com>
Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@parallels.com>
Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@virtuozzo.com>
WeiXiong Liao <gmpy.liaowx@gmail.com> <liaoweixiong@allwinnertech.com>
+Wen Gong <quic_wgong@quicinc.com> <wgong@codeaurora.org>
+Wesley Cheng <quic_wcheng@quicinc.com> <wcheng@codeaurora.org>
Will Deacon <will@kernel.org> <will.deacon@arm.com>
Wolfram Sang <wsa@kernel.org> <w.sang@pengutronix.de>
Wolfram Sang <wsa@kernel.org> <wsa@the-dreams.de>
C  staging driver module
E  unsigned module
== =====================
+
+What: /sys/module/grant_table/parameters/free_per_iteration
+Date: July 2023
+KernelVersion: 6.5 but backported to all supported stable branches
+Contact: Xen developer discussion <xen-devel@lists.xenproject.org>
+Description: Read and write number of grant entries to attempt to free per iteration.
+
+ Note: Future versions of Xen and Linux may provide a better
+ interface for controlling the rate of deferred grant reclaim
+ or may not need it at all.
+Users: Qubes OS (https://www.qubes-os.org)
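A minimal userspace sketch (not part of the patch) of reading and then
writing this parameter; the value 100 written below is an arbitrary
example, not a recommendation::

  #include <stdio.h>

  int main(void)
  {
          const char *path =
                  "/sys/module/grant_table/parameters/free_per_iteration";
          FILE *f = fopen(path, "r+");
          unsigned int val;

          if (!f)
                  return 1;
          /* read the current number of grant entries freed per iteration */
          if (fscanf(f, "%u", &val) == 1)
                  printf("current: %u\n", val);
          /* write a new (hypothetical) batch size */
          rewind(f);
          fprintf(f, "100\n");
          return fclose(f) ? 1 : 0;
  }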
45 = /dev/ttyMM1 Marvell MPSC - port 1 (obsolete unused)
46 = /dev/ttyCPM0 PPC CPM (SCC or SMC) - port 0
...
- 47 = /dev/ttyCPM5 PPC CPM (SCC or SMC) - port 5
+ 49 = /dev/ttyCPM3 PPC CPM (SCC or SMC) - port 3
50 = /dev/ttyIOC0 Altix serial card
...
81 = /dev/ttyIOC31 Altix serial card
Systems which support enhanced IBRS (eIBRS) enable IBRS protection once at
boot, by setting the IBRS bit, and they're automatically protected against
- Spectre v2 variant attacks, including cross-thread branch target injections
- on SMT systems (STIBP). In other words, eIBRS enables STIBP too.
+ Spectre v2 variant attacks.
- Legacy IBRS systems clear the IBRS bit on exit to userspace and
- therefore explicitly enable STIBP for that
+ On Intel's enhanced IBRS systems, this includes cross-thread branch target
+ injections on SMT systems (STIBP). In other words, Intel eIBRS enables
+ STIBP, too.
+
+ AMD Automatic IBRS does not protect userspace, and Legacy IBRS systems clear
+ the IBRS bit on exit to userspace, therefore both explicitly enable STIBP.
The retpoline mitigation is turned on by default on vulnerable
CPUs. It can be forced on or off by the administrator
| ARM | MMU-700 | #2268618,2812531| N/A |
+----------------+-----------------+-----------------+-----------------------------+
+----------------+-----------------+-----------------+-----------------------------+
+| ARM | GIC-700 | #2941627 | ARM64_ERRATUM_2941627 |
++----------------+-----------------+-----------------+-----------------------------+
++----------------+-----------------+-----------------+-----------------------------+
| Broadcom | Brahma-B53 | N/A | ARM64_ERRATUM_845719 |
+----------------+-----------------+-----------------+-----------------------------+
| Broadcom | Brahma-B53 | N/A | ARM64_ERRATUM_843419 |
is half of the number of your physical RAM pages, or (on a
machine with highmem) the number of lowmem RAM pages,
whichever is the lower.
-noswap Disables swap. Remounts must respect the original settings.
- By default swap is enabled.
========= ============================================================
These parameters accept a suffix k, m or g for kilo, mega and giga and
use up all the memory on the machine; but enhances the scalability of
that instance in a system with many CPUs making intensive use of it.
+tmpfs blocks may be swapped out, when there is a shortage of memory.
+tmpfs has a mount option to disable its use of swap:
+
+====== ===========================================================
+noswap Disables swap. Remounts must respect the original settings.
+ By default swap is enabled.
+====== ===========================================================
+
tmpfs also supports Transparent Huge Pages which requires a kernel
configured with CONFIG_TRANSPARENT_HUGEPAGE and with huge supported for
your system (has_transparent_hugepage(), which is architecture specific).
The mount options for this are:
-====== ============================================================
-huge=0 never: disables huge pages for the mount
-huge=1 always: enables huge pages for the mount
-huge=2 within_size: only allocate huge pages if the page will be
- fully within i_size, also respect fadvise()/madvise() hints.
-huge=3 advise: only allocate huge pages if requested with
- fadvise()/madvise()
-====== ============================================================
-
-There is a sysfs file which you can also use to control system wide THP
-configuration for all tmpfs mounts, the file is:
-
-/sys/kernel/mm/transparent_hugepage/shmem_enabled
-
-This sysfs file is placed on top of THP sysfs directory and so is registered
-by THP code. It is however only used to control all tmpfs mounts with one
-single knob. Since it controls all tmpfs mounts it should only be used either
-for emergency or testing purposes. The values you can set for shmem_enabled are:
-
-== ============================================================
--1 deny: disables huge on shm_mnt and all mounts, for
- emergency use
--2 force: enables huge on shm_mnt and all mounts, w/o needing
- option, for testing
-== ============================================================
+================ ==============================================================
+huge=never Do not allocate huge pages. This is the default.
+huge=always Attempt to allocate huge page every time a new page is needed.
+huge=within_size Only allocate huge page if it will be fully within i_size.
+ Also respect madvise(2) hints.
+huge=advise Only allocate huge page if requested with madvise(2).
+================ ==============================================================
+
+See also Documentation/admin-guide/mm/transhuge.rst, which describes the
+sysfs file /sys/kernel/mm/transparent_hugepage/shmem_enabled: which can
+be used to deny huge pages on all tmpfs mounts in an emergency, or to
+force huge pages on all tmpfs mounts for testing.
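As an illustration only (not part of the patch), the options discussed above
can be combined when mounting a tmpfs instance; a minimal C sketch, with an
arbitrary mount point and size::

  #include <stdio.h>
  #include <sys/mount.h>

  int main(void)
  {
          /* 1G tmpfs, huge pages only within i_size, swap disabled */
          if (mount("tmpfs", "/mnt/scratch", "tmpfs", 0,
                    "size=1g,huge=within_size,noswap") != 0) {
                  perror("mount");
                  return 1;
          }
          return 0;
  }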
tmpfs has a mount option to set the NUMA memory allocation policy for
all files in that instance (if CONFIG_NUMA is enabled) - which can be
packets but should only process up to ``budget`` number of
Rx packets. Rx processing is usually much more expensive.
-In other words, it is recommended to ignore the budget argument when
-performing TX buffer reclamation to ensure that the reclamation is not
-arbitrarily bounded; however, it is required to honor the budget argument
-for RX processing.
+In other words, for Rx processing the ``budget`` argument limits how many
+packets the driver can process in a single poll. Rx-specific APIs like page
+pool or XDP cannot be used at all when ``budget`` is 0.
+skb Tx processing should happen regardless of the ``budget``, but if
+the argument is 0 the driver cannot call any XDP (or page pool) APIs.
.. warning::
- The ``budget`` argument may be 0 if core tries to only process Tx completions
- and no Rx packets.
+ The ``budget`` argument may be 0 if the core tries to only process
+ skb Tx completions and no Rx or XDP packets.
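A minimal poll-handler sketch (hypothetical ``example_*`` driver types and
helpers, not taken from the patch) illustrating these rules::

  static int example_napi_poll(struct napi_struct *napi, int budget)
  {
          struct example_ring *ring = container_of(napi, struct example_ring, napi);
          int work_done = 0;

          /* skb Tx reclaim happens regardless of the budget */
          example_clean_tx_ring(ring);

          /*
           * budget == 0 means the core only wants skb Tx completions:
           * skip Rx entirely and do not call XDP or page pool APIs.
           */
          if (budget)
                  work_done = example_clean_rx_ring(ring, budget);

          if (work_done < budget)
                  napi_complete_done(napi, work_done);

          return work_done;
  }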
The poll method returns the amount of work done. If the driver still
has outstanding work to do (e.g. ``budget`` was exhausted)
Samsung Javier González <javier.gonz@samsung.com>
Microsoft James Morris <jamorris@linux.microsoft.com>
- VMware
Xen Andrew Cooper <andrew.cooper3@citrix.com>
Canonical John Johansen <john.johansen@canonical.com>
Red Hat Josh Poimboeuf <jpoimboe@redhat.com>
SUSE Jiri Kosina <jkosina@suse.cz>
- Amazon
Google Kees Cook <keescook@chromium.org>
- GCC
LLVM Nick Desaulniers <ndesaulniers@google.com>
============= ========================================================
of the report are treated confidentially even after the embargo has been
lifted, in perpetuity.
-Coordination
-------------
-
-Fixes for sensitive bugs, such as those that might lead to privilege
-escalations, may need to be coordinated with the private
-<linux-distros@vs.openwall.org> mailing list so that distribution vendors
-are well prepared to issue a fixed kernel upon public disclosure of the
-upstream fix. Distros will need some time to test the proposed patch and
-will generally request at least a few days of embargo, and vendor update
-publication prefers to happen Tuesday through Thursday. When appropriate,
-the security team can assist with this coordination, or the reporter can
-include linux-distros from the start. In this case, remember to prefix
-the email Subject line with "[vs]" as described in the linux-distros wiki:
-<http://oss-security.openwall.org/wiki/mailing-lists/distros#how-to-use-the-lists>
+Coordination with other groups
+------------------------------
+
+The kernel security team strongly recommends that reporters of potential
+security issues NEVER contact the "linux-distros" mailing list until
+AFTER discussing it with the kernel security team. Do not Cc: both
+lists at once. You may contact the linux-distros mailing list after a
+fix has been agreed on and you fully understand the requirements that
+doing so will impose on you and the kernel community.
+
+The different lists have different goals and the linux-distros rules do
+not contribute to actually fixing any potential security problems.
CVE assignment
--------------
-The security team does not normally assign CVEs, nor do we require them
-for reports or fixes, as this can needlessly complicate the process and
-may delay the bug handling. If a reporter wishes to have a CVE identifier
-assigned ahead of public disclosure, they will need to contact the private
-linux-distros list, described above. When such a CVE identifier is known
-before a patch is provided, it is desirable to mention it in the commit
-message if the reporter agrees.
+The security team does not assign CVEs, nor do we require them for
+reports or fixes, as this can needlessly complicate the process and may
+delay the bug handling. If a reporter wishes to have a CVE identifier
+assigned, they should find one by themselves, for example by contacting
+MITRE directly. However under no circumstances will a patch inclusion
+be delayed to wait for a CVE identifier to arrive.
Non-disclosure agreements
-------------------------
M: Peter Chen <peter.chen@kernel.org>
M: Pawel Laszczak <pawell@cadence.com>
R: Roger Quadros <rogerq@kernel.org>
-R: Aswath Govindraju <a-govindraju@ti.com>
L: linux-usb@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/peter.chen/usb.git
F: include/linux/compiler_attributes.h
COMPUTE EXPRESS LINK (CXL)
+M: Davidlohr Bueso <dave@stgolabs.net>
+M: Jonathan Cameron <jonathan.cameron@huawei.com>
+M: Dave Jiang <dave.jiang@intel.com>
M: Alison Schofield <alison.schofield@intel.com>
M: Vishal Verma <vishal.l.verma@intel.com>
M: Ira Weiny <ira.weiny@intel.com>
-M: Ben Widawsky <bwidawsk@kernel.org>
M: Dan Williams <dan.j.williams@intel.com>
L: linux-cxl@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/sound/davinci-mcasp-audio.yaml
F: sound/soc/ti/
+TEXAS INSTRUMENTS AUDIO (ASoC/HDA) DRIVERS
+M: Shenghao Ding <shenghao-ding@ti.com>
+M: Kevin Lu <kevin-lu@ti.com>
+M: Baojun Xu <x1077012@ti.com>
+L: alsa-devel@alsa-project.org (moderated for non-subscribers)
+S: Maintained
+F: Documentation/devicetree/bindings/sound/tas2552.txt
+F: Documentation/devicetree/bindings/sound/tas2562.yaml
+F: Documentation/devicetree/bindings/sound/tas2770.yaml
+F: Documentation/devicetree/bindings/sound/tas27xx.yaml
+F: Documentation/devicetree/bindings/sound/ti,pcm1681.txt
+F: Documentation/devicetree/bindings/sound/ti,pcm3168a.yaml
+F: Documentation/devicetree/bindings/sound/ti,tlv320*.yaml
+F: Documentation/devicetree/bindings/sound/tlv320adcx140.yaml
+F: Documentation/devicetree/bindings/sound/tlv320aic31xx.txt
+F: Documentation/devicetree/bindings/sound/tpa6130a2.txt
+F: include/sound/tas2*.h
+F: include/sound/tlv320*.h
+F: include/sound/tpa6130a2-plat.h
+F: sound/pci/hda/tas2781_hda_i2c.c
+F: sound/soc/codecs/pcm1681.c
+F: sound/soc/codecs/pcm1789*.*
+F: sound/soc/codecs/pcm179x*.*
+F: sound/soc/codecs/pcm186x*.*
+F: sound/soc/codecs/pcm3008.*
+F: sound/soc/codecs/pcm3060*.*
+F: sound/soc/codecs/pcm3168a*.*
+F: sound/soc/codecs/pcm5102a.c
+F: sound/soc/codecs/pcm512x*.*
+F: sound/soc/codecs/tas2*.*
+F: sound/soc/codecs/tlv320*.*
+F: sound/soc/codecs/tpa6130a2.*
+
TEXAS INSTRUMENTS DMA DRIVERS
M: Peter Ujfalusi <peter.ujfalusi@gmail.com>
L: dmaengine@vger.kernel.org
TTY LAYER
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
M: Jiri Slaby <jirislaby@kernel.org>
+L: linux-kernel@vger.kernel.org
+L: linux-serial@vger.kernel.org
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty.git
F: Documentation/driver-api/serial/
F: drivers/tty/
+F: drivers/tty/serial/serial_base.h
+F: drivers/tty/serial/serial_base_bus.c
F: drivers/tty/serial/serial_core.c
+F: drivers/tty/serial/serial_ctrl.c
+F: drivers/tty/serial/serial_port.c
F: include/linux/selection.h
F: include/linux/serial.h
F: include/linux/serial_core.h
VERSION = 6
PATCHLEVEL = 5
SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
NAME = Hurr durr I'ma ninja sloth
# *DOCUMENTATION*
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_EXT4_FS=y
-CONFIG_AUTOFS4_FS=y
+CONFIG_AUTOFS_FS=y
CONFIG_FUSE_FS=y
CONFIG_CUSE=y
CONFIG_FSCACHE=y
CONFIG_EXT3_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_XFS_FS=m
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
CONFIG_PHY_EXYNOS5250_SATA=y
CONFIG_EXT2_FS=y
CONFIG_EXT4_FS=y
-CONFIG_AUTOFS4_FS=y
+CONFIG_AUTOFS_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=y
CONFIG_EXT2_FS=y
-CONFIG_AUTOFS4_FS=y
+CONFIG_AUTOFS_FS=y
CONFIG_ISO9660_FS=m
CONFIG_JOLIET=y
CONFIG_MSDOS_FS=m
CONFIG_QUOTA=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=y
+CONFIG_AUTOFS_FS=y
CONFIG_FUSE_FS=y
CONFIG_ISO9660_FS=m
CONFIG_JOLIET=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_FANOTIFY=y
-CONFIG_AUTOFS4_FS=y
+CONFIG_AUTOFS_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_NTFS_FS=y
CONFIG_PWM=y
CONFIG_PWM_LPC32XX=y
CONFIG_EXT2_FS=y
-CONFIG_AUTOFS4_FS=y
+CONFIG_AUTOFS_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
CONFIG_MEMORY=y
# CONFIG_ARM_PMU is not set
CONFIG_EXT4_FS=y
-CONFIG_AUTOFS4_FS=y
+CONFIG_AUTOFS_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_NTFS_FS=y
CONFIG_STM32_TIMER_CNT=m
CONFIG_STM32_LPTIMER_CNT=m
CONFIG_EXT4_FS=y
-CONFIG_AUTOFS4_FS=y
+CONFIG_AUTOFS_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_NTFS_FS=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_DNOTIFY is not set
-CONFIG_AUTOFS4_FS=y
+CONFIG_AUTOFS_FS=y
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_MSDOS_FS=y
CONFIG_FANOTIFY=y
CONFIG_QUOTA=y
CONFIG_QFMT_V2=y
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
CONFIG_REISERFS_FS_POSIX_ACL=y
CONFIG_REISERFS_FS_SECURITY=y
CONFIG_XFS_FS=m
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_FUSE_FS=m
CONFIG_CUSE=m
CONFIG_FSCACHE=y
CONFIG_RTC_DRV_PCF8583=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_MSDOS_FS=m
CONFIG_PHY_S5PV210_USB2=y
CONFIG_EXT2_FS=y
CONFIG_EXT4_FS=y
-CONFIG_AUTOFS4_FS=y
+CONFIG_AUTOFS_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT3_FS=y
-CONFIG_AUTOFS4_FS=y
+CONFIG_AUTOFS_FS=y
CONFIG_VFAT_FS=y
CONFIG_NTFS_FS=y
CONFIG_NTFS_RW=y
CONFIG_EXT2_FS_SECURITY=y
CONFIG_EXT3_FS=y
CONFIG_EXT3_FS_SECURITY=y
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_FUSE_FS=y
CONFIG_MSDOS_FS=m
CONFIG_VFAT_FS=m
CONFIG_EXT2_FS_SECURITY=y
CONFIG_EXT3_FS=y
CONFIG_EXT3_FS_SECURITY=y
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_MSDOS_FS=m
CONFIG_VFAT_FS=m
CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
CONFIG_EXT2_FS_SECURITY=y
CONFIG_EXT3_FS=y
CONFIG_EXT3_FS_SECURITY=y
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_MSDOS_FS=m
CONFIG_VFAT_FS=m
CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
CONFIG_FANOTIFY=y
CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
CONFIG_QUOTA=y
-CONFIG_AUTOFS4_FS=y
+CONFIG_AUTOFS_FS=y
CONFIG_FUSE_FS=m
CONFIG_CUSE=m
CONFIG_OVERLAY_FS=m
if (task == current)
put_cpu_fpsimd_context();
+ task_set_vl(task, type, vl);
+
/*
* Free the changed states if they are not in use, SME will be
* reallocated to the correct size on next use and we just
if (free_sme)
sme_free(task);
- task_set_vl(task, type, vl);
-
out:
update_tsk_thread_flag(task, vec_vl_inherit_flag(type),
flags & PR_SVE_VL_INHERIT);
fpsimd_flush_thread_vl(ARM64_VEC_SME);
current->thread.svcr = 0;
- sme_smstop();
}
current->thread.fp_type = FP_STATE_FPSIMD;
CONFIG_XFS_FS=y
CONFIG_XFS_QUOTA=y
CONFIG_XFS_POSIX_ACL=y
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_ISO9660_FS=m
CONFIG_JOLIET=y
CONFIG_UDF_FS=m
CONFIG_REISERFS_FS_POSIX_ACL=y
CONFIG_REISERFS_FS_SECURITY=y
CONFIG_XFS_FS=y
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_ISO9660_FS=m
CONFIG_JOLIET=y
CONFIG_UDF_FS=m
CONFIG_REISERFS_FS_POSIX_ACL=y
CONFIG_REISERFS_FS_SECURITY=y
CONFIG_XFS_FS=y
-CONFIG_AUTOFS4_FS=y
+CONFIG_AUTOFS_FS=y
CONFIG_ISO9660_FS=m
CONFIG_JOLIET=y
CONFIG_UDF_FS=m
CONFIG_REISERFS_FS_POSIX_ACL=y
CONFIG_REISERFS_FS_SECURITY=y
CONFIG_XFS_FS=y
-CONFIG_AUTOFS4_FS=y
+CONFIG_AUTOFS_FS=y
CONFIG_ISO9660_FS=m
CONFIG_JOLIET=y
CONFIG_UDF_FS=m
select ARCH_HAS_CPU_FINALIZE_INIT
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_NMI_SAFE_THIS_CPU_OPS
+ select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_INLINE_READ_LOCK if !PREEMPTION
ifdef CONFIG_AS_HAS_EXPLICIT_RELOCS
cflags-y += $(call cc-option,-mexplicit-relocs)
KBUILD_CFLAGS_KERNEL += $(call cc-option,-mdirect-extern-access)
+KBUILD_AFLAGS_MODULE += $(call cc-option,-mno-relax) $(call cc-option,-Wa$(comma)-mno-relax)
+KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-relax) $(call cc-option,-Wa$(comma)-mno-relax)
else
cflags-y += $(call cc-option,-mno-explicit-relocs)
KBUILD_AFLAGS_KERNEL += -Wa,-mla-global-with-pcrel
KBUILD_LDFLAGS += -m $(ld-emul)
-ifdef CONFIG_LOONGARCH
+ifdef need-compiler
CHECKFLAGS += $(shell $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) -dM -E -x c /dev/null | \
grep -E -vw '__GNUC_(MINOR_|PATCHLEVEL_)?_' | \
sed -e "s/^\#define /-D'/" -e "s/ /'='/" -e "s/$$/'/" -e 's/\$$/&&/g')
# CONFIG_PRINT_QUOTA_WARNING is not set
CONFIG_QFMT_V1=m
CONFIG_QFMT_V2=m
-CONFIG_AUTOFS4_FS=y
+CONFIG_AUTOFS_FS=y
CONFIG_FUSE_FS=m
CONFIG_OVERLAY_FS=y
CONFIG_OVERLAY_FS_INDEX=y
static inline void init_lsx_upper(void)
{
- /*
- * Check cpu_has_lsx only if it's a constant. This will allow the
- * compiler to optimise out code for CPUs without LSX without adding
- * an extra redundant check for CPUs with LSX.
- */
- if (__builtin_constant_p(cpu_has_lsx) && !cpu_has_lsx)
- return;
-
- _init_lsx_upper();
+ if (cpu_has_lsx)
+ _init_lsx_upper();
}
static inline void restore_lsx_upper(struct task_struct *t)
static inline int thread_lsx_context_live(void)
{
- if (__builtin_constant_p(cpu_has_lsx) && !cpu_has_lsx)
+ if (!cpu_has_lsx)
return 0;
return test_thread_flag(TIF_LSX_CTX_LIVE);
static inline int thread_lasx_context_live(void)
{
- if (__builtin_constant_p(cpu_has_lasx) && !cpu_has_lasx)
+ if (!cpu_has_lasx)
return 0;
return test_thread_flag(TIF_LASX_CTX_LIVE);
strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
strlcat(boot_command_line, init_command_line, COMMAND_LINE_SIZE);
+ goto out;
}
#endif
+ /*
+ * Append built-in command line to the bootloader command line if
+ * CONFIG_CMDLINE_EXTEND is enabled.
+ */
+ if (IS_ENABLED(CONFIG_CMDLINE_EXTEND) && CONFIG_CMDLINE[0]) {
+ strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
+ strlcat(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
+ }
+
+ /*
+ * Use built-in command line if the bootloader command line is empty.
+ */
+ if (IS_ENABLED(CONFIG_CMDLINE_BOOTLOADER) && !boot_command_line[0])
+ strscpy(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
+
out:
*cmdline_p = boot_command_line;
}
addi.d a3, a2, -8
bgeu a0, a3, .Llt8
15: st.d zero, a0, 0
+ addi.d a0, a0, 8
.Llt8:
16: st.d zero, a2, -8
_asm_extable 13b, .L_fixup_handle_0
_asm_extable 14b, .L_fixup_handle_1
_asm_extable 15b, .L_fixup_handle_0
- _asm_extable 16b, .L_fixup_handle_1
+ _asm_extable 16b, .L_fixup_handle_0
_asm_extable 17b, .L_fixup_handle_s0
_asm_extable 18b, .L_fixup_handle_s0
_asm_extable 19b, .L_fixup_handle_s0
bgeu a1, a4, .Llt8
30: ld.d t0, a1, 0
31: st.d t0, a0, 0
+ addi.d a0, a0, 8
.Llt8:
32: ld.d t0, a3, -8
_asm_extable 30b, .L_fixup_handle_0
_asm_extable 31b, .L_fixup_handle_0
_asm_extable 32b, .L_fixup_handle_0
- _asm_extable 33b, .L_fixup_handle_1
+ _asm_extable 33b, .L_fixup_handle_0
_asm_extable 34b, .L_fixup_handle_s0
_asm_extable 35b, .L_fixup_handle_s0
_asm_extable 36b, .L_fixup_handle_s0
* no need to call lu32id to do a new filled operation.
*/
imm_51_31 = (imm >> 31) & 0x1fffff;
- if (imm_51_31 != 0 || imm_51_31 != 0x1fffff) {
+ if (imm_51_31 != 0 && imm_51_31 != 0x1fffff) {
/* lu32id rd, imm_51_32 */
imm_51_32 = (imm >> 32) & 0xfffff;
emit_insn(ctx, lu32id, rd, imm_51_32);
dbf %d0,morein
rts
- .section .fixup,#alloc,#execinstr
+ .section .fixup,"ax"
.even
1:
jbsr fpsp040_die
jbra .Lnotkern
- .section __ex_table,#alloc
+ .section __ex_table,"a"
.align 4
.long in_ea,1b
| Execption handling for movs access to illegal memory
- .section .fixup,#alloc,#execinstr
+ .section .fixup,"ax"
.even
1: moveq #-1,%d1
rts
-.section __ex_table,#alloc
+.section __ex_table,"a"
.align 4
.long dmrbuae,1b
.long dmrwuae,1b
lea %pc@(.Lcopy),%a4
2: addl #0x00000000,%a4 /* virt_to_phys() */
- .section ".m68k_fixup","aw"
+ .section .m68k_fixup,"aw"
.long M68K_FIXUP_MEMOFFSET, 2b+2
.previous
lea %pc@(.Lcont040),%a4
5: addl #0x00000000,%a4 /* virt_to_phys() */
- .section ".m68k_fixup","aw"
+ .section .m68k_fixup,"aw"
.long M68K_FIXUP_MEMOFFSET, 5b+2
.previous
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
CONFIG_QFMT_V2=m
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_FUSE_FS=m
CONFIG_ISO9660_FS=m
CONFIG_JOLIET=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_REISERFS_FS=m
-CONFIG_AUTOFS4_FS=y
+CONFIG_AUTOFS_FS=y
CONFIG_FUSE_FS=y
CONFIG_ISO9660_FS=m
CONFIG_JOLIET=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
CONFIG_QFMT_V2=m
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_FUSE_FS=m
CONFIG_ISO9660_FS=m
CONFIG_JOLIET=y
CONFIG_QUOTA=y
CONFIG_QFMT_V1=m
CONFIG_QFMT_V2=m
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_FUSE_FS=m
CONFIG_ISO9660_FS=m
CONFIG_JOLIET=y
CONFIG_REISERFS_FS_SECURITY=y
CONFIG_XFS_FS=m
CONFIG_XFS_QUOTA=y
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_FUSE_FS=m
CONFIG_ISO9660_FS=m
CONFIG_JOLIET=y
CONFIG_BTRFS_FS=m
CONFIG_QUOTA=y
CONFIG_QFMT_V2=m
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_FSCACHE=m
CONFIG_CACHEFILES=m
CONFIG_ISO9660_FS=m
CONFIG_XFS_POSIX_ACL=y
CONFIG_QUOTA=y
# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=y
+CONFIG_AUTOFS_FS=y
CONFIG_FUSE_FS=m
CONFIG_ISO9660_FS=m
CONFIG_JOLIET=y
# CONFIG_PRINT_QUOTA_WARNING is not set
CONFIG_QFMT_V1=m
CONFIG_QFMT_V2=m
-CONFIG_AUTOFS4_FS=y
+CONFIG_AUTOFS_FS=y
CONFIG_FUSE_FS=m
CONFIG_VIRTIO_FS=m
CONFIG_FSCACHE=m
CONFIG_EXT3_FS_POSIX_ACL=y
CONFIG_EXT3_FS_SECURITY=y
CONFIG_QUOTA=y
-CONFIG_AUTOFS4_FS=y
+CONFIG_AUTOFS_FS=y
CONFIG_FUSE_FS=m
CONFIG_ISO9660_FS=m
CONFIG_JOLIET=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_FUSE_FS=m
CONFIG_FSCACHE=m
CONFIG_ISO9660_FS=m
CONFIG_REISERFS_FS_SECURITY=y
CONFIG_XFS_FS=m
CONFIG_XFS_QUOTA=y
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_FUSE_FS=m
CONFIG_ISO9660_FS=m
CONFIG_JOLIET=y
CONFIG_QUOTA=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_QFMT_V2=y
-CONFIG_AUTOFS4_FS=y
+CONFIG_AUTOFS_FS=y
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_VFAT_FS=y
CONFIG_QUOTA=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_QFMT_V2=y
-CONFIG_AUTOFS4_FS=y
+CONFIG_AUTOFS_FS=y
CONFIG_FUSE_FS=y
CONFIG_CUSE=y
CONFIG_ISO9660_FS=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_REISERFS_FS=y
-CONFIG_AUTOFS4_FS=y
+CONFIG_AUTOFS_FS=y
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_ZISOFS=y
CONFIG_SOUND=m
CONFIG_EXT2_FS=y
CONFIG_EXT4_FS=y
-CONFIG_AUTOFS4_FS=y
+CONFIG_AUTOFS_FS=y
CONFIG_ISO9660_FS=m
CONFIG_UDF_FS=m
CONFIG_MSDOS_FS=m
CONFIG_UIO=m
CONFIG_EXT2_FS=y
CONFIG_EXT4_FS=y
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_ISO9660_FS=m
CONFIG_JOLIET=y
CONFIG_UDF_FS=m
# CONFIG_USB_SUPPORT is not set
CONFIG_EXT2_FS=y
CONFIG_EXT4_FS=y
-CONFIG_AUTOFS4_FS=y
+CONFIG_AUTOFS_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_CRAMFS=y
CONFIG_USB_G_SERIAL=y
CONFIG_UIO=y
CONFIG_EXT2_FS=y
-CONFIG_AUTOFS4_FS=y
+CONFIG_AUTOFS_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT4_FS=y
-CONFIG_AUTOFS4_FS=y
+CONFIG_AUTOFS_FS=y
CONFIG_ISO9660_FS=y
CONFIG_UDF_FS=y
CONFIG_MSDOS_FS=y
CONFIG_EXT2_FS=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_FUSE_FS=m
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_BTRFS_FS_POSIX_ACL=y
CONFIG_NILFS2_FS=m
CONFIG_FANOTIFY=y
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_FUSE_FS=m
CONFIG_OVERLAY_FS=m
CONFIG_ISO9660_FS=y
CONFIG_BTRFS_FS_POSIX_ACL=y
CONFIG_NILFS2_FS=m
CONFIG_FS_DAX=y
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_FUSE_FS=m
CONFIG_OVERLAY_FS=m
CONFIG_ISO9660_FS=y
CONFIG_XFS_FS=m
CONFIG_XFS_POSIX_ACL=y
CONFIG_FS_DAX=y
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_ISO9660_FS=y
CONFIG_UDF_FS=m
CONFIG_MSDOS_FS=y
CONFIG_GFS2_FS=m
CONFIG_FS_DAX=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_FUSE_FS=m
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_EXT4_FS=y
CONFIG_QUOTA=y
CONFIG_QFMT_V2=y
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_ISO9660_FS=m
CONFIG_JOLIET=y
CONFIG_UDF_FS=m
CONFIG_EXT4_FS_SECURITY=y
CONFIG_BTRFS_FS=m
CONFIG_BTRFS_FS_POSIX_ACL=y
-CONFIG_AUTOFS4_FS=y
+CONFIG_AUTOFS_FS=y
CONFIG_OVERLAY_FS=m
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_RPMSG_VIRTIO=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
-CONFIG_AUTOFS4_FS=y
+CONFIG_AUTOFS_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
CONFIG_QUOTA_DEBUG=y
CONFIG_QFMT_V1=m
CONFIG_QFMT_V2=m
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_FUSE_FS=y
CONFIG_CUSE=m
CONFIG_VIRTIO_FS=m
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_QFMT_V1=m
CONFIG_QFMT_V2=m
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_FUSE_FS=y
CONFIG_CUSE=m
CONFIG_VIRTIO_FS=m
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_AUTOFS4_FS=y
+CONFIG_AUTOFS_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_AUTOFS4_FS=y
+CONFIG_AUTOFS_FS=y
CONFIG_ISO9660_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_EXT4_FS=y
CONFIG_XFS_FS=y
CONFIG_BTRFS_FS=y
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_FUSE_FS=y
CONFIG_CUSE=m
CONFIG_FSCACHE=m
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_AUTOFS4_FS=y
+CONFIG_AUTOFS_FS=y
CONFIG_ISO9660_FS=m
CONFIG_JOLIET=y
CONFIG_ZISOFS=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_AUTOFS4_FS=y
+CONFIG_AUTOFS_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_PROC_KCORE=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT2_FS_SECURITY=y
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_ISO9660_FS=m
CONFIG_PROC_KCORE=y
CONFIG_ROMFS_FS=m
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=y
CONFIG_QUOTA=y
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_ISO9660_FS=m
CONFIG_JOLIET=y
CONFIG_PROC_KCORE=y
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=y
CONFIG_QUOTA=y
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
CONFIG_ISO9660_FS=m
CONFIG_JOLIET=y
CONFIG_PROC_KCORE=y
* Copyright (C) 2002 - 2008 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
-#include <linux/minmax.h>
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
static int write_sigio_thread(void *unused)
{
- struct pollfds *fds;
+ struct pollfds *fds, tmp;
struct pollfd *p;
int i, n, respond_fd;
char c;
"write_sigio_thread : "
"read on socket failed, "
"err = %d\n", errno);
- swap(current_poll, next_poll);
+ tmp = current_poll;
+ current_poll = next_poll;
+ next_poll = tmp;
respond_fd = sigio_private[1];
}
else {
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
CONFIG_QFMT_V2=y
-CONFIG_AUTOFS4_FS=y
+CONFIG_AUTOFS_FS=y
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_ZISOFS=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
CONFIG_QFMT_V2=y
-CONFIG_AUTOFS4_FS=y
+CONFIG_AUTOFS_FS=y
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_ZISOFS=y
*/
.pushsection .text, "ax"
SYM_CODE_START(ret_from_fork_asm)
- UNWIND_HINT_REGS
+ /*
+ * This is the start of the kernel stack; even though there's a
+ * register set at the top, the regset isn't necessarily coherent
+ * (consider kthreads) and one cannot unwind further.
+ *
+ * This ensures stack unwinds of kernel threads terminate in a known
+ * good state.
+ */
+ UNWIND_HINT_END_OF_STACK
ANNOTATE_NOENDBR // copy_thread
CALL_DEPTH_ACCOUNT
movq %r12, %rcx /* fn_arg */
call ret_from_fork
+ /*
+ * Set the stack state to what is expected for the target function
+ * -- at this point the register set should be a valid user set
+ * and unwind should work normally.
+ */
+ UNWIND_HINT_REGS
jmp swapgs_restore_regs_and_return_to_usermode
SYM_CODE_END(ret_from_fork_asm)
.popsection
KVM_X86_OP(get_cpl)
KVM_X86_OP(set_segment)
KVM_X86_OP(get_cs_db_l_bits)
+KVM_X86_OP(is_valid_cr0)
KVM_X86_OP(set_cr0)
KVM_X86_OP_OPTIONAL(post_set_cr3)
KVM_X86_OP(is_valid_cr4)
void (*set_segment)(struct kvm_vcpu *vcpu,
struct kvm_segment *var, int seg);
void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
+ bool (*is_valid_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
void (*post_set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
- bool (*is_valid_cr4)(struct kvm_vcpu *vcpu, unsigned long cr0);
+ bool (*is_valid_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
int (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
#include <asm/cpu.h>
#include <linux/earlycpio.h>
#include <linux/initrd.h>
+#include <asm/microcode_amd.h>
struct ucode_patch {
struct list_head plist;
extern void load_ucode_amd_ap(unsigned int family);
extern int __init save_microcode_in_initrd_amd(unsigned int family);
void reload_ucode_amd(unsigned int cpu);
+extern void amd_check_microcode(void);
#else
static inline void __init load_ucode_amd_bsp(unsigned int family) {}
static inline void load_ucode_amd_ap(unsigned int family) {}
static inline int __init
save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; }
static inline void reload_ucode_amd(unsigned int cpu) {}
+static inline void amd_check_microcode(void) {}
#endif
#endif /* _ASM_X86_MICROCODE_AMD_H */
#define MSR_AMD64_DE_CFG 0xc0011029
#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT 1
#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE BIT_ULL(MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT)
+#define MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT 9
#define MSR_AMD64_BU_CFG2 0xc001102a
#define MSR_AMD64_IBSFETCHCTL 0xc0011030
#include "cpu.h"
-static const int amd_erratum_383[];
-static const int amd_erratum_400[];
-static const int amd_erratum_1054[];
-static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);
-
/*
* nodes_per_socket: Stores the number of nodes per socket.
* Refer to Fam15h Models 00-0fh BKDG - CPUID Fn8000_001E_ECX
*/
static u32 nodes_per_socket = 1;
+/*
+ * AMD errata checking
+ *
+ * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
+ * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
+ * have an OSVW id assigned, which it takes as first argument. Both take a
+ * variable number of family-specific model-stepping ranges created by
+ * AMD_MODEL_RANGE().
+ *
+ * Example:
+ *
+ * const int amd_erratum_319[] =
+ * AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
+ * AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
+ * AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
+ */
+
+#define AMD_LEGACY_ERRATUM(...) { -1, __VA_ARGS__, 0 }
+#define AMD_OSVW_ERRATUM(osvw_id, ...) { osvw_id, __VA_ARGS__, 0 }
+#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
+ ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
+#define AMD_MODEL_RANGE_FAMILY(range) (((range) >> 24) & 0xff)
+#define AMD_MODEL_RANGE_START(range) (((range) >> 12) & 0xfff)
+#define AMD_MODEL_RANGE_END(range) ((range) & 0xfff)
+
+static const int amd_erratum_400[] =
+ AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
+ AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));
+
+static const int amd_erratum_383[] =
+ AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
+
+/* #1054: Instructions Retired Performance Counter May Be Inaccurate */
+static const int amd_erratum_1054[] =
+ AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf));
+
+static const int amd_zenbleed[] =
+ AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x30, 0x0, 0x4f, 0xf),
+ AMD_MODEL_RANGE(0x17, 0x60, 0x0, 0x7f, 0xf),
+ AMD_MODEL_RANGE(0x17, 0xa0, 0x0, 0xaf, 0xf));
+
+static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
+{
+ int osvw_id = *erratum++;
+ u32 range;
+ u32 ms;
+
+ if (osvw_id >= 0 && osvw_id < 65536 &&
+ cpu_has(cpu, X86_FEATURE_OSVW)) {
+ u64 osvw_len;
+
+ rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
+ if (osvw_id < osvw_len) {
+ u64 osvw_bits;
+
+ rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
+ osvw_bits);
+ return osvw_bits & (1ULL << (osvw_id & 0x3f));
+ }
+ }
+
+ /* OSVW unavailable or ID unknown, match family-model-stepping range */
+ ms = (cpu->x86_model << 4) | cpu->x86_stepping;
+ while ((range = *erratum++))
+ if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
+ (ms >= AMD_MODEL_RANGE_START(range)) &&
+ (ms <= AMD_MODEL_RANGE_END(range)))
+ return true;
+
+ return false;
+}
+
static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
u32 gprs[8] = { 0 };
}
}
+static bool cpu_has_zenbleed_microcode(void)
+{
+ u32 good_rev = 0;
+
+ switch (boot_cpu_data.x86_model) {
+ case 0x30 ... 0x3f: good_rev = 0x0830107a; break;
+ case 0x60 ... 0x67: good_rev = 0x0860010b; break;
+ case 0x68 ... 0x6f: good_rev = 0x08608105; break;
+ case 0x70 ... 0x7f: good_rev = 0x08701032; break;
+ case 0xa0 ... 0xaf: good_rev = 0x08a00008; break;
+
+ default:
+ return false;
+ break;
+ }
+
+ if (boot_cpu_data.microcode < good_rev)
+ return false;
+
+ return true;
+}
+
+static void zenbleed_check(struct cpuinfo_x86 *c)
+{
+ if (!cpu_has_amd_erratum(c, amd_zenbleed))
+ return;
+
+ if (cpu_has(c, X86_FEATURE_HYPERVISOR))
+ return;
+
+ if (!cpu_has(c, X86_FEATURE_AVX))
+ return;
+
+ if (!cpu_has_zenbleed_microcode()) {
+ pr_notice_once("Zenbleed: please update your microcode for the most optimal fix\n");
+ msr_set_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT);
+ } else {
+ msr_clear_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT);
+ }
+}
+
static void init_amd(struct cpuinfo_x86 *c)
{
early_init_amd(c);
if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
cpu_has(c, X86_FEATURE_AUTOIBRS))
WARN_ON_ONCE(msr_set_bit(MSR_EFER, _EFER_AUTOIBRS));
+
+ zenbleed_check(c);
}
#ifdef CONFIG_X86_32
cpu_dev_register(amd_cpu_dev);
-/*
- * AMD errata checking
- *
- * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
- * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
- * have an OSVW id assigned, which it takes as first argument. Both take a
- * variable number of family-specific model-stepping ranges created by
- * AMD_MODEL_RANGE().
- *
- * Example:
- *
- * const int amd_erratum_319[] =
- * AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
- * AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
- * AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
- */
-
-#define AMD_LEGACY_ERRATUM(...) { -1, __VA_ARGS__, 0 }
-#define AMD_OSVW_ERRATUM(osvw_id, ...) { osvw_id, __VA_ARGS__, 0 }
-#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
- ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
-#define AMD_MODEL_RANGE_FAMILY(range) (((range) >> 24) & 0xff)
-#define AMD_MODEL_RANGE_START(range) (((range) >> 12) & 0xfff)
-#define AMD_MODEL_RANGE_END(range) ((range) & 0xfff)
-
-static const int amd_erratum_400[] =
- AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
- AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));
-
-static const int amd_erratum_383[] =
- AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
-
-/* #1054: Instructions Retired Performance Counter May Be Inaccurate */
-static const int amd_erratum_1054[] =
- AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf));
-
-static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
-{
- int osvw_id = *erratum++;
- u32 range;
- u32 ms;
-
- if (osvw_id >= 0 && osvw_id < 65536 &&
- cpu_has(cpu, X86_FEATURE_OSVW)) {
- u64 osvw_len;
-
- rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
- if (osvw_id < osvw_len) {
- u64 osvw_bits;
-
- rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
- osvw_bits);
- return osvw_bits & (1ULL << (osvw_id & 0x3f));
- }
- }
-
- /* OSVW unavailable or ID unknown, match family-model-stepping range */
- ms = (cpu->x86_model << 4) | cpu->x86_stepping;
- while ((range = *erratum++))
- if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
- (ms >= AMD_MODEL_RANGE_START(range)) &&
- (ms <= AMD_MODEL_RANGE_END(range)))
- return true;
-
- return false;
-}
-
static DEFINE_PER_CPU_READ_MOSTLY(unsigned long[4], amd_dr_addr_mask);
static unsigned int amd_msr_dr_addr_masks[] = {
return 255;
}
EXPORT_SYMBOL_GPL(amd_get_highest_perf);
+
+static void zenbleed_check_cpu(void *unused)
+{
+ struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
+
+ zenbleed_check(c);
+}
+
+void amd_check_microcode(void)
+{
+ on_each_cpu(zenbleed_check_cpu, NULL, 1);
+}
}
/*
- * If no STIBP, enhanced IBRS is enabled, or SMT impossible, STIBP
+ * If no STIBP, Intel enhanced IBRS is enabled, or SMT impossible, STIBP
* is not required.
*
- * Enhanced IBRS also protects against cross-thread branch target
+ * Intel's Enhanced IBRS also protects against cross-thread branch target
* injection in user-mode as the IBRS bit remains always set which
* implicitly enables cross-thread protections. However, in legacy IBRS
* mode, the IBRS bit is set only on kernel entry and cleared on return
- * to userspace. This disables the implicit cross-thread protection,
- * so allow for STIBP to be selected in that case.
+ * to userspace. AMD Automatic IBRS also does not protect userspace.
+ * These modes therefore disable the implicit cross-thread protection,
+ * so allow for STIBP to be selected in those cases.
*/
if (!boot_cpu_has(X86_FEATURE_STIBP) ||
!smt_possible ||
- spectre_v2_in_eibrs_mode(spectre_v2_enabled))
+ (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
+ !boot_cpu_has(X86_FEATURE_AUTOIBRS)))
return;
/*
static char *stibp_state(void)
{
- if (spectre_v2_in_eibrs_mode(spectre_v2_enabled))
+ if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
+ !boot_cpu_has(X86_FEATURE_AUTOIBRS))
return "";
switch (spectre_v2_user_stibp) {
perf_check_microcode();
+ amd_check_microcode();
+
store_cpu_caps(&curr_info);
if (!memcmp(&prev_info->x86_capability, &curr_info.x86_capability,
struct threshold_block *pos = NULL;
struct threshold_block *tmp = NULL;
- kobject_del(b->kobj);
+ kobject_put(b->kobj);
list_for_each_entry_safe(pos, tmp, &b->blocks->miscj, miscj)
- kobject_del(&pos->kobj);
+		kobject_put(&pos->kobj);
}
static void threshold_remove_bank(struct threshold_bank *bank)
}
static bool gp_try_fixup_and_notify(struct pt_regs *regs, int trapnr,
- unsigned long error_code, const char *str)
+ unsigned long error_code, const char *str,
+ unsigned long address)
{
- if (fixup_exception(regs, trapnr, error_code, 0))
+ if (fixup_exception(regs, trapnr, error_code, address))
return true;
current->thread.error_code = error_code;
goto exit;
}
- if (gp_try_fixup_and_notify(regs, X86_TRAP_GP, error_code, desc))
+ if (gp_try_fixup_and_notify(regs, X86_TRAP_GP, error_code, desc, 0))
goto exit;
if (error_code)
#define VE_FAULT_STR "VE fault"
-static void ve_raise_fault(struct pt_regs *regs, long error_code)
+static void ve_raise_fault(struct pt_regs *regs, long error_code,
+ unsigned long address)
{
if (user_mode(regs)) {
gp_user_force_sig_segv(regs, X86_TRAP_VE, error_code, VE_FAULT_STR);
return;
}
- if (gp_try_fixup_and_notify(regs, X86_TRAP_VE, error_code, VE_FAULT_STR))
+ if (gp_try_fixup_and_notify(regs, X86_TRAP_VE, error_code,
+ VE_FAULT_STR, address)) {
return;
+ }
- die_addr(VE_FAULT_STR, regs, error_code, 0);
+ die_addr(VE_FAULT_STR, regs, error_code, address);
}
/*
* it successfully, treat it as #GP(0) and handle it.
*/
if (!tdx_handle_virt_exception(regs, &ve))
- ve_raise_fault(regs, 0);
+ ve_raise_fault(regs, 0, ve.gla);
cond_local_irq_disable(regs);
}
*max_irr = -1;
for (i = vec = 0; i <= 7; i++, vec += 32) {
+ u32 *p_irr = (u32 *)(regs + APIC_IRR + i * 0x10);
+
+ irr_val = *p_irr;
pir_val = READ_ONCE(pir[i]);
- irr_val = *((u32 *)(regs + APIC_IRR + i * 0x10));
+
if (pir_val) {
+ pir_val = xchg(&pir[i], 0);
+
prev_irr_val = irr_val;
- irr_val |= xchg(&pir[i], 0);
- *((u32 *)(regs + APIC_IRR + i * 0x10)) = irr_val;
- if (prev_irr_val != irr_val) {
- max_updated_irr =
- __fls(irr_val ^ prev_irr_val) + vec;
- }
+ do {
+ irr_val = prev_irr_val | pir_val;
+ } while (prev_irr_val != irr_val &&
+ !try_cmpxchg(p_irr, &prev_irr_val, irr_val));
+
+ if (prev_irr_val != irr_val)
+ max_updated_irr = __fls(irr_val ^ prev_irr_val) + vec;
}
if (irr_val)
*max_irr = __fls(irr_val) + vec;
bool kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir, int *max_irr)
{
struct kvm_lapic *apic = vcpu->arch.apic;
+ bool irr_updated = __kvm_apic_update_irr(pir, apic->regs, max_irr);
- return __kvm_apic_update_irr(pir, apic->regs, max_irr);
+ if (unlikely(!apic->apicv_active && irr_updated))
+ apic->irr_pending = true;
+ return irr_updated;
}
EXPORT_SYMBOL_GPL(kvm_apic_update_irr);
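
The rewritten __kvm_apic_update_irr() above no longer stores the merged value blindly; it ORs the snapshot of posted-interrupt bits into the IRR word with a compare-and-exchange retry loop so a concurrent IRR update is never lost. A minimal userspace sketch of that merge pattern, using C11 atomics in place of the kernel's try_cmpxchg() (the names here are illustrative, not KVM's):

#include <stdatomic.h>
#include <stdio.h>

/* Atomically OR 'pending' into *word, retrying if another thread changed
 * *word in the meantime. Returns the value observed before the merge. */
static unsigned int merge_bits(_Atomic unsigned int *word, unsigned int pending)
{
	unsigned int old = atomic_load(word);
	unsigned int new;

	do {
		new = old | pending;
		/* compare_exchange reloads 'old' with the current value on failure */
	} while (old != new &&
		 !atomic_compare_exchange_weak(word, &old, new));

	return old;
}

int main(void)
{
	_Atomic unsigned int irr = 0x00000010;
	unsigned int prev = merge_bits(&irr, 0x80000001);

	printf("prev=0x%08x now=0x%08x\n", prev, (unsigned int)atomic_load(&irr));
	/* -> prev=0x00000010 now=0x80000011 */
	return 0;
}
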
}
}
+static bool svm_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
+{
+ return true;
+}
+
void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
struct vcpu_svm *svm = to_svm(vcpu);
static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
{
- struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
-
- /*
- * Note, the next RIP must be provided as SRCU isn't held, i.e. KVM
- * can't read guest memory (dereference memslots) to decode the WRMSR.
- */
- if (control->exit_code == SVM_EXIT_MSR && control->exit_info_1 &&
- nrips && control->next_rip)
+ if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR &&
+ to_svm(vcpu)->vmcb->control.exit_info_1)
return handle_fastpath_set_msr_irqoff(vcpu);
return EXIT_FASTPATH_NONE;
.set_segment = svm_set_segment,
.get_cpl = svm_get_cpl,
.get_cs_db_l_bits = svm_get_cs_db_l_bits,
+ .is_valid_cr0 = svm_is_valid_cr0,
.set_cr0 = svm_set_cr0,
.post_set_cr3 = sev_post_set_cr3,
.is_valid_cr4 = svm_is_valid_cr4,
VMX_DO_EVENT_IRQOFF call asm_exc_nmi_kvm_vmx
SYM_FUNC_END(vmx_do_nmi_irqoff)
-
-.section .text, "ax"
-
#ifndef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
+
/**
* vmread_error_trampoline - Trampoline from inline asm to vmread_error()
* @field: VMCS field encoding that failed
mov 3*WORD_SIZE(%_ASM_BP), %_ASM_ARG2
mov 2*WORD_SIZE(%_ASM_BP), %_ASM_ARG1
- call vmread_error
+ call vmread_error_trampoline2
/* Zero out @fault, which will be popped into the result register. */
_ASM_MOV $0, 3*WORD_SIZE(%_ASM_BP)
SYM_FUNC_END(vmread_error_trampoline)
#endif
+.section .text, "ax"
+
SYM_FUNC_START(vmx_do_interrupt_irqoff)
VMX_DO_EVENT_IRQOFF CALL_NOSPEC _ASM_ARG1
SYM_FUNC_END(vmx_do_interrupt_irqoff)
pr_warn_ratelimited(fmt); \
} while (0)
-void vmread_error(unsigned long field, bool fault)
+noinline void vmread_error(unsigned long field)
{
- if (fault)
+ vmx_insn_failed("vmread failed: field=%lx\n", field);
+}
+
+#ifndef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
+noinstr void vmread_error_trampoline2(unsigned long field, bool fault)
+{
+ if (fault) {
kvm_spurious_fault();
- else
- vmx_insn_failed("vmread failed: field=%lx\n", field);
+ } else {
+ instrumentation_begin();
+ vmread_error(field);
+ instrumentation_end();
+ }
}
+#endif
noinline void vmwrite_error(unsigned long field, unsigned long value)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
unsigned long old_rflags;
+ /*
+ * Unlike CR0 and CR4, RFLAGS handling requires checking if the vCPU
+ * is an unrestricted guest in order to mark L2 as needing emulation
+ * if L1 runs L2 as a restricted guest.
+ */
if (is_unrestricted_guest(vcpu)) {
kvm_register_mark_available(vcpu, VCPU_EXREG_RFLAGS);
vmx->rflags = rflags;
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct kvm_vmx *kvm_vmx = to_kvm_vmx(vcpu->kvm);
+ /*
+ * KVM should never use VM86 to virtualize Real Mode when L2 is active,
+ * as using VM86 is unnecessary if unrestricted guest is enabled, and
+ * if unrestricted guest is disabled, VM-Enter (from L1) with CR0.PG=0
+ * should VM-Fail and KVM should reject userspace attempts to stuff
+ * CR0.PG=0 when L2 is active.
+ */
+ WARN_ON_ONCE(is_guest_mode(vcpu));
+
vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
#define CR3_EXITING_BITS (CPU_BASED_CR3_LOAD_EXITING | \
CPU_BASED_CR3_STORE_EXITING)
+static bool vmx_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
+{
+ if (is_guest_mode(vcpu))
+ return nested_guest_cr0_valid(vcpu, cr0);
+
+ if (to_vmx(vcpu)->nested.vmxon)
+ return nested_host_cr0_valid(vcpu, cr0);
+
+ return true;
+}
+
void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
old_cr0_pg = kvm_read_cr0_bits(vcpu, X86_CR0_PG);
hw_cr0 = (cr0 & ~KVM_VM_CR0_ALWAYS_OFF);
- if (is_unrestricted_guest(vcpu))
+ if (enable_unrestricted_guest)
hw_cr0 |= KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST;
else {
hw_cr0 |= KVM_VM_CR0_ALWAYS_ON;
}
#endif
- if (enable_ept && !is_unrestricted_guest(vcpu)) {
+ if (enable_ept && !enable_unrestricted_guest) {
/*
* Ensure KVM has an up-to-date snapshot of the guest's CR3. If
* the below code _enables_ CR3 exiting, vmx_cache_reg() will
* this bit, even if host CR4.MCE == 0.
*/
hw_cr4 = (cr4_read_shadow() & X86_CR4_MCE) | (cr4 & ~X86_CR4_MCE);
- if (is_unrestricted_guest(vcpu))
+ if (enable_unrestricted_guest)
hw_cr4 |= KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST;
else if (vmx->rmode.vm86_active)
hw_cr4 |= KVM_RMODE_VM_CR4_ALWAYS_ON;
vcpu->arch.cr4 = cr4;
kvm_register_mark_available(vcpu, VCPU_EXREG_CR4);
- if (!is_unrestricted_guest(vcpu)) {
+ if (!enable_unrestricted_guest) {
if (enable_ept) {
if (!is_paging(vcpu)) {
hw_cr4 &= ~X86_CR4_PAE;
if (kvm_vmx->pid_table)
return 0;
- pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, vmx_get_pid_table_order(kvm));
+ pages = alloc_pages(GFP_KERNEL_ACCOUNT | __GFP_ZERO,
+ vmx_get_pid_table_order(kvm));
if (!pages)
return -ENOMEM;
val = (val & ~vmcs12->cr0_guest_host_mask) |
(vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask);
- if (!nested_guest_cr0_valid(vcpu, val))
- return 1;
-
if (kvm_set_cr0(vcpu, val))
return 1;
vmcs_writel(CR0_READ_SHADOW, orig_val);
return 0;
} else {
- if (to_vmx(vcpu)->nested.vmxon &&
- !nested_host_cr0_valid(vcpu, val))
- return 1;
-
return kvm_set_cr0(vcpu, val);
}
}
.set_segment = vmx_set_segment,
.get_cpl = vmx_get_cpl,
.get_cs_db_l_bits = vmx_get_cs_db_l_bits,
+ .is_valid_cr0 = vmx_is_valid_cr0,
.set_cr0 = vmx_set_cr0,
.is_valid_cr4 = vmx_is_valid_cr4,
.set_cr4 = vmx_set_cr4,
#include "vmcs.h"
#include "../x86.h"
-void vmread_error(unsigned long field, bool fault);
+void vmread_error(unsigned long field);
void vmwrite_error(unsigned long field, unsigned long value);
void vmclear_error(struct vmcs *vmcs, u64 phys_addr);
void vmptrld_error(struct vmcs *vmcs, u64 phys_addr);
* void vmread_error_trampoline(unsigned long field, bool fault);
*/
extern unsigned long vmread_error_trampoline;
+
+/*
+ * The second VMREAD error trampoline, called from the assembly trampoline,
+ * exists primarily to enable instrumentation for the VM-Fail path.
+ */
+void vmread_error_trampoline2(unsigned long field, bool fault);
+
#endif
static __always_inline void vmcs_check16(unsigned long field)
do_fail:
instrumentation_begin();
- WARN_ONCE(1, KBUILD_MODNAME ": vmread failed: field=%lx\n", field);
- pr_warn_ratelimited(KBUILD_MODNAME ": vmread failed: field=%lx\n", field);
+ vmread_error(field);
instrumentation_end();
return 0;
}
EXPORT_SYMBOL_GPL(load_pdptrs);
+static bool kvm_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
+{
+#ifdef CONFIG_X86_64
+ if (cr0 & 0xffffffff00000000UL)
+ return false;
+#endif
+
+ if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
+ return false;
+
+ if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
+ return false;
+
+ return static_call(kvm_x86_is_valid_cr0)(vcpu, cr0);
+}
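
kvm_is_valid_cr0() above gathers the architectural constraints that were previously open-coded in kvm_set_cr0() (removed just below): the upper 32 bits must be clear, CR0.NW requires CR0.CD, and CR0.PG requires CR0.PE, with anything vendor-specific deferred to the new is_valid_cr0 hook. A small standalone harness of just the common rules, a sketch only (the bit positions are the standard architectural ones; the vendor hook is deliberately left out):

#include <stdbool.h>
#include <stdio.h>

#define X86_CR0_PE (1ULL << 0)	/* Protection Enable */
#define X86_CR0_NW (1ULL << 29)	/* Not Write-through */
#define X86_CR0_CD (1ULL << 30)	/* Cache Disable */
#define X86_CR0_PG (1ULL << 31)	/* Paging */

/* Common checks only; the per-vendor is_valid_cr0() hook is not modeled. */
static bool cr0_is_valid(unsigned long long cr0)
{
	if (cr0 & 0xffffffff00000000ULL)
		return false;
	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
		return false;
	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", cr0_is_valid(X86_CR0_PE | X86_CR0_PG));	/* 1: paging on top of protected mode */
	printf("%d\n", cr0_is_valid(X86_CR0_PG));		/* 0: PG without PE */
	printf("%d\n", cr0_is_valid(X86_CR0_NW));		/* 0: NW without CD */
	printf("%d\n", cr0_is_valid(1ULL << 32));		/* 0: bits above 31 set */
	return 0;
}
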
+
void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned long cr0)
{
/*
{
unsigned long old_cr0 = kvm_read_cr0(vcpu);
- cr0 |= X86_CR0_ET;
-
-#ifdef CONFIG_X86_64
- if (cr0 & 0xffffffff00000000UL)
+ if (!kvm_is_valid_cr0(vcpu, cr0))
return 1;
-#endif
- cr0 &= ~CR0_RESERVED_BITS;
-
- if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
- return 1;
+ cr0 |= X86_CR0_ET;
- if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
- return 1;
+	/* Writes to CR0 reserved bits are ignored, even on Intel. */
+ cr0 &= ~CR0_RESERVED_BITS;
#ifdef CONFIG_X86_64
if ((vcpu->arch.efer & EFER_LME) && !is_paging(vcpu) &&
u64 data;
fastpath_t ret = EXIT_FASTPATH_NONE;
+ kvm_vcpu_srcu_read_lock(vcpu);
+
switch (msr) {
case APIC_BASE_MSR + (APIC_ICR >> 4):
data = kvm_read_edx_eax(vcpu);
if (ret != EXIT_FASTPATH_NONE)
trace_kvm_msr_write(msr, data);
+ kvm_vcpu_srcu_read_unlock(vcpu);
+
return ret;
}
EXPORT_SYMBOL_GPL(handle_fastpath_set_msr_irqoff);
if (r < 0)
goto out;
if (r) {
- kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu), false);
- static_call(kvm_x86_inject_irq)(vcpu, false);
- WARN_ON(static_call(kvm_x86_interrupt_allowed)(vcpu, true) < 0);
+ int irq = kvm_cpu_get_interrupt(vcpu);
+
+ if (!WARN_ON_ONCE(irq == -1)) {
+ kvm_queue_interrupt(vcpu, irq, false);
+ static_call(kvm_x86_inject_irq)(vcpu, false);
+ WARN_ON(static_call(kvm_x86_interrupt_allowed)(vcpu, true) < 0);
+ }
}
if (kvm_cpu_has_injectable_intr(vcpu))
static_call(kvm_x86_enable_irq_window)(vcpu);
return false;
}
- return kvm_is_valid_cr4(vcpu, sregs->cr4);
+ return kvm_is_valid_cr4(vcpu, sregs->cr4) &&
+ kvm_is_valid_cr0(vcpu, sregs->cr0);
}
static int __set_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs,
bool kvm_arch_has_irq_bypass(void)
{
- return true;
+ return enable_apicv && irq_remapping_cap(IRQ_POSTING_CAP);
}
int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
for (i = 0; i < node->mapping_count; i++, map++) {
struct acpi_iort_node *parent;
- if (!map->id_count)
- continue;
-
parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
map->output_reference);
if (parent != iommu)
if (qc->result_tf.status & ATA_SENSE &&
((ata_is_ncq(qc->tf.protocol) &&
dev->flags & ATA_DFLAG_CDL_ENABLED) ||
- (!(ata_is_ncq(qc->tf.protocol) &&
- ata_id_sense_reporting_enabled(dev->id))))) {
+ (!ata_is_ncq(qc->tf.protocol) &&
+ ata_id_sense_reporting_enabled(dev->id)))) {
/*
* Tell SCSI EH to not overwrite scmd->result even if
* this command is finished with result SAM_STAT_GOOD.
/* dma_request_channel may sleep, so calling from process context */
acdev->dma_chan = dma_request_chan(acdev->host->dev, "data");
if (IS_ERR(acdev->dma_chan)) {
- dev_err(acdev->host->dev, "Unable to get dma_chan\n");
+ dev_err_probe(acdev->host->dev, PTR_ERR(acdev->dma_chan),
+ "Unable to get dma_chan\n");
acdev->dma_chan = NULL;
goto chan_request_fail;
}
* LOCKING:
* Inherited from caller.
*/
-void ns87560_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
+static void ns87560_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
irq_handler_t irq_handler = NULL;
void __iomem *base;
struct octeon_cf_port *cf_port;
- int rv = -ENOMEM;
u32 bus_width;
+ int rv;
node = pdev->dev.of_node;
if (node == NULL)
cs0 = devm_ioremap(&pdev->dev, res_cs0->start,
resource_size(res_cs0));
if (!cs0)
- return rv;
+ return -ENOMEM;
/* allocate host */
host = ata_host_alloc(&pdev->dev, 1);
if (!host)
- return rv;
+ return -ENOMEM;
ap = host->ports[0];
ap->private_data = cf_port;
#define WAKE_IRQ_DEDICATED_MASK (WAKE_IRQ_DEDICATED_ALLOCATED | \
WAKE_IRQ_DEDICATED_MANAGED | \
WAKE_IRQ_DEDICATED_REVERSE)
+#define WAKE_IRQ_DEDICATED_ENABLED BIT(3)
struct wake_irq {
struct device *dev;
return err;
}
-
/**
* dev_pm_set_dedicated_wake_irq - Request a dedicated wake-up interrupt
* @dev: Device entry
* Sets up a threaded interrupt handler for a device that has
* a dedicated wake-up interrupt in addition to the device IO
* interrupt.
- *
- * The interrupt starts disabled, and needs to be managed for
- * the device by the bus code or the device driver using
- * dev_pm_enable_wake_irq*() and dev_pm_disable_wake_irq*()
- * functions.
*/
int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
{
* the status of WAKE_IRQ_DEDICATED_REVERSE to tell rpm_suspend()
* to enable dedicated wake-up interrupt after running the runtime suspend
* callback for @dev.
- *
- * The interrupt starts disabled, and needs to be managed for
- * the device by the bus code or the device driver using
- * dev_pm_enable_wake_irq*() and dev_pm_disable_wake_irq*()
- * functions.
*/
int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq)
{
}
EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq_reverse);
-/**
- * dev_pm_enable_wake_irq - Enable device wake-up interrupt
- * @dev: Device
- *
- * Optionally called from the bus code or the device driver for
- * runtime_resume() to override the PM runtime core managed wake-up
- * interrupt handling to enable the wake-up interrupt.
- *
- * Note that for runtime_suspend()) the wake-up interrupts
- * should be unconditionally enabled unlike for suspend()
- * that is conditional.
- */
-void dev_pm_enable_wake_irq(struct device *dev)
-{
- struct wake_irq *wirq = dev->power.wakeirq;
-
- if (wirq && (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED))
- enable_irq(wirq->irq);
-}
-EXPORT_SYMBOL_GPL(dev_pm_enable_wake_irq);
-
-/**
- * dev_pm_disable_wake_irq - Disable device wake-up interrupt
- * @dev: Device
- *
- * Optionally called from the bus code or the device driver for
- * runtime_suspend() to override the PM runtime core managed wake-up
- * interrupt handling to disable the wake-up interrupt.
- */
-void dev_pm_disable_wake_irq(struct device *dev)
-{
- struct wake_irq *wirq = dev->power.wakeirq;
-
- if (wirq && (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED))
- disable_irq_nosync(wirq->irq);
-}
-EXPORT_SYMBOL_GPL(dev_pm_disable_wake_irq);
-
/**
* dev_pm_enable_wake_irq_check - Checks and enables wake-up interrupt
* @dev: Device
return;
enable:
- if (!can_change_status || !(wirq->status & WAKE_IRQ_DEDICATED_REVERSE))
+ if (!can_change_status || !(wirq->status & WAKE_IRQ_DEDICATED_REVERSE)) {
enable_irq(wirq->irq);
+ wirq->status |= WAKE_IRQ_DEDICATED_ENABLED;
+ }
}
/**
if (cond_disable && (wirq->status & WAKE_IRQ_DEDICATED_REVERSE))
return;
- if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED)
+ if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED) {
+ wirq->status &= ~WAKE_IRQ_DEDICATED_ENABLED;
disable_irq_nosync(wirq->irq);
+ }
}
/**
if (device_may_wakeup(wirq->dev)) {
if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
- !pm_runtime_status_suspended(wirq->dev))
+ !(wirq->status & WAKE_IRQ_DEDICATED_ENABLED))
enable_irq(wirq->irq);
enable_irq_wake(wirq->irq);
disable_irq_wake(wirq->irq);
if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
- !pm_runtime_status_suspended(wirq->dev))
+ !(wirq->status & WAKE_IRQ_DEDICATED_ENABLED))
disable_irq_nosync(wirq->irq);
}
}
list_splice_tail_init(&rbd_dev->acquiring_list, &rbd_dev->running_list);
}
-static int get_lock_owner_info(struct rbd_device *rbd_dev,
- struct ceph_locker **lockers, u32 *num_lockers)
+static bool locker_equal(const struct ceph_locker *lhs,
+ const struct ceph_locker *rhs)
+{
+ return lhs->id.name.type == rhs->id.name.type &&
+ lhs->id.name.num == rhs->id.name.num &&
+ !strcmp(lhs->id.cookie, rhs->id.cookie) &&
+ ceph_addr_equal_no_type(&lhs->info.addr, &rhs->info.addr);
+}
+
+static void free_locker(struct ceph_locker *locker)
+{
+ if (locker)
+ ceph_free_lockers(locker, 1);
+}
+
+static struct ceph_locker *get_lock_owner_info(struct rbd_device *rbd_dev)
{
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
+ struct ceph_locker *lockers;
+ u32 num_lockers;
u8 lock_type;
char *lock_tag;
+ u64 handle;
int ret;
- dout("%s rbd_dev %p\n", __func__, rbd_dev);
-
ret = ceph_cls_lock_info(osdc, &rbd_dev->header_oid,
&rbd_dev->header_oloc, RBD_LOCK_NAME,
- &lock_type, &lock_tag, lockers, num_lockers);
- if (ret)
- return ret;
+ &lock_type, &lock_tag, &lockers, &num_lockers);
+ if (ret) {
+ rbd_warn(rbd_dev, "failed to retrieve lockers: %d", ret);
+ return ERR_PTR(ret);
+ }
- if (*num_lockers == 0) {
+ if (num_lockers == 0) {
dout("%s rbd_dev %p no lockers detected\n", __func__, rbd_dev);
+ lockers = NULL;
goto out;
}
if (strcmp(lock_tag, RBD_LOCK_TAG)) {
rbd_warn(rbd_dev, "locked by external mechanism, tag %s",
lock_tag);
- ret = -EBUSY;
- goto out;
+ goto err_busy;
}
- if (lock_type == CEPH_CLS_LOCK_SHARED) {
- rbd_warn(rbd_dev, "shared lock type detected");
- ret = -EBUSY;
- goto out;
+ if (lock_type != CEPH_CLS_LOCK_EXCLUSIVE) {
+ rbd_warn(rbd_dev, "incompatible lock type detected");
+ goto err_busy;
}
- if (strncmp((*lockers)[0].id.cookie, RBD_LOCK_COOKIE_PREFIX,
- strlen(RBD_LOCK_COOKIE_PREFIX))) {
+ WARN_ON(num_lockers != 1);
+ ret = sscanf(lockers[0].id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu",
+ &handle);
+ if (ret != 1) {
rbd_warn(rbd_dev, "locked by external mechanism, cookie %s",
- (*lockers)[0].id.cookie);
- ret = -EBUSY;
- goto out;
+ lockers[0].id.cookie);
+ goto err_busy;
}
+ if (ceph_addr_is_blank(&lockers[0].info.addr)) {
+ rbd_warn(rbd_dev, "locker has a blank address");
+ goto err_busy;
+ }
+
+ dout("%s rbd_dev %p got locker %s%llu@%pISpc/%u handle %llu\n",
+ __func__, rbd_dev, ENTITY_NAME(lockers[0].id.name),
+ &lockers[0].info.addr.in_addr,
+ le32_to_cpu(lockers[0].info.addr.nonce), handle);
out:
kfree(lock_tag);
- return ret;
+ return lockers;
+
+err_busy:
+ kfree(lock_tag);
+ ceph_free_lockers(lockers, num_lockers);
+ return ERR_PTR(-EBUSY);
}
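
The rewritten get_lock_owner_info() above treats the lock cookie as rbd-generated only if it parses as the cookie prefix followed by a numeric handle (the sscanf() against RBD_LOCK_COOKIE_PREFIX " %llu"), rather than merely sharing the prefix. A standalone sketch of that parse; the literal prefix string "auto" below is an assumption for illustration, the kernel only ever uses the RBD_LOCK_COOKIE_PREFIX constant:

#include <stdio.h>

/* Assumed prefix, standing in for RBD_LOCK_COOKIE_PREFIX. */
#define LOCK_COOKIE_PREFIX "auto"

/* Returns 1 and stores the handle if the cookie looks rbd-generated,
 * 0 if it was set by some external mechanism. */
static int parse_lock_cookie(const char *cookie, unsigned long long *handle)
{
	return sscanf(cookie, LOCK_COOKIE_PREFIX " %llu", handle) == 1;
}

int main(void)
{
	unsigned long long handle = 0;

	printf("%d\n", parse_lock_cookie("auto 94167300012345", &handle));	/* 1 */
	printf("handle=%llu\n", handle);					/* handle=94167300012345 */
	printf("%d\n", parse_lock_cookie("external-tool-lock", &handle));	/* 0 */
	return 0;
}
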
static int find_watcher(struct rbd_device *rbd_dev,
static int rbd_try_lock(struct rbd_device *rbd_dev)
{
struct ceph_client *client = rbd_dev->rbd_client->client;
- struct ceph_locker *lockers;
- u32 num_lockers;
+ struct ceph_locker *locker, *refreshed_locker;
int ret;
for (;;) {
+ locker = refreshed_locker = NULL;
+
ret = rbd_lock(rbd_dev);
if (ret != -EBUSY)
- return ret;
+ goto out;
/* determine if the current lock holder is still alive */
- ret = get_lock_owner_info(rbd_dev, &lockers, &num_lockers);
- if (ret)
- return ret;
-
- if (num_lockers == 0)
+ locker = get_lock_owner_info(rbd_dev);
+ if (IS_ERR(locker)) {
+ ret = PTR_ERR(locker);
+ locker = NULL;
+ goto out;
+ }
+ if (!locker)
goto again;
- ret = find_watcher(rbd_dev, lockers);
+ ret = find_watcher(rbd_dev, locker);
if (ret)
goto out; /* request lock or error */
+ refreshed_locker = get_lock_owner_info(rbd_dev);
+ if (IS_ERR(refreshed_locker)) {
+ ret = PTR_ERR(refreshed_locker);
+ refreshed_locker = NULL;
+ goto out;
+ }
+ if (!refreshed_locker ||
+ !locker_equal(locker, refreshed_locker))
+ goto again;
+
rbd_warn(rbd_dev, "breaking header lock owned by %s%llu",
- ENTITY_NAME(lockers[0].id.name));
+ ENTITY_NAME(locker->id.name));
ret = ceph_monc_blocklist_add(&client->monc,
- &lockers[0].info.addr);
+ &locker->info.addr);
if (ret) {
- rbd_warn(rbd_dev, "blocklist of %s%llu failed: %d",
- ENTITY_NAME(lockers[0].id.name), ret);
+ rbd_warn(rbd_dev, "failed to blocklist %s%llu: %d",
+ ENTITY_NAME(locker->id.name), ret);
goto out;
}
ret = ceph_cls_break_lock(&client->osdc, &rbd_dev->header_oid,
&rbd_dev->header_oloc, RBD_LOCK_NAME,
- lockers[0].id.cookie,
- &lockers[0].id.name);
- if (ret && ret != -ENOENT)
+ locker->id.cookie, &locker->id.name);
+ if (ret && ret != -ENOENT) {
+ rbd_warn(rbd_dev, "failed to break header lock: %d",
+ ret);
goto out;
+ }
again:
- ceph_free_lockers(lockers, num_lockers);
+ free_locker(refreshed_locker);
+ free_locker(locker);
}
out:
- ceph_free_lockers(lockers, num_lockers);
+ free_locker(refreshed_locker);
+ free_locker(locker);
return ret;
}
if (ublksrv_pid <= 0)
return -EINVAL;
- wait_for_completion_interruptible(&ub->completion);
+ if (wait_for_completion_interruptible(&ub->completion) != 0)
+ return -EINTR;
schedule_delayed_work(&ub->monitor_work, UBLK_DAEMON_MONITOR_PERIOD);
* - the device number is freed already, we will not find this
* device via ublk_get_device_from_id()
*/
- wait_event_interruptible(ublk_idr_wq, ublk_idr_freed(idx));
-
+ if (wait_event_interruptible(ublk_idr_wq, ublk_idr_freed(idx)))
+ return -EINTR;
return 0;
}
pr_devel("%s: Waiting for new ubq_daemons(nr: %d) are ready, dev id %d...\n",
__func__, ub->dev_info.nr_hw_queues, header->dev_id);
/* wait until new ubq_daemon sending all FETCH_REQ */
- wait_for_completion_interruptible(&ub->completion);
+ if (wait_for_completion_interruptible(&ub->completion))
+ return -EINTR;
+
pr_devel("%s: All new ubq_daemons(nr: %d) are ready, dev id %d\n",
__func__, ub->dev_info.nr_hw_queues, header->dev_id);
.of_match_table = of_match_ptr(of_st33zp24_i2c_match),
.acpi_match_table = ACPI_PTR(st33zp24_i2c_acpi_match),
},
- .probe_new = st33zp24_i2c_probe,
+ .probe = st33zp24_i2c_probe,
.remove = st33zp24_i2c_remove,
.id_table = st33zp24_i2c_id
};
static struct i2c_driver i2c_atmel_driver = {
.id_table = i2c_atmel_id,
- .probe_new = i2c_atmel_probe,
+ .probe = i2c_atmel_probe,
.remove = i2c_atmel_remove,
.driver = {
.name = I2C_DRIVER_NAME,
static struct i2c_driver tpm_tis_i2c_driver = {
.id_table = tpm_tis_i2c_table,
- .probe_new = tpm_tis_i2c_probe,
+ .probe = tpm_tis_i2c_probe,
.remove = tpm_tis_i2c_remove,
.driver = {
.name = "tpm_i2c_infineon",
static struct i2c_driver i2c_nuvoton_driver = {
.id_table = i2c_nuvoton_id,
- .probe_new = i2c_nuvoton_probe,
+ .probe = i2c_nuvoton_probe,
.remove = i2c_nuvoton_remove,
.driver = {
.name = "tpm_i2c_nuvoton",
goto out;
}
- size += recv_data(chip, &buf[TPM_HEADER_SIZE],
- expected - TPM_HEADER_SIZE);
+ rc = recv_data(chip, &buf[TPM_HEADER_SIZE],
+ expected - TPM_HEADER_SIZE);
+ if (rc < 0) {
+ size = rc;
+ goto out;
+ }
+ size += rc;
if (size < expected) {
dev_err(&chip->dev, "Unable to read remainder of result\n");
size = -ETIME;
.pm = &tpm_tis_pm,
.of_match_table = of_match_ptr(of_tis_i2c_match),
},
- .probe_new = tpm_tis_i2c_probe,
+ .probe = tpm_tis_i2c_probe,
.remove = tpm_tis_i2c_remove,
.id_table = tpm_tis_i2c_id,
};
static SIMPLE_DEV_PM_OPS(cr50_i2c_pm, tpm_pm_suspend, tpm_pm_resume);
static struct i2c_driver cr50_i2c_driver = {
- .probe_new = tpm_cr50_i2c_probe,
+ .probe = tpm_cr50_i2c_probe,
.remove = tpm_cr50_i2c_remove,
.driver = {
.name = "cr50_i2c",
menuconfig CXL_BUS
tristate "CXL (Compute Express Link) Devices Support"
depends on PCI
+ select FW_LOADER
+ select FW_UPLOAD
select PCI_DOE
help
CXL is a bus that is electrically compatible with PCI Express, but
config CXL_MEM
tristate "CXL: Memory Expansion"
depends on CXL_PCI
- select FW_UPLOAD
default CXL_BUS
help
The CXL.mem protocol allows a device to act as a provider of "System
else
rc = cxl_decoder_autoremove(dev, cxld);
if (rc) {
- dev_err(dev, "Failed to add decode range [%#llx - %#llx]\n",
- cxld->hpa_range.start, cxld->hpa_range.end);
- return 0;
+ dev_err(dev, "Failed to add decode range: %pr", res);
+ return rc;
}
dev_dbg(dev, "add: %s node: %d range [%#llx - %#llx]\n",
dev_name(&cxld->dev),
/* FW state bits */
#define CXL_FW_STATE_BITS 32
-#define CXL_FW_CANCEL BIT(0)
+#define CXL_FW_CANCEL 0
/**
* struct cxl_fw_state - Firmware upload / activation state
return 0;
failed2:
- amdgpu_bo_free_kernel(&psp->fw_pri_bo,
- &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
-failed1:
amdgpu_bo_free_kernel(&psp->fence_buf_bo,
&psp->fence_buf_mc_addr, &psp->fence_buf);
+failed1:
+ amdgpu_bo_free_kernel(&psp->fw_pri_bo,
+ &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
return ret;
}
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
#define GOLDEN_GB_ADDR_CONFIG 0x2a114042
+#define CP_HQD_PERSISTENT_STATE_DEFAULT 0xbe05301
struct amdgpu_gfx_ras gfx_v9_4_3_ras;
WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_IQ_TIMER, 0);
WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_IB_CONTROL, 0);
- WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE, 0);
+ WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE, CP_HQD_PERSISTENT_STATE_DEFAULT);
WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);
WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, 0);
WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR, 0);
static int gfxhub_v1_2_xcc_gart_enable(struct amdgpu_device *adev,
uint32_t xcc_mask)
{
- uint32_t tmp_mask;
int i;
- tmp_mask = xcc_mask;
/*
* MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, because they are
* VF copy registers so vbios post doesn't program them, for
* SRIOV driver need to program them
*/
if (amdgpu_sriov_vf(adev)) {
- for_each_inst(i, tmp_mask) {
- i = ffs(tmp_mask) - 1;
+ for_each_inst(i, xcc_mask) {
WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_FB_LOCATION_BASE,
adev->gmc.vram_start >> 24);
WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_FB_LOCATION_TOP,
if (!q)
return 0;
- if (KFD_GC_VERSION(q->device) < IP_VERSION(11, 0, 0) ||
- KFD_GC_VERSION(q->device) >= IP_VERSION(12, 0, 0))
+ if (!kfd_dbg_has_cwsr_workaround(q->device))
return 0;
if (enable && q->properties.is_user_cu_masked)
{
uint32_t spi_dbg_cntl = pdd->spi_dbg_override | pdd->spi_dbg_launch_mode;
uint32_t flags = pdd->process->dbg_flags;
- bool sq_trap_en = !!spi_dbg_cntl;
+ bool sq_trap_en = !!spi_dbg_cntl || !kfd_dbg_has_cwsr_workaround(pdd->dev);
if (!kfd_dbg_is_per_vmid_supported(pdd->dev))
return 0;
KFD_GC_VERSION(dev) == IP_VERSION(10, 1, 1));
}
+static inline bool kfd_dbg_has_cwsr_workaround(struct kfd_node *dev)
+{
+ return KFD_GC_VERSION(dev) >= IP_VERSION(11, 0, 0) &&
+ KFD_GC_VERSION(dev) <= IP_VERSION(11, 0, 3);
+}
+
static inline bool kfd_dbg_has_gws_support(struct kfd_node *dev)
{
if ((KFD_GC_VERSION(dev) == IP_VERSION(9, 0, 1)
queue_input.paging = false;
queue_input.tba_addr = qpd->tba_addr;
queue_input.tma_addr = qpd->tma_addr;
- queue_input.trap_en = KFD_GC_VERSION(q->device) < IP_VERSION(11, 0, 0) ||
- KFD_GC_VERSION(q->device) > IP_VERSION(11, 0, 3);
+ queue_input.trap_en = !kfd_dbg_has_cwsr_workaround(q->device);
queue_input.skip_process_ctx_clear = qpd->pqm->process->debug_trap_enabled;
queue_type = convert_to_mes_queue_type(q->properties.type);
*/
q->properties.is_evicted = !!qpd->evicted;
q->properties.is_dbg_wa = qpd->pqm->process->debug_trap_enabled &&
- KFD_GC_VERSION(q->device) >= IP_VERSION(11, 0, 0) &&
- KFD_GC_VERSION(q->device) <= IP_VERSION(11, 0, 3);
+ kfd_dbg_has_cwsr_workaround(q->device);
if (qd)
mqd_mgr->restore_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj, &q->gart_mqd_addr,
if (retry == 3) {
DRM_ERROR("Failed to ack MST event.\n");
- return;
+ break;
}
drm_dp_mst_hpd_irq_send_new_request(&aconnector->mst_mgr);
hws->funcs.edp_backlight_control(edp_link_with_sink, false);
}
/*resume from S3, no vbios posting, no need to power down again*/
+ clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr);
+
power_down_all_hw_blocks(dc);
disable_vga_and_power_gate_all_controllers(dc);
if (edp_link_with_sink && !keep_edp_vdd_on)
dc->hwss.edp_power_control(edp_link_with_sink, false);
+ clk_mgr_optimize_pwr_state(dc, dc->clk_mgr);
}
bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 1);
}
struct dcn_dccg *dccg_dcn,
enum phyd32clk_clock_source src)
{
- if (dccg_dcn->base.ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) {
+ if (dccg_dcn->base.ctx->asic_id.chip_family == FAMILY_YELLOW_CARP &&
+ dccg_dcn->base.ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) {
if (src == PHYD32CLKC)
src = PHYD32CLKF;
if (src == PHYD32CLKD)
uint32_t dispclk_rdivider_value = 0;
REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_RDIVIDER, &dispclk_rdivider_value);
- REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, dispclk_rdivider_value);
+
+ /* Not valid for the WDIVIDER to be set to 0 */
+ if (dispclk_rdivider_value != 0)
+ REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, dispclk_rdivider_value);
}
static void dccg32_get_pixel_rate_div(
gpu_metrics->average_vclk1_frequency = metrics->AverageVclk1Frequency;
gpu_metrics->average_dclk1_frequency = metrics->AverageDclk1Frequency;
- gpu_metrics->current_gfxclk = metrics->CurrClock[PPCLK_GFXCLK];
+ gpu_metrics->current_gfxclk = gpu_metrics->average_gfxclk_frequency;
gpu_metrics->current_socclk = metrics->CurrClock[PPCLK_SOCCLK];
gpu_metrics->current_uclk = metrics->CurrClock[PPCLK_UCLK];
gpu_metrics->current_vclk0 = metrics->CurrClock[PPCLK_VCLK_0];
i915_vma_get(vma);
}
+ dpt->obj->mm.dirty = true;
+
atomic_dec(&i915->gpu_error.pending_fb_pin);
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
dpt_obj = i915_gem_object_create_stolen(i915, size);
if (IS_ERR(dpt_obj) && !HAS_LMEM(i915)) {
drm_dbg_kms(&i915->drm, "Allocating dpt from smem\n");
- dpt_obj = i915_gem_object_create_internal(i915, size);
+ dpt_obj = i915_gem_object_create_shmem(i915, size);
}
if (IS_ERR(dpt_obj))
return ERR_CAST(dpt_obj);
* times in succession a possibility by enlarging the permutation array.
*/
order = i915_random_order(count * count, &prng);
- if (!order)
- return -ENOMEM;
+ if (!order) {
+ err = -ENOMEM;
+ goto out;
+ }
max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg);
max = div_u64(max - size, max_page_size);
* since we've already mapped it once in
* submit_reloc()
*/
- if (WARN_ON(!ptr))
+ if (WARN_ON(IS_ERR_OR_NULL(ptr)))
return;
for (i = 0; i < dwords; i++) {
SHADER(A6XX_SP_LB_3_DATA, 0x800),
SHADER(A6XX_SP_LB_4_DATA, 0x800),
SHADER(A6XX_SP_LB_5_DATA, 0x200),
- SHADER(A6XX_SP_CB_BINDLESS_DATA, 0x2000),
+ SHADER(A6XX_SP_CB_BINDLESS_DATA, 0x800),
SHADER(A6XX_SP_CB_LEGACY_DATA, 0x280),
SHADER(A6XX_SP_UAV_DATA, 0x80),
SHADER(A6XX_SP_INST_TAG, 0x80),
.hwcg = a640_hwcg,
}, {
.rev = ADRENO_REV(6, 9, 0, ANY_ID),
- .revn = 690,
- .name = "A690",
.fw = {
[ADRENO_FW_SQE] = "a660_sqe.fw",
[ADRENO_FW_GMU] = "a690_gmu.bin",
static inline bool adreno_is_revn(const struct adreno_gpu *gpu, uint32_t revn)
{
- WARN_ON_ONCE(!gpu->revn);
+	/* revn can be zero, but if not, it is set at the same time as info */
+ WARN_ON_ONCE(!gpu->info);
return gpu->revn == revn;
}
static inline bool adreno_is_a2xx(const struct adreno_gpu *gpu)
{
- WARN_ON_ONCE(!gpu->revn);
+	/* revn can be zero, but if not, it is set at the same time as info */
+ WARN_ON_ONCE(!gpu->info);
return (gpu->revn < 300);
}
static inline bool adreno_is_a20x(const struct adreno_gpu *gpu)
{
- WARN_ON_ONCE(!gpu->revn);
+	/* revn can be zero, but if not, it is set at the same time as info */
+ WARN_ON_ONCE(!gpu->info);
return (gpu->revn < 210);
}
static inline int adreno_is_a690(const struct adreno_gpu *gpu)
{
- return adreno_is_revn(gpu, 690);
+ /* The order of args is important here to handle ANY_ID correctly */
+ return adreno_cmp_rev(ADRENO_REV(6, 9, 0, ANY_ID), gpu->rev);
};
/* check for a615, a616, a618, a619 or any derivatives */
#define DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE 412500000
-/**
- * enum dpu_core_perf_data_bus_id - data bus identifier
- * @DPU_CORE_PERF_DATA_BUS_ID_MNOC: DPU/MNOC data bus
- * @DPU_CORE_PERF_DATA_BUS_ID_LLCC: MNOC/LLCC data bus
- * @DPU_CORE_PERF_DATA_BUS_ID_EBI: LLCC/EBI data bus
- */
-enum dpu_core_perf_data_bus_id {
- DPU_CORE_PERF_DATA_BUS_ID_MNOC,
- DPU_CORE_PERF_DATA_BUS_ID_LLCC,
- DPU_CORE_PERF_DATA_BUS_ID_EBI,
- DPU_CORE_PERF_DATA_BUS_ID_MAX,
-};
-
/**
* struct dpu_core_perf_params - definition of performance parameters
* @max_per_pipe_ib: maximum instantaneous bandwidth request
static const u32 fetch_tbl[SSPP_MAX] = {CTL_INVALID_BIT, 16, 17, 18, 19,
CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, 0,
- 1, 2, 3, CTL_INVALID_BIT, CTL_INVALID_BIT};
+ 1, 2, 3, 4, 5};
static int _mixer_stages(const struct dpu_lm_cfg *mixer, int count,
enum dpu_lm lm)
case SSPP_DMA3:
ctx->pending_flush_mask |= BIT(25);
break;
+ case SSPP_DMA4:
+ ctx->pending_flush_mask |= BIT(13);
+ break;
+ case SSPP_DMA5:
+ ctx->pending_flush_mask |= BIT(14);
+ break;
case SSPP_CURSOR0:
ctx->pending_flush_mask |= BIT(22);
break;
const struct msm_dsi_phy_cfg dsi_phy_14nm_2290_cfgs = {
.has_phy_lane = true,
- .regulator_data = dsi_phy_14nm_17mA_regulators,
- .num_regulators = ARRAY_SIZE(dsi_phy_14nm_17mA_regulators),
.ops = {
.enable = dsi_14nm_phy_enable,
.disable = dsi_14nm_phy_disable,
f->fctx = fctx;
+ /*
+ * Until this point, the fence was just some pre-allocated memory,
+ * no-one should have taken a reference to it yet.
+ */
+ WARN_ON(kref_read(&fence->refcount));
+
dma_fence_init(&f->base, &msm_fence_ops, &fctx->spinlock,
fctx->context, ++fctx->last_fence);
}
}
dma_fence_put(submit->user_fence);
- dma_fence_put(submit->hw_fence);
+
+ /*
+ * If the submit is freed before msm_job_run(), then hw_fence is
+ * just some pre-allocated memory, not a reference counted fence.
+ * Once the job runs and the hw_fence is initialized, it will
+ * have a refcount of at least one, since the submit holds a ref
+ * to the hw_fence.
+ */
+ if (kref_read(&submit->hw_fence->refcount) == 0) {
+ kfree(submit->hw_fence);
+ } else {
+ dma_fence_put(submit->hw_fence);
+ }
put_pid(submit->pid);
msm_submitqueue_put(submit->queue);
* after the job is armed
*/
if ((args->flags & MSM_SUBMIT_FENCE_SN_IN) &&
- idr_find(&queue->fence_idr, args->fence)) {
+ (!args->fence || idr_find(&queue->fence_idr, args->fence))) {
spin_unlock(&queue->idr_lock);
idr_preload_end();
ret = -EINVAL;
#define UBWC_2_0 0x20000000
#define UBWC_3_0 0x30000000
#define UBWC_4_0 0x40000000
+#define UBWC_4_3 0x40030000
static void msm_mdss_setup_ubwc_dec_20(struct msm_mdss *msm_mdss)
{
writel_relaxed(1, msm_mdss->mmio + UBWC_CTRL_2);
writel_relaxed(0, msm_mdss->mmio + UBWC_PREDICTION_MODE);
} else {
- writel_relaxed(2, msm_mdss->mmio + UBWC_CTRL_2);
+ if (data->ubwc_dec_version == UBWC_4_3)
+ writel_relaxed(3, msm_mdss->mmio + UBWC_CTRL_2);
+ else
+ writel_relaxed(2, msm_mdss->mmio + UBWC_CTRL_2);
writel_relaxed(1, msm_mdss->mmio + UBWC_PREDICTION_MODE);
}
}
msm_mdss_setup_ubwc_dec_30(msm_mdss);
break;
case UBWC_4_0:
+ case UBWC_4_3:
msm_mdss_setup_ubwc_dec_40(msm_mdss);
break;
default:
.macrotile_mode = 1,
};
+static const struct msm_mdss_data sm8550_data = {
+ .ubwc_version = UBWC_4_0,
+ .ubwc_dec_version = UBWC_4_3,
+ .ubwc_swizzle = 6,
+ .ubwc_static = 1,
+ /* TODO: highest_bank_bit = 2 for LP_DDR4 */
+ .highest_bank_bit = 3,
+ .macrotile_mode = 1,
+};
+
static const struct of_device_id mdss_dt_match[] = {
{ .compatible = "qcom,mdss" },
{ .compatible = "qcom,msm8998-mdss" },
{ .compatible = "qcom,sm8250-mdss", .data = &sm8250_data },
{ .compatible = "qcom,sm8350-mdss", .data = &sm8250_data },
{ .compatible = "qcom,sm8450-mdss", .data = &sm8250_data },
- { .compatible = "qcom,sm8550-mdss", .data = &sm8250_data },
+ { .compatible = "qcom,sm8550-mdss", .data = &sm8550_data },
{}
};
MODULE_DEVICE_TABLE(of, mdss_dt_match);
if (ret < 0)
return ret;
- *val = aqc_percent_to_pwm(ret);
+ *val = aqc_percent_to_pwm(*val);
break;
}
break;
#define ZEN_CUR_TEMP_RANGE_SEL_MASK BIT(19)
#define ZEN_CUR_TEMP_TJ_SEL_MASK GENMASK(17, 16)
+/*
+ * AMD's Industrial processor 3255 supports temperatures from -40 to 105 degrees Celsius.
+ * Use the model name to identify 3255 CPUs and set a flag to display negative temperatures.
+ * Do not round negative Tctl or Tdie values off to zero if the flag is set.
+ */
+#define AMD_I3255_STR "3255"
+
struct k10temp_data {
struct pci_dev *pdev;
void (*read_htcreg)(struct pci_dev *pdev, u32 *regval);
u32 show_temp;
bool is_zen;
u32 ccd_offset;
+ bool disp_negative;
};
#define TCTL_BIT 0
switch (channel) {
case 0: /* Tctl */
*val = get_raw_temp(data);
- if (*val < 0)
+ if (*val < 0 && !data->disp_negative)
*val = 0;
break;
case 1: /* Tdie */
*val = get_raw_temp(data) - data->temp_offset;
- if (*val < 0)
+ if (*val < 0 && !data->disp_negative)
*val = 0;
break;
case 2 ... 13: /* Tccd{1-12} */
data->pdev = pdev;
data->show_temp |= BIT(TCTL_BIT); /* Always show Tctl */
+ if (boot_cpu_data.x86 == 0x17 &&
+ strstr(boot_cpu_data.x86_model_id, AMD_I3255_STR)) {
+ data->disp_negative = true;
+ }
+
if (boot_cpu_data.x86 == 0x15 &&
((boot_cpu_data.x86_model & 0xf0) == 0x60 ||
(boot_cpu_data.x86_model & 0xf0) == 0x70)) {
800, 800
};
-static inline long in_from_reg(u8 reg, u8 nr)
+/*
+ * NCT6798 scaling:
+ * CPUVC, IN1, AVSB, 3VCC, IN0, IN8, IN4, 3VSB, VBAT, VTT, IN5, IN6, IN2,
+ * IN3, IN7
+ * Additional scales to be added later: IN9 (800), VHIF (1600)
+ */
+static const u16 scale_in_6798[15] = {
+ 800, 800, 1600, 1600, 800, 800, 800, 1600, 1600, 1600, 1600, 1600, 800,
+ 800, 800
+};
+
+static inline long in_from_reg(u8 reg, u8 nr, const u16 *scales)
{
- return DIV_ROUND_CLOSEST(reg * scale_in[nr], 100);
+ return DIV_ROUND_CLOSEST(reg * scales[nr], 100);
}
-static inline u8 in_to_reg(u32 val, u8 nr)
+static inline u8 in_to_reg(u32 val, u8 nr, const u16 *scales)
{
- return clamp_val(DIV_ROUND_CLOSEST(val * 100, scale_in[nr]), 0, 255);
+ return clamp_val(DIV_ROUND_CLOSEST(val * 100, scales[nr]), 0, 255);
}
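
With the change above, the raw 8-bit register value is multiplied by a per-input scale factor (800 or 1600 in the NCT6798 table) and divided by 100 to give the reported voltage in millivolts, so the same raw reading reports twice the voltage on a 1600-scaled input. A small worked sketch using the table from this patch (DIV_ROUND_CLOSEST is simplified here to the non-negative case):

#include <stdio.h>

/* Rounded integer division for non-negative values, as used by in_from_reg(). */
#define DIV_ROUND_CLOSEST(x, d) (((x) + ((d) / 2)) / (d))

static const unsigned short scale_in_6798[15] = {
	800, 800, 1600, 1600, 800, 800, 800, 1600, 1600, 1600, 1600, 1600, 800,
	800, 800
};

static long in_from_reg(unsigned char reg, unsigned char nr,
			const unsigned short *scales)
{
	return DIV_ROUND_CLOSEST(reg * scales[nr], 100);
}

int main(void)
{
	/* The same raw register value 0xc8 (200) read for in0 and in2. */
	printf("in0 = %ld mV\n", in_from_reg(0xc8, 0, scale_in_6798));	/* 200 * 800 / 100  = 1600 */
	printf("in2 = %ld mV\n", in_from_reg(0xc8, 2, scale_in_6798));	/* 200 * 1600 / 100 = 3200 */
	return 0;
}
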
/* TSI temperatures are in 8.3 format */
if (IS_ERR(data))
return PTR_ERR(data);
- return sprintf(buf, "%ld\n", in_from_reg(data->in[nr][index], nr));
+ return sprintf(buf, "%ld\n",
+ in_from_reg(data->in[nr][index], nr, data->scale_in));
}
static ssize_t
if (err < 0)
return err;
mutex_lock(&data->update_lock);
- data->in[nr][index] = in_to_reg(val, nr);
+ data->in[nr][index] = in_to_reg(val, nr, data->scale_in);
err = nct6775_write_value(data, data->REG_IN_MINMAX[index - 1][nr], data->in[nr][index]);
mutex_unlock(&data->update_lock);
return err ? : count;
mutex_init(&data->update_lock);
data->name = nct6775_device_names[data->kind];
data->bank = 0xff; /* Force initial bank selection */
+ data->scale_in = scale_in;
switch (data->kind) {
case nct6106:
break;
}
+ if (data->kind == nct6798 || data->kind == nct6799)
+ data->scale_in = scale_in_6798;
+
reg_temp = NCT6779_REG_TEMP;
num_reg_temp = ARRAY_SIZE(NCT6779_REG_TEMP);
if (data->kind == nct6791) {
int creb;
int cred;
- cre6 = sio_data->sio_inb(sio_data, 0xe0);
+ cre6 = sio_data->sio_inb(sio_data, 0xe6);
sio_data->sio_select(sio_data, NCT6775_LD_12);
cre0 = sio_data->sio_inb(sio_data, 0xe0);
u8 bank; /* current register bank */
u8 in_num; /* number of in inputs we have */
u8 in[15][3]; /* [0]=in, [1]=in_max, [2]=in_min */
+ const u16 *scale_in; /* internal scaling factors */
unsigned int rpm[NUM_FAN];
u16 fan_min[NUM_FAN];
u8 fan_pulses[NUM_FAN];
if (index >= 38 && index < 46 && !(reg & 0x01)) /* PECI 0 */
return 0;
- if (index >= 0x46 && (!(reg & 0x02))) /* PECI 1 */
+ if (index >= 46 && !(reg & 0x02)) /* PECI 1 */
return 0;
return attr->mode;
}
/* Callbacks for turbo toggle attribute */
+static umode_t tt_toggle_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ switch (board) {
+ case aok_zoe_a1:
+ case oxp_mini_amd_a07:
+ case oxp_mini_amd_pro:
+ return attr->mode;
+ default:
+ break;
+ }
+ return 0;
+}
+
static ssize_t tt_toggle_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
NULL
};
-ATTRIBUTE_GROUPS(oxp_ec);
+static struct attribute_group oxp_ec_attribute_group = {
+ .is_visible = tt_toggle_is_visible,
+ .attrs = oxp_ec_attrs,
+};
+
+static const struct attribute_group *oxp_ec_groups[] = {
+ &oxp_ec_attribute_group,
+ NULL
+};
static const struct hwmon_ops oxp_ec_hwmon_ops = {
.is_visible = oxp_ec_hwmon_is_visible,
const struct dmi_system_id *dmi_entry;
struct device *dev = &pdev->dev;
struct device *hwdev;
- int ret;
/*
* Have to check for AMD processor here because DMI strings are the
board = (enum oxp_board)(unsigned long)dmi_entry->driver_data;
- switch (board) {
- case aok_zoe_a1:
- case oxp_mini_amd_a07:
- case oxp_mini_amd_pro:
- ret = devm_device_add_groups(dev, oxp_ec_groups);
- if (ret)
- return ret;
- break;
- default:
- break;
- }
-
hwdev = devm_hwmon_device_register_with_info(dev, "oxpec", NULL,
&oxp_ec_chip_info, NULL);
static struct platform_driver oxp_platform_driver = {
.driver = {
.name = "oxp-platform",
+ .dev_groups = oxp_ec_groups,
},
.probe = oxp_platform_probe,
};
},
};
-static int _pmbus_is_enabled(struct device *dev, u8 page)
+static int _pmbus_is_enabled(struct i2c_client *client, u8 page)
{
- struct i2c_client *client = to_i2c_client(dev->parent);
int ret;
ret = _pmbus_read_byte_data(client, page, PMBUS_OPERATION);
return !!(ret & PB_OPERATION_CONTROL_ON);
}
-static int __maybe_unused pmbus_is_enabled(struct device *dev, u8 page)
+static int __maybe_unused pmbus_is_enabled(struct i2c_client *client, u8 page)
{
- struct i2c_client *client = to_i2c_client(dev->parent);
struct pmbus_data *data = i2c_get_clientdata(client);
int ret;
mutex_lock(&data->update_lock);
- ret = _pmbus_is_enabled(dev, page);
+ ret = _pmbus_is_enabled(client, page);
mutex_unlock(&data->update_lock);
- return !!(ret & PB_OPERATION_CONTROL_ON);
+ return ret;
}
#define to_dev_attr(_dev_attr) \
if (status < 0)
return status;
- if (_pmbus_is_enabled(dev, page)) {
+ if (_pmbus_is_enabled(client, page)) {
if (status & PB_STATUS_OFF) {
*flags |= REGULATOR_ERROR_FAIL;
*event |= REGULATOR_EVENT_FAIL;
#if IS_ENABLED(CONFIG_REGULATOR)
static int pmbus_regulator_is_enabled(struct regulator_dev *rdev)
{
- return pmbus_is_enabled(rdev_get_dev(rdev), rdev_get_id(rdev));
+ struct device *dev = rdev_get_dev(rdev);
+ struct i2c_client *client = to_i2c_client(dev->parent);
+
+ return pmbus_is_enabled(client, rdev_get_id(rdev));
}
static int _pmbus_regulator_on_off(struct regulator_dev *rdev, bool enable)
struct pmbus_data *data = i2c_get_clientdata(client);
u8 page = rdev_get_id(rdev);
int status, ret;
+ int event;
mutex_lock(&data->update_lock);
status = pmbus_get_status(client, page, PMBUS_STATUS_WORD);
goto unlock;
}
- ret = pmbus_regulator_get_error_flags(rdev, &status);
+ ret = _pmbus_get_flags(data, rdev_get_id(rdev), &status, &event, false);
if (ret)
goto unlock;
RDMA_CM_ADDR_QUERY)))
return -EINVAL;
+ } else {
+ memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
}
if (cma_family(id_priv) != dst_addr->sa_family) {
int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
{
struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
+ struct bnxt_qplib_qp *qplib_qp = &qp->qplib_qp;
struct bnxt_re_dev *rdev = qp->rdev;
+ struct bnxt_qplib_nq *scq_nq = NULL;
+ struct bnxt_qplib_nq *rcq_nq = NULL;
unsigned int flags;
int rc;
ib_umem_release(qp->rumem);
ib_umem_release(qp->sumem);
+ /* Flush all the entries of notification queue associated with
+ * given qp.
+ */
+ scq_nq = qplib_qp->scq->nq;
+ rcq_nq = qplib_qp->rcq->nq;
+ bnxt_re_synchronize_nq(scq_nq);
+ if (scq_nq != rcq_nq)
+ bnxt_re_synchronize_nq(rcq_nq);
+
return 0;
}
spin_unlock_bh(&hwq->lock);
}
+/* bnxt_re_synchronize_nq - self polling notification queue.
+ * @nq - notification queue pointer
+ *
+ * This function will start polling entries of a given notification queue
+ * for all pending entries.
+ * This function is useful to synchronize notification entries while resources
+ * are going away.
+ */
+
+void bnxt_re_synchronize_nq(struct bnxt_qplib_nq *nq)
+{
+ int budget = nq->budget;
+
+ nq->budget = nq->hwq.max_elements;
+ bnxt_qplib_service_nq(&nq->nq_tasklet);
+ nq->budget = budget;
+}
+
static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
{
struct bnxt_qplib_nq *nq = dev_instance;
if (!nq->requested)
return;
- tasklet_disable(&nq->nq_tasklet);
+ nq->requested = false;
/* Mask h/w interrupt */
bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, false);
/* Sync with last running IRQ handler */
synchronize_irq(nq->msix_vec);
- if (kill)
- tasklet_kill(&nq->nq_tasklet);
-
irq_set_affinity_hint(nq->msix_vec, NULL);
free_irq(nq->msix_vec, nq);
kfree(nq->name);
nq->name = NULL;
- nq->requested = false;
+
+ if (kill)
+ tasklet_kill(&nq->nq_tasklet);
+ tasklet_disable(&nq->nq_tasklet);
}
void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
struct bnxt_qplib_cqe *cqe,
int num_cqes);
void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp);
+void bnxt_re_synchronize_nq(struct bnxt_qplib_nq *nq);
static inline void *bnxt_qplib_get_swqe(struct bnxt_qplib_q *que, u32 *swq_idx)
{
if (!creq->requested)
return;
- tasklet_disable(&creq->creq_tasklet);
+ creq->requested = false;
/* Mask h/w interrupts */
bnxt_qplib_ring_nq_db(&creq->creq_db.dbinfo, rcfw->res->cctx, false);
/* Sync with last running IRQ-handler */
synchronize_irq(creq->msix_vec);
- if (kill)
- tasklet_kill(&creq->creq_tasklet);
-
free_irq(creq->msix_vec, rcfw);
kfree(creq->irq_name);
creq->irq_name = NULL;
- creq->requested = false;
atomic_set(&rcfw->rcfw_intr_enabled, 0);
+ if (kill)
+ tasklet_kill(&creq->creq_tasklet);
+ tasklet_disable(&creq->creq_tasklet);
}
void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
*/
void irdma_check_cqp_progress(struct irdma_cqp_timeout *timeout, struct irdma_sc_dev *dev)
{
- if (timeout->compl_cqp_cmds != dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]) {
- timeout->compl_cqp_cmds = dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS];
+ u64 completed_ops = atomic64_read(&dev->cqp->completed_ops);
+
+ if (timeout->compl_cqp_cmds != completed_ops) {
+ timeout->compl_cqp_cmds = completed_ops;
timeout->count = 0;
- } else {
- if (dev->cqp_cmd_stats[IRDMA_OP_REQ_CMDS] !=
- timeout->compl_cqp_cmds)
- timeout->count++;
+ } else if (timeout->compl_cqp_cmds != dev->cqp->requested_ops) {
+ timeout->count++;
}
}
if (newtail != tail) {
/* SUCCESS */
IRDMA_RING_MOVE_TAIL(cqp->sq_ring);
- cqp->dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]++;
+ atomic64_inc(&cqp->completed_ops);
return 0;
}
udelay(cqp->dev->hw_attrs.max_sleep_count);
info->dev->cqp = cqp;
IRDMA_RING_INIT(cqp->sq_ring, cqp->sq_size);
- cqp->dev->cqp_cmd_stats[IRDMA_OP_REQ_CMDS] = 0;
- cqp->dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS] = 0;
+ cqp->requested_ops = 0;
+ atomic64_set(&cqp->completed_ops, 0);
/* for the cqp commands backlog. */
INIT_LIST_HEAD(&cqp->dev->cqp_cmd_head);
if (ret_code)
return NULL;
- cqp->dev->cqp_cmd_stats[IRDMA_OP_REQ_CMDS]++;
+ cqp->requested_ops++;
if (!*wqe_idx)
cqp->polarity = !cqp->polarity;
wqe = cqp->sq_base[*wqe_idx].elem;
if (polarity != ccq->cq_uk.polarity)
return -ENOENT;
+ /* Ensure CEQE contents are read after valid bit is checked */
+ dma_rmb();
+
get_64bit_val(cqe, 8, &qp_ctx);
cqp = (struct irdma_sc_cqp *)(unsigned long)qp_ctx;
info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, temp);
dma_wmb(); /* make sure shadow area is updated before moving tail */
IRDMA_RING_MOVE_TAIL(cqp->sq_ring);
- ccq->dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]++;
+ atomic64_inc(&cqp->completed_ops);
return ret_code;
}
u8 polarity;
aeqe = IRDMA_GET_CURRENT_AEQ_ELEM(aeq);
- get_64bit_val(aeqe, 0, &compl_ctx);
get_64bit_val(aeqe, 8, &temp);
polarity = (u8)FIELD_GET(IRDMA_AEQE_VALID, temp);
if (aeq->polarity != polarity)
return -ENOENT;
+ /* Ensure AEQE contents are read after valid bit is checked */
+ dma_rmb();
+
+ get_64bit_val(aeqe, 0, &compl_ctx);
+
print_hex_dump_debug("WQE: AEQ_ENTRY WQE", DUMP_PREFIX_OFFSET, 16, 8,
aeqe, 16, false);
IRDMA_OP_MANAGE_VF_PBLE_BP = 25,
IRDMA_OP_QUERY_FPM_VAL = 26,
IRDMA_OP_COMMIT_FPM_VAL = 27,
- IRDMA_OP_REQ_CMDS = 28,
- IRDMA_OP_CMPL_CMDS = 29,
- IRDMA_OP_AH_CREATE = 30,
- IRDMA_OP_AH_MODIFY = 31,
- IRDMA_OP_AH_DESTROY = 32,
- IRDMA_OP_MC_CREATE = 33,
- IRDMA_OP_MC_DESTROY = 34,
- IRDMA_OP_MC_MODIFY = 35,
- IRDMA_OP_STATS_ALLOCATE = 36,
- IRDMA_OP_STATS_FREE = 37,
- IRDMA_OP_STATS_GATHER = 38,
- IRDMA_OP_WS_ADD_NODE = 39,
- IRDMA_OP_WS_MODIFY_NODE = 40,
- IRDMA_OP_WS_DELETE_NODE = 41,
- IRDMA_OP_WS_FAILOVER_START = 42,
- IRDMA_OP_WS_FAILOVER_COMPLETE = 43,
- IRDMA_OP_SET_UP_MAP = 44,
- IRDMA_OP_GEN_AE = 45,
- IRDMA_OP_QUERY_RDMA_FEATURES = 46,
- IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY = 47,
- IRDMA_OP_ADD_LOCAL_MAC_ENTRY = 48,
- IRDMA_OP_DELETE_LOCAL_MAC_ENTRY = 49,
- IRDMA_OP_CQ_MODIFY = 50,
+ IRDMA_OP_AH_CREATE = 28,
+ IRDMA_OP_AH_MODIFY = 29,
+ IRDMA_OP_AH_DESTROY = 30,
+ IRDMA_OP_MC_CREATE = 31,
+ IRDMA_OP_MC_DESTROY = 32,
+ IRDMA_OP_MC_MODIFY = 33,
+ IRDMA_OP_STATS_ALLOCATE = 34,
+ IRDMA_OP_STATS_FREE = 35,
+ IRDMA_OP_STATS_GATHER = 36,
+ IRDMA_OP_WS_ADD_NODE = 37,
+ IRDMA_OP_WS_MODIFY_NODE = 38,
+ IRDMA_OP_WS_DELETE_NODE = 39,
+ IRDMA_OP_WS_FAILOVER_START = 40,
+ IRDMA_OP_WS_FAILOVER_COMPLETE = 41,
+ IRDMA_OP_SET_UP_MAP = 42,
+ IRDMA_OP_GEN_AE = 43,
+ IRDMA_OP_QUERY_RDMA_FEATURES = 44,
+ IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY = 45,
+ IRDMA_OP_ADD_LOCAL_MAC_ENTRY = 46,
+ IRDMA_OP_DELETE_LOCAL_MAC_ENTRY = 47,
+ IRDMA_OP_CQ_MODIFY = 48,
/* Must be last entry*/
- IRDMA_MAX_CQP_OPS = 51,
+ IRDMA_MAX_CQP_OPS = 49,
};
/* CQP SQ WQES */
case IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS:
case IRDMA_AE_AMP_MWBIND_BIND_DISABLED:
case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS:
+ case IRDMA_AE_AMP_MWBIND_VALID_STAG:
qp->flush_code = FLUSH_MW_BIND_ERR;
qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
break;
cqp_request->compl_info.error = info.error;
if (cqp_request->waiting) {
- cqp_request->request_done = true;
+ WRITE_ONCE(cqp_request->request_done, true);
wake_up(&cqp_request->waitq);
irdma_put_cqp_request(&rf->cqp, cqp_request);
} else {
void (*callback_fcn)(struct irdma_cqp_request *cqp_request);
void *param;
struct irdma_cqp_compl_info compl_info;
+ bool request_done; /* READ/WRITE_ONCE macros operate on it */
bool waiting:1;
- bool request_done:1;
bool dynamic:1;
};
if (valid_bit != cq_uk->polarity)
return -ENOENT;
+ /* Ensure CQE contents are read after valid bit is checked */
+ dma_rmb();
+
if (cq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
ext_valid = (bool)FIELD_GET(IRDMA_CQ_EXTCQE, qword3);
if (polarity != cq_uk->polarity)
return -ENOENT;
+ /* Ensure ext CQE contents are read after ext valid bit is checked */
+ dma_rmb();
+
IRDMA_RING_MOVE_HEAD_NOCHECK(cq_uk->cq_ring);
if (!IRDMA_RING_CURRENT_HEAD(cq_uk->cq_ring))
cq_uk->polarity = !cq_uk->polarity;
struct irdma_dcqcn_cc_params dcqcn_params;
__le64 *host_ctx;
u64 *scratch_array;
+ u64 requested_ops;
+ atomic64_t completed_ops;
u32 cqp_id;
u32 sq_size;
u32 hw_sq_size;
}
wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3);
info->qp_handle = (irdma_qp_handle)(unsigned long)qp;
- info->op_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);
+ info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3);
if (info->q_type == IRDMA_CQE_QTYPE_RQ) {
u32 array_idx;
if (polarity != temp)
break;
+ /* Ensure CQE contents are read after valid bit is checked */
+ dma_rmb();
+
get_64bit_val(cqe, 8, &comp_ctx);
if ((void *)(unsigned long)comp_ctx == q)
set_64bit_val(cqe, 8, 0);
if (cqp_request->dynamic) {
kfree(cqp_request);
} else {
- cqp_request->request_done = false;
+ WRITE_ONCE(cqp_request->request_done, false);
cqp_request->callback_fcn = NULL;
cqp_request->waiting = false;
{
if (cqp_request->waiting) {
cqp_request->compl_info.error = true;
- cqp_request->request_done = true;
+ WRITE_ONCE(cqp_request->request_done, true);
wake_up(&cqp_request->waitq);
}
wait_event_timeout(cqp->remove_wq,
bool cqp_error = false;
int err_code = 0;
- cqp_timeout.compl_cqp_cmds = rf->sc_dev.cqp_cmd_stats[IRDMA_OP_CMPL_CMDS];
+ cqp_timeout.compl_cqp_cmds = atomic64_read(&rf->sc_dev.cqp->completed_ops);
do {
irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq);
if (wait_event_timeout(cqp_request->waitq,
- cqp_request->request_done,
+ READ_ONCE(cqp_request->request_done),
msecs_to_jiffies(CQP_COMPL_WAIT_TIME_MS)))
break;
return (-EOPNOTSUPP);
}
- if (ucmd->rx_hash_fields_mask & ~(MLX4_IB_RX_HASH_SRC_IPV4 |
- MLX4_IB_RX_HASH_DST_IPV4 |
- MLX4_IB_RX_HASH_SRC_IPV6 |
- MLX4_IB_RX_HASH_DST_IPV6 |
- MLX4_IB_RX_HASH_SRC_PORT_TCP |
- MLX4_IB_RX_HASH_DST_PORT_TCP |
- MLX4_IB_RX_HASH_SRC_PORT_UDP |
- MLX4_IB_RX_HASH_DST_PORT_UDP |
- MLX4_IB_RX_HASH_INNER)) {
+ if (ucmd->rx_hash_fields_mask & ~(u64)(MLX4_IB_RX_HASH_SRC_IPV4 |
+ MLX4_IB_RX_HASH_DST_IPV4 |
+ MLX4_IB_RX_HASH_SRC_IPV6 |
+ MLX4_IB_RX_HASH_DST_IPV6 |
+ MLX4_IB_RX_HASH_SRC_PORT_TCP |
+ MLX4_IB_RX_HASH_DST_PORT_TCP |
+ MLX4_IB_RX_HASH_SRC_PORT_UDP |
+ MLX4_IB_RX_HASH_DST_PORT_UDP |
+ MLX4_IB_RX_HASH_INNER)) {
pr_debug("RX Hash fields_mask has unsupported mask (0x%llx)\n",
ucmd->rx_hash_fields_mask);
return (-EOPNOTSUPP);
if (mthca_array_get(&dev->qp_table.qp, mqpn))
err = -EBUSY;
else
- mthca_array_set(&dev->qp_table.qp, mqpn, qp->sqp);
+ mthca_array_set(&dev->qp_table.qp, mqpn, qp);
spin_unlock_irq(&dev->qp_table.lock);
if (err)
if (access & ~RXE_ACCESS_SUPPORTED_MW) {
rxe_err_mw(mw, "access %#x not supported", access);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto err_drop_mr;
}
spin_lock_bh(&mw->lock);
*/
void iommufd_device_unbind(struct iommufd_device *idev)
{
- bool was_destroyed;
-
- was_destroyed = iommufd_object_destroy_user(idev->ictx, &idev->obj);
- WARN_ON(!was_destroyed);
+ iommufd_object_destroy_user(idev->ictx, &idev->obj);
}
EXPORT_SYMBOL_NS_GPL(iommufd_device_unbind, IOMMUFD);
mutex_unlock(&hwpt->devices_lock);
if (hwpt->auto_domain)
- iommufd_object_destroy_user(idev->ictx, &hwpt->obj);
+ iommufd_object_deref_user(idev->ictx, &hwpt->obj);
else
refcount_dec(&hwpt->obj.users);
*/
void iommufd_access_destroy(struct iommufd_access *access)
{
- bool was_destroyed;
-
- was_destroyed = iommufd_object_destroy_user(access->ictx, &access->obj);
- WARN_ON(!was_destroyed);
+ iommufd_object_destroy_user(access->ictx, &access->obj);
}
EXPORT_SYMBOL_NS_GPL(iommufd_access_destroy, IOMMUFD);
struct iommufd_object *obj);
void iommufd_object_finalize(struct iommufd_ctx *ictx,
struct iommufd_object *obj);
-bool iommufd_object_destroy_user(struct iommufd_ctx *ictx,
- struct iommufd_object *obj);
+void __iommufd_object_destroy_user(struct iommufd_ctx *ictx,
+ struct iommufd_object *obj, bool allow_fail);
+static inline void iommufd_object_destroy_user(struct iommufd_ctx *ictx,
+ struct iommufd_object *obj)
+{
+ __iommufd_object_destroy_user(ictx, obj, false);
+}
+static inline void iommufd_object_deref_user(struct iommufd_ctx *ictx,
+ struct iommufd_object *obj)
+{
+ __iommufd_object_destroy_user(ictx, obj, true);
+}
+
struct iommufd_object *_iommufd_object_alloc(struct iommufd_ctx *ictx,
size_t size,
enum iommufd_object_type type);
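
A hedged illustration of how a caller might pick between the two wrappers declared above; the helper and flag below are placeholders, not iommufd code. destroy_user() WARNs if the object is unexpectedly busy, while deref_user() tolerates failure and lets the object be reaped when the context closes:

/* Placeholder caller, not part of iommufd. */
static void example_put_object(struct iommufd_ctx *ictx,
			       struct iommufd_object *obj, bool may_defer)
{
	if (may_defer)
		iommufd_object_deref_user(ictx, obj);	/* failure tolerated, freed at close */
	else
		iommufd_object_destroy_user(ictx, obj);	/* WARNs if the object is still busy */
}
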
return obj;
}
+/*
+ * Remove the given object id from the xarray if the only reference to the
+ * object is held by the xarray. The caller must call ops destroy().
+ */
+static struct iommufd_object *iommufd_object_remove(struct iommufd_ctx *ictx,
+ u32 id, bool extra_put)
+{
+ struct iommufd_object *obj;
+ XA_STATE(xas, &ictx->objects, id);
+
+ xa_lock(&ictx->objects);
+ obj = xas_load(&xas);
+ if (xa_is_zero(obj) || !obj) {
+ obj = ERR_PTR(-ENOENT);
+ goto out_xa;
+ }
+
+ /*
+ * If the caller is holding a ref on obj we put it here under the
+ * spinlock.
+ */
+ if (extra_put)
+ refcount_dec(&obj->users);
+
+ if (!refcount_dec_if_one(&obj->users)) {
+ obj = ERR_PTR(-EBUSY);
+ goto out_xa;
+ }
+
+ xas_store(&xas, NULL);
+ if (ictx->vfio_ioas == container_of(obj, struct iommufd_ioas, obj))
+ ictx->vfio_ioas = NULL;
+
+out_xa:
+ xa_unlock(&ictx->objects);
+
+ /* The returned object reference count is zero */
+ return obj;
+}
+
/*
* The caller holds a users refcount and wants to destroy the object. Returns
* true if the object was destroyed. In all cases the caller no longer has a
* reference on obj.
*/
-bool iommufd_object_destroy_user(struct iommufd_ctx *ictx,
- struct iommufd_object *obj)
+void __iommufd_object_destroy_user(struct iommufd_ctx *ictx,
+ struct iommufd_object *obj, bool allow_fail)
{
+ struct iommufd_object *ret;
+
/*
* The purpose of the destroy_rwsem is to ensure deterministic
	 * destruction of objects used by external drivers and destroyed by this
	 * function. Any temporary increment of the refcount must hold the read
	 * side of this, such as during ioctl execution.
*/
down_write(&obj->destroy_rwsem);
- xa_lock(&ictx->objects);
- refcount_dec(&obj->users);
- if (!refcount_dec_if_one(&obj->users)) {
- xa_unlock(&ictx->objects);
- up_write(&obj->destroy_rwsem);
- return false;
- }
- __xa_erase(&ictx->objects, obj->id);
- if (ictx->vfio_ioas && &ictx->vfio_ioas->obj == obj)
- ictx->vfio_ioas = NULL;
- xa_unlock(&ictx->objects);
+ ret = iommufd_object_remove(ictx, obj->id, true);
up_write(&obj->destroy_rwsem);
+ if (allow_fail && IS_ERR(ret))
+ return;
+
+ /*
+ * If there is a bug and we couldn't destroy the object then we did put
+ * back the caller's refcount and will eventually try to free it again
+ * during close.
+ */
+ if (WARN_ON(IS_ERR(ret)))
+ return;
+
iommufd_object_ops[obj->type].destroy(obj);
kfree(obj);
- return true;
}
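
The core of the new removal path is the refcount_dec_if_one() idiom: destruction proceeds only when the caller is the last user, otherwise the object stays alive. A standalone sketch of that idiom; try_destroy() is an assumed name:

#include <linux/refcount.h>
#include <linux/errno.h>

static int try_destroy(refcount_t *users)
{
	if (!refcount_dec_if_one(users))
		return -EBUSY;		/* someone else still holds a reference */

	/* sole owner here: safe to unpublish and free the object */
	return 0;
}
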
static int iommufd_destroy(struct iommufd_ucmd *ucmd)
struct iommu_destroy *cmd = ucmd->cmd;
struct iommufd_object *obj;
- obj = iommufd_get_object(ucmd->ictx, cmd->id, IOMMUFD_OBJ_ANY);
+ obj = iommufd_object_remove(ucmd->ictx, cmd->id, false);
if (IS_ERR(obj))
return PTR_ERR(obj);
- iommufd_ref_to_users(obj);
- /* See iommufd_ref_to_users() */
- if (!iommufd_object_destroy_user(ucmd->ictx, obj))
- return -EBUSY;
+ iommufd_object_ops[obj->type].destroy(obj);
+ kfree(obj);
return 0;
}
batch->pfns[0] = batch->pfns[batch->end - 1] +
(batch->npfns[batch->end - 1] - keep_pfns);
batch->npfns[0] = keep_pfns;
- batch->end = 0;
+ batch->end = 1;
}
static void batch_skip_carry(struct pfn_batch *batch, unsigned int skip_pfns)
};
struct bcm6345_l1_cpu {
+ struct bcm6345_l1_chip *intc;
void __iomem *map_base;
unsigned int parent_irq;
u32 enable_cache[];
static void bcm6345_l1_irq_handle(struct irq_desc *desc)
{
- struct bcm6345_l1_chip *intc = irq_desc_get_handler_data(desc);
- struct bcm6345_l1_cpu *cpu;
+ struct bcm6345_l1_cpu *cpu = irq_desc_get_handler_data(desc);
+ struct bcm6345_l1_chip *intc = cpu->intc;
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int idx;
-#ifdef CONFIG_SMP
- cpu = intc->cpus[cpu_logical_map(smp_processor_id())];
-#else
- cpu = intc->cpus[0];
-#endif
-
chained_irq_enter(chip, desc);
for (idx = 0; idx < intc->n_words; idx++) {
if (!cpu)
return -ENOMEM;
+ cpu->intc = intc;
cpu->map_base = ioremap(res.start, sz);
if (!cpu->map_base)
return -ENOMEM;
return -EINVAL;
}
irq_set_chained_handler_and_data(cpu->parent_irq,
- bcm6345_l1_irq_handle, intc);
+ bcm6345_l1_irq_handle, cpu);
return 0;
}
raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
}
+static struct irq_chip its_vpe_irq_chip;
+
static int irq_to_cpuid_lock(struct irq_data *d, unsigned long *flags)
{
- struct its_vlpi_map *map = get_vlpi_map(d);
+ struct its_vpe *vpe = NULL;
int cpu;
- if (map) {
- cpu = vpe_to_cpuid_lock(map->vpe, flags);
+ if (d->chip == &its_vpe_irq_chip) {
+ vpe = irq_data_get_irq_chip_data(d);
+ } else {
+ struct its_vlpi_map *map = get_vlpi_map(d);
+ if (map)
+ vpe = map->vpe;
+ }
+
+ if (vpe) {
+ cpu = vpe_to_cpuid_lock(vpe, flags);
} else {
/* Physical LPIs are already locked via the irq_desc lock */
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
static void irq_to_cpuid_unlock(struct irq_data *d, unsigned long flags)
{
- struct its_vlpi_map *map = get_vlpi_map(d);
+ struct its_vpe *vpe = NULL;
+
+ if (d->chip == &its_vpe_irq_chip) {
+ vpe = irq_data_get_irq_chip_data(d);
+ } else {
+ struct its_vlpi_map *map = get_vlpi_map(d);
+ if (map)
+ vpe = map->vpe;
+ }
- if (map)
- vpe_to_cpuid_unlock(map->vpe, flags);
+ if (vpe)
+ vpe_to_cpuid_unlock(vpe, flags);
}
static struct its_collection *valid_col(struct its_collection *col)
cpu_relax();
}
-static void direct_lpi_inv(struct irq_data *d)
+static void __direct_lpi_inv(struct irq_data *d, u64 val)
{
- struct its_vlpi_map *map = get_vlpi_map(d);
void __iomem *rdbase;
unsigned long flags;
- u64 val;
int cpu;
+ /* Target the redistributor this LPI is currently routed to */
+ cpu = irq_to_cpuid_lock(d, &flags);
+ raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
+
+ rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
+ gic_write_lpir(val, rdbase + GICR_INVLPIR);
+ wait_for_syncr(rdbase);
+
+ raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
+ irq_to_cpuid_unlock(d, flags);
+}
+
+static void direct_lpi_inv(struct irq_data *d)
+{
+ struct its_vlpi_map *map = get_vlpi_map(d);
+ u64 val;
+
if (map) {
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
val = d->hwirq;
}
- /* Target the redistributor this LPI is currently routed to */
- cpu = irq_to_cpuid_lock(d, &flags);
- raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
- rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
- gic_write_lpir(val, rdbase + GICR_INVLPIR);
-
- wait_for_syncr(rdbase);
- raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
- irq_to_cpuid_unlock(d, flags);
+ __direct_lpi_inv(d, val);
}
static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
{
struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
- if (gic_rdists->has_direct_lpi) {
- void __iomem *rdbase;
-
- /* Target the redistributor this VPE is currently known on */
- raw_spin_lock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
- rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
- gic_write_lpir(d->parent_data->hwirq, rdbase + GICR_INVLPIR);
- wait_for_syncr(rdbase);
- raw_spin_unlock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
- } else {
+ if (gic_rdists->has_direct_lpi)
+ __direct_lpi_inv(d, d->parent_data->hwirq);
+ else
its_vpe_send_cmd(vpe, its_send_inv);
- }
}
static void its_vpe_mask_irq(struct irq_data *d)
{
struct its_node *its = data;
- if (!of_machine_is_compatible("rockchip,rk3588"))
+ if (!of_machine_is_compatible("rockchip,rk3588") &&
+ !of_machine_is_compatible("rockchip,rk3588s"))
return false;
its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE;
static void __iomem *t241_dist_base_alias[T241_CHIPS_MAX] __read_mostly;
static DEFINE_STATIC_KEY_FALSE(gic_nvidia_t241_erratum);
+static DEFINE_STATIC_KEY_FALSE(gic_arm64_2941627_erratum);
+
static struct gic_chip_data gic_data __read_mostly;
static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);
gic_irq_set_prio(d, GICD_INT_DEF_PRI);
}
+static bool gic_arm64_erratum_2941627_needed(struct irq_data *d)
+{
+ enum gic_intid_range range;
+
+ if (!static_branch_unlikely(&gic_arm64_2941627_erratum))
+ return false;
+
+ range = get_intid_range(d);
+
+ /*
+ * The workaround is needed if the IRQ is an SPI and
+ * the target cpu is different from the one we are
+ * executing on.
+ */
+ return (range == SPI_RANGE || range == ESPI_RANGE) &&
+ !cpumask_test_cpu(raw_smp_processor_id(),
+ irq_data_get_effective_affinity_mask(d));
+}
+
static void gic_eoi_irq(struct irq_data *d)
{
write_gicreg(gic_irq(d), ICC_EOIR1_EL1);
isb();
+
+ if (gic_arm64_erratum_2941627_needed(d)) {
+ /*
+ * Make sure the GIC stream deactivate packet
+ * issued by ICC_EOIR1_EL1 has completed before
+		 * deactivating through GICD_ICACTIVER.
+ */
+ dsb(sy);
+ gic_poke_irq(d, GICD_ICACTIVER);
+ }
}
static void gic_eoimode1_eoi_irq(struct irq_data *d)
*/
if (gic_irq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d))
return;
- gic_write_dir(gic_irq(d));
+
+ if (!gic_arm64_erratum_2941627_needed(d))
+ gic_write_dir(gic_irq(d));
+ else
+ gic_poke_irq(d, GICD_ICACTIVER);
}
static int gic_set_type(struct irq_data *d, unsigned int type)
return true;
}
+static bool gic_enable_quirk_arm64_2941627(void *data)
+{
+ static_branch_enable(&gic_arm64_2941627_erratum);
+ return true;
+}
+
static const struct gic_quirk gic_quirks[] = {
{
.desc = "GICv3: Qualcomm MSM8996 broken firmware",
.mask = 0xffffffff,
.init = gic_enable_quirk_nvidia_t241,
},
+ {
+		/*
+		 * GIC-700: 2941627 workaround - IP variant [0,1]
+		 */
+ .desc = "GICv3: ARM64 erratum 2941627",
+ .iidr = 0x0400043b,
+ .mask = 0xff0e0fff,
+ .init = gic_enable_quirk_arm64_2941627,
+ },
+ {
+ /*
+ * GIC-700: 2941627 workaround - IP variant [2]
+ */
+ .desc = "GICv3: ARM64 erratum 2941627",
+ .iidr = 0x0402043b,
+ .mask = 0xff0f0fff,
+ .init = gic_enable_quirk_arm64_2941627,
+ },
{
}
};
struct background_tracker *bg_work;
- bool migrations_allowed;
+ bool migrations_allowed:1;
+
+ /*
+ * If this is set the policy will try and clean the whole cache
+ * even if the device is not idle.
+ */
+ bool cleaner:1;
};
/*----------------------------------------------------------------*/
* Cache entries may not be populated. So we cannot rely on the
* size of the clean queue.
*/
- if (idle) {
+ if (idle || mq->cleaner) {
/*
* We'd like to clean everything.
*/
*hotspot_block_size /= 2u;
}
-static struct dm_cache_policy *__smq_create(dm_cblock_t cache_size,
- sector_t origin_size,
- sector_t cache_block_size,
- bool mimic_mq,
- bool migrations_allowed)
+static struct dm_cache_policy *
+__smq_create(dm_cblock_t cache_size, sector_t origin_size, sector_t cache_block_size,
+ bool mimic_mq, bool migrations_allowed, bool cleaner)
{
unsigned int i;
unsigned int nr_sentinels_per_queue = 2u * NR_CACHE_LEVELS;
goto bad_btracker;
mq->migrations_allowed = migrations_allowed;
+ mq->cleaner = cleaner;
return &mq->policy;
sector_t origin_size,
sector_t cache_block_size)
{
- return __smq_create(cache_size, origin_size, cache_block_size, false, true);
+ return __smq_create(cache_size, origin_size, cache_block_size,
+ false, true, false);
}
static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
sector_t origin_size,
sector_t cache_block_size)
{
- return __smq_create(cache_size, origin_size, cache_block_size, true, true);
+ return __smq_create(cache_size, origin_size, cache_block_size,
+ true, true, false);
}
static struct dm_cache_policy *cleaner_create(dm_cblock_t cache_size,
sector_t origin_size,
sector_t cache_block_size)
{
- return __smq_create(cache_size, origin_size, cache_block_size, false, false);
+ return __smq_create(cache_size, origin_size, cache_block_size,
+ false, false, true);
}
/*----------------------------------------------------------------*/
recalc_tags = kvmalloc(recalc_tags_size, GFP_NOIO);
if (!recalc_tags) {
vfree(recalc_buffer);
+ recalc_buffer = NULL;
goto oom;
}
r = md_start(&rs->md);
if (r) {
ti->error = "Failed to start raid array";
- mddev_unlock(&rs->md);
- goto bad_md_start;
+ goto bad_unlock;
}
/* If raid4/5/6 journal mode explicitly requested (only possible with journal dev) -> set it */
r = r5c_journal_mode_set(&rs->md, rs->journal_dev.mode);
if (r) {
ti->error = "Failed to set raid4/5/6 journal mode";
- mddev_unlock(&rs->md);
- goto bad_journal_mode_set;
+ goto bad_unlock;
}
}
if (rs_is_raid456(rs)) {
r = rs_set_raid456_stripe_cache(rs);
if (r)
- goto bad_stripe_cache;
+ goto bad_unlock;
}
/* Now do an early reshape check */
if (test_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) {
r = rs_check_reshape(rs);
if (r)
- goto bad_check_reshape;
+ goto bad_unlock;
/* Restore new, ctr requested layout to perform check */
rs_config_restore(rs, &rs_layout);
r = rs->md.pers->check_reshape(&rs->md);
if (r) {
ti->error = "Reshape check failed";
- goto bad_check_reshape;
+ goto bad_unlock;
}
}
}
mddev_unlock(&rs->md);
return 0;
-bad_md_start:
-bad_journal_mode_set:
-bad_stripe_cache:
-bad_check_reshape:
+bad_unlock:
md_stop(&rs->md);
+ mddev_unlock(&rs->md);
bad:
raid_set_free(rs);
{
struct raid_set *rs = ti->private;
+ mddev_lock_nointr(&rs->md);
md_stop(&rs->md);
+ mddev_unlock(&rs->md);
raid_set_free(rs);
}
void md_stop(struct mddev *mddev)
{
+ lockdep_assert_held(&mddev->reconfig_mutex);
+
/* stop the array and free an attached data structures.
* This is called from dm-raid
*/
mutex_lock(&pulse8->lock);
cmd = MSGCODE_PING;
- pulse8_send_and_wait(pulse8, &cmd, 1,
- MSGCODE_COMMAND_ACCEPTED, 0);
+ if (pulse8_send_and_wait(pulse8, &cmd, 1,
+ MSGCODE_COMMAND_ACCEPTED, 0)) {
+ dev_warn(pulse8->dev, "failed to ping EEPROM\n");
+ goto unlock;
+ }
if (pulse8->vers < 2)
goto unlock;
u32 min_delta = 0xffffffff;
u16 prediv_max = 17;
u16 prediv_min = 1;
- u16 m_best, mul;
- u16 p_best, p;
+ u16 m_best = 0, mul;
+ u16 p_best = 1, p;
u8 postdiv;
if (fout > 1000 * HZ_PER_MHZ) {
request_module("%s", info.type);
client_tuner = i2c_new_client_device(&dev->i2c_bus[1].i2c_adap, &info);
if (!i2c_client_has_driver(client_tuner)) {
- module_put(client_demod->dev.driver->owner);
- i2c_unregister_device(client_demod);
- port->i2c_client_demod = NULL;
goto frontend_detach;
}
if (!try_module_get(client_tuner->dev.driver->owner)) {
i2c_unregister_device(client_tuner);
- module_put(client_demod->dev.driver->owner);
- i2c_unregister_device(client_demod);
- port->i2c_client_demod = NULL;
goto frontend_detach;
}
port->i2c_client_tuner = client_tuner;
request_module("%s", info.type);
client_tuner = i2c_new_client_device(&dev->i2c_bus[1].i2c_adap, &info);
if (!i2c_client_has_driver(client_tuner)) {
- module_put(client_demod->dev.driver->owner);
- i2c_unregister_device(client_demod);
- port->i2c_client_demod = NULL;
goto frontend_detach;
}
if (!try_module_get(client_tuner->dev.driver->owner)) {
i2c_unregister_device(client_tuner);
- module_put(client_demod->dev.driver->owner);
- i2c_unregister_device(client_demod);
- port->i2c_client_demod = NULL;
goto frontend_detach;
}
port->i2c_client_tuner = client_tuner;
static struct vpu_core_resources imx8q_enc = {
.type = VPU_CORE_TYPE_ENC,
- .fwname = "vpu/vpu_fw_imx8_enc.bin",
+ .fwname = "amphion/vpu/vpu_fw_imx8_enc.bin",
.stride = 16,
.max_width = 1920,
.max_height = 1920,
static struct vpu_core_resources imx8q_dec = {
.type = VPU_CORE_TYPE_DEC,
- .fwname = "vpu/vpu_fw_imx8_dec.bin",
+ .fwname = "amphion/vpu/vpu_fw_imx8_dec.bin",
.stride = 256,
.max_width = 8188,
.max_height = 8188,
cl->rx_callback = vpu_mbox_rx_callback;
ch = mbox_request_channel_byname(cl, mbox->name);
- if (IS_ERR(ch)) {
- dev_err(dev, "Failed to request mbox chan %s, ret : %ld\n",
- mbox->name, PTR_ERR(ch));
- return PTR_ERR(ch);
- }
+ if (IS_ERR(ch))
+ return dev_err_probe(dev, PTR_ERR(ch),
+ "Failed to request mbox chan %s\n",
+ mbox->name);
mbox->ch = ch;
return 0;
#include "mtk_jpeg_core.h"
#include "mtk_jpeg_dec_parse.h"
-#if defined(CONFIG_OF)
static struct mtk_jpeg_fmt mtk_jpeg_enc_formats[] = {
{
.fourcc = V4L2_PIX_FMT_JPEG,
.flags = MTK_JPEG_FMT_FLAG_CAPTURE,
},
};
-#endif
#define MTK_JPEG_ENC_NUM_FORMATS ARRAY_SIZE(mtk_jpeg_enc_formats)
#define MTK_JPEG_DEC_NUM_FORMATS ARRAY_SIZE(mtk_jpeg_dec_formats)
SET_RUNTIME_PM_OPS(mtk_jpeg_pm_suspend, mtk_jpeg_pm_resume, NULL)
};
-#if defined(CONFIG_OF)
static int mtk_jpegenc_get_hw(struct mtk_jpeg_ctx *ctx)
{
struct mtk_jpegenc_comp_dev *comp_jpeg;
};
MODULE_DEVICE_TABLE(of, mtk_jpeg_match);
-#endif
static struct platform_driver mtk_jpeg_driver = {
.probe = mtk_jpeg_probe,
.remove_new = mtk_jpeg_remove,
.driver = {
.name = MTK_JPEG_NAME,
- .of_match_table = of_match_ptr(mtk_jpeg_match),
+ .of_match_table = mtk_jpeg_match,
.pm = &mtk_jpeg_pm_ops,
},
};
MTK_JPEG_COLOR_400 = 0x00110000
};
-#if defined(CONFIG_OF)
static const struct of_device_id mtk_jpegdec_hw_ids[] = {
{
.compatible = "mediatek,mt8195-jpgdec-hw",
{},
};
MODULE_DEVICE_TABLE(of, mtk_jpegdec_hw_ids);
-#endif
static inline int mtk_jpeg_verify_align(u32 val, int align, u32 reg)
{
.probe = mtk_jpegdec_hw_probe,
.driver = {
.name = "mtk-jpegdec-hw",
- .of_match_table = of_match_ptr(mtk_jpegdec_hw_ids),
+ .of_match_table = mtk_jpegdec_hw_ids,
},
};
{.quality_param = 97, .hardware_value = JPEG_ENC_QUALITY_Q97},
};
-#if defined(CONFIG_OF)
static const struct of_device_id mtk_jpegenc_drv_ids[] = {
{
.compatible = "mediatek,mt8195-jpgenc-hw",
{},
};
MODULE_DEVICE_TABLE(of, mtk_jpegenc_drv_ids);
-#endif
void mtk_jpeg_enc_reset(void __iomem *base)
{
.probe = mtk_jpegenc_hw_probe,
.driver = {
.name = "mtk-jpegenc-hw",
- .of_match_table = of_match_ptr(mtk_jpegenc_drv_ids),
+ .of_match_table = mtk_jpegenc_drv_ids,
},
};
kfree(lat_buf->private_data);
}
- cancel_work_sync(&msg_queue->core_work);
+ if (msg_queue->wdma_addr.size)
+ cancel_work_sync(&msg_queue->core_work);
}
static void vdec_msg_queue_core_work(struct work_struct *work)
#define CAST_OFBSIZE_LO CAST_STATUS18
#define CAST_OFBSIZE_HI CAST_STATUS19
-#define MXC_MAX_SLOTS 1 /* TODO use all 4 slots*/
/* JPEG-Decoder Wrapper Slot Registers 0..3 */
#define SLOT_BASE 0x10000
#define SLOT_STATUS 0x0
v4l2_event_queue_fh(&ctx->fh, &ev);
}
-static int mxc_get_free_slot(struct mxc_jpeg_slot_data slot_data[], int n)
+static int mxc_get_free_slot(struct mxc_jpeg_slot_data *slot_data)
{
- int free_slot = 0;
-
- while (slot_data[free_slot].used && free_slot < n)
- free_slot++;
-
- return free_slot; /* >=n when there are no more free slots */
+ if (!slot_data->used)
+ return slot_data->slot;
+ return -1;
}
-static bool mxc_jpeg_alloc_slot_data(struct mxc_jpeg_dev *jpeg,
- unsigned int slot)
+static bool mxc_jpeg_alloc_slot_data(struct mxc_jpeg_dev *jpeg)
{
struct mxc_jpeg_desc *desc;
struct mxc_jpeg_desc *cfg_desc;
void *cfg_stm;
- if (jpeg->slot_data[slot].desc)
+ if (jpeg->slot_data.desc)
goto skip_alloc; /* already allocated, reuse it */
/* allocate descriptor for decoding/encoding phase */
desc = dma_alloc_coherent(jpeg->dev,
sizeof(struct mxc_jpeg_desc),
- &jpeg->slot_data[slot].desc_handle,
+ &jpeg->slot_data.desc_handle,
GFP_ATOMIC);
if (!desc)
goto err;
- jpeg->slot_data[slot].desc = desc;
+ jpeg->slot_data.desc = desc;
/* allocate descriptor for configuration phase (encoder only) */
cfg_desc = dma_alloc_coherent(jpeg->dev,
sizeof(struct mxc_jpeg_desc),
- &jpeg->slot_data[slot].cfg_desc_handle,
+ &jpeg->slot_data.cfg_desc_handle,
GFP_ATOMIC);
if (!cfg_desc)
goto err;
- jpeg->slot_data[slot].cfg_desc = cfg_desc;
+ jpeg->slot_data.cfg_desc = cfg_desc;
/* allocate configuration stream */
cfg_stm = dma_alloc_coherent(jpeg->dev,
MXC_JPEG_MAX_CFG_STREAM,
- &jpeg->slot_data[slot].cfg_stream_handle,
+ &jpeg->slot_data.cfg_stream_handle,
GFP_ATOMIC);
if (!cfg_stm)
goto err;
- jpeg->slot_data[slot].cfg_stream_vaddr = cfg_stm;
+ jpeg->slot_data.cfg_stream_vaddr = cfg_stm;
skip_alloc:
- jpeg->slot_data[slot].used = true;
+ jpeg->slot_data.used = true;
return true;
err:
- dev_err(jpeg->dev, "Could not allocate descriptors for slot %d", slot);
+ dev_err(jpeg->dev, "Could not allocate descriptors for slot %d", jpeg->slot_data.slot);
return false;
}
-static void mxc_jpeg_free_slot_data(struct mxc_jpeg_dev *jpeg,
- unsigned int slot)
+static void mxc_jpeg_free_slot_data(struct mxc_jpeg_dev *jpeg)
{
- if (slot >= MXC_MAX_SLOTS) {
- dev_err(jpeg->dev, "Invalid slot %d, nothing to free.", slot);
- return;
- }
-
/* free descriptor for decoding/encoding phase */
dma_free_coherent(jpeg->dev, sizeof(struct mxc_jpeg_desc),
- jpeg->slot_data[slot].desc,
- jpeg->slot_data[slot].desc_handle);
+ jpeg->slot_data.desc,
+ jpeg->slot_data.desc_handle);
/* free descriptor for encoder configuration phase / decoder DHT */
dma_free_coherent(jpeg->dev, sizeof(struct mxc_jpeg_desc),
- jpeg->slot_data[slot].cfg_desc,
- jpeg->slot_data[slot].cfg_desc_handle);
+ jpeg->slot_data.cfg_desc,
+ jpeg->slot_data.cfg_desc_handle);
/* free configuration stream */
dma_free_coherent(jpeg->dev, MXC_JPEG_MAX_CFG_STREAM,
- jpeg->slot_data[slot].cfg_stream_vaddr,
- jpeg->slot_data[slot].cfg_stream_handle);
+ jpeg->slot_data.cfg_stream_vaddr,
+ jpeg->slot_data.cfg_stream_handle);
- jpeg->slot_data[slot].used = false;
+ jpeg->slot_data.used = false;
}
static void mxc_jpeg_check_and_set_last_buffer(struct mxc_jpeg_ctx *ctx,
v4l2_m2m_buf_done(dst_buf, state);
mxc_jpeg_disable_irq(reg, ctx->slot);
- ctx->mxc_jpeg->slot_data[ctx->slot].used = false;
+ jpeg->slot_data.used = false;
if (reset)
mxc_jpeg_sw_reset(reg);
}
goto job_unlock;
}
- if (!jpeg->slot_data[slot].used)
+ if (!jpeg->slot_data.used)
goto job_unlock;
dec_ret = readl(reg + MXC_SLOT_OFFSET(slot, SLOT_STATUS));
struct mxc_jpeg_dev *jpeg = ctx->mxc_jpeg;
void __iomem *reg = jpeg->base_reg;
unsigned int slot = ctx->slot;
- struct mxc_jpeg_desc *desc = jpeg->slot_data[slot].desc;
- struct mxc_jpeg_desc *cfg_desc = jpeg->slot_data[slot].cfg_desc;
- dma_addr_t desc_handle = jpeg->slot_data[slot].desc_handle;
- dma_addr_t cfg_desc_handle = jpeg->slot_data[slot].cfg_desc_handle;
- dma_addr_t cfg_stream_handle = jpeg->slot_data[slot].cfg_stream_handle;
- unsigned int *cfg_size = &jpeg->slot_data[slot].cfg_stream_size;
- void *cfg_stream_vaddr = jpeg->slot_data[slot].cfg_stream_vaddr;
+ struct mxc_jpeg_desc *desc = jpeg->slot_data.desc;
+ struct mxc_jpeg_desc *cfg_desc = jpeg->slot_data.cfg_desc;
+ dma_addr_t desc_handle = jpeg->slot_data.desc_handle;
+ dma_addr_t cfg_desc_handle = jpeg->slot_data.cfg_desc_handle;
+ dma_addr_t cfg_stream_handle = jpeg->slot_data.cfg_stream_handle;
+ unsigned int *cfg_size = &jpeg->slot_data.cfg_stream_size;
+ void *cfg_stream_vaddr = jpeg->slot_data.cfg_stream_vaddr;
struct mxc_jpeg_src_buf *jpeg_src_buf;
jpeg_src_buf = vb2_to_mxc_buf(src_buf);
struct mxc_jpeg_dev *jpeg = ctx->mxc_jpeg;
void __iomem *reg = jpeg->base_reg;
unsigned int slot = ctx->slot;
- struct mxc_jpeg_desc *desc = jpeg->slot_data[slot].desc;
- struct mxc_jpeg_desc *cfg_desc = jpeg->slot_data[slot].cfg_desc;
- dma_addr_t desc_handle = jpeg->slot_data[slot].desc_handle;
- dma_addr_t cfg_desc_handle = jpeg->slot_data[slot].cfg_desc_handle;
- void *cfg_stream_vaddr = jpeg->slot_data[slot].cfg_stream_vaddr;
+ struct mxc_jpeg_desc *desc = jpeg->slot_data.desc;
+ struct mxc_jpeg_desc *cfg_desc = jpeg->slot_data.cfg_desc;
+ dma_addr_t desc_handle = jpeg->slot_data.desc_handle;
+ dma_addr_t cfg_desc_handle = jpeg->slot_data.cfg_desc_handle;
+ void *cfg_stream_vaddr = jpeg->slot_data.cfg_stream_vaddr;
struct mxc_jpeg_q_data *q_data;
enum mxc_jpeg_image_format img_fmt;
int w, h;
q_data = mxc_jpeg_get_q_data(ctx, src_buf->vb2_queue->type);
- jpeg->slot_data[slot].cfg_stream_size =
+ jpeg->slot_data.cfg_stream_size =
mxc_jpeg_setup_cfg_stream(cfg_stream_vaddr,
q_data->fmt->fourcc,
q_data->crop.width,
/* chain the config descriptor with the encoding descriptor */
cfg_desc->next_descpt_ptr = desc_handle | MXC_NXT_DESCPT_EN;
- cfg_desc->buf_base0 = jpeg->slot_data[slot].cfg_stream_handle;
+ cfg_desc->buf_base0 = jpeg->slot_data.cfg_stream_handle;
cfg_desc->buf_base1 = 0;
cfg_desc->line_pitch = 0;
cfg_desc->stm_bufbase = 0; /* no output expected */
unsigned long flags;
spin_lock_irqsave(&ctx->mxc_jpeg->hw_lock, flags);
- if (ctx->slot < MXC_MAX_SLOTS && ctx->mxc_jpeg->slot_data[ctx->slot].used) {
+ if (ctx->mxc_jpeg->slot_data.used) {
dev_warn(jpeg->dev, "%s timeout, cancel it\n",
ctx->mxc_jpeg->mode == MXC_JPEG_DECODE ? "decode" : "encode");
mxc_jpeg_job_finish(ctx, VB2_BUF_STATE_ERROR, true);
mxc_jpeg_enable(reg);
mxc_jpeg_set_l_endian(reg, 1);
- ctx->slot = mxc_get_free_slot(jpeg->slot_data, MXC_MAX_SLOTS);
- if (ctx->slot >= MXC_MAX_SLOTS) {
+ ctx->slot = mxc_get_free_slot(&jpeg->slot_data);
+ if (ctx->slot < 0) {
dev_err(dev, "No more free slots\n");
goto end;
}
- if (!mxc_jpeg_alloc_slot_data(jpeg, ctx->slot)) {
+ if (!mxc_jpeg_alloc_slot_data(jpeg)) {
dev_err(dev, "Cannot allocate slot data\n");
goto end;
}
}
ctx->fh.ctrl_handler = &ctx->ctrl_handler;
mxc_jpeg_set_default_params(ctx);
- ctx->slot = MXC_MAX_SLOTS; /* slot not allocated yet */
+ ctx->slot = -1; /* slot not allocated yet */
INIT_DELAYED_WORK(&ctx->task_timer, mxc_jpeg_device_run_timeout);
if (mxc_jpeg->mode == MXC_JPEG_DECODE)
dev_err(dev, "No power domains defined for jpeg node\n");
return jpeg->num_domains;
}
+ if (jpeg->num_domains == 1) {
+		/* genpd_dev_pm_attach() attaches automatically when there is only one power domain */
+ jpeg->num_domains = 0;
+ return 0;
+ }
jpeg->pd_dev = devm_kmalloc_array(dev, jpeg->num_domains,
sizeof(*jpeg->pd_dev), GFP_KERNEL);
int ret;
int mode;
const struct of_device_id *of_id;
- unsigned int slot;
of_id = of_match_node(mxc_jpeg_match, dev->of_node);
if (!of_id)
if (IS_ERR(jpeg->base_reg))
return PTR_ERR(jpeg->base_reg);
- for (slot = 0; slot < MXC_MAX_SLOTS; slot++) {
- dec_irq = platform_get_irq(pdev, slot);
- if (dec_irq < 0) {
- ret = dec_irq;
- goto err_irq;
- }
- ret = devm_request_irq(&pdev->dev, dec_irq, mxc_jpeg_dec_irq,
- 0, pdev->name, jpeg);
- if (ret) {
- dev_err(&pdev->dev, "Failed to request irq %d (%d)\n",
- dec_irq, ret);
- goto err_irq;
- }
+ ret = of_property_read_u32_index(pdev->dev.of_node, "slot", 0, &jpeg->slot_data.slot);
+ if (ret)
+ jpeg->slot_data.slot = 0;
+ dev_info(&pdev->dev, "choose slot %d\n", jpeg->slot_data.slot);
+ dec_irq = platform_get_irq(pdev, 0);
+ if (dec_irq < 0) {
+ dev_err(&pdev->dev, "Failed to get irq %d\n", dec_irq);
+ ret = dec_irq;
+ goto err_irq;
+ }
+ ret = devm_request_irq(&pdev->dev, dec_irq, mxc_jpeg_dec_irq,
+ 0, pdev->name, jpeg);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request irq %d (%d)\n",
+ dec_irq, ret);
+ goto err_irq;
}
jpeg->pdev = pdev;
static void mxc_jpeg_remove(struct platform_device *pdev)
{
- unsigned int slot;
struct mxc_jpeg_dev *jpeg = platform_get_drvdata(pdev);
- for (slot = 0; slot < MXC_MAX_SLOTS; slot++)
- mxc_jpeg_free_slot_data(jpeg, slot);
+ mxc_jpeg_free_slot_data(jpeg);
pm_runtime_disable(&pdev->dev);
video_unregister_device(jpeg->dec_vdev);
struct mxc_jpeg_q_data cap_q;
struct v4l2_fh fh;
enum mxc_jpeg_enc_state enc_state;
- unsigned int slot;
+ int slot;
unsigned int source_change;
bool header_parsed;
struct v4l2_ctrl_handler ctrl_handler;
};
struct mxc_jpeg_slot_data {
+ int slot;
bool used;
struct mxc_jpeg_desc *desc; // enc/dec descriptor
struct mxc_jpeg_desc *cfg_desc; // configuration descriptor
struct v4l2_device v4l2_dev;
struct v4l2_m2m_dev *m2m_dev;
struct video_device *dec_vdev;
- struct mxc_jpeg_slot_data slot_data[MXC_MAX_SLOTS];
+ struct mxc_jpeg_slot_data slot_data;
int num_domains;
struct device **pd_dev;
struct device_link **pd_link;
pr_err("%s:%d: " fmt, __func__, __LINE__, ##args)
/* Structure access helpers. */
-static inline struct hantro_ctx *fh_to_ctx(struct v4l2_fh *fh)
+static __always_inline struct hantro_ctx *fh_to_ctx(struct v4l2_fh *fh)
{
return container_of(fh, struct hantro_ctx, fh);
}
/* Register accessors. */
-static inline void vepu_write_relaxed(struct hantro_dev *vpu,
- u32 val, u32 reg)
+static __always_inline void vepu_write_relaxed(struct hantro_dev *vpu,
+ u32 val, u32 reg)
{
vpu_debug(6, "0x%04x = 0x%08x\n", reg / 4, val);
writel_relaxed(val, vpu->enc_base + reg);
}
-static inline void vepu_write(struct hantro_dev *vpu, u32 val, u32 reg)
+static __always_inline void vepu_write(struct hantro_dev *vpu, u32 val, u32 reg)
{
vpu_debug(6, "0x%04x = 0x%08x\n", reg / 4, val);
writel(val, vpu->enc_base + reg);
}
-static inline u32 vepu_read(struct hantro_dev *vpu, u32 reg)
+static __always_inline u32 vepu_read(struct hantro_dev *vpu, u32 reg)
{
u32 val = readl(vpu->enc_base + reg);
return val;
}
-static inline void vdpu_write_relaxed(struct hantro_dev *vpu,
- u32 val, u32 reg)
+static __always_inline void vdpu_write_relaxed(struct hantro_dev *vpu,
+ u32 val, u32 reg)
{
vpu_debug(6, "0x%04x = 0x%08x\n", reg / 4, val);
writel_relaxed(val, vpu->dec_base + reg);
}
-static inline void vdpu_write(struct hantro_dev *vpu, u32 val, u32 reg)
+static __always_inline void vdpu_write(struct hantro_dev *vpu, u32 val, u32 reg)
{
vpu_debug(6, "0x%04x = 0x%08x\n", reg / 4, val);
writel(val, vpu->dec_base + reg);
}
-static inline void hantro_write_addr(struct hantro_dev *vpu,
- unsigned long offset,
- dma_addr_t addr)
+static __always_inline void hantro_write_addr(struct hantro_dev *vpu,
+ unsigned long offset,
+ dma_addr_t addr)
{
vdpu_write(vpu, addr & 0xffffffff, offset);
}
-static inline u32 vdpu_read(struct hantro_dev *vpu, u32 reg)
+static __always_inline u32 vdpu_read(struct hantro_dev *vpu, u32 reg)
{
u32 val = readl(vpu->dec_base + reg);
return val;
}
-static inline u32 vdpu_read_mask(struct hantro_dev *vpu,
- const struct hantro_reg *reg,
- u32 val)
+static __always_inline u32 vdpu_read_mask(struct hantro_dev *vpu,
+ const struct hantro_reg *reg,
+ u32 val)
{
u32 v;
return v;
}
-static inline void hantro_reg_write(struct hantro_dev *vpu,
- const struct hantro_reg *reg,
- u32 val)
+static __always_inline void hantro_reg_write(struct hantro_dev *vpu,
+ const struct hantro_reg *reg,
+ u32 val)
{
- vdpu_write_relaxed(vpu, vdpu_read_mask(vpu, reg, val), reg->base);
+ vdpu_write(vpu, vdpu_read_mask(vpu, reg, val), reg->base);
}
-static inline void hantro_reg_write_s(struct hantro_dev *vpu,
- const struct hantro_reg *reg,
- u32 val)
+static __always_inline void hantro_reg_write_relaxed(struct hantro_dev *vpu,
+ const struct hantro_reg *reg,
+ u32 val)
{
- vdpu_write(vpu, vdpu_read_mask(vpu, reg, val), reg->base);
+ vdpu_write_relaxed(vpu, vdpu_read_mask(vpu, reg, val), reg->base);
}
void *hantro_get_ctrl(struct hantro_ctx *ctx, u32 id);
val); \
}
-#define HANTRO_PP_REG_WRITE_S(vpu, reg_name, val) \
+#define HANTRO_PP_REG_WRITE_RELAXED(vpu, reg_name, val) \
{ \
- hantro_reg_write_s(vpu, \
- &hantro_g1_postproc_regs.reg_name, \
- val); \
+ hantro_reg_write_relaxed(vpu, \
+ &hantro_g1_postproc_regs.reg_name, \
+ val); \
}
#define VPU_PP_IN_YUYV 0x0
dma_addr_t dst_dma;
/* Turn on pipeline mode. Must be done first. */
- HANTRO_PP_REG_WRITE_S(vpu, pipeline_en, 0x1);
+ HANTRO_PP_REG_WRITE(vpu, pipeline_en, 0x1);
src_pp_fmt = VPU_PP_IN_NV12;
{
struct hantro_dev *vpu = ctx->dev;
- HANTRO_PP_REG_WRITE_S(vpu, pipeline_en, 0x0);
+ HANTRO_PP_REG_WRITE(vpu, pipeline_en, 0x0);
}
static void hantro_postproc_g2_disable(struct hantro_ctx *ctx)
}
if (!label)
block->label = devm_kasprintf(sram->dev, GFP_KERNEL,
- "%s", dev_name(sram->dev));
+ "%s", of_node_full_name(child));
else
block->label = devm_kstrdup(sram->dev,
label, GFP_KERNEL);
memcpy(bond_dev->broadcast, slave_dev->broadcast,
slave_dev->addr_len);
+
+ if (slave_dev->flags & IFF_POINTOPOINT) {
+ bond_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
+ bond_dev->flags |= (IFF_POINTOPOINT | IFF_NOARP);
+ }
}
/* On bonding slaves other than the currently active slave, suppress
usb_kill_anchored_urbs(&dev->tx_submitted);
atomic_set(&dev->active_tx_urbs, 0);
+ dev->can.state = CAN_STATE_STOPPED;
+
/* reset the device */
rc = gs_cmd_reset(dev);
if (rc < 0)
.rd_table = &qca8k_readable_table,
.disable_locking = true, /* Locking is handled by qca8k read/write */
.cache_type = REGCACHE_NONE, /* Explicitly disable CACHE */
- .max_raw_read = 32, /* mgmt eth can read/write up to 8 registers at time */
- .max_raw_write = 32,
+	.max_raw_read = 32, /* mgmt eth can read up to 8 registers at a time */
+	/* ATU regs suffer from a bug where some data are not correctly
+	 * written. Disable bulk write to correctly write the ATU entry.
+ */
+ .use_single_write = true,
};
static int
}
static int qca8k_fdb_search_and_insert(struct qca8k_priv *priv, u8 port_mask,
- const u8 *mac, u16 vid)
+ const u8 *mac, u16 vid, u8 aging)
{
struct qca8k_fdb fdb = { 0 };
int ret;
goto exit;
/* Rule exist. Delete first */
- if (!fdb.aging) {
+ if (fdb.aging) {
ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
if (ret)
goto exit;
+ } else {
+ fdb.aging = aging;
}
/* Add port to fdb portmask */
if (ret < 0)
goto exit;
+ ret = qca8k_fdb_read(priv, &fdb);
+ if (ret < 0)
+ goto exit;
+
/* Rule doesn't exist. Why delete? */
if (!fdb.aging) {
ret = -EINVAL;
const u8 *addr = mdb->addr;
u16 vid = mdb->vid;
- return qca8k_fdb_search_and_insert(priv, BIT(port), addr, vid);
+ if (!vid)
+ vid = QCA8K_PORT_VID_DEF;
+
+ return qca8k_fdb_search_and_insert(priv, BIT(port), addr, vid,
+ QCA8K_ATU_STATUS_STATIC);
}
int qca8k_port_mdb_del(struct dsa_switch *ds, int port,
const u8 *addr = mdb->addr;
u16 vid = mdb->vid;
+ if (!vid)
+ vid = QCA8K_PORT_VID_DEF;
+
return qca8k_fdb_search_and_del(priv, BIT(port), addr, vid);
}
real_len = (((unsigned char *)ip_hdr(skb) - skb->data)
+ ntohs(ip_hdr(skb)->tot_len));
- if (real_len < skb->len)
- pskb_trim(skb, real_len);
+ if (real_len < skb->len) {
+ err = pskb_trim(skb, real_len);
+ if (err)
+ return err;
+ }
hdr_len = skb_tcp_all_headers(skb);
if (unlikely(skb->len == hdr_len)) {
real_len = (((unsigned char *)ip_hdr(skb) - skb->data)
+ ntohs(ip_hdr(skb)->tot_len));
- if (real_len < skb->len)
- pskb_trim(skb, real_len);
+ if (real_len < skb->len) {
+ err = pskb_trim(skb, real_len);
+ if (err)
+ return err;
+ }
hdr_len = skb_tcp_all_headers(skb);
if (unlikely(skb->len == hdr_len)) {
real_len = (((unsigned char *)iph - skb->data) +
ntohs(iph->tot_len));
- if (real_len < skb->len)
- pskb_trim(skb, real_len);
+ if (real_len < skb->len) {
+ err = pskb_trim(skb, real_len);
+ if (err)
+ return err;
+ }
hdr_len = skb_tcp_all_headers(skb);
if (skb->len == hdr_len) {
iph->check = 0;
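
These hunks apply the same fix across several ethernet drivers: pskb_trim() can fail (it may need to reallocate a cloned or nonlinear skb), so its return value must be propagated instead of silently ignored. A minimal sketch of the pattern; the helper name is illustrative:

#include <linux/skbuff.h>
#include <linux/ip.h>

static int trim_to_ip_total_len(struct sk_buff *skb)
{
	unsigned int real_len = ((unsigned char *)ip_hdr(skb) - skb->data) +
				ntohs(ip_hdr(skb)->tot_len);

	if (real_len < skb->len) {
		int err = pskb_trim(skb, real_len);

		if (err)
			return err;	/* abort instead of transmitting a bad frame */
	}
	return 0;
}
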
(lancer_chip(adapter) || BE3_chip(adapter) ||
skb_vlan_tag_present(skb)) && is_ipv4_pkt(skb)) {
ip = (struct iphdr *)ip_hdr(skb);
- pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
+ if (unlikely(pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len))))
+ goto tx_drop;
}
/* If vlan tag is already inlined in the packet, skip HW VLAN
}
static void
-fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
+fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
{
struct fec_enet_private *fep;
struct xdp_frame *xdpf;
if (!skb)
goto tx_buf_done;
} else {
+		/* Tx processing cannot call any XDP (or page pool) APIs if
+		 * the "budget" is 0. A budget of 0 (such as from netpoll)
+		 * indicates we may be in IRQ context, and the page pool
+		 * cannot be used from IRQ context.
+		 */
+ if (unlikely(!budget))
+ break;
+
xdpf = txq->tx_buf[index].xdp;
if (bdp->cbd_bufaddr)
dma_unmap_single(&fep->pdev->dev,
writel(0, txq->bd.reg_desc_active);
}
-static void fec_enet_tx(struct net_device *ndev)
+static void fec_enet_tx(struct net_device *ndev, int budget)
{
struct fec_enet_private *fep = netdev_priv(ndev);
int i;
/* Make sure that AVB queues are processed first. */
for (i = fep->num_tx_queues - 1; i >= 0; i--)
- fec_enet_tx_queue(ndev, i);
+ fec_enet_tx_queue(ndev, i, budget);
}
static void fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
do {
done += fec_enet_rx(ndev, budget - done);
- fec_enet_tx(ndev);
+ fec_enet_tx(ndev, budget);
} while ((done < budget) && fec_enet_collect_events(fep));
if (done < budget) {
__netif_tx_lock(nq, cpu);
+ /* Avoid tx timeout as XDP shares the queue with kernel stack */
+ txq_trans_cond_update(nq);
for (i = 0; i < num_frames; i++) {
if (fec_enet_txq_xmit_frame(fep, txq, frames[i]) < 0)
break;
#include <linux/pci.h>
#include <linux/pkt_sched.h>
#include <linux/types.h>
+#include <linux/bitmap.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
HNAE3_DEV_SUPPORT_FEC_STATS_B,
HNAE3_DEV_SUPPORT_LANE_NUM_B,
HNAE3_DEV_SUPPORT_WOL_B,
+ HNAE3_DEV_SUPPORT_TM_FLUSH_B,
};
#define hnae3_ae_dev_fd_supported(ae_dev) \
#define hnae3_ae_dev_wol_supported(ae_dev) \
test_bit(HNAE3_DEV_SUPPORT_WOL_B, (ae_dev)->caps)
+#define hnae3_ae_dev_tm_flush_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_TM_FLUSH_B, (hdev)->ae_dev->caps)
+
enum HNAE3_PF_CAP_BITS {
HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B = 0,
};
unsigned long hw_err_reset_req;
struct hnae3_dev_specs dev_specs;
u32 dev_version;
- unsigned long caps[BITS_TO_LONGS(HNAE3_DEV_CAPS_MAX_NUM)];
+ DECLARE_BITMAP(caps, HNAE3_DEV_CAPS_MAX_NUM);
void *priv;
};
{HCLGE_COMM_CAP_FEC_STATS_B, HNAE3_DEV_SUPPORT_FEC_STATS_B},
{HCLGE_COMM_CAP_LANE_NUM_B, HNAE3_DEV_SUPPORT_LANE_NUM_B},
{HCLGE_COMM_CAP_WOL_B, HNAE3_DEV_SUPPORT_WOL_B},
+ {HCLGE_COMM_CAP_TM_FLUSH_B, HNAE3_DEV_SUPPORT_TM_FLUSH_B},
};
static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = {
{HCLGE_COMM_CAP_GRO_B, HNAE3_DEV_SUPPORT_GRO_B},
};
+static void
+hclge_comm_capability_to_bitmap(unsigned long *bitmap, __le32 *caps)
+{
+ const unsigned int words = HCLGE_COMM_QUERY_CAP_LENGTH;
+ u32 val[HCLGE_COMM_QUERY_CAP_LENGTH];
+ unsigned int i;
+
+ for (i = 0; i < words; i++)
+ val[i] = __le32_to_cpu(caps[i]);
+
+ bitmap_from_arr32(bitmap, val,
+ HCLGE_COMM_QUERY_CAP_LENGTH * BITS_PER_TYPE(u32));
+}
+
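
The helper above boils down to bitmap_from_arr32(): convert an array of little-endian 32-bit capability words into a native bitmap so the generic test_bit()/set_bit() helpers can be used on it. A small standalone version; the word count and names are illustrative:

#include <linux/bitmap.h>
#include <linux/bits.h>
#include <asm/byteorder.h>

#define EXAMPLE_CAP_WORDS 3

static void example_caps_to_bitmap(unsigned long *bitmap, const __le32 *caps)
{
	u32 val[EXAMPLE_CAP_WORDS];
	unsigned int i;

	for (i = 0; i < EXAMPLE_CAP_WORDS; i++)
		val[i] = le32_to_cpu(caps[i]);

	bitmap_from_arr32(bitmap, val, EXAMPLE_CAP_WORDS * BITS_PER_TYPE(u32));
}

/* usage: DECLARE_BITMAP(caps, EXAMPLE_CAP_WORDS * BITS_PER_TYPE(u32));
 *        example_caps_to_bitmap(caps, hw_words);
 *        if (test_bit(SOME_CAP_BIT, caps)) ...
 */
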
static void
hclge_comm_parse_capability(struct hnae3_ae_dev *ae_dev, bool is_pf,
struct hclge_comm_query_version_cmd *cmd)
is_pf ? hclge_pf_cmd_caps : hclge_vf_cmd_caps;
u32 size = is_pf ? ARRAY_SIZE(hclge_pf_cmd_caps) :
ARRAY_SIZE(hclge_vf_cmd_caps);
- u32 caps, i;
+ DECLARE_BITMAP(caps, HCLGE_COMM_QUERY_CAP_LENGTH * BITS_PER_TYPE(u32));
+ u32 i;
- caps = __le32_to_cpu(cmd->caps[0]);
+ hclge_comm_capability_to_bitmap(caps, cmd->caps);
for (i = 0; i < size; i++)
- if (hnae3_get_bit(caps, caps_map[i].imp_bit))
+ if (test_bit(caps_map[i].imp_bit, caps))
set_bit(caps_map[i].local_bit, ae_dev->caps);
}
HCLGE_OPC_TM_INTERNAL_STS = 0x0850,
HCLGE_OPC_TM_INTERNAL_CNT = 0x0851,
HCLGE_OPC_TM_INTERNAL_STS_1 = 0x0852,
+ HCLGE_OPC_TM_FLUSH = 0x0872,
/* Packet buffer allocate commands */
HCLGE_OPC_TX_BUFF_ALLOC = 0x0901,
HCLGE_COMM_CAP_FEC_STATS_B = 25,
HCLGE_COMM_CAP_LANE_NUM_B = 27,
HCLGE_COMM_CAP_WOL_B = 28,
+ HCLGE_COMM_CAP_TM_FLUSH_B = 31,
};
enum HCLGE_COMM_API_CAP_BITS {
}, {
.name = "support wake on lan",
.cap_bit = HNAE3_DEV_SUPPORT_WOL_B,
+ }, {
+ .name = "support tm flush",
+ .cap_bit = HNAE3_DEV_SUPPORT_TM_FLUSH_B,
}
};
for (i = 0; i < HNAE3_MAX_TC; i++) {
ets->prio_tc[i] = hdev->tm_info.prio_tc[i];
- ets->tc_tx_bw[i] = hdev->tm_info.pg_info[0].tc_dwrr[i];
+ if (i < hdev->tm_info.num_tc)
+ ets->tc_tx_bw[i] = hdev->tm_info.pg_info[0].tc_dwrr[i];
+ else
+ ets->tc_tx_bw[i] = 0;
if (hdev->tm_info.tc_info[i].tc_sch_mode ==
HCLGE_SCH_MODE_SP)
}
static int hclge_ets_sch_mode_validate(struct hclge_dev *hdev,
- struct ieee_ets *ets, bool *changed)
+ struct ieee_ets *ets, bool *changed,
+ u8 tc_num)
{
bool has_ets_tc = false;
u32 total_ets_bw = 0;
*changed = true;
break;
case IEEE_8021QAZ_TSA_ETS:
+ if (i >= tc_num) {
+ dev_err(&hdev->pdev->dev,
+ "tc%u is disabled, cannot set ets bw\n",
+ i);
+ return -EINVAL;
+ }
+
/* The hardware will switch to sp mode if bandwidth is
* 0, so limit ets bandwidth must be greater than 0.
*/
if (ret)
return ret;
- ret = hclge_ets_sch_mode_validate(hdev, ets, changed);
+ ret = hclge_ets_sch_mode_validate(hdev, ets, changed, tc_num);
if (ret)
return ret;
if (ret)
return ret;
+ ret = hclge_tm_flush_cfg(hdev, true);
+ if (ret)
+ return ret;
+
return hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
}
if (ret)
return ret;
+ ret = hclge_tm_flush_cfg(hdev, false);
+ if (ret)
+ return ret;
+
return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
}
struct net_device *netdev = h->kinfo.netdev;
struct hclge_dev *hdev = vport->back;
u8 i, j, pfc_map, *prio_tc;
+ int last_bad_ret = 0;
int ret;
if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
if (ret)
return ret;
- ret = hclge_buffer_alloc(hdev);
- if (ret) {
- hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+ ret = hclge_tm_flush_cfg(hdev, true);
+ if (ret)
return ret;
- }
- return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+	/* No matter whether the following operations succeed or not,
+	 * disabling the tm flush and notifying the client that the
+	 * network is up are both necessary. Do not return immediately.
+	 */
+ ret = hclge_buffer_alloc(hdev);
+ if (ret)
+ last_bad_ret = ret;
+
+ ret = hclge_tm_flush_cfg(hdev, false);
+ if (ret)
+ last_bad_ret = ret;
+
+ ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+ if (ret)
+ last_bad_ret = ret;
+
+ return last_bad_ret;
}
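
The change above switches from "return at the first failure" to "run every recovery step and report the last failure", since the tm flush must be disabled and the client notified even when buffer allocation fails. A generic sketch of that pattern; the names are placeholders:

static int run_all_steps(int (*steps[])(void), int nr_steps)
{
	int last_bad_ret = 0;
	int i, ret;

	for (i = 0; i < nr_steps; i++) {
		ret = steps[i]();	/* every step runs, even after a failure */
		if (ret)
			last_bad_ret = ret;
	}
	return last_bad_ret;		/* 0 only if all steps succeeded */
}
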
static int hclge_ieee_setapp(struct hnae3_handle *h, struct dcb_app *app)
for (i = 0; i < HNAE3_MAX_TC; i++) {
sch_mode_str = ets_weight->tc_weight[i] ? "dwrr" : "sp";
pos += scnprintf(buf + pos, len - pos, "%u %4s %3u\n",
- i, sch_mode_str,
- hdev->tm_info.pg_info[0].tc_dwrr[i]);
+ i, sch_mode_str, ets_weight->tc_weight[i]);
}
return 0;
static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
{
#define BW_PERCENT 100
+#define DEFAULT_BW_WEIGHT 1
u8 i;
for (k = 0; k < hdev->tm_info.num_tc; k++)
hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT;
for (; k < HNAE3_MAX_TC; k++)
- hdev->tm_info.pg_info[i].tc_dwrr[k] = 0;
+ hdev->tm_info.pg_info[i].tc_dwrr[k] = DEFAULT_BW_WEIGHT;
}
}
return ret;
/* Cfg schd mode for each level schd */
- return hclge_tm_schd_mode_hw(hdev);
+ ret = hclge_tm_schd_mode_hw(hdev);
+ if (ret)
+ return ret;
+
+ return hclge_tm_flush_cfg(hdev, false);
}
static int hclge_pause_param_setup_hw(struct hclge_dev *hdev)
return 0;
}
+
+int hclge_tm_flush_cfg(struct hclge_dev *hdev, bool enable)
+{
+ struct hclge_desc desc;
+ int ret;
+
+ if (!hnae3_ae_dev_tm_flush_supported(hdev))
+ return 0;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_FLUSH, false);
+
+ desc.data[0] = cpu_to_le32(enable ? HCLGE_TM_FLUSH_EN_MSK : 0);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to config tm flush, ret = %d\n", ret);
+ return ret;
+ }
+
+ if (enable)
+ msleep(HCLGE_TM_FLUSH_TIME_MS);
+
+ return ret;
+}
#define HCLGE_DSCP_MAP_TC_BD_NUM 2
#define HCLGE_DSCP_TC_SHIFT(n) (((n) & 1) * 4)
+#define HCLGE_TM_FLUSH_TIME_MS 10
+#define HCLGE_TM_FLUSH_EN_MSK BIT(0)
+
struct hclge_pg_to_pri_link_cmd {
u8 pg_id;
u8 rsvd1[3];
struct hclge_tm_shaper_para *para);
int hclge_up_to_tc_map(struct hclge_dev *hdev);
int hclge_dscp_to_tc_map(struct hclge_dev *hdev);
+int hclge_tm_flush_cfg(struct hclge_dev *hdev, bool enable);
#endif
void i40e_dbg_init(void)
{
i40e_dbg_root = debugfs_create_dir(i40e_driver_name, NULL);
- if (!i40e_dbg_root)
+ if (IS_ERR(i40e_dbg_root))
pr_info("init of debugfs failed\n");
}
u32 val, oldval;
u16 pending;
- if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
- goto out;
-
if (!mutex_trylock(&adapter->crit_lock)) {
if (adapter->state == __IAVF_REMOVE)
return;
goto out;
}
+ if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
+ goto unlock;
+
event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
if (!event.msg_buf)
- goto out;
+ goto unlock;
do {
ret = iavf_clean_arq_element(hw, &event, &pending);
if (pending != 0)
memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE);
} while (pending);
- mutex_unlock(&adapter->crit_lock);
if (iavf_is_reset_in_progress(adapter))
goto freedom;
freedom:
kfree(event.msg_buf);
+unlock:
+ mutex_unlock(&adapter->crit_lock);
out:
/* re-enable Admin queue interrupt cause */
iavf_misc_irq_enable(adapter);
ICE_FLOW_FLD_OFF_INVAL);
}
- /* add filter for outer headers */
fltr_idx = ice_ethtool_flow_to_fltr(fsp->flow_type & ~FLOW_EXT);
+
+ assign_bit(fltr_idx, hw->fdir_perfect_fltr, perfect_filter);
+
+ /* add filter for outer headers */
ret = ice_fdir_set_hw_fltr_rule(pf, seg, fltr_idx,
ICE_FD_HW_SEG_NON_TUN);
- if (ret == -EEXIST)
- /* Rule already exists, free memory and continue */
- devm_kfree(dev, seg);
- else if (ret)
+ if (ret == -EEXIST) {
+ /* Rule already exists, free memory and count as success */
+ ret = 0;
+ goto err_exit;
+ } else if (ret) {
/* could not write filter, free memory */
goto err_exit;
+ }
/* make tunneled filter HW entries if possible */
memcpy(&tun_seg[1], seg, sizeof(*seg));
devm_kfree(dev, tun_seg);
}
- if (perfect_filter)
- set_bit(fltr_idx, hw->fdir_perfect_fltr);
- else
- clear_bit(fltr_idx, hw->fdir_perfect_fltr);
-
return ret;
err_exit:
devm_kfree(dev, tun_seg);
devm_kfree(dev, seg);
- return -EOPNOTSUPP;
+ return ret;
}
/**
input->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL;
/* input struct is added to the HW filter list */
- ice_fdir_update_list_entry(pf, input, fsp->location);
+ ret = ice_fdir_update_list_entry(pf, input, fsp->location);
+ if (ret)
+ goto release_lock;
ret = ice_fdir_write_all_fltr(pf, input, true);
if (ret)
igc_clean_tx_ring(adapter->tx_ring[i]);
}
+static void igc_disable_tx_ring_hw(struct igc_ring *ring)
+{
+ struct igc_hw *hw = &ring->q_vector->adapter->hw;
+ u8 idx = ring->reg_idx;
+ u32 txdctl;
+
+ txdctl = rd32(IGC_TXDCTL(idx));
+ txdctl &= ~IGC_TXDCTL_QUEUE_ENABLE;
+ txdctl |= IGC_TXDCTL_SWFLUSH;
+ wr32(IGC_TXDCTL(idx), txdctl);
+}
+
+/**
+ * igc_disable_all_tx_rings_hw - Disable all transmit queue operation
+ * @adapter: board private structure
+ */
+static void igc_disable_all_tx_rings_hw(struct igc_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct igc_ring *tx_ring = adapter->tx_ring[i];
+
+ igc_disable_tx_ring_hw(tx_ring);
+ }
+}
+
/**
* igc_setup_tx_resources - allocate Tx resources (Descriptors)
* @tx_ring: tx descriptor ring (for a specific queue) to setup
/* clear VLAN promisc flag so VFTA will be updated if necessary */
adapter->flags &= ~IGC_FLAG_VLAN_PROMISC;
+ igc_disable_all_tx_rings_hw(adapter);
igc_clean_all_tx_rings(adapter);
igc_clean_all_rx_rings(adapter);
}
igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
}
-static void igc_disable_tx_ring_hw(struct igc_ring *ring)
-{
- struct igc_hw *hw = &ring->q_vector->adapter->hw;
- u8 idx = ring->reg_idx;
- u32 txdctl;
-
- txdctl = rd32(IGC_TXDCTL(idx));
- txdctl &= ~IGC_TXDCTL_QUEUE_ENABLE;
- txdctl |= IGC_TXDCTL_SWFLUSH;
- wr32(IGC_TXDCTL(idx), txdctl);
-}
-
void igc_disable_tx_ring(struct igc_ring *ring)
{
igc_disable_tx_ring_hw(ring);
struct ixgbe_adapter *adapter = q_vector->adapter;
if (unlikely(skb_tail_pointer(skb) < hdr.network +
- VXLAN_HEADROOM))
+ vxlan_headroom(0)))
return;
/* verify the port is recognized as VXLAN */
void npc_program_mkex_hash(struct rvu *rvu, int blkaddr)
{
+ struct npc_mcam_kex_hash *mh = rvu->kpu.mkex_hash;
struct hw_cap *hwcap = &rvu->hw->cap;
+ u8 intf, ld, hdr_offset, byte_len;
struct rvu_hwinfo *hw = rvu->hw;
- u8 intf;
+ u64 cfg;
+ /* Check if hardware supports hash extraction */
if (!hwcap->npc_hash_extract)
return;
+	/* Check whether hashing of the IPv6 source/destination address
+	 * should be enabled.
+	 * Hashing reduces the 128-bit SIP/DIP fields to 32 bits so that the
+	 * 224-bit X2 key can be used for IPv6-based filters as well, which in
+	 * turn makes more MCAM entries available for use.
+	 *
+	 * Hashing of the IPv6 SIP/DIP is enabled in the scenarios below:
+	 * 1. The silicon variant supports the hashing feature.
+	 * 2. The number of bytes of the IP address being extracted is 4
+	 * (i.e. 32 bits). The assumption here is that if the user wants the
+	 * 8 LSB bytes of the IP address, or the full 16 bytes, the intention
+	 * is not to use the 32-bit hash.
+	 */
+ for (intf = 0; intf < hw->npc_intfs; intf++) {
+ for (ld = 0; ld < NPC_MAX_LD; ld++) {
+ cfg = rvu_read64(rvu, blkaddr,
+ NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf,
+ NPC_LID_LC,
+ NPC_LT_LC_IP6,
+ ld));
+ hdr_offset = FIELD_GET(NPC_HDR_OFFSET, cfg);
+ byte_len = FIELD_GET(NPC_BYTESM, cfg);
+			/* Hashing of the IPv6 source/destination address should be
+			 * enabled if
+			 * hdr_offset == 8 (offset of the source IPv6 address) or
+			 * hdr_offset == 24 (offset of the destination IPv6
+			 * address) and the number of bytes to be
+			 * extracted is 4. As per the hardware configuration,
+			 * byte_len should be == actual byte_len - 1.
+			 * Hence byte_len is checked against 3, not 4.
+ */
+ if ((hdr_offset == 8 || hdr_offset == 24) && byte_len == 3)
+ mh->lid_lt_ld_hash_en[intf][NPC_LID_LC][NPC_LT_LC_IP6][ld] = true;
+ }
+ }
+
+ /* Update hash configuration if the field is hash enabled */
for (intf = 0; intf < hw->npc_intfs; intf++) {
npc_program_mkex_hash_rx(rvu, blkaddr, intf);
npc_program_mkex_hash_tx(rvu, blkaddr, intf);
[NIX_INTF_RX] = {
[NPC_LID_LC] = {
[NPC_LT_LC_IP6] = {
- true,
- true,
+ false,
+ false,
},
},
},
[NIX_INTF_TX] = {
[NPC_LID_LC] = {
[NPC_LT_LC_IP6] = {
- true,
- true,
+ false,
+ false,
},
},
},
void stmmac_dwmac4_set_mac(void __iomem *ioaddr, bool enable)
{
u32 value = readl(ioaddr + GMAC_CONFIG);
+ u32 old_val = value;
if (enable)
value |= GMAC_CONFIG_RE | GMAC_CONFIG_TE;
else
value &= ~(GMAC_CONFIG_TE | GMAC_CONFIG_RE);
- writel(value, ioaddr + GMAC_CONFIG);
+ if (value != old_val)
+ writel(value, ioaddr + GMAC_CONFIG);
}
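
The stmmac hunk reads the register once, computes the new value, and writes back only when something actually changed, avoiding a redundant MMIO write. A hedged sketch of that read-modify-conditional-write pattern; the accessor below is generic, not the GMAC_CONFIG logic:

#include <linux/io.h>

static void update_ctrl_bits(void __iomem *reg, u32 set, u32 clear)
{
	u32 old_val = readl(reg);
	u32 new_val = (old_val & ~clear) | set;

	if (new_val != old_val)		/* skip the write when nothing changes */
		writel(new_val, reg);
}
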
void stmmac_dwmac4_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
if (ret)
return ret;
- ret = ipa_filter_reset_table(ipa, true, false, modem);
- if (ret)
+ ret = ipa_filter_reset_table(ipa, false, true, modem);
+ if (ret || !ipa_table_hash_support(ipa))
return ret;
- ret = ipa_filter_reset_table(ipa, false, true, modem);
+ ret = ipa_filter_reset_table(ipa, true, false, modem);
if (ret)
return ret;
- ret = ipa_filter_reset_table(ipa, true, true, modem);
- return ret;
+ return ipa_filter_reset_table(ipa, true, true, modem);
}
/* The AP routes and modem routes are each contiguous within the
* */
static int ipa_route_reset(struct ipa *ipa, bool modem)
{
+ bool hash_support = ipa_table_hash_support(ipa);
u32 modem_route_count = ipa->modem_route_count;
struct gsi_trans *trans;
u16 first;
u16 count;
- trans = ipa_cmd_trans_alloc(ipa, 4);
+ trans = ipa_cmd_trans_alloc(ipa, hash_support ? 4 : 2);
if (!trans) {
dev_err(&ipa->pdev->dev,
"no transaction for %s route reset\n",
}
ipa_table_reset_add(trans, false, false, false, first, count);
- ipa_table_reset_add(trans, false, true, false, first, count);
-
ipa_table_reset_add(trans, false, false, true, first, count);
- ipa_table_reset_add(trans, false, true, true, first, count);
+
+ if (hash_support) {
+ ipa_table_reset_add(trans, false, true, false, first, count);
+ ipa_table_reset_add(trans, false, true, true, first, count);
+ }
gsi_trans_commit_wait(trans);
[IFLA_MACVLAN_MACADDR_COUNT] = { .type = NLA_U32 },
[IFLA_MACVLAN_BC_QUEUE_LEN] = { .type = NLA_U32 },
[IFLA_MACVLAN_BC_QUEUE_LEN_USED] = { .type = NLA_REJECT },
+ [IFLA_MACVLAN_BC_CUTOFF] = { .type = NLA_S32 },
};
int macvlan_link_register(struct rtnl_link_ops *ops)
ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, MV_V2_PORT_CTRL,
MV_V2_PORT_CTRL_PWRDOWN);
+ /* Sometimes, the power down bit doesn't clear immediately, and
+ * a read of this register causes the bit not to clear. Delay
+ * 100us to allow the PHY to come out of power down mode before
+ * the next access.
+ */
+ udelay(100);
+
if (phydev->drv->phy_id != MARVELL_PHY_ID_88X3310 ||
priv->firmware_ver < 0x00030000)
return ret;
dev->mtu = port_dev->mtu;
memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len);
eth_hw_addr_inherit(dev, port_dev);
+
+ if (port_dev->flags & IFF_POINTOPOINT) {
+ dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
+ dev->flags |= (IFF_POINTOPOINT | IFF_NOARP);
+ } else if ((port_dev->flags & (IFF_BROADCAST | IFF_MULTICAST)) ==
+ (IFF_BROADCAST | IFF_MULTICAST)) {
+ dev->flags |= (IFF_BROADCAST | IFF_MULTICAST);
+ dev->flags &= ~(IFF_POINTOPOINT | IFF_NOARP);
+ }
}
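
Both the bonding and team hunks propagate point-to-point semantics from the lower device to the aggregate: a point-to-point port forces IFF_POINTOPOINT | IFF_NOARP on the upper device and clears broadcast/multicast. A compact illustration; the helper name is made up:

#include <linux/netdevice.h>

static void propagate_p2p_flags(struct net_device *upper,
				const struct net_device *lower)
{
	if (lower->flags & IFF_POINTOPOINT) {
		upper->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
		upper->flags |= (IFF_POINTOPOINT | IFF_NOARP);
	}
}
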
static int team_dev_type_check_change(struct net_device *dev,
if (vi->has_rss || vi->has_rss_hash_report)
virtnet_init_default_rss(vi);
+ _virtnet_set_queues(vi, vi->curr_queue_pairs);
+
/* serialize netdev register + virtio_device_ready() with ndo_open() */
rtnl_lock();
goto free_unregister_netdev;
}
- virtnet_set_queues(vi, vi->curr_queue_pairs);
-
/* Assume link up if device can't report link status,
otherwise get link status from config. */
netif_carrier_off(dev);
return 1;
}
+static bool vxlan_parse_gpe_proto(struct vxlanhdr *hdr, __be16 *protocol)
+{
+ struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)hdr;
+
+ /* Need to have Next Protocol set for interfaces in GPE mode. */
+ if (!gpe->np_applied)
+ return false;
+ /* "The initial version is 0. If a receiver does not support the
+ * version indicated it MUST drop the packet.
+ */
+ if (gpe->version != 0)
+ return false;
+ /* "When the O bit is set to 1, the packet is an OAM packet and OAM
+ * processing MUST occur." However, we don't implement OAM
+ * processing, thus drop the packet.
+ */
+ if (gpe->oam_flag)
+ return false;
+
+ *protocol = tun_p_to_eth_p(gpe->next_protocol);
+ if (!*protocol)
+ return false;
+
+ return true;
+}
+
static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
unsigned int off,
struct vxlanhdr *vh, size_t hdrlen,
return vh;
}
-static struct sk_buff *vxlan_gro_receive(struct sock *sk,
- struct list_head *head,
- struct sk_buff *skb)
+static struct vxlanhdr *vxlan_gro_prepare_receive(struct sock *sk,
+ struct list_head *head,
+ struct sk_buff *skb,
+ struct gro_remcsum *grc)
{
- struct sk_buff *pp = NULL;
struct sk_buff *p;
struct vxlanhdr *vh, *vh2;
unsigned int hlen, off_vx;
- int flush = 1;
struct vxlan_sock *vs = rcu_dereference_sk_user_data(sk);
__be32 flags;
- struct gro_remcsum grc;
- skb_gro_remcsum_init(&grc);
+ skb_gro_remcsum_init(grc);
off_vx = skb_gro_offset(skb);
hlen = off_vx + sizeof(*vh);
vh = skb_gro_header(skb, hlen, off_vx);
if (unlikely(!vh))
- goto out;
+ return NULL;
skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr));
if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) {
vh = vxlan_gro_remcsum(skb, off_vx, vh, sizeof(struct vxlanhdr),
- vh->vx_vni, &grc,
+ vh->vx_vni, grc,
!!(vs->flags &
VXLAN_F_REMCSUM_NOPARTIAL));
if (!vh)
- goto out;
+ return NULL;
}
skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */
}
}
- pp = call_gro_receive(eth_gro_receive, head, skb);
- flush = 0;
+ return vh;
+}
-out:
+static struct sk_buff *vxlan_gro_receive(struct sock *sk,
+ struct list_head *head,
+ struct sk_buff *skb)
+{
+ struct sk_buff *pp = NULL;
+ struct gro_remcsum grc;
+ int flush = 1;
+
+ if (vxlan_gro_prepare_receive(sk, head, skb, &grc)) {
+ pp = call_gro_receive(eth_gro_receive, head, skb);
+ flush = 0;
+ }
skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
+ return pp;
+}
+
+static struct sk_buff *vxlan_gpe_gro_receive(struct sock *sk,
+ struct list_head *head,
+ struct sk_buff *skb)
+{
+ const struct packet_offload *ptype;
+ struct sk_buff *pp = NULL;
+ struct gro_remcsum grc;
+ struct vxlanhdr *vh;
+ __be16 protocol;
+ int flush = 1;
+ vh = vxlan_gro_prepare_receive(sk, head, skb, &grc);
+ if (vh) {
+ if (!vxlan_parse_gpe_proto(vh, &protocol))
+ goto out;
+ ptype = gro_find_receive_by_type(protocol);
+ if (!ptype)
+ goto out;
+ pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
+ flush = 0;
+ }
+out:
+ skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
return pp;
}
return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
}
+static int vxlan_gpe_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
+{
+ struct vxlanhdr *vh = (struct vxlanhdr *)(skb->data + nhoff);
+ const struct packet_offload *ptype;
+ int err = -ENOSYS;
+ __be16 protocol;
+
+ if (!vxlan_parse_gpe_proto(vh, &protocol))
+ return err;
+ ptype = gro_find_complete_by_type(protocol);
+ if (ptype)
+ err = ptype->callbacks.gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
+ return err;
+}
+
static struct vxlan_fdb *vxlan_fdb_alloc(struct vxlan_dev *vxlan, const u8 *mac,
__u16 state, __be32 src_vni,
__u16 ndm_flags)
unparsed->vx_flags &= ~VXLAN_GBP_USED_BITS;
}
-static bool vxlan_parse_gpe_hdr(struct vxlanhdr *unparsed,
- __be16 *protocol,
- struct sk_buff *skb, u32 vxflags)
-{
- struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)unparsed;
-
- /* Need to have Next Protocol set for interfaces in GPE mode. */
- if (!gpe->np_applied)
- return false;
- /* "The initial version is 0. If a receiver does not support the
- * version indicated it MUST drop the packet.
- */
- if (gpe->version != 0)
- return false;
- /* "When the O bit is set to 1, the packet is an OAM packet and OAM
- * processing MUST occur." However, we don't implement OAM
- * processing, thus drop the packet.
- */
- if (gpe->oam_flag)
- return false;
-
- *protocol = tun_p_to_eth_p(gpe->next_protocol);
- if (!*protocol)
- return false;
-
- unparsed->vx_flags &= ~VXLAN_GPE_USED_BITS;
- return true;
-}
-
static bool vxlan_set_mac(struct vxlan_dev *vxlan,
struct vxlan_sock *vs,
struct sk_buff *skb, __be32 vni)
* used by VXLAN extensions if explicitly requested.
*/
if (vs->flags & VXLAN_F_GPE) {
- if (!vxlan_parse_gpe_hdr(&unparsed, &protocol, skb, vs->flags))
+ if (!vxlan_parse_gpe_proto(&unparsed, &protocol))
goto drop;
+ unparsed.vx_flags &= ~VXLAN_GPE_USED_BITS;
raw_proto = true;
}
}
ndst = &rt->dst;
- err = skb_tunnel_check_pmtu(skb, ndst, VXLAN_HEADROOM,
+ err = skb_tunnel_check_pmtu(skb, ndst, vxlan_headroom(flags & VXLAN_F_GPE),
netif_is_any_bridge_port(dev));
if (err < 0) {
goto tx_error;
goto out_unlock;
}
- err = skb_tunnel_check_pmtu(skb, ndst, VXLAN6_HEADROOM,
+ err = skb_tunnel_check_pmtu(skb, ndst,
+ vxlan_headroom((flags & VXLAN_F_GPE) | VXLAN_F_IPV6),
netif_is_any_bridge_port(dev));
if (err < 0) {
goto tx_error;
struct vxlan_rdst *dst = &vxlan->default_dst;
struct net_device *lowerdev = __dev_get_by_index(vxlan->net,
dst->remote_ifindex);
- bool use_ipv6 = !!(vxlan->cfg.flags & VXLAN_F_IPV6);
/* This check is different than dev->max_mtu, because it looks at
* the lowerdev->mtu, rather than the static dev->max_mtu
*/
if (lowerdev) {
- int max_mtu = lowerdev->mtu -
- (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
+ int max_mtu = lowerdev->mtu - vxlan_headroom(vxlan->cfg.flags);
if (new_mtu > max_mtu)
return -EINVAL;
}
tunnel_cfg.encap_rcv = vxlan_rcv;
tunnel_cfg.encap_err_lookup = vxlan_err_lookup;
tunnel_cfg.encap_destroy = NULL;
- tunnel_cfg.gro_receive = vxlan_gro_receive;
- tunnel_cfg.gro_complete = vxlan_gro_complete;
+ if (vs->flags & VXLAN_F_GPE) {
+ tunnel_cfg.gro_receive = vxlan_gpe_gro_receive;
+ tunnel_cfg.gro_complete = vxlan_gpe_gro_complete;
+ } else {
+ tunnel_cfg.gro_receive = vxlan_gro_receive;
+ tunnel_cfg.gro_complete = vxlan_gro_complete;
+ }
setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_rdst *dst = &vxlan->default_dst;
unsigned short needed_headroom = ETH_HLEN;
- bool use_ipv6 = !!(conf->flags & VXLAN_F_IPV6);
int max_mtu = ETH_MAX_MTU;
+ u32 flags = conf->flags;
if (!changelink) {
- if (conf->flags & VXLAN_F_GPE)
+ if (flags & VXLAN_F_GPE)
vxlan_raw_setup(dev);
else
vxlan_ether_setup(dev);
dev->needed_tailroom = lowerdev->needed_tailroom;
- max_mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM :
- VXLAN_HEADROOM);
+ max_mtu = lowerdev->mtu - vxlan_headroom(flags);
if (max_mtu < ETH_MIN_MTU)
max_mtu = ETH_MIN_MTU;
if (dev->mtu > max_mtu)
dev->mtu = max_mtu;
- if (use_ipv6 || conf->flags & VXLAN_F_COLLECT_METADATA)
- needed_headroom += VXLAN6_HEADROOM;
- else
- needed_headroom += VXLAN_HEADROOM;
+ if (flags & VXLAN_F_COLLECT_METADATA)
+ flags |= VXLAN_F_IPV6;
+ needed_headroom += vxlan_headroom(flags);
dev->needed_headroom = needed_headroom;
memcpy(&vxlan->cfg, conf, sizeof(*conf));
phy_set_drvdata(phy, &priv->ports[i]);
i++;
- if (i > INNO_PHY_PORT_NUM) {
+ if (i >= INNO_PHY_PORT_NUM) {
dev_warn(dev, "Support %d ports in maximum\n", i);
of_node_put(child);
break;
regs = *(struct regmap **)dev->platform_data;
if (!regs)
- return dev_err_probe(dev, EINVAL,
+ return dev_err_probe(dev, -EINVAL,
"No data passed, requires struct regmap**\n");
dp_phy = devm_kzalloc(dev, sizeof(*dp_phy), GFP_KERNEL);
for (i = 0; i < ARRAY_SIZE(txpredivs); i++) {
ns_hdmipll_ck = 5 * tmds_clk * txposdiv * txpredivs[i];
if (ns_hdmipll_ck >= 5 * GIGA &&
- ns_hdmipll_ck <= 1 * GIGA)
+ ns_hdmipll_ck <= 12 * GIGA)
break;
}
if (i == (ARRAY_SIZE(txpredivs) - 1) &&
/**
* struct qcom_snps_hsphy - snps hs phy attributes
*
+ * @dev: device structure
+ *
* @phy: generic phy
* @base: iomapped memory space for snps hs phy
*
- * @cfg_ahb_clk: AHB2PHY interface clock
- * @ref_clk: phy reference clock
+ * @num_clks: number of clocks
+ * @clks: array of clocks
* @phy_reset: phy reset control
* @vregs: regulator supplies bulk data
* @phy_initialized: if PHY has been initialized correctly
* @update_seq_cfg: tuning parameters for phy init
*/
struct qcom_snps_hsphy {
+ struct device *dev;
+
struct phy *phy;
void __iomem *base;
- struct clk *cfg_ahb_clk;
- struct clk *ref_clk;
+ int num_clks;
+ struct clk_bulk_data *clks;
struct reset_control *phy_reset;
struct regulator_bulk_data vregs[SNPS_HS_NUM_VREGS];
struct phy_override_seq update_seq_cfg[NUM_HSPHY_TUNING_PARAMS];
};
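+/* Collect the optional "cfg_ahb" clock and the "ref" clock into a clk_bulk array so later code can enable and disable them together. */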
+static int qcom_snps_hsphy_clk_init(struct qcom_snps_hsphy *hsphy)
+{
+ struct device *dev = hsphy->dev;
+
+ hsphy->num_clks = 2;
+ hsphy->clks = devm_kcalloc(dev, hsphy->num_clks, sizeof(*hsphy->clks), GFP_KERNEL);
+ if (!hsphy->clks)
+ return -ENOMEM;
+
+ /*
+ * TODO: Currently no device tree instantiation of the PHY is using the clock.
+ * This needs to be fixed in order for this code to be able to use devm_clk_bulk_get().
+ */
+ hsphy->clks[0].id = "cfg_ahb";
+ hsphy->clks[0].clk = devm_clk_get_optional(dev, "cfg_ahb");
+ if (IS_ERR(hsphy->clks[0].clk))
+ return dev_err_probe(dev, PTR_ERR(hsphy->clks[0].clk),
+ "failed to get cfg_ahb clk\n");
+
+ hsphy->clks[1].id = "ref";
+ hsphy->clks[1].clk = devm_clk_get(dev, "ref");
+ if (IS_ERR(hsphy->clks[1].clk))
+ return dev_err_probe(dev, PTR_ERR(hsphy->clks[1].clk),
+ "failed to get ref clk\n");
+
+ return 0;
+}
+
static inline void qcom_snps_hsphy_write_mask(void __iomem *base, u32 offset,
u32 mask, u32 val)
{
0, USB2_AUTO_RESUME);
}
- clk_disable_unprepare(hsphy->cfg_ahb_clk);
return 0;
}
static int qcom_snps_hsphy_resume(struct qcom_snps_hsphy *hsphy)
{
- int ret;
-
dev_dbg(&hsphy->phy->dev, "Resume QCOM SNPS PHY, mode\n");
- ret = clk_prepare_enable(hsphy->cfg_ahb_clk);
- if (ret) {
- dev_err(&hsphy->phy->dev, "failed to enable cfg ahb clock\n");
- return ret;
- }
-
return 0;
}
if (!hsphy->phy_initialized)
return 0;
- qcom_snps_hsphy_suspend(hsphy);
- return 0;
+ return qcom_snps_hsphy_suspend(hsphy);
}
static int __maybe_unused qcom_snps_hsphy_runtime_resume(struct device *dev)
if (!hsphy->phy_initialized)
return 0;
- qcom_snps_hsphy_resume(hsphy);
- return 0;
+ return qcom_snps_hsphy_resume(hsphy);
}
static int qcom_snps_hsphy_set_mode(struct phy *phy, enum phy_mode mode,
if (ret)
return ret;
- ret = clk_prepare_enable(hsphy->cfg_ahb_clk);
+ ret = clk_bulk_prepare_enable(hsphy->num_clks, hsphy->clks);
if (ret) {
- dev_err(&phy->dev, "failed to enable cfg ahb clock, %d\n", ret);
+ dev_err(&phy->dev, "failed to enable clocks, %d\n", ret);
goto poweroff_phy;
}
ret = reset_control_assert(hsphy->phy_reset);
if (ret) {
dev_err(&phy->dev, "failed to assert phy_reset, %d\n", ret);
- goto disable_ahb_clk;
+ goto disable_clks;
}
usleep_range(100, 150);
ret = reset_control_deassert(hsphy->phy_reset);
if (ret) {
dev_err(&phy->dev, "failed to de-assert phy_reset, %d\n", ret);
- goto disable_ahb_clk;
+ goto disable_clks;
}
qcom_snps_hsphy_write_mask(hsphy->base, USB2_PHY_USB_PHY_CFG0,
return 0;
-disable_ahb_clk:
- clk_disable_unprepare(hsphy->cfg_ahb_clk);
+disable_clks:
+ clk_bulk_disable_unprepare(hsphy->num_clks, hsphy->clks);
poweroff_phy:
regulator_bulk_disable(ARRAY_SIZE(hsphy->vregs), hsphy->vregs);
struct qcom_snps_hsphy *hsphy = phy_get_drvdata(phy);
reset_control_assert(hsphy->phy_reset);
- clk_disable_unprepare(hsphy->cfg_ahb_clk);
+ clk_bulk_disable_unprepare(hsphy->num_clks, hsphy->clks);
regulator_bulk_disable(ARRAY_SIZE(hsphy->vregs), hsphy->vregs);
hsphy->phy_initialized = false;
if (!hsphy)
return -ENOMEM;
+ hsphy->dev = dev;
+
hsphy->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hsphy->base))
return PTR_ERR(hsphy->base);
- hsphy->ref_clk = devm_clk_get(dev, "ref");
- if (IS_ERR(hsphy->ref_clk))
- return dev_err_probe(dev, PTR_ERR(hsphy->ref_clk),
- "failed to get ref clk\n");
+ ret = qcom_snps_hsphy_clk_init(hsphy);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to initialize clocks\n");
hsphy->phy_reset = devm_reset_control_get_exclusive(&pdev->dev, NULL);
if (IS_ERR(hsphy->phy_reset)) {
#include <linux/dmi.h>
#include <linux/io.h>
#include <linux/ioport.h>
-#include <linux/slab.h>
#include "pmc.h"
*/
static void amd_pmc_skip_nvme_smi_handler(u32 s2idle_bug_mmio)
{
- struct resource *res;
void __iomem *addr;
u8 val;
- res = request_mem_region_muxed(s2idle_bug_mmio, 1, "amd_pmc_pm80");
- if (!res)
+ if (!request_mem_region_muxed(s2idle_bug_mmio, 1, "amd_pmc_pm80"))
return;
addr = ioremap(s2idle_bug_mmio, 1);
iounmap(addr);
cleanup_resource:
- release_resource(res);
- kfree(res);
+ release_mem_region(s2idle_bug_mmio, 1);
}
void amd_pmc_process_restore_quirks(struct amd_pmc_dev *dev)
data, sizeof(*data));
}
+int apmf_os_power_slider_update(struct amd_pmf_dev *pdev, u8 event)
+{
+ struct os_power_slider args;
+ struct acpi_buffer params;
+ union acpi_object *info;
+ int err = 0;
+
+ args.size = sizeof(args);
+ args.slider_event = event;
+
+ params.length = sizeof(args);
+ params.pointer = (void *)&args;
+
+ info = apmf_if_call(pdev, APMF_FUNC_OS_POWER_SLIDER_UPDATE, &params);
+ if (!info)
+ err = -EIO;
+
+ kfree(info);
+ return err;
+}
+
static void apmf_sbios_heartbeat_notify(struct work_struct *work)
{
struct amd_pmf_dev *dev = container_of(work, struct amd_pmf_dev, heart_beat.work);
ret = apmf_get_system_params(pmf_dev);
if (ret) {
- dev_err(pmf_dev->dev, "APMF apmf_get_system_params failed :%d\n", ret);
+ dev_dbg(pmf_dev->dev, "APMF apmf_get_system_params failed :%d\n", ret);
goto out;
}
return NOTIFY_DONE;
}
- amd_pmf_set_sps_power_limits(pmf);
+ if (is_apmf_func_supported(pmf, APMF_FUNC_STATIC_SLIDER_GRANULAR))
+ amd_pmf_set_sps_power_limits(pmf);
+
+ if (is_apmf_func_supported(pmf, APMF_FUNC_OS_POWER_SLIDER_UPDATE))
+ amd_pmf_power_slider_update_event(pmf);
return NOTIFY_OK;
}
int ret;
/* Enable Static Slider */
- if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) {
+ if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR) ||
+ is_apmf_func_supported(dev, APMF_FUNC_OS_POWER_SLIDER_UPDATE)) {
amd_pmf_init_sps(dev);
dev->pwr_src_notifier.notifier_call = amd_pmf_pwr_src_notify_call;
power_supply_reg_notifier(&dev->pwr_src_notifier);
#define APMF_FUNC_SBIOS_HEARTBEAT 4
#define APMF_FUNC_AUTO_MODE 5
#define APMF_FUNC_SET_FAN_IDX 7
+#define APMF_FUNC_OS_POWER_SLIDER_UPDATE 8
#define APMF_FUNC_STATIC_SLIDER_GRANULAR 9
#define APMF_FUNC_DYN_SLIDER_AC 11
#define APMF_FUNC_DYN_SLIDER_DC 12
#define GET_STT_LIMIT_APU 0x20
#define GET_STT_LIMIT_HS2 0x21
+/* OS slider update notification */
+#define DC_BEST_PERF 0
+#define DC_BETTER_PERF 1
+#define DC_BATTERY_SAVER 3
+#define AC_BEST_PERF 4
+#define AC_BETTER_PERF 5
+#define AC_BETTER_BATTERY 6
+
/* Fan Index for Auto Mode */
#define FAN_INDEX_AUTO 0xFFFFFFFF
struct apmf_sps_prop_granular prop[POWER_SOURCE_MAX][POWER_MODE_MAX];
};
+struct os_power_slider {
+ u16 size;
+ u8 slider_event;
+} __packed;
+
struct fan_table_control {
bool manual;
unsigned long fan_id;
int amd_pmf_init_metrics_table(struct amd_pmf_dev *dev);
int amd_pmf_get_power_source(void);
int apmf_install_handler(struct amd_pmf_dev *pmf_dev);
+int apmf_os_power_slider_update(struct amd_pmf_dev *dev, u8 flag);
/* SPS Layer */
int amd_pmf_get_pprof_modes(struct amd_pmf_dev *pmf);
int apmf_get_static_slider_granular(struct amd_pmf_dev *pdev,
struct apmf_static_slider_granular_output *output);
bool is_pprof_balanced(struct amd_pmf_dev *pmf);
+int amd_pmf_power_slider_update_event(struct amd_pmf_dev *dev);
int apmf_update_fan_idx(struct amd_pmf_dev *pdev, bool manual, u32 idx);
return mode;
}
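+/* Translate the current platform profile and power source into an OS power-slider event bit and report it to the BIOS via APMF. */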
+int amd_pmf_power_slider_update_event(struct amd_pmf_dev *dev)
+{
+ u8 mode, flag = 0;
+ int src;
+
+ mode = amd_pmf_get_pprof_modes(dev);
+ if (mode < 0)
+ return mode;
+
+ src = amd_pmf_get_power_source();
+
+ if (src == POWER_SOURCE_AC) {
+ switch (mode) {
+ case POWER_MODE_PERFORMANCE:
+ flag |= BIT(AC_BEST_PERF);
+ break;
+ case POWER_MODE_BALANCED_POWER:
+ flag |= BIT(AC_BETTER_PERF);
+ break;
+ case POWER_MODE_POWER_SAVER:
+ flag |= BIT(AC_BETTER_BATTERY);
+ break;
+ default:
+ dev_err(dev->dev, "unsupported platform profile\n");
+ return -EOPNOTSUPP;
+ }
+
+ } else if (src == POWER_SOURCE_DC) {
+ switch (mode) {
+ case POWER_MODE_PERFORMANCE:
+ flag |= BIT(DC_BEST_PERF);
+ break;
+ case POWER_MODE_BALANCED_POWER:
+ flag |= BIT(DC_BETTER_PERF);
+ break;
+ case POWER_MODE_POWER_SAVER:
+ flag |= BIT(DC_BATTERY_SAVER);
+ break;
+ default:
+ dev_err(dev->dev, "unsupported platform profile\n");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ apmf_os_power_slider_update(dev, flag);
+
+ return 0;
+}
+
static int amd_pmf_profile_set(struct platform_profile_handler *pprof,
enum platform_profile_option profile)
{
struct amd_pmf_dev *pmf = container_of(pprof, struct amd_pmf_dev, pprof);
+ int ret = 0;
pmf->current_profile = profile;
- return amd_pmf_set_sps_power_limits(pmf);
+ /* Notify EC about the slider position change */
+ if (is_apmf_func_supported(pmf, APMF_FUNC_OS_POWER_SLIDER_UPDATE)) {
+ ret = amd_pmf_power_slider_update_event(pmf);
+ if (ret)
+ return ret;
+ }
+
+ if (is_apmf_func_supported(pmf, APMF_FUNC_STATIC_SLIDER_GRANULAR)) {
+ ret = amd_pmf_set_sps_power_limits(pmf);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
int amd_pmf_init_sps(struct amd_pmf_dev *dev)
int err;
dev->current_profile = PLATFORM_PROFILE_BALANCED;
- amd_pmf_load_defaults_sps(dev);
- /* update SPS balanced power mode thermals */
- amd_pmf_set_sps_power_limits(dev);
+ if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) {
+ amd_pmf_load_defaults_sps(dev);
+
+ /* update SPS balanced power mode thermals */
+ amd_pmf_set_sps_power_limits(dev);
+ }
dev->pprof.profile_get = amd_pmf_profile_get;
dev->pprof.profile_set = amd_pmf_profile_set;
struct device_attribute *attr,
const char *buf, size_t count)
{
- u32 cmd, mode, r, g, b, speed;
+ u32 cmd, mode, r, g, b, speed;
int err;
if (sscanf(buf, "%d %d %d %d %d %d", &cmd, &mode, &r, &g, &b, &speed) != 6)
return -EINVAL;
- cmd = !!cmd;
+ /* 0xb3 is "set" and 0xb4 is "save to BIOS" */
+ switch (cmd) {
+ case 0:
+ cmd = 0xb3;
+ break;
+ case 1:
+ cmd = 0xb4;
+ break;
+ default:
+ return -EINVAL;
+ }
/* These are the known usable modes across all TUF/ROG */
if (mode >= 12 || mode == 9)
{ KE_IGNORE, 0x293, { KEY_KBDILLUMTOGGLE } },
{ KE_IGNORE, 0x294, { KEY_KBDILLUMUP } },
{ KE_IGNORE, 0x295, { KEY_KBDILLUMUP } },
+ // Ignore Ambient Light Sensing
+ { KE_KEY, 0x2c1, { KEY_RESERVED } },
{ KE_END, 0 }
};
DMI_MATCH(DMI_PRODUCT_NAME, "Surface Go"),
},
},
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite Dragonfly G2 Notebook PC"),
+ },
+ },
{ }
};
static int intel_hid_probe(struct platform_device *device)
{
acpi_handle handle = ACPI_HANDLE(&device->dev);
- unsigned long long mode;
+ unsigned long long mode, dummy;
struct intel_hid_priv *priv;
acpi_status status;
int err;
if (err)
goto err_remove_notify;
- if (priv->array) {
- unsigned long long dummy;
+ intel_button_array_enable(&device->dev, true);
- intel_button_array_enable(&device->dev, true);
-
- /* Call button load method to enable HID power button */
- if (!intel_hid_evaluate_method(handle, INTEL_HID_DSM_BTNL_FN,
- &dummy)) {
- dev_warn(&device->dev,
- "failed to enable HID power button\n");
- }
- }
+ /*
+ * Call button load method to enable HID power button
+ * Always do this since it activates events on some devices without
+ * a button array too.
+ */
+ if (!intel_hid_evaluate_method(handle, INTEL_HID_DSM_BTNL_FN, &dummy))
+ dev_warn(&device->dev, "failed to enable HID power button\n");
device_init_wakeup(&device->dev, true);
/*
return -EINVAL;
if (quirks->ec_read_only)
- return -EOPNOTSUPP;
+ return 0;
/* read current device state */
result = ec_read(MSI_STANDARD_EC_COMMAND_ADDRESS, &rdata);
static void msi_init_rfkill(struct work_struct *ignored)
{
if (rfk_wlan) {
- rfkill_set_sw_state(rfk_wlan, !wlan_s);
+ msi_rfkill_set_state(rfk_wlan, !wlan_s);
rfkill_wlan_set(NULL, !wlan_s);
}
if (rfk_bluetooth) {
- rfkill_set_sw_state(rfk_bluetooth, !bluetooth_s);
+ msi_rfkill_set_state(rfk_bluetooth, !bluetooth_s);
rfkill_bluetooth_set(NULL, !bluetooth_s);
}
if (rfk_threeg) {
- rfkill_set_sw_state(rfk_threeg, !threeg_s);
+ msi_rfkill_set_state(rfk_threeg, !threeg_s);
rfkill_threeg_set(NULL, !threeg_s);
}
}
#define IRQ_RESOURCE_NONE 0
#define IRQ_RESOURCE_GPIO 1
#define IRQ_RESOURCE_APIC 2
+#define IRQ_RESOURCE_AUTO 3
enum smi_bus_type {
SMI_I2C,
int ret;
switch (inst->flags & IRQ_RESOURCE_TYPE) {
+ case IRQ_RESOURCE_AUTO:
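+ /* Try an ACPI GpioInt resource first, then fall back to a regular Interrupt resource. */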
+ ret = acpi_dev_gpio_irq_get(adev, inst->irq_idx);
+ if (ret > 0) {
+ dev_dbg(&pdev->dev, "Using gpio irq\n");
+ break;
+ }
+ ret = platform_get_irq(pdev, inst->irq_idx);
+ if (ret > 0) {
+ dev_dbg(&pdev->dev, "Using platform irq\n");
+ break;
+ }
+ break;
case IRQ_RESOURCE_GPIO:
ret = acpi_dev_gpio_irq_get(adev, inst->irq_idx);
break;
static const struct smi_node cs35l41_hda = {
.instances = {
- { "cs35l41-hda", IRQ_RESOURCE_GPIO, 0 },
- { "cs35l41-hda", IRQ_RESOURCE_GPIO, 0 },
- { "cs35l41-hda", IRQ_RESOURCE_GPIO, 0 },
- { "cs35l41-hda", IRQ_RESOURCE_GPIO, 0 },
+ { "cs35l41-hda", IRQ_RESOURCE_AUTO, 0 },
+ { "cs35l41-hda", IRQ_RESOURCE_AUTO, 0 },
+ { "cs35l41-hda", IRQ_RESOURCE_AUTO, 0 },
+ { "cs35l41-hda", IRQ_RESOURCE_AUTO, 0 },
{}
},
.bus_type = SMI_AUTO_DETECT,
/* Format: 'Password,Signature' */
auth_str = kasprintf(GFP_KERNEL, "%s,%s", passwd, setting->signature);
if (!auth_str) {
- kfree(passwd);
+ kfree_sensitive(passwd);
return -ENOMEM;
}
ret = tlmi_simple_call(LENOVO_CERT_TO_PASSWORD_GUID, auth_str);
kfree(auth_str);
- kfree(passwd);
+ kfree_sensitive(passwd);
return ret ?: count;
}
/* NOTE: Please keep all entries sorted alphabetically */
static const struct property_entry archos_101_cesium_educ_props[] = {
- PROPERTY_ENTRY_U32("touchscreen-size-x", 1280),
- PROPERTY_ENTRY_U32("touchscreen-size-y", 1850),
- PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1850),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1280),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-archos-101-cesium-educ.fw"),
{ }
};
/* Disable VCN33_WIFI */
ret = regmap_update_bits(mt6397->regmap, MT6358_LDO_VCN33_CON0_1, BIT(0), 0);
if (ret) {
- dev_err(dev, "Failed to disable VCN33_BT\n");
+ dev_err(dev, "Failed to disable VCN33_WIFI\n");
return ret;
}
const struct mt6358_regulator_info *mt6358_info;
int i, max_regulator, ret;
- ret = mt6358_sync_vcn33_setting(&pdev->dev);
- if (ret)
- return ret;
-
if (mt6397->chip_id == MT6366_CHIP_ID) {
max_regulator = MT6366_MAX_REGULATOR;
mt6358_info = mt6366_regulators;
mt6358_info = mt6358_regulators;
}
+ ret = mt6358_sync_vcn33_setting(&pdev->dev);
+ if (ret)
+ return ret;
+
for (i = 0; i < max_regulator; i++) {
config.dev = &pdev->dev;
config.regmap = mt6397->regmap;
* Requeue a request back to the block request queue
* only works for block requests
*/
-static int _dasd_requeue_request(struct dasd_ccw_req *cqr)
+static void _dasd_requeue_request(struct dasd_ccw_req *cqr)
{
- struct dasd_block *block = cqr->block;
struct request *req;
- if (!block)
- return -EINVAL;
/*
* If the request is an ERP request there is nothing to requeue.
* This will be done with the remaining original request.
*/
if (cqr->refers)
- return 0;
+ return;
spin_lock_irq(&cqr->dq->lock);
req = (struct request *) cqr->callback_data;
blk_mq_requeue_request(req, true);
spin_unlock_irq(&cqr->dq->lock);
- return 0;
+ return;
}
-/*
- * Go through all request on the dasd_block request queue, cancel them
- * on the respective dasd_device, and return them to the generic
- * block layer.
- */
-static int dasd_flush_block_queue(struct dasd_block *block)
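+/* Move all requests (including their ERP chains) from the block's ccw_queue to flush_queue so the caller can process and return them. */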
+static int _dasd_requests_to_flushqueue(struct dasd_block *block,
+ struct list_head *flush_queue)
{
struct dasd_ccw_req *cqr, *n;
- int rc, i;
- struct list_head flush_queue;
unsigned long flags;
+ int rc, i;
- INIT_LIST_HEAD(&flush_queue);
- spin_lock_bh(&block->queue_lock);
+ spin_lock_irqsave(&block->queue_lock, flags);
rc = 0;
restart:
list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
* is returned from the dasd_device layer.
*/
cqr->callback = _dasd_wake_block_flush_cb;
- for (i = 0; cqr != NULL; cqr = cqr->refers, i++)
- list_move_tail(&cqr->blocklist, &flush_queue);
+ for (i = 0; cqr; cqr = cqr->refers, i++)
+ list_move_tail(&cqr->blocklist, flush_queue);
if (i > 1)
/* moved more than one request - need to restart */
goto restart;
}
- spin_unlock_bh(&block->queue_lock);
+ spin_unlock_irqrestore(&block->queue_lock, flags);
+
+ return rc;
+}
+
+/*
+ * Go through all request on the dasd_block request queue, cancel them
+ * on the respective dasd_device, and return them to the generic
+ * block layer.
+ */
+static int dasd_flush_block_queue(struct dasd_block *block)
+{
+ struct dasd_ccw_req *cqr, *n;
+ struct list_head flush_queue;
+ unsigned long flags;
+ int rc;
+
+ INIT_LIST_HEAD(&flush_queue);
+ rc = _dasd_requests_to_flushqueue(block, &flush_queue);
+
/* Now call the callback function of flushed requests */
restart_cb:
list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
*/
int dasd_generic_requeue_all_requests(struct dasd_device *device)
{
+ struct dasd_block *block = device->block;
struct list_head requeue_queue;
struct dasd_ccw_req *cqr, *n;
- struct dasd_ccw_req *refers;
int rc;
- INIT_LIST_HEAD(&requeue_queue);
- spin_lock_irq(get_ccwdev_lock(device->cdev));
- rc = 0;
- list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
- /* Check status and move request to flush_queue */
- if (cqr->status == DASD_CQR_IN_IO) {
- rc = device->discipline->term_IO(cqr);
- if (rc) {
- /* unable to terminate requeust */
- dev_err(&device->cdev->dev,
- "Unable to terminate request %p "
- "on suspend\n", cqr);
- spin_unlock_irq(get_ccwdev_lock(device->cdev));
- dasd_put_device(device);
- return rc;
- }
- }
- list_move_tail(&cqr->devlist, &requeue_queue);
- }
- spin_unlock_irq(get_ccwdev_lock(device->cdev));
-
- list_for_each_entry_safe(cqr, n, &requeue_queue, devlist) {
- wait_event(dasd_flush_wq,
- (cqr->status != DASD_CQR_CLEAR_PENDING));
+ if (!block)
+ return 0;
- /*
- * requeue requests to blocklayer will only work
- * for block device requests
- */
- if (_dasd_requeue_request(cqr))
- continue;
+ INIT_LIST_HEAD(&requeue_queue);
+ rc = _dasd_requests_to_flushqueue(block, &requeue_queue);
- /* remove requests from device and block queue */
- list_del_init(&cqr->devlist);
- while (cqr->refers != NULL) {
- refers = cqr->refers;
- /* remove the request from the block queue */
- list_del(&cqr->blocklist);
- /* free the finished erp request */
- dasd_free_erp_request(cqr, cqr->memdev);
- cqr = refers;
+ /* Now call the callback function of flushed requests */
+restart_cb:
+ list_for_each_entry_safe(cqr, n, &requeue_queue, blocklist) {
+ wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
+ /* Process finished ERP request. */
+ if (cqr->refers) {
+ spin_lock_bh(&block->queue_lock);
+ __dasd_process_erp(block->base, cqr);
+ spin_unlock_bh(&block->queue_lock);
+ /* restart list_for_xx loop since dasd_process_erp
+ * might remove multiple elements
+ */
+ goto restart_cb;
}
-
- /*
- * _dasd_requeue_request already checked for a valid
- * blockdevice, no need to check again
- * all erp requests (cqr->refers) have a cqr->block
- * pointer copy from the original cqr
- */
+ _dasd_requeue_request(cqr);
list_del_init(&cqr->blocklist);
cqr->block->base->discipline->free_cp(
cqr, (struct request *) cqr->callback_data);
}
-
- /*
- * if requests remain then they are internal request
- * and go back to the device queue
- */
- if (!list_empty(&requeue_queue)) {
- /* move freeze_queue to start of the ccw_queue */
- spin_lock_irq(get_ccwdev_lock(device->cdev));
- list_splice_tail(&requeue_queue, &device->ccw_queue);
- spin_unlock_irq(get_ccwdev_lock(device->cdev));
- }
dasd_schedule_device_bh(device);
return rc;
}
dev_err(&device->cdev->dev, "An I/O request was rejected"
" because writing is inhibited\n");
erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
- } else if (sense[7] & SNS7_INVALID_ON_SEC) {
+ } else if (sense[7] == SNS7_INVALID_ON_SEC) {
dev_err(&device->cdev->dev, "An I/O request was rejected on a copy pair secondary device\n");
/* suppress dump of sense data for this error */
set_bit(DASD_CQR_SUPPRESS_CR, &erp->refers->flags);
erp->block = cqr->block;
erp->magic = cqr->magic;
erp->expires = cqr->expires;
- erp->retries = 256;
+ erp->retries = device->default_retries;
erp->buildclk = get_tod_clock();
erp->status = DASD_CQR_FILLED;
spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
dasd_schedule_block_bh(block);
+ dasd_schedule_device_bh(base);
return 0;
}
int error;
unsigned long iflags;
- error = blk_get_queue(scsidp->request_queue);
- if (error)
- return error;
+ if (!blk_get_queue(scsidp->request_queue)) {
+ pr_warn("%s: get scsi_device queue failed\n", __func__);
+ return -ENODEV;
+ }
error = -ENOMEM;
cdev = cdev_alloc();
return -ENOMEM;
amd_manager->acp_mmio = devm_ioremap(dev, res->start, resource_size(res));
- if (IS_ERR(amd_manager->mmio)) {
+ if (!amd_manager->acp_mmio) {
dev_err(dev, "mmio not found\n");
- return PTR_ERR(amd_manager->mmio);
+ return -ENOMEM;
}
amd_manager->instance = pdata->instance;
amd_manager->mmio = amd_manager->acp_mmio +
"initializing enumeration and init completion for Slave %d\n",
slave->dev_num);
- init_completion(&slave->enumeration_complete);
- init_completion(&slave->initialization_complete);
+ reinit_completion(&slave->enumeration_complete);
+ reinit_completion(&slave->initialization_complete);
} else if ((status == SDW_SLAVE_ATTACHED) &&
(slave->status == SDW_SLAVE_UNATTACHED)) {
"signaling enumeration completion for Slave %d\n",
slave->dev_num);
- complete(&slave->enumeration_complete);
+ complete_all(&slave->enumeration_complete);
}
slave->status = status;
mutex_unlock(&bus->bus_lock);
"signaling initialization completion for Slave %d\n",
slave->dev_num);
- complete(&slave->initialization_complete);
+ complete_all(&slave->initialization_complete);
/*
* If the manager became pm_runtime active, the peripherals will be
status = (val >> (dev_num * SWRM_MCP_SLV_STATUS_SZ));
if ((status & SWRM_MCP_SLV_STATUS_MASK) == SDW_SLAVE_ALERT) {
- ctrl->status[dev_num] = status;
+ ctrl->status[dev_num] = status & SWRM_MCP_SLV_STATUS_MASK;
return dev_num;
}
}
WR_FIFO_OVERRUN)
#define QSPI_ALL_IRQS (QSPI_ERR_IRQS | RESP_FIFO_RDY | \
WR_FIFO_EMPTY | WR_FIFO_FULL | \
- TRANSACTION_DONE)
+ TRANSACTION_DONE | DMA_CHAIN_DONE)
#define PIO_XFER_CTRL 0x0014
#define REQUEST_COUNT_MSK 0xffff
dma_addr_t dma_cmd_desc;
/* allocate for dma cmd descriptor */
- virt_cmd_desc = dma_pool_alloc(ctrl->dma_cmd_pool, GFP_KERNEL | __GFP_ZERO, &dma_cmd_desc);
- if (!virt_cmd_desc)
- return -ENOMEM;
+ virt_cmd_desc = dma_pool_alloc(ctrl->dma_cmd_pool, GFP_ATOMIC | __GFP_ZERO, &dma_cmd_desc);
+ if (!virt_cmd_desc) {
+ dev_warn_once(ctrl->dev, "Couldn't find memory for descriptor\n");
+ return -EAGAIN;
+ }
ctrl->virt_cmd_desc[ctrl->n_cmd_desc] = virt_cmd_desc;
ctrl->dma_cmd_desc[ctrl->n_cmd_desc] = dma_cmd_desc;
for (i = 0; i < sgt->nents; i++) {
dma_ptr_sg = sg_dma_address(sgt->sgl + i);
+ dma_len_sg = sg_dma_len(sgt->sgl + i);
if (!IS_ALIGNED(dma_ptr_sg, QSPI_ALIGN_REQ)) {
dev_warn_once(ctrl->dev, "dma_address not aligned to %d\n", QSPI_ALIGN_REQ);
return -EAGAIN;
}
+ /*
+ * When reading with DMA the controller writes to memory 1 word
+ * at a time. If the length isn't a multiple of 4 bytes then
+ * the controller can clobber the things later in memory.
+ * Fallback to PIO to be safe.
+ */
+ if (ctrl->xfer.dir == QSPI_READ && (dma_len_sg & 0x03)) {
+ dev_warn_once(ctrl->dev, "fallback to PIO for read of size %#010x\n",
+ dma_len_sg);
+ return -EAGAIN;
+ }
}
for (i = 0; i < sgt->nents; i++) {
ret = qcom_qspi_setup_dma_desc(ctrl, xfer);
if (ret != -EAGAIN) {
- if (!ret)
+ if (!ret) {
+ dma_wmb();
qcom_qspi_dma_xfer(ctrl);
+ }
goto exit;
}
dev_warn_once(ctrl->dev, "DMA failure, falling back to PIO\n");
int_status = readl(ctrl->base + MSTR_INT_STATUS);
writel(int_status, ctrl->base + MSTR_INT_STATUS);
+ /* Ignore disabled interrupts */
+ int_status &= readl(ctrl->base + MSTR_INT_EN);
+
/* PIO mode handling */
if (ctrl->xfer.dir == QSPI_WRITE) {
if (int_status & WR_FIFO_EMPTY)
return ret;
}
+static int qcom_qspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
+{
+ /*
+ * If qcom_qspi_can_dma() is going to return false we don't need to
+ * adjust anything.
+ */
+ if (op->data.nbytes <= QSPI_MAX_BYTES_FIFO)
+ return 0;
+
+ /*
+ * When reading, the transfer needs to be a multiple of 4 bytes so
+ * shrink the transfer if that's not true. The caller will then do a
+ * second transfer to finish things up.
+ */
+ if (op->data.dir == SPI_MEM_DATA_IN && (op->data.nbytes & 0x3))
+ op->data.nbytes &= ~0x3;
+
+ return 0;
+}
+
+static const struct spi_controller_mem_ops qcom_qspi_mem_ops = {
+ .adjust_op_size = qcom_qspi_adjust_op_size,
+};
+
static int qcom_qspi_probe(struct platform_device *pdev)
{
int ret;
if (of_property_read_bool(pdev->dev.of_node, "iommus"))
master->can_dma = qcom_qspi_can_dma;
master->auto_runtime_pm = true;
+ master->mem_ops = &qcom_qspi_mem_ops;
ret = devm_pm_opp_set_clkname(&pdev->dev, "core");
if (ret)
},
};
-FBTFT_REGISTER_DRIVER(DRVNAME, "ilitek,ili9341", &display);
+FBTFT_REGISTER_SPI_DRIVER(DRVNAME, "ilitek", "ili9341", &display);
MODULE_ALIAS("spi:" DRVNAME);
MODULE_ALIAS("platform:" DRVNAME);
commit |= SME_WEP_FLAG;
}
if (enc->key_len) {
- memcpy(&key->key_val[0], &enc->key[0], enc->key_len);
- key->key_len = enc->key_len;
+ int key_len = clamp_val(enc->key_len, 0, IW_ENCODING_TOKEN_MAX);
+
+ memcpy(&key->key_val[0], &enc->key[0], key_len);
+ key->key_len = key_len;
commit |= (SME_WEP_VAL1 << index);
}
break;
tristate "Intel Atom Image Signal Processor Driver"
depends on VIDEO_DEV && INTEL_ATOMISP
depends on PMIC_OPREGION
+ select V4L2_FWNODE
select IOSF_MBI
select VIDEOBUF2_VMALLOC
select VIDEO_V4L2_SUBDEV_API
#include "osdep_intf.h"
#include "usb_ops.h"
+#include <linux/usb.h>
#include <linux/ieee80211.h>
static const u8 P802_1H_OUI[P80211_OUI_LEN] = {0x00, 0x00, 0xf8};
sint i;
struct xmit_buf *pxmitbuf;
struct xmit_frame *pxframe;
+ int j;
memset((unsigned char *)pxmitpriv, 0, sizeof(struct xmit_priv));
spin_lock_init(&pxmitpriv->lock);
_init_queue(&pxmitpriv->pending_xmitbuf_queue);
pxmitpriv->pallocated_xmitbuf =
kmalloc(NR_XMITBUFF * sizeof(struct xmit_buf) + 4, GFP_ATOMIC);
- if (!pxmitpriv->pallocated_xmitbuf) {
- kfree(pxmitpriv->pallocated_frame_buf);
- pxmitpriv->pallocated_frame_buf = NULL;
- return -ENOMEM;
- }
+ if (!pxmitpriv->pallocated_xmitbuf)
+ goto clean_up_frame_buf;
pxmitpriv->pxmitbuf = pxmitpriv->pallocated_xmitbuf + 4 -
((addr_t)(pxmitpriv->pallocated_xmitbuf) & 3);
pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmitbuf;
INIT_LIST_HEAD(&pxmitbuf->list);
pxmitbuf->pallocated_buf =
kmalloc(MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ, GFP_ATOMIC);
- if (!pxmitbuf->pallocated_buf)
- return -ENOMEM;
+ if (!pxmitbuf->pallocated_buf) {
+ j = 0;
+ goto clean_up_alloc_buf;
+ }
pxmitbuf->pbuf = pxmitbuf->pallocated_buf + XMITBUF_ALIGN_SZ -
((addr_t) (pxmitbuf->pallocated_buf) &
(XMITBUF_ALIGN_SZ - 1));
- if (r8712_xmit_resource_alloc(padapter, pxmitbuf))
- return -ENOMEM;
+ if (r8712_xmit_resource_alloc(padapter, pxmitbuf)) {
+ j = 1;
+ goto clean_up_alloc_buf;
+ }
list_add_tail(&pxmitbuf->list,
&(pxmitpriv->free_xmitbuf_queue.queue));
pxmitbuf++;
init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
tasklet_setup(&pxmitpriv->xmit_tasklet, r8712_xmit_bh);
return 0;
+
+clean_up_alloc_buf:
+ if (j) {
+ /* failure happened in r8712_xmit_resource_alloc()
+ * delete extra pxmitbuf->pallocated_buf
+ */
+ kfree(pxmitbuf->pallocated_buf);
+ }
+ for (j = 0; j < i; j++) {
+ int k;
+
+ pxmitbuf--; /* reset pointer */
+ kfree(pxmitbuf->pallocated_buf);
+ for (k = 0; k < 8; k++) /* delete xmit urb's */
+ usb_free_urb(pxmitbuf->pxmit_urb[k]);
+ }
+ kfree(pxmitpriv->pallocated_xmitbuf);
+ pxmitpriv->pallocated_xmitbuf = NULL;
+clean_up_frame_buf:
+ kfree(pxmitpriv->pallocated_frame_buf);
+ pxmitpriv->pallocated_frame_buf = NULL;
+ return -ENOMEM;
}
void _free_xmit_priv(struct xmit_priv *pxmitpriv)
for (i = 0; i < 8; i++) {
pxmitbuf->pxmit_urb[i] = usb_alloc_urb(0, GFP_KERNEL);
if (!pxmitbuf->pxmit_urb[i]) {
+ int k;
+
+ for (k = i - 1; k >= 0; k--) {
+ /* handle allocation errors part way through loop */
+ usb_free_urb(pxmitbuf->pxmit_urb[k]);
+ }
netdev_err(padapter->pnetdev, "pxmitbuf->pxmit_urb[i] == NULL\n");
return -ENOMEM;
}
struct thermal_zone_device *
thermal_zone_device_register_with_trips(const char *type, struct thermal_trip *trips, int num_trips, int mask,
void *devdata, struct thermal_zone_device_ops *ops,
- struct thermal_zone_params *tzp, int passive_delay,
+ const struct thermal_zone_params *tzp, int passive_delay,
int polling_delay)
{
struct thermal_zone_device *tz;
struct thermal_zone_device *thermal_zone_device_register(const char *type, int ntrips, int mask,
void *devdata, struct thermal_zone_device_ops *ops,
- struct thermal_zone_params *tzp, int passive_delay,
+ const struct thermal_zone_params *tzp, int passive_delay,
int polling_delay)
{
return thermal_zone_device_register_with_trips(type, NULL, ntrips, mask,
return 0;
}
-static struct thermal_zone_params *thermal_of_parameters_init(struct device_node *np)
+static void thermal_of_parameters_init(struct device_node *np,
+ struct thermal_zone_params *tzp)
{
- struct thermal_zone_params *tzp;
int coef[2];
int ncoef = ARRAY_SIZE(coef);
int prop, ret;
- tzp = kzalloc(sizeof(*tzp), GFP_KERNEL);
- if (!tzp)
- return ERR_PTR(-ENOMEM);
-
tzp->no_hwmon = true;
if (!of_property_read_u32(np, "sustainable-power", &prop))
tzp->slope = coef[0];
tzp->offset = coef[1];
-
- return tzp;
}
static struct device_node *thermal_of_zone_get_by_name(struct thermal_zone_device *tz)
static void thermal_of_zone_unregister(struct thermal_zone_device *tz)
{
struct thermal_trip *trips = tz->trips;
- struct thermal_zone_params *tzp = tz->tzp;
struct thermal_zone_device_ops *ops = tz->ops;
thermal_zone_device_disable(tz);
thermal_zone_device_unregister(tz);
kfree(trips);
- kfree(tzp);
kfree(ops);
}
{
struct thermal_zone_device *tz;
struct thermal_trip *trips;
- struct thermal_zone_params *tzp;
+ struct thermal_zone_params tzp = {};
struct thermal_zone_device_ops *of_ops;
struct device_node *np;
int delay, pdelay;
goto out_kfree_trips;
}
- tzp = thermal_of_parameters_init(np);
- if (IS_ERR(tzp)) {
- ret = PTR_ERR(tzp);
- pr_err("Failed to initialize parameter from %pOFn: %d\n", np, ret);
- goto out_kfree_trips;
- }
+ thermal_of_parameters_init(np, &tzp);
of_ops->bind = thermal_of_bind;
of_ops->unbind = thermal_of_unbind;
mask = GENMASK_ULL((ntrips) - 1, 0);
tz = thermal_zone_device_register_with_trips(np->name, trips, ntrips,
- mask, data, of_ops, tzp,
+ mask, data, of_ops, &tzp,
pdelay, delay);
if (IS_ERR(tz)) {
ret = PTR_ERR(tz);
pr_err("Failed to register thermal zone %pOFn: %d\n", np, ret);
- goto out_kfree_tzp;
+ goto out_kfree_trips;
}
ret = thermal_zone_device_enable(tz);
return tz;
-out_kfree_tzp:
- kfree(tzp);
out_kfree_trips:
kfree(trips);
out_kfree_of_ops:
gsm->has_devices = false;
}
for (i = NUM_DLCI - 1; i >= 0; i--)
- if (gsm->dlci[i])
+ if (gsm->dlci[i]) {
gsm_dlci_release(gsm->dlci[i]);
+ gsm->dlci[i] = NULL;
+ }
mutex_unlock(&gsm->mutex);
/* Now wipe the queues */
tty_ldisc_flush(gsm->tty);
struct dw8250_port_data *pd = p->private_data;
struct dw8250_data *data = to_dw8250_data(pd);
struct uart_8250_port *up = up_to_u8250p(p);
- u32 reg;
+ u32 reg, old_dlf;
pd->hw_rs485_support = dw8250_detect_rs485_hw(p);
if (pd->hw_rs485_support) {
dev_dbg(p->dev, "Designware UART version %c.%c%c\n",
(reg >> 24) & 0xff, (reg >> 16) & 0xff, (reg >> 8) & 0xff);
+ /* Preserve value written by firmware or bootloader */
+ old_dlf = dw8250_readl_ext(p, DW_UART_DLF);
dw8250_writel_ext(p, DW_UART_DLF, ~0U);
reg = dw8250_readl_ext(p, DW_UART_DLF);
- dw8250_writel_ext(p, DW_UART_DLF, 0);
+ dw8250_writel_ext(p, DW_UART_DLF, old_dlf);
if (reg) {
pd->dlf_size = fls(reg);
if (ret)
return ret;
- /*
- * Set pm_runtime status as ACTIVE so that wakeup_irq gets
- * enabled/disabled from dev_pm_arm_wake_irq during system
- * suspend/resume respectively.
- */
- pm_runtime_set_active(&pdev->dev);
-
if (port->wakeup_irq > 0) {
device_init_wakeup(&pdev->dev, true);
ret = dev_pm_set_dedicated_wake_irq(&pdev->dev,
dma_submit_error(s->cookie_tx)) {
if (s->cfg->regtype == SCIx_RZ_SCIFA_REGTYPE)
/* Switch irq from SCIF to DMA */
- disable_irq(s->irqs[SCIx_TXI_IRQ]);
+ disable_irq_nosync(s->irqs[SCIx_TXI_IRQ]);
s->cookie_tx = 0;
schedule_work(&s->work_tx);
local_irq_restore(flags);
}
-static int __init sifive_serial_console_setup(struct console *co, char *options)
+static int sifive_serial_console_setup(struct console *co, char *options)
{
struct sifive_serial_port *ssp;
int baud = SIFIVE_DEFAULT_BAUD_RATE;
/* #define LOOPBACK */
/* The major and minor device numbers are defined in
- * http://www.lanana.org/docs/device-list/devices-2.6+.txt. For the QE
+ * Documentation/admin-guide/devices.txt. For the QE
* UART, we have major number 204 and minor numbers 46 - 49, which are the
* same as for the CPM2. This decision was made because no Freescale part
* has both a CPM and a QE.
char ch, mbz = 0;
struct tty_ldisc *ld;
- if (!tty_legacy_tiocsti)
+ if (!tty_legacy_tiocsti && !capable(CAP_SYS_ADMIN))
return -EIO;
if ((current->signal->tty != tty) && !capable(CAP_SYS_ADMIN))
static int cdns3_gadget_check_config(struct usb_gadget *gadget)
{
struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
+ struct cdns3_endpoint *priv_ep;
struct usb_ep *ep;
int n_in = 0;
int total;
list_for_each_entry(ep, &gadget->ep_list, ep_list) {
- if (ep->claimed && (ep->address & USB_DIR_IN))
+ priv_ep = ep_to_cdns3_ep(ep);
+ if ((priv_ep->flags & EP_CLAIMED) && (ep->address & USB_DIR_IN))
n_in++;
}
/* novation SoundControl XL */
{ USB_DEVICE(0x1235, 0x0061), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* Focusrite Scarlett Solo USB */
+ { USB_DEVICE(0x1235, 0x8211), .driver_info =
+ USB_QUIRK_DISCONNECT_SUSPEND },
+
/* Huawei 4G LTE module */
{ USB_DEVICE(0x12d1, 0x15bb), .driver_info =
USB_QUIRK_DISCONNECT_SUSPEND },
/*
* We're resetting only the device side because, if we're in host mode,
* XHCI driver will reset the host block. If dwc3 was configured for
- * host-only mode, then we can return early.
+ * host-only mode or current role is host, then we can return early.
*/
- if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST)
+ if (dwc->dr_mode == USB_DR_MODE_HOST || dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST)
return 0;
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
}
- if (dwc->dr_mode == USB_DR_MODE_HOST ||
- dwc->dr_mode == USB_DR_MODE_OTG) {
- reg = dwc3_readl(dwc->regs, DWC3_GUCTL);
-
- /*
- * Enable Auto retry Feature to make the controller operating in
- * Host mode on seeing transaction errors(CRC errors or internal
- * overrun scenerios) on IN transfers to reply to the device
- * with a non-terminating retry ACK (i.e, an ACK transcation
- * packet with Retry=1 & Nump != 0)
- */
- reg |= DWC3_GUCTL_HSTINAUTORETRY;
-
- dwc3_writel(dwc->regs, DWC3_GUCTL, reg);
- }
-
/*
* Must config both number of packets and max burst settings to enable
* RX and/or TX threshold.
#define DWC3_GCTL_GBLHIBERNATIONEN BIT(1)
#define DWC3_GCTL_DSBLCLKGTNG BIT(0)
-/* Global User Control Register */
-#define DWC3_GUCTL_HSTINAUTORETRY BIT(14)
-
/* Global User Control 1 Register */
#define DWC3_GUCTL1_DEV_DECOUPLE_L1L2_EVT BIT(31)
#define DWC3_GUCTL1_TX_IPGAP_LINECHECK_DIS BIT(28)
/*
* A lot of BYT devices lack ACPI resource entries for
- * the GPIOs, add a fallback mapping to the reference
+ * the GPIOs. If the ACPI entry for the GPIO controller
+ * is present add a fallback mapping to the reference
* design GPIOs which all boards seem to use.
*/
- gpiod_add_lookup_table(&platform_bytcr_gpios);
+ if (acpi_dev_present("INT33FC", NULL, -1))
+ gpiod_add_lookup_table(&platform_bytcr_gpios);
/*
* These GPIOs will turn on the USB2 PHY. Note that we have to
goto done;
status = bind(config);
+
+ if (status == 0)
+ status = usb_gadget_check_config(cdev->gadget);
+
if (status < 0) {
while (!list_empty(&config->functions)) {
struct usb_function *f;
dev->eps_num = i;
spin_unlock_irqrestore(&dev->lock, flags);
- /* Matches kref_put() in gadget_unbind(). */
- kref_get(&dev->count);
-
ret = raw_queue_event(dev, USB_RAW_EVENT_CONNECT, 0, NULL);
- if (ret < 0)
+ if (ret < 0) {
dev_err(&gadget->dev, "failed to queue event\n");
+ set_gadget_data(gadget, NULL);
+ return ret;
+ }
+ /* Matches kref_put() in gadget_unbind(). */
+ kref_get(&dev->count);
return ret;
}
*/
if (gadget->connected)
ret = usb_gadget_connect_locked(gadget);
- mutex_unlock(&gadget->udc->connect_lock);
unlock:
mutex_unlock(&gadget->udc->connect_lock);
int err;
xudc->genpd_dev_device = dev_pm_domain_attach_by_name(dev, "dev");
- if (IS_ERR_OR_NULL(xudc->genpd_dev_device)) {
- err = PTR_ERR(xudc->genpd_dev_device) ? : -ENODATA;
+ if (IS_ERR(xudc->genpd_dev_device)) {
+ err = PTR_ERR(xudc->genpd_dev_device);
dev_err(dev, "failed to get device power domain: %d\n", err);
return err;
}
xudc->genpd_dev_ss = dev_pm_domain_attach_by_name(dev, "ss");
- if (IS_ERR_OR_NULL(xudc->genpd_dev_ss)) {
- err = PTR_ERR(xudc->genpd_dev_ss) ? : -ENODATA;
+ if (IS_ERR(xudc->genpd_dev_ss)) {
+ err = PTR_ERR(xudc->genpd_dev_ss);
dev_err(dev, "failed to get SuperSpeed power domain: %d\n", err);
return err;
}
else
at91_start_clock(ohci_at91);
- ohci_resume(hcd, false);
+ /*
+ * According to the comment in ohci_hcd_at91_drv_suspend()
+ * we need to do a reset if the 48Mhz clock was stopped,
+ * that is, if ohci_at91->wakeup is clear. Tell ohci_resume()
+ * to reset in this case by setting its "hibernated" flag.
+ */
+ ohci_resume(hcd, !ohci_at91->wakeup);
return 0;
}
}
device_init_wakeup(dev, true);
+ dma_set_max_seg_size(dev, UINT_MAX);
xhci = hcd_to_xhci(hcd);
xhci->main_hcd = hcd;
pdev->device == 0x3432)
xhci->quirks |= XHCI_BROKEN_STREAMS;
- if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == 0x3483) {
+ if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == 0x3483)
xhci->quirks |= XHCI_LPM_SUPPORT;
- xhci->quirks |= XHCI_EP_CTX_BROKEN_DCS;
- }
if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
pdev->device == PCI_DEVICE_ID_ASMEDIA_1042_XHCI) {
struct xhci_ring *ep_ring;
struct xhci_command *cmd;
struct xhci_segment *new_seg;
- struct xhci_segment *halted_seg = NULL;
union xhci_trb *new_deq;
int new_cycle;
- union xhci_trb *halted_trb;
- int index = 0;
dma_addr_t addr;
u64 hw_dequeue;
bool cycle_found = false;
hw_dequeue = xhci_get_hw_deq(xhci, dev, ep_index, stream_id);
new_seg = ep_ring->deq_seg;
new_deq = ep_ring->dequeue;
-
- /*
- * Quirk: xHC write-back of the DCS field in the hardware dequeue
- * pointer is wrong - use the cycle state of the TRB pointed to by
- * the dequeue pointer.
- */
- if (xhci->quirks & XHCI_EP_CTX_BROKEN_DCS &&
- !(ep->ep_state & EP_HAS_STREAMS))
- halted_seg = trb_in_td(xhci, td->start_seg,
- td->first_trb, td->last_trb,
- hw_dequeue & ~0xf, false);
- if (halted_seg) {
- index = ((dma_addr_t)(hw_dequeue & ~0xf) - halted_seg->dma) /
- sizeof(*halted_trb);
- halted_trb = &halted_seg->trbs[index];
- new_cycle = halted_trb->generic.field[3] & 0x1;
- xhci_dbg(xhci, "Endpoint DCS = %d TRB index = %d cycle = %d\n",
- (u8)(hw_dequeue & 0x1), index, new_cycle);
- } else {
- new_cycle = hw_dequeue & 0x1;
- }
+ new_cycle = hw_dequeue & 0x1;
/*
* We want to find the pointer, segment and cycle state of the new trb
int err;
tegra->genpd_dev_host = dev_pm_domain_attach_by_name(dev, "xusb_host");
- if (IS_ERR_OR_NULL(tegra->genpd_dev_host)) {
- err = PTR_ERR(tegra->genpd_dev_host) ? : -ENODATA;
+ if (IS_ERR(tegra->genpd_dev_host)) {
+ err = PTR_ERR(tegra->genpd_dev_host);
dev_err(dev, "failed to get host pm-domain: %d\n", err);
return err;
}
tegra->genpd_dev_ss = dev_pm_domain_attach_by_name(dev, "xusb_ss");
- if (IS_ERR_OR_NULL(tegra->genpd_dev_ss)) {
- err = PTR_ERR(tegra->genpd_dev_ss) ? : -ENODATA;
+ if (IS_ERR(tegra->genpd_dev_ss)) {
+ err = PTR_ERR(tegra->genpd_dev_ss);
dev_err(dev, "failed to get superspeed pm-domain: %d\n", err);
return err;
}
switch (test_pid) {
case TEST_SE0_NAK_PID:
ret = ehset_prepare_port_for_testing(hub_udev, portnum);
- if (!ret)
+ if (ret < 0)
break;
ret = usb_control_msg_send(hub_udev, 0, USB_REQ_SET_FEATURE,
USB_RT_PORT, USB_PORT_FEAT_TEST,
break;
case TEST_J_PID:
ret = ehset_prepare_port_for_testing(hub_udev, portnum);
- if (!ret)
+ if (ret < 0)
break;
ret = usb_control_msg_send(hub_udev, 0, USB_REQ_SET_FEATURE,
USB_RT_PORT, USB_PORT_FEAT_TEST,
break;
case TEST_K_PID:
ret = ehset_prepare_port_for_testing(hub_udev, portnum);
- if (!ret)
+ if (ret < 0)
break;
ret = usb_control_msg_send(hub_udev, 0, USB_REQ_SET_FEATURE,
USB_RT_PORT, USB_PORT_FEAT_TEST,
break;
case TEST_PACKET_PID:
ret = ehset_prepare_port_for_testing(hub_udev, portnum);
- if (!ret)
+ if (ret < 0)
break;
ret = usb_control_msg_send(hub_udev, 0, USB_REQ_SET_FEATURE,
USB_RT_PORT, USB_PORT_FEAT_TEST,
#define QUECTEL_PRODUCT_EM061K_LTA 0x0123
#define QUECTEL_PRODUCT_EM061K_LMS 0x0124
#define QUECTEL_PRODUCT_EC25 0x0125
+#define QUECTEL_PRODUCT_EM060K_128 0x0128
#define QUECTEL_PRODUCT_EG91 0x0191
#define QUECTEL_PRODUCT_EG95 0x0195
#define QUECTEL_PRODUCT_BG96 0x0296
#define QUECTEL_PRODUCT_RM520N 0x0801
#define QUECTEL_PRODUCT_EC200U 0x0901
#define QUECTEL_PRODUCT_EC200S_CN 0x6002
+#define QUECTEL_PRODUCT_EC200A 0x6005
#define QUECTEL_PRODUCT_EM061K_LWW 0x6008
#define QUECTEL_PRODUCT_EM061K_LCN 0x6009
#define QUECTEL_PRODUCT_EC200T 0x6026
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0x00, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0xff, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0x00, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0xff, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0900, 0xff, 0, 0), /* RM500U-CN */
.driver_info = ZLP },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200A, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200U, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) },
{ USB_DEVICE(0x0a21, 0x8001) } /* MMT-7305WW */
DEVICE(carelink, CARELINK_IDS);
-/* ZIO Motherboard USB driver */
-#define ZIO_IDS() \
- { USB_DEVICE(0x1CBE, 0x0103) }
-DEVICE(zio, ZIO_IDS);
-
-/* Funsoft Serial USB driver */
-#define FUNSOFT_IDS() \
- { USB_DEVICE(0x1404, 0xcddc) }
-DEVICE(funsoft, FUNSOFT_IDS);
-
/* Infineon Flashloader driver */
#define FLASHLOADER_IDS() \
{ USB_DEVICE_INTERFACE_CLASS(0x058b, 0x0041, USB_CLASS_CDC_DATA) }, \
{ USB_DEVICE(0x8087, 0x0801) }
DEVICE(flashloader, FLASHLOADER_IDS);
+/* Funsoft Serial USB driver */
+#define FUNSOFT_IDS() \
+ { USB_DEVICE(0x1404, 0xcddc) }
+DEVICE(funsoft, FUNSOFT_IDS);
+
/* Google Serial USB SubClass */
#define GOOGLE_IDS() \
{ USB_VENDOR_AND_INTERFACE_INFO(0x18d1, \
0x01) }
DEVICE(google, GOOGLE_IDS);
+/* HP4x (48/49) Generic Serial driver */
+#define HP4X_IDS() \
+ { USB_DEVICE(0x03f0, 0x0121) }
+DEVICE(hp4x, HP4X_IDS);
+
+/* KAUFMANN RKS+CAN VCP */
+#define KAUFMANN_IDS() \
+ { USB_DEVICE(0x16d0, 0x0870) }
+DEVICE(kaufmann, KAUFMANN_IDS);
+
/* Libtransistor USB console */
#define LIBTRANSISTOR_IDS() \
{ USB_DEVICE(0x1209, 0x8b00) }
DEVICE(libtransistor, LIBTRANSISTOR_IDS);
-/* ViVOpay USB Serial Driver */
-#define VIVOPAY_IDS() \
- { USB_DEVICE(0x1d5f, 0x1004) } /* ViVOpay 8800 */
-DEVICE(vivopay, VIVOPAY_IDS);
-
/* Motorola USB Phone driver */
#define MOTO_IDS() \
{ USB_DEVICE(0x05c6, 0x3197) }, /* unknown Motorola phone */ \
{ USB_DEVICE(0x09d7, 0x0100) } /* NovAtel FlexPack GPS */
DEVICE_N(novatel_gps, NOVATEL_IDS, 3);
-/* HP4x (48/49) Generic Serial driver */
-#define HP4X_IDS() \
- { USB_DEVICE(0x03f0, 0x0121) }
-DEVICE(hp4x, HP4X_IDS);
+/* Siemens USB/MPI adapter */
+#define SIEMENS_IDS() \
+ { USB_DEVICE(0x908, 0x0004) }
+DEVICE(siemens_mpi, SIEMENS_IDS);
/* Suunto ANT+ USB Driver */
#define SUUNTO_IDS() \
{ USB_DEVICE(0x0fcf, 0x1009) } /* Dynastream ANT USB-m Stick */
DEVICE(suunto, SUUNTO_IDS);
-/* Siemens USB/MPI adapter */
-#define SIEMENS_IDS() \
- { USB_DEVICE(0x908, 0x0004) }
-DEVICE(siemens_mpi, SIEMENS_IDS);
+/* ViVOpay USB Serial Driver */
+#define VIVOPAY_IDS() \
+ { USB_DEVICE(0x1d5f, 0x1004) } /* ViVOpay 8800 */
+DEVICE(vivopay, VIVOPAY_IDS);
+
+/* ZIO Motherboard USB driver */
+#define ZIO_IDS() \
+ { USB_DEVICE(0x1CBE, 0x0103) }
+DEVICE(zio, ZIO_IDS);
/* All of the above structures mushed into two lists */
static struct usb_serial_driver * const serial_drivers[] = {
&carelink_device,
- &zio_device,
- &funsoft_device,
&flashloader_device,
+ &funsoft_device,
&google_device,
+ &hp4x_device,
+ &kaufmann_device,
&libtransistor_device,
- &vivopay_device,
&moto_modem_device,
&motorola_tetra_device,
&nokia_device,
&novatel_gps_device,
- &hp4x_device,
- &suunto_device,
&siemens_mpi_device,
+ &suunto_device,
+ &vivopay_device,
+ &zio_device,
NULL
};
static const struct usb_device_id id_table[] = {
CARELINK_IDS(),
- ZIO_IDS(),
- FUNSOFT_IDS(),
FLASHLOADER_IDS(),
+ FUNSOFT_IDS(),
GOOGLE_IDS(),
+ HP4X_IDS(),
+ KAUFMANN_IDS(),
LIBTRANSISTOR_IDS(),
- VIVOPAY_IDS(),
MOTO_IDS(),
MOTOROLA_TETRA_IDS(),
NOKIA_IDS(),
NOVATEL_IDS(),
- HP4X_IDS(),
- SUUNTO_IDS(),
SIEMENS_IDS(),
+ SUUNTO_IDS(),
+ VIVOPAY_IDS(),
+ ZIO_IDS(),
{ },
};
MODULE_DEVICE_TABLE(usb, id_table);
{
struct typec_port *port = to_typec_port(dev);
struct usb_power_delivery **pds;
- struct usb_power_delivery *pd;
- int ret = 0;
+ int i, ret = 0;
if (!port->ops || !port->ops->pd_get)
return -EOPNOTSUPP;
if (!pds)
return 0;
- for (pd = pds[0]; pd; pd++) {
- if (pd == port->pd)
- ret += sysfs_emit(buf + ret, "[%s] ", dev_name(&pd->dev));
+ for (i = 0; pds[i]; i++) {
+ if (pds[i] == port->pd)
+ ret += sysfs_emit_at(buf, ret, "[%s] ", dev_name(&pds[i]->dev));
else
- ret += sysfs_emit(buf + ret, "%s ", dev_name(&pd->dev));
+ ret += sysfs_emit_at(buf, ret, "%s ", dev_name(&pds[i]->dev));
}
buf[ret - 1] = '\n';
return ERR_PTR(ret);
}
+ port->pd = cap->pd;
+
ret = device_add(&port->dev);
if (ret) {
dev_err(parent, "failed to register port (%d)\n", ret);
return ERR_PTR(ret);
}
- ret = typec_port_set_usb_power_delivery(port, cap->pd);
+ ret = usb_power_delivery_link_device(port->pd, &port->dev);
if (ret) {
dev_err(&port->dev, "failed to link pd\n");
device_unregister(&port->dev);
platform_set_drvdata(pdev, tcpm);
tcpm->tcpc.fwnode = device_get_named_child_node(tcpm->dev, "connector");
- if (IS_ERR(tcpm->tcpc.fwnode))
- return PTR_ERR(tcpm->tcpc.fwnode);
+ if (!tcpm->tcpc.fwnode)
+ return -EINVAL;
tcpm->tcpm_port = tcpm_register_port(tcpm->dev, &tcpm->tcpc);
if (IS_ERR(tcpm->tcpm_port)) {
if (!con->partner)
return;
+ typec_set_mode(con->port, TYPEC_STATE_SAFE);
+
ucsi_unregister_partner_pdos(con);
ucsi_unregister_altmodes(con, UCSI_RECIPIENT_SOP);
typec_unregister_partner(con->partner);
UCSI_CONSTAT_PARTNER_FLAG_USB)
typec_set_mode(con->port, TYPEC_STATE_USB);
}
- } else {
- typec_set_mode(con->port, TYPEC_STATE_SAFE);
}
/* Only notify USB controller if partner supports USB data */
unsigned int irq_epoch; /* If eoi_cpu valid: irq_epoch of event */
u64 eoi_time; /* Time in jiffies when to EOI. */
raw_spinlock_t lock;
+ bool is_static; /* Is event channel static */
union {
unsigned short virq;
irq_free_desc(irq);
}
-static void xen_evtchn_close(evtchn_port_t port)
-{
- struct evtchn_close close;
-
- close.port = port;
- if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
- BUG();
-}
-
/* Not called for lateeoi events. */
static void event_handler_exit(struct irq_info *info)
{
unsigned int cpu = cpu_from_irq(irq);
struct xenbus_device *dev;
- xen_evtchn_close(evtchn);
+ if (!info->is_static)
+ xen_evtchn_close(evtchn);
switch (type_from_irq(irq)) {
case IRQT_VIRQ:
}
EXPORT_SYMBOL_GPL(xen_set_irq_priority);
-int evtchn_make_refcounted(evtchn_port_t evtchn)
+int evtchn_make_refcounted(evtchn_port_t evtchn, bool is_static)
{
int irq = get_evtchn_to_irq(evtchn);
struct irq_info *info;
WARN_ON(info->refcnt != -1);
info->refcnt = 1;
+ info->is_static = is_static;
return 0;
}
return 0;
}
-static int evtchn_bind_to_user(struct per_user_data *u, evtchn_port_t port)
+static int evtchn_bind_to_user(struct per_user_data *u, evtchn_port_t port,
+ bool is_static)
{
struct user_evtchn *evtchn;
- struct evtchn_close close;
int rc = 0;
/*
if (rc < 0)
goto err;
- rc = evtchn_make_refcounted(port);
+ rc = evtchn_make_refcounted(port, is_static);
return rc;
err:
/* bind failed, should close the port now */
- close.port = port;
- if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
- BUG();
+ if (!is_static)
+ xen_evtchn_close(port);
+
del_evtchn(u, evtchn);
return rc;
}
if (rc != 0)
break;
- rc = evtchn_bind_to_user(u, bind_virq.port);
+ rc = evtchn_bind_to_user(u, bind_virq.port, false);
if (rc == 0)
rc = bind_virq.port;
break;
if (rc != 0)
break;
- rc = evtchn_bind_to_user(u, bind_interdomain.local_port);
+ rc = evtchn_bind_to_user(u, bind_interdomain.local_port, false);
if (rc == 0)
rc = bind_interdomain.local_port;
break;
if (rc != 0)
break;
- rc = evtchn_bind_to_user(u, alloc_unbound.port);
+ rc = evtchn_bind_to_user(u, alloc_unbound.port, false);
if (rc == 0)
rc = alloc_unbound.port;
break;
break;
}
+ case IOCTL_EVTCHN_BIND_STATIC: {
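+ /* Bind an already-allocated (static) event channel; static ports are not closed again when the binding is dropped. */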
+ struct ioctl_evtchn_bind bind;
+ struct user_evtchn *evtchn;
+
+ rc = -EFAULT;
+ if (copy_from_user(&bind, uarg, sizeof(bind)))
+ break;
+
+ rc = -EISCONN;
+ evtchn = find_evtchn(u, bind.port);
+ if (evtchn)
+ break;
+
+ rc = evtchn_bind_to_user(u, bind.port, true);
+ break;
+ }
+
case IOCTL_EVTCHN_NOTIFY: {
struct ioctl_evtchn_notify notify;
struct user_evtchn *evtchn;
static void gnttab_handle_deferred(struct timer_list *);
static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred);
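+/* Diagnostic counters for deferred/leaked grant ends; free_per_iteration caps how many deferred entries each timer pass frees (0 means no limit). */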
+static atomic64_t deferred_count;
+static atomic64_t leaked_count;
+static unsigned int free_per_iteration = 10;
+module_param(free_per_iteration, uint, 0600);
+
static void gnttab_handle_deferred(struct timer_list *unused)
{
- unsigned int nr = 10;
+ unsigned int nr = READ_ONCE(free_per_iteration);
+ const bool ignore_limit = nr == 0;
struct deferred_entry *first = NULL;
unsigned long flags;
+ size_t freed = 0;
spin_lock_irqsave(&gnttab_list_lock, flags);
- while (nr--) {
+ while ((ignore_limit || nr--) && !list_empty(&deferred_list)) {
struct deferred_entry *entry
= list_first_entry(&deferred_list,
struct deferred_entry, list);
list_del(&entry->list);
spin_unlock_irqrestore(&gnttab_list_lock, flags);
if (_gnttab_end_foreign_access_ref(entry->ref)) {
+ uint64_t ret = atomic64_dec_return(&deferred_count);
+
put_free_entry(entry->ref);
- pr_debug("freeing g.e. %#x (pfn %#lx)\n",
- entry->ref, page_to_pfn(entry->page));
+ pr_debug("freeing g.e. %#x (pfn %#lx), %llu remaining\n",
+ entry->ref, page_to_pfn(entry->page),
+ (unsigned long long)ret);
put_page(entry->page);
+ freed++;
kfree(entry);
entry = NULL;
} else {
spin_lock_irqsave(&gnttab_list_lock, flags);
if (entry)
list_add_tail(&entry->list, &deferred_list);
- else if (list_empty(&deferred_list))
- break;
}
- if (!list_empty(&deferred_list) && !timer_pending(&deferred_timer)) {
+ if (list_empty(&deferred_list))
+ WARN_ON(atomic64_read(&deferred_count));
+ else if (!timer_pending(&deferred_timer)) {
deferred_timer.expires = jiffies + HZ;
add_timer(&deferred_timer);
}
spin_unlock_irqrestore(&gnttab_list_lock, flags);
+ pr_debug("Freed %zu references", freed);
}
static void gnttab_add_deferred(grant_ref_t ref, struct page *page)
{
struct deferred_entry *entry;
gfp_t gfp = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
- const char *what = KERN_WARNING "leaking";
+ uint64_t leaked, deferred;
entry = kmalloc(sizeof(*entry), gfp);
if (!page) {
add_timer(&deferred_timer);
}
spin_unlock_irqrestore(&gnttab_list_lock, flags);
- what = KERN_DEBUG "deferring";
+ deferred = atomic64_inc_return(&deferred_count);
+ leaked = atomic64_read(&leaked_count);
+ pr_debug("deferring g.e. %#x (pfn %#lx) (total deferred %llu, total leaked %llu)\n",
+ ref, page ? page_to_pfn(page) : -1, deferred, leaked);
+ } else {
+ deferred = atomic64_read(&deferred_count);
+ leaked = atomic64_inc_return(&leaked_count);
+ pr_warn("leaking g.e. %#x (pfn %#lx) (total deferred %llu, total leaked %llu)\n",
+ ref, page ? page_to_pfn(page) : -1, deferred, leaked);
}
- printk("%s g.e. %#x (pfn %#lx)\n",
- what, ref, page ? page_to_pfn(page) : -1);
}
int gnttab_try_end_foreign_access(grant_ref_t ref)
static int __init xenbus_probe_initcall(void)
{
+ if (!xen_domain())
+ return -ENODEV;
+
/*
* Probe XenBus here in the XS_PV case, and also XS_HVM unless we
* need to wait for the platform PCI device to come up or
* NOTE: these are set after open so only reflect 9p client not
* underlying file system on server.
*/
-static inline void v9fs_fid_add_modes(struct p9_fid *fid, int s_flags,
- int s_cache, unsigned int f_flags)
+static inline void v9fs_fid_add_modes(struct p9_fid *fid, unsigned int s_flags,
+ unsigned int s_cache, unsigned int f_flags)
{
if (fid->qid.type != P9_QTFILE)
return;
(s_flags & V9FS_DIRECT_IO) || (f_flags & O_DIRECT)) {
fid->mode |= P9L_DIRECT; /* no read or write cache */
} else if ((!(s_cache & CACHE_WRITEBACK)) ||
- (f_flags & O_DSYNC) | (s_flags & V9FS_SYNC)) {
+ (f_flags & O_DSYNC) || (s_flags & V9FS_SYNC)) {
fid->mode |= P9L_NOWRITECACHE;
}
}
p9_client_begin_disconnect(v9ses->clnt);
}
-extern int v9fs_error_init(void);
-
static struct kobject *v9fs_kobj;
#ifdef CONFIG_9P_FSCACHE
struct v9fs_session_info {
/* options */
- unsigned char flags;
+ unsigned int flags;
unsigned char nodev;
unsigned short debug;
unsigned int afid;
struct p9_fid *fid;
__le32 version;
loff_t i_size;
- int retval = 0;
+ int retval = 0, put_err;
fid = filp->private_data;
p9_debug(P9_DEBUG_VFS, "inode: %p filp: %p fid: %d\n",
spin_lock(&inode->i_lock);
hlist_del(&fid->ilist);
spin_unlock(&inode->i_lock);
- retval = p9_fid_put(fid);
+ put_err = p9_fid_put(fid);
+ retval = retval < 0 ? retval : put_err;
}
if ((filp->f_mode & FMODE_WRITE)) {
p9_debug(P9_DEBUG_MMAP, "filp :%p\n", filp);
if (!(v9ses->cache & CACHE_WRITEBACK)) {
- p9_debug(P9_DEBUG_CACHE, "(no mmap mode)");
- if (vma->vm_flags & VM_MAYSHARE)
- return -ENODEV;
- invalidate_inode_pages2(filp->f_mapping);
+ p9_debug(P9_DEBUG_CACHE, "(read-only mmap mode)");
return generic_file_readonly_mmap(filp, vma);
}
{
int ret;
- ret = 0;
switch (uflags&3) {
default:
case O_RDONLY:
p9_debug(P9_DEBUG_VFS, "name %pd\n", dentry);
- err = 0;
name = dentry->d_name.name;
dfid = v9fs_parent_fid(dentry);
if (IS_ERR(dfid)) {
if (!(flags & O_CREAT) || d_really_is_positive(dentry))
return finish_no_open(file, res);
- err = 0;
-
v9ses = v9fs_inode2v9ses(dir);
perm = unixmode2p9mode(v9ses, mode);
p9_omode = v9fs_uflags2omode(flags, v9fs_proto_dotu(v9ses));
return -EINVAL;
p9_debug(P9_DEBUG_VFS, "\n");
- retval = 0;
old_inode = d_inode(old_dentry);
new_inode = d_inode(new_dentry);
v9ses = v9fs_inode2v9ses(old_inode);
if (retval)
return retval;
- retval = -EPERM;
v9ses = v9fs_dentry2v9ses(dentry);
if (iattr->ia_valid & ATTR_FILE) {
fid = iattr->ia_file->private_data;
struct posix_acl *dacl = NULL, *pacl = NULL;
p9_debug(P9_DEBUG_VFS, "name %pd\n", dentry);
- err = 0;
v9ses = v9fs_inode2v9ses(dir);
omode |= S_IFDIR;
# SPDX-License-Identifier: GPL-2.0-only
-config AUTOFS4_FS
- tristate "Old Kconfig name for Kernel automounter support"
- select AUTOFS_FS
- help
- This name exists for people to just automatically pick up the
- new name of the autofs Kconfig option. All it does is select
- the new option name.
-
- It will go away in a release or two as people have
- transitioned to just plain AUTOFS_FS.
-
config AUTOFS_FS
tristate "Kernel automounter support (supports v3, v4 and v5)"
- default n
help
The automounter is a tool to automatically mount remote file systems
on demand. This implementation is partially kernel-based to reduce
* used yet since their free space will be released as soon as the transaction
* commits.
*/
-u64 add_new_free_space(struct btrfs_block_group *block_group, u64 start, u64 end)
+int add_new_free_space(struct btrfs_block_group *block_group, u64 start, u64 end,
+ u64 *total_added_ret)
{
struct btrfs_fs_info *info = block_group->fs_info;
- u64 extent_start, extent_end, size, total_added = 0;
+ u64 extent_start, extent_end, size;
int ret;
+ if (total_added_ret)
+ *total_added_ret = 0;
+
while (start < end) {
ret = find_first_extent_bit(&info->excluded_extents, start,
&extent_start, &extent_end,
start = extent_end + 1;
} else if (extent_start > start && extent_start < end) {
size = extent_start - start;
- total_added += size;
ret = btrfs_add_free_space_async_trimmed(block_group,
start, size);
- BUG_ON(ret); /* -ENOMEM or logic error */
+ if (ret)
+ return ret;
+ if (total_added_ret)
+ *total_added_ret += size;
start = extent_end + 1;
} else {
break;
if (start < end) {
size = end - start;
- total_added += size;
ret = btrfs_add_free_space_async_trimmed(block_group, start,
size);
- BUG_ON(ret); /* -ENOMEM or logic error */
+ if (ret)
+ return ret;
+ if (total_added_ret)
+ *total_added_ret += size;
}
- return total_added;
+ return 0;
}
/*
if (key.type == BTRFS_EXTENT_ITEM_KEY ||
key.type == BTRFS_METADATA_ITEM_KEY) {
- total_found += add_new_free_space(block_group, last,
- key.objectid);
+ u64 space_added;
+
+ ret = add_new_free_space(block_group, last, key.objectid,
+ &space_added);
+ if (ret)
+ goto out;
+ total_found += space_added;
if (key.type == BTRFS_METADATA_ITEM_KEY)
last = key.objectid +
fs_info->nodesize;
}
path->slots[0]++;
}
- ret = 0;
-
- total_found += add_new_free_space(block_group, last,
- block_group->start + block_group->length);
+ ret = add_new_free_space(block_group, last,
+ block_group->start + block_group->length,
+ NULL);
out:
btrfs_free_path(path);
return ret;
btrfs_free_excluded_extents(cache);
} else if (cache->used == 0) {
cache->cached = BTRFS_CACHE_FINISHED;
- add_new_free_space(cache, cache->start,
- cache->start + cache->length);
+ ret = add_new_free_space(cache, cache->start,
+ cache->start + cache->length, NULL);
btrfs_free_excluded_extents(cache);
+ if (ret)
+ goto error;
}
ret = btrfs_add_block_group_cache(info, cache);
return ERR_PTR(ret);
}
- add_new_free_space(cache, chunk_offset, chunk_offset + size);
-
+ ret = add_new_free_space(cache, chunk_offset, chunk_offset + size, NULL);
btrfs_free_excluded_extents(cache);
+ if (ret) {
+ btrfs_put_block_group(cache);
+ return ERR_PTR(ret);
+ }
/*
* Ensure the corresponding space_info object is created and
void btrfs_put_caching_control(struct btrfs_caching_control *ctl);
struct btrfs_caching_control *btrfs_get_caching_control(
struct btrfs_block_group *cache);
-u64 add_new_free_space(struct btrfs_block_group *block_group,
- u64 start, u64 end);
+int add_new_free_space(struct btrfs_block_group *block_group,
+ u64 start, u64 end, u64 *total_added_ret);
struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
struct btrfs_fs_info *fs_info,
const u64 chunk_offset);
}
read_unlock(&fs_info->global_root_lock);
+ if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE)) {
+ num_bytes += btrfs_root_used(&fs_info->block_group_root->root_item);
+ min_items++;
+ }
+
/*
* But we also want to reserve enough space so we can do the fallback
* global reserve for an unlink, which is an additional
* For devices supporting discard turn on discard=async automatically,
* unless it's already set or disabled. This could be turned off by
* nodiscard for the same mount.
+ *
+ * The zoned mode piggybacks on the discard functionality for
+ * resetting a zone. There is no reason to delay the zone reset as it is
+ * fast enough. So, do not enable async discard for zoned mode.
*/
if (!(btrfs_test_opt(fs_info, DISCARD_SYNC) ||
btrfs_test_opt(fs_info, DISCARD_ASYNC) ||
btrfs_test_opt(fs_info, NODISCARD)) &&
- fs_info->fs_devices->discardable) {
+ fs_info->fs_devices->discardable &&
+ !btrfs_is_zoned(fs_info)) {
btrfs_set_and_info(fs_info, DISCARD_ASYNC,
"auto enabling async discard");
}
if (prev_bit == 0 && bit == 1) {
extent_start = offset;
} else if (prev_bit == 1 && bit == 0) {
- total_found += add_new_free_space(block_group,
- extent_start,
- offset);
+ u64 space_added;
+
+ ret = add_new_free_space(block_group, extent_start,
+ offset, &space_added);
+ if (ret)
+ goto out;
+ total_found += space_added;
if (total_found > CACHING_CTL_WAKE_UP) {
total_found = 0;
wake_up(&caching_ctl->wait);
}
}
if (prev_bit == 1) {
- total_found += add_new_free_space(block_group, extent_start,
- end);
+ ret = add_new_free_space(block_group, extent_start, end, NULL);
+ if (ret)
+ goto out;
extent_count++;
}
end = block_group->start + block_group->length;
while (1) {
+ u64 space_added;
+
ret = btrfs_next_item(root, path);
if (ret < 0)
goto out;
ASSERT(key.type == BTRFS_FREE_SPACE_EXTENT_KEY);
ASSERT(key.objectid < end && key.objectid + key.offset <= end);
- total_found += add_new_free_space(block_group, key.objectid,
- key.objectid + key.offset);
+ ret = add_new_free_space(block_group, key.objectid,
+ key.objectid + key.offset, &space_added);
+ if (ret)
+ goto out;
+ total_found += space_added;
if (total_found > CACHING_CTL_WAKE_UP) {
total_found = 0;
wake_up(&caching_ctl->wait);
trans = start_transaction(root, 0, TRANS_ATTACH,
BTRFS_RESERVE_NO_FLUSH, true);
- if (trans == ERR_PTR(-ENOENT))
- btrfs_wait_for_commit(root->fs_info, 0);
+ if (trans == ERR_PTR(-ENOENT)) {
+ int ret;
+
+ ret = btrfs_wait_for_commit(root->fs_info, 0);
+ if (ret)
+ return ERR_PTR(ret);
+ }
return trans;
}
}
wait_for_commit(cur_trans, TRANS_STATE_COMPLETED);
+ ret = cur_trans->aborted;
btrfs_put_transaction(cur_trans);
out:
return ret;
return -EINVAL;
}
+ btrfs_clear_and_info(info, DISCARD_ASYNC,
+ "zoned: async discard ignored and disabled for zoned mode");
+
return 0;
}
struct ceph_mds_client *mdsc =
container_of(m, struct ceph_mds_client, metric);
- if (mdsc->stopping)
+ if (mdsc->stopping || disable_send_metrics)
return;
if (!m->session || !check_session_state(m->session)) {
struct file *file = (struct file *)(v & ~3);
if (file && (file->f_mode & FMODE_ATOMIC_POS)) {
- if (file_count(file) > 1) {
- v |= FDPUT_POS_UNLOCK;
- mutex_lock(&file->f_pos_lock);
- }
+ v |= FDPUT_POS_UNLOCK;
+ mutex_lock(&file->f_pos_lock);
}
return v;
}
if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
CLOSE_STATEID(stateid))
return status;
- if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid))
- return status;
spin_lock(&cl->cl_lock);
s = find_stateid_locked(cl, stateid);
if (!s)
return -EINVAL;
}
-static struct nls_table *find_nls(char *charset)
+static struct nls_table *find_nls(const char *charset)
{
struct nls_table *nls;
spin_lock(&nls_lock);
return nls;
}
-struct nls_table *load_nls(char *charset)
+struct nls_table *load_nls(const char *charset)
{
return try_then_request_module(find_nls(charset), "nls_%s", charset);
}
ovl_trusted_xattr_handlers;
sb->s_fs_info = ofs;
sb->s_flags |= SB_POSIXACL;
- sb->s_iflags |= SB_I_SKIP_SYNC;
+ sb->s_iflags |= SB_I_SKIP_SYNC | SB_I_IMA_UNVERIFIABLE_SIGNATURE;
err = -ENOMEM;
root_dentry = ovl_get_root(sb, ctx->upper.dentry, oe);
u64 *ppos, bool encrypted)
{
unsigned long pfn, offset;
- size_t nr_bytes;
+ ssize_t nr_bytes;
ssize_t read = 0, tmp;
int idx;
unsigned long chans_need_reconnect;
/* ========= end: protected by chan_lock ======== */
struct cifs_ses *dfs_root_ses;
+ struct nls_table *local_nls;
};
static inline bool
}
spin_unlock(&server->srv_lock);
- nls_codepage = load_nls_default();
+ nls_codepage = ses->local_nls;
/*
* need to prevent multiple threads trying to simultaneously
rc = -EAGAIN;
}
- unload_nls(nls_codepage);
return rc;
}
CIFS_MAX_PASSWORD_LEN))
return 0;
}
+
+ if (strcmp(ctx->local_nls->charset, ses->local_nls->charset))
+ return 0;
+
return 1;
}
ses->sectype = ctx->sectype;
ses->sign = ctx->sign;
+ ses->local_nls = load_nls(ctx->local_nls->charset);
/* add server as first channel */
spin_lock(&ses->chan_lock);
}
cifs_sb = CIFS_SB(inode->i_sb);
tlink = cifs_sb_tlink(cifs_sb);
+ if (IS_ERR(tlink)) {
+ rc = PTR_ERR(tlink);
+ break;
+ }
+
tcon = tlink_tcon(tlink);
rc = cifs_dump_full_key(tcon, (void __user *)arg);
cifs_put_tlink(tlink);
return;
}
+ unload_nls(buf_to_free->local_nls);
atomic_dec(&sesInfoAllocCount);
kfree(buf_to_free->serverOS);
kfree(buf_to_free->serverDomain);
}
+/* See MS-NLMP 2.2.1.3 */
int build_ntlmssp_auth_blob(unsigned char **pbuffer,
u16 *buflen,
struct cifs_ses *ses,
flags = ses->ntlmssp->server_flags | NTLMSSP_REQUEST_TARGET |
NTLMSSP_NEGOTIATE_TARGET_INFO | NTLMSSP_NEGOTIATE_WORKSTATION_SUPPLIED;
-
+ /* we only send version information in ntlmssp negotiate, so do not set this flag */
+ flags = flags & ~NTLMSSP_NEGOTIATE_VERSION;
tmp = *pbuffer + sizeof(AUTHENTICATE_MESSAGE);
sec_blob->NegotiateFlags = cpu_to_le32(flags);
}
spin_unlock(&server->srv_lock);
- nls_codepage = load_nls_default();
+ nls_codepage = ses->local_nls;
/*
* need to prevent multiple threads trying to simultaneously
rc = -EAGAIN;
}
failed:
- unload_nls(nls_codepage);
return rc;
}
#define KSMBD_SHARE_FLAG_STREAMS BIT(11)
#define KSMBD_SHARE_FLAG_FOLLOW_SYMLINKS BIT(12)
#define KSMBD_SHARE_FLAG_ACL_XATTR BIT(13)
-#define KSMBD_SHARE_FLAG_UPDATE BIT(14)
+#define KSMBD_SHARE_FLAG_UPDATE BIT(14)
+#define KSMBD_SHARE_FLAG_CROSSMNT BIT(15)
/*
* Tree connect request flags.
static int queue_ksmbd_work(struct ksmbd_conn *conn)
{
struct ksmbd_work *work;
+ int err;
work = ksmbd_alloc_work_struct();
if (!work) {
work->request_buf = conn->request_buf;
conn->request_buf = NULL;
- ksmbd_init_smb_server(work);
+ err = ksmbd_init_smb_server(work);
+ if (err) {
+ ksmbd_free_work_struct(work);
+ return 0;
+ }
ksmbd_conn_enqueue_request(work);
atomic_inc(&conn->r_count);
*/
int smb2_get_ksmbd_tcon(struct ksmbd_work *work)
{
- struct smb2_hdr *req_hdr = smb2_get_msg(work->request_buf);
+ struct smb2_hdr *req_hdr = ksmbd_req_buf_next(work);
unsigned int cmd = le16_to_cpu(req_hdr->Command);
- int tree_id;
+ unsigned int tree_id;
if (cmd == SMB2_TREE_CONNECT_HE ||
cmd == SMB2_CANCEL_HE ||
pr_err("The first operation in the compound does not have tcon\n");
return -EINVAL;
}
- if (work->tcon->id != tree_id) {
+ if (tree_id != UINT_MAX && work->tcon->id != tree_id) {
pr_err("tree id(%u) is different with id(%u) in first operation\n",
tree_id, work->tcon->id);
return -EINVAL;
*/
int smb2_check_user_session(struct ksmbd_work *work)
{
- struct smb2_hdr *req_hdr = smb2_get_msg(work->request_buf);
+ struct smb2_hdr *req_hdr = ksmbd_req_buf_next(work);
struct ksmbd_conn *conn = work->conn;
- unsigned int cmd = conn->ops->get_cmd_val(work);
+ unsigned int cmd = le16_to_cpu(req_hdr->Command);
unsigned long long sess_id;
/*
pr_err("The first operation in the compound does not have sess\n");
return -EINVAL;
}
- if (work->sess->id != sess_id) {
+ if (sess_id != ULLONG_MAX && work->sess->id != sess_id) {
pr_err("session id(%llu) is different with the first operation(%lld)\n",
sess_id, work->sess->id);
return -EINVAL;
}
}
-static int smb2_creat(struct ksmbd_work *work, struct path *path, char *name,
- int open_flags, umode_t posix_mode, bool is_dir)
+static int smb2_creat(struct ksmbd_work *work, struct path *parent_path,
+ struct path *path, char *name, int open_flags,
+ umode_t posix_mode, bool is_dir)
{
struct ksmbd_tree_connect *tcon = work->tcon;
struct ksmbd_share_config *share = tcon->share_conf;
return rc;
}
- rc = ksmbd_vfs_kern_path_locked(work, name, 0, path, 0);
+ rc = ksmbd_vfs_kern_path_locked(work, name, 0, parent_path, path, 0);
if (rc) {
pr_err("cannot get linux path (%s), err = %d\n",
name, rc);
struct ksmbd_tree_connect *tcon = work->tcon;
struct smb2_create_req *req;
struct smb2_create_rsp *rsp;
- struct path path;
+ struct path path, parent_path;
struct ksmbd_share_config *share = tcon->share_conf;
struct ksmbd_file *fp = NULL;
struct file *filp = NULL;
goto err_out1;
}
- rc = ksmbd_vfs_kern_path_locked(work, name, LOOKUP_NO_SYMLINKS, &path, 1);
+ rc = ksmbd_vfs_kern_path_locked(work, name, LOOKUP_NO_SYMLINKS,
+ &parent_path, &path, 1);
if (!rc) {
file_present = true;
/*create file if not present */
if (!file_present) {
- rc = smb2_creat(work, &path, name, open_flags, posix_mode,
+ rc = smb2_creat(work, &parent_path, &path, name, open_flags,
+ posix_mode,
req->CreateOptions & FILE_DIRECTORY_FILE_LE);
if (rc) {
if (rc == -ENOENT) {
err_out:
if (file_present || created) {
- inode_unlock(d_inode(path.dentry->d_parent));
- dput(path.dentry);
+ inode_unlock(d_inode(parent_path.dentry));
+ path_put(&path);
+ path_put(&parent_path);
}
ksmbd_revert_fsids(work);
err_out1:
struct nls_table *local_nls)
{
char *link_name = NULL, *target_name = NULL, *pathname = NULL;
- struct path path;
+ struct path path, parent_path;
bool file_present = false;
int rc;
ksmbd_debug(SMB, "target name is %s\n", target_name);
rc = ksmbd_vfs_kern_path_locked(work, link_name, LOOKUP_NO_SYMLINKS,
- &path, 0);
+ &parent_path, &path, 0);
if (rc) {
if (rc != -ENOENT)
goto out;
rc = -EINVAL;
out:
if (file_present) {
- inode_unlock(d_inode(path.dentry->d_parent));
+ inode_unlock(d_inode(parent_path.dentry));
path_put(&path);
+ path_put(&parent_path);
}
if (!IS_ERR(link_name))
kfree(link_name);
unsigned int max_read_size = conn->vals->max_read_size;
WORK_BUFFERS(work, req, rsp);
+ if (work->next_smb2_rcv_hdr_off) {
+ work->send_no_response = 1;
+ err = -EOPNOTSUPP;
+ goto out;
+ }
if (test_share_config_flag(work->tcon->share_conf,
KSMBD_SHARE_FLAG_PIPE)) {
struct smb2_transform_hdr *tr_hdr = smb2_get_msg(buf);
int rc = 0;
- if (buf_data_size < sizeof(struct smb2_hdr)) {
+ if (pdu_length < sizeof(struct smb2_transform_hdr) ||
+ buf_data_size < sizeof(struct smb2_hdr)) {
pr_err("Transform message is too small (%u)\n",
pdu_length);
return -ECONNABORTED;
[SMB_COM_NEGOTIATE_EX] = { .proc = smb1_negotiate, },
};
-static void init_smb1_server(struct ksmbd_conn *conn)
+static int init_smb1_server(struct ksmbd_conn *conn)
{
conn->ops = &smb1_server_ops;
conn->cmds = smb1_server_cmds;
conn->max_cmds = ARRAY_SIZE(smb1_server_cmds);
+ return 0;
}
-void ksmbd_init_smb_server(struct ksmbd_work *work)
+int ksmbd_init_smb_server(struct ksmbd_work *work)
{
struct ksmbd_conn *conn = work->conn;
__le32 proto;
- if (conn->need_neg == false)
- return;
-
proto = *(__le32 *)((struct smb_hdr *)work->request_buf)->Protocol;
+ if (conn->need_neg == false) {
+ if (proto == SMB1_PROTO_NUMBER)
+ return -EINVAL;
+ return 0;
+ }
+
if (proto == SMB1_PROTO_NUMBER)
- init_smb1_server(conn);
- else
- init_smb3_11_server(conn);
+ return init_smb1_server(conn);
+ return init_smb3_11_server(conn);
}
int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work, int info_level,
int ksmbd_lookup_dialect_by_id(__le16 *cli_dialects, __le16 dialects_count);
-void ksmbd_init_smb_server(struct ksmbd_work *work);
+int ksmbd_init_smb_server(struct ksmbd_work *work);
struct ksmbd_kstat;
int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work,
static int ksmbd_vfs_path_lookup_locked(struct ksmbd_share_config *share_conf,
char *pathname, unsigned int flags,
+ struct path *parent_path,
struct path *path)
{
struct qstr last;
struct filename *filename;
struct path *root_share_path = &share_conf->vfs_path;
int err, type;
- struct path parent_path;
struct dentry *d;
if (pathname[0] == '\0') {
return PTR_ERR(filename);
err = vfs_path_parent_lookup(filename, flags,
- &parent_path, &last, &type,
+ parent_path, &last, &type,
root_share_path);
if (err) {
putname(filename);
}
if (unlikely(type != LAST_NORM)) {
- path_put(&parent_path);
+ path_put(parent_path);
putname(filename);
return -ENOENT;
}
- inode_lock_nested(parent_path.dentry->d_inode, I_MUTEX_PARENT);
- d = lookup_one_qstr_excl(&last, parent_path.dentry, 0);
+ inode_lock_nested(parent_path->dentry->d_inode, I_MUTEX_PARENT);
+ d = lookup_one_qstr_excl(&last, parent_path->dentry, 0);
if (IS_ERR(d))
goto err_out;
}
path->dentry = d;
- path->mnt = share_conf->vfs_path.mnt;
- path_put(&parent_path);
- putname(filename);
+ path->mnt = mntget(parent_path->mnt);
+ if (test_share_config_flag(share_conf, KSMBD_SHARE_FLAG_CROSSMNT)) {
+ err = follow_down(path, 0);
+ if (err < 0) {
+ path_put(path);
+ goto err_out;
+ }
+ }
+
+ putname(filename);
return 0;
err_out:
- inode_unlock(parent_path.dentry->d_inode);
- path_put(&parent_path);
+ inode_unlock(d_inode(parent_path->dentry));
+ path_put(parent_path);
putname(filename);
return -ENOENT;
}
{
char *stream_buf = NULL, *wbuf;
struct mnt_idmap *idmap = file_mnt_idmap(fp->filp);
- size_t size, v_len;
+ size_t size;
+ ssize_t v_len;
int err = 0;
ksmbd_debug(VFS, "write stream data pos : %llu, count : %zd\n",
fp->stream.name,
fp->stream.size,
&stream_buf);
- if ((int)v_len < 0) {
+ if (v_len < 0) {
pr_err("not found stream in xattr : %zd\n", v_len);
- err = (int)v_len;
+ err = v_len;
goto out;
}
* Return: 0 on success, otherwise error
*/
int ksmbd_vfs_kern_path_locked(struct ksmbd_work *work, char *name,
- unsigned int flags, struct path *path,
- bool caseless)
+ unsigned int flags, struct path *parent_path,
+ struct path *path, bool caseless)
{
struct ksmbd_share_config *share_conf = work->tcon->share_conf;
int err;
- struct path parent_path;
- err = ksmbd_vfs_path_lookup_locked(share_conf, name, flags, path);
+ err = ksmbd_vfs_path_lookup_locked(share_conf, name, flags, parent_path,
+ path);
if (!err)
return 0;
path_len = strlen(filepath);
remain_len = path_len;
- parent_path = share_conf->vfs_path;
- path_get(&parent_path);
+ *parent_path = share_conf->vfs_path;
+ path_get(parent_path);
- while (d_can_lookup(parent_path.dentry)) {
+ while (d_can_lookup(parent_path->dentry)) {
char *filename = filepath + path_len - remain_len;
char *next = strchrnul(filename, '/');
size_t filename_len = next - filename;
if (filename_len == 0)
break;
- err = ksmbd_vfs_lookup_in_dir(&parent_path, filename,
+ err = ksmbd_vfs_lookup_in_dir(parent_path, filename,
filename_len,
work->conn->um);
if (err)
goto out2;
else if (is_last)
goto out1;
- path_put(&parent_path);
- parent_path = *path;
+ path_put(parent_path);
+ *parent_path = *path;
next[0] = '/';
remain_len -= filename_len + 1;
err = -EINVAL;
out2:
- path_put(&parent_path);
+ path_put(parent_path);
out1:
kfree(filepath);
}
if (!err) {
- err = ksmbd_vfs_lock_parent(parent_path.dentry, path->dentry);
- if (err)
- dput(path->dentry);
- path_put(&parent_path);
+ err = ksmbd_vfs_lock_parent(parent_path->dentry, path->dentry);
+ if (err) {
+ path_put(path);
+ path_put(parent_path);
+ }
}
return err;
}
int ksmbd_vfs_remove_xattr(struct mnt_idmap *idmap,
const struct path *path, char *attr_name);
int ksmbd_vfs_kern_path_locked(struct ksmbd_work *work, char *name,
- unsigned int flags, struct path *path,
- bool caseless);
+ unsigned int flags, struct path *parent_path,
+ struct path *path, bool caseless);
struct dentry *ksmbd_vfs_kern_path_create(struct ksmbd_work *work,
const char *name,
unsigned int flags,
msg.msg_flags |= MSG_MORE;
if (remain && pipe_occupancy(pipe->head, tail) > 0)
msg.msg_flags |= MSG_MORE;
+ if (out->f_flags & O_NONBLOCK)
+ msg.msg_flags |= MSG_DONTWAIT;
iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, bvec, bc,
len - remain);
{
}
-static inline int drm_fb_helper_defio_init(struct drm_fb_helper *fb_helper)
-{
- return -ENODEV;
-}
-
static inline void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper,
bool suspend)
{
ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);
/* defined in arch */
-extern int ftrace_ip_converted(unsigned long ip);
extern int ftrace_dyn_arch_init(void);
extern void ftrace_replace_code(int enable);
extern int ftrace_update_ftrace_func(ftrace_func_t func);
}
#endif
-/* May be defined in arch */
-extern int ftrace_arch_read_dyn_info(char *buf, int size);
-
extern int skip_trace(unsigned long ip);
extern void ftrace_module_init(struct module *mod);
extern void ftrace_module_enable(struct module *mod);
*/
static inline bool vma_start_read(struct vm_area_struct *vma)
{
- /* Check before locking. A race might cause false locked result. */
- if (vma->vm_lock_seq == READ_ONCE(vma->vm_mm->mm_lock_seq))
+ /*
+ * Check before locking. A race might cause false locked result.
+ * We can use READ_ONCE() for the mm_lock_seq here, and don't need
+ * ACQUIRE semantics, because this is just a lockless check whose result
+ * we don't rely on for anything - the mm_lock_seq read against which we
+ * need ordering is below.
+ */
+ if (READ_ONCE(vma->vm_lock_seq) == READ_ONCE(vma->vm_mm->mm_lock_seq))
return false;
if (unlikely(down_read_trylock(&vma->vm_lock->lock) == 0))
* False unlocked result is impossible because we modify and check
* vma->vm_lock_seq under vma->vm_lock protection and mm->mm_lock_seq
* modification invalidates all existing locks.
+ *
+ * We must use ACQUIRE semantics for the mm_lock_seq so that if we are
+ * racing with vma_end_write_all(), we only start reading from the VMA
+ * after it has been unlocked.
+ * This pairs with RELEASE semantics in vma_end_write_all().
*/
- if (unlikely(vma->vm_lock_seq == READ_ONCE(vma->vm_mm->mm_lock_seq))) {
+ if (unlikely(vma->vm_lock_seq == smp_load_acquire(&vma->vm_mm->mm_lock_seq))) {
up_read(&vma->vm_lock->lock);
return false;
}
* current task is holding mmap_write_lock, both vma->vm_lock_seq and
* mm->mm_lock_seq can't be concurrently modified.
*/
- *mm_lock_seq = READ_ONCE(vma->vm_mm->mm_lock_seq);
+ *mm_lock_seq = vma->vm_mm->mm_lock_seq;
return (vma->vm_lock_seq == *mm_lock_seq);
}
return;
down_write(&vma->vm_lock->lock);
- vma->vm_lock_seq = mm_lock_seq;
+ /*
+ * We should use WRITE_ONCE() here because we can have concurrent reads
+ * from the early lockless pessimistic check in vma_start_read().
+ * We don't really care about the correctness of that early check, but
+ * we should use WRITE_ONCE() for cleanliness and to keep KCSAN happy.
+ */
+ WRITE_ONCE(vma->vm_lock_seq, mm_lock_seq);
up_write(&vma->vm_lock->lock);
}
if (!down_write_trylock(&vma->vm_lock->lock))
return false;
- vma->vm_lock_seq = mm_lock_seq;
+ WRITE_ONCE(vma->vm_lock_seq, mm_lock_seq);
up_write(&vma->vm_lock->lock);
return true;
}
};
#ifdef CONFIG_PER_VMA_LOCK
+ /*
+ * Can only be written (using WRITE_ONCE()) while holding both:
+ * - mmap_lock (in write mode)
+ * - vm_lock->lock (in write mode)
+ * Can be read reliably while holding one of:
+ * - mmap_lock (in read or write mode)
+ * - vm_lock->lock (in read or write mode)
+ * Can be read unreliably (using READ_ONCE()) for pessimistic bailout
+ * while holding nothing (except RCU to keep the VMA struct allocated).
+ *
+ * This sequence counter is explicitly allowed to overflow; sequence
+ * counter reuse can only lead to occasional unnecessary use of the
+ * slowpath.
+ */
int vm_lock_seq;
struct vma_lock *vm_lock;
* by mmlist_lock
*/
#ifdef CONFIG_PER_VMA_LOCK
+ /*
+ * This field has lock-like semantics, meaning it is sometimes
+ * accessed with ACQUIRE/RELEASE semantics.
+ * Roughly speaking, incrementing the sequence number is
+ * equivalent to releasing locks on VMAs; reading the sequence
+ * number can be part of taking a read lock on a VMA.
+ *
+ * Can be modified under write mmap_lock using RELEASE
+ * semantics.
+ * Can be read with no other protection when holding write
+ * mmap_lock.
+ * Can be read with ACQUIRE semantics if not holding write
+ * mmap_lock.
+ */
int mm_lock_seq;
#endif
static inline void vma_end_write_all(struct mm_struct *mm)
{
mmap_assert_write_locked(mm);
- /* No races during update due to exclusive mmap_lock being held */
- WRITE_ONCE(mm->mm_lock_seq, mm->mm_lock_seq + 1);
+ /*
+ * Nobody can concurrently modify mm->mm_lock_seq due to exclusive
+ * mmap_lock being held.
+ * We need RELEASE semantics here to ensure that preceding stores into
+ * the VMA take effect before we unlock it with this store.
+ * Pairs with ACQUIRE semantics in vma_start_read().
+ */
+ smp_store_release(&mm->mm_lock_seq, mm->mm_lock_seq + 1);
}
#else
static inline void vma_end_write_all(struct mm_struct *mm) {}
/* nls_base.c */
extern int __register_nls(struct nls_table *, struct module *);
extern int unregister_nls(struct nls_table *);
-extern struct nls_table *load_nls(char *);
+extern struct nls_table *load_nls(const char *charset);
extern void unload_nls(struct nls_table *);
extern struct nls_table *load_nls_default(void);
#define register_nls(nls) __register_nls((nls), THIS_MODULE)
extern int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq);
extern int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq);
extern void dev_pm_clear_wake_irq(struct device *dev);
-extern void dev_pm_enable_wake_irq(struct device *dev);
-extern void dev_pm_disable_wake_irq(struct device *dev);
#else /* !CONFIG_PM */
{
}
-static inline void dev_pm_enable_wake_irq(struct device *dev)
-{
-}
-
-static inline void dev_pm_disable_wake_irq(struct device *dev)
-{
-}
-
#endif /* CONFIG_PM */
#endif /* _LINUX_PM_WAKEIRQ_H */
#ifdef CONFIG_THERMAL
struct thermal_zone_device *thermal_zone_device_register(const char *, int, int,
void *, struct thermal_zone_device_ops *,
- struct thermal_zone_params *, int, int);
+ const struct thermal_zone_params *, int, int);
void thermal_zone_device_unregister(struct thermal_zone_device *);
struct thermal_zone_device *
thermal_zone_device_register_with_trips(const char *, struct thermal_trip *, int, int,
void *, struct thermal_zone_device_ops *,
- struct thermal_zone_params *, int, int);
+ const struct thermal_zone_params *, int, int);
void *thermal_zone_device_priv(struct thermal_zone_device *tzd);
const char *thermal_zone_device_type(struct thermal_zone_device *tzd);
static inline struct thermal_zone_device *thermal_zone_device_register(
const char *type, int trips, int mask, void *devdata,
struct thermal_zone_device_ops *ops,
- struct thermal_zone_params *tzp,
+ const struct thermal_zone_params *tzp,
int passive_delay, int polling_delay)
{ return ERR_PTR(-ENODEV); }
static inline void thermal_zone_device_unregister(
/* more secured version of ipv6_addr_hash() */
static inline u32 __ipv6_addr_jhash(const struct in6_addr *a, const u32 initval)
{
- u32 v = (__force u32)a->s6_addr32[0] ^ (__force u32)a->s6_addr32[1];
-
- return jhash_3words(v,
- (__force u32)a->s6_addr32[2],
- (__force u32)a->s6_addr32[3],
- initval);
+ return jhash2((__force const u32 *)a->s6_addr32,
+ ARRAY_SIZE(a->s6_addr32), initval);
}
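A brief reasoning note on the __ipv6_addr_jhash() change above (the weakness is implied by the "more secured" comment but not spelled out in the hunk): the removed variant first XOR-folded s6_addr32[0] and s6_addr32[1], so any two addresses that keep that XOR value unchanged, for instance addresses that merely swap their first two 32-bit words, hashed identically for a given initval. jhash2() over the full four-word array removes that structural collision while still mixing in the caller-supplied initval.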
static inline bool ipv6_addr_loopback(const struct in6_addr *a)
return features;
}
-/* IP header + UDP + VXLAN + Ethernet header */
-#define VXLAN_HEADROOM (20 + 8 + 8 + 14)
-/* IPv6 header + UDP + VXLAN + Ethernet header */
-#define VXLAN6_HEADROOM (40 + 8 + 8 + 14)
+static inline int vxlan_headroom(u32 flags)
+{
+ /* VXLAN: IP4/6 header + UDP + VXLAN + Ethernet header */
+ /* VXLAN-GPE: IP4/6 header + UDP + VXLAN */
+ return (flags & VXLAN_F_IPV6 ? sizeof(struct ipv6hdr) :
+ sizeof(struct iphdr)) +
+ sizeof(struct udphdr) + sizeof(struct vxlanhdr) +
+ (flags & VXLAN_F_GPE ? 0 : ETH_HLEN);
+}
static inline struct vxlanhdr *vxlan_hdr(struct sk_buff *skb)
{
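As a quick sanity check on the vxlan_headroom() helper above (assuming the usual on-wire sizes: 20-byte IPv4 header, 40-byte IPv6 header, 8-byte UDP header, 8-byte VXLAN header, 14-byte Ethernet header), the non-GPE results reproduce the removed constants: 20 + 8 + 8 + 14 = 50 bytes (old VXLAN_HEADROOM) and 40 + 8 + 8 + 14 = 70 bytes (old VXLAN6_HEADROOM). The VXLAN-GPE case, by contrast, now drops the 14-byte inner Ethernet header that the fixed constants always reserved.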
*
* The Zone Condition state machine in the ZBC/ZAC standards maps the above
* definitions as:
- * - ZC1: Empty | BLK_ZONE_EMPTY
+ * - ZC1: Empty | BLK_ZONE_COND_EMPTY
* - ZC2: Implicit Open | BLK_ZONE_COND_IMP_OPEN
* - ZC3: Explicit Open | BLK_ZONE_COND_EXP_OPEN
- * - ZC4: Closed | BLK_ZONE_CLOSED
- * - ZC5: Full | BLK_ZONE_FULL
- * - ZC6: Read Only | BLK_ZONE_READONLY
- * - ZC7: Offline | BLK_ZONE_OFFLINE
+ * - ZC4: Closed | BLK_ZONE_COND_CLOSED
+ * - ZC5: Full | BLK_ZONE_COND_FULL
+ * - ZC6: Read Only | BLK_ZONE_COND_READONLY
+ * - ZC7: Offline | BLK_ZONE_COND_OFFLINE
*
* Conditions 0x5 to 0xC are reserved by the current ZBC/ZAC spec and should
* be considered invalid.
unsigned short sll_hatype;
unsigned char sll_pkttype;
unsigned char sll_halen;
- unsigned char sll_addr[8];
+ union {
+ unsigned char sll_addr[8];
+ /* Actual length is in sll_halen. */
+ __DECLARE_FLEX_ARRAY(unsigned char, sll_addr_flex);
+ };
};
/* Packet types */
domid_t domid;
};
+/*
+ * Bind statically allocated @port.
+ */
+#define IOCTL_EVTCHN_BIND_STATIC \
+ _IOC(_IOC_NONE, 'E', 7, sizeof(struct ioctl_evtchn_bind))
+struct ioctl_evtchn_bind {
+ unsigned int port;
+};
+
#endif /* __LINUX_PUBLIC_EVTCHN_H__ */
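To show how the new ioctl ties in with the driver changes earlier in this series, here is a minimal userspace sketch. It is not part of the patch; the /dev/xen/evtchn device path and the installed xen/evtchn.h UAPI header location are assumptions, and the port value is a placeholder for a channel allocated out-of-band (e.g. by the toolstack).

/* Hypothetical sketch: bind an already-allocated (static) event channel. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <xen/evtchn.h>		/* IOCTL_EVTCHN_BIND_STATIC, struct ioctl_evtchn_bind */

static int bind_static_evtchn(unsigned int port)
{
	struct ioctl_evtchn_bind bind = { .port = port };
	int fd = open("/dev/xen/evtchn", O_RDWR);

	if (fd < 0)
		return -1;

	/*
	 * Unlike the dynamic bind ioctls, the driver records is_static for
	 * this binding, so per the changes above the kernel does not close
	 * the port when the binding is torn down.
	 */
	if (ioctl(fd, IOCTL_EVTCHN_BIND_STATIC, &bind) < 0) {
		perror("IOCTL_EVTCHN_BIND_STATIC");
		close(fd);
		return -1;
	}

	return fd;	/* pending port numbers can now be read() from fd */
}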
/*
* Allow extra references to event channels exposed to userspace by evtchn
*/
-int evtchn_make_refcounted(evtchn_port_t evtchn);
+int evtchn_make_refcounted(evtchn_port_t evtchn, bool is_static);
int evtchn_get(evtchn_port_t evtchn);
void evtchn_put(evtchn_port_t evtchn);
irqreturn_t xen_debug_interrupt(int irq, void *dev_id);
+static inline void xen_evtchn_close(evtchn_port_t port)
+{
+ struct evtchn_close close;
+
+ close.port = port;
+ if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
+ BUG();
+}
+
#endif /* _XEN_EVENTS_H */
return 0;
}
+static bool current_pending_io(void)
+{
+ struct io_uring_task *tctx = current->io_uring;
+
+ if (!tctx)
+ return false;
+ return percpu_counter_read_positive(&tctx->inflight);
+}
+
/* when returns >0, the caller should retry */
static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
struct io_wait_queue *iowq)
{
- int token, ret;
+ int io_wait, ret;
if (unlikely(READ_ONCE(ctx->check_cq)))
return 1;
return 0;
/*
- * Use io_schedule_prepare/finish, so cpufreq can take into account
- * that the task is waiting for IO - turns out to be important for low
- * QD IO.
+ * Mark us as being in io_wait if we have pending requests, so cpufreq
+ * can take into account that the task is waiting for IO - turns out
+ * to be important for low QD IO.
*/
- token = io_schedule_prepare();
+ io_wait = current->in_iowait;
+ if (current_pending_io())
+ current->in_iowait = 1;
ret = 0;
if (iowq->timeout == KTIME_MAX)
schedule();
else if (!schedule_hrtimeout(&iowq->timeout, HRTIMER_MODE_ABS))
ret = -ETIME;
- io_schedule_finish(token);
+ current->in_iowait = io_wait;
return ret;
}
return 0;
}
+static bool is_cfi_preamble_symbol(unsigned long addr)
+{
+ char symbuf[KSYM_NAME_LEN];
+
+ if (lookup_symbol_name(addr, symbuf))
+ return false;
+
+ return str_has_prefix("__cfi_", symbuf) ||
+ str_has_prefix("__pfx_", symbuf);
+}
+
static int check_kprobe_address_safe(struct kprobe *p,
struct module **probed_mod)
{
within_kprobe_blacklist((unsigned long) p->addr) ||
jump_label_text_reserved(p->addr, p->addr) ||
static_call_text_reserved(p->addr, p->addr) ||
- find_bug((unsigned long)p->addr)) {
+ find_bug((unsigned long)p->addr) ||
+ is_cfi_preamble_symbol((unsigned long)p->addr)) {
ret = -EINVAL;
goto out;
}
return prio;
}
+/*
+ * Update the waiter->tree copy of the sort keys.
+ */
static __always_inline void
waiter_update_prio(struct rt_mutex_waiter *waiter, struct task_struct *task)
{
- waiter->prio = __waiter_prio(task);
- waiter->deadline = task->dl.deadline;
+ lockdep_assert_held(&waiter->lock->wait_lock);
+ lockdep_assert(RB_EMPTY_NODE(&waiter->tree.entry));
+
+ waiter->tree.prio = __waiter_prio(task);
+ waiter->tree.deadline = task->dl.deadline;
+}
+
+/*
+ * Update the waiter->pi_tree copy of the sort keys (from the tree copy).
+ */
+static __always_inline void
+waiter_clone_prio(struct rt_mutex_waiter *waiter, struct task_struct *task)
+{
+ lockdep_assert_held(&waiter->lock->wait_lock);
+ lockdep_assert_held(&task->pi_lock);
+ lockdep_assert(RB_EMPTY_NODE(&waiter->pi_tree.entry));
+
+ waiter->pi_tree.prio = waiter->tree.prio;
+ waiter->pi_tree.deadline = waiter->tree.deadline;
}
/*
- * Only use with rt_mutex_waiter_{less,equal}()
+ * Only use with rt_waiter_node_{less,equal}()
*/
+#define task_to_waiter_node(p) \
+ &(struct rt_waiter_node){ .prio = __waiter_prio(p), .deadline = (p)->dl.deadline }
#define task_to_waiter(p) \
- &(struct rt_mutex_waiter){ .prio = __waiter_prio(p), .deadline = (p)->dl.deadline }
+ &(struct rt_mutex_waiter){ .tree = *task_to_waiter_node(p) }
-static __always_inline int rt_mutex_waiter_less(struct rt_mutex_waiter *left,
- struct rt_mutex_waiter *right)
+static __always_inline int rt_waiter_node_less(struct rt_waiter_node *left,
+ struct rt_waiter_node *right)
{
if (left->prio < right->prio)
return 1;
return 0;
}
-static __always_inline int rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
- struct rt_mutex_waiter *right)
+static __always_inline int rt_waiter_node_equal(struct rt_waiter_node *left,
+ struct rt_waiter_node *right)
{
if (left->prio != right->prio)
return 0;
static inline bool rt_mutex_steal(struct rt_mutex_waiter *waiter,
struct rt_mutex_waiter *top_waiter)
{
- if (rt_mutex_waiter_less(waiter, top_waiter))
+ if (rt_waiter_node_less(&waiter->tree, &top_waiter->tree))
return true;
#ifdef RT_MUTEX_BUILD_SPINLOCKS
* Note that RT tasks are excluded from same priority (lateral)
* steals to prevent the introduction of an unbounded latency.
*/
- if (rt_prio(waiter->prio) || dl_prio(waiter->prio))
+ if (rt_prio(waiter->tree.prio) || dl_prio(waiter->tree.prio))
return false;
- return rt_mutex_waiter_equal(waiter, top_waiter);
+ return rt_waiter_node_equal(&waiter->tree, &top_waiter->tree);
#else
return false;
#endif
}
#define __node_2_waiter(node) \
- rb_entry((node), struct rt_mutex_waiter, tree_entry)
+ rb_entry((node), struct rt_mutex_waiter, tree.entry)
static __always_inline bool __waiter_less(struct rb_node *a, const struct rb_node *b)
{
struct rt_mutex_waiter *aw = __node_2_waiter(a);
struct rt_mutex_waiter *bw = __node_2_waiter(b);
- if (rt_mutex_waiter_less(aw, bw))
+ if (rt_waiter_node_less(&aw->tree, &bw->tree))
return 1;
if (!build_ww_mutex())
return 0;
- if (rt_mutex_waiter_less(bw, aw))
+ if (rt_waiter_node_less(&bw->tree, &aw->tree))
return 0;
/* NOTE: relies on waiter->ww_ctx being set before insertion */
static __always_inline void
rt_mutex_enqueue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter)
{
- rb_add_cached(&waiter->tree_entry, &lock->waiters, __waiter_less);
+ lockdep_assert_held(&lock->wait_lock);
+
+ rb_add_cached(&waiter->tree.entry, &lock->waiters, __waiter_less);
}
static __always_inline void
rt_mutex_dequeue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter)
{
- if (RB_EMPTY_NODE(&waiter->tree_entry))
+ lockdep_assert_held(&lock->wait_lock);
+
+ if (RB_EMPTY_NODE(&waiter->tree.entry))
return;
- rb_erase_cached(&waiter->tree_entry, &lock->waiters);
- RB_CLEAR_NODE(&waiter->tree_entry);
+ rb_erase_cached(&waiter->tree.entry, &lock->waiters);
+ RB_CLEAR_NODE(&waiter->tree.entry);
}
-#define __node_2_pi_waiter(node) \
- rb_entry((node), struct rt_mutex_waiter, pi_tree_entry)
+#define __node_2_rt_node(node) \
+ rb_entry((node), struct rt_waiter_node, entry)
-static __always_inline bool
-__pi_waiter_less(struct rb_node *a, const struct rb_node *b)
+static __always_inline bool __pi_waiter_less(struct rb_node *a, const struct rb_node *b)
{
- return rt_mutex_waiter_less(__node_2_pi_waiter(a), __node_2_pi_waiter(b));
+ return rt_waiter_node_less(__node_2_rt_node(a), __node_2_rt_node(b));
}
static __always_inline void
rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
{
- rb_add_cached(&waiter->pi_tree_entry, &task->pi_waiters, __pi_waiter_less);
+ lockdep_assert_held(&task->pi_lock);
+
+ rb_add_cached(&waiter->pi_tree.entry, &task->pi_waiters, __pi_waiter_less);
}
static __always_inline void
rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
{
- if (RB_EMPTY_NODE(&waiter->pi_tree_entry))
+ lockdep_assert_held(&task->pi_lock);
+
+ if (RB_EMPTY_NODE(&waiter->pi_tree.entry))
return;
- rb_erase_cached(&waiter->pi_tree_entry, &task->pi_waiters);
- RB_CLEAR_NODE(&waiter->pi_tree_entry);
+ rb_erase_cached(&waiter->pi_tree.entry, &task->pi_waiters);
+ RB_CLEAR_NODE(&waiter->pi_tree.entry);
}
-static __always_inline void rt_mutex_adjust_prio(struct task_struct *p)
+static __always_inline void rt_mutex_adjust_prio(struct rt_mutex_base *lock,
+ struct task_struct *p)
{
struct task_struct *pi_task = NULL;
+ lockdep_assert_held(&lock->wait_lock);
+ lockdep_assert(rt_mutex_owner(lock) == p);
lockdep_assert_held(&p->pi_lock);
if (task_has_pi_waiters(p))
* Chain walk basics and protection scope
*
* [R] refcount on task
- * [P] task->pi_lock held
+ * [Pn] task->pi_lock held
* [L] rtmutex->wait_lock held
*
+ * Normal locking order:
+ *
+ * rtmutex->wait_lock
+ * task->pi_lock
+ *
* Step Description Protected by
* function arguments:
* @task [R]
* again:
* loop_sanity_check();
* retry:
- * [1] lock(task->pi_lock); [R] acquire [P]
- * [2] waiter = task->pi_blocked_on; [P]
- * [3] check_exit_conditions_1(); [P]
- * [4] lock = waiter->lock; [P]
- * [5] if (!try_lock(lock->wait_lock)) { [P] try to acquire [L]
- * unlock(task->pi_lock); release [P]
+ * [1] lock(task->pi_lock); [R] acquire [P1]
+ * [2] waiter = task->pi_blocked_on; [P1]
+ * [3] check_exit_conditions_1(); [P1]
+ * [4] lock = waiter->lock; [P1]
+ * [5] if (!try_lock(lock->wait_lock)) { [P1] try to acquire [L]
+ * unlock(task->pi_lock); release [P1]
* goto retry;
* }
- * [6] check_exit_conditions_2(); [P] + [L]
- * [7] requeue_lock_waiter(lock, waiter); [P] + [L]
- * [8] unlock(task->pi_lock); release [P]
+ * [6] check_exit_conditions_2(); [P1] + [L]
+ * [7] requeue_lock_waiter(lock, waiter); [P1] + [L]
+ * [8] unlock(task->pi_lock); release [P1]
* put_task_struct(task); release [R]
* [9] check_exit_conditions_3(); [L]
* [10] task = owner(lock); [L]
* get_task_struct(task); [L] acquire [R]
- * lock(task->pi_lock); [L] acquire [P]
- * [11] requeue_pi_waiter(tsk, waiters(lock));[P] + [L]
- * [12] check_exit_conditions_4(); [P] + [L]
- * [13] unlock(task->pi_lock); release [P]
+ * lock(task->pi_lock); [L] acquire [P2]
+ * [11] requeue_pi_waiter(tsk, waiters(lock));[P2] + [L]
+ * [12] check_exit_conditions_4(); [P2] + [L]
+ * [13] unlock(task->pi_lock); release [P2]
* unlock(lock->wait_lock); release [L]
* goto again;
+ *
+ * Where P1 is the blocking task and P2 is the lock owner; going up one step
+ * the owner becomes the next blocked task etc.
+ *
*/
static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task,
enum rtmutex_chainwalk chwalk,
* enabled we continue, but stop the requeueing in the chain
* walk.
*/
- if (rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
+ if (rt_waiter_node_equal(&waiter->tree, task_to_waiter_node(task))) {
if (!detect_deadlock)
goto out_unlock_pi;
else
}
/*
- * [4] Get the next lock
+ * [4] Get the next lock; per holding task->pi_lock we can't unblock
+ * and guarantee @lock's existence.
*/
lock = waiter->lock;
/*
* [5] We need to trylock here as we are holding task->pi_lock,
* which is the reverse lock order versus the other rtmutex
* operations.
+ *
+ * Per the above, holding task->pi_lock guarantees lock exists, so
+ * inverting this lock order is infeasible from a life-time
+ * perspective.
*/
if (!raw_spin_trylock(&lock->wait_lock)) {
raw_spin_unlock_irq(&task->pi_lock);
* or
*
* DL CBS enforcement advancing the effective deadline.
- *
- * Even though pi_waiters also uses these fields, and that tree is only
- * updated in [11], we can do this here, since we hold [L], which
- * serializes all pi_waiters access and rb_erase() does not care about
- * the values of the node being removed.
*/
waiter_update_prio(waiter, task);
rt_mutex_enqueue(lock, waiter);
- /* [8] Release the task */
+ /*
+ * [8] Release the (blocking) task in preparation for
+ * taking the owner task in [10].
+ *
+ * Since we hold lock->wait_lock, task cannot unblock, even if we
+ * release task->pi_lock.
+ */
raw_spin_unlock(&task->pi_lock);
put_task_struct(task);
return 0;
}
- /* [10] Grab the next task, i.e. the owner of @lock */
+ /*
+ * [10] Grab the next task, i.e. the owner of @lock
+ *
+ * Per holding lock->wait_lock and checking for !owner above, there
+ * must be an owner and it cannot go away.
+ */
task = get_task_struct(rt_mutex_owner(lock));
raw_spin_lock(&task->pi_lock);
* and adjust the priority of the owner.
*/
rt_mutex_dequeue_pi(task, prerequeue_top_waiter);
+ waiter_clone_prio(waiter, task);
rt_mutex_enqueue_pi(task, waiter);
- rt_mutex_adjust_prio(task);
+ rt_mutex_adjust_prio(lock, task);
} else if (prerequeue_top_waiter == waiter) {
/*
*/
rt_mutex_dequeue_pi(task, waiter);
waiter = rt_mutex_top_waiter(lock);
+ waiter_clone_prio(waiter, task);
rt_mutex_enqueue_pi(task, waiter);
- rt_mutex_adjust_prio(task);
+ rt_mutex_adjust_prio(lock, task);
} else {
/*
* Nothing changed. No need to do any priority
waiter->task = task;
waiter->lock = lock;
waiter_update_prio(waiter, task);
+ waiter_clone_prio(waiter, task);
/* Get the top priority waiter on the lock */
if (rt_mutex_has_waiters(lock))
rt_mutex_dequeue_pi(owner, top_waiter);
rt_mutex_enqueue_pi(owner, waiter);
- rt_mutex_adjust_prio(owner);
+ rt_mutex_adjust_prio(lock, owner);
if (owner->pi_blocked_on)
chain_walk = 1;
} else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
{
struct rt_mutex_waiter *waiter;
+ lockdep_assert_held(&lock->wait_lock);
+
raw_spin_lock(¤t->pi_lock);
waiter = rt_mutex_top_waiter(lock);
* task unblocks.
*/
rt_mutex_dequeue_pi(current, waiter);
- rt_mutex_adjust_prio(current);
+ rt_mutex_adjust_prio(lock, current);
/*
* As we are waking up the top waiter, and the waiter stays
if (rt_mutex_has_waiters(lock))
rt_mutex_enqueue_pi(owner, rt_mutex_top_waiter(lock));
- rt_mutex_adjust_prio(owner);
+ rt_mutex_adjust_prio(lock, owner);
/* Store the lock on which owner is blocked or NULL */
next_lock = task_blocked_on_lock(owner);
raw_spin_lock_irqsave(&task->pi_lock, flags);
waiter = task->pi_blocked_on;
- if (!waiter || rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
+ if (!waiter || rt_waiter_node_equal(&waiter->tree, task_to_waiter_node(task))) {
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
return;
}
#include <linux/rtmutex.h>
#include <linux/sched/wake_q.h>
+
+/*
+ * This is a helper for the struct rt_mutex_waiter below. A waiter goes in two
+ * separate trees and they need their own copy of the sort keys because of
+ * different locking requirements.
+ *
+ * @entry: rbtree node to enqueue into the waiters tree
+ * @prio: Priority of the waiter
+ * @deadline: Deadline of the waiter if applicable
+ *
+ * See rt_waiter_node_less() and waiter_*_prio().
+ */
+struct rt_waiter_node {
+ struct rb_node entry;
+ int prio;
+ u64 deadline;
+};
+
/*
* This is the control structure for tasks blocked on a rt_mutex,
* which is allocated on the kernel stack of the blocked task.
*
- * @tree_entry: pi node to enqueue into the mutex waiters tree
- * @pi_tree_entry: pi node to enqueue into the mutex owner waiters tree
+ * @tree: node to enqueue into the mutex waiters tree
+ * @pi_tree: node to enqueue into the mutex owner waiters tree
* @task: task reference to the blocked task
* @lock: Pointer to the rt_mutex on which the waiter blocks
* @wake_state: Wakeup state to use (TASK_NORMAL or TASK_RTLOCK_WAIT)
- * @prio: Priority of the waiter
- * @deadline: Deadline of the waiter if applicable
* @ww_ctx: WW context pointer
+ *
+ * @tree is ordered by @lock->wait_lock
+ * @pi_tree is ordered by rt_mutex_owner(@lock)->pi_lock
*/
struct rt_mutex_waiter {
- struct rb_node tree_entry;
- struct rb_node pi_tree_entry;
+ struct rt_waiter_node tree;
+ struct rt_waiter_node pi_tree;
struct task_struct *task;
struct rt_mutex_base *lock;
unsigned int wake_state;
- int prio;
- u64 deadline;
struct ww_acquire_ctx *ww_ctx;
};
{
struct rb_node *leftmost = rb_first_cached(&lock->waiters);
- return rb_entry(leftmost, struct rt_mutex_waiter, tree_entry) == waiter;
+ return rb_entry(leftmost, struct rt_mutex_waiter, tree.entry) == waiter;
}
static inline struct rt_mutex_waiter *rt_mutex_top_waiter(struct rt_mutex_base *lock)
struct rb_node *leftmost = rb_first_cached(&lock->waiters);
struct rt_mutex_waiter *w = NULL;
+ lockdep_assert_held(&lock->wait_lock);
+
if (leftmost) {
- w = rb_entry(leftmost, struct rt_mutex_waiter, tree_entry);
+ w = rb_entry(leftmost, struct rt_mutex_waiter, tree.entry);
BUG_ON(w->lock != lock);
}
return w;
static inline struct rt_mutex_waiter *task_top_pi_waiter(struct task_struct *p)
{
+ lockdep_assert_held(&p->pi_lock);
+
return rb_entry(p->pi_waiters.rb_leftmost, struct rt_mutex_waiter,
- pi_tree_entry);
+ pi_tree.entry);
}
#define RT_MUTEX_HAS_WAITERS 1UL
static inline void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
{
debug_rt_mutex_init_waiter(waiter);
- RB_CLEAR_NODE(&waiter->pi_tree_entry);
- RB_CLEAR_NODE(&waiter->tree_entry);
+ RB_CLEAR_NODE(&waiter->pi_tree.entry);
+ RB_CLEAR_NODE(&waiter->tree.entry);
waiter->wake_state = TASK_NORMAL;
waiter->task = NULL;
}
struct rb_node *n = rb_first(&lock->rtmutex.waiters.rb_root);
if (!n)
return NULL;
- return rb_entry(n, struct rt_mutex_waiter, tree_entry);
+ return rb_entry(n, struct rt_mutex_waiter, tree.entry);
}
static inline struct rt_mutex_waiter *
__ww_waiter_next(struct rt_mutex *lock, struct rt_mutex_waiter *w)
{
- struct rb_node *n = rb_next(&w->tree_entry);
+ struct rb_node *n = rb_next(&w->tree.entry);
if (!n)
return NULL;
- return rb_entry(n, struct rt_mutex_waiter, tree_entry);
+ return rb_entry(n, struct rt_mutex_waiter, tree.entry);
}
static inline struct rt_mutex_waiter *
__ww_waiter_prev(struct rt_mutex *lock, struct rt_mutex_waiter *w)
{
- struct rb_node *n = rb_prev(&w->tree_entry);
+ struct rb_node *n = rb_prev(&w->tree.entry);
if (!n)
return NULL;
- return rb_entry(n, struct rt_mutex_waiter, tree_entry);
+ return rb_entry(n, struct rt_mutex_waiter, tree.entry);
}
static inline struct rt_mutex_waiter *
struct rb_node *n = rb_last(&lock->rtmutex.waiters.rb_root);
if (!n)
return NULL;
- return rb_entry(n, struct rt_mutex_waiter, tree_entry);
+ return rb_entry(n, struct rt_mutex_waiter, tree.entry);
}
static inline void
if (handler != SIG_IGN && handler != SIG_DFL)
return false;
+ /* If dying, we handle all new signals by ignoring them */
+ if (fatal_signal_pending(tsk))
+ return false;
+
/* if ptraced, let the tracer determine */
return !tsk->ptrace;
}
rb_time_t before_stamp;
u64 event_stamp[MAX_NEST];
u64 read_stamp;
+ /* pages removed since last reset */
+ unsigned long pages_removed;
/* ring buffer pages to update, > 0 to add, < 0 to remove */
long nr_pages_to_update;
struct list_head new_pages; /* new pages to add */
struct buffer_page *head_page;
struct buffer_page *cache_reader_page;
unsigned long cache_read;
+ unsigned long cache_pages_removed;
u64 read_stamp;
u64 page_stamp;
struct ring_buffer_event *event;
/**
* ring_buffer_wake_waiters - wake up any waiters on this ring buffer
* @buffer: The ring buffer to wake waiters on
+ * @cpu: The CPU buffer to wake waiters on
*
* In the case of a file that represents a ring buffer is closing,
* it is prudent to wake up any waiters that are on this.
to_remove = rb_list_head(to_remove)->next;
head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
}
+ /* Read iterators need to reset themselves when some pages removed */
+ cpu_buffer->pages_removed += nr_removed;
next_page = rb_list_head(to_remove)->next;
cpu_buffer->head_page = list_entry(next_page,
struct buffer_page, list);
- /*
- * change read pointer to make sure any read iterators reset
- * themselves
- */
- cpu_buffer->read = 0;
-
/* pages are removed, resume tracing and then free the pages */
atomic_dec(&cpu_buffer->record_disabled);
raw_spin_unlock_irq(&cpu_buffer->reader_lock);
/**
* ring_buffer_unlock_commit - commit a reserved
* @buffer: The buffer to commit to
- * @event: The event pointer to commit.
*
* This commits the data to the ring buffer, and releases any locks held.
*
iter->cache_reader_page = iter->head_page;
iter->cache_read = cpu_buffer->read;
+ iter->cache_pages_removed = cpu_buffer->pages_removed;
if (iter->head) {
iter->read_stamp = cpu_buffer->read_stamp;
buffer = cpu_buffer->buffer;
/*
- * Check if someone performed a consuming read to
- * the buffer. A consuming read invalidates the iterator
- * and we need to reset the iterator in this case.
+ * Check if someone performed a consuming read to the buffer
+ * or removed some pages from the buffer. In these cases,
+ * iterator was invalidated and we need to reset it.
*/
if (unlikely(iter->cache_read != cpu_buffer->read ||
- iter->cache_reader_page != cpu_buffer->reader_page))
+ iter->cache_reader_page != cpu_buffer->reader_page ||
+ iter->cache_pages_removed != cpu_buffer->pages_removed))
rb_iter_reset(iter);
again:
cpu_buffer->last_overrun = 0;
rb_head_page_activate(cpu_buffer);
+ cpu_buffer->pages_removed = 0;
}
/* Must have disabled the cpu buffer then done a synchronize_rcu */
/**
* ring_buffer_reset_online_cpus - reset a ring buffer per CPU buffer
* @buffer: The ring buffer to reset a per cpu buffer of
- * @cpu: The CPU buffer to be reset
*/
void ring_buffer_reset_online_cpus(struct trace_buffer *buffer)
{
{
struct trace_event_call *call = file->event_call;
struct trace_array *tr = file->tr;
- unsigned long file_flags = file->flags;
int ret = 0;
int disable;
break;
disable = file->flags & EVENT_FILE_FL_SOFT_DISABLED;
clear_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
+ /* Disable use of trace_buffered_event */
+ trace_buffered_event_disable();
} else
disable = !(file->flags & EVENT_FILE_FL_SOFT_MODE);
if (atomic_inc_return(&file->sm_ref) > 1)
break;
set_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
+ /* Enable use of trace_buffered_event */
+ trace_buffered_event_enable();
}
if (!(file->flags & EVENT_FILE_FL_ENABLED)) {
break;
}
- /* Enable or disable use of trace_buffered_event */
- if ((file_flags & EVENT_FILE_FL_SOFT_DISABLED) !=
- (file->flags & EVENT_FILE_FL_SOFT_DISABLED)) {
- if (file->flags & EVENT_FILE_FL_SOFT_DISABLED)
- trace_buffered_event_enable();
- else
- trace_buffered_event_disable();
- }
-
return ret;
}
* synth_event_gen_cmd_array_start - Start synthetic event command from an array
* @cmd: A pointer to the dynevent_cmd struct representing the new event
* @name: The name of the synthetic event
+ * @mod: The module creating the event, NULL if not created from a module
* @fields: An array of type/name field descriptions
* @n_fields: The number of field descriptions contained in the fields array
*
/**
* event_triggers_call - Call triggers associated with a trace event
* @file: The trace_event_file associated with the event
+ * @buffer: The ring buffer that the event is being written to
* @rec: The trace entry for the event, NULL for unconditional invocation
+ * @event: The event meta data in the ring buffer
*
* For each trigger associated with an event, invoke the trigger
* function registered with the associated trigger command. If rec is
/* Get BTF_KIND_FUNC type */
t = btf_type_by_id(btf, id);
- if (!btf_type_is_func(t))
+ if (!t || !btf_type_is_func(t))
return ERR_PTR(-ENOENT);
/* The type of BTF_KIND_FUNC is BTF_KIND_FUNC_PROTO */
t = btf_type_by_id(btf, t->type);
- if (!btf_type_is_func_proto(t))
+ if (!t || !btf_type_is_func_proto(t))
return ERR_PTR(-ENOENT);
return t;
if (!ctx->params) {
params = find_btf_func_param(ctx->funcname, &ctx->nr_params,
ctx->flags & TPARG_FL_TPOINT);
- if (IS_ERR(params)) {
+ if (IS_ERR_OR_NULL(params)) {
trace_probe_log_err(ctx->offset, NO_BTF_ENTRY);
return PTR_ERR(params);
}
params = find_btf_func_param(ctx->funcname, &nr_params,
ctx->flags & TPARG_FL_TPOINT);
- if (IS_ERR(params)) {
+ if (IS_ERR_OR_NULL(params)) {
if (args_idx != -1) {
/* $arg* requires BTF info */
trace_probe_log_err(0, NOSUP_BTFARG);
* trace_seq_vprintf - sequence printing of trace information
* @s: trace sequence descriptor
* @fmt: printf format string
+ * @args: Arguments for the format string
*
* The tracer may use either sequence operations or its own
* copy to user routines. To simplify formatting of a trace
of_property_read_string(np_pool, "label", &name);
if (!name)
- name = np_pool->name;
+ name = of_node_full_name(np_pool);
}
if (pdev)
pool = gen_pool_get(&pdev->dev, name);
static void damon_test_set_attrs(struct kunit *test)
{
- struct damon_ctx ctx;
+ struct damon_ctx *c = damon_new_ctx();
struct damon_attrs valid_attrs = {
.min_nr_regions = 10, .max_nr_regions = 1000,
.sample_interval = 5000, .aggr_interval = 100000,};
struct damon_attrs invalid_attrs;
- KUNIT_EXPECT_EQ(test, damon_set_attrs(&ctx, &valid_attrs), 0);
+ KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &valid_attrs), 0);
invalid_attrs = valid_attrs;
invalid_attrs.min_nr_regions = 1;
- KUNIT_EXPECT_EQ(test, damon_set_attrs(&ctx, &invalid_attrs), -EINVAL);
+ KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &invalid_attrs), -EINVAL);
invalid_attrs = valid_attrs;
invalid_attrs.max_nr_regions = 9;
- KUNIT_EXPECT_EQ(test, damon_set_attrs(&ctx, &invalid_attrs), -EINVAL);
+ KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &invalid_attrs), -EINVAL);
invalid_attrs = valid_attrs;
invalid_attrs.aggr_interval = 4999;
- KUNIT_EXPECT_EQ(test, damon_set_attrs(&ctx, &invalid_attrs), -EINVAL);
+ KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &invalid_attrs), -EINVAL);
}
static struct kunit_case damon_test_cases[] = {
goto unlock_mutex;
}
- if (!folio_test_hwpoison(folio)) {
+ if (!PageHWPoison(p)) {
unpoison_pr_info("Unpoison: Page was already unpoisoned %#lx\n",
pfn, &unpoison_rs);
goto unlock_mutex;
if (!vma_is_anonymous(vma) && !vma_is_tcp(vma))
goto inval;
- /* find_mergeable_anon_vma uses adjacent vmas which are not locked */
- if (!vma->anon_vma && !vma_is_tcp(vma))
- goto inval;
-
if (!vma_start_read(vma))
goto inval;
+ /*
+ * find_mergeable_anon_vma uses adjacent vmas which are not locked.
+ * This check must happen after vma_start_read(); otherwise, a
+ * concurrent mremap() with MREMAP_DONTUNMAP could dissociate the VMA
+ * from its anon_vma.
+ */
+ if (unlikely(!vma->anon_vma && !vma_is_tcp(vma)))
+ goto inval_end_read;
+
/*
* Due to the possibility of userfault handler dropping mmap_lock, avoid
* it for now and fall back to page fault handling under mmap_lock.
*/
- if (userfaultfd_armed(vma)) {
- vma_end_read(vma);
- goto inval;
- }
+ if (userfaultfd_armed(vma))
+ goto inval_end_read;
/* Check since vm_start/vm_end might change before we lock the VMA */
- if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
- vma_end_read(vma);
- goto inval;
- }
+ if (unlikely(address < vma->vm_start || address >= vma->vm_end))
+ goto inval_end_read;
/* Check if the VMA got isolated after we found it */
if (vma->detached) {
rcu_read_unlock();
return vma;
+
+inval_end_read:
+ vma_end_read(vma);
inval:
rcu_read_unlock();
count_vm_vma_lock_event(VMA_LOCK_ABORT);
VMA_ITERATOR(vmi, mm, 0);
mmap_write_lock(mm);
- for_each_vma(vmi, vma)
+ for_each_vma(vmi, vma) {
+ vma_start_write(vma);
mpol_rebind_policy(vma->vm_policy, new);
+ }
mmap_write_unlock(mm);
}
struct mempolicy *old;
struct mempolicy *new;
+ vma_assert_write_locked(vma);
+
pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
vma->vm_start, vma->vm_end, vma->vm_pgoff,
vma->vm_ops, vma->vm_file,
if (err)
goto mpol_out;
+ /*
+ * Lock the VMAs before scanning for pages to migrate, to ensure we don't
+ * miss a concurrently inserted page.
+ */
+ vma_iter_init(&vmi, mm, start);
+ for_each_vma_range(vmi, vma, end)
+ vma_start_write(vma);
+
ret = queue_pages_range(mm, start, end, nmask,
flags | MPOL_MF_INVERT, &pagelist);
break;
}
+ vma_start_write(vma);
new->home_node = home_node;
err = mbind_range(&vmi, vma, &prev, start, end, new);
mpol_put(new);
* anon pages imported.
*/
if (src->anon_vma && !dst->anon_vma) {
+ vma_start_write(dst);
dst->anon_vma = src->anon_vma;
return anon_vma_clone(dst, src);
}
if (walk->no_vma) {
/*
* pte_offset_map() might apply user-specific validation.
+ * Indeed, on x86_64 the pmd entries set up by init_espfix_ap()
+ * fit its pmd_bad() check (_PAGE_NX set and _PAGE_RW clear),
+ * and CONFIG_EFI_PGT_DUMP efi_mm goes so far as to walk them.
*/
- if (walk->mm == &init_mm)
+ if (walk->mm == &init_mm || addr >= TASK_SIZE)
pte = pte_offset_kernel(pmd, addr);
else
pte = pte_offset_map(pmd, addr);
if (*ppos >= i_size_read(inode))
break;
- error = shmem_get_folio(inode, *ppos / PAGE_SIZE, &folio, SGP_READ);
+ error = shmem_get_folio(inode, *ppos / PAGE_SIZE, &folio,
+ SGP_READ);
if (error) {
if (error == -EINVAL)
error = 0;
if (folio) {
folio_unlock(folio);
- if (folio_test_hwpoison(folio)) {
+ if (folio_test_hwpoison(folio) ||
+ (folio_test_large(folio) &&
+ folio_test_has_hwpoisoned(folio))) {
error = -EIO;
break;
}
folio_put(folio);
folio = NULL;
} else {
- n = splice_zeropage_into_pipe(pipe, *ppos, len);
+ n = splice_zeropage_into_pipe(pipe, *ppos, part);
}
if (!n)
static int p9_client_version(struct p9_client *c)
{
- int err = 0;
+ int err;
struct p9_req_t *req;
char *version = NULL;
int msize;
struct p9_client *clnt;
char *client_id;
- err = 0;
clnt = kmalloc(sizeof(*clnt), GFP_KERNEL);
if (!clnt)
return ERR_PTR(-ENOMEM);
const char *uname, kuid_t n_uname,
const char *aname)
{
- int err = 0;
+ int err;
struct p9_req_t *req;
struct p9_fid *fid;
struct p9_qid qid;
struct p9_req_t *req;
u16 nwqids, count;
- err = 0;
wqids = NULL;
clnt = oldfid->clnt;
if (clone) {
clnt = fid->clnt;
p9_debug(P9_DEBUG_9P, ">>> %s fid %d mode %d\n",
p9_is_proto_dotl(clnt) ? "TLOPEN" : "TOPEN", fid->fid, mode);
- err = 0;
if (fid->mode != -1)
return -EINVAL;
int p9_client_create_dotl(struct p9_fid *ofid, const char *name, u32 flags,
u32 mode, kgid_t gid, struct p9_qid *qid)
{
- int err = 0;
+ int err;
struct p9_client *clnt;
struct p9_req_t *req;
int iounit;
p9_debug(P9_DEBUG_9P, ">>> TCREATE fid %d name %s perm %d mode %d\n",
fid->fid, name, perm, mode);
- err = 0;
clnt = fid->clnt;
if (fid->mode != -1)
int p9_client_symlink(struct p9_fid *dfid, const char *name,
const char *symtgt, kgid_t gid, struct p9_qid *qid)
{
- int err = 0;
+ int err;
struct p9_client *clnt;
struct p9_req_t *req;
int p9_client_fsync(struct p9_fid *fid, int datasync)
{
- int err;
+ int err = 0;
struct p9_client *clnt;
struct p9_req_t *req;
p9_debug(P9_DEBUG_9P, ">>> TFSYNC fid %d datasync:%d\n",
fid->fid, datasync);
- err = 0;
clnt = fid->clnt;
req = p9_client_rpc(clnt, P9_TFSYNC, "dd", fid->fid, datasync);
int p9_client_clunk(struct p9_fid *fid)
{
- int err;
+ int err = 0;
struct p9_client *clnt;
struct p9_req_t *req;
int retries = 0;
again:
p9_debug(P9_DEBUG_9P, ">>> TCLUNK fid %d (try %d)\n",
fid->fid, retries);
- err = 0;
clnt = fid->clnt;
req = p9_client_rpc(clnt, P9_TCLUNK, "d", fid->fid);
int p9_client_remove(struct p9_fid *fid)
{
- int err;
+ int err = 0;
struct p9_client *clnt;
struct p9_req_t *req;
p9_debug(P9_DEBUG_9P, ">>> TREMOVE fid %d\n", fid->fid);
- err = 0;
clnt = fid->clnt;
req = p9_client_rpc(clnt, P9_TREMOVE, "d", fid->fid);
if (!ret)
return ERR_PTR(-ENOMEM);
- err = 0;
clnt = fid->clnt;
req = p9_client_rpc(clnt, P9_TSTAT, "d", fid->fid);
if (!ret)
return ERR_PTR(-ENOMEM);
- err = 0;
clnt = fid->clnt;
req = p9_client_rpc(clnt, P9_TGETATTR, "dq", fid->fid, request_mask);
int p9_client_wstat(struct p9_fid *fid, struct p9_wstat *wst)
{
- int err;
+ int err = 0;
struct p9_req_t *req;
struct p9_client *clnt;
- err = 0;
clnt = fid->clnt;
wst->size = p9_client_statsize(wst, clnt->proto_version);
p9_debug(P9_DEBUG_9P, ">>> TWSTAT fid %d\n",
int p9_client_setattr(struct p9_fid *fid, struct p9_iattr_dotl *p9attr)
{
- int err;
+ int err = 0;
struct p9_req_t *req;
struct p9_client *clnt;
- err = 0;
clnt = fid->clnt;
p9_debug(P9_DEBUG_9P, ">>> TSETATTR fid %d\n", fid->fid);
p9_debug(P9_DEBUG_9P, " valid=%x mode=%x uid=%d gid=%d size=%lld\n",
struct p9_req_t *req;
struct p9_client *clnt;
- err = 0;
clnt = fid->clnt;
p9_debug(P9_DEBUG_9P, ">>> TSTATFS fid %d\n", fid->fid);
int p9_client_rename(struct p9_fid *fid,
struct p9_fid *newdirfid, const char *name)
{
- int err;
+ int err = 0;
struct p9_req_t *req;
struct p9_client *clnt;
- err = 0;
clnt = fid->clnt;
p9_debug(P9_DEBUG_9P, ">>> TRENAME fid %d newdirfid %d name %s\n",
int p9_client_renameat(struct p9_fid *olddirfid, const char *old_name,
struct p9_fid *newdirfid, const char *new_name)
{
- int err;
+ int err = 0;
struct p9_req_t *req;
struct p9_client *clnt;
- err = 0;
clnt = olddirfid->clnt;
p9_debug(P9_DEBUG_9P,
struct p9_client *clnt;
struct p9_fid *attr_fid;
- err = 0;
clnt = file_fid->clnt;
attr_fid = p9_fid_create(clnt);
if (!attr_fid) {
int p9_client_xattrcreate(struct p9_fid *fid, const char *name,
u64 attr_size, int flags)
{
- int err;
+ int err = 0;
struct p9_req_t *req;
struct p9_client *clnt;
p9_debug(P9_DEBUG_9P,
">>> TXATTRCREATE fid %d name %s size %llu flag %d\n",
fid->fid, name, attr_size, flags);
- err = 0;
clnt = fid->clnt;
req = p9_client_rpc(clnt, P9_TXATTRCREATE, "dsqd",
fid->fid, name, attr_size, flags);
p9_debug(P9_DEBUG_9P, ">>> TREADDIR fid %d offset %llu count %d\n",
fid->fid, offset, count);
- err = 0;
clnt = fid->clnt;
rsize = fid->iounit;
struct p9_client *clnt;
struct p9_req_t *req;
- err = 0;
clnt = fid->clnt;
p9_debug(P9_DEBUG_9P,
">>> TMKNOD fid %d name %s mode %d major %d minor %d\n",
struct p9_client *clnt;
struct p9_req_t *req;
- err = 0;
clnt = fid->clnt;
p9_debug(P9_DEBUG_9P, ">>> TMKDIR fid %d name %s mode %d gid %d\n",
fid->fid, name, mode, from_kgid(&init_user_ns, gid));
struct p9_client *clnt;
struct p9_req_t *req;
- err = 0;
clnt = fid->clnt;
p9_debug(P9_DEBUG_9P,
">>> TLOCK fid %d type %i flags %d start %lld length %lld proc_id %d client_id %s\n",
struct p9_client *clnt;
struct p9_req_t *req;
- err = 0;
clnt = fid->clnt;
p9_debug(P9_DEBUG_9P,
">>> TGETLOCK fid %d, type %i start %lld length %lld proc_id %d client_id %s\n",
struct p9_client *clnt;
struct p9_req_t *req;
- err = 0;
clnt = fid->clnt;
p9_debug(P9_DEBUG_9P, ">>> TREADLINK fid %d\n", fid->fid);
void *to = req->rc.sdata + in_hdr_len;
// Fits entirely into the static data? Nothing to do.
- if (req->rc.size < in_hdr_len)
+ if (req->rc.size < in_hdr_len || !pages)
return;
// Really long error message? Tough, truncate the reply. Might get
struct page **in_pages = NULL, **out_pages = NULL;
struct virtio_chan *chan = client->trans;
struct scatterlist *sgs[4];
- size_t offs;
+ size_t offs = 0;
int need_drop = 0;
int kicked = 0;
if (in_pages) {
sgs[out_sgs + in_sgs++] = chan->sg + out + in;
- in += pack_sg_list_p(chan->sg, out + in, VIRTQUEUE_NUM,
- in_pages, in_nr_pages, offs, inlen);
+ pack_sg_list_p(chan->sg, out + in, VIRTQUEUE_NUM,
+ in_pages, in_nr_pages, offs, inlen);
}
BUG_ON(out_sgs + in_sgs > ARRAY_SIZE(sgs));
list_del(&ro->notifier);
spin_unlock(&raw_notifier_lock);
+ rtnl_lock();
lock_sock(sk);
- rtnl_lock();
/* remove current filters & unregister */
if (ro->bound) {
if (ro->dev)
ro->dev = NULL;
ro->count = 0;
free_percpu(ro->uniq);
- rtnl_unlock();
sock_orphan(sk);
sock->sk = NULL;
release_sock(sk);
+ rtnl_unlock();
+
sock_put(sk);
return 0;
return true;
}
}
+EXPORT_SYMBOL(ceph_addr_is_blank);
int ceph_addr_port(const struct ceph_entity_addr *addr)
{
ipv6_ifa_notify(0, ift);
}
- if ((create || list_empty(&idev->tempaddr_list)) &&
- idev->cnf.use_tempaddr > 0) {
+ /* Also create a temporary address if it's enabled but no temporary
+ * address currently exists.
+ * However, we get called with valid_lft == 0, prefered_lft == 0, create == false
+ * as part of cleanup (i.e. deleting the mngtmpaddr).
+ * We don't want that to result in creating a new temporary IP address.
+ */
+ if (list_empty(&idev->tempaddr_list) && (valid_lft || prefered_lft))
+ create = true;
+
+ if (create && idev->cnf.use_tempaddr > 0) {
/* When a new public address is created as described
* in [ADDRCONF], also create a new temporary address.
- * Also create a temporary address if it's enabled but
- * no temporary address currently exists.
*/
read_unlock_bh(&idev->lock);
ipv6_create_tempaddr(ifp, false);
if (!err) {
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
mptcp_copy_inaddrs(sk, ssock->sk);
+ mptcp_event_pm_listener(ssock->sk, MPTCP_EVENT_LISTENER_CREATED);
}
- mptcp_event_pm_listener(ssock->sk, MPTCP_EVENT_LISTENER_CREATED);
-
unlock:
release_sock(sk);
return err;
NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_CHAIN]);
return PTR_ERR(chain);
}
- if (nft_chain_is_bound(chain))
- return -EOPNOTSUPP;
} else if (nla[NFTA_RULE_CHAIN_ID]) {
chain = nft_chain_lookup_byid(net, table, nla[NFTA_RULE_CHAIN_ID],
return -EINVAL;
}
+ if (nft_chain_is_bound(chain))
+ return -EOPNOTSUPP;
+
if (nla[NFTA_RULE_HANDLE]) {
handle = be64_to_cpu(nla_get_be64(nla[NFTA_RULE_HANDLE]));
rule = __nft_rule_lookup(chain, handle);
return nft_data_hold(&priv->data, nft_dreg_to_type(priv->dreg));
}
+static void nft_immediate_chain_deactivate(const struct nft_ctx *ctx,
+ struct nft_chain *chain,
+ enum nft_trans_phase phase)
+{
+ struct nft_ctx chain_ctx;
+ struct nft_rule *rule;
+
+ chain_ctx = *ctx;
+ chain_ctx.chain = chain;
+
+ list_for_each_entry(rule, &chain->rules, list)
+ nft_rule_expr_deactivate(&chain_ctx, rule, phase);
+}
+
static void nft_immediate_deactivate(const struct nft_ctx *ctx,
const struct nft_expr *expr,
enum nft_trans_phase phase)
{
const struct nft_immediate_expr *priv = nft_expr_priv(expr);
const struct nft_data *data = &priv->data;
- struct nft_ctx chain_ctx;
struct nft_chain *chain;
- struct nft_rule *rule;
if (priv->dreg == NFT_REG_VERDICT) {
switch (data->verdict.code) {
if (!nft_chain_binding(chain))
break;
- chain_ctx = *ctx;
- chain_ctx.chain = chain;
-
- list_for_each_entry(rule, &chain->rules, list)
- nft_rule_expr_deactivate(&chain_ctx, rule, phase);
-
switch (phase) {
case NFT_TRANS_PREPARE_ERROR:
nf_tables_unbind_chain(ctx, chain);
- fallthrough;
+ nft_deactivate_next(ctx->net, chain);
+ break;
case NFT_TRANS_PREPARE:
+ nft_immediate_chain_deactivate(ctx, chain, phase);
nft_deactivate_next(ctx->net, chain);
break;
default:
+ nft_immediate_chain_deactivate(ctx, chain, phase);
nft_chain_del(chain);
chain->bound = false;
nft_use_dec(&chain->table->use);
static int nft_rbtree_gc_elem(const struct nft_set *__set,
struct nft_rbtree *priv,
- struct nft_rbtree_elem *rbe)
+ struct nft_rbtree_elem *rbe,
+ u8 genmask)
{
struct nft_set *set = (struct nft_set *)__set;
struct rb_node *prev = rb_prev(&rbe->node);
- struct nft_rbtree_elem *rbe_prev = NULL;
+ struct nft_rbtree_elem *rbe_prev;
struct nft_set_gc_batch *gcb;
gcb = nft_set_gc_batch_check(set, NULL, GFP_ATOMIC);
if (!gcb)
return -ENOMEM;
- /* search for expired end interval coming before this element. */
+ /* search for end interval coming before this element.
+ * end intervals don't carry a timeout extension, they
+ * are coupled with the interval start element.
+ */
while (prev) {
rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
- if (nft_rbtree_interval_end(rbe_prev))
+ if (nft_rbtree_interval_end(rbe_prev) &&
+ nft_set_elem_active(&rbe_prev->ext, genmask))
break;
prev = rb_prev(prev);
}
- if (rbe_prev) {
+ if (prev) {
+ rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
+
rb_erase(&rbe_prev->node, &priv->root);
atomic_dec(&set->nelems);
+ nft_set_gc_batch_add(gcb, rbe_prev);
}
rb_erase(&rbe->node, &priv->root);
/* perform garbage collection to avoid bogus overlap reports. */
if (nft_set_elem_expired(&rbe->ext)) {
- err = nft_rbtree_gc_elem(set, priv, rbe);
+ err = nft_rbtree_gc_elem(set, priv, rbe, genmask);
if (err < 0)
return err;
if (dev) {
sll->sll_hatype = dev->type;
sll->sll_halen = dev->addr_len;
- memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
+ memcpy(sll->sll_addr_flex, dev->dev_addr, dev->addr_len);
} else {
sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */
sll->sll_halen = 0;
"Attribute type expected to be TCA_MQPRIO_MIN_RATE64");
return -EINVAL;
}
+
+ if (nla_len(attr) != sizeof(u64)) {
+ NL_SET_ERR_MSG_ATTR(extack, attr,
+ "Attribute TCA_MQPRIO_MIN_RATE64 expected to have 8 bytes length");
+ return -EINVAL;
+ }
+
if (i >= qopt->num_tc)
break;
priv->min_rate[i] = nla_get_u64(attr);
"Attribute type expected to be TCA_MQPRIO_MAX_RATE64");
return -EINVAL;
}
+
+ if (nla_len(attr) != sizeof(u64)) {
+ NL_SET_ERR_MSG_ATTR(extack, attr,
+ "Attribute TCA_MQPRIO_MAX_RATE64 expected to have 8 bytes length");
+ return -EINVAL;
+ }
+
if (i >= qopt->num_tc)
break;
priv->max_rate[i] = nla_get_u64(attr);
skb_reset_network_header(*skb);
skb_pull(*skb, tipc_ehdr_size(ehdr));
- pskb_trim(*skb, (*skb)->len - aead->authsize);
+ if (pskb_trim(*skb, (*skb)->len - aead->authsize))
+ goto free_skb;
/* Validate TIPCv2 message */
if (unlikely(!tipc_msg_validate(skb))) {
n->capabilities, &n->bc_entry.inputq1,
&n->bc_entry.namedq, snd_l, &n->bc_entry.link)) {
pr_warn("Broadcast rcv link creation failed, no memory\n");
- kfree(n);
+ tipc_node_put(n);
n = NULL;
goto exit;
}
return 0;
}
-static void unix_mkname_bsd(struct sockaddr_un *sunaddr, int addr_len)
+static int unix_mkname_bsd(struct sockaddr_un *sunaddr, int addr_len)
{
+ struct sockaddr_storage *addr = (struct sockaddr_storage *)sunaddr;
+ short offset = offsetof(struct sockaddr_storage, __data);
+
+ BUILD_BUG_ON(offset != offsetof(struct sockaddr_un, sun_path));
+
/* This may look like an off by one error but it is a bit more
* subtle. 108 is the longest valid AF_UNIX path for a binding.
* sun_path[108] doesn't as such exist. However in kernel space
* we are guaranteed that it is a valid memory location in our
* kernel address buffer because syscall functions always pass
* a pointer of struct sockaddr_storage which has a bigger buffer
- * than 108.
+ * than 108. Also, we must terminate sun_path for strlen() in
+ * getname_kernel().
+ */
+ addr->__data[addr_len - offset] = 0;
+
+ /* Don't pass sunaddr->sun_path to strlen(). Otherwise, 108 will
+ * cause panic if CONFIG_FORTIFY_SOURCE=y. Let __fortify_strlen()
+ * know the actual buffer.
*/
- ((char *)sunaddr)[addr_len] = 0;
+ return strlen(addr->__data) + offset + 1;
}
static void __unix_remove_socket(struct sock *sk)
struct path parent;
int err;
- unix_mkname_bsd(sunaddr, addr_len);
- addr_len = strlen(sunaddr->sun_path) +
- offsetof(struct sockaddr_un, sun_path) + 1;
-
+ addr_len = unix_mkname_bsd(sunaddr, addr_len);
addr = unix_create_addr(sunaddr, addr_len);
if (!addr)
return -ENOMEM;
+++ /dev/null
-// SPDX-License-Identifier: GPL-2.0
-/// Use DEFINE_DEBUGFS_ATTRIBUTE rather than DEFINE_SIMPLE_ATTRIBUTE
-/// for debugfs files.
-///
-//# Rationale: DEFINE_SIMPLE_ATTRIBUTE + debugfs_create_file()
-//# imposes some significant overhead as compared to
-//# DEFINE_DEBUGFS_ATTRIBUTE + debugfs_create_file_unsafe().
-//
-// Copyright (C): 2016 Nicolai Stange
-// Options: --no-includes
-//
-
-virtual context
-virtual patch
-virtual org
-virtual report
-
-@dsa@
-declarer name DEFINE_SIMPLE_ATTRIBUTE;
-identifier dsa_fops;
-expression dsa_get, dsa_set, dsa_fmt;
-position p;
-@@
-DEFINE_SIMPLE_ATTRIBUTE@p(dsa_fops, dsa_get, dsa_set, dsa_fmt);
-
-@dcf@
-expression name, mode, parent, data;
-identifier dsa.dsa_fops;
-@@
-debugfs_create_file(name, mode, parent, data, &dsa_fops)
-
-
-@context_dsa depends on context && dcf@
-declarer name DEFINE_DEBUGFS_ATTRIBUTE;
-identifier dsa.dsa_fops;
-expression dsa.dsa_get, dsa.dsa_set, dsa.dsa_fmt;
-@@
-* DEFINE_SIMPLE_ATTRIBUTE(dsa_fops, dsa_get, dsa_set, dsa_fmt);
-
-
-@patch_dcf depends on patch expression@
-expression name, mode, parent, data;
-identifier dsa.dsa_fops;
-@@
-- debugfs_create_file(name, mode, parent, data, &dsa_fops)
-+ debugfs_create_file_unsafe(name, mode, parent, data, &dsa_fops)
-
-@patch_dsa depends on patch_dcf && patch@
-identifier dsa.dsa_fops;
-expression dsa.dsa_get, dsa.dsa_set, dsa.dsa_fmt;
-@@
-- DEFINE_SIMPLE_ATTRIBUTE(dsa_fops, dsa_get, dsa_set, dsa_fmt);
-+ DEFINE_DEBUGFS_ATTRIBUTE(dsa_fops, dsa_get, dsa_set, dsa_fmt);
-
-
-@script:python depends on org && dcf@
-fops << dsa.dsa_fops;
-p << dsa.p;
-@@
-msg="%s should be defined with DEFINE_DEBUGFS_ATTRIBUTE" % (fops)
-coccilib.org.print_todo(p[0], msg)
-
-@script:python depends on report && dcf@
-fops << dsa.dsa_fops;
-p << dsa.p;
-@@
-msg="WARNING: %s should be defined with DEFINE_DEBUGFS_ATTRIBUTE" % (fops)
-coccilib.report.print_report(p[0], msg)
temorary||temporary
temproarily||temporarily
temperture||temperature
-thead||thread
theads||threads
therfore||therefore
thier||their
ret = -EACCES;
down_write(&key->sem);
- if (!capable(CAP_SYS_ADMIN)) {
+ {
+ bool is_privileged_op = false;
+
/* only the sysadmin can chown a key to some other UID */
if (user != (uid_t) -1 && !uid_eq(key->uid, uid))
- goto error_put;
+ is_privileged_op = true;
/* only the sysadmin can set the key's GID to a group other
* than one of those that the current process subscribes to */
if (group != (gid_t) -1 && !gid_eq(gid, key->gid) && !in_group_p(gid))
+ is_privileged_op = true;
+
+ if (is_privileged_op && !capable(CAP_SYS_ADMIN))
goto error_put;
}
down_write(&key->sem);
/* if we're not the sysadmin, we can only change a key that we own */
- if (capable(CAP_SYS_ADMIN) || uid_eq(key->uid, current_fsuid())) {
+ if (uid_eq(key->uid, current_fsuid()) || capable(CAP_SYS_ADMIN)) {
key->perm = perm;
notify_key(key, NOTIFY_KEY_SETATTR, 0);
ret = 0;
}
list_for_each_entry(fb, &client->ump->block_list, list) {
- if (fb->info.first_group < 0 ||
- fb->info.first_group + fb->info.num_groups > SNDRV_UMP_MAX_GROUPS)
+ if (fb->info.first_group + fb->info.num_groups > SNDRV_UMP_MAX_GROUPS)
break;
group = &client->groups[fb->info.first_group];
for (i = 0; i < fb->info.num_groups; i++, group++) {
SND_PCI_QUIRK(0x1028, 0x0C50, "Dolphin", CS8409_DOLPHIN),
SND_PCI_QUIRK(0x1028, 0x0C51, "Dolphin", CS8409_DOLPHIN),
SND_PCI_QUIRK(0x1028, 0x0C52, "Dolphin", CS8409_DOLPHIN),
+ SND_PCI_QUIRK(0x1028, 0x0C73, "Dolphin", CS8409_DOLPHIN),
+ SND_PCI_QUIRK(0x1028, 0x0C75, "Dolphin", CS8409_DOLPHIN),
+ SND_PCI_QUIRK(0x1028, 0x0C7D, "Dolphin", CS8409_DOLPHIN),
+ SND_PCI_QUIRK(0x1028, 0x0C7F, "Dolphin", CS8409_DOLPHIN),
{} /* terminator */
};
SND_PCI_QUIRK(0x1028, 0x0cbd, "Dell Oasis 13 CS MTL-U", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1028, 0x0cbe, "Dell Oasis 13 2-IN-1 MTL-U", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1028, 0x0cbf, "Dell Oasis 13 Low Weight MTU-L", ALC245_FIXUP_CS35L41_SPI_2),
- SND_PCI_QUIRK(0x1028, 0x0cc1, "Dell Oasis 14 MTL-H/U", ALC287_FIXUP_CS35L41_I2C_2),
- SND_PCI_QUIRK(0x1028, 0x0cc2, "Dell Oasis 14 2-in-1 MTL-H/U", ALC287_FIXUP_CS35L41_I2C_2),
- SND_PCI_QUIRK(0x1028, 0x0cc3, "Dell Oasis 14 Low Weight MTL-U", ALC287_FIXUP_CS35L41_I2C_2),
- SND_PCI_QUIRK(0x1028, 0x0cc4, "Dell Oasis 16 MTL-H/U", ALC287_FIXUP_CS35L41_I2C_2),
- SND_PCI_QUIRK(0x1028, 0x0cc5, "Dell Oasis MLK 14 RPL-P", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x1028, 0x0cc1, "Dell Oasis 14 MTL-H/U", ALC245_FIXUP_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x1028, 0x0cc2, "Dell Oasis 14 2-in-1 MTL-H/U", ALC245_FIXUP_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x1028, 0x0cc3, "Dell Oasis 14 Low Weight MTL-U", ALC245_FIXUP_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x1028, 0x0cc4, "Dell Oasis 16 MTL-H/U", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
SND_PCI_QUIRK(0x103c, 0x880d, "HP EliteBook 830 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8811, "HP Spectre x360 15-eb1xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1),
SND_PCI_QUIRK(0x103c, 0x8812, "HP Spectre x360 15-eb1xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1),
+ SND_PCI_QUIRK(0x103c, 0x881d, "HP 250 G8 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
SND_PCI_QUIRK(0x103c, 0x8846, "HP EliteBook 850 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8847, "HP EliteBook x360 830 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x884b, "HP EliteBook 840 Aero G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8b96, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
SND_PCI_QUIRK(0x103c, 0x8b97, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
SND_PCI_QUIRK(0x103c, 0x8bf0, "HP", ALC236_FIXUP_HP_GPIO_LED),
- SND_PCI_QUIRK(0x103c, 0x8c26, "HP HP EliteBook 800G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8c46, "HP EliteBook 830 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8c47, "HP EliteBook 840 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8c48, "HP EliteBook 860 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8c49, "HP Elite x360 830 2-in-1 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8c70, "HP EliteBook 835 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8c71, "HP EliteBook 845 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8c72, "HP EliteBook 865 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x1043, 0x1c9f, "ASUS G614JI", ALC285_FIXUP_ASUS_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x1caf, "ASUS G634JYR/JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1d1f, "ASUS ROG Strix G17 2023 (G713PV)", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x1043, 0x1d42, "ASUS Zephyrus G14 2022", ALC289_FIXUP_ASUS_GA401),
SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE),
SND_PCI_QUIRK(0x1043, 0x1e02, "ASUS UX3402", ALC245_FIXUP_CS35L41_SPI_2),
spec = codec->spec;
spec->gen.shared_mic_vref_pin = 0x18;
codec->power_save_node = 0;
+ spec->en_3kpull_low = true;
#ifdef CONFIG_PM
codec->patch_ops.suspend = alc269_suspend;
spec->shutup = alc256_shutup;
spec->init_hook = alc256_init;
spec->gen.mixer_nid = 0; /* ALC256 does not have any loopback mixer path */
- if (codec->bus->pci->vendor == PCI_VENDOR_ID_AMD)
- spec->en_3kpull_low = true;
+ if (codec->core.vendor_id == 0x10ec0236 &&
+ codec->bus->pci->vendor != PCI_VENDOR_ID_AMD)
+ spec->en_3kpull_low = false;
break;
case 0x10ec0257:
spec->codec_variant = ALC269_TYPE_ALC257;
spec->shutup = alc256_shutup;
spec->init_hook = alc256_init;
spec->gen.mixer_nid = 0;
+ spec->en_3kpull_low = false;
break;
case 0x10ec0215:
case 0x10ec0245:
void snd_ymfpci_free_gameport(struct snd_ymfpci *chip) { }
#endif /* SUPPORT_JOYSTICK */
-static int snd_card_ymfpci_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int __snd_card_ymfpci_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
static int dev;
struct snd_card *card;
return 0;
}
+static int snd_card_ymfpci_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
+{
+ return snd_card_free_on_error(&pci->dev, __snd_card_ymfpci_probe(pci, pci_id));
+}
+
static struct pci_driver ymfpci_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_ymfpci_ids,
DMI_MATCH(DMI_PRODUCT_NAME, "M3402RA"),
}
},
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "M6500RC"),
+ }
+ },
{
.driver_data = &acp6x_card,
.matches = {
#define I2S_MCK_12M288 12288000UL
#define I2S_MCK_11M2896 11289600UL
+#define I2S_MCK_6M144 6144000UL
/* mck = (32 * (imckfs+1) / (imckdiv+1)) * fs */
static const struct atmel_i2s_gck_param gck_params[] = {
+ /* mck = 6.144Mhz */
+ { 8000, I2S_MCK_6M144, 1, 47}, /* mck = 768 fs */
+
/* mck = 12.288MHz */
- { 8000, I2S_MCK_12M288, 0, 47}, /* mck = 1536 fs */
{ 16000, I2S_MCK_12M288, 1, 47}, /* mck = 768 fs */
{ 24000, I2S_MCK_12M288, 3, 63}, /* mck = 512 fs */
{ 32000, I2S_MCK_12M288, 3, 47}, /* mck = 384 fs */
static const DECLARE_TLV_DB_RANGE(dig_vol_tlv,
0, 0, TLV_DB_SCALE_ITEM(TLV_DB_GAIN_MUTE, 0, 1),
1, 913, TLV_DB_MINMAX_ITEM(-10200, 1200));
-static DECLARE_TLV_DB_SCALE(amp_gain_tlv, 0, 1, 1);
+static DECLARE_TLV_DB_SCALE(amp_gain_tlv, 50, 100, 0);
static const struct snd_kcontrol_new dre_ctrl =
SOC_DAPM_SINGLE("Switch", CS35L41_PWR_CTRL3, 20, 1, 0);
};
MODULE_DEVICE_TABLE(i2c, cs35l56_id_i2c);
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id cs35l56_asoc_acpi_match[] = {
+ { "CSC355C", 0 },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, cs35l56_asoc_acpi_match);
+#endif
+
static struct i2c_driver cs35l56_i2c_driver = {
.driver = {
.name = "cs35l56",
.pm = &cs35l56_pm_ops_i2c_spi,
+ .acpi_match_table = ACPI_PTR(cs35l56_asoc_acpi_match),
},
.id_table = cs35l56_id_i2c,
.probe = cs35l56_i2c_probe,
};
MODULE_DEVICE_TABLE(spi, cs35l56_id_spi);
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id cs35l56_asoc_acpi_match[] = {
+ { "CSC355C", 0 },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, cs35l56_asoc_acpi_match);
+#endif
+
static struct spi_driver cs35l56_spi_driver = {
.driver = {
.name = "cs35l56",
.pm = &cs35l56_pm_ops_i2c_spi,
+ .acpi_match_table = ACPI_PTR(cs35l56_asoc_acpi_match),
},
.id_table = cs35l56_id_spi,
.probe = cs35l56_spi_probe,
// Copyright (C) 2023 Cirrus Logic, Inc. and
// Cirrus Logic International Semiconductor Ltd.
-#include <linux/acpi.h>
#include <linux/completion.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
return 0;
}
-static int cs35l56_acpi_get_name(struct cs35l56_private *cs35l56)
+static int cs35l56_get_firmware_uid(struct cs35l56_private *cs35l56)
{
- acpi_handle handle = ACPI_HANDLE(cs35l56->base.dev);
- const char *sub;
+ struct device *dev = cs35l56->base.dev;
+ const char *prop;
+ int ret;
- /* If there is no ACPI_HANDLE, there is no ACPI for this system, return 0 */
- if (!handle)
+ ret = device_property_read_string(dev, "cirrus,firmware-uid", &prop);
+ /* If bad sw node property, return 0 and fall back to legacy firmware path */
+ if (ret < 0)
return 0;
- sub = acpi_get_subsystem_id(handle);
- if (IS_ERR(sub)) {
- /* If bad ACPI, return 0 and fallback to legacy firmware path, otherwise fail */
- if (PTR_ERR(sub) == -ENODATA)
- return 0;
- else
- return PTR_ERR(sub);
- }
+ cs35l56->dsp.system_name = devm_kstrdup(dev, prop, GFP_KERNEL);
+ if (cs35l56->dsp.system_name == NULL)
+ return -ENOMEM;
- cs35l56->dsp.system_name = sub;
- dev_dbg(cs35l56->base.dev, "Subsystem ID: %s\n", cs35l56->dsp.system_name);
+ dev_dbg(dev, "Firmware UID: %s\n", cs35l56->dsp.system_name);
return 0;
}
gpiod_set_value_cansleep(cs35l56->base.reset_gpio, 1);
}
- ret = cs35l56_acpi_get_name(cs35l56);
+ ret = cs35l56_get_firmware_uid(cs35l56);
if (ret != 0)
goto err;
regcache_cache_only(cs35l56->base.regmap, true);
- kfree(cs35l56->dsp.system_name);
-
gpiod_set_value_cansleep(cs35l56->base.reset_gpio, 0);
regulator_bulk_disable(ARRAY_SIZE(cs35l56->supplies), cs35l56->supplies);
}
struct da7219_priv *da7219 = snd_soc_component_get_drvdata(component);
u8 events[DA7219_AAD_IRQ_REG_MAX];
u8 statusa;
- int i, report = 0, mask = 0;
+ int i, ret, report = 0, mask = 0;
/* Read current IRQ events */
- regmap_bulk_read(da7219->regmap, DA7219_ACCDET_IRQ_EVENT_A,
- events, DA7219_AAD_IRQ_REG_MAX);
+ ret = regmap_bulk_read(da7219->regmap, DA7219_ACCDET_IRQ_EVENT_A,
+ events, DA7219_AAD_IRQ_REG_MAX);
+ if (ret) {
+ dev_warn_ratelimited(component->dev, "Failed to read IRQ events: %d\n", ret);
+ return IRQ_NONE;
+ }
if (!events[DA7219_AAD_IRQ_REG_A] && !events[DA7219_AAD_IRQ_REG_B])
return IRQ_NONE;
}
}
}
+
+ synchronize_irq(da7219_aad->irq);
}
void da7219_aad_resume(struct snd_soc_component *component)
"dmic data at high level",
"dmic data at low level",
};
-static const unsigned int es8316_dmic_values[] = { 0, 1, 2 };
+static const unsigned int es8316_dmic_values[] = { 0, 2, 3 };
static const struct soc_enum es8316_dmic_src_enum =
SOC_VALUE_ENUM_SINGLE(ES8316_ADC_DMIC, 0, 3,
ARRAY_SIZE(es8316_dmic_txt),
pm_runtime_get_noresume(dev);
ret = regmap_read(max98363->regmap, MAX98363_R21FF_REV_ID, ®);
- if (!ret) {
+ if (!ret)
dev_info(dev, "Revision ID: %X\n", reg);
- return ret;
- }
+ else
+ goto out;
if (max98363->first_hw_init) {
regcache_cache_bypass(max98363->regmap, false);
max98363->first_hw_init = true;
max98363->hw_init = true;
+out:
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
- return 0;
+ return ret;
}
#define MAX98363_RATES SNDRV_PCM_RATE_8000_192000
#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <sound/tlv.h>
#include "nau8821.h"
+#define NAU8821_JD_ACTIVE_HIGH BIT(0)
+
+static int nau8821_quirk;
+static int quirk_override = -1;
+module_param_named(quirk, quirk_override, uint, 0444);
+MODULE_PARM_DESC(quirk, "Board-specific quirk override");
+
#define NAU_FREF_MAX 13500000
#define NAU_FVCO_MAX 100000000
#define NAU_FVCO_MIN 90000000
return 0;
}
+/* Please keep this list alphabetically sorted */
+static const struct dmi_system_id nau8821_quirk_table[] = {
+ {
+ /* Positivo CW14Q01P-V2 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Positivo Tecnologia SA"),
+ DMI_MATCH(DMI_BOARD_NAME, "CW14Q01P-V2"),
+ },
+ .driver_data = (void *)(NAU8821_JD_ACTIVE_HIGH),
+ },
+ {}
+};
+
+static void nau8821_check_quirks(void)
+{
+ const struct dmi_system_id *dmi_id;
+
+ if (quirk_override != -1) {
+ nau8821_quirk = quirk_override;
+ return;
+ }
+
+ dmi_id = dmi_first_match(nau8821_quirk_table);
+ if (dmi_id)
+ nau8821_quirk = (unsigned long)dmi_id->driver_data;
+}
+
static int nau8821_i2c_probe(struct i2c_client *i2c)
{
struct device *dev = &i2c->dev;
nau8821->dev = dev;
nau8821->irq = i2c->irq;
+
+ nau8821_check_quirks();
+
+ if (nau8821_quirk & NAU8821_JD_ACTIVE_HIGH)
+ nau8821->jkdet_polarity = 0;
+
nau8821_print_device_properties(nau8821);
nau8821_reset_chip(nau8821->regmap);
case 0x300a:
case 0xc000:
case 0xc710:
+ case 0xcf01:
case 0xc860 ... 0xc863:
case 0xc870 ... 0xc873:
return true;
{
struct rt1308_sdw_priv *rt1308 = dev_get_drvdata(dev);
int ret = 0;
- unsigned int tmp;
+ unsigned int tmp, hibernation_flag;
if (rt1308->hw_init)
return 0;
pm_runtime_get_noresume(&slave->dev);
+ regmap_read(rt1308->regmap, 0xcf01, &hibernation_flag);
+ if ((hibernation_flag != 0x00) && rt1308->first_hw_init)
+ goto _preset_ready_;
+
/* sw reset */
regmap_write(rt1308->regmap, RT1308_SDW_RESET, 0);
regmap_write(rt1308->regmap, 0xc100, 0xd7);
regmap_write(rt1308->regmap, 0xc101, 0xd7);
+ /* apply BQ params */
+ rt1308_apply_bq_params(rt1308);
+
+ regmap_write(rt1308->regmap, 0xcf01, 0x01);
+
+_preset_ready_:
if (rt1308->first_hw_init) {
regcache_cache_bypass(rt1308->regmap, false);
regcache_mark_dirty(rt1308->regmap);
struct rt5665_priv *rt5665 = snd_soc_component_get_drvdata(component);
regmap_write(rt5665->regmap, RT5665_RESET, 0);
+
+ regulator_bulk_disable(ARRAY_SIZE(rt5665->supplies), rt5665->supplies);
}
#ifdef CONFIG_PM
if (!rt5682->first_hw_init)
return 0;
- if (!slave->unattach_request)
+ if (!slave->unattach_request) {
+ if (rt5682->disable_irq == true) {
+ mutex_lock(&rt5682->disable_irq_lock);
+ sdw_write_no_pm(slave, SDW_SCP_INTMASK1, SDW_SCP_INT1_IMPL_DEF);
+ rt5682->disable_irq = false;
+ mutex_unlock(&rt5682->disable_irq_lock);
+ }
goto regmap_sync;
+ }
time = wait_for_completion_timeout(&slave->initialization_complete,
msecs_to_jiffies(RT5682_PROBE_TIMEOUT));
if (!rt711->first_hw_init)
return 0;
- if (!slave->unattach_request)
+ if (!slave->unattach_request) {
+ if (rt711->disable_irq == true) {
+ mutex_lock(&rt711->disable_irq_lock);
+ sdw_write_no_pm(slave, SDW_SCP_SDCA_INTMASK1, SDW_SCP_SDCA_INTMASK_SDCA_0);
+ sdw_write_no_pm(slave, SDW_SCP_SDCA_INTMASK2, SDW_SCP_SDCA_INTMASK_SDCA_8);
+ rt711->disable_irq = false;
+ mutex_unlock(&rt711->disable_irq_lock);
+ }
goto regmap_sync;
+ }
time = wait_for_completion_timeout(&slave->initialization_complete,
msecs_to_jiffies(RT711_PROBE_TIMEOUT));
if (!rt711->first_hw_init)
return 0;
- if (!slave->unattach_request)
+ if (!slave->unattach_request) {
+ if (rt711->disable_irq == true) {
+ mutex_lock(&rt711->disable_irq_lock);
+ sdw_write_no_pm(slave, SDW_SCP_INTMASK1, SDW_SCP_INT1_IMPL_DEF);
+ rt711->disable_irq = false;
+ mutex_unlock(&rt711->disable_irq_lock);
+ }
goto regmap_sync;
+ }
time = wait_for_completion_timeout(&slave->initialization_complete,
msecs_to_jiffies(RT711_PROBE_TIMEOUT));
if (!rt712->first_hw_init)
return 0;
- if (!slave->unattach_request)
+ if (!slave->unattach_request) {
+ if (rt712->disable_irq == true) {
+ mutex_lock(&rt712->disable_irq_lock);
+ sdw_write_no_pm(slave, SDW_SCP_SDCA_INTMASK1, SDW_SCP_SDCA_INTMASK_SDCA_0);
+ sdw_write_no_pm(slave, SDW_SCP_SDCA_INTMASK2, SDW_SCP_SDCA_INTMASK_SDCA_8);
+ rt712->disable_irq = false;
+ mutex_unlock(&rt712->disable_irq_lock);
+ }
goto regmap_sync;
+ }
time = wait_for_completion_timeout(&slave->initialization_complete,
msecs_to_jiffies(RT712_PROBE_TIMEOUT));
if (!rt722->first_hw_init)
return 0;
- if (!slave->unattach_request)
+ if (!slave->unattach_request) {
+ if (rt722->disable_irq == true) {
+ mutex_lock(&rt722->disable_irq_lock);
+ sdw_write_no_pm(slave, SDW_SCP_SDCA_INTMASK1, SDW_SCP_SDCA_INTMASK_SDCA_6);
+ sdw_write_no_pm(slave, SDW_SCP_SDCA_INTMASK2, SDW_SCP_SDCA_INTMASK_SDCA_8);
+ rt722->disable_irq = false;
+ mutex_unlock(&rt722->disable_irq_lock);
+ }
goto regmap_sync;
+ }
time = wait_for_completion_timeout(&slave->initialization_complete,
msecs_to_jiffies(RT722_PROBE_TIMEOUT));
if (client->addr != tasdev->dev_addr) {
client->addr = tasdev->dev_addr;
- if (tasdev->cur_book == book) {
- ret = regmap_write(map,
- TASDEVICE_PAGE_SELECT, 0);
- if (ret < 0) {
- dev_err(tas_priv->dev, "%s, E=%d\n",
- __func__, ret);
- goto out;
- }
+ /* All tas2781s share the same regmap, so clear the page
+ * select inside the regmap when switching to another tas2781.
+ * Register 0 in any page and any book of the tas2781 is
+ * the same page-switching register.
+ */
+ ret = regmap_write(map, TASDEVICE_PAGE_SELECT, 0);
+ if (ret < 0) {
+ dev_err(tas_priv->dev, "%s, E=%d\n",
+ __func__, ret);
+ goto out;
}
- goto out;
}
if (tasdev->cur_book != book) {
regmap_update_bits(wm8904->regmap, WM8904_BIAS_CONTROL_0,
WM8904_POBCTRL, 0);
+ /* Fill the cache for the ADC test register */
+ regmap_read(wm8904->regmap, WM8904_ADC_TEST_0, &val);
+
/* Can leave the device powered off until we need it */
regcache_cache_only(wm8904->regmap, true);
regulator_bulk_disable(ARRAY_SIZE(wm8904->supplies), wm8904->supplies);
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
// Copyright 2018 NXP
#include <linux/bitfield.h>
MODULE_AUTHOR("Cosmin-Gabriel Samoila <cosmin.samoila@nxp.com>");
MODULE_DESCRIPTION("NXP PDM Microphone Interface (MICFIL) driver");
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("Dual BSD/GPL");
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* PDM Microphone Interface for the NXP i.MX SoC
* Copyright 2018 NXP
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
regmap_update_bits(regmap, REG_SPDIF_SCR, dmaen, 0);
regmap_update_bits(regmap, REG_SPDIF_SIE, intr, 0);
+ regmap_write(regmap, REG_SPDIF_STL, 0x0);
+ regmap_write(regmap, REG_SPDIF_STR, 0x0);
break;
default:
return -EINVAL;
DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
DMI_MATCH(DMI_PRODUCT_NAME, "Lunar Lake Client Platform"),
},
- .driver_data = (void *)(RT711_JD2_100K),
+ .driver_data = (void *)(RT711_JD2),
},
{}
};
jack = &ctx->sdw_headset;
snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
- snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOICECOMMAND);
- snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEUP);
- snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOLUMEDOWN);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOLUMEUP);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEDOWN);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOICECOMMAND);
ret = snd_soc_component_set_jack(component, jack, NULL);
struct axg_tdm_stream *ts,
unsigned int offset)
{
- unsigned int val, ch = ts->channels;
- unsigned long mask;
- int i, j;
+ unsigned int ch = ts->channels;
+ u32 val[AXG_TDM_NUM_LANES];
+ int i, j, k;
+
+ /*
+ * We need to mimic the slot distribution used by the HW to keep the
+ * channel placement consistent regardless of the number of channels
+ * in the stream. This is why the odd algorithm below is used.
+ */
+ memset(val, 0, sizeof(*val) * AXG_TDM_NUM_LANES);
/*
* Distribute the channels of the stream over the available slots
- * of each TDM lane
+ * of each TDM lane. We need to go over the 32 slots ...
*/
- for (i = 0; i < AXG_TDM_NUM_LANES; i++) {
- val = 0;
- mask = ts->mask[i];
-
- for (j = find_first_bit(&mask, 32);
- (j < 32) && ch;
- j = find_next_bit(&mask, 32, j + 1)) {
- val |= 1 << j;
- ch -= 1;
+ for (i = 0; (i < 32) && ch; i += 2) {
+ /* ... of all the lanes ... */
+ for (j = 0; j < AXG_TDM_NUM_LANES; j++) {
+ /* ... then distribute the channels in pairs */
+ for (k = 0; k < 2; k++) {
+ if ((BIT(i + k) & ts->mask[j]) && ch) {
+ val[j] |= BIT(i + k);
+ ch -= 1;
+ }
+ }
}
-
- regmap_write(map, offset, val);
- offset += regmap_get_reg_stride(map);
}
/*
return -EINVAL;
}
+ for (i = 0; i < AXG_TDM_NUM_LANES; i++) {
+ regmap_write(map, offset, val[i]);
+ offset += regmap_get_reg_stride(map);
+ }
+
return 0;
}
EXPORT_SYMBOL_GPL(axg_tdm_formatter_set_channel_masks);
switch (ret) {
case -EPROBE_DEFER:
case -ENOTSUPP:
+ case -EINVAL:
break;
default:
dev_err(rtd->dev,
/* there is no point preparing this FE if there are no BEs */
if (list_empty(&fe->dpcm[stream].be_clients)) {
- dev_err(fe->dev, "ASoC: no backend DAIs enabled for %s\n",
- fe->dai_link->name);
+ /* dev_err_once() for visibility, dev_dbg() for debugging UCM profiles */
+ dev_err_once(fe->dev, "ASoC: no backend DAIs enabled for %s, possibly missing ALSA mixer-based routing or UCM profile\n",
+ fe->dai_link->name);
+ dev_dbg(fe->dev, "ASoC: no backend DAIs enabled for %s\n",
+ fe->dai_link->name);
ret = -EINVAL;
goto out;
}
static int hda_ipc3_post_trigger(struct snd_sof_dev *sdev, struct snd_soc_dai *cpu_dai,
struct snd_pcm_substream *substream, int cmd)
{
+ struct hdac_ext_stream *hext_stream = hda_get_hext_stream(sdev, cpu_dai, substream);
struct snd_soc_dapm_widget *w = snd_soc_dai_get_widget(cpu_dai, substream->stream);
switch (cmd) {
case SNDRV_PCM_TRIGGER_STOP:
{
struct snd_sof_dai_config_data data = { 0 };
+ int ret;
data.dai_data = DMA_CHAN_INVALID;
- return hda_dai_config(w, SOF_DAI_CONFIG_FLAGS_HW_FREE, &data);
+ ret = hda_dai_config(w, SOF_DAI_CONFIG_FLAGS_HW_FREE, &data);
+ if (ret < 0)
+ return ret;
+
+ if (cmd == SNDRV_PCM_TRIGGER_STOP)
+ return hda_link_dma_cleanup(substream, hext_stream, cpu_dai);
+
+ break;
}
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
return hda_dai_config(w, SOF_DAI_CONFIG_FLAGS_PAUSE, NULL);
return sdai->platform_private;
}
-static int hda_link_dma_cleanup(struct snd_pcm_substream *substream,
- struct hdac_ext_stream *hext_stream,
- struct snd_soc_dai *cpu_dai)
+int hda_link_dma_cleanup(struct snd_pcm_substream *substream, struct hdac_ext_stream *hext_stream,
+ struct snd_soc_dai *cpu_dai)
{
const struct hda_dai_widget_dma_ops *ops = hda_dai_get_ops(substream, cpu_dai);
struct sof_intel_hda_stream *hda_stream;
hda_select_dai_widget_ops(struct snd_sof_dev *sdev, struct snd_sof_widget *swidget);
int hda_dai_config(struct snd_soc_dapm_widget *w, unsigned int flags,
struct snd_sof_dai_config_data *data);
+int hda_link_dma_cleanup(struct snd_pcm_substream *substream, struct hdac_ext_stream *hext_stream,
+ struct snd_soc_dai *cpu_dai);
#endif
ipc3_log_header(sdev->dev, "ipc rx", hdr->cmd);
- if (hdr->size < sizeof(hdr) || hdr->size > SOF_IPC_MSG_MAX_SIZE) {
+ if (hdr->size < sizeof(*hdr) || hdr->size > SOF_IPC_MSG_MAX_SIZE) {
dev_err(sdev->dev, "The received message size is invalid: %u\n",
hdr->size);
return;
struct snd_sof_pcm *spcm;
spcm = snd_sof_find_spcm_dai(component, rtd);
+ if (!spcm)
+ return -EINVAL;
+
time_info = spcm->stream[substream->stream].private;
/* delay calculation is not supported by current fw_reg ABI */
if (!time_info)
*ipc_config_size = ipc_size;
+ /* update pipeline memory usage */
+ sof_ipc4_update_resource_usage(sdev, swidget, &copier_data->base_config);
+
/* copy IPC data */
memcpy(*ipc_config_data, (void *)copier_data, sizeof(*copier_data));
if (gtw_cfg_config_length)
gtw_cfg_config_length,
&ipc4_copier->dma_config_tlv, dma_config_tlv_size);
- /* update pipeline memory usage */
- sof_ipc4_update_resource_usage(sdev, swidget, &copier_data->base_config);
-
return 0;
}
{ 0 }
};
+/* Microsoft USB Link headset */
+/* a guess work: raw playback volume values are from 2 to 129 */
+static const struct usbmix_dB_map ms_usb_link_dB = { -3225, 0, true };
+static const struct usbmix_name_map ms_usb_link_map[] = {
+ { 9, NULL, .dB = &ms_usb_link_dB },
+ { 10, NULL }, /* Headset Capture volume; seems non-working, disabled */
+ { 0 } /* terminator */
+};
+
/* ASUS ROG Zenith II with Realtek ALC1220-VB */
static const struct usbmix_name_map asus_zenith_ii_map[] = {
{ 19, NULL, 12 }, /* FU, Input Gain Pad - broken response, disabled */
.id = USB_ID(0x1395, 0x0025),
.map = sennheiser_pc8_map,
},
+ {
+ /* Microsoft USB Link headset */
+ .id = USB_ID(0x045e, 0x083c),
+ .map = ms_usb_link_map,
+ },
{ 0 } /* terminator */
};
}
}
},
+{
+ /* Advanced modes of the Mythware XA001AU.
+ * For the standard mode, Mythware XA001AU has ID ffad:a001
+ */
+ USB_DEVICE_VENDOR_SPEC(0xffad, 0xa001),
+ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+ .vendor_name = "Mythware",
+ .product_name = "XA001AU",
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_COMPOSITE,
+ .data = (const struct snd_usb_audio_quirk[]) {
+ {
+ .ifnum = 0,
+ .type = QUIRK_IGNORE_INTERFACE,
+ },
+ {
+ .ifnum = 1,
+ .type = QUIRK_AUDIO_STANDARD_INTERFACE,
+ },
+ {
+ .ifnum = 2,
+ .type = QUIRK_AUDIO_STANDARD_INTERFACE,
+ },
+ {
+ .ifnum = -1
+ }
+ }
+ }
+},
#undef USB_DEVICE_VENDOR_SPEC
#undef USB_AUDIO_DEVICE
/* XMOS based USB DACs */
switch (chip->usb_id) {
- case USB_ID(0x1511, 0x0037): /* AURALiC VEGA */
- case USB_ID(0x21ed, 0xd75a): /* Accuphase DAC-60 option card */
+ case USB_ID(0x139f, 0x5504): /* Nagra DAC */
+ case USB_ID(0x20b1, 0x3089): /* Mola-Mola DAC */
+ case USB_ID(0x2522, 0x0007): /* LH Labs Geek Out 1V5 */
+ case USB_ID(0x2522, 0x0009): /* LH Labs Geek Pulse X Inifinity 2V0 */
case USB_ID(0x2522, 0x0012): /* LH Labs VI DAC Infinity */
case USB_ID(0x2772, 0x0230): /* Pro-Ject Pre Box S2 Digital */
if (fp->altsetting == 2)
case USB_ID(0x0d8c, 0x0316): /* Hegel HD12 DSD */
case USB_ID(0x10cb, 0x0103): /* The Bit Opus #3; with fp->dsd_raw */
case USB_ID(0x16d0, 0x06b2): /* NuPrime DAC-10 */
- case USB_ID(0x16d0, 0x09dd): /* Encore mDSD */
+ case USB_ID(0x16d0, 0x06b4): /* NuPrime Audio HD-AVP/AVA */
case USB_ID(0x16d0, 0x0733): /* Furutech ADL Stratos */
+ case USB_ID(0x16d0, 0x09d8): /* NuPrime IDA-8 */
case USB_ID(0x16d0, 0x09db): /* NuPrime Audio DAC-9 */
+ case USB_ID(0x16d0, 0x09dd): /* Encore mDSD */
case USB_ID(0x1db5, 0x0003): /* Bryston BDA3 */
+ case USB_ID(0x20a0, 0x4143): /* WaveIO USB Audio 2.0 */
case USB_ID(0x22e1, 0xca01): /* HDTA Serenade DSD */
case USB_ID(0x249c, 0x9326): /* M2Tech Young MkIII */
case USB_ID(0x2616, 0x0106): /* PS Audio NuWave DAC */
case USB_ID(0x2622, 0x0041): /* Audiolab M-DAC+ */
+ case USB_ID(0x278b, 0x5100): /* Rotel RC-1590 */
case USB_ID(0x27f7, 0x3002): /* W4S DAC-2v2SE */
case USB_ID(0x29a2, 0x0086): /* Mutec MC3+ USB */
case USB_ID(0x6b42, 0x0042): /* MSB Technology */
/* Amanero Combo384 USB based DACs with native DSD support */
case USB_ID(0x16d0, 0x071a): /* Amanero - Combo384 */
- case USB_ID(0x2ab6, 0x0004): /* T+A DAC8DSD-V2.0, MP1000E-V2.0, MP2000R-V2.0, MP2500R-V2.0, MP3100HV-V2.0 */
- case USB_ID(0x2ab6, 0x0005): /* T+A USB HD Audio 1 */
- case USB_ID(0x2ab6, 0x0006): /* T+A USB HD Audio 2 */
if (fp->altsetting == 2) {
switch (le16_to_cpu(chip->dev->descriptor.bcdDevice)) {
case 0x199:
QUIRK_FLAG_IGNORE_CTL_ERROR),
DEVICE_FLG(0x041e, 0x4080, /* Creative Live Cam VF0610 */
QUIRK_FLAG_GET_SAMPLE_RATE),
+ DEVICE_FLG(0x045e, 0x083c, /* MS USB Link headset */
+ QUIRK_FLAG_GET_SAMPLE_RATE | QUIRK_FLAG_CTL_MSG_DELAY |
+ QUIRK_FLAG_DISABLE_AUTOSUSPEND),
DEVICE_FLG(0x046d, 0x084c, /* Logitech ConferenceCam Connect */
QUIRK_FLAG_GET_SAMPLE_RATE | QUIRK_FLAG_CTL_MSG_DELAY_1M),
DEVICE_FLG(0x046d, 0x0991, /* Logitech QuickCam Pro */
QUIRK_FLAG_IFACE_DELAY),
DEVICE_FLG(0x0644, 0x805f, /* TEAC Model 12 */
QUIRK_FLAG_FORCE_IFACE_RESET),
+ DEVICE_FLG(0x0644, 0x806b, /* TEAC UD-701 */
+ QUIRK_FLAG_ITF_USB_DSD_DAC | QUIRK_FLAG_CTL_MSG_DELAY |
+ QUIRK_FLAG_IFACE_DELAY),
DEVICE_FLG(0x06f8, 0xb000, /* Hercules DJ Console (Windows Edition) */
QUIRK_FLAG_IGNORE_CTL_ERROR),
DEVICE_FLG(0x06f8, 0xd002, /* Hercules DJ Console (Macintosh Edition) */
QUIRK_FLAG_ITF_USB_DSD_DAC | QUIRK_FLAG_CTL_MSG_DELAY),
DEVICE_FLG(0x154e, 0x3006, /* Marantz SA-14S1 */
QUIRK_FLAG_ITF_USB_DSD_DAC | QUIRK_FLAG_CTL_MSG_DELAY),
+ DEVICE_FLG(0x154e, 0x300b, /* Marantz SA-KI RUBY / SA-12 */
+ QUIRK_FLAG_DSD_RAW),
DEVICE_FLG(0x154e, 0x500e, /* Denon DN-X1600 */
QUIRK_FLAG_IGNORE_CLOCK_SOURCE),
DEVICE_FLG(0x1686, 0x00dd, /* Zoom R16/24 */
QUIRK_FLAG_SHARE_MEDIA_DEVICE | QUIRK_FLAG_ALIGN_TRANSFER),
DEVICE_FLG(0x21b4, 0x0081, /* AudioQuest DragonFly */
QUIRK_FLAG_GET_SAMPLE_RATE),
+ DEVICE_FLG(0x21b4, 0x0230, /* Ayre QB-9 Twenty */
+ QUIRK_FLAG_DSD_RAW),
+ DEVICE_FLG(0x21b4, 0x0232, /* Ayre QX-5 Twenty */
+ QUIRK_FLAG_DSD_RAW),
DEVICE_FLG(0x2522, 0x0007, /* LH Labs Geek Out HD Audio 1V5 */
QUIRK_FLAG_SET_IFACE_FIRST),
DEVICE_FLG(0x2708, 0x0002, /* Audient iD14 */
QUIRK_FLAG_VALIDATE_RATES),
VENDOR_FLG(0x1235, /* Focusrite Novation */
QUIRK_FLAG_VALIDATE_RATES),
+ VENDOR_FLG(0x1511, /* AURALiC */
+ QUIRK_FLAG_DSD_RAW),
VENDOR_FLG(0x152a, /* Thesycon devices */
QUIRK_FLAG_DSD_RAW),
+ VENDOR_FLG(0x18d1, /* iBasso devices */
+ QUIRK_FLAG_DSD_RAW),
VENDOR_FLG(0x1de7, /* Phoenix Audio */
QUIRK_FLAG_GET_SAMPLE_RATE),
VENDOR_FLG(0x20b1, /* XMOS based devices */
QUIRK_FLAG_DSD_RAW),
+ VENDOR_FLG(0x21ed, /* Accuphase Laboratory */
+ QUIRK_FLAG_DSD_RAW),
VENDOR_FLG(0x22d9, /* Oppo */
QUIRK_FLAG_DSD_RAW),
VENDOR_FLG(0x23ba, /* Playback Design */
QUIRK_FLAG_DSD_RAW),
VENDOR_FLG(0x2ab6, /* T+A devices */
QUIRK_FLAG_DSD_RAW),
+ VENDOR_FLG(0x2d87, /* Cayin device */
+ QUIRK_FLAG_DSD_RAW),
VENDOR_FLG(0x3336, /* HEM devices */
QUIRK_FLAG_DSD_RAW),
VENDOR_FLG(0x3353, /* Khadas devices */
QUIRK_FLAG_DSD_RAW),
+ VENDOR_FLG(0x35f4, /* MSB Technology */
+ QUIRK_FLAG_DSD_RAW),
VENDOR_FLG(0x3842, /* EVGA */
QUIRK_FLAG_DSD_RAW),
VENDOR_FLG(0xc502, /* HiBy devices */
pad = b'\x00' * ((4 - len(attr_payload) % 4) % 4)
return struct.pack('HH', len(attr_payload) + 4, nl_type) + attr_payload + pad
- def _decode_enum(self, rsp, attr_spec):
- raw = rsp[attr_spec['name']]
+ def _decode_enum(self, raw, attr_spec):
enum = self.consts[attr_spec['enum']]
- i = attr_spec.get('value-start', 0)
if 'enum-as-flags' in attr_spec and attr_spec['enum-as-flags']:
+ i = 0
value = set()
while raw:
if raw & 1:
raw >>= 1
i += 1
else:
- value = enum.entries_by_val[raw - i].name
- rsp[attr_spec['name']] = value
+ value = enum.entries_by_val[raw].name
+ return value
def _decode_binary(self, attr, attr_spec):
if attr_spec.struct_name:
decoded = attr.as_struct(members)
for m in members:
if m.enum:
- self._decode_enum(decoded, m)
+ decoded[m.name] = self._decode_enum(decoded[m.name], m)
elif attr_spec.sub_type:
decoded = attr.as_c_array(attr_spec.sub_type)
else:
else:
raise Exception(f'Unknown {attr_spec["type"]} with name {attr_spec["name"]}')
+ if 'enum' in attr_spec:
+ decoded = self._decode_enum(decoded, attr_spec)
+
if not attr_spec.is_multi:
rsp[attr_spec['name']] = decoded
elif attr_spec.name in rsp:
else:
rsp[attr_spec.name] = [decoded]
- if 'enum' in attr_spec:
- self._decode_enum(rsp, attr_spec)
return rsp
def _decode_extack_path(self, attrs, attr_set, offset, target):
#define SZ_64G (SZ_32G * 2)
#endif
-#ifndef SZ_512G
-#define SZ_512G (SZ_64G * 8)
-#endif
-
static __init int cxl_rch_init(void)
{
int rc, i;
FPROBES=yes
fi
-if [ -z "$KPROBES" -a "$FPROBES" ] ; then
+if [ -z "$KPROBES" -a -z "$FPROBES" ] ; then
exit_unsupported
fi
{
ssize_t ret;
- ret = read(stats_fd, header, sizeof(*header));
- TEST_ASSERT(ret == sizeof(*header), "Read stats header");
+ ret = pread(stats_fd, header, sizeof(*header), 0);
+ TEST_ASSERT(ret == sizeof(*header),
+ "Failed to read '%lu' header bytes, ret = '%ld'",
+ sizeof(*header), ret);
}
struct kvm_stats_desc *read_stats_descriptors(int stats_fd,
id = malloc(header.name_size);
TEST_ASSERT(id, "Allocate memory for id string");
- ret = read(stats_fd, id, header.name_size);
- TEST_ASSERT(ret == header.name_size, "Read id string");
+ ret = pread(stats_fd, id, header.name_size, sizeof(header));
+ TEST_ASSERT(ret == header.name_size,
+ "Expected header size '%u', read '%lu' bytes",
+ header.name_size, ret);
/* Check id string, that should start with "kvm" */
TEST_ASSERT(!strncmp(id, "kvm", 3) && strlen(id) < header.name_size,
free(stats_data);
free(stats_desc);
free(id);
-}
-
-
-static void vm_stats_test(struct kvm_vm *vm)
-{
- int stats_fd = vm_get_stats_fd(vm);
-
- stats_test(stats_fd);
- close(stats_fd);
- TEST_ASSERT(fcntl(stats_fd, F_GETFD) == -1, "Stats fd not freed");
-}
-
-static void vcpu_stats_test(struct kvm_vcpu *vcpu)
-{
- int stats_fd = vcpu_get_stats_fd(vcpu);
- stats_test(stats_fd);
close(stats_fd);
TEST_ASSERT(fcntl(stats_fd, F_GETFD) == -1, "Stats fd not freed");
}
int main(int argc, char *argv[])
{
+ int vm_stats_fds, *vcpu_stats_fds;
int i, j;
struct kvm_vcpu **vcpus;
struct kvm_vm **vms;
vcpus = malloc(sizeof(struct kvm_vcpu *) * max_vm * max_vcpu);
TEST_ASSERT(vcpus, "Allocate memory for storing vCPU pointers");
+ /*
+ * Not per-VM as the array is populated, used, and invalidated within a
+ * single for-loop iteration.
+ */
+ vcpu_stats_fds = calloc(max_vm, sizeof(*vcpu_stats_fds));
+ TEST_ASSERT(vcpu_stats_fds, "Allocate memory for VM stats fds");
+
for (i = 0; i < max_vm; ++i) {
vms[i] = vm_create_barebones();
for (j = 0; j < max_vcpu; ++j)
vcpus[i * max_vcpu + j] = __vm_vcpu_add(vms[i], j);
}
- /* Check stats read for every VM and VCPU */
+ /*
+ * Check stats read for every VM and vCPU, with a variety of flavors.
+ * Note, stats_test() closes the passed in stats fd.
+ */
for (i = 0; i < max_vm; ++i) {
- vm_stats_test(vms[i]);
+ /*
+ * Verify that creating multiple userspace references to a
+ * single stats file works and doesn't cause explosions.
+ */
+ vm_stats_fds = vm_get_stats_fd(vms[i]);
+ stats_test(dup(vm_stats_fds));
+
+ /* Verify userspace can instantiate multiple stats files. */
+ stats_test(vm_get_stats_fd(vms[i]));
+
+ for (j = 0; j < max_vcpu; ++j) {
+ vcpu_stats_fds[j] = vcpu_get_stats_fd(vcpus[i * max_vcpu + j]);
+ stats_test(dup(vcpu_stats_fds[j]));
+ stats_test(vcpu_get_stats_fd(vcpus[i * max_vcpu + j]));
+ }
+
+ /*
+ * Close the VM fd and redo the stats tests. KVM should gift a
+ * reference (to the VM) to each stats fd, i.e. stats should
+ * still be accessible even after userspace has put its last
+ * _direct_ reference to the VM.
+ */
+ kvm_vm_free(vms[i]);
+
+ stats_test(vm_stats_fds);
for (j = 0; j < max_vcpu; ++j)
- vcpu_stats_test(vcpus[i * max_vcpu + j]);
+ stats_test(vcpu_stats_fds[j]);
+
ksft_test_result_pass("vm%i\n", i);
}
- for (i = 0; i < max_vm; ++i)
- kvm_vm_free(vms[i]);
free(vms);
+ free(vcpus);
+ free(vcpu_stats_fds);
ksft_finished(); /* Print results and exit() accordingly */
}
#include "kvm_util.h"
#include "processor.h"
-static void test_cr4_feature_bit(struct kvm_vcpu *vcpu, struct kvm_sregs *orig,
- uint64_t feature_bit)
-{
- struct kvm_sregs sregs;
- int rc;
-
- /* Skip the sub-test, the feature is supported. */
- if (orig->cr4 & feature_bit)
- return;
-
- memcpy(&sregs, orig, sizeof(sregs));
- sregs.cr4 |= feature_bit;
-
- rc = _vcpu_sregs_set(vcpu, &sregs);
- TEST_ASSERT(rc, "KVM allowed unsupported CR4 bit (0x%lx)", feature_bit);
-
- /* Sanity check that KVM didn't change anything. */
- vcpu_sregs_get(vcpu, &sregs);
- TEST_ASSERT(!memcmp(&sregs, orig, sizeof(sregs)), "KVM modified sregs");
-}
+#define TEST_INVALID_CR_BIT(vcpu, cr, orig, bit) \
+do { \
+ struct kvm_sregs new; \
+ int rc; \
+ \
+ /* Skip the sub-test, the feature/bit is supported. */ \
+ if (orig.cr & bit) \
+ break; \
+ \
+ memcpy(&new, &orig, sizeof(sregs)); \
+ new.cr |= bit; \
+ \
+ rc = _vcpu_sregs_set(vcpu, &new); \
+ TEST_ASSERT(rc, "KVM allowed invalid " #cr " bit (0x%lx)", bit); \
+ \
+ /* Sanity check that KVM didn't change anything. */ \
+ vcpu_sregs_get(vcpu, &new); \
+ TEST_ASSERT(!memcmp(&new, &orig, sizeof(new)), "KVM modified sregs"); \
+} while (0)
static uint64_t calc_supported_cr4_feature_bits(void)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
uint64_t cr4;
- int rc;
+ int rc, i;
/*
* Create a dummy VM, specifically to avoid doing KVM_SET_CPUID2, and
vcpu_sregs_get(vcpu, &sregs);
+ sregs.cr0 = 0;
sregs.cr4 |= calc_supported_cr4_feature_bits();
cr4 = sregs.cr4;
sregs.cr4, cr4);
/* Verify all unsupported features are rejected by KVM. */
- test_cr4_feature_bit(vcpu, &sregs, X86_CR4_UMIP);
- test_cr4_feature_bit(vcpu, &sregs, X86_CR4_LA57);
- test_cr4_feature_bit(vcpu, &sregs, X86_CR4_VMXE);
- test_cr4_feature_bit(vcpu, &sregs, X86_CR4_SMXE);
- test_cr4_feature_bit(vcpu, &sregs, X86_CR4_FSGSBASE);
- test_cr4_feature_bit(vcpu, &sregs, X86_CR4_PCIDE);
- test_cr4_feature_bit(vcpu, &sregs, X86_CR4_OSXSAVE);
- test_cr4_feature_bit(vcpu, &sregs, X86_CR4_SMEP);
- test_cr4_feature_bit(vcpu, &sregs, X86_CR4_SMAP);
- test_cr4_feature_bit(vcpu, &sregs, X86_CR4_PKE);
+ TEST_INVALID_CR_BIT(vcpu, cr4, sregs, X86_CR4_UMIP);
+ TEST_INVALID_CR_BIT(vcpu, cr4, sregs, X86_CR4_LA57);
+ TEST_INVALID_CR_BIT(vcpu, cr4, sregs, X86_CR4_VMXE);
+ TEST_INVALID_CR_BIT(vcpu, cr4, sregs, X86_CR4_SMXE);
+ TEST_INVALID_CR_BIT(vcpu, cr4, sregs, X86_CR4_FSGSBASE);
+ TEST_INVALID_CR_BIT(vcpu, cr4, sregs, X86_CR4_PCIDE);
+ TEST_INVALID_CR_BIT(vcpu, cr4, sregs, X86_CR4_OSXSAVE);
+ TEST_INVALID_CR_BIT(vcpu, cr4, sregs, X86_CR4_SMEP);
+ TEST_INVALID_CR_BIT(vcpu, cr4, sregs, X86_CR4_SMAP);
+ TEST_INVALID_CR_BIT(vcpu, cr4, sregs, X86_CR4_PKE);
+
+ for (i = 32; i < 64; i++)
+ TEST_INVALID_CR_BIT(vcpu, cr0, sregs, BIT(i));
+
+ /* NW without CD is illegal, as is PG without PE. */
+ TEST_INVALID_CR_BIT(vcpu, cr0, sregs, X86_CR0_NW);
+ TEST_INVALID_CR_BIT(vcpu, cr0, sregs, X86_CR0_PG);
+
kvm_vm_free(vm);
/* Create a "real" VM and verify APIC_BASE can be set. */
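
For clarity, a single invocation of the TEST_INVALid_CR_BIT()-style macro above, e.g. TEST_INVALID_CR_BIT(vcpu, cr0, sregs, X86_CR0_NW), expands to roughly the following; the do/while (0) wrapper is what lets the "bit is already supported" check bail out early via break, and #cr stringifies the register name into the assert message:

do {
	struct kvm_sregs new;
	int rc;

	/* Skip the sub-test, the feature/bit is supported. */
	if (sregs.cr0 & X86_CR0_NW)
		break;

	memcpy(&new, &sregs, sizeof(new));
	new.cr0 |= X86_CR0_NW;

	rc = _vcpu_sregs_set(vcpu, &new);
	TEST_ASSERT(rc, "KVM allowed invalid " "cr0" " bit (0x%lx)", X86_CR0_NW);

	/* Sanity check that KVM didn't change anything. */
	vcpu_sregs_get(vcpu, &new);
	TEST_ASSERT(!memcmp(&new, &sregs, sizeof(new)), "KVM modified sregs");
} while (0);
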
elif ! iptables -V &> /dev/null; then
echo "SKIP: Could not run all tests without iptables tool"
exit $ksft_skip
- fi
-
- if ! ip6tables -V &> /dev/null; then
+ elif ! ip6tables -V &> /dev/null; then
echo "SKIP: Could not run all tests without ip6tables tool"
exit $ksft_skip
fi
#include "../kselftest.h"
#include "rseq.h"
-static const ptrdiff_t *libc_rseq_offset_p;
-static const unsigned int *libc_rseq_size_p;
-static const unsigned int *libc_rseq_flags_p;
+/*
+ * Define weak versions to play nice with binaries that are statically linked
+ * against a libc that doesn't support registering its own rseq.
+ */
+__weak ptrdiff_t __rseq_offset;
+__weak unsigned int __rseq_size;
+__weak unsigned int __rseq_flags;
+
+static const ptrdiff_t *libc_rseq_offset_p = &__rseq_offset;
+static const unsigned int *libc_rseq_size_p = &__rseq_size;
+static const unsigned int *libc_rseq_flags_p = &__rseq_flags;
/* Offset from the thread pointer to the rseq area. */
ptrdiff_t rseq_offset;
static __attribute__((constructor))
void rseq_init(void)
{
- libc_rseq_offset_p = dlsym(RTLD_NEXT, "__rseq_offset");
- libc_rseq_size_p = dlsym(RTLD_NEXT, "__rseq_size");
- libc_rseq_flags_p = dlsym(RTLD_NEXT, "__rseq_flags");
+ /*
+ * If the libc's registered rseq size isn't already valid, it may be
+ * because the binary is dynamically linked and not necessarily due to
+ * libc not having registered a restartable sequence. Try to find the
+ * symbols if that's the case.
+ */
+ if (!*libc_rseq_size_p) {
+ libc_rseq_offset_p = dlsym(RTLD_NEXT, "__rseq_offset");
+ libc_rseq_size_p = dlsym(RTLD_NEXT, "__rseq_size");
+ libc_rseq_flags_p = dlsym(RTLD_NEXT, "__rseq_flags");
+ }
if (libc_rseq_size_p && libc_rseq_offset_p && libc_rseq_flags_p &&
*libc_rseq_size_p != 0) {
/* rseq registration owned by glibc */
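
Once rseq_init() has resolved the offset, either from glibc's __rseq_offset or from this library's own registration, the per-thread rseq area sits at the thread pointer plus that offset. A hedged sketch follows, assuming a toolchain that provides __builtin_thread_pointer(); the selftests use their own per-arch rseq_thread_pointer() helpers instead, and my_rseq_area() is a made-up name for illustration.

#include <stdint.h>
#include <stddef.h>

struct rseq_abi;	/* layout comes from the rseq UAPI (rseq-abi.h) */

static inline struct rseq_abi *my_rseq_area(ptrdiff_t offset)
{
	/* Thread pointer plus the offset published by glibc (or computed locally). */
	return (struct rseq_abi *)((uintptr_t)__builtin_thread_pointer() + offset);
}
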
sizeof(vcpu->stat), user_buffer, size, offset);
}
+static int kvm_vcpu_stats_release(struct inode *inode, struct file *file)
+{
+ struct kvm_vcpu *vcpu = file->private_data;
+
+ kvm_put_kvm(vcpu->kvm);
+ return 0;
+}
+
static const struct file_operations kvm_vcpu_stats_fops = {
.read = kvm_vcpu_stats_read,
+ .release = kvm_vcpu_stats_release,
.llseek = noop_llseek,
};
put_unused_fd(fd);
return PTR_ERR(file);
}
+
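+	/*
+	 * Pairs with kvm_put_kvm() in kvm_vcpu_stats_release(); the stats fd
+	 * keeps the VM alive even after the VM's own fd is closed.
+	 */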
+ kvm_get_kvm(vcpu->kvm);
+
file->f_mode |= FMODE_PREAD;
fd_install(fd, file);
sizeof(kvm->stat), user_buffer, size, offset);
}
+static int kvm_vm_stats_release(struct inode *inode, struct file *file)
+{
+ struct kvm *kvm = file->private_data;
+
+ kvm_put_kvm(kvm);
+ return 0;
+}
+
static const struct file_operations kvm_vm_stats_fops = {
.read = kvm_vm_stats_read,
+ .release = kvm_vm_stats_release,
.llseek = noop_llseek,
};
put_unused_fd(fd);
return PTR_ERR(file);
}
+
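+	/*
+	 * Pairs with kvm_put_kvm() in kvm_vm_stats_release(); the stats fd
+	 * holds its own reference to the VM.
+	 */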
+ kvm_get_kvm(kvm);
+
file->f_mode |= FMODE_PREAD;
fd_install(fd, file);
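
The userspace-visible effect of the two get/put pairs above is that a stats fd obtained via KVM_GET_STATS_FD keeps the VM's stats readable even after the last direct VM fd is closed, which is exactly what the selftest earlier in this series exercises. A self-contained sketch, assuming /dev/kvm is accessible and the kernel supports KVM_GET_STATS_FD (5.14+):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/kvm.h>

int main(void)
{
	struct kvm_stats_header hdr;
	int sys_fd, vm_fd, stats_fd;

	sys_fd = open("/dev/kvm", O_RDWR);
	if (sys_fd < 0) {
		perror("open /dev/kvm");
		return 1;
	}

	vm_fd = ioctl(sys_fd, KVM_CREATE_VM, 0);
	if (vm_fd < 0) {
		perror("KVM_CREATE_VM");
		return 1;
	}

	stats_fd = ioctl(vm_fd, KVM_GET_STATS_FD, NULL);
	if (stats_fd < 0) {
		perror("KVM_GET_STATS_FD");
		return 1;
	}

	/* Drop the last direct reference to the VM... */
	close(vm_fd);

	/*
	 * ...yet the stats fd still pins the VM via the reference taken
	 * before fd_install(), so this read should succeed.
	 */
	if (pread(stats_fd, &hdr, sizeof(hdr), 0) != sizeof(hdr)) {
		perror("pread after closing VM fd");
		return 1;
	}
	printf("stats still readable: %u descriptors\n", hdr.num_desc);

	close(stats_fd);
	close(sys_fd);
	return 0;
}
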