Merge tag 'drm-misc-next-2018-04-26' of git://anongit.freedesktop.org/drm/drm-misc...
authorDave Airlie <airlied@redhat.com>
Sun, 29 Apr 2018 23:32:34 +0000 (09:32 +1000)
committerDave Airlie <airlied@redhat.com>
Sun, 29 Apr 2018 23:32:43 +0000 (09:32 +1000)
drm-misc-next for v4.18:

UAPI Changes:
- Add support for a generic plane alpha property to sun4i, rcar-du and atmel-hclcdc. (Maxime)

Core Changes:
- Stop looking at legacy plane->fb and crtc members in atomic drivers. (Ville)
- mode_valid return type fixes. (Luc)
- Handle zpos normalization in the core. (Peter)

Driver Changes:
- Implement CTM, plane alpha and generic async cursor support in vc4. (Stefan)
- Various fixes for HPD and aux chan in drm_bridge/analogix_dp. (Lin, Zain, Douglas)
- Add support for MIPI DSI to sun4i. (Maxime)

Signed-off-by: Dave Airlie <airlied@redhat.com>
# gpg: Signature made Thu 26 Apr 2018 08:21:01 PM AEST
# gpg:                using RSA key FE558C72A67013C3
# gpg: Can't check signature: public key not found
Link: https://patchwork.freedesktop.org/patch/msgid/b33da7eb-efc9-ae6f-6f69-b7acd6df6797@mblankhorst.nl
165 files changed:
Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt
Documentation/devicetree/bindings/display/bridge/cdns,dsi.txt [new file with mode: 0644]
Documentation/devicetree/bindings/display/bridge/thine,thc63lvd1024.txt [new file with mode: 0644]
Documentation/devicetree/bindings/display/sunxi/sun6i-dsi.txt [new file with mode: 0644]
Documentation/gpu/drivers.rst
Documentation/gpu/kms-properties.csv
Documentation/gpu/todo.rst
Documentation/gpu/xen-front.rst [new file with mode: 0644]
MAINTAINERS
drivers/gpu/drm/Kconfig
drivers/gpu/drm/Makefile
drivers/gpu/drm/ast/ast_mode.c
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
drivers/gpu/drm/bochs/bochs_kms.c
drivers/gpu/drm/bridge/Kconfig
drivers/gpu/drm/bridge/Makefile
drivers/gpu/drm/bridge/adv7511/adv7511.h
drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h
drivers/gpu/drm/bridge/cdns-dsi.c [new file with mode: 0644]
drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
drivers/gpu/drm/bridge/tc358767.c
drivers/gpu/drm/bridge/thc63lvd1024.c [new file with mode: 0644]
drivers/gpu/drm/drm_atomic.c
drivers/gpu/drm/drm_atomic_helper.c
drivers/gpu/drm/drm_blend.c
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/drm_crtc_internal.h
drivers/gpu/drm/drm_dp_mst_topology.c
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/drm_framebuffer.c
drivers/gpu/drm/drm_gem.c
drivers/gpu/drm/drm_gem_framebuffer_helper.c
drivers/gpu/drm/drm_lease.c
drivers/gpu/drm/drm_panel_orientation_quirks.c
drivers/gpu/drm/drm_plane.c
drivers/gpu/drm/drm_prime.c
drivers/gpu/drm/drm_scdc_helper.c
drivers/gpu/drm/drm_simple_kms_helper.c
drivers/gpu/drm/exynos/exynos_dp.c
drivers/gpu/drm/exynos/exynos_drm_drv.c
drivers/gpu/drm/exynos/exynos_drm_drv.h
drivers/gpu/drm/exynos/exynos_drm_fb.c
drivers/gpu/drm/gma500/cdv_intel_crt.c
drivers/gpu/drm/gma500/cdv_intel_dp.c
drivers/gpu/drm/gma500/cdv_intel_hdmi.c
drivers/gpu/drm/gma500/cdv_intel_lvds.c
drivers/gpu/drm/gma500/mdfld_dsi_output.c
drivers/gpu/drm/gma500/oaktrail_hdmi.c
drivers/gpu/drm/gma500/psb_intel_drv.h
drivers/gpu/drm/gma500/psb_intel_lvds.c
drivers/gpu/drm/gma500/psb_intel_sdvo.c
drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
drivers/gpu/drm/i2c/tda998x_drv.c
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_fbdev.c
drivers/gpu/drm/mgag200/mgag200_mode.c
drivers/gpu/drm/mxsfb/mxsfb_drv.c
drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
drivers/gpu/drm/omapdrm/omap_drv.c
drivers/gpu/drm/omapdrm/omap_plane.c
drivers/gpu/drm/pl111/pl111_display.c
drivers/gpu/drm/qxl/qxl_display.c
drivers/gpu/drm/rcar-du/rcar_du_drv.h
drivers/gpu/drm/rcar-du/rcar_du_kms.c
drivers/gpu/drm/rcar-du/rcar_du_plane.c
drivers/gpu/drm/rcar-du/rcar_du_plane.h
drivers/gpu/drm/rcar-du/rcar_du_vsp.c
drivers/gpu/drm/rcar-du/rcar_du_vsp.h
drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
drivers/gpu/drm/rockchip/rockchip_drm_drv.h
drivers/gpu/drm/rockchip/rockchip_drm_fb.c
drivers/gpu/drm/rockchip/rockchip_drm_gem.c
drivers/gpu/drm/rockchip/rockchip_drm_psr.c
drivers/gpu/drm/rockchip/rockchip_drm_psr.h
drivers/gpu/drm/rockchip/rockchip_drm_vop.c
drivers/gpu/drm/rockchip/rockchip_drm_vop.h
drivers/gpu/drm/rockchip/rockchip_vop_reg.c
drivers/gpu/drm/sti/Kconfig
drivers/gpu/drm/sti/sti_drv.c
drivers/gpu/drm/sti/sti_plane.c
drivers/gpu/drm/stm/drv.c
drivers/gpu/drm/stm/ltdc.c
drivers/gpu/drm/stm/ltdc.h
drivers/gpu/drm/sun4i/Kconfig
drivers/gpu/drm/sun4i/Makefile
drivers/gpu/drm/sun4i/sun4i_backend.c
drivers/gpu/drm/sun4i/sun4i_backend.h
drivers/gpu/drm/sun4i/sun4i_layer.c
drivers/gpu/drm/sun4i/sun4i_tcon.c
drivers/gpu/drm/sun4i/sun4i_tcon.h
drivers/gpu/drm/sun4i/sun6i_mipi_dphy.c [new file with mode: 0644]
drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c [new file with mode: 0644]
drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h [new file with mode: 0644]
drivers/gpu/drm/tegra/drm.c
drivers/gpu/drm/tinydrm/core/tinydrm-core.c
drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c
drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
drivers/gpu/drm/tinydrm/ili9225.c
drivers/gpu/drm/tinydrm/mi0283qt.c
drivers/gpu/drm/tinydrm/mipi-dbi.c
drivers/gpu/drm/tinydrm/repaper.c
drivers/gpu/drm/tinydrm/st7586.c
drivers/gpu/drm/tinydrm/st7735r.c
drivers/gpu/drm/tve200/tve200_display.c
drivers/gpu/drm/udl/udl_connector.c
drivers/gpu/drm/udl/udl_dmabuf.c
drivers/gpu/drm/udl/udl_drv.c
drivers/gpu/drm/udl/udl_drv.h
drivers/gpu/drm/udl/udl_gem.c
drivers/gpu/drm/udl/udl_main.c
drivers/gpu/drm/vc4/vc4_crtc.c
drivers/gpu/drm/vc4/vc4_drv.c
drivers/gpu/drm/vc4/vc4_drv.h
drivers/gpu/drm/vc4/vc4_hvs.c
drivers/gpu/drm/vc4/vc4_kms.c
drivers/gpu/drm/vc4/vc4_plane.c
drivers/gpu/drm/vc4/vc4_regs.h
drivers/gpu/drm/virtio/virtgpu_display.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
drivers/gpu/drm/xen/Kconfig [new file with mode: 0644]
drivers/gpu/drm/xen/Makefile [new file with mode: 0644]
drivers/gpu/drm/xen/xen_drm_front.c [new file with mode: 0644]
drivers/gpu/drm/xen/xen_drm_front.h [new file with mode: 0644]
drivers/gpu/drm/xen/xen_drm_front_cfg.c [new file with mode: 0644]
drivers/gpu/drm/xen/xen_drm_front_cfg.h [new file with mode: 0644]
drivers/gpu/drm/xen/xen_drm_front_conn.c [new file with mode: 0644]
drivers/gpu/drm/xen/xen_drm_front_conn.h [new file with mode: 0644]
drivers/gpu/drm/xen/xen_drm_front_evtchnl.c [new file with mode: 0644]
drivers/gpu/drm/xen/xen_drm_front_evtchnl.h [new file with mode: 0644]
drivers/gpu/drm/xen/xen_drm_front_gem.c [new file with mode: 0644]
drivers/gpu/drm/xen/xen_drm_front_gem.h [new file with mode: 0644]
drivers/gpu/drm/xen/xen_drm_front_kms.c [new file with mode: 0644]
drivers/gpu/drm/xen/xen_drm_front_kms.h [new file with mode: 0644]
drivers/gpu/drm/xen/xen_drm_front_shbuf.c [new file with mode: 0644]
drivers/gpu/drm/xen/xen_drm_front_shbuf.h [new file with mode: 0644]
drivers/gpu/drm/zte/zx_plane.c
drivers/gpu/drm/zte/zx_vou.c
drivers/gpu/drm/zte/zx_vou.h
drivers/staging/vboxvideo/vbox_drv.c
include/drm/bridge/analogix_dp.h
include/drm/drmP.h
include/drm/drm_blend.h
include/drm/drm_device.h
include/drm/drm_drv.h
include/drm/drm_edid.h
include/drm/drm_gem_framebuffer_helper.h
include/drm/drm_legacy.h
include/drm/drm_mode_config.h
include/drm/drm_modeset_helper_vtables.h
include/drm/drm_plane.h
include/drm/drm_property.h
include/drm/drm_simple_kms_helper.h
include/drm/tinydrm/mipi-dbi.h
include/drm/tinydrm/tinydrm-helpers.h
include/drm/tinydrm/tinydrm.h
scripts/coccinelle/api/drm-get-put.cocci

index 0047b13..2c88753 100644 (file)
@@ -14,7 +14,13 @@ Required properties:
                "adi,adv7513"
                "adi,adv7533"
 
-- reg: I2C slave address
+- reg: I2C slave addresses
+  The ADV7511 internal registers are split into four pages exposed through
+  different I2C addresses, creating four register maps. Each map has its own
+  I2C address and acts as a standard slave device on the I2C bus. The main
+  address is mandatory, others are optional and revert to defaults if not
+  specified.
+
 
 The ADV7511 supports a large number of input data formats that differ by their
 color depth, color format, clock mode, bit justification and random
@@ -70,6 +76,9 @@ Optional properties:
   rather than generate its own timings for HDMI output.
 - clocks: from common clock binding: reference to the CEC clock.
 - clock-names: from common clock binding: must be "cec".
+- reg-names: Names of maps with programmable addresses.
+       It can contain any map needing a non-default address.
+       Possible map names are: "main", "edid", "cec", "packet"
 
 Required nodes:
 
@@ -88,7 +97,12 @@ Example
 
        adv7511w: hdmi@39 {
                compatible = "adi,adv7511w";
-               reg = <39>;
+               /*
+                * The EDID page will be accessible on address 0x66 on the I2C
+                * bus. All other maps continue to use their default addresses.
+                */
+               reg = <0x39>, <0x66>;
+               reg-names = "main", "edid";
                interrupt-parent = <&gpio3>;
                interrupts = <29 IRQ_TYPE_EDGE_FALLING>;
                clocks = <&cec_clock>;
diff --git a/Documentation/devicetree/bindings/display/bridge/cdns,dsi.txt b/Documentation/devicetree/bindings/display/bridge/cdns,dsi.txt
new file mode 100644 (file)
index 0000000..f5725bb
--- /dev/null
@@ -0,0 +1,133 @@
+Cadence DSI bridge
+==================
+
+The Cadence DSI bridge is a DPI to DSI bridge supporting up to 4 DSI lanes.
+
+Required properties:
+- compatible: should be set to "cdns,dsi".
+- reg: physical base address and length of the controller's registers.
+- interrupts: interrupt line connected to the DSI bridge.
+- clocks: DSI bridge clocks.
+- clock-names: must contain "dsi_p_clk" and "dsi_sys_clk".
+- phys: phandle link to the MIPI D-PHY controller.
+- phy-names: must contain "dphy".
+- #address-cells: must be set to 1.
+- #size-cells: must be set to 0.
+
+Optional properties:
+- resets: DSI reset lines.
+- reset-names: can contain "dsi_p_rst".
+
+Required subnodes:
+- ports: Ports as described in Documentation/devicetree/bindings/graph.txt.
+  2 ports are available:
+  * port 0: this port is only needed if some of your DSI devices are
+           controlled through an external bus like I2C or SPI. Can have at
+           most 4 endpoints. The endpoint number is directly encoding the
+           DSI virtual channel used by this device.
+  * port 1: represents the DPI input.
+  Other ports will be added later to support new kinds of inputs.
+
+- one subnode per DSI device connected on the DSI bus. Each DSI device should
+  contain a reg property encoding its virtual channel.
+
+Cadence DPHY
+============
+
+Cadence DPHY block.
+
+Required properties:
+- compatible: should be set to "cdns,dphy".
+- reg: physical base address and length of the DPHY registers.
+- clocks: DPHY reference clocks.
+- clock-names: must contain "psm" and "pll_ref".
+- #phy-cells: must be set to 0.
+
+
+Example:
+       dphy0: dphy@fd0e0000{
+               compatible = "cdns,dphy";
+               reg = <0x0 0xfd0e0000 0x0 0x1000>;
+               clocks = <&psm_clk>, <&pll_ref_clk>;
+               clock-names = "psm", "pll_ref";
+               #phy-cells = <0>;
+       };
+
+       dsi0: dsi@fd0c0000 {
+               compatible = "cdns,dsi";
+               reg = <0x0 0xfd0c0000 0x0 0x1000>;
+               clocks = <&pclk>, <&sysclk>;
+               clock-names = "dsi_p_clk", "dsi_sys_clk";
+               interrupts = <1>;
+               phys = <&dphy0>;
+               phy-names = "dphy";
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               ports {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       port@1 {
+                               reg = <1>;
+                               dsi0_dpi_input: endpoint {
+                                       remote-endpoint = <&xxx_dpi_output>;
+                               };
+                       };
+               };
+
+               panel: dsi-dev@0 {
+                       compatible = "<vendor,panel>";
+                       reg = <0>;
+               };
+       };
+
+or
+
+       dsi0: dsi@fd0c0000 {
+               compatible = "cdns,dsi";
+               reg = <0x0 0xfd0c0000 0x0 0x1000>;
+               clocks = <&pclk>, <&sysclk>;
+               clock-names = "dsi_p_clk", "dsi_sys_clk";
+               interrupts = <1>;
+               phys = <&dphy1>;
+               phy-names = "dphy";
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               ports {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       port@0 {
+                               reg = <0>;
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+
+                               dsi0_output: endpoint@0 {
+                                       reg = <0>;
+                                       remote-endpoint = <&dsi_panel_input>;
+                               };
+                       };
+
+                       port@1 {
+                               reg = <1>;
+                               dsi0_dpi_input: endpoint {
+                                       remote-endpoint = <&xxx_dpi_output>;
+                               };
+                       };
+               };
+       };
+
+       i2c@xxx {
+               panel: panel@59 {
+                       compatible = "<vendor,panel>";
+                       reg = <0x59>;
+
+                       port {
+                               dsi_panel_input: endpoint {
+                                       remote-endpoint = <&dsi0_output>;
+                               };
+                       };
+               };
+       };
diff --git a/Documentation/devicetree/bindings/display/bridge/thine,thc63lvd1024.txt b/Documentation/devicetree/bindings/display/bridge/thine,thc63lvd1024.txt
new file mode 100644 (file)
index 0000000..37f0c04
--- /dev/null
@@ -0,0 +1,60 @@
+Thine Electronics THC63LVD1024 LVDS decoder
+-------------------------------------------
+
+The THC63LVD1024 is a dual link LVDS receiver designed to convert LVDS streams
+to parallel data outputs. The chip supports single/dual input/output modes,
+handling up to two LVDS input streams and up to two digital CMOS/TTL outputs.
+
+Single or dual operation mode, output data mapping and DDR output modes are
+configured through input signals and the chip does not expose any control bus.
+
+Required properties:
+- compatible: Shall be "thine,thc63lvd1024"
+- vcc-supply: Power supply for TTL output, TTL CLOCKOUT signal, LVDS input,
+  PLL and digital circuitry
+
+Optional properties:
+- powerdown-gpios: Power down GPIO signal, pin name "/PDWN". Active low
+- oe-gpios: Output enable GPIO signal, pin name "OE". Active high
+
+The THC63LVD1024 video port connections are modeled according
+to OF graph bindings specified by Documentation/devicetree/bindings/graph.txt
+
+Required video port nodes:
+- port@0: First LVDS input port
+- port@2: First digital CMOS/TTL parallel output
+
+Optional video port nodes:
+- port@1: Second LVDS input port
+- port@3: Second digital CMOS/TTL parallel output
+
+Example:
+--------
+
+       thc63lvd1024: lvds-decoder {
+               compatible = "thine,thc63lvd1024";
+
+               vcc-supply = <&reg_lvds_vcc>;
+               powerdown-gpios = <&gpio4 15 GPIO_ACTIVE_LOW>;
+
+               ports {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       port@0 {
+                               reg = <0>;
+
+                               lvds_dec_in_0: endpoint {
+                                       remote-endpoint = <&lvds_out>;
+                               };
+                       };
+
+                       port@2{
+                               reg = <2>;
+
+                               lvds_dec_out_2: endpoint {
+                                       remote-endpoint = <&adv7511_in>;
+                               };
+                       };
+               };
+       };
diff --git a/Documentation/devicetree/bindings/display/sunxi/sun6i-dsi.txt b/Documentation/devicetree/bindings/display/sunxi/sun6i-dsi.txt
new file mode 100644 (file)
index 0000000..6a6cf5d
--- /dev/null
@@ -0,0 +1,93 @@
+Allwinner A31 DSI Encoder
+=========================
+
+The DSI pipeline consists of two separate blocks: the DSI controller
+itself, and its associated D-PHY.
+
+DSI Encoder
+-----------
+
+The DSI Encoder generates the DSI signal from the TCON's.
+
+Required properties:
+  - compatible: value must be one of:
+    * allwinner,sun6i-a31-mipi-dsi
+  - reg: base address and size of memory-mapped region
+  - interrupts: interrupt associated to this IP
+  - clocks: phandles to the clocks feeding the DSI encoder
+    * bus: the DSI interface clock
+    * mod: the DSI module clock
+  - clock-names: the clock names mentioned above
+  - phys: phandle to the D-PHY
+  - phy-names: must be "dphy"
+  - resets: phandle to the reset controller driving the encoder
+
+  - ports: A ports node with endpoint definitions as defined in
+    Documentation/devicetree/bindings/media/video-interfaces.txt. The
+    first port should be the input endpoint, usually coming from the
+    associated TCON.
+
+Any MIPI-DSI device attached to this should be described according to
+the bindings defined in ../mipi-dsi-bus.txt
+
+D-PHY
+-----
+
+Required properties:
+  - compatible: value must be one of:
+    * allwinner,sun6i-a31-mipi-dphy
+  - reg: base address and size of memory-mapped region
+  - clocks: phandles to the clocks feeding the DSI encoder
+    * bus: the DSI interface clock
+    * mod: the DSI module clock
+  - clock-names: the clock names mentioned above
+  - resets: phandle to the reset controller driving the encoder
+
+Example:
+
+dsi0: dsi@1ca0000 {
+       compatible = "allwinner,sun6i-a31-mipi-dsi";
+       reg = <0x01ca0000 0x1000>;
+       interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
+       clocks = <&ccu CLK_BUS_MIPI_DSI>,
+                <&ccu CLK_DSI_SCLK>;
+       clock-names = "bus", "mod";
+       resets = <&ccu RST_BUS_MIPI_DSI>;
+       phys = <&dphy0>;
+       phy-names = "dphy";
+       #address-cells = <1>;
+       #size-cells = <0>;
+
+       panel@0 {
+               compatible = "bananapi,lhr050h41", "ilitek,ili9881c";
+               reg = <0>;
+               power-gpios = <&pio 1 7 GPIO_ACTIVE_HIGH>; /* PB07 */
+               reset-gpios = <&r_pio 0 5 GPIO_ACTIVE_LOW>; /* PL05 */
+               backlight = <&pwm_bl>;
+       };
+
+       ports {
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               port@0 {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <0>;
+
+                       dsi0_in_tcon0: endpoint {
+                               remote-endpoint = <&tcon0_out_dsi0>;
+                       };
+               };
+       };
+};
+
+dphy0: d-phy@1ca1000 {
+       compatible = "allwinner,sun6i-a31-mipi-dphy";
+       reg = <0x01ca1000 0x1000>;
+       clocks = <&ccu CLK_BUS_MIPI_DSI>,
+                <&ccu CLK_DSI_DPHY>;
+       clock-names = "bus", "mod";
+       resets = <&ccu RST_BUS_MIPI_DSI>;
+       #phy-cells = <0>;
+};
index e8c8441..d3ab6ab 100644 (file)
@@ -12,6 +12,7 @@ GPU Driver Documentation
    tve200
    vc4
    bridge/dw-hdmi
+   xen-front
 
 .. only::  subproject and html
 
index 6b28b01..07ed22e 100644 (file)
@@ -98,5 +98,4 @@ radeon,DVI-I,“coherent”,RANGE,"Min=0, Max=1",Connector,TBD
 ,,"""underscan vborder""",RANGE,"Min=0, Max=128",Connector,TBD
 ,Audio,“audio”,ENUM,"{ ""off"", ""on"", ""auto"" }",Connector,TBD
 ,FMT Dithering,“dither”,ENUM,"{ ""off"", ""on"" }",Connector,TBD
-rcar-du,Generic,"""alpha""",RANGE,"Min=0, Max=255",Plane,TBD
 ,,"""colorkey""",RANGE,"Min=0, Max=0x01ffffff",Plane,TBD
index f4d0b34..a7c150d 100644 (file)
@@ -212,6 +212,24 @@ probably use drm_fb_helper_fbdev_teardown().
 
 Contact: Maintainer of the driver you plan to convert
 
+Clean up mmap forwarding
+------------------------
+
+A lot of drivers forward gem mmap calls to dma-buf mmap for imported buffers.
+And also a lot of them forward dma-buf mmap to the gem mmap implementations.
+Would be great to refactor this all into a set of small common helpers.
+
+Contact: Daniel Vetter
+
+Put a reservation_object into drm_gem_object
+--------------------------------------------
+
+This would remove the need for the ->gem_prime_res_obj callback. It would also
+allow us to implement generic helpers for waiting for a bo, allowing for quite a
+bit of refactoring in the various wait ioctl implementations.
+
+Contact: Daniel Vetter
+
 idr_init_base()
 ---------------
 
diff --git a/Documentation/gpu/xen-front.rst b/Documentation/gpu/xen-front.rst
new file mode 100644 (file)
index 0000000..d988da7
--- /dev/null
@@ -0,0 +1,31 @@
+====================================================
+ drm/xen-front Xen para-virtualized frontend driver
+====================================================
+
+This frontend driver implements Xen para-virtualized display
+according to the display protocol described at
+include/xen/interface/io/displif.h
+
+Driver modes of operation in terms of display buffers used
+==========================================================
+
+.. kernel-doc:: drivers/gpu/drm/xen/xen_drm_front.h
+   :doc: Driver modes of operation in terms of display buffers used
+
+Buffers allocated by the frontend driver
+----------------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/xen/xen_drm_front.h
+   :doc: Buffers allocated by the frontend driver
+
+Buffers allocated by the backend
+--------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/xen/xen_drm_front.h
+   :doc: Buffers allocated by the backend
+
+Driver limitations
+==================
+
+.. kernel-doc:: drivers/gpu/drm/xen/xen_drm_front.h
+   :doc: Driver limitations
index 79bb02f..8daa96a 100644 (file)
@@ -4830,6 +4830,15 @@ S:       Maintained
 F:     drivers/gpu/drm/tinydrm/
 F:     include/drm/tinydrm/
 
+DRM DRIVERS FOR XEN
+M:     Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
+T:     git git://anongit.freedesktop.org/drm/drm-misc
+L:     dri-devel@lists.freedesktop.org
+L:     xen-devel@lists.xen.org
+S:     Supported
+F:     drivers/gpu/drm/xen/
+F:     Documentation/gpu/xen-front.rst
+
 DRM TTM SUBSYSTEM
 M:     Christian Koenig <christian.koenig@amd.com>
 M:     Roger He <Hongbo.He@amd.com>
index deeefa7..757825a 100644 (file)
@@ -289,6 +289,8 @@ source "drivers/gpu/drm/pl111/Kconfig"
 
 source "drivers/gpu/drm/tve200/Kconfig"
 
+source "drivers/gpu/drm/xen/Kconfig"
+
 # Keep legacy drivers last
 
 menuconfig DRM_LEGACY
index 50093ff..9d66657 100644 (file)
@@ -103,3 +103,4 @@ obj-$(CONFIG_DRM_MXSFB)     += mxsfb/
 obj-$(CONFIG_DRM_TINYDRM) += tinydrm/
 obj-$(CONFIG_DRM_PL111) += pl111/
 obj-$(CONFIG_DRM_TVE200) += tve200/
+obj-$(CONFIG_DRM_XEN) += xen/
index 831b733..036dff8 100644 (file)
@@ -799,7 +799,7 @@ static int ast_get_modes(struct drm_connector *connector)
        return 0;
 }
 
-static int ast_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status ast_mode_valid(struct drm_connector *connector,
                          struct drm_display_mode *mode)
 {
        struct ast_private *ast = connector->dev->dev_private;
index ab32d5b..60c937f 100644 (file)
@@ -299,7 +299,6 @@ struct atmel_hlcdc_layer {
 struct atmel_hlcdc_plane {
        struct drm_plane base;
        struct atmel_hlcdc_layer layer;
-       struct atmel_hlcdc_plane_properties *properties;
 };
 
 static inline struct atmel_hlcdc_plane *
@@ -346,18 +345,6 @@ struct atmel_hlcdc_dc_desc {
 };
 
 /**
- * Atmel HLCDC Plane properties.
- *
- * This structure stores plane property definitions.
- *
- * @alpha: alpha blending (or transparency) property
- * @rotation: rotation property
- */
-struct atmel_hlcdc_plane_properties {
-       struct drm_property *alpha;
-};
-
-/**
  * Atmel HLCDC Display Controller.
  *
  * @desc: HLCDC Display Controller description
index e18800e..73c875d 100644 (file)
@@ -31,7 +31,6 @@
  * @src_y: y buffer position
  * @src_w: buffer width
  * @src_h: buffer height
- * @alpha: alpha blending of the plane
  * @disc_x: x discard position
  * @disc_y: y discard position
  * @disc_w: discard width
@@ -54,8 +53,6 @@ struct atmel_hlcdc_plane_state {
        uint32_t src_w;
        uint32_t src_h;
 
-       u8 alpha;
-
        int disc_x;
        int disc_y;
        int disc_w;
@@ -385,7 +382,7 @@ atmel_hlcdc_plane_update_general_settings(struct atmel_hlcdc_plane *plane,
                        cfg |= ATMEL_HLCDC_LAYER_LAEN;
                else
                        cfg |= ATMEL_HLCDC_LAYER_GAEN |
-                              ATMEL_HLCDC_LAYER_GA(state->alpha);
+                              ATMEL_HLCDC_LAYER_GA(state->base.alpha >> 8);
        }
 
        if (state->disc_h && state->disc_w)
@@ -553,7 +550,7 @@ atmel_hlcdc_plane_prepare_disc_area(struct drm_crtc_state *c_state)
 
                if (!ovl_s->fb ||
                    ovl_s->fb->format->has_alpha ||
-                   ovl_state->alpha != 255)
+                   ovl_s->alpha != DRM_BLEND_ALPHA_OPAQUE)
                        continue;
 
                /* TODO: implement a smarter hidden area detection */
@@ -829,51 +826,18 @@ static void atmel_hlcdc_plane_destroy(struct drm_plane *p)
        drm_plane_cleanup(p);
 }
 
-static int atmel_hlcdc_plane_atomic_set_property(struct drm_plane *p,
-                                                struct drm_plane_state *s,
-                                                struct drm_property *property,
-                                                uint64_t val)
-{
-       struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
-       struct atmel_hlcdc_plane_properties *props = plane->properties;
-       struct atmel_hlcdc_plane_state *state =
-                       drm_plane_state_to_atmel_hlcdc_plane_state(s);
-
-       if (property == props->alpha)
-               state->alpha = val;
-       else
-               return -EINVAL;
-
-       return 0;
-}
-
-static int atmel_hlcdc_plane_atomic_get_property(struct drm_plane *p,
-                                       const struct drm_plane_state *s,
-                                       struct drm_property *property,
-                                       uint64_t *val)
-{
-       struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
-       struct atmel_hlcdc_plane_properties *props = plane->properties;
-       const struct atmel_hlcdc_plane_state *state =
-               container_of(s, const struct atmel_hlcdc_plane_state, base);
-
-       if (property == props->alpha)
-               *val = state->alpha;
-       else
-               return -EINVAL;
-
-       return 0;
-}
-
-static int atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane,
-                               struct atmel_hlcdc_plane_properties *props)
+static int atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane)
 {
        const struct atmel_hlcdc_layer_desc *desc = plane->layer.desc;
 
        if (desc->type == ATMEL_HLCDC_OVERLAY_LAYER ||
-           desc->type == ATMEL_HLCDC_CURSOR_LAYER)
-               drm_object_attach_property(&plane->base.base,
-                                          props->alpha, 255);
+           desc->type == ATMEL_HLCDC_CURSOR_LAYER) {
+               int ret;
+
+               ret = drm_plane_create_alpha_property(&plane->base);
+               if (ret)
+                       return ret;
+       }
 
        if (desc->layout.xstride && desc->layout.pstride) {
                int ret;
@@ -988,8 +952,8 @@ static void atmel_hlcdc_plane_reset(struct drm_plane *p)
                        return;
                }
 
-               state->alpha = 255;
                p->state = &state->base;
+               p->state->alpha = DRM_BLEND_ALPHA_OPAQUE;
                p->state->plane = p;
        }
 }
@@ -1042,13 +1006,10 @@ static const struct drm_plane_funcs layer_plane_funcs = {
        .reset = atmel_hlcdc_plane_reset,
        .atomic_duplicate_state = atmel_hlcdc_plane_atomic_duplicate_state,
        .atomic_destroy_state = atmel_hlcdc_plane_atomic_destroy_state,
-       .atomic_set_property = atmel_hlcdc_plane_atomic_set_property,
-       .atomic_get_property = atmel_hlcdc_plane_atomic_get_property,
 };
 
 static int atmel_hlcdc_plane_create(struct drm_device *dev,
-                                   const struct atmel_hlcdc_layer_desc *desc,
-                                   struct atmel_hlcdc_plane_properties *props)
+                                   const struct atmel_hlcdc_layer_desc *desc)
 {
        struct atmel_hlcdc_dc *dc = dev->dev_private;
        struct atmel_hlcdc_plane *plane;
@@ -1060,7 +1021,6 @@ static int atmel_hlcdc_plane_create(struct drm_device *dev,
                return -ENOMEM;
 
        atmel_hlcdc_layer_init(&plane->layer, desc, dc->hlcdc->regmap);
-       plane->properties = props;
 
        if (desc->type == ATMEL_HLCDC_BASE_LAYER)
                type = DRM_PLANE_TYPE_PRIMARY;
@@ -1081,7 +1041,7 @@ static int atmel_hlcdc_plane_create(struct drm_device *dev,
                             &atmel_hlcdc_layer_plane_helper_funcs);
 
        /* Set default property values*/
-       ret = atmel_hlcdc_plane_init_properties(plane, props);
+       ret = atmel_hlcdc_plane_init_properties(plane);
        if (ret)
                return ret;
 
@@ -1090,34 +1050,13 @@ static int atmel_hlcdc_plane_create(struct drm_device *dev,
        return 0;
 }
 
-static struct atmel_hlcdc_plane_properties *
-atmel_hlcdc_plane_create_properties(struct drm_device *dev)
-{
-       struct atmel_hlcdc_plane_properties *props;
-
-       props = devm_kzalloc(dev->dev, sizeof(*props), GFP_KERNEL);
-       if (!props)
-               return ERR_PTR(-ENOMEM);
-
-       props->alpha = drm_property_create_range(dev, 0, "alpha", 0, 255);
-       if (!props->alpha)
-               return ERR_PTR(-ENOMEM);
-
-       return props;
-}
-
 int atmel_hlcdc_create_planes(struct drm_device *dev)
 {
        struct atmel_hlcdc_dc *dc = dev->dev_private;
-       struct atmel_hlcdc_plane_properties *props;
        const struct atmel_hlcdc_layer_desc *descs = dc->desc->layers;
        int nlayers = dc->desc->nlayers;
        int i, ret;
 
-       props = atmel_hlcdc_plane_create_properties(dev);
-       if (IS_ERR(props))
-               return PTR_ERR(props);
-
        dc->dscrpool = dmam_pool_create("atmel-hlcdc-dscr", dev->dev,
                                sizeof(struct atmel_hlcdc_dma_channel_dscr),
                                sizeof(u64), 0);
@@ -1130,7 +1069,7 @@ int atmel_hlcdc_create_planes(struct drm_device *dev)
                    descs[i].type != ATMEL_HLCDC_CURSOR_LAYER)
                        continue;
 
-               ret = atmel_hlcdc_plane_create(dev, &descs[i], props);
+               ret = atmel_hlcdc_plane_create(dev, &descs[i]);
                if (ret)
                        return ret;
        }
index a24a18f..233980a 100644 (file)
@@ -188,7 +188,7 @@ static int bochs_connector_get_modes(struct drm_connector *connector)
        return count;
 }
 
-static int bochs_connector_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status bochs_connector_mode_valid(struct drm_connector *connector,
                                      struct drm_display_mode *mode)
 {
        struct bochs_device *bochs =
index 3aa65bd..1d75d3a 100644 (file)
@@ -25,6 +25,16 @@ config DRM_ANALOGIX_ANX78XX
          the HDMI output of an application processor to MyDP
          or DisplayPort.
 
+config DRM_CDNS_DSI
+       tristate "Cadence DPI/DSI bridge"
+       select DRM_KMS_HELPER
+       select DRM_MIPI_DSI
+       select DRM_PANEL_BRIDGE
+       depends on OF
+       help
+         Support Cadence DPI to DSI bridge. This is an internal
+         bridge and is meant to be directly embedded in a SoC.
+
 config DRM_DUMB_VGA_DAC
        tristate "Dumb VGA DAC Bridge support"
        depends on OF
@@ -93,6 +103,12 @@ config DRM_SII9234
          It is an I2C driver, that detects connection of MHL bridge
          and starts encapsulation of HDMI signal.
 
+config DRM_THINE_THC63LVD1024
+       tristate "Thine THC63LVD1024 LVDS decoder bridge"
+       depends on OF
+       ---help---
+         Thine THC63LVD1024 LVDS/parallel converter driver.
+
 config DRM_TOSHIBA_TC358767
        tristate "Toshiba TC358767 eDP bridge"
        depends on OF
index 373eb28..35f88d4 100644 (file)
@@ -1,5 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_DRM_ANALOGIX_ANX78XX) += analogix-anx78xx.o
+obj-$(CONFIG_DRM_CDNS_DSI) += cdns-dsi.o
 obj-$(CONFIG_DRM_DUMB_VGA_DAC) += dumb-vga-dac.o
 obj-$(CONFIG_DRM_LVDS_ENCODER) += lvds-encoder.o
 obj-$(CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW) += megachips-stdpxxxx-ge-b850v3-fw.o
@@ -8,6 +9,7 @@ obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o
 obj-$(CONFIG_DRM_SIL_SII8620) += sil-sii8620.o
 obj-$(CONFIG_DRM_SII902X) += sii902x.o
 obj-$(CONFIG_DRM_SII9234) += sii9234.o
+obj-$(CONFIG_DRM_THINE_THC63LVD1024) += thc63lvd1024.o
 obj-$(CONFIG_DRM_TOSHIBA_TC358767) += tc358767.o
 obj-$(CONFIG_DRM_ANALOGIX_DP) += analogix/
 obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511/
index d034b2c..73d8ccb 100644 (file)
 #define ADV7511_REG_CHIP_ID_HIGH               0xf5
 #define ADV7511_REG_CHIP_ID_LOW                        0xf6
 
+/* Hardware defined default addresses for I2C register maps */
+#define ADV7511_CEC_I2C_ADDR_DEFAULT           0x3c
+#define ADV7511_EDID_I2C_ADDR_DEFAULT          0x3f
+#define ADV7511_PACKET_I2C_ADDR_DEFAULT                0x38
+
 #define ADV7511_CSC_ENABLE                     BIT(7)
 #define ADV7511_CSC_UPDATE_MODE                        BIT(5)
 
@@ -321,6 +326,7 @@ enum adv7511_type {
 struct adv7511 {
        struct i2c_client *i2c_main;
        struct i2c_client *i2c_edid;
+       struct i2c_client *i2c_packet;
        struct i2c_client *i2c_cec;
 
        struct regmap *regmap;
index efa29db..2614cea 100644 (file)
@@ -586,7 +586,7 @@ static int adv7511_get_modes(struct adv7511 *adv7511,
        /* Reading the EDID only works if the device is powered */
        if (!adv7511->powered) {
                unsigned int edid_i2c_addr =
-                                       (adv7511->i2c_main->addr << 1) + 4;
+                                       (adv7511->i2c_edid->addr << 1);
 
                __adv7511_power_on(adv7511);
 
@@ -654,7 +654,7 @@ adv7511_detect(struct adv7511 *adv7511, struct drm_connector *connector)
        return status;
 }
 
-static int adv7511_mode_valid(struct adv7511 *adv7511,
+static enum drm_mode_status adv7511_mode_valid(struct adv7511 *adv7511,
                              struct drm_display_mode *mode)
 {
        if (mode->clock > 165000)
@@ -969,10 +969,10 @@ static int adv7511_init_cec_regmap(struct adv7511 *adv)
 {
        int ret;
 
-       adv->i2c_cec = i2c_new_dummy(adv->i2c_main->adapter,
-                                    adv->i2c_main->addr - 1);
+       adv->i2c_cec = i2c_new_secondary_device(adv->i2c_main, "cec",
+                                               ADV7511_CEC_I2C_ADDR_DEFAULT);
        if (!adv->i2c_cec)
-               return -ENOMEM;
+               return -EINVAL;
        i2c_set_clientdata(adv->i2c_cec, adv);
 
        adv->regmap_cec = devm_regmap_init_i2c(adv->i2c_cec,
@@ -1082,8 +1082,6 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
        struct adv7511_link_config link_config;
        struct adv7511 *adv7511;
        struct device *dev = &i2c->dev;
-       unsigned int main_i2c_addr = i2c->addr << 1;
-       unsigned int edid_i2c_addr = main_i2c_addr + 4;
        unsigned int val;
        int ret;
 
@@ -1153,23 +1151,34 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
        if (ret)
                goto uninit_regulators;
 
-       regmap_write(adv7511->regmap, ADV7511_REG_EDID_I2C_ADDR, edid_i2c_addr);
-       regmap_write(adv7511->regmap, ADV7511_REG_PACKET_I2C_ADDR,
-                    main_i2c_addr - 0xa);
-       regmap_write(adv7511->regmap, ADV7511_REG_CEC_I2C_ADDR,
-                    main_i2c_addr - 2);
-
        adv7511_packet_disable(adv7511, 0xffff);
 
-       adv7511->i2c_edid = i2c_new_dummy(i2c->adapter, edid_i2c_addr >> 1);
+       adv7511->i2c_edid = i2c_new_secondary_device(i2c, "edid",
+                                       ADV7511_EDID_I2C_ADDR_DEFAULT);
        if (!adv7511->i2c_edid) {
-               ret = -ENOMEM;
+               ret = -EINVAL;
                goto uninit_regulators;
        }
 
+       regmap_write(adv7511->regmap, ADV7511_REG_EDID_I2C_ADDR,
+                    adv7511->i2c_edid->addr << 1);
+
+       adv7511->i2c_packet = i2c_new_secondary_device(i2c, "packet",
+                                       ADV7511_PACKET_I2C_ADDR_DEFAULT);
+       if (!adv7511->i2c_packet) {
+               ret = -EINVAL;
+               goto err_i2c_unregister_edid;
+       }
+
+       regmap_write(adv7511->regmap, ADV7511_REG_PACKET_I2C_ADDR,
+                    adv7511->i2c_packet->addr << 1);
+
        ret = adv7511_init_cec_regmap(adv7511);
        if (ret)
-               goto err_i2c_unregister_edid;
+               goto err_i2c_unregister_packet;
+
+       regmap_write(adv7511->regmap, ADV7511_REG_CEC_I2C_ADDR,
+                    adv7511->i2c_cec->addr << 1);
 
        INIT_WORK(&adv7511->hpd_work, adv7511_hpd_work);
 
@@ -1207,6 +1216,8 @@ err_unregister_cec:
        i2c_unregister_device(adv7511->i2c_cec);
        if (adv7511->cec_clk)
                clk_disable_unprepare(adv7511->cec_clk);
+err_i2c_unregister_packet:
+       i2c_unregister_device(adv7511->i2c_packet);
 err_i2c_unregister_edid:
        i2c_unregister_device(adv7511->i2c_edid);
 uninit_regulators:
@@ -1233,6 +1244,7 @@ static int adv7511_remove(struct i2c_client *i2c)
 
        cec_unregister_adapter(adv7511->cec_adap);
 
+       i2c_unregister_device(adv7511->i2c_packet);
        i2c_unregister_device(adv7511->i2c_edid);
 
        return 0;
index 5c52307..2bcbfad 100644 (file)
@@ -43,8 +43,10 @@ struct bridge_init {
        struct device_node *node;
 };
 
-static void analogix_dp_init_dp(struct analogix_dp_device *dp)
+static int analogix_dp_init_dp(struct analogix_dp_device *dp)
 {
+       int ret;
+
        analogix_dp_reset(dp);
 
        analogix_dp_swreset(dp);
@@ -56,10 +58,13 @@ static void analogix_dp_init_dp(struct analogix_dp_device *dp)
        analogix_dp_enable_sw_function(dp);
 
        analogix_dp_config_interrupt(dp);
-       analogix_dp_init_analog_func(dp);
+       ret = analogix_dp_init_analog_func(dp);
+       if (ret)
+               return ret;
 
        analogix_dp_init_hpd(dp);
        analogix_dp_init_aux(dp);
+       return 0;
 }
 
 static int analogix_dp_detect_hpd(struct analogix_dp_device *dp)
@@ -71,7 +76,7 @@ static int analogix_dp_detect_hpd(struct analogix_dp_device *dp)
                        return 0;
 
                timeout_loop++;
-               usleep_range(10, 11);
+               usleep_range(1000, 1100);
        }
 
        /*
@@ -148,87 +153,146 @@ int analogix_dp_disable_psr(struct analogix_dp_device *dp)
        psr_vsc.DB1 = 0;
 
        ret = drm_dp_dpcd_writeb(&dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
-       if (ret != 1)
+       if (ret != 1) {
                dev_err(dp->dev, "Failed to set DP Power0 %d\n", ret);
+               return ret;
+       }
 
        return analogix_dp_send_psr_spd(dp, &psr_vsc, false);
 }
 EXPORT_SYMBOL_GPL(analogix_dp_disable_psr);
 
-static bool analogix_dp_detect_sink_psr(struct analogix_dp_device *dp)
+static int analogix_dp_detect_sink_psr(struct analogix_dp_device *dp)
 {
        unsigned char psr_version;
+       int ret;
+
+       ret = drm_dp_dpcd_readb(&dp->aux, DP_PSR_SUPPORT, &psr_version);
+       if (ret != 1) {
+               dev_err(dp->dev, "failed to get PSR version, disable it\n");
+               return ret;
+       }
 
-       drm_dp_dpcd_readb(&dp->aux, DP_PSR_SUPPORT, &psr_version);
        dev_dbg(dp->dev, "Panel PSR version : %x\n", psr_version);
 
-       return (psr_version & DP_PSR_IS_SUPPORTED) ? true : false;
+       dp->psr_enable = (psr_version & DP_PSR_IS_SUPPORTED) ? true : false;
+
+       return 0;
 }
 
-static void analogix_dp_enable_sink_psr(struct analogix_dp_device *dp)
+static int analogix_dp_enable_sink_psr(struct analogix_dp_device *dp)
 {
        unsigned char psr_en;
+       int ret;
 
        /* Disable psr function */
-       drm_dp_dpcd_readb(&dp->aux, DP_PSR_EN_CFG, &psr_en);
+       ret = drm_dp_dpcd_readb(&dp->aux, DP_PSR_EN_CFG, &psr_en);
+       if (ret != 1) {
+               dev_err(dp->dev, "failed to get psr config\n");
+               goto end;
+       }
+
        psr_en &= ~DP_PSR_ENABLE;
-       drm_dp_dpcd_writeb(&dp->aux, DP_PSR_EN_CFG, psr_en);
+       ret = drm_dp_dpcd_writeb(&dp->aux, DP_PSR_EN_CFG, psr_en);
+       if (ret != 1) {
+               dev_err(dp->dev, "failed to disable panel psr\n");
+               goto end;
+       }
 
        /* Main-Link transmitter remains active during PSR active states */
        psr_en = DP_PSR_MAIN_LINK_ACTIVE | DP_PSR_CRC_VERIFICATION;
-       drm_dp_dpcd_writeb(&dp->aux, DP_PSR_EN_CFG, psr_en);
+       ret = drm_dp_dpcd_writeb(&dp->aux, DP_PSR_EN_CFG, psr_en);
+       if (ret != 1) {
+               dev_err(dp->dev, "failed to set panel psr\n");
+               goto end;
+       }
 
        /* Enable psr function */
        psr_en = DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE |
                 DP_PSR_CRC_VERIFICATION;
-       drm_dp_dpcd_writeb(&dp->aux, DP_PSR_EN_CFG, psr_en);
+       ret = drm_dp_dpcd_writeb(&dp->aux, DP_PSR_EN_CFG, psr_en);
+       if (ret != 1) {
+               dev_err(dp->dev, "failed to set panel psr\n");
+               goto end;
+       }
 
        analogix_dp_enable_psr_crc(dp);
+
+       return 0;
+end:
+       dev_err(dp->dev, "enable psr fail, force to disable psr\n");
+       dp->psr_enable = false;
+
+       return ret;
 }
 
-static void
+static int
 analogix_dp_enable_rx_to_enhanced_mode(struct analogix_dp_device *dp,
                                       bool enable)
 {
        u8 data;
+       int ret;
 
-       drm_dp_dpcd_readb(&dp->aux, DP_LANE_COUNT_SET, &data);
+       ret = drm_dp_dpcd_readb(&dp->aux, DP_LANE_COUNT_SET, &data);
+       if (ret != 1)
+               return ret;
 
        if (enable)
-               drm_dp_dpcd_writeb(&dp->aux, DP_LANE_COUNT_SET,
-                                  DP_LANE_COUNT_ENHANCED_FRAME_EN |
-                                       DPCD_LANE_COUNT_SET(data));
+               ret = drm_dp_dpcd_writeb(&dp->aux, DP_LANE_COUNT_SET,
+                                        DP_LANE_COUNT_ENHANCED_FRAME_EN |
+                                        DPCD_LANE_COUNT_SET(data));
        else
-               drm_dp_dpcd_writeb(&dp->aux, DP_LANE_COUNT_SET,
-                                  DPCD_LANE_COUNT_SET(data));
+               ret = drm_dp_dpcd_writeb(&dp->aux, DP_LANE_COUNT_SET,
+                                        DPCD_LANE_COUNT_SET(data));
+
+       return ret < 0 ? ret : 0;
 }
 
-static int analogix_dp_is_enhanced_mode_available(struct analogix_dp_device *dp)
+static int analogix_dp_is_enhanced_mode_available(struct analogix_dp_device *dp,
+                                                 u8 *enhanced_mode_support)
 {
        u8 data;
-       int retval;
+       int ret;
 
-       drm_dp_dpcd_readb(&dp->aux, DP_MAX_LANE_COUNT, &data);
-       retval = DPCD_ENHANCED_FRAME_CAP(data);
+       ret = drm_dp_dpcd_readb(&dp->aux, DP_MAX_LANE_COUNT, &data);
+       if (ret != 1) {
+               *enhanced_mode_support = 0;
+               return ret;
+       }
 
-       return retval;
+       *enhanced_mode_support = DPCD_ENHANCED_FRAME_CAP(data);
+
+       return 0;
 }
 
-static void analogix_dp_set_enhanced_mode(struct analogix_dp_device *dp)
+static int analogix_dp_set_enhanced_mode(struct analogix_dp_device *dp)
 {
        u8 data;
+       int ret;
+
+       ret = analogix_dp_is_enhanced_mode_available(dp, &data);
+       if (ret < 0)
+               return ret;
+
+       ret = analogix_dp_enable_rx_to_enhanced_mode(dp, data);
+       if (ret < 0)
+               return ret;
 
-       data = analogix_dp_is_enhanced_mode_available(dp);
-       analogix_dp_enable_rx_to_enhanced_mode(dp, data);
        analogix_dp_enable_enhanced_mode(dp, data);
+
+       return 0;
 }
 
-static void analogix_dp_training_pattern_dis(struct analogix_dp_device *dp)
+static int analogix_dp_training_pattern_dis(struct analogix_dp_device *dp)
 {
+       int ret;
+
        analogix_dp_set_training_pattern(dp, DP_NONE);
 
-       drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
-                          DP_TRAINING_PATTERN_DISABLE);
+       ret = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
+                                DP_TRAINING_PATTERN_DISABLE);
+
+       return ret < 0 ? ret : 0;
 }
 
 static void
@@ -276,6 +340,12 @@ static int analogix_dp_link_start(struct analogix_dp_device *dp)
        retval = drm_dp_dpcd_write(&dp->aux, DP_LINK_BW_SET, buf, 2);
        if (retval < 0)
                return retval;
+       /* set enhanced mode if available */
+       retval = analogix_dp_set_enhanced_mode(dp);
+       if (retval < 0) {
+               dev_err(dp->dev, "failed to set enhanced mode\n");
+               return retval;
+       }
 
        /* Set TX pre-emphasis to minimum */
        for (lane = 0; lane < lane_count; lane++)
@@ -531,7 +601,7 @@ static int analogix_dp_process_equalizer_training(struct analogix_dp_device *dp)
 {
        int lane, lane_count, retval;
        u32 reg;
-       u8 link_align, link_status[2], adjust_request[2], spread;
+       u8 link_align, link_status[2], adjust_request[2];
 
        usleep_range(400, 401);
 
@@ -560,10 +630,11 @@ static int analogix_dp_process_equalizer_training(struct analogix_dp_device *dp)
 
        if (!analogix_dp_channel_eq_ok(link_status, link_align, lane_count)) {
                /* traing pattern Set to Normal */
-               analogix_dp_training_pattern_dis(dp);
+               retval = analogix_dp_training_pattern_dis(dp);
+               if (retval < 0)
+                       return retval;
 
                dev_info(dp->dev, "Link Training success!\n");
-
                analogix_dp_get_link_bandwidth(dp, &reg);
                dp->link_train.link_rate = reg;
                dev_dbg(dp->dev, "final bandwidth = %.2x\n",
@@ -574,22 +645,6 @@ static int analogix_dp_process_equalizer_training(struct analogix_dp_device *dp)
                dev_dbg(dp->dev, "final lane count = %.2x\n",
                        dp->link_train.lane_count);
 
-               retval = drm_dp_dpcd_readb(&dp->aux, DP_MAX_DOWNSPREAD,
-                                          &spread);
-               if (retval != 1) {
-                       dev_err(dp->dev, "failed to read downspread %d\n",
-                               retval);
-                       dp->fast_train_support = false;
-               } else {
-                       dp->fast_train_support =
-                               (spread & DP_NO_AUX_HANDSHAKE_LINK_TRAINING) ?
-                                       true : false;
-               }
-               dev_dbg(dp->dev, "fast link training %s\n",
-                       dp->fast_train_support ? "supported" : "unsupported");
-
-               /* set enhanced mode if available */
-               analogix_dp_set_enhanced_mode(dp);
                dp->link_train.lt_state = FINISHED;
 
                return 0;
@@ -793,7 +848,7 @@ static int analogix_dp_fast_link_train(struct analogix_dp_device *dp)
 
 static int analogix_dp_train_link(struct analogix_dp_device *dp)
 {
-       if (dp->fast_train_support)
+       if (dp->fast_train_enable)
                return analogix_dp_fast_link_train(dp);
 
        return analogix_dp_full_link_train(dp, dp->video_info.max_lane_count,
@@ -819,11 +874,10 @@ static int analogix_dp_config_video(struct analogix_dp_device *dp)
                if (analogix_dp_is_slave_video_stream_clock_on(dp) == 0)
                        break;
                if (timeout_loop > DP_TIMEOUT_LOOP_COUNT) {
-                       dev_err(dp->dev, "Timeout of video streamclk ok\n");
+                       dev_err(dp->dev, "Timeout of slave video streamclk ok\n");
                        return -ETIMEDOUT;
                }
-
-               usleep_range(1, 2);
+               usleep_range(1000, 1001);
        }
 
        /* Set to use the register calculated M/N video */
@@ -838,6 +892,9 @@ static int analogix_dp_config_video(struct analogix_dp_device *dp)
        /* Configure video slave mode */
        analogix_dp_enable_video_master(dp, 0);
 
+       /* Enable video */
+       analogix_dp_start_video(dp);
+
        timeout_loop = 0;
 
        for (;;) {
@@ -850,8 +907,9 @@ static int analogix_dp_config_video(struct analogix_dp_device *dp)
                        done_count = 0;
                }
                if (timeout_loop > DP_TIMEOUT_LOOP_COUNT) {
-                       dev_err(dp->dev, "Timeout of video streamclk ok\n");
-                       return -ETIMEDOUT;
+                       dev_warn(dp->dev,
+                                "Ignoring timeout of video streamclk ok\n");
+                       break;
                }
 
                usleep_range(1000, 1001);
@@ -860,24 +918,32 @@ static int analogix_dp_config_video(struct analogix_dp_device *dp)
        return 0;
 }
 
-static void analogix_dp_enable_scramble(struct analogix_dp_device *dp,
-                                       bool enable)
+static int analogix_dp_enable_scramble(struct analogix_dp_device *dp,
+                                      bool enable)
 {
        u8 data;
+       int ret;
 
        if (enable) {
                analogix_dp_enable_scrambling(dp);
 
-               drm_dp_dpcd_readb(&dp->aux, DP_TRAINING_PATTERN_SET, &data);
-               drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
+               ret = drm_dp_dpcd_readb(&dp->aux, DP_TRAINING_PATTERN_SET,
+                                       &data);
+               if (ret != 1)
+                       return ret;
+               ret = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
                                   (u8)(data & ~DP_LINK_SCRAMBLING_DISABLE));
        } else {
                analogix_dp_disable_scrambling(dp);
 
-               drm_dp_dpcd_readb(&dp->aux, DP_TRAINING_PATTERN_SET, &data);
-               drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
+               ret = drm_dp_dpcd_readb(&dp->aux, DP_TRAINING_PATTERN_SET,
+                                       &data);
+               if (ret != 1)
+                       return ret;
+               ret = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
                                   (u8)(data | DP_LINK_SCRAMBLING_DISABLE));
        }
+       return ret < 0 ? ret : 0;
 }
 
 static irqreturn_t analogix_dp_hardirq(int irq, void *arg)
@@ -916,7 +982,23 @@ static irqreturn_t analogix_dp_irq_thread(int irq, void *arg)
        return IRQ_HANDLED;
 }
 
-static void analogix_dp_commit(struct analogix_dp_device *dp)
+static int analogix_dp_fast_link_train_detection(struct analogix_dp_device *dp)
+{
+       int ret;
+       u8 spread;
+
+       ret = drm_dp_dpcd_readb(&dp->aux, DP_MAX_DOWNSPREAD, &spread);
+       if (ret != 1) {
+               dev_err(dp->dev, "failed to read downspread %d\n", ret);
+               return ret;
+       }
+       dp->fast_train_enable = !!(spread & DP_NO_AUX_HANDSHAKE_LINK_TRAINING);
+       dev_dbg(dp->dev, "fast link training %s\n",
+               dp->fast_train_enable ? "supported" : "unsupported");
+       return 0;
+}
+
+static int analogix_dp_commit(struct analogix_dp_device *dp)
 {
        int ret;
 
@@ -926,34 +1008,50 @@ static void analogix_dp_commit(struct analogix_dp_device *dp)
                        DRM_ERROR("failed to disable the panel\n");
        }
 
-       ret = readx_poll_timeout(analogix_dp_train_link, dp, ret, !ret, 100,
-                                DP_TIMEOUT_TRAINING_US * 5);
+       ret = analogix_dp_train_link(dp);
        if (ret) {
                dev_err(dp->dev, "unable to do link train, ret=%d\n", ret);
-               return;
+               return ret;
        }
 
-       analogix_dp_enable_scramble(dp, 1);
-       analogix_dp_enable_rx_to_enhanced_mode(dp, 1);
-       analogix_dp_enable_enhanced_mode(dp, 1);
+       ret = analogix_dp_enable_scramble(dp, 1);
+       if (ret < 0) {
+               dev_err(dp->dev, "can not enable scramble\n");
+               return ret;
+       }
 
        analogix_dp_init_video(dp);
        ret = analogix_dp_config_video(dp);
-       if (ret)
+       if (ret) {
                dev_err(dp->dev, "unable to config video\n");
+               return ret;
+       }
 
        /* Safe to enable the panel now */
        if (dp->plat_data->panel) {
-               if (drm_panel_enable(dp->plat_data->panel))
+               ret = drm_panel_enable(dp->plat_data->panel);
+               if (ret) {
                        DRM_ERROR("failed to enable the panel\n");
+                       return ret;
+               }
        }
 
-       /* Enable video */
-       analogix_dp_start_video(dp);
+       ret = analogix_dp_detect_sink_psr(dp);
+       if (ret)
+               return ret;
 
-       dp->psr_enable = analogix_dp_detect_sink_psr(dp);
-       if (dp->psr_enable)
-               analogix_dp_enable_sink_psr(dp);
+       if (dp->psr_enable) {
+               ret = analogix_dp_enable_sink_psr(dp);
+               if (ret)
+                       return ret;
+       }
+
+       /* Check whether panel supports fast training */
+       ret = analogix_dp_fast_link_train_detection(dp);
+       if (ret)
+               dp->psr_enable = false;
+
+       return ret;
 }
 
 /*
@@ -1150,24 +1248,80 @@ static void analogix_dp_bridge_pre_enable(struct drm_bridge *bridge)
                DRM_ERROR("failed to setup the panel ret = %d\n", ret);
 }
 
-static void analogix_dp_bridge_enable(struct drm_bridge *bridge)
+static int analogix_dp_set_bridge(struct analogix_dp_device *dp)
 {
-       struct analogix_dp_device *dp = bridge->driver_private;
-
-       if (dp->dpms_mode == DRM_MODE_DPMS_ON)
-               return;
+       int ret;
 
        pm_runtime_get_sync(dp->dev);
 
-       if (dp->plat_data->power_on)
-               dp->plat_data->power_on(dp->plat_data);
+       ret = clk_prepare_enable(dp->clock);
+       if (ret < 0) {
+               DRM_ERROR("Failed to prepare_enable the clock clk [%d]\n", ret);
+               goto out_dp_clk_pre;
+       }
+
+       if (dp->plat_data->power_on_start)
+               dp->plat_data->power_on_start(dp->plat_data);
 
        phy_power_on(dp->phy);
-       analogix_dp_init_dp(dp);
+
+       ret = analogix_dp_init_dp(dp);
+       if (ret)
+               goto out_dp_init;
+
+       /*
+        * According to DP spec v1.3 chap 3.5.1.2 Link Training,
+        * We should first make sure the HPD signal is asserted high by device
+        * when we want to establish a link with it.
+        */
+       ret = analogix_dp_detect_hpd(dp);
+       if (ret) {
+               DRM_ERROR("failed to get hpd signal ret = %d\n", ret);
+               goto out_dp_init;
+       }
+
+       ret = analogix_dp_commit(dp);
+       if (ret) {
+               DRM_ERROR("dp commit error, ret = %d\n", ret);
+               goto out_dp_init;
+       }
+
+       if (dp->plat_data->power_on_end)
+               dp->plat_data->power_on_end(dp->plat_data);
+
        enable_irq(dp->irq);
-       analogix_dp_commit(dp);
+       return 0;
 
-       dp->dpms_mode = DRM_MODE_DPMS_ON;
+out_dp_init:
+       phy_power_off(dp->phy);
+       if (dp->plat_data->power_off)
+               dp->plat_data->power_off(dp->plat_data);
+       clk_disable_unprepare(dp->clock);
+out_dp_clk_pre:
+       pm_runtime_put_sync(dp->dev);
+
+       return ret;
+}
+
+static void analogix_dp_bridge_enable(struct drm_bridge *bridge)
+{
+       struct analogix_dp_device *dp = bridge->driver_private;
+       int timeout_loop = 0;
+
+       if (dp->dpms_mode == DRM_MODE_DPMS_ON)
+               return;
+
+       while (timeout_loop < MAX_PLL_LOCK_LOOP) {
+               if (analogix_dp_set_bridge(dp) == 0) {
+                       dp->dpms_mode = DRM_MODE_DPMS_ON;
+                       return;
+               }
+               dev_err(dp->dev, "failed to set bridge, retry: %d\n",
+                       timeout_loop);
+               timeout_loop++;
+               usleep_range(10, 11);
+       }
+       dev_err(dp->dev, "too many retries trying to set bridge, giving up\n");
 }
 
 static void analogix_dp_bridge_disable(struct drm_bridge *bridge)
@@ -1186,11 +1340,15 @@ static void analogix_dp_bridge_disable(struct drm_bridge *bridge)
        }
 
        disable_irq(dp->irq);
-       phy_power_off(dp->phy);
 
        if (dp->plat_data->power_off)
                dp->plat_data->power_off(dp->plat_data);
 
+       analogix_dp_set_analog_power_down(dp, POWER_ALL, 1);
+       phy_power_off(dp->phy);
+
+       clk_disable_unprepare(dp->clock);
+
        pm_runtime_put_sync(dp->dev);
 
        ret = analogix_dp_prepare_panel(dp, false, true);
@@ -1198,6 +1356,7 @@ static void analogix_dp_bridge_disable(struct drm_bridge *bridge)
                DRM_ERROR("failed to setup the panel ret = %d\n", ret);
 
        dp->psr_enable = false;
+       dp->fast_train_enable = false;
        dp->dpms_mode = DRM_MODE_DPMS_OFF;
 }
 
index 6a96ef7..769255d 100644 (file)
@@ -19,6 +19,7 @@
 #define DP_TIMEOUT_LOOP_COUNT 100
 #define MAX_CR_LOOP 5
 #define MAX_EQ_LOOP 5
+#define MAX_PLL_LOCK_LOOP 5
 
 /* Training takes 22ms if AUX channel comm fails. Use this as retry interval */
 #define DP_TIMEOUT_TRAINING_US                 22000
@@ -173,7 +174,7 @@ struct analogix_dp_device {
        int                     hpd_gpio;
        bool                    force_hpd;
        bool                    psr_enable;
-       bool                    fast_train_support;
+       bool                    fast_train_enable;
 
        struct mutex            panel_lock;
        bool                    panel_is_modeset;
@@ -197,7 +198,7 @@ void analogix_dp_set_pll_power_down(struct analogix_dp_device *dp, bool enable);
 void analogix_dp_set_analog_power_down(struct analogix_dp_device *dp,
                                       enum analog_power_block block,
                                       bool enable);
-void analogix_dp_init_analog_func(struct analogix_dp_device *dp);
+int analogix_dp_init_analog_func(struct analogix_dp_device *dp);
 void analogix_dp_init_hpd(struct analogix_dp_device *dp);
 void analogix_dp_force_hpd(struct analogix_dp_device *dp);
 enum dp_irq_type analogix_dp_get_irq_type(struct analogix_dp_device *dp);
index 9df2f3e..a5f2763 100644 (file)
@@ -126,9 +126,14 @@ void analogix_dp_reset(struct analogix_dp_device *dp)
        analogix_dp_stop_video(dp);
        analogix_dp_enable_video_mute(dp, 0);
 
-       reg = MASTER_VID_FUNC_EN_N | SLAVE_VID_FUNC_EN_N |
-               AUD_FIFO_FUNC_EN_N | AUD_FUNC_EN_N |
-               HDCP_FUNC_EN_N | SW_FUNC_EN_N;
+       if (dp->plat_data && is_rockchip(dp->plat_data->dev_type))
+               reg = RK_VID_CAP_FUNC_EN_N | RK_VID_FIFO_FUNC_EN_N |
+                       SW_FUNC_EN_N;
+       else
+               reg = MASTER_VID_FUNC_EN_N | SLAVE_VID_FUNC_EN_N |
+                       AUD_FIFO_FUNC_EN_N | AUD_FUNC_EN_N |
+                       HDCP_FUNC_EN_N | SW_FUNC_EN_N;
+
        writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_1);
 
        reg = SSC_FUNC_EN_N | AUX_FUNC_EN_N |
@@ -230,16 +235,20 @@ enum pll_status analogix_dp_get_pll_lock_status(struct analogix_dp_device *dp)
 void analogix_dp_set_pll_power_down(struct analogix_dp_device *dp, bool enable)
 {
        u32 reg;
+       u32 mask = DP_PLL_PD;
+       u32 pd_addr = ANALOGIX_DP_PLL_CTL;
 
-       if (enable) {
-               reg = readl(dp->reg_base + ANALOGIX_DP_PLL_CTL);
-               reg |= DP_PLL_PD;
-               writel(reg, dp->reg_base + ANALOGIX_DP_PLL_CTL);
-       } else {
-               reg = readl(dp->reg_base + ANALOGIX_DP_PLL_CTL);
-               reg &= ~DP_PLL_PD;
-               writel(reg, dp->reg_base + ANALOGIX_DP_PLL_CTL);
+       if (dp->plat_data && is_rockchip(dp->plat_data->dev_type)) {
+               pd_addr = ANALOGIX_DP_PD;
+               mask = RK_PLL_PD;
        }
+
+       reg = readl(dp->reg_base + pd_addr);
+       if (enable)
+               reg |= mask;
+       else
+               reg &= ~mask;
+       writel(reg, dp->reg_base + pd_addr);
 }
 
 void analogix_dp_set_analog_power_down(struct analogix_dp_device *dp,
@@ -248,83 +257,98 @@ void analogix_dp_set_analog_power_down(struct analogix_dp_device *dp,
 {
        u32 reg;
        u32 phy_pd_addr = ANALOGIX_DP_PHY_PD;
+       u32 mask;
 
        if (dp->plat_data && is_rockchip(dp->plat_data->dev_type))
                phy_pd_addr = ANALOGIX_DP_PD;
 
        switch (block) {
        case AUX_BLOCK:
-               if (enable) {
-                       reg = readl(dp->reg_base + phy_pd_addr);
-                       reg |= AUX_PD;
-                       writel(reg, dp->reg_base + phy_pd_addr);
-               } else {
-                       reg = readl(dp->reg_base + phy_pd_addr);
-                       reg &= ~AUX_PD;
-                       writel(reg, dp->reg_base + phy_pd_addr);
-               }
+               if (dp->plat_data && is_rockchip(dp->plat_data->dev_type))
+                       mask = RK_AUX_PD;
+               else
+                       mask = AUX_PD;
+
+               reg = readl(dp->reg_base + phy_pd_addr);
+               if (enable)
+                       reg |= mask;
+               else
+                       reg &= ~mask;
+               writel(reg, dp->reg_base + phy_pd_addr);
                break;
        case CH0_BLOCK:
-               if (enable) {
-                       reg = readl(dp->reg_base + phy_pd_addr);
-                       reg |= CH0_PD;
-                       writel(reg, dp->reg_base + phy_pd_addr);
-               } else {
-                       reg = readl(dp->reg_base + phy_pd_addr);
-                       reg &= ~CH0_PD;
-                       writel(reg, dp->reg_base + phy_pd_addr);
-               }
+               mask = CH0_PD;
+               reg = readl(dp->reg_base + phy_pd_addr);
+
+               if (enable)
+                       reg |= mask;
+               else
+                       reg &= ~mask;
+               writel(reg, dp->reg_base + phy_pd_addr);
                break;
        case CH1_BLOCK:
-               if (enable) {
-                       reg = readl(dp->reg_base + phy_pd_addr);
-                       reg |= CH1_PD;
-                       writel(reg, dp->reg_base + phy_pd_addr);
-               } else {
-                       reg = readl(dp->reg_base + phy_pd_addr);
-                       reg &= ~CH1_PD;
-                       writel(reg, dp->reg_base + phy_pd_addr);
-               }
+               mask = CH1_PD;
+               reg = readl(dp->reg_base + phy_pd_addr);
+
+               if (enable)
+                       reg |= mask;
+               else
+                       reg &= ~mask;
+               writel(reg, dp->reg_base + phy_pd_addr);
                break;
        case CH2_BLOCK:
-               if (enable) {
-                       reg = readl(dp->reg_base + phy_pd_addr);
-                       reg |= CH2_PD;
-                       writel(reg, dp->reg_base + phy_pd_addr);
-               } else {
-                       reg = readl(dp->reg_base + phy_pd_addr);
-                       reg &= ~CH2_PD;
-                       writel(reg, dp->reg_base + phy_pd_addr);
-               }
+               mask = CH2_PD;
+               reg = readl(dp->reg_base + phy_pd_addr);
+
+               if (enable)
+                       reg |= mask;
+               else
+                       reg &= ~mask;
+               writel(reg, dp->reg_base + phy_pd_addr);
                break;
        case CH3_BLOCK:
-               if (enable) {
-                       reg = readl(dp->reg_base + phy_pd_addr);
-                       reg |= CH3_PD;
-                       writel(reg, dp->reg_base + phy_pd_addr);
-               } else {
-                       reg = readl(dp->reg_base + phy_pd_addr);
-                       reg &= ~CH3_PD;
-                       writel(reg, dp->reg_base + phy_pd_addr);
-               }
+               mask = CH3_PD;
+               reg = readl(dp->reg_base + phy_pd_addr);
+
+               if (enable)
+                       reg |= mask;
+               else
+                       reg &= ~mask;
+               writel(reg, dp->reg_base + phy_pd_addr);
                break;
        case ANALOG_TOTAL:
-               if (enable) {
-                       reg = readl(dp->reg_base + phy_pd_addr);
-                       reg |= DP_PHY_PD;
-                       writel(reg, dp->reg_base + phy_pd_addr);
-               } else {
-                       reg = readl(dp->reg_base + phy_pd_addr);
-                       reg &= ~DP_PHY_PD;
-                       writel(reg, dp->reg_base + phy_pd_addr);
-               }
+               /*
+                * There is no bit named DP_PHY_PD, so we use DP_INC_BG
+                * to power off everything instead of DP_PHY_PD on
+                * Rockchip.
+                */
+               if (dp->plat_data && is_rockchip(dp->plat_data->dev_type))
+                       mask = DP_INC_BG;
+               else
+                       mask = DP_PHY_PD;
+
+               reg = readl(dp->reg_base + phy_pd_addr);
+               if (enable)
+                       reg |= mask;
+               else
+                       reg &= ~mask;
+
+               writel(reg, dp->reg_base + phy_pd_addr);
+               if (dp->plat_data && is_rockchip(dp->plat_data->dev_type))
+                       usleep_range(10, 15);
                break;
        case POWER_ALL:
                if (enable) {
-                       reg = DP_PHY_PD | AUX_PD | CH3_PD | CH2_PD |
-                               CH1_PD | CH0_PD;
+                       reg = DP_ALL_PD;
                        writel(reg, dp->reg_base + phy_pd_addr);
                } else {
+                       reg = DP_ALL_PD;
+                       writel(reg, dp->reg_base + phy_pd_addr);
+                       usleep_range(10, 15);
+                       reg &= ~DP_INC_BG;
+                       writel(reg, dp->reg_base + phy_pd_addr);
+                       usleep_range(10, 15);
+
                        writel(0x00, dp->reg_base + phy_pd_addr);
                }
                break;
@@ -333,7 +357,7 @@ void analogix_dp_set_analog_power_down(struct analogix_dp_device *dp,
        }
 }
 
-void analogix_dp_init_analog_func(struct analogix_dp_device *dp)
+int analogix_dp_init_analog_func(struct analogix_dp_device *dp)
 {
        u32 reg;
        int timeout_loop = 0;
@@ -355,7 +379,7 @@ void analogix_dp_init_analog_func(struct analogix_dp_device *dp)
                        timeout_loop++;
                        if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) {
                                dev_err(dp->dev, "failed to get pll lock status\n");
-                               return;
+                               return -ETIMEDOUT;
                        }
                        usleep_range(10, 20);
                }
@@ -366,6 +390,7 @@ void analogix_dp_init_analog_func(struct analogix_dp_device *dp)
        reg &= ~(SERDES_FIFO_FUNC_EN_N | LS_CLK_DOMAIN_FUNC_EN_N
                | AUX_FUNC_EN_N);
        writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_2);
+       return 0;
 }
 
 void analogix_dp_clear_hotplug_interrupts(struct analogix_dp_device *dp)
@@ -450,17 +475,22 @@ void analogix_dp_init_aux(struct analogix_dp_device *dp)
        reg = RPLY_RECEIV | AUX_ERR;
        writel(reg, dp->reg_base + ANALOGIX_DP_INT_STA);
 
+       analogix_dp_set_analog_power_down(dp, AUX_BLOCK, true);
+       usleep_range(10, 11);
+       analogix_dp_set_analog_power_down(dp, AUX_BLOCK, false);
+
        analogix_dp_reset_aux(dp);
 
-       /* Disable AUX transaction H/W retry */
+       /* AUX_BIT_PERIOD_EXPECTED_DELAY doesn't apply to Rockchip IP */
        if (dp->plat_data && is_rockchip(dp->plat_data->dev_type))
-               reg = AUX_BIT_PERIOD_EXPECTED_DELAY(0) |
-                     AUX_HW_RETRY_COUNT_SEL(3) |
-                     AUX_HW_RETRY_INTERVAL_600_MICROSECONDS;
+               reg = 0;
        else
-               reg = AUX_BIT_PERIOD_EXPECTED_DELAY(3) |
-                     AUX_HW_RETRY_COUNT_SEL(0) |
-                     AUX_HW_RETRY_INTERVAL_600_MICROSECONDS;
+               reg = AUX_BIT_PERIOD_EXPECTED_DELAY(3);
+
+       /* Disable AUX transaction H/W retry */
+       reg |= AUX_HW_RETRY_COUNT_SEL(0) |
+              AUX_HW_RETRY_INTERVAL_600_MICROSECONDS;
+
        writel(reg, dp->reg_base + ANALOGIX_DP_AUX_HW_RETRY_CTL);
 
        /* Receive AUX Channel DEFER commands equal to DEFFER_COUNT*64 */
@@ -947,8 +977,12 @@ void analogix_dp_config_video_slave_mode(struct analogix_dp_device *dp)
        u32 reg;
 
        reg = readl(dp->reg_base + ANALOGIX_DP_FUNC_EN_1);
-       reg &= ~(MASTER_VID_FUNC_EN_N | SLAVE_VID_FUNC_EN_N);
-       reg |= MASTER_VID_FUNC_EN_N;
+       if (dp->plat_data && is_rockchip(dp->plat_data->dev_type)) {
+               reg &= ~(RK_VID_CAP_FUNC_EN_N | RK_VID_FIFO_FUNC_EN_N);
+       } else {
+               reg &= ~(MASTER_VID_FUNC_EN_N | SLAVE_VID_FUNC_EN_N);
+               reg |= MASTER_VID_FUNC_EN_N;
+       }
        writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_1);
 
        reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10);
@@ -1072,10 +1106,11 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
                             struct drm_dp_aux_msg *msg)
 {
        u32 reg;
+       u32 status_reg;
        u8 *buffer = msg->buffer;
-       int timeout_loop = 0;
        unsigned int i;
        int num_transferred = 0;
+       int ret;
 
        /* Buffer size of AUX CH is 16 bytes */
        if (WARN_ON(msg->size > 16))
@@ -1139,17 +1174,20 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
 
        writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_2);
 
-       /* Is AUX CH command reply received? */
+       ret = readx_poll_timeout(readl, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_2,
+                                reg, !(reg & AUX_EN), 25, 500 * 1000);
+       if (ret) {
+               dev_err(dp->dev, "AUX CH enable timeout!\n");
+               goto aux_error;
+       }
+
        /* TODO: Wait for an interrupt instead of looping? */
-       reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA);
-       while (!(reg & RPLY_RECEIV)) {
-               timeout_loop++;
-               if (timeout_loop > DP_TIMEOUT_LOOP_COUNT) {
-                       dev_err(dp->dev, "AUX CH command reply failed!\n");
-                       return -ETIMEDOUT;
-               }
-               reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA);
-               usleep_range(10, 11);
+       /* Is AUX CH command reply received? */
+       ret = readx_poll_timeout(readl, dp->reg_base + ANALOGIX_DP_INT_STA,
+                                reg, reg & RPLY_RECEIV, 10, 20 * 1000);
+       if (ret) {
+               dev_err(dp->dev, "AUX CH cmd reply timeout!\n");
+               goto aux_error;
        }
 
        /* Clear interrupt source for AUX CH command reply */
@@ -1157,17 +1195,13 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
 
        /* Clear interrupt source for AUX CH access error */
        reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA);
-       if (reg & AUX_ERR) {
+       status_reg = readl(dp->reg_base + ANALOGIX_DP_AUX_CH_STA);
+       if ((reg & AUX_ERR) || (status_reg & AUX_STATUS_MASK)) {
                writel(AUX_ERR, dp->reg_base + ANALOGIX_DP_INT_STA);
-               return -EREMOTEIO;
-       }
 
-       /* Check AUX CH error access status */
-       reg = readl(dp->reg_base + ANALOGIX_DP_AUX_CH_STA);
-       if ((reg & AUX_STATUS_MASK)) {
-               dev_err(dp->dev, "AUX CH error happened: %d\n\n",
-                       reg & AUX_STATUS_MASK);
-               return -EREMOTEIO;
+               dev_warn(dp->dev, "AUX CH error happened: %#x (%d)\n",
+                        status_reg & AUX_STATUS_MASK, !!(reg & AUX_ERR));
+               goto aux_error;
        }
 
        if (msg->request & DP_AUX_I2C_READ) {
@@ -1193,4 +1227,10 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
                msg->reply = DP_AUX_NATIVE_REPLY_ACK;
 
        return num_transferred > 0 ? num_transferred : -EBUSY;
+
+aux_error:
+       /* if an AUX error happened, reset the AUX channel */
+       analogix_dp_init_aux(dp);
+
+       return -EREMOTEIO;
 }
index 40200c6..0cf27c7 100644 (file)
 
 /* ANALOGIX_DP_FUNC_EN_1 */
 #define MASTER_VID_FUNC_EN_N                   (0x1 << 7)
+#define RK_VID_CAP_FUNC_EN_N                   (0x1 << 6)
 #define SLAVE_VID_FUNC_EN_N                    (0x1 << 5)
+#define RK_VID_FIFO_FUNC_EN_N                  (0x1 << 5)
 #define AUD_FIFO_FUNC_EN_N                     (0x1 << 4)
 #define AUD_FUNC_EN_N                          (0x1 << 3)
 #define HDCP_FUNC_EN_N                         (0x1 << 2)
 #define DP_PLL_REF_BIT_1_2500V                 (0x7 << 0)
 
 /* ANALOGIX_DP_PHY_PD */
+#define DP_INC_BG                              (0x1 << 7)
+#define DP_EXP_BG                              (0x1 << 6)
 #define DP_PHY_PD                              (0x1 << 5)
+#define RK_AUX_PD                              (0x1 << 5)
 #define AUX_PD                                 (0x1 << 4)
+#define RK_PLL_PD                              (0x1 << 4)
 #define CH3_PD                                 (0x1 << 3)
 #define CH2_PD                                 (0x1 << 2)
 #define CH1_PD                                 (0x1 << 1)
 #define CH0_PD                                 (0x1 << 0)
+#define DP_ALL_PD                              (0xff)
 
 /* ANALOGIX_DP_PHY_TEST */
 #define MACRO_RST                              (0x1 << 5)
diff --git a/drivers/gpu/drm/bridge/cdns-dsi.c b/drivers/gpu/drm/bridge/cdns-dsi.c
new file mode 100644 (file)
index 0000000..c255fc3
--- /dev/null
@@ -0,0 +1,1623 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright: 2017 Cadence Design Systems, Inc.
+ *
+ * Author: Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+#include <video/mipi_display.h>
+
+#include <linux/clk.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#define IP_CONF                                0x0
+#define SP_HS_FIFO_DEPTH(x)            (((x) & GENMASK(30, 26)) >> 26)
+#define SP_LP_FIFO_DEPTH(x)            (((x) & GENMASK(25, 21)) >> 21)
+#define VRS_FIFO_DEPTH(x)              (((x) & GENMASK(20, 16)) >> 16)
+#define DIRCMD_FIFO_DEPTH(x)           (((x) & GENMASK(15, 13)) >> 13)
+#define SDI_IFACE_32                   BIT(12)
+#define INTERNAL_DATAPATH_32           (0 << 10)
+#define INTERNAL_DATAPATH_16           (1 << 10)
+#define INTERNAL_DATAPATH_8            (3 << 10)
+#define INTERNAL_DATAPATH_SIZE         ((x) & GENMASK(11, 10))
+#define NUM_IFACE(x)                   ((((x) & GENMASK(9, 8)) >> 8) + 1)
+#define MAX_LANE_NB(x)                 (((x) & GENMASK(7, 6)) >> 6)
+#define RX_FIFO_DEPTH(x)               ((x) & GENMASK(5, 0))
+
+#define MCTL_MAIN_DATA_CTL             0x4
+#define TE_MIPI_POLLING_EN             BIT(25)
+#define TE_HW_POLLING_EN               BIT(24)
+#define DISP_EOT_GEN                   BIT(18)
+#define HOST_EOT_GEN                   BIT(17)
+#define DISP_GEN_CHECKSUM              BIT(16)
+#define DISP_GEN_ECC                   BIT(15)
+#define BTA_EN                         BIT(14)
+#define READ_EN                                BIT(13)
+#define REG_TE_EN                      BIT(12)
+#define IF_TE_EN(x)                    BIT(8 + (x))
+#define TVG_SEL                                BIT(6)
+#define VID_EN                         BIT(5)
+#define IF_VID_SELECT(x)               ((x) << 2)
+#define IF_VID_SELECT_MASK             GENMASK(3, 2)
+#define IF_VID_MODE                    BIT(1)
+#define LINK_EN                                BIT(0)
+
+#define MCTL_MAIN_PHY_CTL              0x8
+#define HS_INVERT_DAT(x)               BIT(19 + ((x) * 2))
+#define SWAP_PINS_DAT(x)               BIT(18 + ((x) * 2))
+#define HS_INVERT_CLK                  BIT(17)
+#define SWAP_PINS_CLK                  BIT(16)
+#define HS_SKEWCAL_EN                  BIT(15)
+#define WAIT_BURST_TIME(x)             ((x) << 10)
+#define DATA_ULPM_EN(x)                        BIT(6 + (x))
+#define CLK_ULPM_EN                    BIT(5)
+#define CLK_CONTINUOUS                 BIT(4)
+#define DATA_LANE_EN(x)                        BIT((x) - 1)
+
+#define MCTL_MAIN_EN                   0xc
+#define DATA_FORCE_STOP                        BIT(17)
+#define CLK_FORCE_STOP                 BIT(16)
+#define IF_EN(x)                       BIT(13 + (x))
+#define DATA_LANE_ULPM_REQ(l)          BIT(9 + (l))
+#define CLK_LANE_ULPM_REQ              BIT(8)
+#define DATA_LANE_START(x)             BIT(4 + (x))
+#define CLK_LANE_EN                    BIT(3)
+#define PLL_START                      BIT(0)
+
+#define MCTL_DPHY_CFG0                 0x10
+#define DPHY_C_RSTB                    BIT(20)
+#define DPHY_D_RSTB(x)                 GENMASK(15 + (x), 16)
+#define DPHY_PLL_PDN                   BIT(10)
+#define DPHY_CMN_PDN                   BIT(9)
+#define DPHY_C_PDN                     BIT(8)
+#define DPHY_D_PDN(x)                  GENMASK(3 + (x), 4)
+#define DPHY_ALL_D_PDN                 GENMASK(7, 4)
+#define DPHY_PLL_PSO                   BIT(1)
+#define DPHY_CMN_PSO                   BIT(0)
+
+#define MCTL_DPHY_TIMEOUT1             0x14
+#define HSTX_TIMEOUT(x)                        ((x) << 4)
+#define HSTX_TIMEOUT_MAX               GENMASK(17, 0)
+#define CLK_DIV(x)                     (x)
+#define CLK_DIV_MAX                    GENMASK(3, 0)
+
+#define MCTL_DPHY_TIMEOUT2             0x18
+#define LPRX_TIMEOUT(x)                        (x)
+
+#define MCTL_ULPOUT_TIME               0x1c
+#define DATA_LANE_ULPOUT_TIME(x)       ((x) << 9)
+#define CLK_LANE_ULPOUT_TIME(x)                (x)
+
+#define MCTL_3DVIDEO_CTL               0x20
+#define VID_VSYNC_3D_EN                        BIT(7)
+#define VID_VSYNC_3D_LR                        BIT(5)
+#define VID_VSYNC_3D_SECOND_EN         BIT(4)
+#define VID_VSYNC_3DFORMAT_LINE                (0 << 2)
+#define VID_VSYNC_3DFORMAT_FRAME       (1 << 2)
+#define VID_VSYNC_3DFORMAT_PIXEL       (2 << 2)
+#define VID_VSYNC_3DMODE_OFF           0
+#define VID_VSYNC_3DMODE_PORTRAIT      1
+#define VID_VSYNC_3DMODE_LANDSCAPE     2
+
+#define MCTL_MAIN_STS                  0x24
+#define MCTL_MAIN_STS_CTL              0x130
+#define MCTL_MAIN_STS_CLR              0x150
+#define MCTL_MAIN_STS_FLAG             0x170
+#define HS_SKEWCAL_DONE                        BIT(11)
+#define IF_UNTERM_PKT_ERR(x)           BIT(8 + (x))
+#define LPRX_TIMEOUT_ERR               BIT(7)
+#define HSTX_TIMEOUT_ERR               BIT(6)
+#define DATA_LANE_RDY(l)               BIT(2 + (l))
+#define CLK_LANE_RDY                   BIT(1)
+#define PLL_LOCKED                     BIT(0)
+
+#define MCTL_DPHY_ERR                  0x28
+#define MCTL_DPHY_ERR_CTL1             0x148
+#define MCTL_DPHY_ERR_CLR              0x168
+#define MCTL_DPHY_ERR_FLAG             0x188
+#define ERR_CONT_LP(x, l)              BIT(18 + ((x) * 4) + (l))
+#define ERR_CONTROL(l)                 BIT(14 + (l))
+#define ERR_SYNESC(l)                  BIT(10 + (l))
+#define ERR_ESC(l)                     BIT(6 + (l))
+
+#define MCTL_DPHY_ERR_CTL2             0x14c
+#define ERR_CONT_LP_EDGE(x, l)         BIT(12 + ((x) * 4) + (l))
+#define ERR_CONTROL_EDGE(l)            BIT(8 + (l))
+#define ERR_SYN_ESC_EDGE(l)            BIT(4 + (l))
+#define ERR_ESC_EDGE(l)                        BIT(0 + (l))
+
+#define MCTL_LANE_STS                  0x2c
+#define PPI_C_TX_READY_HS              BIT(18)
+#define DPHY_PLL_LOCK                  BIT(17)
+#define PPI_D_RX_ULPS_ESC(x)           (((x) & GENMASK(15, 12)) >> 12)
+#define LANE_STATE_START               0
+#define LANE_STATE_IDLE                        1
+#define LANE_STATE_WRITE               2
+#define LANE_STATE_ULPM                        3
+#define LANE_STATE_READ                        4
+#define DATA_LANE_STATE(l, val)                \
+       (((val) >> (2 + 2 * (l) + ((l) ? 1 : 0))) & GENMASK((l) ? 1 : 2, 0))
+#define CLK_LANE_STATE_HS              2
+#define CLK_LANE_STATE(val)            ((val) & GENMASK(1, 0))
+
+#define DSC_MODE_CTL                   0x30
+#define DSC_MODE_EN                    BIT(0)
+
+#define DSC_CMD_SEND                   0x34
+#define DSC_SEND_PPS                   BIT(0)
+#define DSC_EXECUTE_QUEUE              BIT(1)
+
+#define DSC_PPS_WRDAT                  0x38
+
+#define DSC_MODE_STS                   0x3c
+#define DSC_PPS_DONE                   BIT(1)
+#define DSC_EXEC_DONE                  BIT(2)
+
+#define CMD_MODE_CTL                   0x70
+#define IF_LP_EN(x)                    BIT(9 + (x))
+#define IF_VCHAN_ID(x, c)              ((c) << ((x) * 2))
+
+#define CMD_MODE_CTL2                  0x74
+#define TE_TIMEOUT(x)                  ((x) << 11)
+#define FILL_VALUE(x)                  ((x) << 3)
+#define ARB_IF_WITH_HIGHEST_PRIORITY(x)        ((x) << 1)
+#define ARB_ROUND_ROBIN_MODE           BIT(0)
+
+#define CMD_MODE_STS                   0x78
+#define CMD_MODE_STS_CTL               0x134
+#define CMD_MODE_STS_CLR               0x154
+#define CMD_MODE_STS_FLAG              0x174
+#define ERR_IF_UNDERRUN(x)             BIT(4 + (x))
+#define ERR_UNWANTED_READ              BIT(3)
+#define ERR_TE_MISS                    BIT(2)
+#define ERR_NO_TE                      BIT(1)
+#define CSM_RUNNING                    BIT(0)
+
+#define DIRECT_CMD_SEND                        0x80
+
+#define DIRECT_CMD_MAIN_SETTINGS       0x84
+#define TRIGGER_VAL(x)                 ((x) << 25)
+#define CMD_LP_EN                      BIT(24)
+#define CMD_SIZE(x)                    ((x) << 16)
+#define CMD_VCHAN_ID(x)                        ((x) << 14)
+#define CMD_DATATYPE(x)                        ((x) << 8)
+#define CMD_LONG                       BIT(3)
+#define WRITE_CMD                      0
+#define READ_CMD                       1
+#define TE_REQ                         4
+#define TRIGGER_REQ                    5
+#define BTA_REQ                                6
+
+#define DIRECT_CMD_STS                 0x88
+#define DIRECT_CMD_STS_CTL             0x138
+#define DIRECT_CMD_STS_CLR             0x158
+#define DIRECT_CMD_STS_FLAG            0x178
+#define RCVD_ACK_VAL(val)              ((val) >> 16)
+#define RCVD_TRIGGER_VAL(val)          (((val) & GENMASK(14, 11)) >> 11)
+#define READ_COMPLETED_WITH_ERR                BIT(10)
+#define BTA_FINISHED                   BIT(9)
+#define BTA_COMPLETED                  BIT(8)
+#define TE_RCVD                                BIT(7)
+#define TRIGGER_RCVD                   BIT(6)
+#define ACK_WITH_ERR_RCVD              BIT(5)
+#define ACK_RCVD                       BIT(4)
+#define READ_COMPLETED                 BIT(3)
+#define TRIGGER_COMPLETED              BIT(2)
+#define WRITE_COMPLETED                        BIT(1)
+#define SENDING_CMD                    BIT(0)
+
+#define DIRECT_CMD_STOP_READ           0x8c
+
+#define DIRECT_CMD_WRDATA              0x90
+
+#define DIRECT_CMD_FIFO_RST            0x94
+
+#define DIRECT_CMD_RDDATA              0xa0
+
+#define DIRECT_CMD_RD_PROPS            0xa4
+#define RD_DCS                         BIT(18)
+#define RD_VCHAN_ID(val)               (((val) >> 16) & GENMASK(1, 0))
+#define RD_SIZE(val)                   ((val) & GENMASK(15, 0))
+
+#define DIRECT_CMD_RD_STS              0xa8
+#define DIRECT_CMD_RD_STS_CTL          0x13c
+#define DIRECT_CMD_RD_STS_CLR          0x15c
+#define DIRECT_CMD_RD_STS_FLAG         0x17c
+#define ERR_EOT_WITH_ERR               BIT(8)
+#define ERR_MISSING_EOT                        BIT(7)
+#define ERR_WRONG_LENGTH               BIT(6)
+#define ERR_OVERSIZE                   BIT(5)
+#define ERR_RECEIVE                    BIT(4)
+#define ERR_UNDECODABLE                        BIT(3)
+#define ERR_CHECKSUM                   BIT(2)
+#define ERR_UNCORRECTABLE              BIT(1)
+#define ERR_FIXED                      BIT(0)
+
+#define VID_MAIN_CTL                   0xb0
+#define VID_IGNORE_MISS_VSYNC          BIT(31)
+#define VID_FIELD_SW                   BIT(28)
+#define VID_INTERLACED_EN              BIT(27)
+#define RECOVERY_MODE(x)               ((x) << 25)
+#define RECOVERY_MODE_NEXT_HSYNC       0
+#define RECOVERY_MODE_NEXT_STOP_POINT  2
+#define RECOVERY_MODE_NEXT_VSYNC       3
+#define REG_BLKEOL_MODE(x)             ((x) << 23)
+#define REG_BLKLINE_MODE(x)            ((x) << 21)
+#define REG_BLK_MODE_NULL_PKT          0
+#define REG_BLK_MODE_BLANKING_PKT      1
+#define REG_BLK_MODE_LP                        2
+#define SYNC_PULSE_HORIZONTAL          BIT(20)
+#define SYNC_PULSE_ACTIVE              BIT(19)
+#define BURST_MODE                     BIT(18)
+#define VID_PIXEL_MODE_MASK            GENMASK(17, 14)
+#define VID_PIXEL_MODE_RGB565          (0 << 14)
+#define VID_PIXEL_MODE_RGB666_PACKED   (1 << 14)
+#define VID_PIXEL_MODE_RGB666          (2 << 14)
+#define VID_PIXEL_MODE_RGB888          (3 << 14)
+#define VID_PIXEL_MODE_RGB101010       (4 << 14)
+#define VID_PIXEL_MODE_RGB121212       (5 << 14)
+#define VID_PIXEL_MODE_YUV420          (8 << 14)
+#define VID_PIXEL_MODE_YUV422_PACKED   (9 << 14)
+#define VID_PIXEL_MODE_YUV422          (10 << 14)
+#define VID_PIXEL_MODE_YUV422_24B      (11 << 14)
+#define VID_PIXEL_MODE_DSC_COMP                (12 << 14)
+#define VID_DATATYPE(x)                        ((x) << 8)
+#define VID_VIRTCHAN_ID(iface, x)      ((x) << (4 + (iface) * 2))
+#define STOP_MODE(x)                   ((x) << 2)
+#define START_MODE(x)                  (x)
+
+#define VID_VSIZE1                     0xb4
+#define VFP_LEN(x)                     ((x) << 12)
+#define VBP_LEN(x)                     ((x) << 6)
+#define VSA_LEN(x)                     (x)
+
+#define VID_VSIZE2                     0xb8
+#define VACT_LEN(x)                    (x)
+
+#define VID_HSIZE1                     0xc0
+#define HBP_LEN(x)                     ((x) << 16)
+#define HSA_LEN(x)                     (x)
+
+#define VID_HSIZE2                     0xc4
+#define HFP_LEN(x)                     ((x) << 16)
+#define HACT_LEN(x)                    (x)
+
+#define VID_BLKSIZE1                   0xcc
+#define BLK_EOL_PKT_LEN(x)             ((x) << 15)
+#define BLK_LINE_EVENT_PKT_LEN(x)      (x)
+
+#define VID_BLKSIZE2                   0xd0
+#define BLK_LINE_PULSE_PKT_LEN(x)      (x)
+
+#define VID_PKT_TIME                   0xd8
+#define BLK_EOL_DURATION(x)            (x)
+
+#define VID_DPHY_TIME                  0xdc
+#define REG_WAKEUP_TIME(x)             ((x) << 17)
+#define REG_LINE_DURATION(x)           (x)
+
+#define VID_ERR_COLOR1                 0xe0
+#define COL_GREEN(x)                   ((x) << 12)
+#define COL_RED(x)                     (x)
+
+#define VID_ERR_COLOR2                 0xe4
+#define PAD_VAL(x)                     ((x) << 12)
+#define COL_BLUE(x)                    (x)
+
+#define VID_VPOS                       0xe8
+#define LINE_VAL(val)                  (((val) & GENMASK(14, 2)) >> 2)
+#define LINE_POS(val)                  ((val) & GENMASK(1, 0))
+
+#define VID_HPOS                       0xec
+#define HORIZ_VAL(val)                 (((val) & GENMASK(17, 3)) >> 3)
+#define HORIZ_POS(val)                 ((val) & GENMASK(2, 0))
+
+#define VID_MODE_STS                   0xf0
+#define VID_MODE_STS_CTL               0x140
+#define VID_MODE_STS_CLR               0x160
+#define VID_MODE_STS_FLAG              0x180
+#define VSG_RECOVERY                   BIT(10)
+#define ERR_VRS_WRONG_LEN              BIT(9)
+#define ERR_LONG_READ                  BIT(8)
+#define ERR_LINE_WRITE                 BIT(7)
+#define ERR_BURST_WRITE                        BIT(6)
+#define ERR_SMALL_HEIGHT               BIT(5)
+#define ERR_SMALL_LEN                  BIT(4)
+#define ERR_MISSING_VSYNC              BIT(3)
+#define ERR_MISSING_HSYNC              BIT(2)
+#define ERR_MISSING_DATA               BIT(1)
+#define VSG_RUNNING                    BIT(0)
+
+#define VID_VCA_SETTING1               0xf4
+#define BURST_LP                       BIT(16)
+#define MAX_BURST_LIMIT(x)             (x)
+
+#define VID_VCA_SETTING2               0xf8
+#define MAX_LINE_LIMIT(x)              ((x) << 16)
+#define EXACT_BURST_LIMIT(x)           (x)
+
+#define TVG_CTL                                0xfc
+#define TVG_STRIPE_SIZE(x)             ((x) << 5)
+#define TVG_MODE_MASK                  GENMASK(4, 3)
+#define TVG_MODE_SINGLE_COLOR          (0 << 3)
+#define TVG_MODE_VSTRIPES              (2 << 3)
+#define TVG_MODE_HSTRIPES              (3 << 3)
+#define TVG_STOPMODE_MASK              GENMASK(2, 1)
+#define TVG_STOPMODE_EOF               (0 << 1)
+#define TVG_STOPMODE_EOL               (1 << 1)
+#define TVG_STOPMODE_NOW               (2 << 1)
+#define TVG_RUN                                BIT(0)
+
+#define TVG_IMG_SIZE                   0x100
+#define TVG_NBLINES(x)                 ((x) << 16)
+#define TVG_LINE_SIZE(x)               (x)
+
+#define TVG_COLOR1                     0x104
+#define TVG_COL1_GREEN(x)              ((x) << 12)
+#define TVG_COL1_RED(x)                        (x)
+
+#define TVG_COLOR1_BIS                 0x108
+#define TVG_COL1_BLUE(x)               (x)
+
+#define TVG_COLOR2                     0x10c
+#define TVG_COL2_GREEN(x)              ((x) << 12)
+#define TVG_COL2_RED(x)                        (x)
+
+#define TVG_COLOR2_BIS                 0x110
+#define TVG_COL2_BLUE(x)               (x)
+
+#define TVG_STS                                0x114
+#define TVG_STS_CTL                    0x144
+#define TVG_STS_CLR                    0x164
+#define TVG_STS_FLAG                   0x184
+#define TVG_STS_RUNNING                        BIT(0)
+
+#define STS_CTL_EDGE(e)                        ((e) << 16)
+
+#define DPHY_LANES_MAP                 0x198
+#define DAT_REMAP_CFG(b, l)            ((l) << ((b) * 8))
+
+/* DPI input FIFO interrupt and config registers. */
+#define DPI_IRQ_EN                     0x1a0
+#define DPI_IRQ_CLR                    0x1a4
+#define DPI_IRQ_STS                    0x1a8
+#define PIXEL_BUF_OVERFLOW             BIT(0)
+
+#define DPI_CFG                                0x1ac
+#define DPI_CFG_FIFO_DEPTH(x)          ((x) >> 16)
+#define DPI_CFG_FIFO_LEVEL(x)          ((x) & GENMASK(15, 0))
+
+#define TEST_GENERIC                   0x1f0
+#define TEST_STATUS(x)                 ((x) >> 16)
+#define TEST_CTRL(x)                   (x)
+
+/* IP identification register: vendor/product/revision fields. */
+#define ID_REG                         0x1fc
+#define REV_VENDOR_ID(x)               (((x) & GENMASK(31, 20)) >> 20)
+#define REV_PRODUCT_ID(x)              (((x) & GENMASK(19, 12)) >> 12)
+#define REV_HW(x)                      (((x) & GENMASK(11, 8)) >> 8)
+#define REV_MAJOR(x)                   (((x) & GENMASK(7, 4)) >> 4)
+#define REV_MINOR(x)                   ((x) & GENMASK(3, 0))
+
+/* OF-graph port indices: port 0 is the DSI output, 1+ are inputs. */
+#define DSI_OUTPUT_PORT                        0
+#define DSI_INPUT_PORT(inputid)                (1 + (inputid))
+
+/* Per-packet protocol overheads in bytes (headers, CRCs, sync events). */
+#define DSI_HBP_FRAME_OVERHEAD         12
+#define DSI_HSA_FRAME_OVERHEAD         14
+#define DSI_HFP_FRAME_OVERHEAD         6
+#define DSI_HSS_VSS_VSE_FRAME_OVERHEAD 4
+#define DSI_BLANKING_FRAME_OVERHEAD    6
+#define DSI_NULL_FRAME_OVERHEAD                6
+#define DSI_EOT_PKT_SIZE               4
+
+#define REG_WAKEUP_TIME_NS             800
+#define DPHY_PLL_RATE_HZ               108000000
+
+/* DPHY registers */
+#define DPHY_PMA_CMN(reg)              (reg)
+#define DPHY_PMA_LCLK(reg)             (0x100 + (reg))
+#define DPHY_PMA_LDATA(lane, reg)      (0x200 + ((lane) * 0x100) + (reg))
+#define DPHY_PMA_RCLK(reg)             (0x600 + (reg))
+#define DPHY_PMA_RDATA(lane, reg)      (0x700 + ((lane) * 0x100) + (reg))
+#define DPHY_PCS(reg)                  (0xb00 + (reg))
+
+#define DPHY_CMN_SSM                   DPHY_PMA_CMN(0x20)
+#define DPHY_CMN_SSM_EN                        BIT(0)
+#define DPHY_CMN_TX_MODE_EN            BIT(9)
+
+#define DPHY_CMN_PWM                   DPHY_PMA_CMN(0x40)
+#define DPHY_CMN_PWM_DIV(x)            ((x) << 20)
+#define DPHY_CMN_PWM_LOW(x)            ((x) << 10)
+#define DPHY_CMN_PWM_HIGH(x)           (x)
+
+#define DPHY_CMN_FBDIV                 DPHY_PMA_CMN(0x4c)
+#define DPHY_CMN_FBDIV_VAL(low, high)  (((high) << 11) | ((low) << 22))
+#define DPHY_CMN_FBDIV_FROM_REG                (BIT(10) | BIT(21))
+
+#define DPHY_CMN_OPIPDIV               DPHY_PMA_CMN(0x50)
+#define DPHY_CMN_IPDIV_FROM_REG                BIT(0)
+#define DPHY_CMN_IPDIV(x)              ((x) << 1)
+#define DPHY_CMN_OPDIV_FROM_REG                BIT(6)
+#define DPHY_CMN_OPDIV(x)              ((x) << 7)
+
+#define DPHY_PSM_CFG                   DPHY_PCS(0x4)
+#define DPHY_PSM_CFG_FROM_REG          BIT(0)
+#define DPHY_PSM_CLK_DIV(x)            ((x) << 1)
+
+/* The downstream DSI device: its mipi_dsi handle plus the panel/bridge
+ * the display pipeline continues into.
+ */
+struct cdns_dsi_output {
+       struct mipi_dsi_device *dev;
+       struct drm_panel *panel;
+       struct drm_bridge *bridge;
+};
+
+/* Possible input interfaces feeding pixel data into the DSI host. */
+enum cdns_dsi_input_id {
+       CDNS_SDI_INPUT,
+       CDNS_DPI_INPUT,
+       CDNS_DSC_INPUT,
+};
+
+/* DPHY PLL configuration derived from the display mode. */
+struct cdns_dphy_cfg {
+       u8 pll_ipdiv;           /* input (reference) clock divider */
+       u8 pll_opdiv;           /* output clock divider */
+       u16 pll_fbdiv;          /* feedback divider */
+       unsigned long lane_bps; /* resulting per-lane bit rate, bits/sec */
+       unsigned int nlanes;    /* number of data lanes in use */
+};
+
+/* Horizontal DSI timings, all expressed in bytes on the link. */
+struct cdns_dsi_cfg {
+       unsigned int hfp;
+       unsigned int hsa;
+       unsigned int hbp;
+       unsigned int hact;
+       unsigned int htotal;
+};
+
+struct cdns_dphy;
+
+/* How the two DPHY clock lanes are attached to the data-lane groups. */
+enum cdns_dphy_clk_lane_cfg {
+       DPHY_CLK_CFG_LEFT_DRIVES_ALL = 0,
+       DPHY_CLK_CFG_LEFT_DRIVES_RIGHT = 1,
+       DPHY_CLK_CFG_LEFT_DRIVES_LEFT = 2,
+       DPHY_CLK_CFG_RIGHT_DRIVES_ALL = 3,
+};
+
+/* Hooks implemented per DPHY flavor; optional ones are NULL-checked by
+ * the cdns_dphy_* wrappers below (get_wakeup_time_ns is mandatory).
+ */
+struct cdns_dphy_ops {
+       int (*probe)(struct cdns_dphy *dphy);
+       void (*remove)(struct cdns_dphy *dphy);
+       void (*set_psm_div)(struct cdns_dphy *dphy, u8 div);
+       void (*set_clk_lane_cfg)(struct cdns_dphy *dphy,
+                                enum cdns_dphy_clk_lane_cfg cfg);
+       void (*set_pll_cfg)(struct cdns_dphy *dphy,
+                           const struct cdns_dphy_cfg *cfg);
+       unsigned long (*get_wakeup_time_ns)(struct cdns_dphy *dphy);
+};
+
+/* DPHY instance: register space, its two input clocks and the ops. */
+struct cdns_dphy {
+       struct cdns_dphy_cfg cfg;
+       void __iomem *regs;
+       struct clk *psm_clk;
+       struct clk *pll_ref_clk;
+       const struct cdns_dphy_ops *ops;
+};
+
+/* One pixel-data input exposed to DRM as a drm_bridge. */
+struct cdns_dsi_input {
+       enum cdns_dsi_input_id id;
+       struct drm_bridge bridge;
+};
+
+/* Main driver state for one DSI host instance. */
+struct cdns_dsi {
+       struct mipi_dsi_host base;
+       void __iomem *regs;
+       struct cdns_dsi_input input;
+       struct cdns_dsi_output output;
+       unsigned int direct_cmd_fifo_depth;  /* limits TX transfer length */
+       unsigned int rx_fifo_depth;          /* limits RX transfer length */
+       struct completion direct_cmd_comp;   /* signaled from the IRQ handler */
+       struct clk *dsi_p_clk;
+       struct reset_control *dsi_p_rst;
+       struct clk *dsi_sys_clk;
+       bool link_initialized;               /* cleared again on suspend */
+       struct cdns_dphy *dphy;
+};
+
+/* Map an embedded input back to its owning cdns_dsi. */
+static inline struct cdns_dsi *input_to_dsi(struct cdns_dsi_input *input)
+{
+       return container_of(input, struct cdns_dsi, input);
+}
+
+/* Map the mipi_dsi_host base back to the driver state. */
+static inline struct cdns_dsi *to_cdns_dsi(struct mipi_dsi_host *host)
+{
+       return container_of(host, struct cdns_dsi, base);
+}
+
+/* Map a drm_bridge back to the cdns_dsi_input embedding it. */
+static inline struct cdns_dsi_input *
+bridge_to_cdns_dsi_input(struct drm_bridge *bridge)
+{
+       return container_of(bridge, struct cdns_dsi_input, bridge);
+}
+
+/*
+ * Derive a DPHY PLL configuration (ipdiv/opdiv/fbdiv and the resulting
+ * per-lane bit rate) matching the requested DPI pixel clock and DSI line
+ * length. On success *dsi_hfp_ext holds the number of extra HFP bytes
+ * needed to make the DSI and DPI line periods match exactly.
+ *
+ * Returns 0 on success, -EINVAL if the reference clock, the required
+ * data rate or the divider search cannot satisfy the mode.
+ *
+ * NOTE(review): the dpi_bpp parameter is never used in this function —
+ * confirm whether it can be dropped or was meant to be part of the rate
+ * computation.
+ */
+static int cdns_dsi_get_dphy_pll_cfg(struct cdns_dphy *dphy,
+                                    struct cdns_dphy_cfg *cfg,
+                                    unsigned int dpi_htotal,
+                                    unsigned int dpi_bpp,
+                                    unsigned int dpi_hz,
+                                    unsigned int dsi_htotal,
+                                    unsigned int dsi_nlanes,
+                                    unsigned int *dsi_hfp_ext)
+{
+       u64 dlane_bps, dlane_bps_max, fbdiv, fbdiv_max, adj_dsi_htotal;
+       unsigned long pll_ref_hz = clk_get_rate(dphy->pll_ref_clk);
+
+       memset(cfg, 0, sizeof(*cfg));
+
+       cfg->nlanes = dsi_nlanes;
+
+       /* Pick the input divider so the post-divide ref clk stays in the
+        * PLL's supported band; reject out-of-range reference clocks.
+        */
+       if (pll_ref_hz < 9600000 || pll_ref_hz >= 150000000)
+               return -EINVAL;
+       else if (pll_ref_hz < 19200000)
+               cfg->pll_ipdiv = 1;
+       else if (pll_ref_hz < 38400000)
+               cfg->pll_ipdiv = 2;
+       else if (pll_ref_hz < 76800000)
+               cfg->pll_ipdiv = 4;
+       else
+               cfg->pll_ipdiv = 8;
+
+       /*
+        * Make sure DSI htotal is aligned on a lane boundary when calculating
+        * the expected data rate. This is done by extending HFP in case of
+        * misalignment.
+        */
+       adj_dsi_htotal = dsi_htotal;
+       if (dsi_htotal % dsi_nlanes)
+               adj_dsi_htotal += dsi_nlanes - (dsi_htotal % dsi_nlanes);
+
+       dlane_bps = (u64)dpi_hz * adj_dsi_htotal;
+
+       /* data rate in bytes/sec is not an integer, refuse the mode. */
+       if (do_div(dlane_bps, dsi_nlanes * dpi_htotal))
+               return -EINVAL;
+
+       /* data rate was in bytes/sec, convert to bits/sec. */
+       dlane_bps *= 8;
+
+       /* Choose the output divider from the per-lane rate band. */
+       if (dlane_bps > 2500000000UL || dlane_bps < 160000000UL)
+               return -EINVAL;
+       else if (dlane_bps >= 1250000000)
+               cfg->pll_opdiv = 1;
+       else if (dlane_bps >= 630000000)
+               cfg->pll_opdiv = 2;
+       else if (dlane_bps >= 320000000)
+               cfg->pll_opdiv = 4;
+       else if (dlane_bps >= 160000000)
+               cfg->pll_opdiv = 8;
+
+       /*
+        * Allow a deviation of 0.2% on the per-lane data rate to try to
+        * recover a potential mismatch between DPI and PPI clks.
+        */
+       dlane_bps_max = dlane_bps + DIV_ROUND_DOWN_ULL(dlane_bps, 500);
+       fbdiv_max = DIV_ROUND_DOWN_ULL(dlane_bps_max * 2 *
+                                      cfg->pll_opdiv * cfg->pll_ipdiv,
+                                      pll_ref_hz);
+       fbdiv = DIV_ROUND_UP_ULL(dlane_bps * 2 * cfg->pll_opdiv *
+                                cfg->pll_ipdiv,
+                                pll_ref_hz);
+
+       /*
+        * Iterate over all acceptable fbdiv and try to find an adjusted DSI
+        * htotal length providing an exact match.
+        *
+        * Note that we could do something even trickier by relying on the fact
+        * that a new line is not necessarily aligned on a lane boundary, so,
+        * by making adj_dsi_htotal non aligned on a dsi_lanes we can improve a
+        * bit the precision. With this, the step would be
+        *
+        *      pll_ref_hz / (2 * opdiv * ipdiv * nlanes)
+        *
+        * instead of
+        *
+        *      pll_ref_hz / (2 * opdiv * ipdiv)
+        *
+        * The drawback of this approach is that we would need to make sure the
+        * number or lines is a multiple of the realignment periodicity which is
+        * a function of the number of lanes and the original misalignment. For
+        * example, for NLANES = 4 and HTOTAL % NLANES = 3, it takes 4 lines
+        * to realign on a lane:
+        * LINE 0: expected number of bytes, starts emitting first byte of
+        *         LINE 1 on LANE 3
+        * LINE 1: expected number of bytes, starts emitting first 2 bytes of
+        *         LINE 2 on LANES 2 and 3
+        * LINE 2: expected number of bytes, starts emitting first 3 bytes of
+        *         of LINE 3 on LANES 1, 2 and 3
+        * LINE 3: one byte less, now things are realigned on LANE 0 for LINE 4
+        *
+        * I figured this extra complexity was not worth the benefit, but if
+        * someone really has unfixable mismatch, that would be something to
+        * investigate.
+        */
+       for (; fbdiv <= fbdiv_max; fbdiv++) {
+               u32 rem;
+
+               adj_dsi_htotal = (u64)fbdiv * pll_ref_hz * dsi_nlanes *
+                                dpi_htotal;
+
+               /*
+                * Do the division in 2 steps to avoid an overflow on the
+                * divider.
+                */
+               rem = do_div(adj_dsi_htotal, dpi_hz);
+               if (rem)
+                       continue;
+
+               rem = do_div(adj_dsi_htotal,
+                            cfg->pll_opdiv * cfg->pll_ipdiv * 2 * 8);
+               if (rem)
+                       continue;
+
+               cfg->pll_fbdiv = fbdiv;
+               *dsi_hfp_ext = adj_dsi_htotal - dsi_htotal;
+               break;
+       }
+
+       /* No match, let's just reject the display mode. */
+       if (!cfg->pll_fbdiv)
+               return -EINVAL;
+
+       dlane_bps = DIV_ROUND_DOWN_ULL((u64)dpi_hz * adj_dsi_htotal * 8,
+                                      dsi_nlanes * dpi_htotal);
+       cfg->lane_bps = dlane_bps;
+
+       return 0;
+}
+
+/*
+ * Program the PSM clock divider so the DPHY state machine runs at roughly
+ * 1 MHz. Returns -EINVAL if the PSM clock is absent or above 100 MHz.
+ */
+static int cdns_dphy_setup_psm(struct cdns_dphy *dphy)
+{
+       unsigned long psm_clk_hz = clk_get_rate(dphy->psm_clk);
+       unsigned long psm_div;
+
+       if (!psm_clk_hz || psm_clk_hz > 100000000)
+               return -EINVAL;
+
+       psm_div = DIV_ROUND_CLOSEST(psm_clk_hz, 1000000);
+       /* Optional hook: flavors without it just skip the divider write. */
+       if (dphy->ops->set_psm_div)
+               dphy->ops->set_psm_div(dphy, psm_div);
+
+       return 0;
+}
+
+/* Attach clock lanes to data-lane groups (optional per-flavor hook). */
+static void cdns_dphy_set_clk_lane_cfg(struct cdns_dphy *dphy,
+                                      enum cdns_dphy_clk_lane_cfg cfg)
+{
+       if (dphy->ops->set_clk_lane_cfg)
+               dphy->ops->set_clk_lane_cfg(dphy, cfg);
+}
+
+/* Apply the PLL dividers computed by cdns_dsi_get_dphy_pll_cfg(). */
+static void cdns_dphy_set_pll_cfg(struct cdns_dphy *dphy,
+                                 const struct cdns_dphy_cfg *cfg)
+{
+       if (dphy->ops->set_pll_cfg)
+               dphy->ops->set_pll_cfg(dphy, cfg);
+}
+
+/* Mandatory hook: wakeup latency in ns (no NULL check, unlike the others). */
+static unsigned long cdns_dphy_get_wakeup_time_ns(struct cdns_dphy *dphy)
+{
+       return dphy->ops->get_wakeup_time_ns(dphy);
+}
+
+/*
+ * Convert a DPI timing expressed in pixels into a DSI payload length in
+ * bytes, subtracting the fixed packet overhead. Clamps to 0 when the
+ * overhead exceeds the available time (avoids unsigned underflow).
+ */
+static unsigned int dpi_to_dsi_timing(unsigned int dpi_timing,
+                                     unsigned int dpi_bpp,
+                                     unsigned int dsi_pkt_overhead)
+{
+       unsigned int dsi_timing = DIV_ROUND_UP(dpi_timing * dpi_bpp, 8);
+
+       if (dsi_timing < dsi_pkt_overhead)
+               dsi_timing = 0;
+       else
+               dsi_timing -= dsi_pkt_overhead;
+
+       return dsi_timing;
+}
+
+/*
+ * Translate a DRM display mode into DSI horizontal timings (*dsi_cfg)
+ * and a matching DPHY PLL config (*dphy_cfg).
+ *
+ * When mode_valid_check is true the raw mode->h* fields are used (called
+ * from .mode_valid, before the CRTC has adjusted the mode); otherwise the
+ * mode->crtc_h* fields are used (called from .enable).
+ *
+ * Returns 0 on success or a negative errno if no PLL config exists or the
+ * FIFO-drain constraint at the end cannot be met.
+ */
+static int cdns_dsi_mode2cfg(struct cdns_dsi *dsi,
+                            const struct drm_display_mode *mode,
+                            struct cdns_dsi_cfg *dsi_cfg,
+                            struct cdns_dphy_cfg *dphy_cfg,
+                            bool mode_valid_check)
+{
+       unsigned long dsi_htotal = 0, dsi_hss_hsa_hse_hbp = 0;
+       struct cdns_dsi_output *output = &dsi->output;
+       unsigned int dsi_hfp_ext = 0, dpi_hfp, tmp;
+       bool sync_pulse = false;
+       int bpp, nlanes, ret;
+
+       memset(dsi_cfg, 0, sizeof(*dsi_cfg));
+
+       if (output->dev->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
+               sync_pulse = true;
+
+       bpp = mipi_dsi_pixel_format_to_bpp(output->dev->format);
+       nlanes = output->dev->lanes;
+
+       /* HBP: without sync pulses the sync width is folded into HBP. */
+       if (mode_valid_check)
+               tmp = mode->htotal -
+                     (sync_pulse ? mode->hsync_end : mode->hsync_start);
+       else
+               tmp = mode->crtc_htotal -
+                     (sync_pulse ?
+                      mode->crtc_hsync_end : mode->crtc_hsync_start);
+
+       dsi_cfg->hbp = dpi_to_dsi_timing(tmp, bpp, DSI_HBP_FRAME_OVERHEAD);
+       dsi_htotal += dsi_cfg->hbp + DSI_HBP_FRAME_OVERHEAD;
+       dsi_hss_hsa_hse_hbp += dsi_cfg->hbp + DSI_HBP_FRAME_OVERHEAD;
+
+       if (sync_pulse) {
+               if (mode_valid_check)
+                       tmp = mode->hsync_end - mode->hsync_start;
+               else
+                       tmp = mode->crtc_hsync_end - mode->crtc_hsync_start;
+
+               dsi_cfg->hsa = dpi_to_dsi_timing(tmp, bpp,
+                                                DSI_HSA_FRAME_OVERHEAD);
+               dsi_htotal += dsi_cfg->hsa + DSI_HSA_FRAME_OVERHEAD;
+               dsi_hss_hsa_hse_hbp += dsi_cfg->hsa + DSI_HSA_FRAME_OVERHEAD;
+       }
+
+       dsi_cfg->hact = dpi_to_dsi_timing(mode_valid_check ?
+                                         mode->hdisplay : mode->crtc_hdisplay,
+                                         bpp, 0);
+       dsi_htotal += dsi_cfg->hact;
+
+       if (mode_valid_check)
+               dpi_hfp = mode->hsync_start - mode->hdisplay;
+       else
+               dpi_hfp = mode->crtc_hsync_start - mode->crtc_hdisplay;
+
+       dsi_cfg->hfp = dpi_to_dsi_timing(dpi_hfp, bpp, DSI_HFP_FRAME_OVERHEAD);
+       dsi_htotal += dsi_cfg->hfp + DSI_HFP_FRAME_OVERHEAD;
+
+       if (mode_valid_check)
+               ret = cdns_dsi_get_dphy_pll_cfg(dsi->dphy, dphy_cfg,
+                                               mode->htotal, bpp,
+                                               mode->clock * 1000,
+                                               dsi_htotal, nlanes,
+                                               &dsi_hfp_ext);
+       else
+               ret = cdns_dsi_get_dphy_pll_cfg(dsi->dphy, dphy_cfg,
+                                               mode->crtc_htotal, bpp,
+                                               mode->crtc_clock * 1000,
+                                               dsi_htotal, nlanes,
+                                               &dsi_hfp_ext);
+
+       if (ret)
+               return ret;
+
+       /* Absorb the lane-alignment extension computed by the PLL search. */
+       dsi_cfg->hfp += dsi_hfp_ext;
+       dsi_htotal += dsi_hfp_ext;
+       dsi_cfg->htotal = dsi_htotal;
+
+       /*
+        * Make sure DPI(HFP) > DSI(HSS+HSA+HSE+HBP) to guarantee that the FIFO
+        * is empty before we start a receiving a new line on the DPI
+        * interface.
+        */
+       if ((u64)dphy_cfg->lane_bps * dpi_hfp * nlanes <
+           (u64)dsi_hss_hsa_hse_hbp *
+           (mode_valid_check ? mode->clock : mode->crtc_clock) * 1000)
+               return -EINVAL;
+
+       return 0;
+}
+
+/*
+ * drm_bridge .attach: refuse non-atomic DRM devices, then chain the
+ * output bridge (panel or next bridge) behind this one.
+ */
+static int cdns_dsi_bridge_attach(struct drm_bridge *bridge)
+{
+       struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
+       struct cdns_dsi *dsi = input_to_dsi(input);
+       struct cdns_dsi_output *output = &dsi->output;
+
+       if (!drm_core_check_feature(bridge->dev, DRIVER_ATOMIC)) {
+               dev_err(dsi->base.dev,
+                       "cdns-dsi driver is only compatible with DRM devices supporting atomic updates");
+               return -ENOTSUPP;
+       }
+
+       return drm_bridge_attach(bridge->encoder, output->bridge, bridge);
+}
+
+/*
+ * drm_bridge .mode_valid: reject modes whose vertical timings or HACT
+ * alignment the hardware cannot express, then verify a PLL/timing config
+ * exists via cdns_dsi_mode2cfg().
+ *
+ * NOTE(review): nlanes is assigned but never used here — likely leftover;
+ * confirm it can be removed (it would silence a -Wunused warning).
+ */
+static enum drm_mode_status
+cdns_dsi_bridge_mode_valid(struct drm_bridge *bridge,
+                          const struct drm_display_mode *mode)
+{
+       struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
+       struct cdns_dsi *dsi = input_to_dsi(input);
+       struct cdns_dsi_output *output = &dsi->output;
+       struct cdns_dphy_cfg dphy_cfg;
+       struct cdns_dsi_cfg dsi_cfg;
+       int bpp, nlanes, ret;
+
+       /*
+        * VFP_DSI should be less than VFP_DPI and VFP_DSI should be at
+        * least 1.
+        */
+       if (mode->vtotal - mode->vsync_end < 2)
+               return MODE_V_ILLEGAL;
+
+       /* VSA_DSI = VSA_DPI and must be at least 2. */
+       if (mode->vsync_end - mode->vsync_start < 2)
+               return MODE_V_ILLEGAL;
+
+       /* HACT must be 32-bits aligned. */
+       bpp = mipi_dsi_pixel_format_to_bpp(output->dev->format);
+       if ((mode->hdisplay * bpp) % 32)
+               return MODE_H_ILLEGAL;
+
+       nlanes = output->dev->lanes;
+
+       ret = cdns_dsi_mode2cfg(dsi, mode, &dsi_cfg, &dphy_cfg, true);
+       if (ret)
+               return MODE_CLOCK_RANGE;
+
+       return MODE_OK;
+}
+
+/*
+ * drm_bridge .disable: stop video output, disable this input interface
+ * and drop the runtime-PM reference taken in .enable.
+ */
+static void cdns_dsi_bridge_disable(struct drm_bridge *bridge)
+{
+       struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
+       struct cdns_dsi *dsi = input_to_dsi(input);
+       u32 val;
+
+       val = readl(dsi->regs + MCTL_MAIN_DATA_CTL);
+       val &= ~(IF_VID_SELECT_MASK | IF_VID_MODE | VID_EN | HOST_EOT_GEN |
+                DISP_EOT_GEN);
+       writel(val, dsi->regs + MCTL_MAIN_DATA_CTL);
+
+       val = readl(dsi->regs + MCTL_MAIN_EN) & ~IF_EN(input->id);
+       writel(val, dsi->regs + MCTL_MAIN_EN);
+       pm_runtime_put(dsi->base.dev);
+}
+
+/*
+ * Bring the DPHY into HS operation: power it down, program PSM divider,
+ * clock-lane attachment and PLL, then power up, wait for PLL lock and
+ * release the lane resets.
+ */
+static void cdns_dsi_hs_init(struct cdns_dsi *dsi,
+                            const struct cdns_dphy_cfg *dphy_cfg)
+{
+       u32 status;
+
+       /*
+        * Power all internal DPHY blocks down and maintain their reset line
+        * asserted before changing the DPHY config.
+        */
+       writel(DPHY_CMN_PSO | DPHY_PLL_PSO | DPHY_ALL_D_PDN | DPHY_C_PDN |
+              DPHY_CMN_PDN | DPHY_PLL_PDN,
+              dsi->regs + MCTL_DPHY_CFG0);
+
+       /*
+        * Configure the internal PSM clk divider so that the DPHY has a
+        * 1MHz clk (or something close).
+        */
+       WARN_ON_ONCE(cdns_dphy_setup_psm(dsi->dphy));
+
+       /*
+        * Configure attach clk lanes to data lanes: the DPHY has 2 clk lanes
+        * and 8 data lanes, each clk lane can be attache different set of
+        * data lanes. The 2 groups are named 'left' and 'right', so here we
+        * just say that we want the 'left' clk lane to drive the 'left' data
+        * lanes.
+        */
+       cdns_dphy_set_clk_lane_cfg(dsi->dphy, DPHY_CLK_CFG_LEFT_DRIVES_LEFT);
+
+       /*
+        * Configure the DPHY PLL that will be used to generate the TX byte
+        * clk.
+        */
+       cdns_dphy_set_pll_cfg(dsi->dphy, dphy_cfg);
+
+       /* Start TX state machine. */
+       writel(DPHY_CMN_SSM_EN | DPHY_CMN_TX_MODE_EN,
+              dsi->dphy->regs + DPHY_CMN_SSM);
+
+       /* Activate the PLL and wait until it's locked. */
+       writel(PLL_LOCKED, dsi->regs + MCTL_MAIN_STS_CLR);
+       writel(DPHY_CMN_PSO | DPHY_ALL_D_PDN | DPHY_C_PDN | DPHY_CMN_PDN,
+              dsi->regs + MCTL_DPHY_CFG0);
+       WARN_ON_ONCE(readl_poll_timeout(dsi->regs + MCTL_MAIN_STS, status,
+                                       status & PLL_LOCKED, 100, 100));
+       /* De-assert data and clock reset lines. */
+       writel(DPHY_CMN_PSO | DPHY_ALL_D_PDN | DPHY_C_PDN | DPHY_CMN_PDN |
+              DPHY_D_RSTB(dphy_cfg->nlanes) | DPHY_C_RSTB,
+              dsi->regs + MCTL_DPHY_CFG0);
+}
+
+/*
+ * One-time link bring-up: enable the data lanes, program ULP exit time,
+ * enable the link and start clock lane, PLL and data lanes. Idempotent
+ * via the link_initialized flag (cleared again on suspend).
+ */
+static void cdns_dsi_init_link(struct cdns_dsi *dsi)
+{
+       struct cdns_dsi_output *output = &dsi->output;
+       unsigned long sysclk_period, ulpout;
+       u32 val;
+       int i;
+
+       if (dsi->link_initialized)
+               return;
+
+       /* Lane 0 needs no explicit enable bit here; start at lane 1.
+        * NOTE(review): presumably by hardware design — confirm against the
+        * controller datasheet.
+        */
+       val = 0;
+       for (i = 1; i < output->dev->lanes; i++)
+               val |= DATA_LANE_EN(i);
+
+       if (!(output->dev->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
+               val |= CLK_CONTINUOUS;
+
+       writel(val, dsi->regs + MCTL_MAIN_PHY_CTL);
+
+       /* ULPOUT should be set to 1ms and is expressed in sysclk cycles. */
+       sysclk_period = NSEC_PER_SEC / clk_get_rate(dsi->dsi_sys_clk);
+       ulpout = DIV_ROUND_UP(NSEC_PER_MSEC, sysclk_period);
+       writel(CLK_LANE_ULPOUT_TIME(ulpout) | DATA_LANE_ULPOUT_TIME(ulpout),
+              dsi->regs + MCTL_ULPOUT_TIME);
+
+       writel(LINK_EN, dsi->regs + MCTL_MAIN_DATA_CTL);
+
+       val = CLK_LANE_EN | PLL_START;
+       for (i = 0; i < output->dev->lanes; i++)
+               val |= DATA_LANE_START(i);
+
+       writel(val, dsi->regs + MCTL_MAIN_EN);
+
+       dsi->link_initialized = true;
+}
+
+/*
+ * drm_bridge .enable: take a runtime-PM reference, convert the adjusted
+ * mode to DSI timings, bring up the DPHY and link, then program all video
+ * timing/format registers and finally enable this input interface.
+ */
+static void cdns_dsi_bridge_enable(struct drm_bridge *bridge)
+{
+       struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
+       struct cdns_dsi *dsi = input_to_dsi(input);
+       struct cdns_dsi_output *output = &dsi->output;
+       struct drm_display_mode *mode;
+       struct cdns_dphy_cfg dphy_cfg;
+       unsigned long tx_byte_period;
+       struct cdns_dsi_cfg dsi_cfg;
+       u32 tmp, reg_wakeup, div;
+       int bpp, nlanes;
+
+       if (WARN_ON(pm_runtime_get_sync(dsi->base.dev) < 0))
+               return;
+
+       mode = &bridge->encoder->crtc->state->adjusted_mode;
+       bpp = mipi_dsi_pixel_format_to_bpp(output->dev->format);
+       nlanes = output->dev->lanes;
+
+       /* Cannot fail here: the same mode already passed .mode_valid. */
+       WARN_ON_ONCE(cdns_dsi_mode2cfg(dsi, mode, &dsi_cfg, &dphy_cfg, false));
+
+       cdns_dsi_hs_init(dsi, &dphy_cfg);
+       cdns_dsi_init_link(dsi);
+
+       /* Horizontal and vertical timing registers. */
+       writel(HBP_LEN(dsi_cfg.hbp) | HSA_LEN(dsi_cfg.hsa),
+              dsi->regs + VID_HSIZE1);
+       writel(HFP_LEN(dsi_cfg.hfp) | HACT_LEN(dsi_cfg.hact),
+              dsi->regs + VID_HSIZE2);
+
+       writel(VBP_LEN(mode->crtc_vtotal - mode->crtc_vsync_end - 1) |
+              VFP_LEN(mode->crtc_vsync_start - mode->crtc_vdisplay) |
+              VSA_LEN(mode->crtc_vsync_end - mode->crtc_vsync_start + 1),
+              dsi->regs + VID_VSIZE1);
+       writel(mode->crtc_vdisplay, dsi->regs + VID_VSIZE2);
+
+       /* Blanking packet length for sync-pulse mode. */
+       tmp = dsi_cfg.htotal -
+             (dsi_cfg.hsa + DSI_BLANKING_FRAME_OVERHEAD +
+              DSI_HSA_FRAME_OVERHEAD);
+       writel(BLK_LINE_PULSE_PKT_LEN(tmp), dsi->regs + VID_BLKSIZE2);
+       if (output->dev->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
+               writel(MAX_LINE_LIMIT(tmp - DSI_NULL_FRAME_OVERHEAD),
+                      dsi->regs + VID_VCA_SETTING2);
+
+       /* Blanking packet length for sync-event mode. */
+       tmp = dsi_cfg.htotal -
+             (DSI_HSS_VSS_VSE_FRAME_OVERHEAD + DSI_BLANKING_FRAME_OVERHEAD);
+       writel(BLK_LINE_EVENT_PKT_LEN(tmp), dsi->regs + VID_BLKSIZE1);
+       if (!(output->dev->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE))
+               writel(MAX_LINE_LIMIT(tmp - DSI_NULL_FRAME_OVERHEAD),
+                      dsi->regs + VID_VCA_SETTING2);
+
+       /* Line duration in TX byte clock cycles per lane. */
+       tmp = DIV_ROUND_UP(dsi_cfg.htotal, nlanes) -
+             DIV_ROUND_UP(dsi_cfg.hsa, nlanes);
+
+       if (!(output->dev->mode_flags & MIPI_DSI_MODE_EOT_PACKET))
+               tmp -= DIV_ROUND_UP(DSI_EOT_PKT_SIZE, nlanes);
+
+       tx_byte_period = DIV_ROUND_DOWN_ULL((u64)NSEC_PER_SEC * 8,
+                                           dphy_cfg.lane_bps);
+       reg_wakeup = cdns_dphy_get_wakeup_time_ns(dsi->dphy) /
+                    tx_byte_period;
+       writel(REG_WAKEUP_TIME(reg_wakeup) | REG_LINE_DURATION(tmp),
+              dsi->regs + VID_DPHY_TIME);
+
+       /*
+        * HSTX and LPRX timeouts are both expressed in TX byte clk cycles and
+        * both should be set to at least the time it takes to transmit a
+        * frame.
+        */
+       tmp = NSEC_PER_SEC / drm_mode_vrefresh(mode);
+       tmp /= tx_byte_period;
+
+       /* Halve the timeout (and bump the clk divider) until it fits. */
+       for (div = 0; div <= CLK_DIV_MAX; div++) {
+               if (tmp <= HSTX_TIMEOUT_MAX)
+                       break;
+
+               tmp >>= 1;
+       }
+
+       if (tmp > HSTX_TIMEOUT_MAX)
+               tmp = HSTX_TIMEOUT_MAX;
+
+       writel(CLK_DIV(div) | HSTX_TIMEOUT(tmp),
+              dsi->regs + MCTL_DPHY_TIMEOUT1);
+
+       writel(LPRX_TIMEOUT(tmp), dsi->regs + MCTL_DPHY_TIMEOUT2);
+
+       if (output->dev->mode_flags & MIPI_DSI_MODE_VIDEO) {
+               /* Map the MIPI pixel format onto the controller's encoding. */
+               switch (output->dev->format) {
+               case MIPI_DSI_FMT_RGB888:
+                       tmp = VID_PIXEL_MODE_RGB888 |
+                             VID_DATATYPE(MIPI_DSI_PACKED_PIXEL_STREAM_24);
+                       break;
+
+               case MIPI_DSI_FMT_RGB666:
+                       tmp = VID_PIXEL_MODE_RGB666 |
+                             VID_DATATYPE(MIPI_DSI_PIXEL_STREAM_3BYTE_18);
+                       break;
+
+               case MIPI_DSI_FMT_RGB666_PACKED:
+                       tmp = VID_PIXEL_MODE_RGB666_PACKED |
+                             VID_DATATYPE(MIPI_DSI_PACKED_PIXEL_STREAM_18);
+                       break;
+
+               case MIPI_DSI_FMT_RGB565:
+                       tmp = VID_PIXEL_MODE_RGB565 |
+                             VID_DATATYPE(MIPI_DSI_PACKED_PIXEL_STREAM_16);
+                       break;
+
+               default:
+                       dev_err(dsi->base.dev, "Unsupported DSI format\n");
+                       return;
+               }
+
+               if (output->dev->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
+                       tmp |= SYNC_PULSE_ACTIVE | SYNC_PULSE_HORIZONTAL;
+
+               tmp |= REG_BLKLINE_MODE(REG_BLK_MODE_BLANKING_PKT) |
+                      REG_BLKEOL_MODE(REG_BLK_MODE_BLANKING_PKT) |
+                      RECOVERY_MODE(RECOVERY_MODE_NEXT_HSYNC) |
+                      VID_IGNORE_MISS_VSYNC;
+
+               writel(tmp, dsi->regs + VID_MAIN_CTL);
+       }
+
+       tmp = readl(dsi->regs + MCTL_MAIN_DATA_CTL);
+       tmp &= ~(IF_VID_SELECT_MASK | HOST_EOT_GEN | IF_VID_MODE);
+
+       if (!(output->dev->mode_flags & MIPI_DSI_MODE_EOT_PACKET))
+               tmp |= HOST_EOT_GEN;
+
+       if (output->dev->mode_flags & MIPI_DSI_MODE_VIDEO)
+               tmp |= IF_VID_MODE | IF_VID_SELECT(input->id) | VID_EN;
+
+       writel(tmp, dsi->regs + MCTL_MAIN_DATA_CTL);
+
+       tmp = readl(dsi->regs + MCTL_MAIN_EN) | IF_EN(input->id);
+       writel(tmp, dsi->regs + MCTL_MAIN_EN);
+}
+
+/* drm_bridge callbacks for the DSI input interface. */
+static const struct drm_bridge_funcs cdns_dsi_bridge_funcs = {
+       .attach = cdns_dsi_bridge_attach,
+       .mode_valid = cdns_dsi_bridge_mode_valid,
+       .disable = cdns_dsi_bridge_disable,
+       .enable = cdns_dsi_bridge_enable,
+};
+
+/*
+ * mipi_dsi_host .attach: resolve the downstream panel or bridge from DT,
+ * record it as our output and register the input bridge with DRM.
+ *
+ * Returns -EBUSY if a device is already attached, -ENOTSUPP for burst
+ * mode, or the panel/bridge lookup error.
+ */
+static int cdns_dsi_attach(struct mipi_dsi_host *host,
+                          struct mipi_dsi_device *dev)
+{
+       struct cdns_dsi *dsi = to_cdns_dsi(host);
+       struct cdns_dsi_output *output = &dsi->output;
+       struct cdns_dsi_input *input = &dsi->input;
+       struct drm_bridge *bridge;
+       struct drm_panel *panel;
+       struct device_node *np;
+       int ret;
+
+       /*
+        * We currently do not support connecting several DSI devices to the
+        * same host. In order to support that we'd need the DRM bridge
+        * framework to allow dynamic reconfiguration of the bridge chain.
+        */
+       if (output->dev)
+               return -EBUSY;
+
+       /* We do not support burst mode yet. */
+       if (dev->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
+               return -ENOTSUPP;
+
+       /*
+        * The host <-> device link might be described using an OF-graph
+        * representation, in this case we extract the device of_node from
+        * this representation, otherwise we use dsidev->dev.of_node which
+        * should have been filled by the core.
+        */
+       np = of_graph_get_remote_node(dsi->base.dev->of_node, DSI_OUTPUT_PORT,
+                                     dev->channel);
+       if (!np)
+               np = of_node_get(dev->dev.of_node);
+
+       /* Prefer a panel; otherwise fall back to a chained bridge. */
+       panel = of_drm_find_panel(np);
+       if (panel) {
+               bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_DSI);
+       } else {
+               bridge = of_drm_find_bridge(dev->dev.of_node);
+               if (!bridge)
+                       bridge = ERR_PTR(-EINVAL);
+       }
+
+       of_node_put(np);
+
+       if (IS_ERR(bridge)) {
+               ret = PTR_ERR(bridge);
+               dev_err(host->dev, "failed to add DSI device %s (err = %d)",
+                       dev->name, ret);
+               return ret;
+       }
+
+       output->dev = dev;
+       output->bridge = bridge;
+       output->panel = panel;
+
+       /*
+        * The DSI output has been properly configured, we can now safely
+        * register the input to the bridge framework so that it can take place
+        * in a display pipeline.
+        */
+       drm_bridge_add(&input->bridge);
+
+       return 0;
+}
+
+/*
+ * mipi_dsi_host .detach: unregister the input bridge and tear down the
+ * panel-bridge wrapper if we created one in cdns_dsi_attach().
+ */
+static int cdns_dsi_detach(struct mipi_dsi_host *host,
+                          struct mipi_dsi_device *dev)
+{
+       struct cdns_dsi *dsi = to_cdns_dsi(host);
+       struct cdns_dsi_output *output = &dsi->output;
+       struct cdns_dsi_input *input = &dsi->input;
+
+       drm_bridge_remove(&input->bridge);
+       if (output->panel)
+               drm_panel_bridge_remove(output->bridge);
+
+       return 0;
+}
+
+/*
+ * IRQ handler: acknowledge direct-command status events by masking them
+ * out of the control register and wake the waiter in cdns_dsi_transfer().
+ */
+static irqreturn_t cdns_dsi_interrupt(int irq, void *data)
+{
+       struct cdns_dsi *dsi = data;
+       irqreturn_t ret = IRQ_NONE;
+       u32 flag, ctl;
+
+       flag = readl(dsi->regs + DIRECT_CMD_STS_FLAG);
+       if (flag) {
+               ctl = readl(dsi->regs + DIRECT_CMD_STS_CTL);
+               ctl &= ~flag;
+               writel(ctl, dsi->regs + DIRECT_CMD_STS_CTL);
+               complete(&dsi->direct_cmd_comp);
+               ret = IRQ_HANDLED;
+       }
+
+       return ret;
+}
+
+/*
+ * mipi_dsi_host .transfer: send one command packet through the direct
+ * command FIFO and, for reads, collect the response from the RX FIFO.
+ * Completion is signaled by cdns_dsi_interrupt(); a 1s timeout guards
+ * against a stuck link.
+ *
+ * NOTE(review): on success this returns 0 (ret left over from
+ * mipi_dsi_create_packet), not the number of bytes transferred — some
+ * mipi_dsi_host_ops callers expect a byte count; confirm against the
+ * mipi_dsi_host_ops contract.
+ */
+static ssize_t cdns_dsi_transfer(struct mipi_dsi_host *host,
+                                const struct mipi_dsi_msg *msg)
+{
+       struct cdns_dsi *dsi = to_cdns_dsi(host);
+       u32 cmd, sts, val, wait = WRITE_COMPLETED, ctl = 0;
+       struct mipi_dsi_packet packet;
+       int ret, i, tx_len, rx_len;
+
+       ret = pm_runtime_get_sync(host->dev);
+       if (ret < 0)
+               return ret;
+
+       cdns_dsi_init_link(dsi);
+
+       ret = mipi_dsi_create_packet(&packet, msg);
+       if (ret)
+               goto out;
+
+       tx_len = msg->tx_buf ? msg->tx_len : 0;
+       rx_len = msg->rx_buf ? msg->rx_len : 0;
+
+       /* For read operations, the maximum TX len is 2. */
+       if (rx_len && tx_len > 2) {
+               ret = -ENOTSUPP;
+               goto out;
+       }
+
+       /* TX len is limited by the CMD FIFO depth. */
+       if (tx_len > dsi->direct_cmd_fifo_depth) {
+               ret = -ENOTSUPP;
+               goto out;
+       }
+
+       /* RX len is limited by the RX FIFO depth. */
+       if (rx_len > dsi->rx_fifo_depth) {
+               ret = -ENOTSUPP;
+               goto out;
+       }
+
+       cmd = CMD_SIZE(tx_len) | CMD_VCHAN_ID(msg->channel) |
+             CMD_DATATYPE(msg->type);
+
+       if (msg->flags & MIPI_DSI_MSG_USE_LPM)
+               cmd |= CMD_LP_EN;
+
+       if (mipi_dsi_packet_format_is_long(msg->type))
+               cmd |= CMD_LONG;
+
+       /* Pick the completion events to wait for and the BTA/READ enables. */
+       if (rx_len) {
+               cmd |= READ_CMD;
+               wait = READ_COMPLETED_WITH_ERR | READ_COMPLETED;
+               ctl = READ_EN | BTA_EN;
+       } else if (msg->flags & MIPI_DSI_MSG_REQ_ACK) {
+               cmd |= BTA_REQ;
+               wait = ACK_WITH_ERR_RCVD | ACK_RCVD;
+               ctl = BTA_EN;
+       }
+
+       writel(readl(dsi->regs + MCTL_MAIN_DATA_CTL) | ctl,
+              dsi->regs + MCTL_MAIN_DATA_CTL);
+
+       writel(cmd, dsi->regs + DIRECT_CMD_MAIN_SETTINGS);
+
+       /* Fill the TX FIFO, packing 4 payload bytes per 32-bit write. */
+       for (i = 0; i < tx_len; i += 4) {
+               const u8 *buf = msg->tx_buf;
+               int j;
+
+               val = 0;
+               for (j = 0; j < 4 && j + i < tx_len; j++)
+                       val |= (u32)buf[i + j] << (8 * j);
+
+               writel(val, dsi->regs + DIRECT_CMD_WRDATA);
+       }
+
+       /* Clear status flags before sending the command. */
+       writel(wait, dsi->regs + DIRECT_CMD_STS_CLR);
+       writel(wait, dsi->regs + DIRECT_CMD_STS_CTL);
+       reinit_completion(&dsi->direct_cmd_comp);
+       writel(0, dsi->regs + DIRECT_CMD_SEND);
+
+       wait_for_completion_timeout(&dsi->direct_cmd_comp,
+                                   msecs_to_jiffies(1000));
+
+       sts = readl(dsi->regs + DIRECT_CMD_STS);
+       writel(wait, dsi->regs + DIRECT_CMD_STS_CLR);
+       writel(0, dsi->regs + DIRECT_CMD_STS_CTL);
+
+       writel(readl(dsi->regs + MCTL_MAIN_DATA_CTL) & ~ctl,
+              dsi->regs + MCTL_MAIN_DATA_CTL);
+
+       /* We did not receive the events we were waiting for. */
+       if (!(sts & wait)) {
+               ret = -ETIMEDOUT;
+               goto out;
+       }
+
+       /* 'READ' or 'WRITE with ACK' failed. */
+       if (sts & (READ_COMPLETED_WITH_ERR | ACK_WITH_ERR_RCVD)) {
+               ret = -EIO;
+               goto out;
+       }
+
+       /* Drain the RX FIFO, unpacking 4 bytes per 32-bit read. */
+       for (i = 0; i < rx_len; i += 4) {
+               u8 *buf = msg->rx_buf;
+               int j;
+
+               val = readl(dsi->regs + DIRECT_CMD_RDDATA);
+               for (j = 0; j < 4 && j + i < rx_len; j++)
+                       buf[i + j] = val >> (8 * j);
+       }
+
+out:
+       pm_runtime_put(host->dev);
+       return ret;
+}
+
+/* mipi_dsi_host callbacks exposed to attached DSI devices. */
+static const struct mipi_dsi_host_ops cdns_dsi_ops = {
+       .attach = cdns_dsi_attach,
+       .detach = cdns_dsi_detach,
+       .transfer = cdns_dsi_transfer,
+};
+
+static int cdns_dsi_resume(struct device *dev)
+{
+       struct cdns_dsi *dsi = dev_get_drvdata(dev);
+
+       reset_control_deassert(dsi->dsi_p_rst);
+       clk_prepare_enable(dsi->dsi_p_clk);
+       clk_prepare_enable(dsi->dsi_sys_clk);
+       clk_prepare_enable(dsi->dphy->psm_clk);
+       clk_prepare_enable(dsi->dphy->pll_ref_clk);
+
+       return 0;
+}
+
+static int cdns_dsi_suspend(struct device *dev)
+{
+       struct cdns_dsi *dsi = dev_get_drvdata(dev);
+
+       clk_disable_unprepare(dsi->dphy->pll_ref_clk);
+       clk_disable_unprepare(dsi->dphy->psm_clk);
+       clk_disable_unprepare(dsi->dsi_sys_clk);
+       clk_disable_unprepare(dsi->dsi_p_clk);
+       reset_control_assert(dsi->dsi_p_rst);
+       dsi->link_initialized = false;
+       return 0;
+}
+
+static UNIVERSAL_DEV_PM_OPS(cdns_dsi_pm_ops, cdns_dsi_suspend, cdns_dsi_resume,
+                           NULL);
+
+static unsigned long cdns_dphy_ref_get_wakeup_time_ns(struct cdns_dphy *dphy)
+{
+       /* Default wakeup time is 800 ns (in a simulated environment). */
+       return 800;
+}
+
+static void cdns_dphy_ref_set_pll_cfg(struct cdns_dphy *dphy,
+                                     const struct cdns_dphy_cfg *cfg)
+{
+       u32 fbdiv_low, fbdiv_high;
+
+       fbdiv_low = (cfg->pll_fbdiv / 4) - 2;
+       fbdiv_high = cfg->pll_fbdiv - fbdiv_low - 2;
+
+       writel(DPHY_CMN_IPDIV_FROM_REG | DPHY_CMN_OPDIV_FROM_REG |
+              DPHY_CMN_IPDIV(cfg->pll_ipdiv) |
+              DPHY_CMN_OPDIV(cfg->pll_opdiv),
+              dphy->regs + DPHY_CMN_OPIPDIV);
+       writel(DPHY_CMN_FBDIV_FROM_REG |
+              DPHY_CMN_FBDIV_VAL(fbdiv_low, fbdiv_high),
+              dphy->regs + DPHY_CMN_FBDIV);
+       writel(DPHY_CMN_PWM_HIGH(6) | DPHY_CMN_PWM_LOW(0x101) |
+              DPHY_CMN_PWM_DIV(0x8),
+              dphy->regs + DPHY_CMN_PWM);
+}
+
+static void cdns_dphy_ref_set_psm_div(struct cdns_dphy *dphy, u8 div)
+{
+       writel(DPHY_PSM_CFG_FROM_REG | DPHY_PSM_CLK_DIV(div),
+              dphy->regs + DPHY_PSM_CFG);
+}
+
+/*
+ * This is the reference implementation of the DPHY hooks. Specific
+ * integrations of this IP may have to re-implement some of them, depending on
+ * how things were wired in the SoC.
+ */
+static const struct cdns_dphy_ops ref_dphy_ops = {
+       .get_wakeup_time_ns = cdns_dphy_ref_get_wakeup_time_ns,
+       .set_pll_cfg = cdns_dphy_ref_set_pll_cfg,
+       .set_psm_div = cdns_dphy_ref_set_psm_div,
+};
+
+static const struct of_device_id cdns_dphy_of_match[] = {
+       { .compatible = "cdns,dphy", .data = &ref_dphy_ops },
+       { /* sentinel */ },
+};
+
+static struct cdns_dphy *cdns_dphy_probe(struct platform_device *pdev)
+{
+       const struct of_device_id *match;
+       struct cdns_dphy *dphy;
+       struct of_phandle_args args;
+       struct resource res;
+       int ret;
+
+       ret = of_parse_phandle_with_args(pdev->dev.of_node, "phys",
+                                        "#phy-cells", 0, &args);
+       if (ret)
+               return ERR_PTR(-ENOENT);
+
+       match = of_match_node(cdns_dphy_of_match, args.np);
+       if (!match || !match->data)
+               return ERR_PTR(-EINVAL);
+
+       dphy = devm_kzalloc(&pdev->dev, sizeof(*dphy), GFP_KERNEL);
+       if (!dphy)
+               return ERR_PTR(-ENOMEM);
+
+       dphy->ops = match->data;
+
+       ret = of_address_to_resource(args.np, 0, &res);
+       if (ret)
+               return ERR_PTR(ret);
+
+       dphy->regs = devm_ioremap_resource(&pdev->dev, &res);
+       if (IS_ERR(dphy->regs))
+               return ERR_CAST(dphy->regs);
+
+       dphy->psm_clk = of_clk_get_by_name(args.np, "psm");
+       if (IS_ERR(dphy->psm_clk))
+               return ERR_CAST(dphy->psm_clk);
+
+       dphy->pll_ref_clk = of_clk_get_by_name(args.np, "pll_ref");
+       if (IS_ERR(dphy->pll_ref_clk)) {
+               ret = PTR_ERR(dphy->pll_ref_clk);
+               goto err_put_psm_clk;
+       }
+
+       if (dphy->ops->probe) {
+               ret = dphy->ops->probe(dphy);
+               if (ret)
+                       goto err_put_pll_ref_clk;
+       }
+
+       return dphy;
+
+err_put_pll_ref_clk:
+       clk_put(dphy->pll_ref_clk);
+
+err_put_psm_clk:
+       clk_put(dphy->psm_clk);
+
+       return ERR_PTR(ret);
+}
+
+static void cdns_dphy_remove(struct cdns_dphy *dphy)
+{
+       if (dphy->ops->remove)
+               dphy->ops->remove(dphy);
+
+       clk_put(dphy->pll_ref_clk);
+       clk_put(dphy->psm_clk);
+}
+
+static int cdns_dsi_drm_probe(struct platform_device *pdev)
+{
+       struct cdns_dsi *dsi;
+       struct cdns_dsi_input *input;
+       struct resource *res;
+       int ret, irq;
+       u32 val;
+
+       dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL);
+       if (!dsi)
+               return -ENOMEM;
+
+       platform_set_drvdata(pdev, dsi);
+
+       input = &dsi->input;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       dsi->regs = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(dsi->regs))
+               return PTR_ERR(dsi->regs);
+
+       dsi->dsi_p_clk = devm_clk_get(&pdev->dev, "dsi_p_clk");
+       if (IS_ERR(dsi->dsi_p_clk))
+               return PTR_ERR(dsi->dsi_p_clk);
+
+       dsi->dsi_p_rst = devm_reset_control_get_optional_exclusive(&pdev->dev,
+                                                               "dsi_p_rst");
+       if (IS_ERR(dsi->dsi_p_rst))
+               return PTR_ERR(dsi->dsi_p_rst);
+
+       dsi->dsi_sys_clk = devm_clk_get(&pdev->dev, "dsi_sys_clk");
+       if (IS_ERR(dsi->dsi_sys_clk))
+               return PTR_ERR(dsi->dsi_sys_clk);
+
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0)
+               return irq;
+
+       dsi->dphy = cdns_dphy_probe(pdev);
+       if (IS_ERR(dsi->dphy))
+               return PTR_ERR(dsi->dphy);
+
+       ret = clk_prepare_enable(dsi->dsi_p_clk);
+       if (ret)
+               goto err_remove_dphy;
+
+       val = readl(dsi->regs + ID_REG);
+       if (REV_VENDOR_ID(val) != 0xcad) {
+               dev_err(&pdev->dev, "invalid vendor id\n");
+               ret = -EINVAL;
+               goto err_disable_pclk;
+       }
+
+       val = readl(dsi->regs + IP_CONF);
+       dsi->direct_cmd_fifo_depth = 1 << (DIRCMD_FIFO_DEPTH(val) + 2);
+       dsi->rx_fifo_depth = RX_FIFO_DEPTH(val);
+       init_completion(&dsi->direct_cmd_comp);
+
+       writel(0, dsi->regs + MCTL_MAIN_DATA_CTL);
+       writel(0, dsi->regs + MCTL_MAIN_EN);
+       writel(0, dsi->regs + MCTL_MAIN_PHY_CTL);
+
+       /*
+        * We only support the DPI input, so force input->id to
+        * CDNS_DPI_INPUT.
+        */
+       input->id = CDNS_DPI_INPUT;
+       input->bridge.funcs = &cdns_dsi_bridge_funcs;
+       input->bridge.of_node = pdev->dev.of_node;
+
+       /* Mask all interrupts before registering the IRQ handler. */
+       writel(0, dsi->regs + MCTL_MAIN_STS_CTL);
+       writel(0, dsi->regs + MCTL_DPHY_ERR_CTL1);
+       writel(0, dsi->regs + CMD_MODE_STS_CTL);
+       writel(0, dsi->regs + DIRECT_CMD_STS_CTL);
+       writel(0, dsi->regs + DIRECT_CMD_RD_STS_CTL);
+       writel(0, dsi->regs + VID_MODE_STS_CTL);
+       writel(0, dsi->regs + TVG_STS_CTL);
+       writel(0, dsi->regs + DPI_IRQ_EN);
+       ret = devm_request_irq(&pdev->dev, irq, cdns_dsi_interrupt, 0,
+                              dev_name(&pdev->dev), dsi);
+       if (ret)
+               goto err_disable_pclk;
+
+       pm_runtime_enable(&pdev->dev);
+       dsi->base.dev = &pdev->dev;
+       dsi->base.ops = &cdns_dsi_ops;
+
+       ret = mipi_dsi_host_register(&dsi->base);
+       if (ret)
+               goto err_disable_runtime_pm;
+
+       clk_disable_unprepare(dsi->dsi_p_clk);
+
+       return 0;
+
+err_disable_runtime_pm:
+       pm_runtime_disable(&pdev->dev);
+
+err_disable_pclk:
+       clk_disable_unprepare(dsi->dsi_p_clk);
+
+err_remove_dphy:
+       cdns_dphy_remove(dsi->dphy);
+
+       return ret;
+}
+
+static int cdns_dsi_drm_remove(struct platform_device *pdev)
+{
+       struct cdns_dsi *dsi = platform_get_drvdata(pdev);
+
+       mipi_dsi_host_unregister(&dsi->base);
+       pm_runtime_disable(&pdev->dev);
+       cdns_dphy_remove(dsi->dphy);
+
+       return 0;
+}
+
+static const struct of_device_id cdns_dsi_of_match[] = {
+       { .compatible = "cdns,dsi" },
+       { },
+};
+
+static struct platform_driver cdns_dsi_platform_driver = {
+       .probe  = cdns_dsi_drm_probe,
+       .remove = cdns_dsi_drm_remove,
+       .driver = {
+               .name   = "cdns-dsi",
+               .of_match_table = cdns_dsi_of_match,
+               .pm = &cdns_dsi_pm_ops,
+       },
+};
+module_platform_driver(cdns_dsi_platform_driver);
+
+MODULE_AUTHOR("Boris Brezillon <boris.brezillon@bootlin.com>");
+MODULE_DESCRIPTION("Cadence DSI driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:cdns-dsi");
+
index 3b7e5c5..8f9c8a6 100644 (file)
@@ -152,7 +152,6 @@ static struct platform_driver snd_dw_hdmi_driver = {
        .remove = snd_dw_hdmi_remove,
        .driver = {
                .name = DRIVER_NAME,
-               .owner = THIS_MODULE,
        },
 };
 module_platform_driver(snd_dw_hdmi_driver);
index 226171a..fd79996 100644 (file)
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Copyright (c) 2016, Fuzhou Rockchip Electronics Co., Ltd
  * Copyright (C) STMicroelectronics SA 2017
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
  * Modified by Philippe Cornu <philippe.cornu@st.com>
  * This generic Synopsys DesignWare MIPI DSI host driver is based on the
  * Rockchip version from rockchip/dw-mipi-dsi.c with phy & bridge APIs.
@@ -775,20 +771,20 @@ static void dw_mipi_dsi_bridge_mode_set(struct drm_bridge *bridge,
 
        clk_prepare_enable(dsi->pclk);
 
-       ret = phy_ops->get_lane_mbps(priv_data, mode, dsi->mode_flags,
+       ret = phy_ops->get_lane_mbps(priv_data, adjusted_mode, dsi->mode_flags,
                                     dsi->lanes, dsi->format, &dsi->lane_mbps);
        if (ret)
                DRM_DEBUG_DRIVER("Phy get_lane_mbps() failed\n");
 
        pm_runtime_get_sync(dsi->dev);
        dw_mipi_dsi_init(dsi);
-       dw_mipi_dsi_dpi_config(dsi, mode);
+       dw_mipi_dsi_dpi_config(dsi, adjusted_mode);
        dw_mipi_dsi_packet_handler_config(dsi);
        dw_mipi_dsi_video_mode_config(dsi);
-       dw_mipi_dsi_video_packet_config(dsi, mode);
+       dw_mipi_dsi_video_packet_config(dsi, adjusted_mode);
        dw_mipi_dsi_command_mode_config(dsi);
-       dw_mipi_dsi_line_timer_config(dsi, mode);
-       dw_mipi_dsi_vertical_timing_config(dsi, mode);
+       dw_mipi_dsi_line_timer_config(dsi, adjusted_mode);
+       dw_mipi_dsi_vertical_timing_config(dsi, adjusted_mode);
 
        dw_mipi_dsi_dphy_init(dsi);
        dw_mipi_dsi_dphy_timing_config(dsi);
@@ -802,7 +798,7 @@ static void dw_mipi_dsi_bridge_mode_set(struct drm_bridge *bridge,
 
        dw_mipi_dsi_dphy_enable(dsi);
 
-       dw_mipi_dsi_wait_for_two_frames(mode);
+       dw_mipi_dsi_wait_for_two_frames(adjusted_mode);
 
        /* Switch to cmd mode for panel-bridge pre_enable & panel prepare */
        dw_mipi_dsi_set_mode(dsi, 0);
index 08ab7d6..0fd9cf2 100644 (file)
@@ -1102,7 +1102,7 @@ static bool tc_bridge_mode_fixup(struct drm_bridge *bridge,
        return true;
 }
 
-static int tc_connector_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status tc_connector_mode_valid(struct drm_connector *connector,
                                   struct drm_display_mode *mode)
 {
        /* DPI interface clock limitation: upto 154 MHz */
diff --git a/drivers/gpu/drm/bridge/thc63lvd1024.c b/drivers/gpu/drm/bridge/thc63lvd1024.c
new file mode 100644 (file)
index 0000000..c8b9edd
--- /dev/null
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * THC63LVD1024 LVDS to parallel data DRM bridge driver.
+ *
+ * Copyright (C) 2018 Jacopo Mondi <jacopo+renesas@jmondi.org>
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_panel.h>
+
+#include <linux/gpio/consumer.h>
+#include <linux/of_graph.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+enum thc63_ports {
+       THC63_LVDS_IN0,
+       THC63_LVDS_IN1,
+       THC63_RGB_OUT0,
+       THC63_RGB_OUT1,
+};
+
+struct thc63_dev {
+       struct device *dev;
+
+       struct regulator *vcc;
+
+       struct gpio_desc *pdwn;
+       struct gpio_desc *oe;
+
+       struct drm_bridge bridge;
+       struct drm_bridge *next;
+};
+
+static inline struct thc63_dev *to_thc63(struct drm_bridge *bridge)
+{
+       return container_of(bridge, struct thc63_dev, bridge);
+}
+
+static int thc63_attach(struct drm_bridge *bridge)
+{
+       struct thc63_dev *thc63 = to_thc63(bridge);
+
+       return drm_bridge_attach(bridge->encoder, thc63->next, bridge);
+}
+
+static void thc63_enable(struct drm_bridge *bridge)
+{
+       struct thc63_dev *thc63 = to_thc63(bridge);
+       int ret;
+
+       ret = regulator_enable(thc63->vcc);
+       if (ret) {
+               dev_err(thc63->dev,
+                       "Failed to enable regulator \"vcc\": %d\n", ret);
+               return;
+       }
+
+       gpiod_set_value(thc63->pdwn, 0);
+       gpiod_set_value(thc63->oe, 1);
+}
+
+static void thc63_disable(struct drm_bridge *bridge)
+{
+       struct thc63_dev *thc63 = to_thc63(bridge);
+       int ret;
+
+       gpiod_set_value(thc63->oe, 0);
+       gpiod_set_value(thc63->pdwn, 1);
+
+       ret = regulator_disable(thc63->vcc);
+       if (ret)
+               dev_err(thc63->dev,
+                       "Failed to disable regulator \"vcc\": %d\n", ret);
+}
+
+static const struct drm_bridge_funcs thc63_bridge_func = {
+       .attach = thc63_attach,
+       .enable = thc63_enable,
+       .disable = thc63_disable,
+};
+
+static int thc63_parse_dt(struct thc63_dev *thc63)
+{
+       struct device_node *thc63_out;
+       struct device_node *remote;
+
+       thc63_out = of_graph_get_endpoint_by_regs(thc63->dev->of_node,
+                                                 THC63_RGB_OUT0, -1);
+       if (!thc63_out) {
+               dev_err(thc63->dev, "Missing endpoint in port@%u\n",
+                       THC63_RGB_OUT0);
+               return -ENODEV;
+       }
+
+       remote = of_graph_get_remote_port_parent(thc63_out);
+       of_node_put(thc63_out);
+       if (!remote) {
+               dev_err(thc63->dev, "Endpoint in port@%u unconnected\n",
+                       THC63_RGB_OUT0);
+               return -ENODEV;
+       }
+
+       if (!of_device_is_available(remote)) {
+               dev_err(thc63->dev, "port@%u remote endpoint is disabled\n",
+                       THC63_RGB_OUT0);
+               of_node_put(remote);
+               return -ENODEV;
+       }
+
+       thc63->next = of_drm_find_bridge(remote);
+       of_node_put(remote);
+       if (!thc63->next)
+               return -EPROBE_DEFER;
+
+       return 0;
+}
+
+static int thc63_gpio_init(struct thc63_dev *thc63)
+{
+       thc63->oe = devm_gpiod_get_optional(thc63->dev, "oe", GPIOD_OUT_LOW);
+       if (IS_ERR(thc63->oe)) {
+               dev_err(thc63->dev, "Unable to get \"oe-gpios\": %ld\n",
+                       PTR_ERR(thc63->oe));
+               return PTR_ERR(thc63->oe);
+       }
+
+       thc63->pdwn = devm_gpiod_get_optional(thc63->dev, "powerdown",
+                                             GPIOD_OUT_HIGH);
+       if (IS_ERR(thc63->pdwn)) {
+               dev_err(thc63->dev, "Unable to get \"powerdown-gpios\": %ld\n",
+                       PTR_ERR(thc63->pdwn));
+               return PTR_ERR(thc63->pdwn);
+       }
+
+       return 0;
+}
+
+static int thc63_probe(struct platform_device *pdev)
+{
+       struct thc63_dev *thc63;
+       int ret;
+
+       thc63 = devm_kzalloc(&pdev->dev, sizeof(*thc63), GFP_KERNEL);
+       if (!thc63)
+               return -ENOMEM;
+
+       thc63->dev = &pdev->dev;
+       platform_set_drvdata(pdev, thc63);
+
+       thc63->vcc = devm_regulator_get_optional(thc63->dev, "vcc");
+       if (IS_ERR(thc63->vcc)) {
+               if (PTR_ERR(thc63->vcc) == -EPROBE_DEFER)
+                       return -EPROBE_DEFER;
+
+               dev_err(thc63->dev, "Unable to get \"vcc\" supply: %ld\n",
+                       PTR_ERR(thc63->vcc));
+               return PTR_ERR(thc63->vcc);
+       }
+
+       ret = thc63_gpio_init(thc63);
+       if (ret)
+               return ret;
+
+       ret = thc63_parse_dt(thc63);
+       if (ret)
+               return ret;
+
+       thc63->bridge.driver_private = thc63;
+       thc63->bridge.of_node = pdev->dev.of_node;
+       thc63->bridge.funcs = &thc63_bridge_func;
+
+       drm_bridge_add(&thc63->bridge);
+
+       return 0;
+}
+
+static int thc63_remove(struct platform_device *pdev)
+{
+       struct thc63_dev *thc63 = platform_get_drvdata(pdev);
+
+       drm_bridge_remove(&thc63->bridge);
+
+       return 0;
+}
+
+static const struct of_device_id thc63_match[] = {
+       { .compatible = "thine,thc63lvd1024", },
+       { },
+};
+MODULE_DEVICE_TABLE(of, thc63_match);
+
+static struct platform_driver thc63_driver = {
+       .probe  = thc63_probe,
+       .remove = thc63_remove,
+       .driver = {
+               .name           = "thc63lvd1024",
+               .of_match_table = thc63_match,
+       },
+};
+module_platform_driver(thc63_driver);
+
+MODULE_AUTHOR("Jacopo Mondi <jacopo@jmondi.org>");
+MODULE_DESCRIPTION("Thine THC63LVD1024 LVDS decoder DRM bridge driver");
+MODULE_LICENSE("GPL v2");
index 7d25c42..9bdd677 100644 (file)
@@ -783,6 +783,8 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
                state->src_w = val;
        } else if (property == config->prop_src_h) {
                state->src_h = val;
+       } else if (property == plane->alpha_property) {
+               state->alpha = val;
        } else if (property == plane->rotation_property) {
                if (!is_power_of_2(val & DRM_MODE_ROTATE_MASK))
                        return -EINVAL;
@@ -848,6 +850,8 @@ drm_atomic_plane_get_property(struct drm_plane *plane,
                *val = state->src_w;
        } else if (property == config->prop_src_h) {
                *val = state->src_h;
+       } else if (property == plane->alpha_property) {
+               *val = state->alpha;
        } else if (property == plane->rotation_property) {
                *val = state->rotation;
        } else if (property == plane->zpos_property) {
@@ -1492,6 +1496,14 @@ EXPORT_SYMBOL(drm_atomic_set_fb_for_plane);
  * Otherwise, if &drm_plane_state.fence is not set this function we just set it
  * with the received implicit fence. In both cases this function consumes a
  * reference for @fence.
+ *
+ * This way explicit fencing can be used to overrule implicit fencing, which is
+ * important to make explicit fencing use-cases work: One example is using one
+ * buffer for 2 screens with different refresh rates. Implicit fencing will
+ * clamp rendering to the refresh rate of the slower screen, whereas explicit
 * fence allows 2 independent render and display loops on a single buffer. If a
 * driver obeys both implicit and explicit fences for plane updates, then it
 * will break all the benefits of explicit fencing.
  */
 void
 drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state,
index c356545..9cb2209 100644 (file)
@@ -875,6 +875,11 @@ EXPORT_SYMBOL(drm_atomic_helper_check_planes);
  * functions depend upon an updated adjusted_mode.clock to e.g. properly compute
  * watermarks.
  *
+ * Note that zpos normalization will add all enabled planes to the state, which
+ * might not be desired for some drivers.
+ * For example, enabling/disabling a cursor plane which has a fixed zpos value
+ * would force all other enabled planes to be added to the state change.
+ *
  * RETURNS:
  * Zero for success or -errno
  */
@@ -887,6 +892,12 @@ int drm_atomic_helper_check(struct drm_device *dev,
        if (ret)
                return ret;
 
+       if (dev->mode_config.normalize_zpos) {
+               ret = drm_atomic_normalize_zpos(dev, state);
+               if (ret)
+                       return ret;
+       }
+
        ret = drm_atomic_helper_check_planes(dev, state);
        if (ret)
                return ret;
@@ -1561,6 +1572,17 @@ void drm_atomic_helper_async_commit(struct drm_device *dev,
        for_each_new_plane_in_state(state, plane, plane_state, i) {
                funcs = plane->helper_private;
                funcs->atomic_async_update(plane, plane_state);
+
+               /*
+                * ->atomic_async_update() is supposed to update the
+                * plane->state in-place, make sure at least common
+                * properties have been properly updated.
+                */
+               WARN_ON_ONCE(plane->state->fb != plane_state->fb);
+               WARN_ON_ONCE(plane->state->crtc_x != plane_state->crtc_x);
+               WARN_ON_ONCE(plane->state->crtc_y != plane_state->crtc_y);
+               WARN_ON_ONCE(plane->state->src_x != plane_state->src_x);
+               WARN_ON_ONCE(plane->state->src_y != plane_state->src_y);
        }
 }
 EXPORT_SYMBOL(drm_atomic_helper_async_commit);
@@ -2659,7 +2681,7 @@ int drm_atomic_helper_disable_plane(struct drm_plane *plane,
                goto fail;
        }
 
-       if (plane_state->crtc && (plane == plane->crtc->cursor))
+       if (plane_state->crtc && plane_state->crtc->cursor == plane)
                plane_state->state->legacy_cursor_update = true;
 
        ret = __drm_atomic_helper_disable_plane(plane, plane_state);
@@ -2881,31 +2903,9 @@ commit:
        return 0;
 }
 
-/**
- * drm_atomic_helper_disable_all - disable all currently active outputs
- * @dev: DRM device
- * @ctx: lock acquisition context
- *
- * Loops through all connectors, finding those that aren't turned off and then
- * turns them off by setting their DPMS mode to OFF and deactivating the CRTC
- * that they are connected to.
- *
- * This is used for example in suspend/resume to disable all currently active
- * functions when suspending. If you just want to shut down everything at e.g.
- * driver unload, look at drm_atomic_helper_shutdown().
- *
- * Note that if callers haven't already acquired all modeset locks this might
- * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
- *
- * Returns:
- * 0 on success or a negative error code on failure.
- *
- * See also:
- * drm_atomic_helper_suspend(), drm_atomic_helper_resume() and
- * drm_atomic_helper_shutdown().
- */
-int drm_atomic_helper_disable_all(struct drm_device *dev,
-                                 struct drm_modeset_acquire_ctx *ctx)
+static int __drm_atomic_helper_disable_all(struct drm_device *dev,
+                                          struct drm_modeset_acquire_ctx *ctx,
+                                          bool clean_old_fbs)
 {
        struct drm_atomic_state *state;
        struct drm_connector_state *conn_state;
@@ -2957,8 +2957,11 @@ int drm_atomic_helper_disable_all(struct drm_device *dev,
                        goto free;
 
                drm_atomic_set_fb_for_plane(plane_state, NULL);
-               plane_mask |= BIT(drm_plane_index(plane));
-               plane->old_fb = plane->fb;
+
+               if (clean_old_fbs) {
+                       plane->old_fb = plane->fb;
+                       plane_mask |= BIT(drm_plane_index(plane));
+               }
        }
 
        ret = drm_atomic_commit(state);
@@ -2969,6 +2972,34 @@ free:
        return ret;
 }
 
+/**
+ * drm_atomic_helper_disable_all - disable all currently active outputs
+ * @dev: DRM device
+ * @ctx: lock acquisition context
+ *
+ * Loops through all connectors, finding those that aren't turned off and then
+ * turns them off by setting their DPMS mode to OFF and deactivating the CRTC
+ * that they are connected to.
+ *
+ * This is used for example in suspend/resume to disable all currently active
+ * functions when suspending. If you just want to shut down everything at e.g.
+ * driver unload, look at drm_atomic_helper_shutdown().
+ *
+ * Note that if callers haven't already acquired all modeset locks this might
+ * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ *
+ * See also:
+ * drm_atomic_helper_suspend(), drm_atomic_helper_resume() and
+ * drm_atomic_helper_shutdown().
+ */
+int drm_atomic_helper_disable_all(struct drm_device *dev,
+                                 struct drm_modeset_acquire_ctx *ctx)
+{
+       return __drm_atomic_helper_disable_all(dev, ctx, false);
+}
 EXPORT_SYMBOL(drm_atomic_helper_disable_all);
 
 /**
@@ -2991,7 +3022,7 @@ void drm_atomic_helper_shutdown(struct drm_device *dev)
        while (1) {
                ret = drm_modeset_lock_all_ctx(dev, &ctx);
                if (!ret)
-                       ret = drm_atomic_helper_disable_all(dev, &ctx);
+                       ret = __drm_atomic_helper_disable_all(dev, &ctx, true);
 
                if (ret != -EDEADLK)
                        break;
@@ -3095,14 +3126,14 @@ int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
        struct drm_connector_state *new_conn_state;
        struct drm_crtc *crtc;
        struct drm_crtc_state *new_crtc_state;
-       unsigned plane_mask = 0;
-       struct drm_device *dev = state->dev;
-       int ret;
 
        state->acquire_ctx = ctx;
 
        for_each_new_plane_in_state(state, plane, new_plane_state, i) {
-               plane_mask |= BIT(drm_plane_index(plane));
+               WARN_ON(plane->crtc != new_plane_state->crtc);
+               WARN_ON(plane->fb != new_plane_state->fb);
+               WARN_ON(plane->old_fb);
+
                state->planes[i].old_state = plane->state;
        }
 
@@ -3112,11 +3143,7 @@ int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
        for_each_new_connector_in_state(state, connector, new_conn_state, i)
                state->connectors[i].old_state = connector->state;
 
-       ret = drm_atomic_commit(state);
-       if (plane_mask)
-               drm_atomic_clean_old_fb(dev, plane_mask, ret);
-
-       return ret;
+       return drm_atomic_commit(state);
 }
 EXPORT_SYMBOL(drm_atomic_helper_commit_duplicated_state);
 
@@ -3484,6 +3511,10 @@ void drm_atomic_helper_plane_reset(struct drm_plane *plane)
        if (plane->state) {
                plane->state->plane = plane;
                plane->state->rotation = DRM_MODE_ROTATE_0;
+
+               /* Reset the alpha value to fully opaque if it matters */
+               if (plane->alpha_property)
+                       plane->state->alpha = plane->alpha_property->values[1];
        }
 }
 EXPORT_SYMBOL(drm_atomic_helper_plane_reset);
index 5a81e1b..a16a74d 100644 (file)
  * On top of this basic transformation additional properties can be exposed by
  * the driver:
  *
+ * alpha:
+ *     Alpha is setup with drm_plane_create_alpha_property(). It controls the
+ *     plane-wide opacity, from transparent (0) to opaque (0xffff). It can be
+ *     combined with pixel alpha.
+ *     The pixel values in the framebuffers are expected to not be
+ *     pre-multiplied by the global alpha associated to the plane.
+ *
  * rotation:
  *     Rotation is set up with drm_plane_create_rotation_property(). It adds a
  *     rotation and reflection step between the source and destination rectangles.
  */
 
 /**
+ * drm_plane_create_alpha_property - create a new alpha property
+ * @plane: drm plane
+ *
+ * This function creates a generic, mutable, alpha property and enables support
+ * for it in the DRM core. It is attached to @plane.
+ *
+ * The alpha property will be allowed to be within the bounds of 0
+ * (transparent) to 0xffff (opaque).
+ *
+ * Returns:
+ * 0 on success, negative error code on failure.
+ */
+int drm_plane_create_alpha_property(struct drm_plane *plane)
+{
+       struct drm_property *prop;
+
+       prop = drm_property_create_range(plane->dev, 0, "alpha",
+                                        0, DRM_BLEND_ALPHA_OPAQUE);
+       if (!prop)
+               return -ENOMEM;
+
+       drm_object_attach_property(&plane->base, prop, DRM_BLEND_ALPHA_OPAQUE);
+       plane->alpha_property = prop;
+
+       if (plane->state)
+               plane->state->alpha = DRM_BLEND_ALPHA_OPAQUE;
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_plane_create_alpha_property);
+
+/**
  * drm_plane_create_rotation_property - create a new rotation property
  * @plane: drm plane
  * @rotation: initial value of the rotation property
index 0358388..a231dd5 100644 (file)
@@ -402,6 +402,7 @@ int drm_mode_getcrtc(struct drm_device *dev,
 {
        struct drm_mode_crtc *crtc_resp = data;
        struct drm_crtc *crtc;
+       struct drm_plane *plane;
 
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                return -EINVAL;
@@ -410,34 +411,36 @@ int drm_mode_getcrtc(struct drm_device *dev,
        if (!crtc)
                return -ENOENT;
 
+       plane = crtc->primary;
+
        crtc_resp->gamma_size = crtc->gamma_size;
 
-       drm_modeset_lock(&crtc->primary->mutex, NULL);
-       if (crtc->primary->state && crtc->primary->state->fb)
-               crtc_resp->fb_id = crtc->primary->state->fb->base.id;
-       else if (!crtc->primary->state && crtc->primary->fb)
-               crtc_resp->fb_id = crtc->primary->fb->base.id;
+       drm_modeset_lock(&plane->mutex, NULL);
+       if (plane->state && plane->state->fb)
+               crtc_resp->fb_id = plane->state->fb->base.id;
+       else if (!plane->state && plane->fb)
+               crtc_resp->fb_id = plane->fb->base.id;
        else
                crtc_resp->fb_id = 0;
 
-       if (crtc->primary->state) {
-               crtc_resp->x = crtc->primary->state->src_x >> 16;
-               crtc_resp->y = crtc->primary->state->src_y >> 16;
+       if (plane->state) {
+               crtc_resp->x = plane->state->src_x >> 16;
+               crtc_resp->y = plane->state->src_y >> 16;
        }
-       drm_modeset_unlock(&crtc->primary->mutex);
+       drm_modeset_unlock(&plane->mutex);
 
        drm_modeset_lock(&crtc->mutex, NULL);
        if (crtc->state) {
                if (crtc->state->enable) {
                        drm_mode_convert_to_umode(&crtc_resp->mode, &crtc->state->mode);
                        crtc_resp->mode_valid = 1;
-
                } else {
                        crtc_resp->mode_valid = 0;
                }
        } else {
                crtc_resp->x = crtc->x;
                crtc_resp->y = crtc->y;
+
                if (crtc->enabled) {
                        drm_mode_convert_to_umode(&crtc_resp->mode, &crtc->mode);
                        crtc_resp->mode_valid = 1;
@@ -471,7 +474,7 @@ static int __drm_mode_set_config_internal(struct drm_mode_set *set,
 
        ret = crtc->funcs->set_config(set, ctx);
        if (ret == 0) {
-               crtc->primary->crtc = crtc;
+               crtc->primary->crtc = fb ? crtc : NULL;
                crtc->primary->fb = fb;
        }
 
@@ -554,6 +557,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
        struct drm_mode_config *config = &dev->mode_config;
        struct drm_mode_crtc *crtc_req = data;
        struct drm_crtc *crtc;
+       struct drm_plane *plane;
        struct drm_connector **connector_set = NULL, *connector;
        struct drm_framebuffer *fb = NULL;
        struct drm_display_mode *mode = NULL;
@@ -580,22 +584,33 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
        }
        DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
 
+       plane = crtc->primary;
+
        mutex_lock(&crtc->dev->mode_config.mutex);
        drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
 retry:
        ret = drm_modeset_lock_all_ctx(crtc->dev, &ctx);
        if (ret)
                goto out;
+
        if (crtc_req->mode_valid) {
                /* If we have a mode we need a framebuffer. */
                /* If we pass -1, set the mode with the currently bound fb */
                if (crtc_req->fb_id == -1) {
-                       if (!crtc->primary->fb) {
+                       struct drm_framebuffer *old_fb;
+
+                       if (plane->state)
+                               old_fb = plane->state->fb;
+                       else
+                               old_fb = plane->fb;
+
+                       if (!old_fb) {
                                DRM_DEBUG_KMS("CRTC doesn't have current FB\n");
                                ret = -EINVAL;
                                goto out;
                        }
-                       fb = crtc->primary->fb;
+
+                       fb = old_fb;
                        /* Make refcounting symmetric with the lookup path. */
                        drm_framebuffer_get(fb);
                } else {
@@ -627,8 +642,8 @@ retry:
                 * match real hardware capabilities. Skip the check in that
                 * case.
                 */
-               if (!crtc->primary->format_default) {
-                       ret = drm_plane_check_pixel_format(crtc->primary,
+               if (!plane->format_default) {
+                       ret = drm_plane_check_pixel_format(plane,
                                                           fb->format->format,
                                                           fb->modifier);
                        if (ret) {
index 3c2b828..5d307b2 100644 (file)
@@ -220,3 +220,5 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
 
 /* drm_edid.c */
 void drm_mode_fixup_1366x768(struct drm_display_mode *mode);
+void drm_reset_display_info(struct drm_connector *connector);
+u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edid);
index 6fac412..6588306 100644 (file)
@@ -2941,12 +2941,14 @@ static void drm_dp_mst_dump_mstb(struct seq_file *m,
        }
 }
 
+#define DP_PAYLOAD_TABLE_SIZE          64
+
 static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
                                  char *buf)
 {
        int i;
 
-       for (i = 0; i < 64; i += 16) {
+       for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) {
                if (drm_dp_dpcd_read(mgr->aux,
                                     DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
                                     &buf[i], 16) != 16)
@@ -3015,7 +3017,7 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
 
        mutex_lock(&mgr->lock);
        if (mgr->mst_primary) {
-               u8 buf[64];
+               u8 buf[DP_PAYLOAD_TABLE_SIZE];
                int ret;
 
                ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
@@ -3033,8 +3035,7 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
                seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n",
                           buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
                if (dump_dp_payload_table(mgr, buf))
-                       seq_printf(m, "payload table: %*ph\n", 63, buf);
-
+                       seq_printf(m, "payload table: %*ph\n", DP_PAYLOAD_TABLE_SIZE, buf);
        }
 
        mutex_unlock(&mgr->lock);
index a1b9338..32a83b4 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/moduleparam.h>
 #include <linux/mount.h>
 #include <linux/slab.h>
+#include <linux/srcu.h>
 
 #include <drm/drm_drv.h>
 #include <drm/drmP.h>
@@ -75,6 +76,8 @@ static bool drm_core_init_complete = false;
 
 static struct dentry *drm_debugfs_root;
 
+DEFINE_STATIC_SRCU(drm_unplug_srcu);
+
 /*
  * DRM Minors
  * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
@@ -318,18 +321,51 @@ void drm_put_dev(struct drm_device *dev)
 }
 EXPORT_SYMBOL(drm_put_dev);
 
-static void drm_device_set_unplugged(struct drm_device *dev)
+/**
+ * drm_dev_enter - Enter device critical section
+ * @dev: DRM device
+ * @idx: Pointer to index that will be passed to the matching drm_dev_exit()
+ *
+ * This function marks and protects the beginning of a section that should not
+ * be entered after the device has been unplugged. The section end is marked
+ * with drm_dev_exit(). Calls to this function can be nested.
+ *
+ * Returns:
+ * True if it is OK to enter the section, false otherwise.
+ */
+bool drm_dev_enter(struct drm_device *dev, int *idx)
+{
+       *idx = srcu_read_lock(&drm_unplug_srcu);
+
+       if (dev->unplugged) {
+               srcu_read_unlock(&drm_unplug_srcu, *idx);
+               return false;
+       }
+
+       return true;
+}
+EXPORT_SYMBOL(drm_dev_enter);
+
+/**
+ * drm_dev_exit - Exit device critical section
+ * @idx: index returned from drm_dev_enter()
+ *
+ * This function marks the end of a section that should not be entered after
+ * the device has been unplugged.
+ */
+void drm_dev_exit(int idx)
 {
-       smp_wmb();
-       atomic_set(&dev->unplugged, 1);
+       srcu_read_unlock(&drm_unplug_srcu, idx);
 }
+EXPORT_SYMBOL(drm_dev_exit);
 
 /**
  * drm_dev_unplug - unplug a DRM device
  * @dev: DRM device
  *
  * This unplugs a hotpluggable DRM device, which makes it inaccessible to
- * userspace operations. Entry-points can use drm_dev_is_unplugged(). This
+ * userspace operations. Entry-points can use drm_dev_enter() and
+ * drm_dev_exit() to protect device resources in a race free manner. This
  * essentially unregisters the device like drm_dev_unregister(), but can be
  * called while there are still open users of @dev.
  */
@@ -338,10 +374,18 @@ void drm_dev_unplug(struct drm_device *dev)
        drm_dev_unregister(dev);
 
        mutex_lock(&drm_global_mutex);
-       drm_device_set_unplugged(dev);
        if (dev->open_count == 0)
                drm_dev_put(dev);
        mutex_unlock(&drm_global_mutex);
+
+       /*
+        * After synchronizing any critical read section is guaranteed to see
+        * the new value of ->unplugged, and any critical section which might
+        * still have seen the old value of ->unplugged is guaranteed to have
+        * finished.
+        */
+       dev->unplugged = true;
+       synchronize_srcu(&drm_unplug_srcu);
 }
 EXPORT_SYMBOL(drm_dev_unplug);
 
index 39f1db4..08d33b4 100644 (file)
@@ -4455,7 +4455,6 @@ drm_reset_display_info(struct drm_connector *connector)
 
        info->non_desktop = 0;
 }
-EXPORT_SYMBOL_GPL(drm_reset_display_info);
 
 u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edid)
 {
@@ -4533,7 +4532,6 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi
                info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
        return quirks;
 }
-EXPORT_SYMBOL_GPL(drm_add_display_info);
 
 static int validate_displayid(u8 *displayid, int length, int idx)
 {
index ad67203..8c4d32a 100644 (file)
@@ -468,29 +468,31 @@ int drm_mode_getfb(struct drm_device *dev,
                goto out;
        }
 
+       if (!fb->funcs->create_handle) {
+               ret = -ENODEV;
+               goto out;
+       }
+
        r->height = fb->height;
        r->width = fb->width;
        r->depth = fb->format->depth;
        r->bpp = fb->format->cpp[0] * 8;
        r->pitch = fb->pitches[0];
-       if (fb->funcs->create_handle) {
-               if (drm_is_current_master(file_priv) || capable(CAP_SYS_ADMIN) ||
-                   drm_is_control_client(file_priv)) {
-                       ret = fb->funcs->create_handle(fb, file_priv,
-                                                      &r->handle);
-               } else {
-                       /* GET_FB() is an unprivileged ioctl so we must not
-                        * return a buffer-handle to non-master processes! For
-                        * backwards-compatibility reasons, we cannot make
-                        * GET_FB() privileged, so just return an invalid handle
-                        * for non-masters. */
-                       r->handle = 0;
-                       ret = 0;
-               }
-       } else {
-               ret = -ENODEV;
+
+       /* GET_FB() is an unprivileged ioctl so we must not return a
+        * buffer-handle to non-master processes! For
+        * backwards-compatibility reasons, we cannot make GET_FB() privileged,
+        * so just return an invalid handle for non-masters.
+        */
+       if (!drm_is_current_master(file_priv) && !capable(CAP_SYS_ADMIN) &&
+           !drm_is_control_client(file_priv)) {
+               r->handle = 0;
+               ret = 0;
+               goto out;
        }
 
+       ret = fb->funcs->create_handle(fb, file_priv, &r->handle);
+
 out:
        drm_framebuffer_put(fb);
 
index 4975ba9..4a16d7b 100644 (file)
@@ -436,9 +436,12 @@ err_unref:
  * @obj: object to register
  * @handlep: pointer to return the created handle to the caller
  *
- * Create a handle for this object. This adds a handle reference
- * to the object, which includes a regular reference count. Callers
- * will likely want to dereference the object afterwards.
+ * Create a handle for this object. This adds a handle reference to the object,
+ * which includes a regular reference count. Callers will likely want to
+ * dereference the object afterwards.
+ *
+ * Since this publishes @obj to userspace it must be fully set up by this point,
+ * drivers must call this last in their buffer object creation callbacks.
  */
 int drm_gem_handle_create(struct drm_file *file_priv,
                          struct drm_gem_object *obj,
index 4d682a6..acfbc06 100644 (file)
@@ -22,6 +22,7 @@
 #include <drm/drm_gem.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_modeset_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 /**
  * DOC: overview
@@ -266,6 +267,24 @@ int drm_gem_fb_prepare_fb(struct drm_plane *plane,
 EXPORT_SYMBOL_GPL(drm_gem_fb_prepare_fb);
 
 /**
+ * drm_gem_fb_simple_display_pipe_prepare_fb - prepare_fb helper for
+ *     &drm_simple_display_pipe
+ * @pipe: Simple display pipe
+ * @plane_state: Plane state
+ *
+ * This function uses drm_gem_fb_prepare_fb() to check if the plane FB has a
+ * &dma_buf attached, extracts the exclusive fence and attaches it to plane
+ * state for the atomic helper to wait on. Drivers can use this as their
+ * &drm_simple_display_pipe_funcs.prepare_fb callback.
+ */
+int drm_gem_fb_simple_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
+                                             struct drm_plane_state *plane_state)
+{
+       return drm_gem_fb_prepare_fb(&pipe->plane, plane_state);
+}
+EXPORT_SYMBOL(drm_gem_fb_simple_display_pipe_prepare_fb);
+
+/**
  * drm_gem_fbdev_fb_create - Create a GEM backed &drm_framebuffer for fbdev
  *                           emulation
  * @dev: DRM device
index d345563..50c73c0 100644 (file)
@@ -340,7 +340,7 @@ static void _drm_lease_revoke(struct drm_master *top)
                                break;
 
                        /* Over */
-                       master = list_entry(master->lessee_list.next, struct drm_master, lessee_list);
+                       master = list_next_entry(master, lessee_list);
                }
        }
 }
index 902cc1a..caebddd 100644 (file)
@@ -60,7 +60,7 @@ static const struct drm_dmi_panel_orientation_data itworks_tw891 = {
        .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
 };
 
-static const struct drm_dmi_panel_orientation_data vios_lth17 = {
+static const struct drm_dmi_panel_orientation_data lcd800x1280_rightside_up = {
        .width = 800,
        .height = 1280,
        .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
@@ -102,12 +102,30 @@ static const struct dmi_system_id orientation_data[] = {
                  DMI_EXACT_MATCH(DMI_BOARD_NAME, "TW891"),
                },
                .driver_data = (void *)&itworks_tw891,
+       }, {    /*
+                * Lenovo Ideapad Miix 310 laptop, only some production batches
+                * have a portrait screen, the resolution check makes the quirk
+                * apply only to those batches.
+                */
+               .matches = {
+                 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                 DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "80SG"),
+                 DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "MIIX 310-10ICR"),
+               },
+               .driver_data = (void *)&lcd800x1280_rightside_up,
+       }, {    /* Lenovo Ideapad Miix 320 */
+               .matches = {
+                 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                 DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "80XF"),
+                 DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 320-10ICR"),
+               },
+               .driver_data = (void *)&lcd800x1280_rightside_up,
        }, {    /* VIOS LTH17 */
                .matches = {
                  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "VIOS"),
                  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "LTH17"),
                },
-               .driver_data = (void *)&vios_lth17,
+               .driver_data = (void *)&lcd800x1280_rightside_up,
        },
        {}
 };
index 6d2a6e4..0350544 100644 (file)
@@ -756,6 +756,7 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc,
                                     struct drm_modeset_acquire_ctx *ctx)
 {
        struct drm_device *dev = crtc->dev;
+       struct drm_plane *plane = crtc->cursor;
        struct drm_framebuffer *fb = NULL;
        struct drm_mode_fb_cmd2 fbreq = {
                .width = req->width,
@@ -769,8 +770,8 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc,
        uint32_t src_w = 0, src_h = 0;
        int ret = 0;
 
-       BUG_ON(!crtc->cursor);
-       WARN_ON(crtc->cursor->crtc != crtc && crtc->cursor->crtc != NULL);
+       BUG_ON(!plane);
+       WARN_ON(plane->crtc != crtc && plane->crtc != NULL);
 
        /*
         * Obtain fb we'll be using (either new or existing) and take an extra
@@ -784,13 +785,18 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc,
                                DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n");
                                return PTR_ERR(fb);
                        }
+
                        fb->hot_x = req->hot_x;
                        fb->hot_y = req->hot_y;
                } else {
                        fb = NULL;
                }
        } else {
-               fb = crtc->cursor->fb;
+               if (plane->state)
+                       fb = plane->state->fb;
+               else
+                       fb = plane->fb;
+
                if (fb)
                        drm_framebuffer_get(fb);
        }
@@ -810,7 +816,7 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc,
                src_h = fb->height << 16;
        }
 
-       ret = __setplane_internal(crtc->cursor, crtc, fb,
+       ret = __setplane_internal(plane, crtc, fb,
                                  crtc_x, crtc_y, crtc_w, crtc_h,
                                  0, 0, src_w, src_h, ctx);
 
@@ -931,7 +937,8 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
 {
        struct drm_mode_crtc_page_flip_target *page_flip = data;
        struct drm_crtc *crtc;
-       struct drm_framebuffer *fb = NULL;
+       struct drm_plane *plane;
+       struct drm_framebuffer *fb = NULL, *old_fb;
        struct drm_pending_vblank_event *e = NULL;
        u32 target_vblank = page_flip->sequence;
        struct drm_modeset_acquire_ctx ctx;
@@ -959,6 +966,8 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
        if (!crtc)
                return -ENOENT;
 
+       plane = crtc->primary;
+
        if (crtc->funcs->page_flip_target) {
                u32 current_vblank;
                int r;
@@ -1003,11 +1012,16 @@ retry:
        ret = drm_modeset_lock(&crtc->mutex, &ctx);
        if (ret)
                goto out;
-       ret = drm_modeset_lock(&crtc->primary->mutex, &ctx);
+       ret = drm_modeset_lock(&plane->mutex, &ctx);
        if (ret)
                goto out;
 
-       if (crtc->primary->fb == NULL) {
+       if (plane->state)
+               old_fb = plane->state->fb;
+       else
+               old_fb = plane->fb;
+
+       if (old_fb == NULL) {
                /* The framebuffer is currently unbound, presumably
                 * due to a hotplug event, that userspace has not
                 * yet discovered.
@@ -1022,8 +1036,8 @@ retry:
                goto out;
        }
 
-       if (crtc->state) {
-               const struct drm_plane_state *state = crtc->primary->state;
+       if (plane->state) {
+               const struct drm_plane_state *state = plane->state;
 
                ret = drm_framebuffer_check_src_coords(state->src_x,
                                                       state->src_y,
@@ -1031,12 +1045,13 @@ retry:
                                                       state->src_h,
                                                       fb);
        } else {
-               ret = drm_crtc_check_viewport(crtc, crtc->x, crtc->y, &crtc->mode, fb);
+               ret = drm_crtc_check_viewport(crtc, crtc->x, crtc->y,
+                                             &crtc->mode, fb);
        }
        if (ret)
                goto out;
 
-       if (crtc->primary->fb->format != fb->format) {
+       if (old_fb->format != fb->format) {
                DRM_DEBUG_KMS("Page flip is not allowed to change frame buffer format.\n");
                ret = -EINVAL;
                goto out;
@@ -1048,10 +1063,12 @@ retry:
                        ret = -ENOMEM;
                        goto out;
                }
+
                e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
                e->event.base.length = sizeof(e->event);
                e->event.vbl.user_data = page_flip->user_data;
                e->event.vbl.crtc_id = crtc->base.id;
+
                ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base);
                if (ret) {
                        kfree(e);
@@ -1060,7 +1077,7 @@ retry:
                }
        }
 
-       crtc->primary->old_fb = crtc->primary->fb;
+       plane->old_fb = plane->fb;
        if (crtc->funcs->page_flip_target)
                ret = crtc->funcs->page_flip_target(crtc, fb, e,
                                                    page_flip->flags,
@@ -1073,19 +1090,18 @@ retry:
                if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT)
                        drm_event_cancel_free(dev, &e->base);
                /* Keep the old fb, don't unref it. */
-               crtc->primary->old_fb = NULL;
+               plane->old_fb = NULL;
        } else {
-               crtc->primary->fb = fb;
-               /* Unref only the old framebuffer. */
-               fb = NULL;
+               plane->fb = fb;
+               drm_framebuffer_get(fb);
        }
 
 out:
        if (fb)
                drm_framebuffer_put(fb);
-       if (crtc->primary->old_fb)
-               drm_framebuffer_put(crtc->primary->old_fb);
-       crtc->primary->old_fb = NULL;
+       if (plane->old_fb)
+               drm_framebuffer_put(plane->old_fb);
+       plane->old_fb = NULL;
 
        if (ret == -EDEADLK) {
                ret = drm_modeset_backoff(&ctx);
index 7856a9b..caf675e 100644 (file)
@@ -331,6 +331,9 @@ EXPORT_SYMBOL(drm_gem_map_dma_buf);
 
 /**
  * drm_gem_unmap_dma_buf - unmap_dma_buf implementation for GEM
+ * @attach: attachment to unmap buffer from
+ * @sgt: scatterlist info of the buffer to unmap
+ * @dir: direction of DMA transfer
  *
  * Not implemented. The unmap is done at drm_gem_map_detach().  This can be
  * used as the &dma_buf_ops.unmap_dma_buf callback.
@@ -429,6 +432,8 @@ EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
 
 /**
  * drm_gem_dmabuf_kmap_atomic - map_atomic implementation for GEM
+ * @dma_buf: buffer to be mapped
+ * @page_num: page number within the buffer
  *
  * Not implemented. This can be used as the &dma_buf_ops.map_atomic callback.
  */
@@ -441,6 +446,9 @@ EXPORT_SYMBOL(drm_gem_dmabuf_kmap_atomic);
 
 /**
  * drm_gem_dmabuf_kunmap_atomic - unmap_atomic implementation for GEM
+ * @dma_buf: buffer to be unmapped
+ * @page_num: page number within the buffer
+ * @addr: virtual address of the buffer
  *
  * Not implemented. This can be used as the &dma_buf_ops.unmap_atomic callback.
  */
@@ -453,6 +461,8 @@ EXPORT_SYMBOL(drm_gem_dmabuf_kunmap_atomic);
 
 /**
  * drm_gem_dmabuf_kmap - map implementation for GEM
+ * @dma_buf: buffer to be mapped
+ * @page_num: page number within the buffer
  *
  * Not implemented. This can be used as the &dma_buf_ops.map callback.
  */
@@ -464,6 +474,9 @@ EXPORT_SYMBOL(drm_gem_dmabuf_kmap);
 
 /**
  * drm_gem_dmabuf_kunmap - unmap implementation for GEM
+ * @dma_buf: buffer to be unmapped
+ * @page_num: page number within the buffer
+ * @addr: virtual address of the buffer
  *
  * Not implemented. This can be used as the &dma_buf_ops.unmap callback.
  */
index 657ea5a..870e25f 100644 (file)
@@ -141,7 +141,7 @@ bool drm_scdc_get_scrambling_status(struct i2c_adapter *adapter)
 
        ret = drm_scdc_readb(adapter, SCDC_SCRAMBLER_STATUS, &status);
        if (ret < 0) {
-               DRM_ERROR("Failed to read scrambling status: %d\n", ret);
+               DRM_DEBUG_KMS("Failed to read scrambling status: %d\n", ret);
                return false;
        }
 
@@ -168,7 +168,7 @@ bool drm_scdc_set_scrambling(struct i2c_adapter *adapter, bool enable)
 
        ret = drm_scdc_readb(adapter, SCDC_TMDS_CONFIG, &config);
        if (ret < 0) {
-               DRM_ERROR("Failed to read TMDS config: %d\n", ret);
+               DRM_DEBUG_KMS("Failed to read TMDS config: %d\n", ret);
                return false;
        }
 
@@ -179,7 +179,7 @@ bool drm_scdc_set_scrambling(struct i2c_adapter *adapter, bool enable)
 
        ret = drm_scdc_writeb(adapter, SCDC_TMDS_CONFIG, config);
        if (ret < 0) {
-               DRM_ERROR("Failed to enable scrambling: %d\n", ret);
+               DRM_DEBUG_KMS("Failed to enable scrambling: %d\n", ret);
                return false;
        }
 
@@ -223,7 +223,7 @@ bool drm_scdc_set_high_tmds_clock_ratio(struct i2c_adapter *adapter, bool set)
 
        ret = drm_scdc_readb(adapter, SCDC_TMDS_CONFIG, &config);
        if (ret < 0) {
-               DRM_ERROR("Failed to read TMDS config: %d\n", ret);
+               DRM_DEBUG_KMS("Failed to read TMDS config: %d\n", ret);
                return false;
        }
 
@@ -234,7 +234,7 @@ bool drm_scdc_set_high_tmds_clock_ratio(struct i2c_adapter *adapter, bool set)
 
        ret = drm_scdc_writeb(adapter, SCDC_TMDS_CONFIG, config);
        if (ret < 0) {
-               DRM_ERROR("Failed to set TMDS clock ratio: %d\n", ret);
+               DRM_DEBUG_KMS("Failed to set TMDS clock ratio: %d\n", ret);
                return false;
        }
 
index 987a353..7a00455 100644 (file)
@@ -64,13 +64,15 @@ static int drm_simple_kms_crtc_check(struct drm_crtc *crtc,
 static void drm_simple_kms_crtc_enable(struct drm_crtc *crtc,
                                       struct drm_crtc_state *old_state)
 {
+       struct drm_plane *plane;
        struct drm_simple_display_pipe *pipe;
 
        pipe = container_of(crtc, struct drm_simple_display_pipe, crtc);
        if (!pipe->funcs || !pipe->funcs->enable)
                return;
 
-       pipe->funcs->enable(pipe, crtc->state);
+       plane = &pipe->plane;
+       pipe->funcs->enable(pipe, crtc->state, plane->state);
 }
 
 static void drm_simple_kms_crtc_disable(struct drm_crtc *crtc,
index 964831d..86330f3 100644 (file)
@@ -162,7 +162,7 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
        dp->drm_dev = drm_dev;
 
        dp->plat_data.dev_type = EXYNOS_DP;
-       dp->plat_data.power_on = exynos_dp_poweron;
+       dp->plat_data.power_on_start = exynos_dp_poweron;
        dp->plat_data.power_off = exynos_dp_poweroff;
        dp->plat_data.attach = exynos_dp_bridge_attach;
        dp->plat_data.get_modes = exynos_dp_get_modes;
index a518e9c..39284bb 100644 (file)
 #define DRIVER_MAJOR   1
 #define DRIVER_MINOR   0
 
-int exynos_atomic_check(struct drm_device *dev,
-                       struct drm_atomic_state *state)
-{
-       int ret;
-
-       ret = drm_atomic_helper_check_modeset(dev, state);
-       if (ret)
-               return ret;
-
-       ret = drm_atomic_normalize_zpos(dev, state);
-       if (ret)
-               return ret;
-
-       ret = drm_atomic_helper_check_planes(dev, state);
-       if (ret)
-               return ret;
-
-       return ret;
-}
-
 static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
 {
        struct drm_exynos_file_private *file_priv;
index df2262f..075957c 100644 (file)
@@ -275,7 +275,6 @@ static inline int exynos_dpi_bind(struct drm_device *dev,
 
 int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
                         bool nonblock);
-int exynos_atomic_check(struct drm_device *dev, struct drm_atomic_state *state);
 
 
 extern struct platform_driver fimd_driver;
index f0e7917..7fcc1a7 100644 (file)
@@ -161,7 +161,7 @@ static struct drm_mode_config_helper_funcs exynos_drm_mode_config_helpers = {
 static const struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
        .fb_create = exynos_user_fb_create,
        .output_poll_changed = drm_fb_helper_output_poll_changed,
-       .atomic_check = exynos_atomic_check,
+       .atomic_check = drm_atomic_helper_check,
        .atomic_commit = drm_atomic_helper_commit,
 };
 
@@ -182,4 +182,6 @@ void exynos_drm_mode_config_init(struct drm_device *dev)
        dev->mode_config.helper_private = &exynos_drm_mode_config_helpers;
 
        dev->mode_config.allow_fb_modifiers = true;
+
+       dev->mode_config.normalize_zpos = true;
 }
index b837e7a..cb5a14b 100644 (file)
@@ -64,7 +64,7 @@ static void cdv_intel_crt_dpms(struct drm_encoder *encoder, int mode)
        REG_WRITE(reg, temp);
 }
 
-static int cdv_intel_crt_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status cdv_intel_crt_mode_valid(struct drm_connector *connector,
                                struct drm_display_mode *mode)
 {
        if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
index a4bb89b..5ea785f 100644 (file)
@@ -505,7 +505,7 @@ static void cdv_intel_edp_backlight_off (struct gma_encoder *intel_encoder)
        msleep(intel_dp->backlight_off_delay);
 }
 
-static int
+static enum drm_mode_status
 cdv_intel_dp_mode_valid(struct drm_connector *connector,
                    struct drm_display_mode *mode)
 {
index 563f193..f087899 100644 (file)
@@ -223,7 +223,7 @@ static int cdv_hdmi_get_modes(struct drm_connector *connector)
        return ret;
 }
 
-static int cdv_hdmi_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status cdv_hdmi_mode_valid(struct drm_connector *connector,
                                 struct drm_display_mode *mode)
 {
        if (mode->clock > 165000)
index e64960d..de9531c 100644 (file)
@@ -244,7 +244,7 @@ static void cdv_intel_lvds_restore(struct drm_connector *connector)
 {
 }
 
-static int cdv_intel_lvds_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status cdv_intel_lvds_mode_valid(struct drm_connector *connector,
                              struct drm_display_mode *mode)
 {
        struct drm_device *dev = connector->dev;
index acb3848..fe02092 100644 (file)
@@ -346,7 +346,7 @@ static int mdfld_dsi_connector_get_modes(struct drm_connector *connector)
        return 0;
 }
 
-static int mdfld_dsi_connector_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status mdfld_dsi_connector_mode_valid(struct drm_connector *connector,
                                                struct drm_display_mode *mode)
 {
        struct mdfld_dsi_connector *dsi_connector =
index 8b2eb32..78566a8 100644 (file)
@@ -509,7 +509,7 @@ static void oaktrail_hdmi_dpms(struct drm_encoder *encoder, int mode)
        HDMI_WRITE(HDMI_VIDEO_REG, temp);
 }
 
-static int oaktrail_hdmi_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status oaktrail_hdmi_mode_valid(struct drm_connector *connector,
                                struct drm_display_mode *mode)
 {
        if (mode->clock > 165000)
index e8e4ea1..e05e539 100644 (file)
@@ -255,7 +255,7 @@ extern int intelfb_remove(struct drm_device *dev,
 extern bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
                                      const struct drm_display_mode *mode,
                                      struct drm_display_mode *adjusted_mode);
-extern int psb_intel_lvds_mode_valid(struct drm_connector *connector,
+extern enum drm_mode_status psb_intel_lvds_mode_valid(struct drm_connector *connector,
                                     struct drm_display_mode *mode);
 extern int psb_intel_lvds_set_property(struct drm_connector *connector,
                                        struct drm_property *property,
index be3eefe..8baf632 100644 (file)
@@ -343,7 +343,7 @@ static void psb_intel_lvds_restore(struct drm_connector *connector)
        }
 }
 
-int psb_intel_lvds_mode_valid(struct drm_connector *connector,
+enum drm_mode_status psb_intel_lvds_mode_valid(struct drm_connector *connector,
                                 struct drm_display_mode *mode)
 {
        struct drm_psb_private *dev_priv = connector->dev->dev_private;
index 8450791..8dc2b19 100644 (file)
@@ -1157,7 +1157,7 @@ static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
        return;
 }
 
-static int psb_intel_sdvo_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status psb_intel_sdvo_mode_valid(struct drm_connector *connector,
                                 struct drm_display_mode *mode)
 {
        struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
index f4eba87..d2f4749 100644 (file)
@@ -27,7 +27,7 @@ static int hibmc_connector_get_modes(struct drm_connector *connector)
        return drm_add_modes_noedid(connector, 800, 600);
 }
 
-static int hibmc_connector_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status hibmc_connector_mode_valid(struct drm_connector *connector,
                                      struct drm_display_mode *mode)
 {
        return MODE_OK;
index 9e67a7b..421c8a7 100644 (file)
@@ -1106,7 +1106,7 @@ static int tda998x_connector_get_modes(struct drm_connector *connector)
        return n;
 }
 
-static int tda998x_connector_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status tda998x_connector_mode_valid(struct drm_connector *connector,
                                        struct drm_display_mode *mode)
 {
        /* TDA19988 dotclock can go up to 165MHz */
index c0a8805..de0e223 100644 (file)
@@ -748,6 +748,11 @@ intel_crt_detect(struct drm_connector *connector,
                      connector->base.id, connector->name,
                      force);
 
+       if (i915_modparams.load_detect_test) {
+               intel_display_power_get(dev_priv, intel_encoder->power_domain);
+               goto load_detect;
+       }
+
        /* Skip machines without VGA that falsely report hotplug events */
        if (dmi_check_system(intel_spurious_crt_detect))
                return connector_status_disconnected;
@@ -776,11 +781,12 @@ intel_crt_detect(struct drm_connector *connector,
         * broken monitor (without edid) to work behind a broken kvm (that fails
         * to have the right resistors for HP detection) needs to fix this up.
         * For now just bail out. */
-       if (I915_HAS_HOTPLUG(dev_priv) && !i915_modparams.load_detect_test) {
+       if (I915_HAS_HOTPLUG(dev_priv)) {
                status = connector_status_disconnected;
                goto out;
        }
 
+load_detect:
        if (!force) {
                status = connector->status;
                goto out;
index 3b48fd2..182f9bf 100644 (file)
@@ -2824,7 +2824,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
                        continue;
 
                if (intel_plane_ggtt_offset(state) == plane_config->base) {
-                       fb = c->primary->fb;
+                       fb = state->base.fb;
                        drm_framebuffer_get(fb);
                        goto valid_fb;
                }
@@ -9974,6 +9974,8 @@ found:
        ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
        if (!ret)
                ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
+       if (!ret)
+               ret = drm_atomic_add_affected_planes(restore_state, crtc);
        if (ret) {
                DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
                goto fail;
index 6467a5c..6490ee1 100644 (file)
@@ -640,7 +640,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
                if (!crtc->state->active)
                        continue;
 
-               WARN(!crtc->primary->fb,
+               WARN(!crtc->primary->state->fb,
                     "re-used BIOS config but lost an fb on crtc %d\n",
                     crtc->base.id);
        }
index fb50a9d..8918539 100644 (file)
@@ -1586,7 +1586,7 @@ static uint32_t mga_vga_calculate_mode_bandwidth(struct drm_display_mode *mode,
 
 #define MODE_BANDWIDTH MODE_BAD
 
-static int mga_vga_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status mga_vga_mode_valid(struct drm_connector *connector,
                                 struct drm_display_mode *mode)
 {
        struct drm_device *dev = connector->dev;
index 5cae8db..ffe5137 100644 (file)
@@ -99,7 +99,8 @@ static const struct drm_mode_config_funcs mxsfb_mode_config_funcs = {
 };
 
 static void mxsfb_pipe_enable(struct drm_simple_display_pipe *pipe,
-                             struct drm_crtc_state *crtc_state)
+                             struct drm_crtc_state *crtc_state,
+                             struct drm_plane_state *plane_state)
 {
        struct mxsfb_drm_private *mxsfb = drm_pipe_to_mxsfb_drm_private(pipe);
 
@@ -125,12 +126,6 @@ static void mxsfb_pipe_update(struct drm_simple_display_pipe *pipe,
        mxsfb_plane_atomic_update(mxsfb, plane_state);
 }
 
-static int mxsfb_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
-                                struct drm_plane_state *plane_state)
-{
-       return drm_gem_fb_prepare_fb(&pipe->plane, plane_state);
-}
-
 static int mxsfb_pipe_enable_vblank(struct drm_simple_display_pipe *pipe)
 {
        struct mxsfb_drm_private *mxsfb = drm_pipe_to_mxsfb_drm_private(pipe);
@@ -159,7 +154,7 @@ static struct drm_simple_display_pipe_funcs mxsfb_funcs = {
        .enable         = mxsfb_pipe_enable,
        .disable        = mxsfb_pipe_disable,
        .update         = mxsfb_pipe_update,
-       .prepare_fb     = mxsfb_pipe_prepare_fb,
+       .prepare_fb     = drm_gem_fb_simple_display_pipe_prepare_fb,
        .enable_vblank  = mxsfb_pipe_enable_vblank,
        .disable_vblank = mxsfb_pipe_disable_vblank,
 };
index e4c8d31..81c3567 100644 (file)
@@ -134,7 +134,7 @@ nvkm_cstate_find_best(struct nvkm_clk *clk, struct nvkm_pstate *pstate,
                               nvkm_volt_map(volt, volt->max2_id, clk->temp));
 
        for (cstate = start; &cstate->head != &pstate->list;
-            cstate = list_entry(cstate->head.prev, typeof(*cstate), head)) {
+            cstate = list_prev_entry(cstate, head)) {
                if (nvkm_cstate_valid(clk, cstate, max_volt, clk->temp))
                        break;
        }
index 3632854..ef3b0e3 100644 (file)
@@ -319,6 +319,9 @@ static int omap_modeset_init(struct drm_device *dev)
        dev->mode_config.max_width = 8192;
        dev->mode_config.max_height = 8192;
 
+       /* We want the zpos to be normalized */
+       dev->mode_config.normalize_zpos = true;
+
        dev->mode_config.funcs = &omap_mode_config_funcs;
        dev->mode_config.helper_private = &omap_mode_config_helper_funcs;
 
index 2899435..161233c 100644 (file)
@@ -65,7 +65,7 @@ static void omap_plane_atomic_update(struct drm_plane *plane,
        info.rotation_type = OMAP_DSS_ROT_NONE;
        info.rotation = DRM_MODE_ROTATE_0;
        info.global_alpha = 0xff;
-       info.zorder = state->zpos;
+       info.zorder = state->normalized_zpos;
 
        /* update scanout: */
        omap_framebuffer_update_scanout(state->fb, state, &info);
index 3106464..19b0d00 100644 (file)
@@ -120,7 +120,8 @@ static int pl111_display_check(struct drm_simple_display_pipe *pipe,
 }
 
 static void pl111_display_enable(struct drm_simple_display_pipe *pipe,
-                                struct drm_crtc_state *cstate)
+                                struct drm_crtc_state *cstate,
+                                struct drm_plane_state *plane_state)
 {
        struct drm_crtc *crtc = &pipe->crtc;
        struct drm_plane *plane = &pipe->plane;
@@ -376,19 +377,13 @@ static void pl111_display_disable_vblank(struct drm_simple_display_pipe *pipe)
        writel(0, priv->regs + priv->ienb);
 }
 
-static int pl111_display_prepare_fb(struct drm_simple_display_pipe *pipe,
-                                   struct drm_plane_state *plane_state)
-{
-       return drm_gem_fb_prepare_fb(&pipe->plane, plane_state);
-}
-
 static struct drm_simple_display_pipe_funcs pl111_display_funcs = {
        .mode_valid = pl111_mode_valid,
        .check = pl111_display_check,
        .enable = pl111_display_enable,
        .disable = pl111_display_disable,
        .update = pl111_display_update,
-       .prepare_fb = pl111_display_prepare_fb,
+       .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
 };
 
 static int pl111_clk_div_choose_div(struct clk_hw *hw, unsigned long rate,
index ecb35ed..820cbca 100644 (file)
@@ -1037,7 +1037,7 @@ static int qxl_conn_get_modes(struct drm_connector *connector)
        return ret;
 }
 
-static int qxl_conn_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status qxl_conn_mode_valid(struct drm_connector *connector,
                               struct drm_display_mode *mode)
 {
        struct drm_device *ddev = connector->dev;
index 5c7ec15..131d8e8 100644 (file)
@@ -87,7 +87,6 @@ struct rcar_du_device {
        struct rcar_du_vsp vsps[RCAR_DU_MAX_VSPS];
 
        struct {
-               struct drm_property *alpha;
                struct drm_property *colorkey;
        } props;
 
index 0329b35..f4ac0f8 100644 (file)
@@ -233,15 +233,7 @@ static int rcar_du_atomic_check(struct drm_device *dev,
        struct rcar_du_device *rcdu = dev->dev_private;
        int ret;
 
-       ret = drm_atomic_helper_check_modeset(dev, state);
-       if (ret)
-               return ret;
-
-       ret = drm_atomic_normalize_zpos(dev, state);
-       if (ret)
-               return ret;
-
-       ret = drm_atomic_helper_check_planes(dev, state);
+       ret = drm_atomic_helper_check(dev, state);
        if (ret)
                return ret;
 
@@ -415,11 +407,6 @@ static int rcar_du_encoders_init(struct rcar_du_device *rcdu)
 
 static int rcar_du_properties_init(struct rcar_du_device *rcdu)
 {
-       rcdu->props.alpha =
-               drm_property_create_range(rcdu->ddev, 0, "alpha", 0, 255);
-       if (rcdu->props.alpha == NULL)
-               return -ENOMEM;
-
        /*
         * The color key is expressed as an RGB888 triplet stored in a 32-bit
         * integer in XRGB8888 format. Bit 24 is used as a flag to disable (0)
@@ -529,6 +516,7 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
        dev->mode_config.min_height = 0;
        dev->mode_config.max_width = 4095;
        dev->mode_config.max_height = 2047;
+       dev->mode_config.normalize_zpos = true;
        dev->mode_config.funcs = &rcar_du_mode_config_funcs;
        dev->mode_config.helper_private = &rcar_du_mode_config_helper;
 
index 68556bd..c20f7ed 100644 (file)
@@ -423,7 +423,7 @@ static void rcar_du_plane_setup_mode(struct rcar_du_group *rgrp,
                rcar_du_plane_write(rgrp, index, PnALPHAR, PnALPHAR_ABIT_0);
        else
                rcar_du_plane_write(rgrp, index, PnALPHAR,
-                                   PnALPHAR_ABIT_X | state->alpha);
+                                   PnALPHAR_ABIT_X | state->state.alpha >> 8);
 
        pnmr = PnMR_BM_MD | state->format->pnmr;
 
@@ -692,11 +692,11 @@ static void rcar_du_plane_reset(struct drm_plane *plane)
 
        state->hwindex = -1;
        state->source = RCAR_DU_PLANE_MEMORY;
-       state->alpha = 255;
        state->colorkey = RCAR_DU_COLORKEY_NONE;
        state->state.zpos = plane->type == DRM_PLANE_TYPE_PRIMARY ? 0 : 1;
 
        plane->state = &state->state;
+       plane->state->alpha = DRM_BLEND_ALPHA_OPAQUE;
        plane->state->plane = plane;
 }
 
@@ -708,9 +708,7 @@ static int rcar_du_plane_atomic_set_property(struct drm_plane *plane,
        struct rcar_du_plane_state *rstate = to_rcar_plane_state(state);
        struct rcar_du_device *rcdu = to_rcar_plane(plane)->group->dev;
 
-       if (property == rcdu->props.alpha)
-               rstate->alpha = val;
-       else if (property == rcdu->props.colorkey)
+       if (property == rcdu->props.colorkey)
                rstate->colorkey = val;
        else
                return -EINVAL;
@@ -726,9 +724,7 @@ static int rcar_du_plane_atomic_get_property(struct drm_plane *plane,
                container_of(state, const struct rcar_du_plane_state, state);
        struct rcar_du_device *rcdu = to_rcar_plane(plane)->group->dev;
 
-       if (property == rcdu->props.alpha)
-               *val = rstate->alpha;
-       else if (property == rcdu->props.colorkey)
+       if (property == rcdu->props.colorkey)
                *val = rstate->colorkey;
        else
                return -EINVAL;
@@ -797,10 +793,9 @@ int rcar_du_planes_init(struct rcar_du_group *rgrp)
                        continue;
 
                drm_object_attach_property(&plane->plane.base,
-                                          rcdu->props.alpha, 255);
-               drm_object_attach_property(&plane->plane.base,
                                           rcdu->props.colorkey,
                                           RCAR_DU_COLORKEY_NONE);
+               drm_plane_create_alpha_property(&plane->plane);
                drm_plane_create_zpos_property(&plane->plane, 1, 1, 7);
        }
 
index 890321b..5c19c69 100644 (file)
@@ -50,7 +50,6 @@ static inline struct rcar_du_plane *to_rcar_plane(struct drm_plane *plane)
  * @state: base DRM plane state
  * @format: information about the pixel format used by the plane
  * @hwindex: 0-based hardware plane index, -1 means unused
- * @alpha: value of the plane alpha property
  * @colorkey: value of the plane colorkey property
  */
 struct rcar_du_plane_state {
@@ -60,7 +59,6 @@ struct rcar_du_plane_state {
        int hwindex;
        enum rcar_du_plane_source source;
 
-       unsigned int alpha;
        unsigned int colorkey;
 };
 
index 2c260c3..b3bec01 100644 (file)
@@ -54,6 +54,7 @@ void rcar_du_vsp_enable(struct rcar_du_crtc *crtc)
        };
        struct rcar_du_plane_state state = {
                .state = {
+                       .alpha = DRM_BLEND_ALPHA_OPAQUE,
                        .crtc = &crtc->crtc,
                        .dst.x1 = 0,
                        .dst.y1 = 0,
@@ -67,7 +68,6 @@ void rcar_du_vsp_enable(struct rcar_du_crtc *crtc)
                },
                .format = rcar_du_format_info(DRM_FORMAT_ARGB8888),
                .source = RCAR_DU_PLANE_VSPD1,
-               .alpha = 255,
                .colorkey = 0,
        };
 
@@ -173,7 +173,7 @@ static void rcar_du_vsp_plane_setup(struct rcar_du_vsp_plane *plane)
        struct vsp1_du_atomic_config cfg = {
                .pixelformat = 0,
                .pitch = fb->pitches[0],
-               .alpha = state->alpha,
+               .alpha = state->state.alpha >> 8,
                .zpos = state->state.zpos,
        };
        unsigned int i;
@@ -335,44 +335,13 @@ static void rcar_du_vsp_plane_reset(struct drm_plane *plane)
        if (state == NULL)
                return;
 
-       state->alpha = 255;
+       state->state.alpha = DRM_BLEND_ALPHA_OPAQUE;
        state->state.zpos = plane->type == DRM_PLANE_TYPE_PRIMARY ? 0 : 1;
 
        plane->state = &state->state;
        plane->state->plane = plane;
 }
 
-static int rcar_du_vsp_plane_atomic_set_property(struct drm_plane *plane,
-       struct drm_plane_state *state, struct drm_property *property,
-       uint64_t val)
-{
-       struct rcar_du_vsp_plane_state *rstate = to_rcar_vsp_plane_state(state);
-       struct rcar_du_device *rcdu = to_rcar_vsp_plane(plane)->vsp->dev;
-
-       if (property == rcdu->props.alpha)
-               rstate->alpha = val;
-       else
-               return -EINVAL;
-
-       return 0;
-}
-
-static int rcar_du_vsp_plane_atomic_get_property(struct drm_plane *plane,
-       const struct drm_plane_state *state, struct drm_property *property,
-       uint64_t *val)
-{
-       const struct rcar_du_vsp_plane_state *rstate =
-               container_of(state, const struct rcar_du_vsp_plane_state, state);
-       struct rcar_du_device *rcdu = to_rcar_vsp_plane(plane)->vsp->dev;
-
-       if (property == rcdu->props.alpha)
-               *val = rstate->alpha;
-       else
-               return -EINVAL;
-
-       return 0;
-}
-
 static const struct drm_plane_funcs rcar_du_vsp_plane_funcs = {
        .update_plane = drm_atomic_helper_update_plane,
        .disable_plane = drm_atomic_helper_disable_plane,
@@ -380,8 +349,6 @@ static const struct drm_plane_funcs rcar_du_vsp_plane_funcs = {
        .destroy = drm_plane_cleanup,
        .atomic_duplicate_state = rcar_du_vsp_plane_atomic_duplicate_state,
        .atomic_destroy_state = rcar_du_vsp_plane_atomic_destroy_state,
-       .atomic_set_property = rcar_du_vsp_plane_atomic_set_property,
-       .atomic_get_property = rcar_du_vsp_plane_atomic_get_property,
 };
 
 int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
@@ -438,8 +405,7 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
                if (type == DRM_PLANE_TYPE_PRIMARY)
                        continue;
 
-               drm_object_attach_property(&plane->plane.base,
-                                          rcdu->props.alpha, 255);
+               drm_plane_create_alpha_property(&plane->plane);
                drm_plane_create_zpos_property(&plane->plane, 1, 1,
                                               vsp->num_planes - 1);
        }
index 4c5d7bb..8a8a25c 100644 (file)
@@ -44,15 +44,12 @@ static inline struct rcar_du_vsp_plane *to_rcar_vsp_plane(struct drm_plane *p)
  * @state: base DRM plane state
  * @format: information about the pixel format used by the plane
  * @sg_tables: scatter-gather tables for the frame buffer memory
- * @alpha: value of the plane alpha property
  */
 struct rcar_du_vsp_plane_state {
        struct drm_plane_state state;
 
        const struct rcar_du_format_info *format;
        struct sg_table sg_tables[3];
-
-       unsigned int alpha;
 };
 
 static inline struct rcar_du_vsp_plane_state *
index 3e8bf79..080f053 100644 (file)
@@ -77,13 +77,13 @@ struct rockchip_dp_device {
        struct analogix_dp_plat_data plat_data;
 };
 
-static void analogix_dp_psr_set(struct drm_encoder *encoder, bool enabled)
+static int analogix_dp_psr_set(struct drm_encoder *encoder, bool enabled)
 {
        struct rockchip_dp_device *dp = to_dp(encoder);
        int ret;
 
        if (!analogix_dp_psr_enabled(dp->adp))
-               return;
+               return 0;
 
        DRM_DEV_DEBUG(dp->dev, "%s PSR...\n", enabled ? "Entry" : "Exit");
 
@@ -91,13 +91,13 @@ static void analogix_dp_psr_set(struct drm_encoder *encoder, bool enabled)
                                         PSR_WAIT_LINE_FLAG_TIMEOUT_MS);
        if (ret) {
                DRM_DEV_ERROR(dp->dev, "line flag interrupt did not arrive\n");
-               return;
+               return -ETIMEDOUT;
        }
 
        if (enabled)
-               analogix_dp_enable_psr(dp->adp);
+               return analogix_dp_enable_psr(dp->adp);
        else
-               analogix_dp_disable_psr(dp->adp);
+               return analogix_dp_disable_psr(dp->adp);
 }
 
 static int rockchip_dp_pre_init(struct rockchip_dp_device *dp)
@@ -109,7 +109,7 @@ static int rockchip_dp_pre_init(struct rockchip_dp_device *dp)
        return 0;
 }
 
-static int rockchip_dp_poweron(struct analogix_dp_plat_data *plat_data)
+static int rockchip_dp_poweron_start(struct analogix_dp_plat_data *plat_data)
 {
        struct rockchip_dp_device *dp = to_dp(plat_data);
        int ret;
@@ -127,7 +127,14 @@ static int rockchip_dp_poweron(struct analogix_dp_plat_data *plat_data)
                return ret;
        }
 
-       return rockchip_drm_psr_activate(&dp->encoder);
+       return ret;
+}
+
+static int rockchip_dp_poweron_end(struct analogix_dp_plat_data *plat_data)
+{
+       struct rockchip_dp_device *dp = to_dp(plat_data);
+
+       return rockchip_drm_psr_inhibit_put(&dp->encoder);
 }
 
 static int rockchip_dp_powerdown(struct analogix_dp_plat_data *plat_data)
@@ -135,7 +142,7 @@ static int rockchip_dp_powerdown(struct analogix_dp_plat_data *plat_data)
        struct rockchip_dp_device *dp = to_dp(plat_data);
        int ret;
 
-       ret = rockchip_drm_psr_deactivate(&dp->encoder);
+       ret = rockchip_drm_psr_inhibit_get(&dp->encoder);
        if (ret != 0)
                return ret;
 
@@ -218,6 +225,7 @@ rockchip_dp_drm_encoder_atomic_check(struct drm_encoder *encoder,
                                      struct drm_connector_state *conn_state)
 {
        struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
+       struct drm_display_info *di = &conn_state->connector->display_info;
 
        /*
         * The hardware IC designed that VOP must output the RGB10 video
@@ -229,6 +237,7 @@ rockchip_dp_drm_encoder_atomic_check(struct drm_encoder *encoder,
 
        s->output_mode = ROCKCHIP_OUT_MODE_AAAA;
        s->output_type = DRM_MODE_CONNECTOR_eDP;
+       s->output_bpc = di->bpc;
 
        return 0;
 }
@@ -328,7 +337,8 @@ static int rockchip_dp_bind(struct device *dev, struct device *master,
        dp->plat_data.encoder = &dp->encoder;
 
        dp->plat_data.dev_type = dp->data->chip_type;
-       dp->plat_data.power_on = rockchip_dp_poweron;
+       dp->plat_data.power_on_start = rockchip_dp_poweron_start;
+       dp->plat_data.power_on_end = rockchip_dp_poweron_end;
        dp->plat_data.power_off = rockchip_dp_powerdown;
        dp->plat_data.get_modes = rockchip_dp_get_modes;
 
@@ -358,6 +368,8 @@ static void rockchip_dp_unbind(struct device *dev, struct device *master,
        analogix_dp_unbind(dp->adp);
        rockchip_drm_psr_unregister(&dp->encoder);
        dp->encoder.funcs->destroy(&dp->encoder);
+
+       dp->adp = ERR_PTR(-ENODEV);
 }
 
 static const struct component_ops rockchip_dp_component_ops = {
@@ -381,6 +393,7 @@ static int rockchip_dp_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        dp->dev = dev;
+       dp->adp = ERR_PTR(-ENODEV);
        dp->plat_data.panel = panel;
 
        ret = rockchip_dp_of_probe(dp);
@@ -404,6 +417,9 @@ static int rockchip_dp_suspend(struct device *dev)
 {
        struct rockchip_dp_device *dp = dev_get_drvdata(dev);
 
+       if (IS_ERR(dp->adp))
+               return 0;
+
        return analogix_dp_suspend(dp->adp);
 }
 
@@ -411,6 +427,9 @@ static int rockchip_dp_resume(struct device *dev)
 {
        struct rockchip_dp_device *dp = dev_get_drvdata(dev);
 
+       if (IS_ERR(dp->adp))
+               return 0;
+
        return analogix_dp_resume(dp->adp);
 }
 #endif
index 9c064a4..3a6ebfc 100644 (file)
@@ -36,6 +36,7 @@ struct rockchip_crtc_state {
        struct drm_crtc_state base;
        int output_type;
        int output_mode;
+       int output_bpc;
 };
 #define to_rockchip_crtc_state(s) \
                container_of(s, struct rockchip_crtc_state, base)
index e266539..d4f4118 100644 (file)
@@ -167,8 +167,67 @@ err_gem_object_unreference:
        return ERR_PTR(ret);
 }
 
+static void
+rockchip_drm_psr_inhibit_get_state(struct drm_atomic_state *state)
+{
+       struct drm_crtc *crtc;
+       struct drm_crtc_state *crtc_state;
+       struct drm_encoder *encoder;
+       u32 encoder_mask = 0;
+       int i;
+
+       for_each_old_crtc_in_state(state, crtc, crtc_state, i) {
+               encoder_mask |= crtc_state->encoder_mask;
+               encoder_mask |= crtc->state->encoder_mask;
+       }
+
+       drm_for_each_encoder_mask(encoder, state->dev, encoder_mask)
+               rockchip_drm_psr_inhibit_get(encoder);
+}
+
+static void
+rockchip_drm_psr_inhibit_put_state(struct drm_atomic_state *state)
+{
+       struct drm_crtc *crtc;
+       struct drm_crtc_state *crtc_state;
+       struct drm_encoder *encoder;
+       u32 encoder_mask = 0;
+       int i;
+
+       for_each_old_crtc_in_state(state, crtc, crtc_state, i) {
+               encoder_mask |= crtc_state->encoder_mask;
+               encoder_mask |= crtc->state->encoder_mask;
+       }
+
+       drm_for_each_encoder_mask(encoder, state->dev, encoder_mask)
+               rockchip_drm_psr_inhibit_put(encoder);
+}
+
+static void
+rockchip_atomic_helper_commit_tail_rpm(struct drm_atomic_state *old_state)
+{
+       struct drm_device *dev = old_state->dev;
+
+       rockchip_drm_psr_inhibit_get_state(old_state);
+
+       drm_atomic_helper_commit_modeset_disables(dev, old_state);
+
+       drm_atomic_helper_commit_modeset_enables(dev, old_state);
+
+       drm_atomic_helper_commit_planes(dev, old_state,
+                                       DRM_PLANE_COMMIT_ACTIVE_ONLY);
+
+       rockchip_drm_psr_inhibit_put_state(old_state);
+
+       drm_atomic_helper_commit_hw_done(old_state);
+
+       drm_atomic_helper_wait_for_vblanks(dev, old_state);
+
+       drm_atomic_helper_cleanup_planes(dev, old_state);
+}
+
 static const struct drm_mode_config_helper_funcs rockchip_mode_config_helpers = {
-       .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
+       .atomic_commit_tail = rockchip_atomic_helper_commit_tail_rpm,
 };
 
 static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = {
index 074db7a..a8db758 100644 (file)
@@ -357,8 +357,8 @@ err_free_rk_obj:
 }
 
 /*
- * rockchip_gem_free_object - (struct drm_driver)->gem_free_object callback
- * function
+ * rockchip_gem_free_object - (struct drm_driver)->gem_free_object_unlocked
+ * callback function
  */
 void rockchip_gem_free_object(struct drm_gem_object *obj)
 {
index b339ca9..79d00d8 100644 (file)
 
 #define PSR_FLUSH_TIMEOUT_MS   100
 
-enum psr_state {
-       PSR_FLUSH,
-       PSR_ENABLE,
-       PSR_DISABLE,
-};
-
 struct psr_drv {
        struct list_head        list;
        struct drm_encoder      *encoder;
 
        struct mutex            lock;
-       bool                    active;
-       enum psr_state          state;
+       int                     inhibit_count;
+       bool                    enabled;
 
        struct delayed_work     flush_work;
 
-       void (*set)(struct drm_encoder *encoder, bool enable);
+       int (*set)(struct drm_encoder *encoder, bool enable);
 };
 
-static struct psr_drv *find_psr_by_crtc(struct drm_crtc *crtc)
-{
-       struct rockchip_drm_private *drm_drv = crtc->dev->dev_private;
-       struct psr_drv *psr;
-
-       mutex_lock(&drm_drv->psr_list_lock);
-       list_for_each_entry(psr, &drm_drv->psr_list, list) {
-               if (psr->encoder->crtc == crtc)
-                       goto out;
-       }
-       psr = ERR_PTR(-ENODEV);
-
-out:
-       mutex_unlock(&drm_drv->psr_list_lock);
-       return psr;
-}
-
 static struct psr_drv *find_psr_by_encoder(struct drm_encoder *encoder)
 {
        struct rockchip_drm_private *drm_drv = encoder->dev->dev_private;
@@ -73,46 +50,22 @@ out:
        return psr;
 }
 
-static void psr_set_state_locked(struct psr_drv *psr, enum psr_state state)
+static int psr_set_state_locked(struct psr_drv *psr, bool enable)
 {
-       /*
-        * Allowed finite state machine:
-        *
-        *   PSR_ENABLE  < = = = = = >  PSR_FLUSH
-        *       | ^                        |
-        *       | |                        |
-        *       v |                        |
-        *   PSR_DISABLE < - - - - - - - - -
-        */
-       if (state == psr->state || !psr->active)
-               return;
-
-       /* Already disabled in flush, change the state, but not the hardware */
-       if (state == PSR_DISABLE && psr->state == PSR_FLUSH) {
-               psr->state = state;
-               return;
-       }
+       int ret;
 
-       psr->state = state;
+       if (psr->inhibit_count > 0)
+               return -EINVAL;
 
-       /* Actually commit the state change to hardware */
-       switch (psr->state) {
-       case PSR_ENABLE:
-               psr->set(psr->encoder, true);
-               break;
+       if (enable == psr->enabled)
+               return 0;
 
-       case PSR_DISABLE:
-       case PSR_FLUSH:
-               psr->set(psr->encoder, false);
-               break;
-       }
-}
+       ret = psr->set(psr->encoder, enable);
+       if (ret)
+               return ret;
 
-static void psr_set_state(struct psr_drv *psr, enum psr_state state)
-{
-       mutex_lock(&psr->lock);
-       psr_set_state_locked(psr, state);
-       mutex_unlock(&psr->lock);
+       psr->enabled = enable;
+       return 0;
 }
 
 static void psr_flush_handler(struct work_struct *work)
@@ -120,21 +73,24 @@ static void psr_flush_handler(struct work_struct *work)
        struct psr_drv *psr = container_of(to_delayed_work(work),
                                           struct psr_drv, flush_work);
 
-       /* If the state has changed since we initiated the flush, do nothing */
        mutex_lock(&psr->lock);
-       if (psr->state == PSR_FLUSH)
-               psr_set_state_locked(psr, PSR_ENABLE);
+       psr_set_state_locked(psr, true);
        mutex_unlock(&psr->lock);
 }
 
 /**
- * rockchip_drm_psr_activate - activate PSR on the given pipe
+ * rockchip_drm_psr_inhibit_put - release PSR inhibit on given encoder
  * @encoder: encoder to obtain the PSR encoder
  *
+ * Decrements PSR inhibit count on given encoder. Should be called only
+ * for a PSR inhibit count increment done before. If PSR inhibit counter
+ * reaches zero, PSR flush work is scheduled to make the hardware enter
+ * PSR mode in PSR_FLUSH_TIMEOUT_MS.
+ *
  * Returns:
  * Zero on success, negative errno on failure.
  */
-int rockchip_drm_psr_activate(struct drm_encoder *encoder)
+int rockchip_drm_psr_inhibit_put(struct drm_encoder *encoder)
 {
        struct psr_drv *psr = find_psr_by_encoder(encoder);
 
@@ -142,21 +98,30 @@ int rockchip_drm_psr_activate(struct drm_encoder *encoder)
                return PTR_ERR(psr);
 
        mutex_lock(&psr->lock);
-       psr->active = true;
+       --psr->inhibit_count;
+       WARN_ON(psr->inhibit_count < 0);
+       if (!psr->inhibit_count)
+               mod_delayed_work(system_wq, &psr->flush_work,
+                                PSR_FLUSH_TIMEOUT_MS);
        mutex_unlock(&psr->lock);
 
        return 0;
 }
-EXPORT_SYMBOL(rockchip_drm_psr_activate);
+EXPORT_SYMBOL(rockchip_drm_psr_inhibit_put);
 
 /**
- * rockchip_drm_psr_deactivate - deactivate PSR on the given pipe
+ * rockchip_drm_psr_inhibit_get - acquire PSR inhibit on given encoder
  * @encoder: encoder to obtain the PSR encoder
  *
+ * Increments PSR inhibit count on given encoder. This function guarantees
+ * that after it returns PSR is turned off on given encoder and no PSR-related
+ * hardware state change occurs at least until a matching call to
+ * rockchip_drm_psr_inhibit_put() is done.
+ *
  * Returns:
  * Zero on success, negative errno on failure.
  */
-int rockchip_drm_psr_deactivate(struct drm_encoder *encoder)
+int rockchip_drm_psr_inhibit_get(struct drm_encoder *encoder)
 {
        struct psr_drv *psr = find_psr_by_encoder(encoder);
 
@@ -164,37 +129,25 @@ int rockchip_drm_psr_deactivate(struct drm_encoder *encoder)
                return PTR_ERR(psr);
 
        mutex_lock(&psr->lock);
-       psr->active = false;
+       psr_set_state_locked(psr, false);
+       ++psr->inhibit_count;
        mutex_unlock(&psr->lock);
        cancel_delayed_work_sync(&psr->flush_work);
 
        return 0;
 }
-EXPORT_SYMBOL(rockchip_drm_psr_deactivate);
+EXPORT_SYMBOL(rockchip_drm_psr_inhibit_get);
 
 static void rockchip_drm_do_flush(struct psr_drv *psr)
 {
-       psr_set_state(psr, PSR_FLUSH);
-       mod_delayed_work(system_wq, &psr->flush_work, PSR_FLUSH_TIMEOUT_MS);
-}
-
-/**
- * rockchip_drm_psr_flush - flush a single pipe
- * @crtc: CRTC of the pipe to flush
- *
- * Returns:
- * 0 on success, -errno on fail
- */
-int rockchip_drm_psr_flush(struct drm_crtc *crtc)
-{
-       struct psr_drv *psr = find_psr_by_crtc(crtc);
-       if (IS_ERR(psr))
-               return PTR_ERR(psr);
+       cancel_delayed_work_sync(&psr->flush_work);
 
-       rockchip_drm_do_flush(psr);
-       return 0;
+       mutex_lock(&psr->lock);
+       if (!psr_set_state_locked(psr, false))
+               mod_delayed_work(system_wq, &psr->flush_work,
+                                PSR_FLUSH_TIMEOUT_MS);
+       mutex_unlock(&psr->lock);
 }
-EXPORT_SYMBOL(rockchip_drm_psr_flush);
 
 /**
  * rockchip_drm_psr_flush_all - force to flush all registered PSR encoders
@@ -225,11 +178,16 @@ EXPORT_SYMBOL(rockchip_drm_psr_flush_all);
  * @encoder: encoder that obtain the PSR function
  * @psr_set: call back to set PSR state
  *
+ * The function returns with PSR inhibit counter initialized with one
+ * and the caller (typically encoder driver) needs to call
+ * rockchip_drm_psr_inhibit_put() when it becomes ready to accept PSR
+ * enable request.
+ *
  * Returns:
  * Zero on success, negative errno on failure.
  */
 int rockchip_drm_psr_register(struct drm_encoder *encoder,
-                       void (*psr_set)(struct drm_encoder *, bool enable))
+                       int (*psr_set)(struct drm_encoder *, bool enable))
 {
        struct rockchip_drm_private *drm_drv = encoder->dev->dev_private;
        struct psr_drv *psr;
@@ -244,8 +202,8 @@ int rockchip_drm_psr_register(struct drm_encoder *encoder,
        INIT_DELAYED_WORK(&psr->flush_work, psr_flush_handler);
        mutex_init(&psr->lock);
 
-       psr->active = true;
-       psr->state = PSR_DISABLE;
+       psr->inhibit_count = 1;
+       psr->enabled = false;
        psr->encoder = encoder;
        psr->set = psr_set;
 
@@ -262,6 +220,11 @@ EXPORT_SYMBOL(rockchip_drm_psr_register);
  * @encoder: encoder that obtain the PSR function
  * @psr_set: call back to set PSR state
  *
+ * It is expected that the PSR inhibit counter is 1 when this function is
+ * called, which corresponds to a state when related encoder has been
+ * disconnected from any CRTCs and its driver called
+ * rockchip_drm_psr_inhibit_get() to stop the PSR logic.
+ *
  * Returns:
  * Zero on success, negative errno on failure.
  */
@@ -273,7 +236,12 @@ void rockchip_drm_psr_unregister(struct drm_encoder *encoder)
        mutex_lock(&drm_drv->psr_list_lock);
        list_for_each_entry_safe(psr, n, &drm_drv->psr_list, list) {
                if (psr->encoder == encoder) {
-                       cancel_delayed_work_sync(&psr->flush_work);
+                       /*
+                        * Any other value would mean that the encoder
+                        * is still in use.
+                        */
+                       WARN_ON(psr->inhibit_count != 1);
+
                        list_del(&psr->list);
                        kfree(psr);
                }
index b1ea015..860c624 100644 (file)
 #define __ROCKCHIP_DRM_PSR___
 
 void rockchip_drm_psr_flush_all(struct drm_device *dev);
-int rockchip_drm_psr_flush(struct drm_crtc *crtc);
 
-int rockchip_drm_psr_activate(struct drm_encoder *encoder);
-int rockchip_drm_psr_deactivate(struct drm_encoder *encoder);
+int rockchip_drm_psr_inhibit_put(struct drm_encoder *encoder);
+int rockchip_drm_psr_inhibit_get(struct drm_encoder *encoder);
 
 int rockchip_drm_psr_register(struct drm_encoder *encoder,
-                       void (*psr_set)(struct drm_encoder *, bool enable));
+                       int (*psr_set)(struct drm_encoder *, bool enable));
 void rockchip_drm_psr_unregister(struct drm_encoder *encoder);
 
 #endif /* __ROCKCHIP_DRM_PSR__ */
index 53d4afe..fe3faa7 100644 (file)
@@ -925,6 +925,12 @@ static void vop_crtc_atomic_enable(struct drm_crtc *crtc,
        if (s->output_mode == ROCKCHIP_OUT_MODE_AAAA &&
            !(vop_data->feature & VOP_FEATURE_OUTPUT_RGB10))
                s->output_mode = ROCKCHIP_OUT_MODE_P888;
+
+       if (s->output_mode == ROCKCHIP_OUT_MODE_AAAA && s->output_bpc == 8)
+               VOP_REG_SET(vop, common, pre_dither_down, 1);
+       else
+               VOP_REG_SET(vop, common, pre_dither_down, 0);
+
        VOP_REG_SET(vop, common, out_mode, s->output_mode);
 
        VOP_REG_SET(vop, modeset, htotal_pw, (htotal << 16) | hsync_len);
@@ -1017,22 +1023,15 @@ static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
                        continue;
 
                drm_framebuffer_get(old_plane_state->fb);
+               WARN_ON(drm_crtc_vblank_get(crtc) != 0);
                drm_flip_work_queue(&vop->fb_unref_work, old_plane_state->fb);
                set_bit(VOP_PENDING_FB_UNREF, &vop->pending);
-               WARN_ON(drm_crtc_vblank_get(crtc) != 0);
        }
 }
 
-static void vop_crtc_atomic_begin(struct drm_crtc *crtc,
-                                 struct drm_crtc_state *old_crtc_state)
-{
-       rockchip_drm_psr_flush(crtc);
-}
-
 static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs = {
        .mode_fixup = vop_crtc_mode_fixup,
        .atomic_flush = vop_crtc_atomic_flush,
-       .atomic_begin = vop_crtc_atomic_begin,
        .atomic_enable = vop_crtc_atomic_enable,
        .atomic_disable = vop_crtc_atomic_disable,
 };
index 56bbd2e..084acdd 100644 (file)
@@ -67,6 +67,7 @@ struct vop_common {
        struct vop_reg cfg_done;
        struct vop_reg dsp_blank;
        struct vop_reg data_blank;
+       struct vop_reg pre_dither_down;
        struct vop_reg dither_down;
        struct vop_reg dither_up;
        struct vop_reg gate_en;
index 2e4eea3..08023d3 100644 (file)
@@ -264,6 +264,7 @@ static const struct vop_common rk3288_common = {
        .standby = VOP_REG_SYNC(RK3288_SYS_CTRL, 0x1, 22),
        .gate_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 23),
        .mmu_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 20),
+       .pre_dither_down = VOP_REG(RK3288_DSP_CTRL1, 0x1, 1),
        .dither_down = VOP_REG(RK3288_DSP_CTRL1, 0xf, 1),
        .dither_up = VOP_REG(RK3288_DSP_CTRL1, 0x1, 6),
        .data_blank = VOP_REG(RK3288_DSP_CTRL0, 0x1, 19),
index cca4b3c..1963cc1 100644 (file)
@@ -1,6 +1,6 @@
 config DRM_STI
        tristate "DRM Support for STMicroelectronics SoC stiH4xx Series"
-       depends on DRM && (ARCH_STI || ARCH_MULTIPLATFORM)
+       depends on OF && DRM && (ARCH_STI || ARCH_MULTIPLATFORM)
        select RESET_CONTROLLER
        select DRM_KMS_HELPER
        select DRM_GEM_CMA_HELPER
@@ -8,6 +8,5 @@ config DRM_STI
        select DRM_PANEL
        select FW_LOADER
        select SND_SOC_HDMI_CODEC if SND_SOC
-       select OF
        help
          Choose this option to enable DRM on STM stiH4xx chipset
index 55b6967..90c46b4 100644 (file)
@@ -119,30 +119,10 @@ err:
        return ret;
 }
 
-static int sti_atomic_check(struct drm_device *dev,
-                           struct drm_atomic_state *state)
-{
-       int ret;
-
-       ret = drm_atomic_helper_check_modeset(dev, state);
-       if (ret)
-               return ret;
-
-       ret = drm_atomic_normalize_zpos(dev, state);
-       if (ret)
-               return ret;
-
-       ret = drm_atomic_helper_check_planes(dev, state);
-       if (ret)
-               return ret;
-
-       return ret;
-}
-
 static const struct drm_mode_config_funcs sti_mode_config_funcs = {
        .fb_create = drm_gem_fb_create,
        .output_poll_changed = drm_fb_helper_output_poll_changed,
-       .atomic_check = sti_atomic_check,
+       .atomic_check = drm_atomic_helper_check,
        .atomic_commit = drm_atomic_helper_commit,
 };
 
@@ -160,6 +140,8 @@ static void sti_mode_config_init(struct drm_device *dev)
        dev->mode_config.max_height = STI_MAX_FB_HEIGHT;
 
        dev->mode_config.funcs = &sti_mode_config_funcs;
+
+       dev->mode_config.normalize_zpos = true;
 }
 
 DEFINE_DRM_GEM_CMA_FOPS(sti_driver_fops);
index b074609..b48cd86 100644 (file)
@@ -40,6 +40,7 @@ void sti_plane_update_fps(struct sti_plane *plane,
                          bool new_frame,
                          bool new_field)
 {
+       struct drm_plane_state *state = plane->drm_plane.state;
        ktime_t now;
        struct sti_fps_info *fps;
        int fpks, fipks, ms_since_last, num_frames, num_fields;
@@ -66,14 +67,14 @@ void sti_plane_update_fps(struct sti_plane *plane,
        fps->last_timestamp = now;
        fps->last_frame_counter = fps->curr_frame_counter;
 
-       if (plane->drm_plane.fb) {
+       if (state->fb) {
                fpks = (num_frames * 1000000) / ms_since_last;
                snprintf(plane->fps_info.fps_str, FPS_LENGTH,
                         "%-8s %4dx%-4d %.4s @ %3d.%-3.3d fps (%s)",
                         plane->drm_plane.name,
-                        plane->drm_plane.fb->width,
-                        plane->drm_plane.fb->height,
-                        (char *)&plane->drm_plane.fb->format->format,
+                        state->fb->width,
+                        state->fb->height,
+                        (char *)&state->fb->format->format,
                         fpks / 1000, fpks % 1000,
                         sti_plane_to_str(plane));
        }
index 9ab00a8..8698e08 100644 (file)
@@ -72,8 +72,6 @@ static struct drm_driver drv_driver = {
        .gem_prime_vmap = drm_gem_cma_prime_vmap,
        .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
        .gem_prime_mmap = drm_gem_cma_prime_mmap,
-       .enable_vblank = ltdc_crtc_enable_vblank,
-       .disable_vblank = ltdc_crtc_disable_vblank,
 };
 
 static int drv_load(struct drm_device *ddev)
index 1a3277e..e3121d9 100644 (file)
@@ -392,9 +392,6 @@ static void ltdc_crtc_update_clut(struct drm_crtc *crtc)
        u32 val;
        int i;
 
-       if (!crtc || !crtc->state)
-               return;
-
        if (!crtc->state->color_mgmt_changed || !crtc->state->gamma_lut)
                return;
 
@@ -569,9 +566,9 @@ static const struct drm_crtc_helper_funcs ltdc_crtc_helper_funcs = {
        .atomic_disable = ltdc_crtc_atomic_disable,
 };
 
-int ltdc_crtc_enable_vblank(struct drm_device *ddev, unsigned int pipe)
+static int ltdc_crtc_enable_vblank(struct drm_crtc *crtc)
 {
-       struct ltdc_device *ldev = ddev->dev_private;
+       struct ltdc_device *ldev = crtc_to_ltdc(crtc);
 
        DRM_DEBUG_DRIVER("\n");
        reg_set(ldev->regs, LTDC_IER, IER_LIE);
@@ -579,9 +576,9 @@ int ltdc_crtc_enable_vblank(struct drm_device *ddev, unsigned int pipe)
        return 0;
 }
 
-void ltdc_crtc_disable_vblank(struct drm_device *ddev, unsigned int pipe)
+static void ltdc_crtc_disable_vblank(struct drm_crtc *crtc)
 {
-       struct ltdc_device *ldev = ddev->dev_private;
+       struct ltdc_device *ldev = crtc_to_ltdc(crtc);
 
        DRM_DEBUG_DRIVER("\n");
        reg_clear(ldev->regs, LTDC_IER, IER_LIE);
@@ -594,6 +591,8 @@ static const struct drm_crtc_funcs ltdc_crtc_funcs = {
        .reset = drm_atomic_helper_crtc_reset,
        .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
        .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+       .enable_vblank = ltdc_crtc_enable_vblank,
+       .disable_vblank = ltdc_crtc_disable_vblank,
        .gamma_set = drm_atomic_helper_legacy_gamma_set,
 };
 
@@ -727,6 +726,8 @@ static void ltdc_plane_atomic_update(struct drm_plane *plane,
        reg_update_bits(ldev->regs, LTDC_L1CR + lofs,
                        LXCR_LEN | LXCR_CLUTEN, val);
 
+       ldev->plane_fpsi[plane->index].counter++;
+
        mutex_lock(&ldev->err_lock);
        if (ldev->error_status & ISR_FUIF) {
                DRM_DEBUG_DRIVER("Fifo underrun\n");
@@ -752,6 +753,25 @@ static void ltdc_plane_atomic_disable(struct drm_plane *plane,
                         oldstate->crtc->base.id, plane->base.id);
 }
 
+static void ltdc_plane_atomic_print_state(struct drm_printer *p,
+                                         const struct drm_plane_state *state)
+{
+       struct drm_plane *plane = state->plane;
+       struct ltdc_device *ldev = plane_to_ltdc(plane);
+       struct fps_info *fpsi = &ldev->plane_fpsi[plane->index];
+       int ms_since_last;
+       ktime_t now;
+
+       now = ktime_get();
+       ms_since_last = ktime_to_ms(ktime_sub(now, fpsi->last_timestamp));
+
+       drm_printf(p, "\tuser_updates=%dfps\n",
+                  DIV_ROUND_CLOSEST(fpsi->counter * 1000, ms_since_last));
+
+       fpsi->last_timestamp = now;
+       fpsi->counter = 0;
+}
+
 static const struct drm_plane_funcs ltdc_plane_funcs = {
        .update_plane = drm_atomic_helper_update_plane,
        .disable_plane = drm_atomic_helper_disable_plane,
@@ -759,6 +779,7 @@ static const struct drm_plane_funcs ltdc_plane_funcs = {
        .reset = drm_atomic_helper_plane_reset,
        .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
        .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+       .atomic_print_state = ltdc_plane_atomic_print_state,
 };
 
 static const struct drm_plane_helper_funcs ltdc_plane_helper_funcs = {
index edb2681..1e16d6a 100644 (file)
@@ -20,6 +20,13 @@ struct ltdc_caps {
        bool non_alpha_only_l1; /* non-native no-alpha formats on layer 1 */
 };
 
+#define LTDC_MAX_LAYER 4
+
+struct fps_info {
+       unsigned int counter;
+       ktime_t last_timestamp;
+};
+
 struct ltdc_device {
        void __iomem *regs;
        struct clk *pixel_clk;  /* lcd pixel clock */
@@ -27,10 +34,9 @@ struct ltdc_device {
        struct ltdc_caps caps;
        u32 error_status;
        u32 irq_status;
+       struct fps_info plane_fpsi[LTDC_MAX_LAYER];
 };
 
-int ltdc_crtc_enable_vblank(struct drm_device *dev, unsigned int pipe);
-void ltdc_crtc_disable_vblank(struct drm_device *dev, unsigned int pipe);
 int ltdc_load(struct drm_device *ddev);
 void ltdc_unload(struct drm_device *ddev);
 
index eee6bc0..156a865 100644 (file)
@@ -40,6 +40,16 @@ config DRM_SUN4I_BACKEND
          do some alpha blending and feed graphics to TCON. If M is
          selected the module will be called sun4i-backend.
 
+config DRM_SUN6I_DSI
+       tristate "Allwinner A31 MIPI-DSI Controller Support"
+       default MACH_SUN8I
+       select CRC_CCITT
+       select DRM_MIPI_DSI
+       help
+         Choose this option if you want have an Allwinner SoC with
+         MIPI-DSI support. If M is selected the module will be called
+         sun6i-dsi
+
 config DRM_SUN8I_DW_HDMI
        tristate "Support for Allwinner version of DesignWare HDMI"
        depends on DRM_SUN4I
index 330843c..2589f4a 100644 (file)
@@ -24,6 +24,9 @@ sun4i-tcon-y                  += sun4i_lvds.o
 sun4i-tcon-y                   += sun4i_tcon.o
 sun4i-tcon-y                   += sun4i_rgb.o
 
+sun6i-dsi-y                    += sun6i_mipi_dphy.o
+sun6i-dsi-y                    += sun6i_mipi_dsi.o
+
 obj-$(CONFIG_DRM_SUN4I)                += sun4i-drm.o
 obj-$(CONFIG_DRM_SUN4I)                += sun4i-tcon.o
 obj-$(CONFIG_DRM_SUN4I)                += sun4i_tv.o
@@ -31,5 +34,6 @@ obj-$(CONFIG_DRM_SUN4I)               += sun6i_drc.o
 
 obj-$(CONFIG_DRM_SUN4I_BACKEND)        += sun4i-backend.o sun4i-frontend.o
 obj-$(CONFIG_DRM_SUN4I_HDMI)   += sun4i-drm-hdmi.o
+obj-$(CONFIG_DRM_SUN6I_DSI)    += sun6i-dsi.o
 obj-$(CONFIG_DRM_SUN8I_DW_HDMI)        += sun8i-drm-hdmi.o
 obj-$(CONFIG_DRM_SUN8I_MIXER)  += sun8i-mixer.o
index 9bad54f..de0a76d 100644 (file)
@@ -295,6 +295,15 @@ int sun4i_backend_update_layer_formats(struct sun4i_backend *backend,
        DRM_DEBUG_DRIVER("Switching display backend interlaced mode %s\n",
                         interlaced ? "on" : "off");
 
+       val = SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA(state->alpha >> 8);
+       if (state->alpha != DRM_BLEND_ALPHA_OPAQUE)
+               val |= SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA_EN;
+       regmap_update_bits(backend->engine.regs,
+                          SUN4I_BACKEND_ATTCTL_REG0(layer),
+                          SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA_MASK |
+                          SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA_EN,
+                          val);
+
        if (sun4i_backend_format_is_yuv(fb->format->format))
                return sun4i_backend_update_yuv_format(backend, layer, plane);
 
@@ -490,7 +499,7 @@ static int sun4i_backend_atomic_check(struct sunxi_engine *engine,
                DRM_DEBUG_DRIVER("Plane FB format is %s\n",
                                 drm_get_format_name(fb->format->format,
                                                     &format_name));
-               if (fb->format->has_alpha)
+               if (fb->format->has_alpha || (plane_state->alpha != DRM_BLEND_ALPHA_OPAQUE))
                        num_alpha_planes++;
 
                if (sun4i_backend_format_is_yuv(fb->format->format)) {
@@ -548,7 +557,8 @@ static int sun4i_backend_atomic_check(struct sunxi_engine *engine,
        }
 
        /* We can't have an alpha plane at the lowest position */
-       if (plane_states[0]->fb->format->has_alpha)
+       if (plane_states[0]->fb->format->has_alpha ||
+           (plane_states[0]->alpha != DRM_BLEND_ALPHA_OPAQUE))
                return -EINVAL;
 
        for (i = 1; i < num_planes; i++) {
@@ -560,7 +570,7 @@ static int sun4i_backend_atomic_check(struct sunxi_engine *engine,
                 * The only alpha position is the lowest plane of the
                 * second pipe.
                 */
-               if (fb->format->has_alpha)
+               if (fb->format->has_alpha || (p_state->alpha != DRM_BLEND_ALPHA_OPAQUE))
                        current_pipe++;
 
                s_state->pipe = current_pipe;
index 316f217..4caee03 100644 (file)
 #define SUN4I_BACKEND_CKMIN_REG                        0x884
 #define SUN4I_BACKEND_CKCFG_REG                        0x888
 #define SUN4I_BACKEND_ATTCTL_REG0(l)           (0x890 + (0x4 * (l)))
+#define SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA_MASK    GENMASK(31, 24)
+#define SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA(x)              ((x) << 24)
 #define SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL_MASK     BIT(15)
 #define SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL(x)               ((x) << 15)
 #define SUN4I_BACKEND_ATTCTL_REG0_LAY_PRISEL_MASK      GENMASK(11, 10)
 #define SUN4I_BACKEND_ATTCTL_REG0_LAY_PRISEL(x)                        ((x) << 10)
 #define SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN            BIT(2)
 #define SUN4I_BACKEND_ATTCTL_REG0_LAY_VDOEN            BIT(1)
+#define SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA_EN      BIT(0)
 
 #define SUN4I_BACKEND_ATTCTL_REG1(l)           (0x8a0 + (0x4 * (l)))
 #define SUN4I_BACKEND_ATTCTL_REG1_LAY_HSCAFCT          GENMASK(15, 14)
index 2949a3c..750ad24 100644 (file)
@@ -37,6 +37,7 @@ static void sun4i_backend_layer_reset(struct drm_plane *plane)
        if (state) {
                plane->state = &state->state;
                plane->state->plane = plane;
+               plane->state->alpha = DRM_BLEND_ALPHA_OPAQUE;
                plane->state->zpos = layer->id;
        }
 }
@@ -167,6 +168,7 @@ static struct sun4i_layer *sun4i_layer_init_one(struct drm_device *drm,
                             &sun4i_backend_layer_helper_funcs);
        layer->backend = backend;
 
+       drm_plane_create_alpha_property(&layer->plane);
        drm_plane_create_zpos_property(&layer->plane, 0, 0,
                                       SUN4I_BACKEND_NUM_LAYERS - 1);
 
index c3d92d5..08747fc 100644 (file)
@@ -35,6 +35,7 @@
 #include "sun4i_lvds.h"
 #include "sun4i_rgb.h"
 #include "sun4i_tcon.h"
+#include "sun6i_mipi_dsi.h"
 #include "sunxi_engine.h"
 
 static struct drm_connector *sun4i_tcon_get_connector(const struct drm_encoder *encoder)
@@ -169,6 +170,7 @@ void sun4i_tcon_set_status(struct sun4i_tcon *tcon,
        case DRM_MODE_ENCODER_LVDS:
                is_lvds = true;
                /* Fallthrough */
+       case DRM_MODE_ENCODER_DSI:
        case DRM_MODE_ENCODER_NONE:
                channel = 0;
                break;
@@ -201,7 +203,8 @@ void sun4i_tcon_enable_vblank(struct sun4i_tcon *tcon, bool enable)
        DRM_DEBUG_DRIVER("%sabling VBLANK interrupt\n", enable ? "En" : "Dis");
 
        mask = SUN4I_TCON_GINT0_VBLANK_ENABLE(0) |
-              SUN4I_TCON_GINT0_VBLANK_ENABLE(1);
+               SUN4I_TCON_GINT0_VBLANK_ENABLE(1) |
+               SUN4I_TCON_GINT0_TCON0_TRI_FINISH_ENABLE;
 
        if (enable)
                val = mask;
@@ -273,6 +276,71 @@ static void sun4i_tcon0_mode_set_common(struct sun4i_tcon *tcon,
                     SUN4I_TCON0_BASIC0_Y(mode->crtc_vdisplay));
 }
 
+static void sun4i_tcon0_mode_set_cpu(struct sun4i_tcon *tcon,
+                                    struct mipi_dsi_device *device,
+                                    const struct drm_display_mode *mode)
+{
+       u8 bpp = mipi_dsi_pixel_format_to_bpp(device->format);
+       u8 lanes = device->lanes;
+       u32 block_space, start_delay;
+       u32 tcon_div;
+
+       tcon->dclk_min_div = 4;
+       tcon->dclk_max_div = 127;
+
+       sun4i_tcon0_mode_set_common(tcon, mode);
+
+       regmap_update_bits(tcon->regs, SUN4I_TCON0_CTL_REG,
+                          SUN4I_TCON0_CTL_IF_MASK,
+                          SUN4I_TCON0_CTL_IF_8080);
+
+       regmap_write(tcon->regs, SUN4I_TCON_ECC_FIFO_REG,
+                    SUN4I_TCON_ECC_FIFO_EN);
+
+       regmap_write(tcon->regs, SUN4I_TCON0_CPU_IF_REG,
+                    SUN4I_TCON0_CPU_IF_MODE_DSI |
+                    SUN4I_TCON0_CPU_IF_TRI_FIFO_FLUSH |
+                    SUN4I_TCON0_CPU_IF_TRI_FIFO_EN |
+                    SUN4I_TCON0_CPU_IF_TRI_EN);
+
+       /*
+        * This looks suspicious, but it works...
+        *
+        * The datasheet says that this should be set higher than 20 *
+        * pixel cycle, but it's not clear what a pixel cycle is.
+        */
+       regmap_read(tcon->regs, SUN4I_TCON0_DCLK_REG, &tcon_div);
+       tcon_div &= GENMASK(6, 0);
+       block_space = mode->htotal * bpp / (tcon_div * lanes);
+       block_space -= mode->hdisplay + 40;
+
+       regmap_write(tcon->regs, SUN4I_TCON0_CPU_TRI0_REG,
+                    SUN4I_TCON0_CPU_TRI0_BLOCK_SPACE(block_space) |
+                    SUN4I_TCON0_CPU_TRI0_BLOCK_SIZE(mode->hdisplay));
+
+       regmap_write(tcon->regs, SUN4I_TCON0_CPU_TRI1_REG,
+                    SUN4I_TCON0_CPU_TRI1_BLOCK_NUM(mode->vdisplay));
+
+       start_delay = (mode->crtc_vtotal - mode->crtc_vdisplay - 10 - 1);
+       start_delay = start_delay * mode->crtc_htotal * 149;
+       start_delay = start_delay / (mode->crtc_clock / 1000) / 8;
+       regmap_write(tcon->regs, SUN4I_TCON0_CPU_TRI2_REG,
+                    SUN4I_TCON0_CPU_TRI2_TRANS_START_SET(10) |
+                    SUN4I_TCON0_CPU_TRI2_START_DELAY(start_delay));
+
+       /*
+        * The Allwinner BSP has a comment that the period should be
+        * the display clock * 15, but uses an hardcoded 3000...
+        */
+       regmap_write(tcon->regs, SUN4I_TCON_SAFE_PERIOD_REG,
+                    SUN4I_TCON_SAFE_PERIOD_NUM(3000) |
+                    SUN4I_TCON_SAFE_PERIOD_MODE(3));
+
+       /* Enable the output on the pins */
+       regmap_write(tcon->regs, SUN4I_TCON0_IO_TRI_REG,
+                    0xe0000000);
+}
+
 static void sun4i_tcon0_mode_set_lvds(struct sun4i_tcon *tcon,
                                      const struct drm_encoder *encoder,
                                      const struct drm_display_mode *mode)
@@ -538,7 +606,17 @@ void sun4i_tcon_mode_set(struct sun4i_tcon *tcon,
                         const struct drm_encoder *encoder,
                         const struct drm_display_mode *mode)
 {
+       struct sun6i_dsi *dsi;
+
        switch (encoder->encoder_type) {
+       case DRM_MODE_ENCODER_DSI:
+               /*
+                * This is not really elegant, but it's the "cleaner"
+                * way I could think of...
+                */
+               dsi = encoder_to_sun6i_dsi(encoder);
+               sun4i_tcon0_mode_set_cpu(tcon, dsi->device, mode);
+               break;
        case DRM_MODE_ENCODER_LVDS:
                sun4i_tcon0_mode_set_lvds(tcon, encoder, mode);
                break;
@@ -582,7 +660,8 @@ static irqreturn_t sun4i_tcon_handler(int irq, void *private)
        regmap_read(tcon->regs, SUN4I_TCON_GINT0_REG, &status);
 
        if (!(status & (SUN4I_TCON_GINT0_VBLANK_INT(0) |
-                       SUN4I_TCON_GINT0_VBLANK_INT(1))))
+                       SUN4I_TCON_GINT0_VBLANK_INT(1) |
+                       SUN4I_TCON_GINT0_TCON0_TRI_FINISH_INT)))
                return IRQ_NONE;
 
        drm_crtc_handle_vblank(&scrtc->crtc);
@@ -591,7 +670,8 @@ static irqreturn_t sun4i_tcon_handler(int irq, void *private)
        /* Acknowledge the interrupt */
        regmap_update_bits(tcon->regs, SUN4I_TCON_GINT0_REG,
                           SUN4I_TCON_GINT0_VBLANK_INT(0) |
-                          SUN4I_TCON_GINT0_VBLANK_INT(1),
+                          SUN4I_TCON_GINT0_VBLANK_INT(1) |
+                          SUN4I_TCON_GINT0_TCON0_TRI_FINISH_INT,
                           0);
 
        if (engine->ops->vblank_quirk)
index 161e094..f6a071c 100644 (file)
 
 #define SUN4I_TCON_GINT0_REG                   0x4
 #define SUN4I_TCON_GINT0_VBLANK_ENABLE(pipe)           BIT(31 - (pipe))
+#define SUN4I_TCON_GINT0_TCON0_TRI_FINISH_ENABLE       BIT(27)
+#define SUN4I_TCON_GINT0_TCON0_TRI_COUNTER_ENABLE      BIT(26)
 #define SUN4I_TCON_GINT0_VBLANK_INT(pipe)              BIT(15 - (pipe))
+#define SUN4I_TCON_GINT0_TCON0_TRI_FINISH_INT          BIT(11)
+#define SUN4I_TCON_GINT0_TCON0_TRI_COUNTER_INT         BIT(10)
 
 #define SUN4I_TCON_GINT1_REG                   0x8
+
 #define SUN4I_TCON_FRM_CTL_REG                 0x10
+#define SUN4I_TCON_FRM_CTL_EN                          BIT(31)
+
+#define SUN4I_TCON_FRM_SEED_PR_REG             0x14
+#define SUN4I_TCON_FRM_SEED_PG_REG             0x18
+#define SUN4I_TCON_FRM_SEED_PB_REG             0x1c
+#define SUN4I_TCON_FRM_SEED_LR_REG             0x20
+#define SUN4I_TCON_FRM_SEED_LG_REG             0x24
+#define SUN4I_TCON_FRM_SEED_LB_REG             0x28
+#define SUN4I_TCON_FRM_TBL0_REG                        0x2c
+#define SUN4I_TCON_FRM_TBL1_REG                        0x30
+#define SUN4I_TCON_FRM_TBL2_REG                        0x34
+#define SUN4I_TCON_FRM_TBL3_REG                        0x38
 
 #define SUN4I_TCON0_CTL_REG                    0x40
 #define SUN4I_TCON0_CTL_TCON_ENABLE                    BIT(31)
+#define SUN4I_TCON0_CTL_IF_MASK                                GENMASK(25, 24)
+#define SUN4I_TCON0_CTL_IF_8080                                (1 << 24)
 #define SUN4I_TCON0_CTL_CLK_DELAY_MASK                 GENMASK(8, 4)
 #define SUN4I_TCON0_CTL_CLK_DELAY(delay)               ((delay << 4) & SUN4I_TCON0_CTL_CLK_DELAY_MASK)
 #define SUN4I_TCON0_CTL_SRC_SEL_MASK                   GENMASK(2, 0)
 #define SUN4I_TCON0_BASIC3_V_SYNC(height)              (((height) - 1) & 0x7ff)
 
 #define SUN4I_TCON0_HV_IF_REG                  0x58
+
 #define SUN4I_TCON0_CPU_IF_REG                 0x60
+#define SUN4I_TCON0_CPU_IF_MODE_MASK                   GENMASK(31, 28)
+#define SUN4I_TCON0_CPU_IF_MODE_DSI                    (1 << 28)
+#define SUN4I_TCON0_CPU_IF_TRI_FIFO_FLUSH              BIT(16)
+#define SUN4I_TCON0_CPU_IF_TRI_FIFO_EN                 BIT(2)
+#define SUN4I_TCON0_CPU_IF_TRI_EN                      BIT(0)
+
 #define SUN4I_TCON0_CPU_WR_REG                 0x64
 #define SUN4I_TCON0_CPU_RD0_REG                        0x68
 #define SUN4I_TCON0_CPU_RDA_REG                        0x6c
 
 #define SUN4I_TCON1_IO_POL_REG                 0xf0
 #define SUN4I_TCON1_IO_TRI_REG                 0xf4
+
+#define SUN4I_TCON_ECC_FIFO_REG                        0xf8
+#define SUN4I_TCON_ECC_FIFO_EN                         BIT(3)
+
 #define SUN4I_TCON_CEU_CTL_REG                 0x100
 #define SUN4I_TCON_CEU_MUL_RR_REG              0x110
 #define SUN4I_TCON_CEU_MUL_RG_REG              0x114
 #define SUN4I_TCON_CEU_RANGE_R_REG             0x140
 #define SUN4I_TCON_CEU_RANGE_G_REG             0x144
 #define SUN4I_TCON_CEU_RANGE_B_REG             0x148
+
+#define SUN4I_TCON0_CPU_TRI0_REG               0x160
+#define SUN4I_TCON0_CPU_TRI0_BLOCK_SPACE(space)                ((((space) - 1) & 0xfff) << 16)
+#define SUN4I_TCON0_CPU_TRI0_BLOCK_SIZE(size)          (((size) - 1) & 0xfff)
+
+#define SUN4I_TCON0_CPU_TRI1_REG               0x164
+#define SUN4I_TCON0_CPU_TRI1_BLOCK_NUM(num)            (((num) - 1) & 0xffff)
+
+#define SUN4I_TCON0_CPU_TRI2_REG               0x168
+#define SUN4I_TCON0_CPU_TRI2_START_DELAY(delay)                (((delay) & 0xffff) << 16)
+#define SUN4I_TCON0_CPU_TRI2_TRANS_START_SET(set)      ((set) & 0xfff)
+
+#define SUN4I_TCON_SAFE_PERIOD_REG             0x1f0
+#define SUN4I_TCON_SAFE_PERIOD_NUM(num)                        (((num) & 0xfff) << 16)
+#define SUN4I_TCON_SAFE_PERIOD_MODE(mode)              ((mode) & 0x3)
+
 #define SUN4I_TCON_MUX_CTRL_REG                        0x200
 
 #define SUN4I_TCON0_LVDS_ANA0_REG              0x220
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dphy.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dphy.c
new file mode 100644 (file)
index 0000000..e4d1943
--- /dev/null
@@ -0,0 +1,292 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2016 Allwinnertech Co., Ltd.
+ * Copyright (C) 2017-2018 Bootlin
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/of_address.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#include "sun6i_mipi_dsi.h"
+
+#define SUN6I_DPHY_GCTL_REG            0x00
+#define SUN6I_DPHY_GCTL_LANE_NUM(n)            ((((n) - 1) & 3) << 4)
+#define SUN6I_DPHY_GCTL_EN                     BIT(0)
+
+#define SUN6I_DPHY_TX_CTL_REG          0x04
+#define SUN6I_DPHY_TX_CTL_HS_TX_CLK_CONT       BIT(28)
+
+#define SUN6I_DPHY_TX_TIME0_REG                0x10
+#define SUN6I_DPHY_TX_TIME0_HS_TRAIL(n)                (((n) & 0xff) << 24)
+#define SUN6I_DPHY_TX_TIME0_HS_PREPARE(n)      (((n) & 0xff) << 16)
+#define SUN6I_DPHY_TX_TIME0_LP_CLK_DIV(n)      ((n) & 0xff)
+
+#define SUN6I_DPHY_TX_TIME1_REG                0x14
+#define SUN6I_DPHY_TX_TIME1_CLK_POST(n)                (((n) & 0xff) << 24)
+#define SUN6I_DPHY_TX_TIME1_CLK_PRE(n)         (((n) & 0xff) << 16)
+#define SUN6I_DPHY_TX_TIME1_CLK_ZERO(n)                (((n) & 0xff) << 8)
+#define SUN6I_DPHY_TX_TIME1_CLK_PREPARE(n)     ((n) & 0xff)
+
+#define SUN6I_DPHY_TX_TIME2_REG                0x18
+#define SUN6I_DPHY_TX_TIME2_CLK_TRAIL(n)       ((n) & 0xff)
+
+#define SUN6I_DPHY_TX_TIME3_REG                0x1c
+
+#define SUN6I_DPHY_TX_TIME4_REG                0x20
+#define SUN6I_DPHY_TX_TIME4_HS_TX_ANA1(n)      (((n) & 0xff) << 8)
+#define SUN6I_DPHY_TX_TIME4_HS_TX_ANA0(n)      ((n) & 0xff)
+
+#define SUN6I_DPHY_ANA0_REG            0x4c
+#define SUN6I_DPHY_ANA0_REG_PWS                        BIT(31)
+#define SUN6I_DPHY_ANA0_REG_DMPC               BIT(28)
+#define SUN6I_DPHY_ANA0_REG_DMPD(n)            (((n) & 0xf) << 24)
+#define SUN6I_DPHY_ANA0_REG_SLV(n)             (((n) & 7) << 12)
+#define SUN6I_DPHY_ANA0_REG_DEN(n)             (((n) & 0xf) << 8)
+
+#define SUN6I_DPHY_ANA1_REG            0x50
+#define SUN6I_DPHY_ANA1_REG_VTTMODE            BIT(31)
+#define SUN6I_DPHY_ANA1_REG_CSMPS(n)           (((n) & 3) << 28)
+#define SUN6I_DPHY_ANA1_REG_SVTT(n)            (((n) & 0xf) << 24)
+
+#define SUN6I_DPHY_ANA2_REG            0x54
+#define SUN6I_DPHY_ANA2_EN_P2S_CPU(n)          (((n) & 0xf) << 24)
+#define SUN6I_DPHY_ANA2_EN_P2S_CPU_MASK                GENMASK(27, 24)
+#define SUN6I_DPHY_ANA2_EN_CK_CPU              BIT(4)
+#define SUN6I_DPHY_ANA2_REG_ENIB               BIT(1)
+
+#define SUN6I_DPHY_ANA3_REG            0x58
+#define SUN6I_DPHY_ANA3_EN_VTTD(n)             (((n) & 0xf) << 28)
+#define SUN6I_DPHY_ANA3_EN_VTTD_MASK           GENMASK(31, 28)
+#define SUN6I_DPHY_ANA3_EN_VTTC                        BIT(27)
+#define SUN6I_DPHY_ANA3_EN_DIV                 BIT(26)
+#define SUN6I_DPHY_ANA3_EN_LDOC                        BIT(25)
+#define SUN6I_DPHY_ANA3_EN_LDOD                        BIT(24)
+#define SUN6I_DPHY_ANA3_EN_LDOR                        BIT(18)
+
+#define SUN6I_DPHY_ANA4_REG            0x5c
+#define SUN6I_DPHY_ANA4_REG_DMPLVC             BIT(24)
+#define SUN6I_DPHY_ANA4_REG_DMPLVD(n)          (((n) & 0xf) << 20)
+#define SUN6I_DPHY_ANA4_REG_CKDV(n)            (((n) & 0x1f) << 12)
+#define SUN6I_DPHY_ANA4_REG_TMSC(n)            (((n) & 3) << 10)
+#define SUN6I_DPHY_ANA4_REG_TMSD(n)            (((n) & 3) << 8)
+#define SUN6I_DPHY_ANA4_REG_TXDNSC(n)          (((n) & 3) << 6)
+#define SUN6I_DPHY_ANA4_REG_TXDNSD(n)          (((n) & 3) << 4)
+#define SUN6I_DPHY_ANA4_REG_TXPUSC(n)          (((n) & 3) << 2)
+#define SUN6I_DPHY_ANA4_REG_TXPUSD(n)          ((n) & 3)
+
+#define SUN6I_DPHY_DBG5_REG            0xf4
+
+/*
+ * sun6i_dphy_init - power up the MIPI D-PHY and program its TX timings
+ * @dphy: PHY instance created by sun6i_dphy_probe()
+ * @lanes: number of data lanes to enable
+ *
+ * Deasserts the PHY reset, enables the module clock and pins it to
+ * 150 MHz, writes the fixed LP/HS timing values, then enables the PHY
+ * with @lanes data lanes. Always returns 0.
+ *
+ * NOTE(review): the reset_control/clk return values are ignored here -
+ * confirm failures cannot occur or should be propagated.
+ */
+int sun6i_dphy_init(struct sun6i_dphy *dphy, unsigned int lanes)
+{
+       reset_control_deassert(dphy->reset);
+       clk_prepare_enable(dphy->mod_clk);
+       /* NOTE(review): the rate is pinned after the clock is already
+        * enabled - confirm this ordering is intentional. */
+       clk_set_rate_exclusive(dphy->mod_clk, 150000000);
+
+       /* Keep the HS clock lane toggling continuously. */
+       regmap_write(dphy->regs, SUN6I_DPHY_TX_CTL_REG,
+                    SUN6I_DPHY_TX_CTL_HS_TX_CLK_CONT);
+
+       /* Fixed BSP-derived TX timing parameters. */
+       regmap_write(dphy->regs, SUN6I_DPHY_TX_TIME0_REG,
+                    SUN6I_DPHY_TX_TIME0_LP_CLK_DIV(14) |
+                    SUN6I_DPHY_TX_TIME0_HS_PREPARE(6) |
+                    SUN6I_DPHY_TX_TIME0_HS_TRAIL(10));
+
+       regmap_write(dphy->regs, SUN6I_DPHY_TX_TIME1_REG,
+                    SUN6I_DPHY_TX_TIME1_CLK_PREPARE(7) |
+                    SUN6I_DPHY_TX_TIME1_CLK_ZERO(50) |
+                    SUN6I_DPHY_TX_TIME1_CLK_PRE(3) |
+                    SUN6I_DPHY_TX_TIME1_CLK_POST(10));
+
+       regmap_write(dphy->regs, SUN6I_DPHY_TX_TIME2_REG,
+                    SUN6I_DPHY_TX_TIME2_CLK_TRAIL(30));
+
+       regmap_write(dphy->regs, SUN6I_DPHY_TX_TIME3_REG, 0);
+
+       regmap_write(dphy->regs, SUN6I_DPHY_TX_TIME4_REG,
+                    SUN6I_DPHY_TX_TIME4_HS_TX_ANA0(3) |
+                    SUN6I_DPHY_TX_TIME4_HS_TX_ANA1(3));
+
+       /* Enable the PHY with the requested number of data lanes. */
+       regmap_write(dphy->regs, SUN6I_DPHY_GCTL_REG,
+                    SUN6I_DPHY_GCTL_LANE_NUM(lanes) |
+                    SUN6I_DPHY_GCTL_EN);
+
+       return 0;
+}
+
+/*
+ * sun6i_dphy_power_on - enable the D-PHY analog blocks
+ * @dphy: PHY instance
+ * @lanes: number of data lanes in use
+ *
+ * Follows the BSP power-up sequence: bias/drive setup (ANA0/ANA1/ANA4),
+ * internal bias block (ANA2), LDOs (ANA3), VTT termination, clock
+ * divider, clock-lane enable, and finally the per-lane parallel-to-
+ * serial converters, with short settling delays between the steps.
+ * Always returns 0.
+ */
+int sun6i_dphy_power_on(struct sun6i_dphy *dphy, unsigned int lanes)
+{
+       /* One enable bit per active data lane. */
+       u8 lanes_mask = GENMASK(lanes - 1, 0);
+
+       regmap_write(dphy->regs, SUN6I_DPHY_ANA0_REG,
+                    SUN6I_DPHY_ANA0_REG_PWS |
+                    SUN6I_DPHY_ANA0_REG_DMPC |
+                    SUN6I_DPHY_ANA0_REG_SLV(7) |
+                    SUN6I_DPHY_ANA0_REG_DMPD(lanes_mask) |
+                    SUN6I_DPHY_ANA0_REG_DEN(lanes_mask));
+
+       regmap_write(dphy->regs, SUN6I_DPHY_ANA1_REG,
+                    SUN6I_DPHY_ANA1_REG_CSMPS(1) |
+                    SUN6I_DPHY_ANA1_REG_SVTT(7));
+
+       regmap_write(dphy->regs, SUN6I_DPHY_ANA4_REG,
+                    SUN6I_DPHY_ANA4_REG_CKDV(1) |
+                    SUN6I_DPHY_ANA4_REG_TMSC(1) |
+                    SUN6I_DPHY_ANA4_REG_TMSD(1) |
+                    SUN6I_DPHY_ANA4_REG_TXDNSC(1) |
+                    SUN6I_DPHY_ANA4_REG_TXDNSD(1) |
+                    SUN6I_DPHY_ANA4_REG_TXPUSC(1) |
+                    SUN6I_DPHY_ANA4_REG_TXPUSD(1) |
+                    SUN6I_DPHY_ANA4_REG_DMPLVC |
+                    SUN6I_DPHY_ANA4_REG_DMPLVD(lanes_mask));
+
+       /* Enable the internal bias block and let it settle. */
+       regmap_write(dphy->regs, SUN6I_DPHY_ANA2_REG,
+                    SUN6I_DPHY_ANA2_REG_ENIB);
+       udelay(5);
+
+       /* Bring up the LDOs before touching termination/dividers. */
+       regmap_write(dphy->regs, SUN6I_DPHY_ANA3_REG,
+                    SUN6I_DPHY_ANA3_EN_LDOR |
+                    SUN6I_DPHY_ANA3_EN_LDOC |
+                    SUN6I_DPHY_ANA3_EN_LDOD);
+       udelay(1);
+
+       /* VTT termination for the clock lane and the active data lanes. */
+       regmap_update_bits(dphy->regs, SUN6I_DPHY_ANA3_REG,
+                          SUN6I_DPHY_ANA3_EN_VTTC |
+                          SUN6I_DPHY_ANA3_EN_VTTD_MASK,
+                          SUN6I_DPHY_ANA3_EN_VTTC |
+                          SUN6I_DPHY_ANA3_EN_VTTD(lanes_mask));
+       udelay(1);
+
+       regmap_update_bits(dphy->regs, SUN6I_DPHY_ANA3_REG,
+                          SUN6I_DPHY_ANA3_EN_DIV,
+                          SUN6I_DPHY_ANA3_EN_DIV);
+       udelay(1);
+
+       /* Enable the clock lane ... */
+       regmap_update_bits(dphy->regs, SUN6I_DPHY_ANA2_REG,
+                          SUN6I_DPHY_ANA2_EN_CK_CPU,
+                          SUN6I_DPHY_ANA2_EN_CK_CPU);
+       udelay(1);
+
+       regmap_update_bits(dphy->regs, SUN6I_DPHY_ANA1_REG,
+                          SUN6I_DPHY_ANA1_REG_VTTMODE,
+                          SUN6I_DPHY_ANA1_REG_VTTMODE);
+
+       /* ... then the parallel-to-serial converters per data lane. */
+       regmap_update_bits(dphy->regs, SUN6I_DPHY_ANA2_REG,
+                          SUN6I_DPHY_ANA2_EN_P2S_CPU_MASK,
+                          SUN6I_DPHY_ANA2_EN_P2S_CPU(lanes_mask));
+
+       return 0;
+}
+
+/*
+ * sun6i_dphy_power_off - partial analog power-down
+ *
+ * Only clears the VTT termination mode bit; the remaining analog blocks
+ * stay enabled until sun6i_dphy_exit() asserts the reset. Always
+ * returns 0.
+ */
+int sun6i_dphy_power_off(struct sun6i_dphy *dphy)
+{
+       regmap_update_bits(dphy->regs, SUN6I_DPHY_ANA1_REG,
+                          SUN6I_DPHY_ANA1_REG_VTTMODE, 0);
+
+       return 0;
+}
+
+/*
+ * sun6i_dphy_exit - undo sun6i_dphy_init()
+ *
+ * Releases the exclusive 150 MHz rate claim, gates the module clock and
+ * asserts the PHY reset. Always returns 0.
+ */
+int sun6i_dphy_exit(struct sun6i_dphy *dphy)
+{
+       clk_rate_exclusive_put(dphy->mod_clk);
+       clk_disable_unprepare(dphy->mod_clk);
+       reset_control_assert(dphy->reset);
+
+       return 0;
+}
+
+/*
+ * MMIO regmap layout for the D-PHY register block: 32-bit registers on
+ * a 4-byte stride, ending at the last debug register.
+ * NOTE(review): could be const - regmap_init only reads it.
+ */
+static struct regmap_config sun6i_dphy_regmap_config = {
+       .reg_bits       = 32,
+       .val_bits       = 32,
+       .reg_stride     = 4,
+       .max_register   = SUN6I_DPHY_DBG5_REG,
+       .name           = "mipi-dphy",
+};
+
+/* DT compatibles accepted by sun6i_dphy_probe(). */
+static const struct of_device_id sun6i_dphy_of_table[] = {
+       { .compatible = "allwinner,sun6i-a31-mipi-dphy" },
+       { }
+};
+
+/*
+ * sun6i_dphy_probe - look up and set up the D-PHY described by @node
+ * @dsi: parent DSI controller, used for devm allocations and logging
+ * @node: DT node, must match "allwinner,sun6i-a31-mipi-dphy"
+ *
+ * Maps the PHY registers behind an MMIO regmap clocked by the "bus"
+ * clock, takes a shared reference on the reset line and a reference on
+ * the "mod" clock, and stores the result in dsi->dphy.
+ *
+ * Resource ownership is mixed: @dphy and the register mapping are
+ * devm-managed by dsi->dev, while the clocks and reset are taken
+ * manually and must be released by sun6i_dphy_remove() (or the error
+ * paths below).
+ *
+ * Returns 0 on success or a negative errno.
+ */
+int sun6i_dphy_probe(struct sun6i_dsi *dsi, struct device_node *node)
+{
+       struct sun6i_dphy *dphy;
+       struct resource res;
+       void __iomem *regs;
+       int ret;
+
+       if (!of_match_node(sun6i_dphy_of_table, node)) {
+               dev_err(dsi->dev, "Incompatible D-PHY\n");
+               return -EINVAL;
+       }
+
+       dphy = devm_kzalloc(dsi->dev, sizeof(*dphy), GFP_KERNEL);
+       if (!dphy)
+               return -ENOMEM;
+
+       ret = of_address_to_resource(node, 0, &res);
+       if (ret) {
+               dev_err(dsi->dev, "phy: Couldn't get our resources\n");
+               return ret;
+       }
+
+       regs = devm_ioremap_resource(dsi->dev, &res);
+       if (IS_ERR(regs)) {
+               dev_err(dsi->dev, "Couldn't map the DPHY encoder registers\n");
+               return PTR_ERR(regs);
+       }
+
+       dphy->regs = devm_regmap_init_mmio(dsi->dev, regs,
+                                          &sun6i_dphy_regmap_config);
+       if (IS_ERR(dphy->regs)) {
+               dev_err(dsi->dev, "Couldn't create the DPHY encoder regmap\n");
+               return PTR_ERR(dphy->regs);
+       }
+
+       /* Shared: the reset line may also be held by other IP blocks. */
+       dphy->reset = of_reset_control_get_shared(node, NULL);
+       if (IS_ERR(dphy->reset)) {
+               dev_err(dsi->dev, "Couldn't get our reset line\n");
+               return PTR_ERR(dphy->reset);
+       }
+
+       dphy->bus_clk = of_clk_get_by_name(node, "bus");
+       if (IS_ERR(dphy->bus_clk)) {
+               dev_err(dsi->dev, "Couldn't get the DPHY bus clock\n");
+               ret = PTR_ERR(dphy->bus_clk);
+               goto err_free_reset;
+       }
+       /* Let the regmap core gate/ungate the bus clock around accesses. */
+       regmap_mmio_attach_clk(dphy->regs, dphy->bus_clk);
+
+       dphy->mod_clk = of_clk_get_by_name(node, "mod");
+       if (IS_ERR(dphy->mod_clk)) {
+               dev_err(dsi->dev, "Couldn't get the DPHY mod clock\n");
+               ret = PTR_ERR(dphy->mod_clk);
+               goto err_free_bus;
+       }
+
+       dsi->dphy = dphy;
+
+       return 0;
+
+err_free_bus:
+       regmap_mmio_detach_clk(dphy->regs);
+       clk_put(dphy->bus_clk);
+err_free_reset:
+       reset_control_put(dphy->reset);
+       return ret;
+}
+
+/*
+ * sun6i_dphy_remove - release the manually-acquired PHY resources
+ *
+ * Mirrors the success path of sun6i_dphy_probe(): detaches and drops
+ * both clocks and the shared reset reference. The regmap and @dphy
+ * itself are devm-managed and freed with dsi->dev. Always returns 0.
+ */
+int sun6i_dphy_remove(struct sun6i_dsi *dsi)
+{
+       struct sun6i_dphy *dphy = dsi->dphy;
+
+       regmap_mmio_detach_clk(dphy->regs);
+       clk_put(dphy->mod_clk);
+       clk_put(dphy->bus_clk);
+       reset_control_put(dphy->reset);
+
+       return 0;
+}
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
new file mode 100644 (file)
index 0000000..bfbf761
--- /dev/null
@@ -0,0 +1,1107 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2016 Allwinnertech Co., Ltd.
+ * Copyright (C) 2017-2018 Bootlin
+ *
+ * Maxime Ripard <maxime.ripard@bootlin.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/crc-ccitt.h>
+#include <linux/of_address.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#include <linux/phy/phy.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+
+#include "sun4i_drv.h"
+#include "sun6i_mipi_dsi.h"
+
+#include <video/mipi_display.h>
+
+#define SUN6I_DSI_CTL_REG              0x000
+#define SUN6I_DSI_CTL_EN                       BIT(0)
+
+#define SUN6I_DSI_BASIC_CTL_REG                0x00c
+#define SUN6I_DSI_BASIC_CTL_HBP_DIS            BIT(2)
+#define SUN6I_DSI_BASIC_CTL_HSA_HSE_DIS                BIT(1)
+#define SUN6I_DSI_BASIC_CTL_VIDEO_BURST                BIT(0)
+
+#define SUN6I_DSI_BASIC_CTL0_REG       0x010
+#define SUN6I_DSI_BASIC_CTL0_HS_EOTP_EN                BIT(18)
+#define SUN6I_DSI_BASIC_CTL0_CRC_EN            BIT(17)
+#define SUN6I_DSI_BASIC_CTL0_ECC_EN            BIT(16)
+#define SUN6I_DSI_BASIC_CTL0_INST_ST           BIT(0)
+
+#define SUN6I_DSI_BASIC_CTL1_REG       0x014
+#define SUN6I_DSI_BASIC_CTL1_VIDEO_ST_DELAY(n) (((n) & 0x1fff) << 4)
+#define SUN6I_DSI_BASIC_CTL1_VIDEO_FILL                BIT(2)
+#define SUN6I_DSI_BASIC_CTL1_VIDEO_PRECISION   BIT(1)
+#define SUN6I_DSI_BASIC_CTL1_VIDEO_MODE                BIT(0)
+
+#define SUN6I_DSI_BASIC_SIZE0_REG      0x018
+#define SUN6I_DSI_BASIC_SIZE0_VBP(n)           (((n) & 0xfff) << 16)
+#define SUN6I_DSI_BASIC_SIZE0_VSA(n)           ((n) & 0xfff)
+
+#define SUN6I_DSI_BASIC_SIZE1_REG      0x01c
+#define SUN6I_DSI_BASIC_SIZE1_VT(n)            (((n) & 0xfff) << 16)
+#define SUN6I_DSI_BASIC_SIZE1_VACT(n)          ((n) & 0xfff)
+
+#define SUN6I_DSI_INST_FUNC_REG(n)     (0x020 + (n) * 0x04)
+#define SUN6I_DSI_INST_FUNC_INST_MODE(n)       (((n) & 0xf) << 28)
+#define SUN6I_DSI_INST_FUNC_ESCAPE_ENTRY(n)    (((n) & 0xf) << 24)
+#define SUN6I_DSI_INST_FUNC_TRANS_PACKET(n)    (((n) & 0xf) << 20)
+#define SUN6I_DSI_INST_FUNC_LANE_CEN           BIT(4)
+#define SUN6I_DSI_INST_FUNC_LANE_DEN(n)                ((n) & 0xf)
+
+#define SUN6I_DSI_INST_LOOP_SEL_REG    0x040
+
+#define SUN6I_DSI_INST_LOOP_NUM_REG(n) (0x044 + (n) * 0x10)
+#define SUN6I_DSI_INST_LOOP_NUM_N1(n)          (((n) & 0xfff) << 16)
+#define SUN6I_DSI_INST_LOOP_NUM_N0(n)          ((n) & 0xfff)
+
+#define SUN6I_DSI_INST_JUMP_SEL_REG    0x048
+
+#define SUN6I_DSI_INST_JUMP_CFG_REG(n) (0x04c + (n) * 0x04)
+#define SUN6I_DSI_INST_JUMP_CFG_TO(n)          (((n) & 0xf) << 20)
+#define SUN6I_DSI_INST_JUMP_CFG_POINT(n)       (((n) & 0xf) << 16)
+#define SUN6I_DSI_INST_JUMP_CFG_NUM(n)         ((n) & 0xffff)
+
+#define SUN6I_DSI_TRANS_START_REG      0x060
+
+#define SUN6I_DSI_TRANS_ZERO_REG       0x078
+
+#define SUN6I_DSI_TCON_DRQ_REG         0x07c
+#define SUN6I_DSI_TCON_DRQ_ENABLE_MODE         BIT(28)
+#define SUN6I_DSI_TCON_DRQ_SET(n)              ((n) & 0x3ff)
+
+#define SUN6I_DSI_PIXEL_CTL0_REG       0x080
+#define SUN6I_DSI_PIXEL_CTL0_PD_PLUG_DISABLE   BIT(16)
+#define SUN6I_DSI_PIXEL_CTL0_FORMAT(n)         ((n) & 0xf)
+
+#define SUN6I_DSI_PIXEL_CTL1_REG       0x084
+
+#define SUN6I_DSI_PIXEL_PH_REG         0x090
+#define SUN6I_DSI_PIXEL_PH_ECC(n)              (((n) & 0xff) << 24)
+#define SUN6I_DSI_PIXEL_PH_WC(n)               (((n) & 0xffff) << 8)
+#define SUN6I_DSI_PIXEL_PH_VC(n)               (((n) & 3) << 6)
+#define SUN6I_DSI_PIXEL_PH_DT(n)               ((n) & 0x3f)
+
+#define SUN6I_DSI_PIXEL_PF0_REG                0x098
+#define SUN6I_DSI_PIXEL_PF0_CRC_FORCE(n)       ((n) & 0xffff)
+
+#define SUN6I_DSI_PIXEL_PF1_REG                0x09c
+#define SUN6I_DSI_PIXEL_PF1_CRC_INIT_LINEN(n)  (((n) & 0xffff) << 16)
+#define SUN6I_DSI_PIXEL_PF1_CRC_INIT_LINE0(n)  ((n) & 0xffff)
+
+#define SUN6I_DSI_SYNC_HSS_REG         0x0b0
+
+#define SUN6I_DSI_SYNC_HSE_REG         0x0b4
+
+#define SUN6I_DSI_SYNC_VSS_REG         0x0b8
+
+#define SUN6I_DSI_SYNC_VSE_REG         0x0bc
+
+#define SUN6I_DSI_BLK_HSA0_REG         0x0c0
+
+#define SUN6I_DSI_BLK_HSA1_REG         0x0c4
+#define SUN6I_DSI_BLK_PF(n)                    (((n) & 0xffff) << 16)
+#define SUN6I_DSI_BLK_PD(n)                    ((n) & 0xff)
+
+#define SUN6I_DSI_BLK_HBP0_REG         0x0c8
+
+#define SUN6I_DSI_BLK_HBP1_REG         0x0cc
+
+#define SUN6I_DSI_BLK_HFP0_REG         0x0d0
+
+#define SUN6I_DSI_BLK_HFP1_REG         0x0d4
+
+#define SUN6I_DSI_BLK_HBLK0_REG                0x0e0
+
+#define SUN6I_DSI_BLK_HBLK1_REG                0x0e4
+
+#define SUN6I_DSI_BLK_VBLK0_REG                0x0e8
+
+#define SUN6I_DSI_BLK_VBLK1_REG                0x0ec
+
+#define SUN6I_DSI_BURST_LINE_REG       0x0f0
+#define SUN6I_DSI_BURST_LINE_SYNC_POINT(n)     (((n) & 0xffff) << 16)
+#define SUN6I_DSI_BURST_LINE_NUM(n)            ((n) & 0xffff)
+
+#define SUN6I_DSI_BURST_DRQ_REG                0x0f4
+#define SUN6I_DSI_BURST_DRQ_EDGE1(n)           (((n) & 0xffff) << 16)
+#define SUN6I_DSI_BURST_DRQ_EDGE0(n)           ((n) & 0xffff)
+
+#define SUN6I_DSI_CMD_CTL_REG          0x200
+#define SUN6I_DSI_CMD_CTL_RX_OVERFLOW          BIT(26)
+#define SUN6I_DSI_CMD_CTL_RX_FLAG              BIT(25)
+#define SUN6I_DSI_CMD_CTL_TX_FLAG              BIT(9)
+
+#define SUN6I_DSI_CMD_RX_REG(n)                (0x240 + (n) * 0x04)
+
+#define SUN6I_DSI_DEBUG_DATA_REG       0x2f8
+
+#define SUN6I_DSI_CMD_TX_REG(n)                (0x300 + (n) * 0x04)
+
+/* Transfer types that can be kicked off via sun6i_dsi_start(). */
+enum sun6i_dsi_start_inst {
+       DSI_START_LPRX,
+       DSI_START_LPTX,
+       DSI_START_HSC,
+       DSI_START_HSD,
+};
+
+/*
+ * Slots in the controller's instruction table; each maps to one
+ * SUN6I_DSI_INST_FUNC_REG(). Slot 15 (END) terminates a jump chain.
+ */
+enum sun6i_dsi_inst_id {
+       DSI_INST_ID_LP11        = 0,
+       DSI_INST_ID_TBA,
+       DSI_INST_ID_HSC,
+       DSI_INST_ID_HSD,
+       DSI_INST_ID_LPDT,
+       DSI_INST_ID_HSCEXIT,
+       DSI_INST_ID_NOP,
+       DSI_INST_ID_DLY,
+       DSI_INST_ID_END         = 15,
+};
+
+/* Operation an instruction slot performs (INST_FUNC mode field). */
+enum sun6i_dsi_inst_mode {
+       DSI_INST_MODE_STOP      = 0,
+       DSI_INST_MODE_TBA,
+       DSI_INST_MODE_HS,
+       DSI_INST_MODE_ESCAPE,
+       DSI_INST_MODE_HSCEXIT,
+       DSI_INST_MODE_NOP,
+};
+
+/* Escape-mode entry codes (INST_FUNC escape field); UN* are unused. */
+enum sun6i_dsi_inst_escape {
+       DSI_INST_ESCA_LPDT      = 0,
+       DSI_INST_ESCA_ULPS,
+       DSI_INST_ESCA_UN1,
+       DSI_INST_ESCA_UN2,
+       DSI_INST_ESCA_RESET,
+       DSI_INST_ESCA_UN3,
+       DSI_INST_ESCA_UN4,
+       DSI_INST_ESCA_UN5,
+};
+
+/* Packet source an instruction transmits (INST_FUNC packet field). */
+enum sun6i_dsi_inst_packet {
+       DSI_INST_PACK_PIXEL     = 0,
+       DSI_INST_PACK_COMMAND,
+};
+
+/*
+ * Parity masks over the 24 packet-header payload bits: ECC bit i is the
+ * XOR of the header bits selected by sun6i_dsi_ecc_array[i] (see
+ * sun6i_dsi_ecc_compute()).
+ */
+static const u32 sun6i_dsi_ecc_array[] = {
+       [0] = (BIT(0) | BIT(1) | BIT(2) | BIT(4) | BIT(5) | BIT(7) | BIT(10) |
+              BIT(11) | BIT(13) | BIT(16) | BIT(20) | BIT(21) | BIT(22) |
+              BIT(23)),
+       [1] = (BIT(0) | BIT(1) | BIT(3) | BIT(4) | BIT(6) | BIT(8) | BIT(10) |
+              BIT(12) | BIT(14) | BIT(17) | BIT(20) | BIT(21) | BIT(22) |
+              BIT(23)),
+       [2] = (BIT(0) | BIT(2) | BIT(3) | BIT(5) | BIT(6) | BIT(9) | BIT(11) |
+              BIT(12) | BIT(15) | BIT(18) | BIT(20) | BIT(21) | BIT(22)),
+       [3] = (BIT(1) | BIT(2) | BIT(3) | BIT(7) | BIT(8) | BIT(9) | BIT(13) |
+              BIT(14) | BIT(15) | BIT(19) | BIT(20) | BIT(21) | BIT(23)),
+       [4] = (BIT(4) | BIT(5) | BIT(6) | BIT(7) | BIT(8) | BIT(9) | BIT(16) |
+              BIT(17) | BIT(18) | BIT(19) | BIT(20) | BIT(22) | BIT(23)),
+       [5] = (BIT(10) | BIT(11) | BIT(12) | BIT(13) | BIT(14) | BIT(15) |
+              BIT(16) | BIT(17) | BIT(18) | BIT(19) | BIT(21) | BIT(22) |
+              BIT(23)),
+};
+
+/*
+ * Compute the packet-header ECC for the low 24 bits of @data: each of
+ * the six ECC bits is the parity (XOR) of the header bits selected by
+ * the corresponding mask in sun6i_dsi_ecc_array. The result fits in
+ * 6 bits even though it is returned as a u32.
+ */
+static u32 sun6i_dsi_ecc_compute(unsigned int data)
+{
+       int i;
+       u8 ecc = 0;
+
+       for (i = 0; i < ARRAY_SIZE(sun6i_dsi_ecc_array); i++) {
+               u32 field = sun6i_dsi_ecc_array[i];
+               bool init = false;
+               u8 val = 0;
+               int j;
+
+               /* XOR together the data bits this mask selects. */
+               for (j = 0; j < 24; j++) {
+                       if (!(BIT(j) & field))
+                               continue;
+
+                       if (!init) {
+                               val = (BIT(j) & data) ? 1 : 0;
+                               init = true;
+                       } else {
+                               val ^= (BIT(j) & data) ? 1 : 0;
+                       }
+               }
+
+               ecc |= val << i;
+       }
+
+       return ecc;
+}
+
+/* CRC16-CCITT (init 0xffff) over @len bytes, used in packet footers. */
+static u16 sun6i_dsi_crc_compute(u8 const *buffer, size_t len)
+{
+       return crc_ccitt(0xffff, buffer, len);
+}
+
+/*
+ * CRC of @len copies of the repeated filler byte @pd, as carried in the
+ * blanking-packet footers built by sun6i_dsi_build_blk1_pkt().
+ * NOTE(review): this puts a caller-sized VLA on the stack; @len comes
+ * from the computed blanking sizes - confirm it stays small enough, or
+ * consider a bounded/heap buffer.
+ */
+static u16 sun6i_dsi_crc_repeat_compute(u8 pd, size_t len)
+{
+       u8 buffer[len];
+
+       memset(buffer, pd, len);
+
+       return sun6i_dsi_crc_compute(buffer, len);
+}
+
+/*
+ * Build a 4-byte DSI short packet: data type @dt in bits 0-5, virtual
+ * channel @vc in bits 6-7, two data bytes, and the ECC of the low
+ * 24 bits in the top byte.
+ */
+static u32 sun6i_dsi_build_sync_pkt(u8 dt, u8 vc, u8 d0, u8 d1)
+{
+       u32 val = dt & 0x3f;
+
+       val |= (vc & 3) << 6;
+       val |= (d0 & 0xff) << 8;
+       val |= (d1 & 0xff) << 16;
+       val |= sun6i_dsi_ecc_compute(val) << 24;
+
+       return val;
+}
+
+/* Blanking-packet header carrying the payload word count @wc (LE). */
+static u32 sun6i_dsi_build_blk0_pkt(u8 vc, u16 wc)
+{
+       return sun6i_dsi_build_sync_pkt(MIPI_DSI_BLANKING_PACKET, vc,
+                                       wc & 0xff, wc >> 8);
+}
+
+/*
+ * Blanking-packet footer: the filler byte @pd plus the CRC of @len
+ * repetitions of it.
+ */
+static u32 sun6i_dsi_build_blk1_pkt(u16 pd, size_t len)
+{
+       u32 val = SUN6I_DSI_BLK_PD(pd);
+
+       return val | SUN6I_DSI_BLK_PF(sun6i_dsi_crc_repeat_compute(pd, len));
+}
+
+/* Stop any instruction sequence currently running. */
+static void sun6i_dsi_inst_abort(struct sun6i_dsi *dsi)
+{
+       regmap_update_bits(dsi->regs, SUN6I_DSI_BASIC_CTL0_REG,
+                          SUN6I_DSI_BASIC_CTL0_INST_ST, 0);
+}
+
+/* Start executing the programmed instruction sequence. */
+static void sun6i_dsi_inst_commit(struct sun6i_dsi *dsi)
+{
+       regmap_update_bits(dsi->regs, SUN6I_DSI_BASIC_CTL0_REG,
+                          SUN6I_DSI_BASIC_CTL0_INST_ST,
+                          SUN6I_DSI_BASIC_CTL0_INST_ST);
+}
+
+/*
+ * Poll until the sequence completes (INST_ST self-clears), sampling
+ * every 100us with a 5ms timeout. Returns 0 or -ETIMEDOUT from
+ * regmap_read_poll_timeout().
+ */
+static int sun6i_dsi_inst_wait_for_completion(struct sun6i_dsi *dsi)
+{
+       u32 val;
+
+       return regmap_read_poll_timeout(dsi->regs, SUN6I_DSI_BASIC_CTL0_REG,
+                                       val,
+                                       !(val & SUN6I_DSI_BASIC_CTL0_INST_ST),
+                                       100, 5000);
+}
+
+/*
+ * Program one instruction slot @id: its mode, escape entry, packet
+ * source, whether the clock lane participates (@clock) and the data
+ * lane enable mask (@data).
+ */
+static void sun6i_dsi_inst_setup(struct sun6i_dsi *dsi,
+                                enum sun6i_dsi_inst_id id,
+                                enum sun6i_dsi_inst_mode mode,
+                                bool clock, u8 data,
+                                enum sun6i_dsi_inst_packet packet,
+                                enum sun6i_dsi_inst_escape escape)
+{
+       regmap_write(dsi->regs, SUN6I_DSI_INST_FUNC_REG(id),
+                    SUN6I_DSI_INST_FUNC_INST_MODE(mode) |
+                    SUN6I_DSI_INST_FUNC_ESCAPE_ENTRY(escape) |
+                    SUN6I_DSI_INST_FUNC_TRANS_PACKET(packet) |
+                    (clock ? SUN6I_DSI_INST_FUNC_LANE_CEN : 0) |
+                    SUN6I_DSI_INST_FUNC_LANE_DEN(data));
+}
+
+/*
+ * Program the driver's standard instruction set into the controller's
+ * slots (LP11 stop, turnaround, HS clock/data, LP data transmit,
+ * HS-clock exit, NOP and delay), then configure jump slot 0 so that
+ * after one pass through NOP execution jumps to HSCEXIT.
+ */
+static void sun6i_dsi_inst_init(struct sun6i_dsi *dsi,
+                               struct mipi_dsi_device *device)
+{
+       /* One enable bit per lane the attached device uses. */
+       u8 lanes_mask = GENMASK(device->lanes - 1, 0);
+
+       sun6i_dsi_inst_setup(dsi, DSI_INST_ID_LP11, DSI_INST_MODE_STOP,
+                            true, lanes_mask, 0, 0);
+
+       sun6i_dsi_inst_setup(dsi, DSI_INST_ID_TBA, DSI_INST_MODE_TBA,
+                            false, 1, 0, 0);
+
+       sun6i_dsi_inst_setup(dsi, DSI_INST_ID_HSC, DSI_INST_MODE_HS,
+                            true, 0, DSI_INST_PACK_PIXEL, 0);
+
+       sun6i_dsi_inst_setup(dsi, DSI_INST_ID_HSD, DSI_INST_MODE_HS,
+                            false, lanes_mask, DSI_INST_PACK_PIXEL, 0);
+
+       sun6i_dsi_inst_setup(dsi, DSI_INST_ID_LPDT, DSI_INST_MODE_ESCAPE,
+                            false, 1, DSI_INST_PACK_COMMAND,
+                            DSI_INST_ESCA_LPDT);
+
+       sun6i_dsi_inst_setup(dsi, DSI_INST_ID_HSCEXIT, DSI_INST_MODE_HSCEXIT,
+                            true, 0, 0, 0);
+
+       sun6i_dsi_inst_setup(dsi, DSI_INST_ID_NOP, DSI_INST_MODE_STOP,
+                            false, lanes_mask, 0, 0);
+
+       sun6i_dsi_inst_setup(dsi, DSI_INST_ID_DLY, DSI_INST_MODE_NOP,
+                            true, lanes_mask, 0, 0);
+
+       regmap_write(dsi->regs, SUN6I_DSI_INST_JUMP_CFG_REG(0),
+                    SUN6I_DSI_INST_JUMP_CFG_POINT(DSI_INST_ID_NOP) |
+                    SUN6I_DSI_INST_JUMP_CFG_TO(DSI_INST_ID_HSCEXIT) |
+                    SUN6I_DSI_INST_JUMP_CFG_NUM(1));
+};
+
+/*
+ * Number of scanlines to wait before starting the video transfer
+ * (written into BASIC_CTL1's VIDEO_ST_DELAY field by the caller).
+ * BSP-derived: vtotal minus the vsync_end-to-vdisplay span, plus one.
+ * NOTE(review): later kernels rework this formula - verify against the
+ * panel timings actually in use.
+ */
+static u16 sun6i_dsi_get_video_start_delay(struct sun6i_dsi *dsi,
+                                          struct drm_display_mode *mode)
+{
+       return mode->vtotal - (mode->vsync_end - mode->vdisplay) + 1;
+}
+
+/*
+ * Configure the TCON data-request (DRQ) threshold. When the span from
+ * active end to hsync end exceeds 20 pixels, enable DRQ mode with a
+ * threshold converted from that span into 32-bit words; otherwise the
+ * register is simply cleared.
+ */
+static void sun6i_dsi_setup_burst(struct sun6i_dsi *dsi,
+                                 struct drm_display_mode *mode)
+{
+       struct mipi_dsi_device *device = dsi->device;
+       u32 val = 0;
+
+       if ((mode->hsync_end - mode->hdisplay) > 20) {
+               /* Maaaaaagic */
+               u16 drq = (mode->hsync_end - mode->hdisplay) - 20;
+
+               /* pixels -> bits -> 32-bit words */
+               drq *= mipi_dsi_pixel_format_to_bpp(device->format);
+               drq /= 32;
+
+               val = (SUN6I_DSI_TCON_DRQ_ENABLE_MODE |
+                      SUN6I_DSI_TCON_DRQ_SET(drq));
+       }
+
+       regmap_write(dsi->regs, SUN6I_DSI_TCON_DRQ_REG, val);
+}
+
+/*
+ * Program both instruction-loop counter registers with a fixed
+ * 50-cycle count in the N0 and N1 fields alike. @mode is currently
+ * unused.
+ */
+static void sun6i_dsi_setup_inst_loop(struct sun6i_dsi *dsi,
+                                     struct drm_display_mode *mode)
+{
+       u16 delay = 50 - 1;
+
+       regmap_write(dsi->regs, SUN6I_DSI_INST_LOOP_NUM_REG(0),
+                    SUN6I_DSI_INST_LOOP_NUM_N0(50 - 1) |
+                    SUN6I_DSI_INST_LOOP_NUM_N1(delay));
+       regmap_write(dsi->regs, SUN6I_DSI_INST_LOOP_NUM_REG(1),
+                    SUN6I_DSI_INST_LOOP_NUM_N0(50 - 1) |
+                    SUN6I_DSI_INST_LOOP_NUM_N1(delay));
+}
+
+/*
+ * Program the pixel packet header (data type, virtual channel, word
+ * count, ECC), the CRC init/force values and the controller's pixel
+ * format code for the attached device's format. A format outside the
+ * four handled cases leaves every register untouched.
+ */
+static void sun6i_dsi_setup_format(struct sun6i_dsi *dsi,
+                                  struct drm_display_mode *mode)
+{
+       struct mipi_dsi_device *device = dsi->device;
+       u32 val = SUN6I_DSI_PIXEL_PH_VC(device->channel);
+       u8 dt, fmt;
+       u16 wc;
+
+       /*
+        * TODO: The format defines are only valid in video mode and
+        * change in command mode.
+        */
+       switch (device->format) {
+       case MIPI_DSI_FMT_RGB888:
+               dt = MIPI_DSI_PACKED_PIXEL_STREAM_24;
+               fmt = 8;
+               break;
+       case MIPI_DSI_FMT_RGB666:
+               dt = MIPI_DSI_PIXEL_STREAM_3BYTE_18;
+               fmt = 9;
+               break;
+       case MIPI_DSI_FMT_RGB666_PACKED:
+               dt = MIPI_DSI_PACKED_PIXEL_STREAM_18;
+               fmt = 10;
+               break;
+       case MIPI_DSI_FMT_RGB565:
+               dt = MIPI_DSI_PACKED_PIXEL_STREAM_16;
+               fmt = 11;
+               break;
+       default:
+               return;
+       }
+       val |= SUN6I_DSI_PIXEL_PH_DT(dt);
+
+       /* Payload word count: one line of pixels, in bytes. */
+       wc = mode->hdisplay * mipi_dsi_pixel_format_to_bpp(device->format) / 8;
+       val |= SUN6I_DSI_PIXEL_PH_WC(wc);
+       val |= SUN6I_DSI_PIXEL_PH_ECC(sun6i_dsi_ecc_compute(val));
+
+       regmap_write(dsi->regs, SUN6I_DSI_PIXEL_PH_REG, val);
+
+       regmap_write(dsi->regs, SUN6I_DSI_PIXEL_PF0_REG,
+                    SUN6I_DSI_PIXEL_PF0_CRC_FORCE(0xffff));
+
+       regmap_write(dsi->regs, SUN6I_DSI_PIXEL_PF1_REG,
+                    SUN6I_DSI_PIXEL_PF1_CRC_INIT_LINE0(0xffff) |
+                    SUN6I_DSI_PIXEL_PF1_CRC_INIT_LINEN(0xffff));
+
+       regmap_write(dsi->regs, SUN6I_DSI_PIXEL_CTL0_REG,
+                    SUN6I_DSI_PIXEL_CTL0_PD_PLUG_DISABLE |
+                    SUN6I_DSI_PIXEL_CTL0_FORMAT(fmt));
+}
+
+/*
+ * Program the sync-event packets, the vertical geometry and the
+ * horizontal blanking packets (HSA/HBP/HFP/HBLK/VBLK) derived from
+ * @mode, with all horizontal spans converted from pixels to bytes via
+ * the device's bytes-per-pixel (Bpp).
+ *
+ * NOTE(review): the hsa/hbp/hfp computations both clamp to the packet
+ * overhead and subtract it from the payload size; upstream later
+ * reworked these formulas - confirm against the DSI spec and the panel
+ * in use.
+ */
+static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
+                                   struct drm_display_mode *mode)
+{
+       struct mipi_dsi_device *device = dsi->device;
+       unsigned int Bpp = mipi_dsi_pixel_format_to_bpp(device->format) / 8;
+       u16 hbp, hfp, hsa, hblk, vblk;
+
+       regmap_write(dsi->regs, SUN6I_DSI_BASIC_CTL_REG, 0);
+
+       /* Sync-event short packets for each of the four sync edges. */
+       regmap_write(dsi->regs, SUN6I_DSI_SYNC_HSS_REG,
+                    sun6i_dsi_build_sync_pkt(MIPI_DSI_H_SYNC_START,
+                                             device->channel,
+                                             0, 0));
+
+       regmap_write(dsi->regs, SUN6I_DSI_SYNC_HSE_REG,
+                    sun6i_dsi_build_sync_pkt(MIPI_DSI_H_SYNC_END,
+                                             device->channel,
+                                             0, 0));
+
+       regmap_write(dsi->regs, SUN6I_DSI_SYNC_VSS_REG,
+                    sun6i_dsi_build_sync_pkt(MIPI_DSI_V_SYNC_START,
+                                             device->channel,
+                                             0, 0));
+
+       regmap_write(dsi->regs, SUN6I_DSI_SYNC_VSE_REG,
+                    sun6i_dsi_build_sync_pkt(MIPI_DSI_V_SYNC_END,
+                                             device->channel,
+                                             0, 0));
+
+       /* Vertical geometry, straight from the mode (in lines). */
+       regmap_write(dsi->regs, SUN6I_DSI_BASIC_SIZE0_REG,
+                    SUN6I_DSI_BASIC_SIZE0_VSA(mode->vsync_end -
+                                              mode->vsync_start) |
+                    SUN6I_DSI_BASIC_SIZE0_VBP(mode->vsync_start -
+                                              mode->vdisplay));
+
+       regmap_write(dsi->regs, SUN6I_DSI_BASIC_SIZE1_REG,
+                    SUN6I_DSI_BASIC_SIZE1_VACT(mode->vdisplay) |
+                    SUN6I_DSI_BASIC_SIZE1_VT(mode->vtotal));
+
+       /*
+        * A sync period is composed of a blanking packet (4 bytes +
+        * payload + 2 bytes) and a sync event packet (4 bytes). Its
+        * minimal size is therefore 10 bytes
+        */
+#define HSA_PACKET_OVERHEAD    10
+       hsa = max((unsigned int)HSA_PACKET_OVERHEAD,
+                 (mode->hsync_end - mode->hsync_start) * Bpp - HSA_PACKET_OVERHEAD);
+       regmap_write(dsi->regs, SUN6I_DSI_BLK_HSA0_REG,
+                    sun6i_dsi_build_blk0_pkt(device->channel, hsa));
+       regmap_write(dsi->regs, SUN6I_DSI_BLK_HSA1_REG,
+                    sun6i_dsi_build_blk1_pkt(0, hsa));
+
+       /*
+        * The backporch is set using a blanking packet (4 bytes +
+        * payload + 2 bytes). Its minimal size is therefore 6 bytes
+        */
+#define HBP_PACKET_OVERHEAD    6
+       hbp = max((unsigned int)HBP_PACKET_OVERHEAD,
+                 (mode->hsync_start - mode->hdisplay) * Bpp - HBP_PACKET_OVERHEAD);
+       regmap_write(dsi->regs, SUN6I_DSI_BLK_HBP0_REG,
+                    sun6i_dsi_build_blk0_pkt(device->channel, hbp));
+       regmap_write(dsi->regs, SUN6I_DSI_BLK_HBP1_REG,
+                    sun6i_dsi_build_blk1_pkt(0, hbp));
+
+       /*
+        * The frontporch is set using a blanking packet (4 bytes +
+        * payload + 2 bytes). Its minimal size is therefore 6 bytes
+        */
+#define HFP_PACKET_OVERHEAD    6
+       hfp = max((unsigned int)HFP_PACKET_OVERHEAD,
+                 (mode->htotal - mode->hsync_end) * Bpp - HFP_PACKET_OVERHEAD);
+       regmap_write(dsi->regs, SUN6I_DSI_BLK_HFP0_REG,
+                    sun6i_dsi_build_blk0_pkt(device->channel, hfp));
+       regmap_write(dsi->regs, SUN6I_DSI_BLK_HFP1_REG,
+                    sun6i_dsi_build_blk1_pkt(0, hfp));
+
+       /*
+        * hblk seems to be the line + porches length.
+        */
+       hblk = mode->htotal * Bpp - hsa;
+       regmap_write(dsi->regs, SUN6I_DSI_BLK_HBLK0_REG,
+                    sun6i_dsi_build_blk0_pkt(device->channel, hblk));
+       regmap_write(dsi->regs, SUN6I_DSI_BLK_HBLK1_REG,
+                    sun6i_dsi_build_blk1_pkt(0, hblk));
+
+       /*
+        * And I'm not entirely sure what vblk is about. The driver in
+        * Allwinner BSP is using a rather convoluted calculation
+        * there only for 4 lanes. However, using 0 (the !4 lanes
+        * case) even with a 4 lanes screen seems to work...
+        */
+       vblk = 0;
+       regmap_write(dsi->regs, SUN6I_DSI_BLK_VBLK0_REG,
+                    sun6i_dsi_build_blk0_pkt(device->channel, vblk));
+       regmap_write(dsi->regs, SUN6I_DSI_BLK_VBLK1_REG,
+                    sun6i_dsi_build_blk1_pkt(0, vblk));
+}
+
+/*
+ * Build the instruction jump table for the requested transfer type
+ * (each 4-bit nibble of JUMP_SEL names the successor of one slot),
+ * abort anything in flight, then commit the new sequence. For an HS
+ * clock start the clock-lane enable bit is subsequently stripped from
+ * the LP11 slot. Always returns 0.
+ *
+ * NOTE(review): the HSD chain writes its END successor into the
+ * HSCEXIT nibble rather than the DLY->NOP loop - presumably the jump
+ * config from sun6i_dsi_inst_init() routes NOP to HSCEXIT; confirm
+ * against the BSP sequence.
+ */
+static int sun6i_dsi_start(struct sun6i_dsi *dsi,
+                          enum sun6i_dsi_start_inst func)
+{
+       switch (func) {
+       case DSI_START_LPTX:
+               regmap_write(dsi->regs, SUN6I_DSI_INST_JUMP_SEL_REG,
+                            DSI_INST_ID_LPDT << (4 * DSI_INST_ID_LP11) |
+                            DSI_INST_ID_END  << (4 * DSI_INST_ID_LPDT));
+               break;
+       case DSI_START_LPRX:
+               regmap_write(dsi->regs, SUN6I_DSI_INST_JUMP_SEL_REG,
+                            DSI_INST_ID_LPDT << (4 * DSI_INST_ID_LP11) |
+                            DSI_INST_ID_DLY  << (4 * DSI_INST_ID_LPDT) |
+                            DSI_INST_ID_TBA  << (4 * DSI_INST_ID_DLY) |
+                            DSI_INST_ID_END  << (4 * DSI_INST_ID_TBA));
+               break;
+       case DSI_START_HSC:
+               regmap_write(dsi->regs, SUN6I_DSI_INST_JUMP_SEL_REG,
+                            DSI_INST_ID_HSC  << (4 * DSI_INST_ID_LP11) |
+                            DSI_INST_ID_END  << (4 * DSI_INST_ID_HSC));
+               break;
+       case DSI_START_HSD:
+               regmap_write(dsi->regs, SUN6I_DSI_INST_JUMP_SEL_REG,
+                            DSI_INST_ID_NOP  << (4 * DSI_INST_ID_LP11) |
+                            DSI_INST_ID_HSD  << (4 * DSI_INST_ID_NOP) |
+                            DSI_INST_ID_DLY  << (4 * DSI_INST_ID_HSD) |
+                            DSI_INST_ID_NOP  << (4 * DSI_INST_ID_DLY) |
+                            DSI_INST_ID_END  << (4 * DSI_INST_ID_HSCEXIT));
+               break;
+       default:
+               regmap_write(dsi->regs, SUN6I_DSI_INST_JUMP_SEL_REG,
+                            DSI_INST_ID_END  << (4 * DSI_INST_ID_LP11));
+               break;
+       }
+
+       sun6i_dsi_inst_abort(dsi);
+       sun6i_dsi_inst_commit(dsi);
+
+       /* HS clock started: stop driving the clock lane from LP11. */
+       if (func == DSI_START_HSC)
+               regmap_write_bits(dsi->regs,
+                                 SUN6I_DSI_INST_FUNC_REG(DSI_INST_ID_LP11),
+                                 SUN6I_DSI_INST_FUNC_LANE_CEN, 0);
+
+       return 0;
+}
+
+/* Bring up the DSI link: power, video timings, D-PHY, panel, HS mode. */
+static void sun6i_dsi_encoder_enable(struct drm_encoder *encoder)
+{
+       /*
+        * NOTE(review): this reads the mode through the legacy
+        * encoder->crtc pointer; atomic drivers are moving away from
+        * the legacy members - confirm whether the atomic state should
+        * be used here instead.
+        */
+       struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
+       struct sun6i_dsi *dsi = encoder_to_sun6i_dsi(encoder);
+       struct mipi_dsi_device *device = dsi->device;
+       u16 delay;
+
+       DRM_DEBUG_DRIVER("Enabling DSI output\n");
+
+       /* Powers the block up via sun6i_dsi_runtime_resume(). */
+       pm_runtime_get_sync(dsi->dev);
+
+       /* Video mode with the computed frame start delay. */
+       delay = sun6i_dsi_get_video_start_delay(dsi, mode);
+       regmap_write(dsi->regs, SUN6I_DSI_BASIC_CTL1_REG,
+                    SUN6I_DSI_BASIC_CTL1_VIDEO_ST_DELAY(delay) |
+                    SUN6I_DSI_BASIC_CTL1_VIDEO_FILL |
+                    SUN6I_DSI_BASIC_CTL1_VIDEO_PRECISION |
+                    SUN6I_DSI_BASIC_CTL1_VIDEO_MODE);
+
+       sun6i_dsi_setup_burst(dsi, mode);
+       sun6i_dsi_setup_inst_loop(dsi, mode);
+       sun6i_dsi_setup_format(dsi, mode);
+       sun6i_dsi_setup_timings(dsi, mode);
+
+       sun6i_dphy_init(dsi->dphy, device->lanes);
+       sun6i_dphy_power_on(dsi->dphy, device->lanes);
+
+       /*
+        * NOTE(review): attach() stores either NULL or a valid panel,
+        * never an ERR_PTR, so these IS_ERR() checks look vestigial -
+        * confirm (bind() already guarantees a non-NULL panel).
+        */
+       if (!IS_ERR(dsi->panel))
+               drm_panel_prepare(dsi->panel);
+
+       /*
+        * FIXME: This should be moved after the switch to HS mode.
+        *
+        * Unfortunately, once in HS mode, it seems like we're not
+        * able to send DCS commands anymore, which would prevent any
+        * panel to send any DCS command as part as their enable
+        * method, which is quite common.
+        *
+        * I haven't seen any artifact due to that sub-optimal
+        * ordering on the panels I've tested it with, so I guess this
+        * will do for now, until that IP is better understood.
+        */
+       if (!IS_ERR(dsi->panel))
+               drm_panel_enable(dsi->panel);
+
+       /* Start the HS clock, let it settle, then start HS data. */
+       sun6i_dsi_start(dsi, DSI_START_HSC);
+
+       /*
+        * NOTE(review): 1 ms busy-wait; usleep_range() is preferred
+        * for delays this long unless this can run in atomic context.
+        */
+       udelay(1000);
+
+       sun6i_dsi_start(dsi, DSI_START_HSD);
+}
+
+/* Tear the link down in reverse order: panel, D-PHY, then power. */
+static void sun6i_dsi_encoder_disable(struct drm_encoder *encoder)
+{
+       struct sun6i_dsi *dsi = encoder_to_sun6i_dsi(encoder);
+
+       DRM_DEBUG_DRIVER("Disabling DSI output\n");
+
+       if (!IS_ERR(dsi->panel)) {
+               drm_panel_disable(dsi->panel);
+               drm_panel_unprepare(dsi->panel);
+       }
+
+       sun6i_dphy_power_off(dsi->dphy);
+       sun6i_dphy_exit(dsi->dphy);
+
+       /* Balances the pm_runtime_get_sync() in the enable hook. */
+       pm_runtime_put(dsi->dev);
+}
+
+/* Connector modes come straight from the attached panel. */
+static int sun6i_dsi_get_modes(struct drm_connector *connector)
+{
+       struct sun6i_dsi *dsi = connector_to_sun6i_dsi(connector);
+
+       return drm_panel_get_modes(dsi->panel);
+}
+
+static struct drm_connector_helper_funcs sun6i_dsi_connector_helper_funcs = {
+       .get_modes      = sun6i_dsi_get_modes,
+};
+
+/* The panel is hard-wired, so the connector is always connected. */
+static enum drm_connector_status
+sun6i_dsi_connector_detect(struct drm_connector *connector, bool force)
+{
+       return connector_status_connected;
+}
+
+static const struct drm_connector_funcs sun6i_dsi_connector_funcs = {
+       .detect                 = sun6i_dsi_connector_detect,
+       .fill_modes             = drm_helper_probe_single_connector_modes,
+       .destroy                = drm_connector_cleanup,
+       .reset                  = drm_atomic_helper_connector_reset,
+       .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+       .atomic_destroy_state   = drm_atomic_helper_connector_destroy_state,
+};
+
+static const struct drm_encoder_helper_funcs sun6i_dsi_enc_helper_funcs = {
+       .disable        = sun6i_dsi_encoder_disable,
+       .enable         = sun6i_dsi_encoder_enable,
+};
+
+static const struct drm_encoder_funcs sun6i_dsi_enc_funcs = {
+       .destroy        = drm_encoder_cleanup,
+};
+
+/*
+ * Build the 4-byte packet header written to the first TX FIFO word:
+ * byte 0 is the data type, bytes 1-2 carry the word count (long
+ * packets) or the parameter byte(s) (short packets), byte 3 the ECC.
+ */
+static u32 sun6i_dsi_dcs_build_pkt_hdr(struct sun6i_dsi *dsi,
+                                      const struct mipi_dsi_msg *msg)
+{
+       u32 pkt = msg->type;
+
+       if (msg->type == MIPI_DSI_DCS_LONG_WRITE) {
+               /*
+                * NOTE(review): the 0xffff masks overlap once shifted
+                * (0xff would be expected for per-byte fields), and the
+                * "+ 1" does not match the MIPI DSI spec's WC = payload
+                * length. Harmless overlap for tx_len < 256, but the
+                * +1 should be confirmed against the Allwinner BSP.
+                */
+               pkt |= ((msg->tx_len + 1) & 0xffff) << 8;
+               pkt |= (((msg->tx_len + 1) >> 8) & 0xffff) << 16;
+       } else {
+               /* Short packet: one or two parameter bytes from tx_buf. */
+               pkt |= (((u8 *)msg->tx_buf)[0] << 8);
+               if (msg->tx_len > 1)
+                       pkt |= (((u8 *)msg->tx_buf)[1] << 16);
+       }
+
+       /* ECC over the three low header bytes goes in the top byte. */
+       pkt |= sun6i_dsi_ecc_compute(pkt) << 24;
+
+       return pkt;
+}
+
+/*
+ * Send a short DCS write: the whole packet is the 4-byte header.
+ * Returns the number of payload bytes queued (msg->tx_len).
+ */
+static int sun6i_dsi_dcs_write_short(struct sun6i_dsi *dsi,
+                                    const struct mipi_dsi_msg *msg)
+{
+       regmap_write(dsi->regs, SUN6I_DSI_CMD_TX_REG(0),
+                    sun6i_dsi_dcs_build_pkt_hdr(dsi, msg));
+       /* Transfer size in bytes, minus one: just the 4-byte header. */
+       regmap_write_bits(dsi->regs, SUN6I_DSI_CMD_CTL_REG,
+                         0xff, (4 - 1));
+
+       sun6i_dsi_start(dsi, DSI_START_LPTX);
+
+       /*
+        * NOTE(review): unlike the long-write path, this returns
+        * without waiting for the instruction to complete - confirm
+        * that back-to-back short writes cannot race the sequencer.
+        */
+       return msg->tx_len;
+}
+
+/*
+ * Send a long DCS write: 4-byte header in TX FIFO word 0, then the
+ * payload followed by its 16-bit CRC in the remaining FIFO words.
+ * Returns the number of payload bytes sent or a negative error code.
+ */
+static int sun6i_dsi_dcs_write_long(struct sun6i_dsi *dsi,
+                                   const struct mipi_dsi_msg *msg)
+{
+       int ret, len = 0;
+       u8 *bounce;
+       u16 crc;
+
+       regmap_write(dsi->regs, SUN6I_DSI_CMD_TX_REG(0),
+                    sun6i_dsi_dcs_build_pkt_hdr(dsi, msg));
+
+       /*
+        * The TX FIFO is accessed as 32-bit registers, and
+        * regmap_bulk_write() below consumes whole 32-bit words from
+        * the bounce buffer. Pad the allocation to a multiple of 4
+        * bytes: passing a byte count as the value count would
+        * otherwise make regmap read 4x the buffer size (heap
+        * over-read).
+        */
+       bounce = kzalloc(ALIGN(msg->tx_len + sizeof(crc), 4), GFP_KERNEL);
+       if (!bounce)
+               return -ENOMEM;
+
+       /* Payload first, then the CRC computed over the payload. */
+       memcpy(bounce, msg->tx_buf, msg->tx_len);
+       len += msg->tx_len;
+
+       crc = sun6i_dsi_crc_compute(bounce, msg->tx_len);
+       memcpy(bounce + msg->tx_len, &crc, sizeof(crc));
+       len += sizeof(crc);
+
+       /* Payload goes in the FIFO right after the header word. */
+       regmap_bulk_write(dsi->regs, SUN6I_DSI_CMD_TX_REG(1), bounce,
+                         DIV_ROUND_UP(len, 4));
+       /* Total transfer size (header + payload + CRC), minus one. */
+       regmap_write(dsi->regs, SUN6I_DSI_CMD_CTL_REG, len + 4 - 1);
+       kfree(bounce);
+
+       sun6i_dsi_start(dsi, DSI_START_LPTX);
+
+       ret = sun6i_dsi_inst_wait_for_completion(dsi);
+       if (ret < 0) {
+               sun6i_dsi_inst_abort(dsi);
+               return ret;
+       }
+
+       /*
+        * TODO: There's some bits (reg 0x200, bits 8/9) that
+        * apparently can be used to check whether the data have been
+        * sent, but I couldn't get it to work reliably.
+        */
+       return msg->tx_len;
+}
+
+/*
+ * Perform a single-byte DCS read. Only msg->rx_len == 1 is supported
+ * (enforced by sun6i_dsi_transfer()). Returns the number of bytes
+ * read (1) or a negative error code.
+ */
+static int sun6i_dsi_dcs_read(struct sun6i_dsi *dsi,
+                             const struct mipi_dsi_msg *msg)
+{
+       u32 val;
+       int ret;
+       u8 byte0;
+
+       regmap_write(dsi->regs, SUN6I_DSI_CMD_TX_REG(0),
+                    sun6i_dsi_dcs_build_pkt_hdr(dsi, msg));
+       /* Transfer size minus one: just the 4-byte header. */
+       regmap_write(dsi->regs, SUN6I_DSI_CMD_CTL_REG,
+                    (4 - 1));
+
+       /* LPRX chain includes the bus turnaround for the response. */
+       sun6i_dsi_start(dsi, DSI_START_LPRX);
+
+       ret = sun6i_dsi_inst_wait_for_completion(dsi);
+       if (ret < 0) {
+               sun6i_dsi_inst_abort(dsi);
+               return ret;
+       }
+
+       /*
+        * TODO: There's some bits (reg 0x200, bits 24/25) that
+        * apparently can be used to check whether the data have been
+        * received, but I couldn't get it to work reliably.
+        */
+       regmap_read(dsi->regs, SUN6I_DSI_CMD_CTL_REG, &val);
+       if (val & SUN6I_DSI_CMD_CTL_RX_OVERFLOW)
+               return -EIO;
+
+       /* Byte 0 of the RX word is the response data type. */
+       regmap_read(dsi->regs, SUN6I_DSI_CMD_RX_REG(0), &val);
+       byte0 = val & 0xff;
+       if (byte0 == MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT)
+               return -EIO;
+
+       /* The single payload byte lives in bits 15:8. */
+       ((u8 *)msg->rx_buf)[0] = (val >> 8);
+
+       return 1;
+}
+
+/*
+ * mipi_dsi_host attach hook: record the peripheral and look up its
+ * panel. Returns -EINVAL when no panel is registered for the node
+ * (bind() defers until this has succeeded).
+ */
+static int sun6i_dsi_attach(struct mipi_dsi_host *host,
+                           struct mipi_dsi_device *device)
+{
+       struct sun6i_dsi *dsi = host_to_sun6i_dsi(host);
+
+       dsi->device = device;
+       dsi->panel = of_drm_find_panel(device->dev.of_node);
+       if (!dsi->panel)
+               return -EINVAL;
+
+       dev_info(host->dev, "Attached device %s\n", device->name);
+
+       return 0;
+}
+
+/* mipi_dsi_host detach hook: forget the panel and the peripheral. */
+static int sun6i_dsi_detach(struct mipi_dsi_host *host,
+                           struct mipi_dsi_device *device)
+{
+       struct sun6i_dsi *dsi = host_to_sun6i_dsi(host);
+
+       dsi->panel = NULL;
+       dsi->device = NULL;
+
+       return 0;
+}
+
+/*
+ * mipi_dsi_host transfer hook: dispatch a DCS message to the short
+ * write, long write or single-byte read helper. Returns the number
+ * of bytes transferred or a negative error code.
+ */
+static ssize_t sun6i_dsi_transfer(struct mipi_dsi_host *host,
+                                 const struct mipi_dsi_msg *msg)
+{
+       struct sun6i_dsi *dsi = host_to_sun6i_dsi(host);
+       int ret;
+
+       /* Make sure no previous instruction is still in flight. */
+       ret = sun6i_dsi_inst_wait_for_completion(dsi);
+       if (ret < 0)
+               sun6i_dsi_inst_abort(dsi);
+
+       /* Reset the RX/TX status flags - presumably write-to-clear. */
+       regmap_write(dsi->regs, SUN6I_DSI_CMD_CTL_REG,
+                    SUN6I_DSI_CMD_CTL_RX_OVERFLOW |
+                    SUN6I_DSI_CMD_CTL_RX_FLAG |
+                    SUN6I_DSI_CMD_CTL_TX_FLAG);
+
+       switch (msg->type) {
+       case MIPI_DSI_DCS_SHORT_WRITE:
+       case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
+               ret = sun6i_dsi_dcs_write_short(dsi, msg);
+               break;
+
+       case MIPI_DSI_DCS_LONG_WRITE:
+               ret = sun6i_dsi_dcs_write_long(dsi, msg);
+               break;
+
+       case MIPI_DSI_DCS_READ:
+               if (msg->rx_len == 1) {
+                       ret = sun6i_dsi_dcs_read(dsi, msg);
+                       break;
+               }
+               /* fall through - only single-byte reads are supported */
+
+       default:
+               ret = -EINVAL;
+       }
+
+       return ret;
+}
+
+static const struct mipi_dsi_host_ops sun6i_dsi_host_ops = {
+       .attach         = sun6i_dsi_attach,
+       .detach         = sun6i_dsi_detach,
+       .transfer       = sun6i_dsi_transfer,
+};
+
+/* 32-bit MMIO regmap; the register space ends at the last TX FIFO word. */
+static const struct regmap_config sun6i_dsi_regmap_config = {
+       .reg_bits       = 32,
+       .val_bits       = 32,
+       .reg_stride     = 4,
+       .max_register   = SUN6I_DSI_CMD_TX_REG(255),
+       .name           = "mipi-dsi",
+};
+
+/*
+ * Component bind: register the encoder and connector with the DRM
+ * device and attach the panel found during host attach(). Defers
+ * until a panel has been bound through the DSI host.
+ */
+static int sun6i_dsi_bind(struct device *dev, struct device *master,
+                        void *data)
+{
+       struct drm_device *drm = data;
+       struct sun4i_drv *drv = drm->dev_private;
+       struct sun6i_dsi *dsi = dev_get_drvdata(dev);
+       int ret;
+
+       /* No panel yet: wait for sun6i_dsi_attach() to find one. */
+       if (!dsi->panel)
+               return -EPROBE_DEFER;
+
+       dsi->drv = drv;
+
+       drm_encoder_helper_add(&dsi->encoder,
+                              &sun6i_dsi_enc_helper_funcs);
+       ret = drm_encoder_init(drm,
+                              &dsi->encoder,
+                              &sun6i_dsi_enc_funcs,
+                              DRM_MODE_ENCODER_DSI,
+                              NULL);
+       if (ret) {
+               dev_err(dsi->dev, "Couldn't initialise the DSI encoder\n");
+               return ret;
+       }
+       /* NOTE(review): hard-coded to the first CRTC - confirm. */
+       dsi->encoder.possible_crtcs = BIT(0);
+
+       drm_connector_helper_add(&dsi->connector,
+                                &sun6i_dsi_connector_helper_funcs);
+       ret = drm_connector_init(drm, &dsi->connector,
+                                &sun6i_dsi_connector_funcs,
+                                DRM_MODE_CONNECTOR_DSI);
+       if (ret) {
+               dev_err(dsi->dev,
+                       "Couldn't initialise the DSI connector\n");
+               goto err_cleanup_connector;
+       }
+
+       drm_mode_connector_attach_encoder(&dsi->connector, &dsi->encoder);
+       drm_panel_attach(dsi->panel, &dsi->connector);
+
+       return 0;
+
+/*
+ * NOTE(review): this label actually cleans up the encoder (the
+ * connector init failed) - err_cleanup_encoder would be clearer.
+ */
+err_cleanup_connector:
+       drm_encoder_cleanup(&dsi->encoder);
+       return ret;
+}
+
+/* Component unbind: detach the panel registered in bind(). */
+static void sun6i_dsi_unbind(struct device *dev, struct device *master,
+                           void *data)
+{
+       struct sun6i_dsi *dsi = dev_get_drvdata(dev);
+
+       drm_panel_detach(dsi->panel);
+}
+
+static const struct component_ops sun6i_dsi_ops = {
+       .bind   = sun6i_dsi_bind,
+       .unbind = sun6i_dsi_unbind,
+};
+
+/*
+ * Platform probe: map the registers, grab reset/clock resources,
+ * probe the companion D-PHY, then register the DSI host and the
+ * DRM component. Error unwinding mirrors the acquisition order.
+ */
+static int sun6i_dsi_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct device_node *dphy_node;
+       struct sun6i_dsi *dsi;
+       struct resource *res;
+       void __iomem *base;
+       int ret;
+
+       dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
+       if (!dsi)
+               return -ENOMEM;
+       dev_set_drvdata(dev, dsi);
+       dsi->dev = dev;
+       dsi->host.ops = &sun6i_dsi_host_ops;
+       dsi->host.dev = dev;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       base = devm_ioremap_resource(dev, res);
+       if (IS_ERR(base)) {
+               dev_err(dev, "Couldn't map the DSI encoder registers\n");
+               return PTR_ERR(base);
+       }
+
+       /* "bus" clock is managed by the regmap around MMIO accesses. */
+       dsi->regs = devm_regmap_init_mmio_clk(dev, "bus", base,
+                                             &sun6i_dsi_regmap_config);
+       if (IS_ERR(dsi->regs)) {
+               dev_err(dev, "Couldn't create the DSI encoder regmap\n");
+               return PTR_ERR(dsi->regs);
+       }
+
+       dsi->reset = devm_reset_control_get_shared(dev, NULL);
+       if (IS_ERR(dsi->reset)) {
+               dev_err(dev, "Couldn't get our reset line\n");
+               return PTR_ERR(dsi->reset);
+       }
+
+       dsi->mod_clk = devm_clk_get(dev, "mod");
+       if (IS_ERR(dsi->mod_clk)) {
+               dev_err(dev, "Couldn't get the DSI mod clock\n");
+               return PTR_ERR(dsi->mod_clk);
+       }
+
+       /*
+        * In order to operate properly, that clock seems to be always
+        * set to 297MHz.
+        */
+       /* NOTE(review): return value is not checked - confirm OK. */
+       clk_set_rate_exclusive(dsi->mod_clk, 297000000);
+
+       dphy_node = of_parse_phandle(dev->of_node, "phys", 0);
+       ret = sun6i_dphy_probe(dsi, dphy_node);
+       of_node_put(dphy_node);
+       if (ret) {
+               dev_err(dev, "Couldn't get the MIPI D-PHY\n");
+               goto err_unprotect_clk;
+       }
+
+       pm_runtime_enable(dev);
+
+       ret = mipi_dsi_host_register(&dsi->host);
+       if (ret) {
+               dev_err(dev, "Couldn't register MIPI-DSI host\n");
+               goto err_remove_phy;
+       }
+
+       ret = component_add(&pdev->dev, &sun6i_dsi_ops);
+       if (ret) {
+               dev_err(dev, "Couldn't register our component\n");
+               goto err_remove_dsi_host;
+       }
+
+       return 0;
+
+err_remove_dsi_host:
+       mipi_dsi_host_unregister(&dsi->host);
+err_remove_phy:
+       pm_runtime_disable(dev);
+       sun6i_dphy_remove(dsi);
+err_unprotect_clk:
+       clk_rate_exclusive_put(dsi->mod_clk);
+       return ret;
+}
+
+/* Platform remove: undo probe in reverse order. */
+static int sun6i_dsi_remove(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct sun6i_dsi *dsi = dev_get_drvdata(dev);
+
+       component_del(&pdev->dev, &sun6i_dsi_ops);
+       mipi_dsi_host_unregister(&dsi->host);
+       pm_runtime_disable(dev);
+       sun6i_dphy_remove(dsi);
+       clk_rate_exclusive_put(dsi->mod_clk);
+
+       return 0;
+}
+
+/*
+ * Runtime resume: bring the block out of reset, clock it, and apply
+ * the baseline configuration.
+ * NOTE(review): reset_control_deassert()/clk_prepare_enable() return
+ * values are ignored - confirm that's acceptable here.
+ */
+static int sun6i_dsi_runtime_resume(struct device *dev)
+{
+       struct sun6i_dsi *dsi = dev_get_drvdata(dev);
+
+       reset_control_deassert(dsi->reset);
+       clk_prepare_enable(dsi->mod_clk);
+
+       /*
+        * Enable the DSI block.
+        *
+        * Some part of it can only be done once we get a number of
+        * lanes, see sun6i_dsi_inst_init
+        */
+       regmap_write(dsi->regs, SUN6I_DSI_CTL_REG, SUN6I_DSI_CTL_EN);
+
+       regmap_write(dsi->regs, SUN6I_DSI_BASIC_CTL0_REG,
+                    SUN6I_DSI_BASIC_CTL0_ECC_EN | SUN6I_DSI_BASIC_CTL0_CRC_EN);
+
+       /* Magic values - presumably from the Allwinner BSP; confirm. */
+       regmap_write(dsi->regs, SUN6I_DSI_TRANS_START_REG, 10);
+       regmap_write(dsi->regs, SUN6I_DSI_TRANS_ZERO_REG, 0);
+
+       /* Lane-dependent init, only possible once a device attached. */
+       if (dsi->device)
+               sun6i_dsi_inst_init(dsi, dsi->device);
+
+       regmap_write(dsi->regs, SUN6I_DSI_DEBUG_DATA_REG, 0xff);
+
+       return 0;
+}
+
+/* Runtime suspend: ungate in reverse - stop the clock, assert reset. */
+static int sun6i_dsi_runtime_suspend(struct device *dev)
+{
+       struct sun6i_dsi *dsi = dev_get_drvdata(dev);
+
+       clk_disable_unprepare(dsi->mod_clk);
+       reset_control_assert(dsi->reset);
+
+       return 0;
+}
+
+static const struct dev_pm_ops sun6i_dsi_pm_ops = {
+       SET_RUNTIME_PM_OPS(sun6i_dsi_runtime_suspend,
+                          sun6i_dsi_runtime_resume,
+                          NULL)
+};
+
+/* Device-tree match table. */
+static const struct of_device_id sun6i_dsi_of_table[] = {
+       { .compatible = "allwinner,sun6i-a31-mipi-dsi" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, sun6i_dsi_of_table);
+
+static struct platform_driver sun6i_dsi_platform_driver = {
+       .probe          = sun6i_dsi_probe,
+       .remove         = sun6i_dsi_remove,
+       .driver         = {
+               .name           = "sun6i-mipi-dsi",
+               .of_match_table = sun6i_dsi_of_table,
+               .pm             = &sun6i_dsi_pm_ops,
+       },
+};
+module_platform_driver(sun6i_dsi_platform_driver);
+
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_DESCRIPTION("Allwinner A31 DSI Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h
new file mode 100644 (file)
index 0000000..dbbc5b3
--- /dev/null
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (c) 2016 Allwinnertech Co., Ltd.
+ * Copyright (C) 2017-2018 Bootlin
+ *
+ * Maxime Ripard <maxime.ripard@bootlin.com>
+ */
+
+#ifndef _SUN6I_MIPI_DSI_H_
+#define _SUN6I_MIPI_DSI_H_
+
+#include <drm/drm_connector.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_mipi_dsi.h>
+
+/* Companion MIPI D-PHY resources, driven through sun6i_dphy_*(). */
+struct sun6i_dphy {
+       struct clk              *bus_clk;
+       struct clk              *mod_clk;
+       struct regmap           *regs;
+       struct reset_control    *reset;
+};
+
+/* Per-device state for the A31 DSI encoder. */
+struct sun6i_dsi {
+       struct drm_connector    connector;
+       struct drm_encoder      encoder;
+       struct mipi_dsi_host    host;
+
+       struct clk              *bus_clk;
+       struct clk              *mod_clk;
+       struct regmap           *regs;
+       struct reset_control    *reset;
+       struct sun6i_dphy       *dphy;
+
+       struct device           *dev;
+       struct sun4i_drv        *drv;
+       struct mipi_dsi_device  *device;        /* attached DSI peripheral */
+       struct drm_panel        *panel;         /* panel found in attach() */
+};
+
+static inline struct sun6i_dsi *host_to_sun6i_dsi(struct mipi_dsi_host *host)
+{
+       return container_of(host, struct sun6i_dsi, host);
+}
+
+static inline struct sun6i_dsi *connector_to_sun6i_dsi(struct drm_connector *connector)
+{
+       return container_of(connector, struct sun6i_dsi, connector);
+}
+
+static inline struct sun6i_dsi *encoder_to_sun6i_dsi(const struct drm_encoder *encoder)
+{
+       return container_of(encoder, struct sun6i_dsi, encoder);
+}
+
+int sun6i_dphy_probe(struct sun6i_dsi *dsi, struct device_node *node);
+int sun6i_dphy_remove(struct sun6i_dsi *dsi);
+
+int sun6i_dphy_init(struct sun6i_dphy *dphy, unsigned int lanes);
+int sun6i_dphy_power_on(struct sun6i_dphy *dphy, unsigned int lanes);
+int sun6i_dphy_power_off(struct sun6i_dphy *dphy);
+int sun6i_dphy_exit(struct sun6i_dphy *dphy);
+
+#endif /* _SUN6I_MIPI_DSI_H_ */
index 7afe2f6..a051961 100644 (file)
@@ -38,26 +38,11 @@ static int tegra_atomic_check(struct drm_device *drm,
 {
        int err;
 
-       err = drm_atomic_helper_check_modeset(drm, state);
+       err = drm_atomic_helper_check(drm, state);
        if (err < 0)
                return err;
 
-       err = tegra_display_hub_atomic_check(drm, state);
-       if (err < 0)
-               return err;
-
-       err = drm_atomic_normalize_zpos(drm, state);
-       if (err < 0)
-               return err;
-
-       err = drm_atomic_helper_check_planes(drm, state);
-       if (err < 0)
-               return err;
-
-       if (state->legacy_cursor_update)
-               state->async_update = !drm_atomic_helper_async_check(drm, state);
-
-       return 0;
+       return tegra_display_hub_atomic_check(drm, state);
 }
 
 static const struct drm_mode_config_funcs tegra_drm_mode_config_funcs = {
@@ -151,6 +136,8 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
 
        drm->mode_config.allow_fb_modifiers = true;
 
+       drm->mode_config.normalize_zpos = true;
+
        drm->mode_config.funcs = &tegra_drm_mode_config_funcs;
        drm->mode_config.helper_private = &tegra_drm_mode_config_helpers;
 
index 4c66162..24a33bf 100644 (file)
@@ -91,7 +91,7 @@ EXPORT_SYMBOL(tinydrm_gem_cma_prime_import_sg_table);
  * GEM object state and frees the memory used to store the object itself using
  * drm_gem_cma_free_object(). It also handles PRIME buffers which has the kernel
  * virtual address set by tinydrm_gem_cma_prime_import_sg_table(). Drivers
- * can use this as their &drm_driver->gem_free_object callback.
+ * can use this as their &drm_driver->gem_free_object_unlocked callback.
  */
 void tinydrm_gem_cma_free_object(struct drm_gem_object *gem_obj)
 {
index d1c3ce9..dcd3901 100644 (file)
@@ -78,6 +78,36 @@ bool tinydrm_merge_clips(struct drm_clip_rect *dst,
 }
 EXPORT_SYMBOL(tinydrm_merge_clips);
 
+int tinydrm_fb_dirty(struct drm_framebuffer *fb,
+                    struct drm_file *file_priv,
+                    unsigned int flags, unsigned int color,
+                    struct drm_clip_rect *clips,
+                    unsigned int num_clips)
+{
+       struct tinydrm_device *tdev = fb->dev->dev_private;
+       struct drm_plane *plane = &tdev->pipe.plane;
+       int ret = 0;
+
+       drm_modeset_lock(&plane->mutex, NULL);
+
+       /* fbdev can flush even when we're not interested */
+       if (plane->state->fb == fb) {
+               mutex_lock(&tdev->dirty_lock);
+               ret = tdev->fb_dirty(fb, file_priv, flags,
+                                    color, clips, num_clips);
+               mutex_unlock(&tdev->dirty_lock);
+       }
+
+       drm_modeset_unlock(&plane->mutex);
+
+       if (ret)
+               dev_err_once(fb->dev->dev,
+                            "Failed to update display %d\n", ret);
+
+       return ret;
+}
+EXPORT_SYMBOL(tinydrm_fb_dirty);
+
 /**
  * tinydrm_memcpy - Copy clip buffer
  * @dst: Destination buffer
index 11ae950..7e8e24d 100644 (file)
@@ -125,9 +125,8 @@ void tinydrm_display_pipe_update(struct drm_simple_display_pipe *pipe,
        struct drm_crtc *crtc = &tdev->pipe.crtc;
 
        if (fb && (fb != old_state->fb)) {
-               pipe->plane.fb = fb;
-               if (fb->funcs->dirty)
-                       fb->funcs->dirty(fb, NULL, 0, 0, NULL, 0);
+               if (tdev->fb_dirty)
+                       tdev->fb_dirty(fb, NULL, 0, 0, NULL, 0);
        }
 
        if (crtc->state->event) {
@@ -139,23 +138,6 @@ void tinydrm_display_pipe_update(struct drm_simple_display_pipe *pipe,
 }
 EXPORT_SYMBOL(tinydrm_display_pipe_update);
 
-/**
- * tinydrm_display_pipe_prepare_fb - Display pipe prepare_fb helper
- * @pipe: Simple display pipe
- * @plane_state: Plane state
- *
- * This function uses drm_gem_fb_prepare_fb() to check if the plane FB has an
- * dma-buf attached, extracts the exclusive fence and attaches it to plane
- * state for the atomic helper to wait on. Drivers can use this as their
- * &drm_simple_display_pipe_funcs->prepare_fb callback.
- */
-int tinydrm_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
-                                   struct drm_plane_state *plane_state)
-{
-       return drm_gem_fb_prepare_fb(&pipe->plane, plane_state);
-}
-EXPORT_SYMBOL(tinydrm_display_pipe_prepare_fb);
-
 static int tinydrm_rotate_mode(struct drm_display_mode *mode,
                               unsigned int rotation)
 {
index a075950..841c69a 100644 (file)
@@ -88,14 +88,8 @@ static int ili9225_fb_dirty(struct drm_framebuffer *fb,
        bool full;
        void *tr;
 
-       mutex_lock(&tdev->dirty_lock);
-
        if (!mipi->enabled)
-               goto out_unlock;
-
-       /* fbdev can flush even when we're not interested */
-       if (tdev->pipe.plane.fb != fb)
-               goto out_unlock;
+               return 0;
 
        full = tinydrm_merge_clips(&clip, clips, num_clips, flags,
                                   fb->width, fb->height);
@@ -108,7 +102,7 @@ static int ili9225_fb_dirty(struct drm_framebuffer *fb,
                tr = mipi->tx_buf;
                ret = mipi_dbi_buf_copy(mipi->tx_buf, fb, &clip, swap);
                if (ret)
-                       goto out_unlock;
+                       return ret;
        } else {
                tr = cma_obj->vaddr;
        }
@@ -159,24 +153,18 @@ static int ili9225_fb_dirty(struct drm_framebuffer *fb,
        ret = mipi_dbi_command_buf(mipi, ILI9225_WRITE_DATA_TO_GRAM, tr,
                                (clip.x2 - clip.x1) * (clip.y2 - clip.y1) * 2);
 
-out_unlock:
-       mutex_unlock(&tdev->dirty_lock);
-
-       if (ret)
-               dev_err_once(fb->dev->dev, "Failed to update display %d\n",
-                            ret);
-
        return ret;
 }
 
 static const struct drm_framebuffer_funcs ili9225_fb_funcs = {
        .destroy        = drm_gem_fb_destroy,
        .create_handle  = drm_gem_fb_create_handle,
-       .dirty          = ili9225_fb_dirty,
+       .dirty          = tinydrm_fb_dirty,
 };
 
 static void ili9225_pipe_enable(struct drm_simple_display_pipe *pipe,
-                               struct drm_crtc_state *crtc_state)
+                               struct drm_crtc_state *crtc_state,
+                               struct drm_plane_state *plane_state)
 {
        struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
        struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
@@ -268,7 +256,7 @@ static void ili9225_pipe_enable(struct drm_simple_display_pipe *pipe,
 
        ili9225_command(mipi, ILI9225_DISPLAY_CONTROL_1, 0x1017);
 
-       mipi_dbi_enable_flush(mipi);
+       mipi_dbi_enable_flush(mipi, crtc_state, plane_state);
 }
 
 static void ili9225_pipe_disable(struct drm_simple_display_pipe *pipe)
@@ -341,6 +329,8 @@ static int ili9225_init(struct device *dev, struct mipi_dbi *mipi,
        if (ret)
                return ret;
 
+       tdev->fb_dirty = ili9225_fb_dirty;
+
        ret = tinydrm_display_pipe_init(tdev, pipe_funcs,
                                        DRM_MODE_CONNECTOR_VIRTUAL,
                                        ili9225_formats,
@@ -364,7 +354,7 @@ static const struct drm_simple_display_pipe_funcs ili9225_pipe_funcs = {
        .enable         = ili9225_pipe_enable,
        .disable        = ili9225_pipe_disable,
        .update         = tinydrm_display_pipe_update,
-       .prepare_fb     = tinydrm_display_pipe_prepare_fb,
+       .prepare_fb     = drm_gem_fb_simple_display_pipe_prepare_fb,
 };
 
 static const struct drm_display_mode ili9225_mode = {
index d8ed6e6..d5ef651 100644 (file)
@@ -19,6 +19,7 @@
 
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_modeset_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/tinydrm/mipi-dbi.h>
 #include <drm/tinydrm/tinydrm-helpers.h>
 #include <video/mipi_display.h>
@@ -49,7 +50,8 @@
 #define ILI9341_MADCTL_MY      BIT(7)
 
 static void mi0283qt_enable(struct drm_simple_display_pipe *pipe,
-                           struct drm_crtc_state *crtc_state)
+                           struct drm_crtc_state *crtc_state,
+                           struct drm_plane_state *plane_state)
 {
        struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
        struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
@@ -126,14 +128,14 @@ static void mi0283qt_enable(struct drm_simple_display_pipe *pipe,
        msleep(100);
 
 out_enable:
-       mipi_dbi_enable_flush(mipi);
+       mipi_dbi_enable_flush(mipi, crtc_state, plane_state);
 }
 
 static const struct drm_simple_display_pipe_funcs mi0283qt_pipe_funcs = {
        .enable = mi0283qt_enable,
        .disable = mipi_dbi_pipe_disable,
        .update = tinydrm_display_pipe_update,
-       .prepare_fb = tinydrm_display_pipe_prepare_fb,
+       .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
 };
 
 static const struct drm_display_mode mi0283qt_mode = {
index 9e90381..4d1fb31 100644 (file)
@@ -219,14 +219,8 @@ static int mipi_dbi_fb_dirty(struct drm_framebuffer *fb,
        bool full;
        void *tr;
 
-       mutex_lock(&tdev->dirty_lock);
-
        if (!mipi->enabled)
-               goto out_unlock;
-
-       /* fbdev can flush even when we're not interested */
-       if (tdev->pipe.plane.fb != fb)
-               goto out_unlock;
+               return 0;
 
        full = tinydrm_merge_clips(&clip, clips, num_clips, flags,
                                   fb->width, fb->height);
@@ -239,7 +233,7 @@ static int mipi_dbi_fb_dirty(struct drm_framebuffer *fb,
                tr = mipi->tx_buf;
                ret = mipi_dbi_buf_copy(mipi->tx_buf, fb, &clip, swap);
                if (ret)
-                       goto out_unlock;
+                       return ret;
        } else {
                tr = cma_obj->vaddr;
        }
@@ -254,20 +248,13 @@ static int mipi_dbi_fb_dirty(struct drm_framebuffer *fb,
        ret = mipi_dbi_command_buf(mipi, MIPI_DCS_WRITE_MEMORY_START, tr,
                                (clip.x2 - clip.x1) * (clip.y2 - clip.y1) * 2);
 
-out_unlock:
-       mutex_unlock(&tdev->dirty_lock);
-
-       if (ret)
-               dev_err_once(fb->dev->dev, "Failed to update display %d\n",
-                            ret);
-
        return ret;
 }
 
 static const struct drm_framebuffer_funcs mipi_dbi_fb_funcs = {
        .destroy        = drm_gem_fb_destroy,
        .create_handle  = drm_gem_fb_create_handle,
-       .dirty          = mipi_dbi_fb_dirty,
+       .dirty          = tinydrm_fb_dirty,
 };
 
 /**
@@ -278,13 +265,16 @@ static const struct drm_framebuffer_funcs mipi_dbi_fb_funcs = {
  * enables the backlight. Drivers can use this in their
  * &drm_simple_display_pipe_funcs->enable callback.
  */
-void mipi_dbi_enable_flush(struct mipi_dbi *mipi)
+void mipi_dbi_enable_flush(struct mipi_dbi *mipi,
+                          struct drm_crtc_state *crtc_state,
+                          struct drm_plane_state *plane_state)
 {
-       struct drm_framebuffer *fb = mipi->tinydrm.pipe.plane.fb;
+       struct tinydrm_device *tdev = &mipi->tinydrm;
+       struct drm_framebuffer *fb = plane_state->fb;
 
        mipi->enabled = true;
        if (fb)
-               fb->funcs->dirty(fb, NULL, 0, 0, NULL, 0);
+               tdev->fb_dirty(fb, NULL, 0, 0, NULL, 0);
 
        backlight_enable(mipi->backlight);
 }
@@ -381,6 +371,8 @@ int mipi_dbi_init(struct device *dev, struct mipi_dbi *mipi,
        if (ret)
                return ret;
 
+       tdev->fb_dirty = mipi_dbi_fb_dirty;
+
        /* TODO: Maybe add DRM_MODE_CONNECTOR_SPI */
        ret = tinydrm_display_pipe_init(tdev, pipe_funcs,
                                        DRM_MODE_CONNECTOR_VIRTUAL,
index 7574063..1ee6855 100644 (file)
@@ -540,14 +540,8 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb,
        clip.y1 = 0;
        clip.y2 = fb->height;
 
-       mutex_lock(&tdev->dirty_lock);
-
        if (!epd->enabled)
-               goto out_unlock;
-
-       /* fbdev can flush even when we're not interested */
-       if (tdev->pipe.plane.fb != fb)
-               goto out_unlock;
+               return 0;
 
        repaper_get_temperature(epd);
 
@@ -555,16 +549,14 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb,
                  epd->factored_stage_time);
 
        buf = kmalloc(fb->width * fb->height, GFP_KERNEL);
-       if (!buf) {
-               ret = -ENOMEM;
-               goto out_unlock;
-       }
+       if (!buf)
+               return -ENOMEM;
 
        if (import_attach) {
                ret = dma_buf_begin_cpu_access(import_attach->dmabuf,
                                               DMA_FROM_DEVICE);
                if (ret)
-                       goto out_unlock;
+                       goto out_free;
        }
 
        tinydrm_xrgb8888_to_gray8(buf, cma_obj->vaddr, fb, &clip);
@@ -573,7 +565,7 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb,
                ret = dma_buf_end_cpu_access(import_attach->dmabuf,
                                             DMA_FROM_DEVICE);
                if (ret)
-                       goto out_unlock;
+                       goto out_free;
        }
 
        repaper_gray8_to_mono_reversed(buf, fb->width, fb->height);
@@ -625,11 +617,7 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb,
                        }
        }
 
-out_unlock:
-       mutex_unlock(&tdev->dirty_lock);
-
-       if (ret)
-               DRM_DEV_ERROR(fb->dev->dev, "Failed to update display (%d)\n", ret);
+out_free:
        kfree(buf);
 
        return ret;
@@ -638,7 +626,7 @@ out_unlock:
 static const struct drm_framebuffer_funcs repaper_fb_funcs = {
        .destroy        = drm_gem_fb_destroy,
        .create_handle  = drm_gem_fb_create_handle,
-       .dirty          = repaper_fb_dirty,
+       .dirty          = tinydrm_fb_dirty,
 };
 
 static void power_off(struct repaper_epd *epd)
@@ -659,7 +647,8 @@ static void power_off(struct repaper_epd *epd)
 }
 
 static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe,
-                               struct drm_crtc_state *crtc_state)
+                               struct drm_crtc_state *crtc_state,
+                               struct drm_plane_state *plane_state)
 {
        struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
        struct repaper_epd *epd = epd_from_tinydrm(tdev);
@@ -852,7 +841,7 @@ static const struct drm_simple_display_pipe_funcs repaper_pipe_funcs = {
        .enable = repaper_pipe_enable,
        .disable = repaper_pipe_disable,
        .update = tinydrm_display_pipe_update,
-       .prepare_fb = tinydrm_display_pipe_prepare_fb,
+       .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
 };
 
 static const uint32_t repaper_formats[] = {
@@ -1069,6 +1058,8 @@ static int repaper_probe(struct spi_device *spi)
        if (ret)
                return ret;
 
+       tdev->fb_dirty = repaper_fb_dirty;
+
        ret = tinydrm_display_pipe_init(tdev, &repaper_pipe_funcs,
                                        DRM_MODE_CONNECTOR_VIRTUAL,
                                        repaper_formats,
index a6396ef..5c29e38 100644 (file)
@@ -120,14 +120,8 @@ static int st7586_fb_dirty(struct drm_framebuffer *fb,
        int start, end;
        int ret = 0;
 
-       mutex_lock(&tdev->dirty_lock);
-
        if (!mipi->enabled)
-               goto out_unlock;
-
-       /* fbdev can flush even when we're not interested */
-       if (tdev->pipe.plane.fb != fb)
-               goto out_unlock;
+               return 0;
 
        tinydrm_merge_clips(&clip, clips, num_clips, flags, fb->width,
                            fb->height);
@@ -141,7 +135,7 @@ static int st7586_fb_dirty(struct drm_framebuffer *fb,
 
        ret = st7586_buf_copy(mipi->tx_buf, fb, &clip);
        if (ret)
-               goto out_unlock;
+               return ret;
 
        /* Pixels are packed 3 per byte */
        start = clip.x1 / 3;
@@ -158,24 +152,18 @@ static int st7586_fb_dirty(struct drm_framebuffer *fb,
                                   (u8 *)mipi->tx_buf,
                                   (end - start) * (clip.y2 - clip.y1));
 
-out_unlock:
-       mutex_unlock(&tdev->dirty_lock);
-
-       if (ret)
-               dev_err_once(fb->dev->dev, "Failed to update display %d\n",
-                            ret);
-
        return ret;
 }
 
 static const struct drm_framebuffer_funcs st7586_fb_funcs = {
        .destroy        = drm_gem_fb_destroy,
        .create_handle  = drm_gem_fb_create_handle,
-       .dirty          = st7586_fb_dirty,
+       .dirty          = tinydrm_fb_dirty,
 };
 
 static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe,
-                              struct drm_crtc_state *crtc_state)
+                              struct drm_crtc_state *crtc_state,
+                              struct drm_plane_state *plane_state)
 {
        struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
        struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
@@ -237,7 +225,7 @@ static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe,
 
        mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_ON);
 
-       mipi_dbi_enable_flush(mipi);
+       mipi_dbi_enable_flush(mipi, crtc_state, plane_state);
 }
 
 static void st7586_pipe_disable(struct drm_simple_display_pipe *pipe)
@@ -277,6 +265,8 @@ static int st7586_init(struct device *dev, struct mipi_dbi *mipi,
        if (ret)
                return ret;
 
+       tdev->fb_dirty = st7586_fb_dirty;
+
        ret = tinydrm_display_pipe_init(tdev, pipe_funcs,
                                        DRM_MODE_CONNECTOR_VIRTUAL,
                                        st7586_formats,
@@ -300,7 +290,7 @@ static const struct drm_simple_display_pipe_funcs st7586_pipe_funcs = {
        .enable         = st7586_pipe_enable,
        .disable        = st7586_pipe_disable,
        .update         = tinydrm_display_pipe_update,
-       .prepare_fb     = tinydrm_display_pipe_prepare_fb,
+       .prepare_fb     = drm_gem_fb_simple_display_pipe_prepare_fb,
 };
 
 static const struct drm_display_mode st7586_mode = {
index 67d197e..6c7b15c 100644 (file)
@@ -37,7 +37,8 @@
 #define ST7735R_MV     BIT(5)
 
 static void jd_t18003_t01_pipe_enable(struct drm_simple_display_pipe *pipe,
-                                     struct drm_crtc_state *crtc_state)
+                                     struct drm_crtc_state *crtc_state,
+                                     struct drm_plane_state *plane_state)
 {
        struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
        struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
@@ -98,14 +99,14 @@ static void jd_t18003_t01_pipe_enable(struct drm_simple_display_pipe *pipe,
 
        msleep(20);
 
-       mipi_dbi_enable_flush(mipi);
+       mipi_dbi_enable_flush(mipi, crtc_state, plane_state);
 }
 
 static const struct drm_simple_display_pipe_funcs jd_t18003_t01_pipe_funcs = {
        .enable         = jd_t18003_t01_pipe_enable,
        .disable        = mipi_dbi_pipe_disable,
        .update         = tinydrm_display_pipe_update,
-       .prepare_fb     = tinydrm_display_pipe_prepare_fb,
+       .prepare_fb     = drm_gem_fb_simple_display_pipe_prepare_fb,
 };
 
 static const struct drm_display_mode jd_t18003_t01_mode = {
index db397fc..e8723a2 100644 (file)
@@ -120,7 +120,8 @@ static int tve200_display_check(struct drm_simple_display_pipe *pipe,
 }
 
 static void tve200_display_enable(struct drm_simple_display_pipe *pipe,
-                                struct drm_crtc_state *cstate)
+                                struct drm_crtc_state *cstate,
+                                struct drm_plane_state *plane_state)
 {
        struct drm_crtc *crtc = &pipe->crtc;
        struct drm_plane *plane = &pipe->plane;
@@ -292,18 +293,12 @@ static void tve200_display_disable_vblank(struct drm_simple_display_pipe *pipe)
        writel(0, priv->regs + TVE200_INT_EN);
 }
 
-static int tve200_display_prepare_fb(struct drm_simple_display_pipe *pipe,
-                                   struct drm_plane_state *plane_state)
-{
-       return drm_gem_fb_prepare_fb(&pipe->plane, plane_state);
-}
-
 static const struct drm_simple_display_pipe_funcs tve200_display_funcs = {
        .check = tve200_display_check,
        .enable = tve200_display_enable,
        .disable = tve200_display_disable,
        .update = tve200_display_update,
-       .prepare_fb = tve200_display_prepare_fb,
+       .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
        .enable_vblank = tve200_display_enable_vblank,
        .disable_vblank = tve200_display_disable_vblank,
 };
index c3dc1fd..09dc585 100644 (file)
@@ -105,7 +105,7 @@ static int udl_get_modes(struct drm_connector *connector)
        return 0;
 }
 
-static int udl_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status udl_mode_valid(struct drm_connector *connector,
                          struct drm_display_mode *mode)
 {
        struct udl_device *udl = connector->dev->dev_private;
index 2867ed1..0a20695 100644 (file)
@@ -76,6 +76,7 @@ static struct sg_table *udl_map_dma_buf(struct dma_buf_attachment *attach,
        struct udl_drm_dmabuf_attachment *udl_attach = attach->priv;
        struct udl_gem_object *obj = to_udl_bo(attach->dmabuf->priv);
        struct drm_device *dev = obj->base.dev;
+       struct udl_device *udl = dev->dev_private;
        struct scatterlist *rd, *wr;
        struct sg_table *sgt = NULL;
        unsigned int i;
@@ -112,7 +113,7 @@ static struct sg_table *udl_map_dma_buf(struct dma_buf_attachment *attach,
                return ERR_PTR(-ENOMEM);
        }
 
-       mutex_lock(&dev->struct_mutex);
+       mutex_lock(&udl->gem_lock);
 
        rd = obj->sg->sgl;
        wr = sgt->sgl;
@@ -137,7 +138,7 @@ static struct sg_table *udl_map_dma_buf(struct dma_buf_attachment *attach,
        attach->priv = udl_attach;
 
 err_unlock:
-       mutex_unlock(&dev->struct_mutex);
+       mutex_unlock(&udl->gem_lock);
        return sgt;
 }
 
index 3c45a30..9ef515d 100644 (file)
@@ -53,7 +53,7 @@ static struct drm_driver driver = {
        .unload = udl_driver_unload,
 
        /* gem hooks */
-       .gem_free_object = udl_gem_free_object,
+       .gem_free_object_unlocked = udl_gem_free_object,
        .gem_vm_ops = &udl_gem_vm_ops,
 
        .dumb_create = udl_dumb_create,
index 2a75ab8..55c0cc3 100644 (file)
@@ -54,6 +54,8 @@ struct udl_device {
        struct usb_device *udev;
        struct drm_crtc *crtc;
 
+       struct mutex gem_lock;
+
        int sku_pixel_limit;
 
        struct urb_list urbs;
index dee6bd9..9a15cce 100644 (file)
@@ -214,9 +214,10 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
 {
        struct udl_gem_object *gobj;
        struct drm_gem_object *obj;
+       struct udl_device *udl = dev->dev_private;
        int ret = 0;
 
-       mutex_lock(&dev->struct_mutex);
+       mutex_lock(&udl->gem_lock);
        obj = drm_gem_object_lookup(file, handle);
        if (obj == NULL) {
                ret = -ENOENT;
@@ -236,6 +237,6 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
 out:
        drm_gem_object_put(&gobj->base);
 unlock:
-       mutex_unlock(&dev->struct_mutex);
+       mutex_unlock(&udl->gem_lock);
        return ret;
 }
index f1ec452..d518de8 100644 (file)
@@ -324,6 +324,8 @@ int udl_driver_load(struct drm_device *dev, unsigned long flags)
        udl->ddev = dev;
        dev->dev_private = udl;
 
+       mutex_init(&udl->gem_lock);
+
        if (!udl_parse_vendor_descriptor(dev, udl->udev)) {
                ret = -ENODEV;
                DRM_ERROR("firmware not recognized. Assume incompatible device\n");
index bf46674..83d3b79 100644 (file)
 #include "vc4_drv.h"
 #include "vc4_regs.h"
 
-struct vc4_crtc {
-       struct drm_crtc base;
-       const struct vc4_crtc_data *data;
-       void __iomem *regs;
-
-       /* Timestamp at start of vblank irq - unaffected by lock delays. */
-       ktime_t t_vblank;
-
-       /* Which HVS channel we're using for our CRTC. */
-       int channel;
-
-       u8 lut_r[256];
-       u8 lut_g[256];
-       u8 lut_b[256];
-       /* Size in pixels of the COB memory allocated to this CRTC. */
-       u32 cob_size;
-
-       struct drm_pending_vblank_event *event;
-};
-
 struct vc4_crtc_state {
        struct drm_crtc_state base;
        /* Dlist area for this CRTC configuration. */
        struct drm_mm_node mm;
 };
 
-static inline struct vc4_crtc *
-to_vc4_crtc(struct drm_crtc *crtc)
-{
-       return (struct vc4_crtc *)crtc;
-}
-
 static inline struct vc4_crtc_state *
 to_vc4_crtc_state(struct drm_crtc_state *crtc_state)
 {
        return (struct vc4_crtc_state *)crtc_state;
 }
 
-struct vc4_crtc_data {
-       /* Which channel of the HVS this pixelvalve sources from. */
-       int hvs_channel;
-
-       enum vc4_encoder_type encoder_types[4];
-};
-
 #define CRTC_WRITE(offset, val) writel(val, vc4_crtc->regs + (offset))
 #define CRTC_READ(offset) readl(vc4_crtc->regs + (offset))
 
@@ -298,23 +265,21 @@ vc4_crtc_lut_load(struct drm_crtc *crtc)
                HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_b[i]);
 }
 
-static int
-vc4_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
-                  uint32_t size,
-                  struct drm_modeset_acquire_ctx *ctx)
+static void
+vc4_crtc_update_gamma_lut(struct drm_crtc *crtc)
 {
        struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+       struct drm_color_lut *lut = crtc->state->gamma_lut->data;
+       u32 length = drm_color_lut_size(crtc->state->gamma_lut);
        u32 i;
 
-       for (i = 0; i < size; i++) {
-               vc4_crtc->lut_r[i] = r[i] >> 8;
-               vc4_crtc->lut_g[i] = g[i] >> 8;
-               vc4_crtc->lut_b[i] = b[i] >> 8;
+       for (i = 0; i < length; i++) {
+               vc4_crtc->lut_r[i] = drm_color_lut_extract(lut[i].red, 8);
+               vc4_crtc->lut_g[i] = drm_color_lut_extract(lut[i].green, 8);
+               vc4_crtc->lut_b[i] = drm_color_lut_extract(lut[i].blue, 8);
        }
 
        vc4_crtc_lut_load(crtc);
-
-       return 0;
 }
 
 static u32 vc4_get_fifo_full_level(u32 format)
@@ -699,6 +664,22 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc,
        if (crtc->state->active && old_state->active)
                vc4_crtc_update_dlist(crtc);
 
+       if (crtc->state->color_mgmt_changed) {
+               u32 dispbkgndx = HVS_READ(SCALER_DISPBKGNDX(vc4_crtc->channel));
+
+               if (crtc->state->gamma_lut) {
+                       vc4_crtc_update_gamma_lut(crtc);
+                       dispbkgndx |= SCALER_DISPBKGND_GAMMA;
+               } else {
+                       /* Unsetting DISPBKGND_GAMMA skips the gamma lut step
+                        * in hardware, which is the same as a linear lut that
+                        * DRM expects us to use in absence of a user lut.
+                        */
+                       dispbkgndx &= ~SCALER_DISPBKGND_GAMMA;
+               }
+               HVS_WRITE(SCALER_DISPBKGNDX(vc4_crtc->channel), dispbkgndx);
+       }
+
        if (debug_dump_regs) {
                DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc));
                vc4_hvs_dump_state(dev);
@@ -909,7 +890,7 @@ static const struct drm_crtc_funcs vc4_crtc_funcs = {
        .reset = vc4_crtc_reset,
        .atomic_duplicate_state = vc4_crtc_duplicate_state,
        .atomic_destroy_state = vc4_crtc_destroy_state,
-       .gamma_set = vc4_crtc_gamma_set,
+       .gamma_set = drm_atomic_helper_legacy_gamma_set,
        .enable_vblank = vc4_enable_vblank,
        .disable_vblank = vc4_disable_vblank,
 };
@@ -1035,6 +1016,12 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
        primary_plane->crtc = crtc;
        vc4_crtc->channel = vc4_crtc->data->hvs_channel;
        drm_mode_crtc_set_gamma_size(crtc, ARRAY_SIZE(vc4_crtc->lut_r));
+       drm_crtc_enable_color_mgmt(crtc, 0, false, crtc->gamma_size);
+
+       /* We support CTM, but only for one CRTC at a time. It's therefore
+        * implemented as private driver state in vc4_kms, not here.
+        */
+       drm_crtc_enable_color_mgmt(crtc, 0, true, crtc->gamma_size);
 
        /* Set up some arbitrary number of planes.  We're not limited
         * by a set number of physical registers, just the space in
index 94b99c9..40ddeaa 100644 (file)
@@ -318,8 +318,8 @@ dev_unref:
 
 static void vc4_drm_unbind(struct device *dev)
 {
-       struct platform_device *pdev = to_platform_device(dev);
-       struct drm_device *drm = platform_get_drvdata(pdev);
+       struct drm_device *drm = dev_get_drvdata(dev);
+       struct vc4_dev *vc4 = to_vc4_dev(drm);
 
        drm_dev_unregister(drm);
 
@@ -327,6 +327,8 @@ static void vc4_drm_unbind(struct device *dev)
 
        drm_mode_config_cleanup(drm);
 
+       drm_atomic_private_obj_fini(&vc4->ctm_manager);
+
        drm_dev_unref(drm);
 }
 
index 1b4cd1f..22589d3 100644 (file)
@@ -10,6 +10,7 @@
 #include <drm/drmP.h>
 #include <drm/drm_encoder.h>
 #include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_atomic.h>
 
 #include "uapi/drm/vc4_drm.h"
 
@@ -193,6 +194,9 @@ struct vc4_dev {
        } hangcheck;
 
        struct semaphore async_modeset;
+
+       struct drm_modeset_lock ctm_state_lock;
+       struct drm_private_obj ctm_manager;
 };
 
 static inline struct vc4_dev *
@@ -392,6 +396,39 @@ to_vc4_encoder(struct drm_encoder *encoder)
        return container_of(encoder, struct vc4_encoder, base);
 }
 
+struct vc4_crtc_data {
+       /* Which channel of the HVS this pixelvalve sources from. */
+       int hvs_channel;
+
+       enum vc4_encoder_type encoder_types[4];
+};
+
+struct vc4_crtc {
+       struct drm_crtc base;
+       const struct vc4_crtc_data *data;
+       void __iomem *regs;
+
+       /* Timestamp at start of vblank irq - unaffected by lock delays. */
+       ktime_t t_vblank;
+
+       /* Which HVS channel we're using for our CRTC. */
+       int channel;
+
+       u8 lut_r[256];
+       u8 lut_g[256];
+       u8 lut_b[256];
+       /* Size in pixels of the COB memory allocated to this CRTC. */
+       u32 cob_size;
+
+       struct drm_pending_vblank_event *event;
+};
+
+static inline struct vc4_crtc *
+to_vc4_crtc(struct drm_crtc *crtc)
+{
+       return (struct vc4_crtc *)crtc;
+}
+
 #define V3D_READ(offset) readl(vc4->v3d->regs + offset)
 #define V3D_WRITE(offset, val) writel(val, vc4->v3d->regs + offset)
 #define HVS_READ(offset) readl(vc4->hvs->regs + offset)
index 2b62fc5..5d8c749 100644 (file)
@@ -58,6 +58,10 @@ static const struct {
        HVS_REG(SCALER_DISPSTAT2),
        HVS_REG(SCALER_DISPBASE2),
        HVS_REG(SCALER_DISPALPHA2),
+       HVS_REG(SCALER_OLEDOFFS),
+       HVS_REG(SCALER_OLEDCOEF0),
+       HVS_REG(SCALER_OLEDCOEF1),
+       HVS_REG(SCALER_OLEDCOEF2),
 };
 
 void vc4_hvs_dump_state(struct drm_device *dev)
index ba60153..8a411e5 100644 (file)
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include "vc4_drv.h"
+#include "vc4_regs.h"
+
+struct vc4_ctm_state {
+       struct drm_private_state base;
+       struct drm_color_ctm *ctm;
+       int fifo;
+};
+
+static struct vc4_ctm_state *to_vc4_ctm_state(struct drm_private_state *priv)
+{
+       return container_of(priv, struct vc4_ctm_state, base);
+}
+
+static struct vc4_ctm_state *vc4_get_ctm_state(struct drm_atomic_state *state,
+                                              struct drm_private_obj *manager)
+{
+       struct drm_device *dev = state->dev;
+       struct vc4_dev *vc4 = dev->dev_private;
+       struct drm_private_state *priv_state;
+       int ret;
+
+       ret = drm_modeset_lock(&vc4->ctm_state_lock, state->acquire_ctx);
+       if (ret)
+               return ERR_PTR(ret);
+
+       priv_state = drm_atomic_get_private_obj_state(state, manager);
+       if (IS_ERR(priv_state))
+               return ERR_CAST(priv_state);
+
+       return to_vc4_ctm_state(priv_state);
+}
+
+static struct drm_private_state *
+vc4_ctm_duplicate_state(struct drm_private_obj *obj)
+{
+       struct vc4_ctm_state *state;
+
+       state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
+       if (!state)
+               return NULL;
+
+       __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
+
+       return &state->base;
+}
+
+static void vc4_ctm_destroy_state(struct drm_private_obj *obj,
+                                 struct drm_private_state *state)
+{
+       struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(state);
+
+       kfree(ctm_state);
+}
+
+static const struct drm_private_state_funcs vc4_ctm_state_funcs = {
+       .atomic_duplicate_state = vc4_ctm_duplicate_state,
+       .atomic_destroy_state = vc4_ctm_destroy_state,
+};
+
+/* Converts a DRM S31.32 value to the HW S0.9 format. */
+static u16 vc4_ctm_s31_32_to_s0_9(u64 in)
+{
+       u16 r;
+
+       /* Sign bit. */
+       r = in & BIT_ULL(63) ? BIT(9) : 0;
+
+       if ((in & GENMASK_ULL(62, 32)) > 0) {
+               /* We have zero integer bits so we can only saturate here. */
+               r |= GENMASK(8, 0);
+       } else {
+               /* Otherwise take the 9 most important fractional bits. */
+               r |= (in >> 23) & GENMASK(8, 0);
+       }
+
+       return r;
+}
+
+static void
+vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state)
+{
+       struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(vc4->ctm_manager.state);
+       struct drm_color_ctm *ctm = ctm_state->ctm;
+
+       if (ctm_state->fifo) {
+               HVS_WRITE(SCALER_OLEDCOEF2,
+                         VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[0]),
+                                       SCALER_OLEDCOEF2_R_TO_R) |
+                         VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[3]),
+                                       SCALER_OLEDCOEF2_R_TO_G) |
+                         VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[6]),
+                                       SCALER_OLEDCOEF2_R_TO_B));
+               HVS_WRITE(SCALER_OLEDCOEF1,
+                         VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[1]),
+                                       SCALER_OLEDCOEF1_G_TO_R) |
+                         VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[4]),
+                                       SCALER_OLEDCOEF1_G_TO_G) |
+                         VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[7]),
+                                       SCALER_OLEDCOEF1_G_TO_B));
+               HVS_WRITE(SCALER_OLEDCOEF0,
+                         VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[2]),
+                                       SCALER_OLEDCOEF0_B_TO_R) |
+                         VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[5]),
+                                       SCALER_OLEDCOEF0_B_TO_G) |
+                         VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[8]),
+                                       SCALER_OLEDCOEF0_B_TO_B));
+       }
+
+       HVS_WRITE(SCALER_OLEDOFFS,
+                 VC4_SET_FIELD(ctm_state->fifo, SCALER_OLEDOFFS_DISPFIFO));
+}
 
 static void
 vc4_atomic_complete_commit(struct drm_atomic_state *state)
@@ -36,6 +147,8 @@ vc4_atomic_complete_commit(struct drm_atomic_state *state)
 
        drm_atomic_helper_commit_modeset_disables(dev, state);
 
+       vc4_ctm_commit(vc4, state);
+
        drm_atomic_helper_commit_planes(dev, state, 0);
 
        drm_atomic_helper_commit_modeset_enables(dev, state);
@@ -90,6 +203,26 @@ static int vc4_atomic_commit(struct drm_device *dev,
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        int ret;
 
+       if (state->async_update) {
+               ret = down_interruptible(&vc4->async_modeset);
+               if (ret)
+                       return ret;
+
+               ret = drm_atomic_helper_prepare_planes(dev, state);
+               if (ret) {
+                       up(&vc4->async_modeset);
+                       return ret;
+               }
+
+               drm_atomic_helper_async_commit(dev, state);
+
+               drm_atomic_helper_cleanup_planes(dev, state);
+
+               up(&vc4->async_modeset);
+
+               return 0;
+       }
+
        ret = drm_atomic_helper_setup_commit(state, nonblock);
        if (ret)
                return ret;
@@ -187,9 +320,89 @@ static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
        return drm_gem_fb_create(dev, file_priv, mode_cmd);
 }
 
+/* Our CTM has some peculiar limitations: we can only enable it for one CRTC
+ * at a time and the HW only supports S0.9 scalars. To account for the latter,
+ * we don't allow userland to set a CTM that we have no hope of approximating.
+ */
+static int
+vc4_ctm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
+{
+       struct vc4_dev *vc4 = to_vc4_dev(dev);
+       struct vc4_ctm_state *ctm_state = NULL;
+       struct drm_crtc *crtc;
+       struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+       struct drm_color_ctm *ctm;
+       int i;
+
+       for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+               /* CTM is being disabled. */
+               if (!new_crtc_state->ctm && old_crtc_state->ctm) {
+                       ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
+                       if (IS_ERR(ctm_state))
+                               return PTR_ERR(ctm_state);
+                       ctm_state->fifo = 0;
+               }
+       }
+
+       for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+               if (new_crtc_state->ctm == old_crtc_state->ctm)
+                       continue;
+
+               if (!ctm_state) {
+                       ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
+                       if (IS_ERR(ctm_state))
+                               return PTR_ERR(ctm_state);
+               }
+
+               /* CTM is being enabled or the matrix changed. */
+               if (new_crtc_state->ctm) {
+                       /* fifo is 1-based since 0 disables CTM. */
+                       int fifo = to_vc4_crtc(crtc)->channel + 1;
+
+                       /* Check userland isn't trying to turn on CTM for more
+                        * than one CRTC at a time.
+                        */
+                       if (ctm_state->fifo && ctm_state->fifo != fifo) {
+                               DRM_DEBUG_DRIVER("Too many CTM configured\n");
+                               return -EINVAL;
+                       }
+
+                       /* Check we can approximate the specified CTM.
+                        * We disallow scalars |c| > 1.0 since the HW has
+                        * no integer bits.
+                        */
+                       ctm = new_crtc_state->ctm->data;
+                       for (i = 0; i < ARRAY_SIZE(ctm->matrix); i++) {
+                               u64 val = ctm->matrix[i];
+
+                               val &= ~BIT_ULL(63);
+                               if (val > BIT_ULL(32))
+                                       return -EINVAL;
+                       }
+
+                       ctm_state->fifo = fifo;
+                       ctm_state->ctm = ctm;
+               }
+       }
+
+       return 0;
+}
+
+static int
+vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
+{
+       int ret;
+
+       ret = vc4_ctm_atomic_check(dev, state);
+       if (ret < 0)
+               return ret;
+
+       return drm_atomic_helper_check(dev, state);
+}
+
 static const struct drm_mode_config_funcs vc4_mode_funcs = {
        .output_poll_changed = drm_fb_helper_output_poll_changed,
-       .atomic_check = drm_atomic_helper_check,
+       .atomic_check = vc4_atomic_check,
        .atomic_commit = vc4_atomic_commit,
        .fb_create = vc4_fb_create,
 };
@@ -197,6 +410,7 @@ static const struct drm_mode_config_funcs vc4_mode_funcs = {
 int vc4_kms_load(struct drm_device *dev)
 {
        struct vc4_dev *vc4 = to_vc4_dev(dev);
+       struct vc4_ctm_state *ctm_state;
        int ret;
 
        sema_init(&vc4->async_modeset, 1);
@@ -217,6 +431,14 @@ int vc4_kms_load(struct drm_device *dev)
        dev->mode_config.async_page_flip = true;
        dev->mode_config.allow_fb_modifiers = true;
 
+       drm_modeset_lock_init(&vc4->ctm_state_lock);
+
+       ctm_state = kzalloc(sizeof(*ctm_state), GFP_KERNEL);
+       if (!ctm_state)
+               return -ENOMEM;
+       drm_atomic_private_obj_init(&vc4->ctm_manager, &ctm_state->base,
+                                   &vc4_ctm_state_funcs);
+
        drm_mode_config_reset(dev);
 
        if (dev->mode_config.num_connector)
index ce39390..3483c05 100644 (file)
@@ -201,6 +201,7 @@ static void vc4_plane_reset(struct drm_plane *plane)
                return;
 
        plane->state = &vc4_state->base;
+       plane->state->alpha = DRM_BLEND_ALPHA_OPAQUE;
        vc4_state->base.plane = plane;
 }
 
@@ -467,6 +468,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
        u32 ctl0_offset = vc4_state->dlist_count;
        const struct hvs_format *format = vc4_get_hvs_format(fb->format->format);
        int num_planes = drm_format_num_planes(format->drm);
+       bool mix_plane_alpha;
        bool covers_screen;
        u32 scl0, scl1, pitch0;
        u32 lbm_size, tiling;
@@ -552,7 +554,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
        /* Position Word 0: Image Positions and Alpha Value */
        vc4_state->pos0_offset = vc4_state->dlist_count;
        vc4_dlist_write(vc4_state,
-                       VC4_SET_FIELD(0xff, SCALER_POS0_FIXED_ALPHA) |
+                       VC4_SET_FIELD(state->alpha >> 8, SCALER_POS0_FIXED_ALPHA) |
                        VC4_SET_FIELD(vc4_state->crtc_x, SCALER_POS0_START_X) |
                        VC4_SET_FIELD(vc4_state->crtc_y, SCALER_POS0_START_Y));
 
@@ -565,6 +567,13 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
                                              SCALER_POS1_SCL_HEIGHT));
        }
 
+       /* Don't waste cycles mixing with plane alpha if the set alpha
+        * is opaque or there is no per-pixel alpha information.
+        * In any case we use the alpha property value as the fixed alpha.
+        */
+       mix_plane_alpha = state->alpha != DRM_BLEND_ALPHA_OPAQUE &&
+                         fb->format->has_alpha;
+
        /* Position Word 2: Source Image Size, Alpha */
        vc4_state->pos2_offset = vc4_state->dlist_count;
        vc4_dlist_write(vc4_state,
@@ -572,6 +581,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
                                      SCALER_POS2_ALPHA_MODE_PIPELINE :
                                      SCALER_POS2_ALPHA_MODE_FIXED,
                                      SCALER_POS2_ALPHA_MODE) |
+                       (mix_plane_alpha ? SCALER_POS2_ALPHA_MIX : 0) |
                        (fb->format->has_alpha ? SCALER_POS2_ALPHA_PREMULT : 0) |
                        VC4_SET_FIELD(vc4_state->src_w[0], SCALER_POS2_WIDTH) |
                        VC4_SET_FIELD(vc4_state->src_h[0], SCALER_POS2_HEIGHT));
@@ -653,10 +663,11 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
                        vc4_state->crtc_w == state->crtc->mode.hdisplay &&
                        vc4_state->crtc_h == state->crtc->mode.vdisplay;
        /* Background fill might be necessary when the plane has per-pixel
-        * alpha content and blends from the background or does not cover
-        * the entire screen.
+        * alpha content or a non-opaque plane alpha and could blend from the
+        * background or does not cover the entire screen.
         */
-       vc4_state->needs_bg_fill = fb->format->has_alpha || !covers_screen;
+       vc4_state->needs_bg_fill = fb->format->has_alpha || !covers_screen ||
+                                  state->alpha != DRM_BLEND_ALPHA_OPAQUE;
 
        return 0;
 }
@@ -741,6 +752,57 @@ void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb)
        vc4_state->dlist[vc4_state->ptr0_offset] = addr;
 }
 
+static void vc4_plane_atomic_async_update(struct drm_plane *plane,
+                                         struct drm_plane_state *state)
+{
+       struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state);
+
+       if (plane->state->fb != state->fb) {
+               vc4_plane_async_set_fb(plane, state->fb);
+               drm_atomic_set_fb_for_plane(plane->state, state->fb);
+       }
+
+       /* Set the cursor's position on the screen.  This is the
+        * expected change from the drm_mode_cursor_universal()
+        * helper.
+        */
+       plane->state->crtc_x = state->crtc_x;
+       plane->state->crtc_y = state->crtc_y;
+
+       /* Allow changing the start position within the cursor BO, if
+        * that matters.
+        */
+       plane->state->src_x = state->src_x;
+       plane->state->src_y = state->src_y;
+
+       /* Update the display list based on the new crtc_x/y. */
+       vc4_plane_atomic_check(plane, plane->state);
+
+       /* Note that we can't just call vc4_plane_write_dlist()
+        * because that would smash the context data that the HVS is
+        * currently using.
+        */
+       writel(vc4_state->dlist[vc4_state->pos0_offset],
+              &vc4_state->hw_dlist[vc4_state->pos0_offset]);
+       writel(vc4_state->dlist[vc4_state->pos2_offset],
+              &vc4_state->hw_dlist[vc4_state->pos2_offset]);
+       writel(vc4_state->dlist[vc4_state->ptr0_offset],
+              &vc4_state->hw_dlist[vc4_state->ptr0_offset]);
+}
+
+static int vc4_plane_atomic_async_check(struct drm_plane *plane,
+                                       struct drm_plane_state *state)
+{
+       /* No configuring new scaling in the fast path. */
+       if (plane->state->crtc_w != state->crtc_w ||
+           plane->state->crtc_h != state->crtc_h ||
+           plane->state->src_w != state->src_w ||
+           plane->state->src_h != state->src_h)
+               return -EINVAL;
+
+       return 0;
+}
+
 static int vc4_prepare_fb(struct drm_plane *plane,
                          struct drm_plane_state *state)
 {
@@ -780,6 +842,8 @@ static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = {
        .atomic_update = vc4_plane_atomic_update,
        .prepare_fb = vc4_prepare_fb,
        .cleanup_fb = vc4_cleanup_fb,
+       .atomic_async_check = vc4_plane_atomic_async_check,
+       .atomic_async_update = vc4_plane_atomic_async_update,
 };
 
 static void vc4_plane_destroy(struct drm_plane *plane)
@@ -788,82 +852,6 @@ static void vc4_plane_destroy(struct drm_plane *plane)
        drm_plane_cleanup(plane);
 }
 
-/* Implements immediate (non-vblank-synced) updates of the cursor
- * position, or falls back to the atomic helper otherwise.
- */
-static int
-vc4_update_plane(struct drm_plane *plane,
-                struct drm_crtc *crtc,
-                struct drm_framebuffer *fb,
-                int crtc_x, int crtc_y,
-                unsigned int crtc_w, unsigned int crtc_h,
-                uint32_t src_x, uint32_t src_y,
-                uint32_t src_w, uint32_t src_h,
-                struct drm_modeset_acquire_ctx *ctx)
-{
-       struct drm_plane_state *plane_state;
-       struct vc4_plane_state *vc4_state;
-
-       if (plane != crtc->cursor)
-               goto out;
-
-       plane_state = plane->state;
-       vc4_state = to_vc4_plane_state(plane_state);
-
-       if (!plane_state)
-               goto out;
-
-       /* No configuring new scaling in the fast path. */
-       if (crtc_w != plane_state->crtc_w ||
-           crtc_h != plane_state->crtc_h ||
-           src_w != plane_state->src_w ||
-           src_h != plane_state->src_h) {
-               goto out;
-       }
-
-       if (fb != plane_state->fb) {
-               drm_atomic_set_fb_for_plane(plane->state, fb);
-               vc4_plane_async_set_fb(plane, fb);
-       }
-
-       /* Set the cursor's position on the screen.  This is the
-        * expected change from the drm_mode_cursor_universal()
-        * helper.
-        */
-       plane_state->crtc_x = crtc_x;
-       plane_state->crtc_y = crtc_y;
-
-       /* Allow changing the start position within the cursor BO, if
-        * that matters.
-        */
-       plane_state->src_x = src_x;
-       plane_state->src_y = src_y;
-
-       /* Update the display list based on the new crtc_x/y. */
-       vc4_plane_atomic_check(plane, plane_state);
-
-       /* Note that we can't just call vc4_plane_write_dlist()
-        * because that would smash the context data that the HVS is
-        * currently using.
-        */
-       writel(vc4_state->dlist[vc4_state->pos0_offset],
-              &vc4_state->hw_dlist[vc4_state->pos0_offset]);
-       writel(vc4_state->dlist[vc4_state->pos2_offset],
-              &vc4_state->hw_dlist[vc4_state->pos2_offset]);
-       writel(vc4_state->dlist[vc4_state->ptr0_offset],
-              &vc4_state->hw_dlist[vc4_state->ptr0_offset]);
-
-       return 0;
-
-out:
-       return drm_atomic_helper_update_plane(plane, crtc, fb,
-                                             crtc_x, crtc_y,
-                                             crtc_w, crtc_h,
-                                             src_x, src_y,
-                                             src_w, src_h,
-                                             ctx);
-}
-
 static bool vc4_format_mod_supported(struct drm_plane *plane,
                                     uint32_t format,
                                     uint64_t modifier)
@@ -891,7 +879,7 @@ static bool vc4_format_mod_supported(struct drm_plane *plane,
 }
 
 static const struct drm_plane_funcs vc4_plane_funcs = {
-       .update_plane = vc4_update_plane,
+       .update_plane = drm_atomic_helper_update_plane,
        .disable_plane = drm_atomic_helper_disable_plane,
        .destroy = vc4_plane_destroy,
        .set_property = NULL,
@@ -939,5 +927,7 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
 
        drm_plane_helper_add(plane, &vc4_plane_helper_funcs);
 
+       drm_plane_create_alpha_property(plane);
+
        return plane;
 }
index a141496..d1fb6fe 100644 (file)
 #define SCALER_DISPCTRL0                        0x00000040
 # define SCALER_DISPCTRLX_ENABLE               BIT(31)
 # define SCALER_DISPCTRLX_RESET                        BIT(30)
+/* Generates a single frame when VSTART is seen and stops at the last
+ * pixel read from the FIFO.
+ */
+# define SCALER_DISPCTRLX_ONESHOT              BIT(29)
+/* Processes a single context in the dlist and then task switch,
+ * instead of an entire line.
+ */
+# define SCALER_DISPCTRLX_ONECTX               BIT(28)
+/* Set to have DISPSLAVE return 2 16bpp pixels and no status data. */
+# define SCALER_DISPCTRLX_FIFO32               BIT(27)
+/* Turns on output to the DISPSLAVE register instead of the normal
+ * FIFO.
+ */
+# define SCALER_DISPCTRLX_FIFOREG              BIT(26)
+
 # define SCALER_DISPCTRLX_WIDTH_MASK           VC4_MASK(23, 12)
 # define SCALER_DISPCTRLX_WIDTH_SHIFT          12
 # define SCALER_DISPCTRLX_HEIGHT_MASK          VC4_MASK(11, 0)
  */
 # define SCALER_GAMADDR_SRAMENB                        BIT(30)
 
+#define SCALER_OLEDOFFS                         0x00000080
+/* Clamps R to [16,235] and G/B to [16,240]. */
+# define SCALER_OLEDOFFS_YUVCLAMP               BIT(31)
+
+/* Chooses which display FIFO the matrix applies to. */
+# define SCALER_OLEDOFFS_DISPFIFO_MASK          VC4_MASK(25, 24)
+# define SCALER_OLEDOFFS_DISPFIFO_SHIFT         24
+# define SCALER_OLEDOFFS_DISPFIFO_DISABLED      0
+# define SCALER_OLEDOFFS_DISPFIFO_0             1
+# define SCALER_OLEDOFFS_DISPFIFO_1             2
+# define SCALER_OLEDOFFS_DISPFIFO_2             3
+
+/* Offsets are 8-bit 2s-complement. */
+# define SCALER_OLEDOFFS_RED_MASK               VC4_MASK(23, 16)
+# define SCALER_OLEDOFFS_RED_SHIFT              16
+# define SCALER_OLEDOFFS_GREEN_MASK             VC4_MASK(15, 8)
+# define SCALER_OLEDOFFS_GREEN_SHIFT            8
+# define SCALER_OLEDOFFS_BLUE_MASK              VC4_MASK(7, 0)
+# define SCALER_OLEDOFFS_BLUE_SHIFT             0
+
+/* The coefficients are S0.9 fractions. */
+#define SCALER_OLEDCOEF0                        0x00000084
+# define SCALER_OLEDCOEF0_B_TO_R_MASK           VC4_MASK(29, 20)
+# define SCALER_OLEDCOEF0_B_TO_R_SHIFT          20
+# define SCALER_OLEDCOEF0_B_TO_G_MASK           VC4_MASK(19, 10)
+# define SCALER_OLEDCOEF0_B_TO_G_SHIFT          10
+# define SCALER_OLEDCOEF0_B_TO_B_MASK           VC4_MASK(9, 0)
+# define SCALER_OLEDCOEF0_B_TO_B_SHIFT          0
+
+#define SCALER_OLEDCOEF1                        0x00000088
+# define SCALER_OLEDCOEF1_G_TO_R_MASK           VC4_MASK(29, 20)
+# define SCALER_OLEDCOEF1_G_TO_R_SHIFT          20
+# define SCALER_OLEDCOEF1_G_TO_G_MASK           VC4_MASK(19, 10)
+# define SCALER_OLEDCOEF1_G_TO_G_SHIFT          10
+# define SCALER_OLEDCOEF1_G_TO_B_MASK           VC4_MASK(9, 0)
+# define SCALER_OLEDCOEF1_G_TO_B_SHIFT          0
+
+#define SCALER_OLEDCOEF2                        0x0000008c
+# define SCALER_OLEDCOEF2_R_TO_R_MASK           VC4_MASK(29, 20)
+# define SCALER_OLEDCOEF2_R_TO_R_SHIFT          20
+# define SCALER_OLEDCOEF2_R_TO_G_MASK           VC4_MASK(19, 10)
+# define SCALER_OLEDCOEF2_R_TO_G_SHIFT          10
+# define SCALER_OLEDCOEF2_R_TO_B_MASK           VC4_MASK(9, 0)
+# define SCALER_OLEDCOEF2_R_TO_B_SHIFT          0
+
+/* Slave addresses for DMAing from HVS composition output to other
+ * devices.  The top bits are valid only in !FIFO32 mode.
+ */
+#define SCALER_DISPSLAVE0                       0x000000c0
+#define SCALER_DISPSLAVE1                       0x000000c9
+#define SCALER_DISPSLAVE2                       0x000000d0
+# define SCALER_DISPSLAVE_ISSUE_VSTART          BIT(31)
+# define SCALER_DISPSLAVE_ISSUE_HSTART          BIT(30)
+/* Set when the current line has been read and an HSTART is required. */
+# define SCALER_DISPSLAVE_EOL                   BIT(26)
+/* Set when the display FIFO is empty. */
+# define SCALER_DISPSLAVE_EMPTY                 BIT(25)
+/* Set when there is RGB data ready to read. */
+# define SCALER_DISPSLAVE_VALID                 BIT(24)
+# define SCALER_DISPSLAVE_RGB_MASK              VC4_MASK(23, 0)
+# define SCALER_DISPSLAVE_RGB_SHIFT             0
+
 #define SCALER_GAMDATA                          0x000000e0
 #define SCALER_DLIST_START                      0x00002000
 #define SCALER_DLIST_SIZE                       0x00004000
@@ -767,6 +844,10 @@ enum hvs_pixel_format {
        HVS_PIXEL_FORMAT_YCBCR_YUV420_2PLANE = 9,
        HVS_PIXEL_FORMAT_YCBCR_YUV422_3PLANE = 10,
        HVS_PIXEL_FORMAT_YCBCR_YUV422_2PLANE = 11,
+       HVS_PIXEL_FORMAT_H264 = 12,
+       HVS_PIXEL_FORMAT_PALETTE = 13,
+       HVS_PIXEL_FORMAT_YUV444_RGB = 14,
+       HVS_PIXEL_FORMAT_AYUV444_RGB = 15,
 };
 
 /* Note: the LSB is the rightmost character shown.  Only valid for
@@ -800,12 +881,27 @@ enum hvs_pixel_format {
 #define SCALER_CTL0_TILING_128B                        2
 #define SCALER_CTL0_TILING_256B_OR_T           3
 
+#define SCALER_CTL0_ALPHA_MASK                  BIT(19)
 #define SCALER_CTL0_HFLIP                       BIT(16)
 #define SCALER_CTL0_VFLIP                       BIT(15)
 
+#define SCALER_CTL0_KEY_MODE_MASK              VC4_MASK(18, 17)
+#define SCALER_CTL0_KEY_MODE_SHIFT             17
+#define SCALER_CTL0_KEY_DISABLED               0
+#define SCALER_CTL0_KEY_LUMA_OR_COMMON_RGB     1
+#define SCALER_CTL0_KEY_MATCH                  2 /* turn transparent */
+#define SCALER_CTL0_KEY_REPLACE                        3 /* replace with value from key mask word 2 */
+
 #define SCALER_CTL0_ORDER_MASK                 VC4_MASK(14, 13)
 #define SCALER_CTL0_ORDER_SHIFT                        13
 
+#define SCALER_CTL0_RGBA_EXPAND_MASK           VC4_MASK(12, 11)
+#define SCALER_CTL0_RGBA_EXPAND_SHIFT          11
+#define SCALER_CTL0_RGBA_EXPAND_ZERO           0
+#define SCALER_CTL0_RGBA_EXPAND_LSB            1
+#define SCALER_CTL0_RGBA_EXPAND_MSB            2
+#define SCALER_CTL0_RGBA_EXPAND_ROUND          3
+
 #define SCALER_CTL0_SCL1_MASK                  VC4_MASK(10, 8)
 #define SCALER_CTL0_SCL1_SHIFT                 8
 
@@ -849,6 +945,7 @@ enum hvs_pixel_format {
 #define SCALER_POS2_ALPHA_MODE_FIXED_NONZERO   2
 #define SCALER_POS2_ALPHA_MODE_FIXED_OVER_0x07 3
 #define SCALER_POS2_ALPHA_PREMULT              BIT(29)
+#define SCALER_POS2_ALPHA_MIX                  BIT(28)
 
 #define SCALER_POS2_HEIGHT_MASK                        VC4_MASK(27, 16)
 #define SCALER_POS2_HEIGHT_SHIFT               16
index 8cc8c34..a5edd86 100644 (file)
@@ -208,7 +208,7 @@ static int virtio_gpu_conn_get_modes(struct drm_connector *connector)
        return count;
 }
 
-static int virtio_gpu_conn_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status virtio_gpu_conn_mode_valid(struct drm_connector *connector,
                                      struct drm_display_mode *mode)
 {
        struct virtio_gpu_output *output =
index f11601b..6728c62 100644 (file)
@@ -384,9 +384,9 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
        hotspot_x = du->hotspot_x;
        hotspot_y = du->hotspot_y;
 
-       if (plane->fb) {
-               hotspot_x += plane->fb->hot_x;
-               hotspot_y += plane->fb->hot_y;
+       if (plane->state->fb) {
+               hotspot_x += plane->state->fb->hot_x;
+               hotspot_y += plane->state->fb->hot_y;
        }
 
        du->cursor_surface = vps->surf;
index 3824595..4a5907e 100644 (file)
@@ -281,39 +281,6 @@ drm_connector_helper_funcs vmw_ldu_connector_helper_funcs = {
  * Legacy Display Plane Functions
  */
 
-/**
- * vmw_ldu_primary_plane_cleanup_fb - Noop
- *
- * @plane:  display plane
- * @old_state: Contains the FB to clean up
- *
- * Unpins the display surface
- *
- * Returns 0 on success
- */
-static void
-vmw_ldu_primary_plane_cleanup_fb(struct drm_plane *plane,
-                                struct drm_plane_state *old_state)
-{
-}
-
-
-/**
- * vmw_ldu_primary_plane_prepare_fb - Noop
- *
- * @plane:  display plane
- * @new_state: info on the new plane state, including the FB
- *
- * Returns 0 on success
- */
-static int
-vmw_ldu_primary_plane_prepare_fb(struct drm_plane *plane,
-                                struct drm_plane_state *new_state)
-{
-       return 0;
-}
-
-
 static void
 vmw_ldu_primary_plane_atomic_update(struct drm_plane *plane,
                                    struct drm_plane_state *old_state)
@@ -373,8 +340,6 @@ static const struct
 drm_plane_helper_funcs vmw_ldu_primary_plane_helper_funcs = {
        .atomic_check = vmw_du_primary_plane_atomic_check,
        .atomic_update = vmw_ldu_primary_plane_atomic_update,
-       .prepare_fb = vmw_ldu_primary_plane_prepare_fb,
-       .cleanup_fb = vmw_ldu_primary_plane_cleanup_fb,
 };
 
 static const struct drm_crtc_helper_funcs vmw_ldu_crtc_helper_funcs = {
diff --git a/drivers/gpu/drm/xen/Kconfig b/drivers/gpu/drm/xen/Kconfig
new file mode 100644 (file)
index 0000000..4cca160
--- /dev/null
@@ -0,0 +1,17 @@
+config DRM_XEN
+       bool "DRM Support for Xen guest OS"
+       depends on XEN
+       help
+         Choose this option if you want to enable DRM support
+         for Xen.
+
+config DRM_XEN_FRONTEND
+       tristate "Para-virtualized frontend driver for Xen guest OS"
+       depends on DRM_XEN
+       depends on DRM
+       select DRM_KMS_HELPER
+       select VIDEOMODE_HELPERS
+       select XEN_XENBUS_FRONTEND
+       help
+         Choose this option if you want to enable a para-virtualized
+         frontend DRM/KMS driver for Xen guest OSes.
diff --git a/drivers/gpu/drm/xen/Makefile b/drivers/gpu/drm/xen/Makefile
new file mode 100644 (file)
index 0000000..712afff
--- /dev/null
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0 OR MIT
+
+drm_xen_front-objs := xen_drm_front.o \
+                     xen_drm_front_kms.o \
+                     xen_drm_front_conn.o \
+                     xen_drm_front_evtchnl.o \
+                     xen_drm_front_shbuf.o \
+                     xen_drm_front_cfg.o \
+                     xen_drm_front_gem.o
+
+obj-$(CONFIG_DRM_XEN_FRONTEND) += drm_xen_front.o
diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c
new file mode 100644 (file)
index 0000000..1b0ea9a
--- /dev/null
@@ -0,0 +1,840 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+/*
+ *  Xen para-virtual DRM device
+ *
+ * Copyright (C) 2016-2018 EPAM Systems Inc.
+ *
+ * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem.h>
+
+#include <linux/of_device.h>
+
+#include <xen/platform_pci.h>
+#include <xen/xen.h>
+#include <xen/xenbus.h>
+
+#include <xen/interface/io/displif.h>
+
+#include "xen_drm_front.h"
+#include "xen_drm_front_cfg.h"
+#include "xen_drm_front_evtchnl.h"
+#include "xen_drm_front_gem.h"
+#include "xen_drm_front_kms.h"
+#include "xen_drm_front_shbuf.h"
+
+struct xen_drm_front_dbuf {
+       struct list_head list;
+       u64 dbuf_cookie;
+       u64 fb_cookie;
+       struct xen_drm_front_shbuf *shbuf;
+};
+
+static int dbuf_add_to_list(struct xen_drm_front_info *front_info,
+                           struct xen_drm_front_shbuf *shbuf, u64 dbuf_cookie)
+{
+       struct xen_drm_front_dbuf *dbuf;
+
+       dbuf = kzalloc(sizeof(*dbuf), GFP_KERNEL);
+       if (!dbuf)
+               return -ENOMEM;
+
+       dbuf->dbuf_cookie = dbuf_cookie;
+       dbuf->shbuf = shbuf;
+       list_add(&dbuf->list, &front_info->dbuf_list);
+       return 0;
+}
+
+static struct xen_drm_front_dbuf *dbuf_get(struct list_head *dbuf_list,
+                                          u64 dbuf_cookie)
+{
+       struct xen_drm_front_dbuf *buf, *q;
+
+       list_for_each_entry_safe(buf, q, dbuf_list, list)
+               if (buf->dbuf_cookie == dbuf_cookie)
+                       return buf;
+
+       return NULL;
+}
+
+static void dbuf_flush_fb(struct list_head *dbuf_list, u64 fb_cookie)
+{
+       struct xen_drm_front_dbuf *buf, *q;
+
+       list_for_each_entry_safe(buf, q, dbuf_list, list)
+               if (buf->fb_cookie == fb_cookie)
+                       xen_drm_front_shbuf_flush(buf->shbuf);
+}
+
+static void dbuf_free(struct list_head *dbuf_list, u64 dbuf_cookie)
+{
+       struct xen_drm_front_dbuf *buf, *q;
+
+       list_for_each_entry_safe(buf, q, dbuf_list, list)
+               if (buf->dbuf_cookie == dbuf_cookie) {
+                       list_del(&buf->list);
+                       xen_drm_front_shbuf_unmap(buf->shbuf);
+                       xen_drm_front_shbuf_free(buf->shbuf);
+                       kfree(buf);
+                       break;
+               }
+}
+
+static void dbuf_free_all(struct list_head *dbuf_list)
+{
+       struct xen_drm_front_dbuf *buf, *q;
+
+       list_for_each_entry_safe(buf, q, dbuf_list, list) {
+               list_del(&buf->list);
+               xen_drm_front_shbuf_unmap(buf->shbuf);
+               xen_drm_front_shbuf_free(buf->shbuf);
+               kfree(buf);
+       }
+}
+
+static struct xendispl_req *
+be_prepare_req(struct xen_drm_front_evtchnl *evtchnl, u8 operation)
+{
+       struct xendispl_req *req;
+
+       req = RING_GET_REQUEST(&evtchnl->u.req.ring,
+                              evtchnl->u.req.ring.req_prod_pvt);
+       req->operation = operation;
+       req->id = evtchnl->evt_next_id++;
+       evtchnl->evt_id = req->id;
+       return req;
+}
+
+static int be_stream_do_io(struct xen_drm_front_evtchnl *evtchnl,
+                          struct xendispl_req *req)
+{
+       reinit_completion(&evtchnl->u.req.completion);
+       if (unlikely(evtchnl->state != EVTCHNL_STATE_CONNECTED))
+               return -EIO;
+
+       xen_drm_front_evtchnl_flush(evtchnl);
+       return 0;
+}
+
+static int be_stream_wait_io(struct xen_drm_front_evtchnl *evtchnl)
+{
+       if (wait_for_completion_timeout(&evtchnl->u.req.completion,
+                       msecs_to_jiffies(XEN_DRM_FRONT_WAIT_BACK_MS)) <= 0)
+               return -ETIMEDOUT;
+
+       return evtchnl->u.req.resp_status;
+}
+
+int xen_drm_front_mode_set(struct xen_drm_front_drm_pipeline *pipeline,
+                          u32 x, u32 y, u32 width, u32 height,
+                          u32 bpp, u64 fb_cookie)
+{
+       struct xen_drm_front_evtchnl *evtchnl;
+       struct xen_drm_front_info *front_info;
+       struct xendispl_req *req;
+       unsigned long flags;
+       int ret;
+
+       front_info = pipeline->drm_info->front_info;
+       evtchnl = &front_info->evt_pairs[pipeline->index].req;
+       if (unlikely(!evtchnl))
+               return -EIO;
+
+       mutex_lock(&evtchnl->u.req.req_io_lock);
+
+       spin_lock_irqsave(&front_info->io_lock, flags);
+       req = be_prepare_req(evtchnl, XENDISPL_OP_SET_CONFIG);
+       req->op.set_config.x = x;
+       req->op.set_config.y = y;
+       req->op.set_config.width = width;
+       req->op.set_config.height = height;
+       req->op.set_config.bpp = bpp;
+       req->op.set_config.fb_cookie = fb_cookie;
+
+       ret = be_stream_do_io(evtchnl, req);
+       spin_unlock_irqrestore(&front_info->io_lock, flags);
+
+       if (ret == 0)
+               ret = be_stream_wait_io(evtchnl);
+
+       mutex_unlock(&evtchnl->u.req.req_io_lock);
+       return ret;
+}
+
+int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
+                             u64 dbuf_cookie, u32 width, u32 height,
+                             u32 bpp, u64 size, struct page **pages)
+{
+       struct xen_drm_front_evtchnl *evtchnl;
+       struct xen_drm_front_shbuf *shbuf;
+       struct xendispl_req *req;
+       struct xen_drm_front_shbuf_cfg buf_cfg;
+       unsigned long flags;
+       int ret;
+
+       evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
+       if (unlikely(!evtchnl))
+               return -EIO;
+
+       memset(&buf_cfg, 0, sizeof(buf_cfg));
+       buf_cfg.xb_dev = front_info->xb_dev;
+       buf_cfg.pages = pages;
+       buf_cfg.size = size;
+       buf_cfg.be_alloc = front_info->cfg.be_alloc;
+
+       shbuf = xen_drm_front_shbuf_alloc(&buf_cfg);
+       if (!shbuf)
+               return -ENOMEM;
+
+       ret = dbuf_add_to_list(front_info, shbuf, dbuf_cookie);
+       if (ret < 0) {
+               xen_drm_front_shbuf_free(shbuf);
+               return ret;
+       }
+
+       mutex_lock(&evtchnl->u.req.req_io_lock);
+
+       spin_lock_irqsave(&front_info->io_lock, flags);
+       req = be_prepare_req(evtchnl, XENDISPL_OP_DBUF_CREATE);
+       req->op.dbuf_create.gref_directory =
+                       xen_drm_front_shbuf_get_dir_start(shbuf);
+       req->op.dbuf_create.buffer_sz = size;
+       req->op.dbuf_create.dbuf_cookie = dbuf_cookie;
+       req->op.dbuf_create.width = width;
+       req->op.dbuf_create.height = height;
+       req->op.dbuf_create.bpp = bpp;
+       if (buf_cfg.be_alloc)
+               req->op.dbuf_create.flags |= XENDISPL_DBUF_FLG_REQ_ALLOC;
+
+       ret = be_stream_do_io(evtchnl, req);
+       spin_unlock_irqrestore(&front_info->io_lock, flags);
+
+       if (ret < 0)
+               goto fail;
+
+       ret = be_stream_wait_io(evtchnl);
+       if (ret < 0)
+               goto fail;
+
+       ret = xen_drm_front_shbuf_map(shbuf);
+       if (ret < 0)
+               goto fail;
+
+       mutex_unlock(&evtchnl->u.req.req_io_lock);
+       return 0;
+
+fail:
+       mutex_unlock(&evtchnl->u.req.req_io_lock);
+       dbuf_free(&front_info->dbuf_list, dbuf_cookie);
+       return ret;
+}
+
+static int xen_drm_front_dbuf_destroy(struct xen_drm_front_info *front_info,
+                                     u64 dbuf_cookie)
+{
+       struct xen_drm_front_evtchnl *evtchnl;
+       struct xendispl_req *req;
+       unsigned long flags;
+       bool be_alloc;
+       int ret;
+
+       evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
+       if (unlikely(!evtchnl))
+               return -EIO;
+
+       be_alloc = front_info->cfg.be_alloc;
+
+       /*
+        * For the backend allocated buffer release references now, so backend
+        * can free the buffer.
+        */
+       if (be_alloc)
+               dbuf_free(&front_info->dbuf_list, dbuf_cookie);
+
+       mutex_lock(&evtchnl->u.req.req_io_lock);
+
+       spin_lock_irqsave(&front_info->io_lock, flags);
+       req = be_prepare_req(evtchnl, XENDISPL_OP_DBUF_DESTROY);
+       req->op.dbuf_destroy.dbuf_cookie = dbuf_cookie;
+
+       ret = be_stream_do_io(evtchnl, req);
+       spin_unlock_irqrestore(&front_info->io_lock, flags);
+
+       if (ret == 0)
+               ret = be_stream_wait_io(evtchnl);
+
+       /*
+        * Do this regardless of communication status with the backend:
+        * if we cannot remove remote resources remove what we can locally.
+        */
+       if (!be_alloc)
+               dbuf_free(&front_info->dbuf_list, dbuf_cookie);
+
+       mutex_unlock(&evtchnl->u.req.req_io_lock);
+       return ret;
+}
+
+int xen_drm_front_fb_attach(struct xen_drm_front_info *front_info,
+                           u64 dbuf_cookie, u64 fb_cookie, u32 width,
+                           u32 height, u32 pixel_format)
+{
+       struct xen_drm_front_evtchnl *evtchnl;
+       struct xen_drm_front_dbuf *buf;
+       struct xendispl_req *req;
+       unsigned long flags;
+       int ret;
+
+       evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
+       if (unlikely(!evtchnl))
+               return -EIO;
+
+       buf = dbuf_get(&front_info->dbuf_list, dbuf_cookie);
+       if (!buf)
+               return -EINVAL;
+
+       buf->fb_cookie = fb_cookie;
+
+       mutex_lock(&evtchnl->u.req.req_io_lock);
+
+       spin_lock_irqsave(&front_info->io_lock, flags);
+       req = be_prepare_req(evtchnl, XENDISPL_OP_FB_ATTACH);
+       req->op.fb_attach.dbuf_cookie = dbuf_cookie;
+       req->op.fb_attach.fb_cookie = fb_cookie;
+       req->op.fb_attach.width = width;
+       req->op.fb_attach.height = height;
+       req->op.fb_attach.pixel_format = pixel_format;
+
+       ret = be_stream_do_io(evtchnl, req);
+       spin_unlock_irqrestore(&front_info->io_lock, flags);
+
+       if (ret == 0)
+               ret = be_stream_wait_io(evtchnl);
+
+       mutex_unlock(&evtchnl->u.req.req_io_lock);
+       return ret;
+}
+
+int xen_drm_front_fb_detach(struct xen_drm_front_info *front_info,
+                           u64 fb_cookie)
+{
+       struct xen_drm_front_evtchnl *evtchnl;
+       struct xendispl_req *req;
+       unsigned long flags;
+       int ret;
+
+       evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
+       if (unlikely(!evtchnl))
+               return -EIO;
+
+       mutex_lock(&evtchnl->u.req.req_io_lock);
+
+       spin_lock_irqsave(&front_info->io_lock, flags);
+       req = be_prepare_req(evtchnl, XENDISPL_OP_FB_DETACH);
+       req->op.fb_detach.fb_cookie = fb_cookie;
+
+       ret = be_stream_do_io(evtchnl, req);
+       spin_unlock_irqrestore(&front_info->io_lock, flags);
+
+       if (ret == 0)
+               ret = be_stream_wait_io(evtchnl);
+
+       mutex_unlock(&evtchnl->u.req.req_io_lock);
+       return ret;
+}
+
+int xen_drm_front_page_flip(struct xen_drm_front_info *front_info,
+                           int conn_idx, u64 fb_cookie)
+{
+       struct xen_drm_front_evtchnl *evtchnl;
+       struct xendispl_req *req;
+       unsigned long flags;
+       int ret;
+
+       if (unlikely(conn_idx >= front_info->num_evt_pairs))
+               return -EINVAL;
+
+       dbuf_flush_fb(&front_info->dbuf_list, fb_cookie);
+       evtchnl = &front_info->evt_pairs[conn_idx].req;
+
+       mutex_lock(&evtchnl->u.req.req_io_lock);
+
+       spin_lock_irqsave(&front_info->io_lock, flags);
+       req = be_prepare_req(evtchnl, XENDISPL_OP_PG_FLIP);
+       req->op.pg_flip.fb_cookie = fb_cookie;
+
+       ret = be_stream_do_io(evtchnl, req);
+       spin_unlock_irqrestore(&front_info->io_lock, flags);
+
+       if (ret == 0)
+               ret = be_stream_wait_io(evtchnl);
+
+       mutex_unlock(&evtchnl->u.req.req_io_lock);
+       return ret;
+}
+
+void xen_drm_front_on_frame_done(struct xen_drm_front_info *front_info,
+                                int conn_idx, u64 fb_cookie)
+{
+       struct xen_drm_front_drm_info *drm_info = front_info->drm_info;
+
+       if (unlikely(conn_idx >= front_info->cfg.num_connectors))
+               return;
+
+       xen_drm_front_kms_on_frame_done(&drm_info->pipeline[conn_idx],
+                                       fb_cookie);
+}
+
+static int xen_drm_drv_dumb_create(struct drm_file *filp,
+                                  struct drm_device *dev,
+                                  struct drm_mode_create_dumb *args)
+{
+       struct xen_drm_front_drm_info *drm_info = dev->dev_private;
+       struct drm_gem_object *obj;
+       int ret;
+
+       /*
+        * Dumb creation is a two stage process: first we create a fully
+        * constructed GEM object which is communicated to the backend, and
+        * only after that we can create GEM's handle. This is done so,
+        * because of the possible races: once you create a handle it becomes
+        * immediately visible to user-space, so the latter can try accessing
+        * object without pages etc.
+        * For details also see drm_gem_handle_create
+        */
+       args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+       args->size = args->pitch * args->height;
+
+       obj = xen_drm_front_gem_create(dev, args->size);
+       if (IS_ERR_OR_NULL(obj)) {
+               ret = PTR_ERR(obj);
+               goto fail;
+       }
+
+       ret = xen_drm_front_dbuf_create(drm_info->front_info,
+                                       xen_drm_front_dbuf_to_cookie(obj),
+                                       args->width, args->height, args->bpp,
+                                       args->size,
+                                       xen_drm_front_gem_get_pages(obj));
+       if (ret)
+               goto fail_backend;
+
+       /* This is the tail of GEM object creation */
+       ret = drm_gem_handle_create(filp, obj, &args->handle);
+       if (ret)
+               goto fail_handle;
+
+       /* Drop reference from allocate - handle holds it now */
+       drm_gem_object_put_unlocked(obj);
+       return 0;
+
+fail_handle:
+       xen_drm_front_dbuf_destroy(drm_info->front_info,
+                                  xen_drm_front_dbuf_to_cookie(obj));
+fail_backend:
+       /* drop reference from allocate */
+       drm_gem_object_put_unlocked(obj);
+fail:
+       DRM_ERROR("Failed to create dumb buffer: %d\n", ret);
+       return ret;
+}
+
+static void xen_drm_drv_free_object_unlocked(struct drm_gem_object *obj)
+{
+       struct xen_drm_front_drm_info *drm_info = obj->dev->dev_private;
+       int idx;
+
+       if (drm_dev_enter(obj->dev, &idx)) {
+               xen_drm_front_dbuf_destroy(drm_info->front_info,
+                                          xen_drm_front_dbuf_to_cookie(obj));
+               drm_dev_exit(idx);
+       } else {
+               dbuf_free(&drm_info->front_info->dbuf_list,
+                         xen_drm_front_dbuf_to_cookie(obj));
+       }
+
+       xen_drm_front_gem_free_object_unlocked(obj);
+}
+
+static void xen_drm_drv_release(struct drm_device *dev)
+{
+       struct xen_drm_front_drm_info *drm_info = dev->dev_private;
+       struct xen_drm_front_info *front_info = drm_info->front_info;
+
+       xen_drm_front_kms_fini(drm_info);
+
+       drm_atomic_helper_shutdown(dev);
+       drm_mode_config_cleanup(dev);
+
+       drm_dev_fini(dev);
+       kfree(dev);
+
+       if (front_info->cfg.be_alloc)
+               xenbus_switch_state(front_info->xb_dev,
+                                   XenbusStateInitialising);
+
+       kfree(drm_info);
+}
+
+/* File operations: standard DRM entry points plus a driver-specific mmap */
+static const struct file_operations xen_drm_dev_fops = {
+       .owner          = THIS_MODULE,
+       .open           = drm_open,
+       .release        = drm_release,
+       .unlocked_ioctl = drm_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl   = drm_compat_ioctl,
+#endif
+       .poll           = drm_poll,
+       .read           = drm_read,
+       .llseek         = no_llseek,
+       .mmap           = xen_drm_front_gem_mmap,
+};
+
+/* VMA operations for GEM-backed mappings: refcount the GEM object */
+static const struct vm_operations_struct xen_drm_drv_vm_ops = {
+       .open           = drm_gem_vm_open,
+       .close          = drm_gem_vm_close,
+};
+
+/*
+ * DRM driver description: atomic modesetting with PRIME import/export
+ * backed by the driver's own GEM implementation (xen_drm_front_gem_*).
+ */
+static struct drm_driver xen_drm_driver = {
+       .driver_features           = DRIVER_GEM | DRIVER_MODESET |
+                                    DRIVER_PRIME | DRIVER_ATOMIC,
+       .release                   = xen_drm_drv_release,
+       .gem_vm_ops                = &xen_drm_drv_vm_ops,
+       .gem_free_object_unlocked  = xen_drm_drv_free_object_unlocked,
+       .prime_handle_to_fd        = drm_gem_prime_handle_to_fd,
+       .prime_fd_to_handle        = drm_gem_prime_fd_to_handle,
+       .gem_prime_import          = drm_gem_prime_import,
+       .gem_prime_export          = drm_gem_prime_export,
+       .gem_prime_import_sg_table = xen_drm_front_gem_import_sg_table,
+       .gem_prime_get_sg_table    = xen_drm_front_gem_get_sg_table,
+       .gem_prime_vmap            = xen_drm_front_gem_prime_vmap,
+       .gem_prime_vunmap          = xen_drm_front_gem_prime_vunmap,
+       .gem_prime_mmap            = xen_drm_front_gem_prime_mmap,
+       .dumb_create               = xen_drm_drv_dumb_create,
+       .fops                      = &xen_drm_dev_fops,
+       .name                      = "xendrm-du",
+       .desc                      = "Xen PV DRM Display Unit",
+       .date                      = "20180221",
+       .major                     = 1,
+       .minor                     = 0,
+
+};
+
+/*
+ * Create and register the DRM device once the backend is connected.
+ * On success front_info->drm_info points to the new device state.
+ * Returns 0 on success or a negative errno.
+ */
+static int xen_drm_drv_init(struct xen_drm_front_info *front_info)
+{
+       struct device *dev = &front_info->xb_dev->dev;
+       struct xen_drm_front_drm_info *drm_info;
+       struct drm_device *drm_dev;
+       int ret;
+
+       DRM_INFO("Creating %s\n", xen_drm_driver.desc);
+
+       drm_info = kzalloc(sizeof(*drm_info), GFP_KERNEL);
+       if (!drm_info) {
+               ret = -ENOMEM;
+               goto fail;
+       }
+
+       drm_info->front_info = front_info;
+       front_info->drm_info = drm_info;
+
+       /*
+        * drm_dev_alloc() returns an ERR_PTR() on failure, never NULL,
+        * so a NULL check here could never trigger and allocation errors
+        * went undetected.
+        */
+       drm_dev = drm_dev_alloc(&xen_drm_driver, dev);
+       if (IS_ERR(drm_dev)) {
+               ret = PTR_ERR(drm_dev);
+               goto fail;
+       }
+
+       drm_info->drm_dev = drm_dev;
+
+       drm_dev->dev_private = drm_info;
+
+       ret = xen_drm_front_kms_init(drm_info);
+       if (ret) {
+               DRM_ERROR("Failed to initialize DRM/KMS, ret %d\n", ret);
+               goto fail_modeset;
+       }
+
+       ret = drm_dev_register(drm_dev, 0);
+       if (ret)
+               goto fail_register;
+
+       DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
+                xen_drm_driver.name, xen_drm_driver.major,
+                xen_drm_driver.minor, xen_drm_driver.patchlevel,
+                xen_drm_driver.date, drm_dev->primary->index);
+
+       return 0;
+
+fail_register:
+       drm_dev_unregister(drm_dev);
+fail_modeset:
+       drm_kms_helper_poll_fini(drm_dev);
+       drm_mode_config_cleanup(drm_dev);
+       /*
+        * NOTE(review): drm_dev itself is leaked on these paths (no
+        * drm_dev_put()); it cannot be dropped blindly because .release
+        * would kfree(drm_info) too — TODO rework the error unwinding.
+        */
+fail:
+       kfree(drm_info);
+       return ret;
+}
+
+/*
+ * Unplug the DRM device and free per-connection resources when the
+ * backend goes away. Safe to call multiple times: bails out early if the
+ * device was never created or is already unplugged. Final device memory
+ * is released later from xen_drm_drv_release() when the last user drops
+ * its reference.
+ */
+static void xen_drm_drv_fini(struct xen_drm_front_info *front_info)
+{
+       struct xen_drm_front_drm_info *drm_info = front_info->drm_info;
+       struct drm_device *dev;
+
+       if (!drm_info)
+               return;
+
+       dev = drm_info->drm_dev;
+       if (!dev)
+               return;
+
+       /* Nothing to do if device is already unplugged */
+       if (drm_dev_is_unplugged(dev))
+               return;
+
+       drm_kms_helper_poll_fini(dev);
+       drm_dev_unplug(dev);
+
+       front_info->drm_info = NULL;
+
+       xen_drm_front_evtchnl_free_all(front_info);
+       dbuf_free_all(&front_info->dbuf_list);
+
+       /*
+        * If we are not using backend allocated buffers, then tell the
+        * backend we are ready to (re)initialize. Otherwise, wait for
+        * drm_driver.release.
+        */
+       if (!front_info->cfg.be_alloc)
+               xenbus_switch_state(front_info->xb_dev,
+                                   XenbusStateInitialising);
+}
+
+/*
+ * Backend entered InitWait: read our configuration from XenStore, then
+ * create and publish the per-connector event channels.
+ * Returns 0 on success or a negative errno.
+ */
+static int displback_initwait(struct xen_drm_front_info *front_info)
+{
+       struct xen_drm_front_cfg *cfg = &front_info->cfg;
+       int ret;
+
+       cfg->front_info = front_info;
+       ret = xen_drm_front_cfg_card(front_info, cfg);
+       if (ret < 0)
+               return ret;
+
+       /* fix typo in log message: "conector(s)" -> "connector(s)" */
+       DRM_INFO("Have %d connector(s)\n", cfg->num_connectors);
+       /* Create event channels for all connectors and publish */
+       ret = xen_drm_front_evtchnl_create_all(front_info);
+       if (ret < 0)
+               return ret;
+
+       return xen_drm_front_evtchnl_publish_all(front_info);
+}
+
+/*
+ * Backend reached Connected: mark all event channels connected and bring
+ * up the DRM device.
+ */
+static int displback_connect(struct xen_drm_front_info *front_info)
+{
+       xen_drm_front_evtchnl_set_state(front_info, EVTCHNL_STATE_CONNECTED);
+       return xen_drm_drv_init(front_info);
+}
+
+/*
+ * Backend is going away (or closed unexpectedly): switch to
+ * Reconfiguring so the backend waits for us, then tear down the DRM
+ * device. No-op if the device was never initialized.
+ */
+static void displback_disconnect(struct xen_drm_front_info *front_info)
+{
+       if (!front_info->drm_info)
+               return;
+
+       /* Tell the backend to wait until we release the DRM driver. */
+       xenbus_switch_state(front_info->xb_dev, XenbusStateReconfiguring);
+
+       xen_drm_drv_fini(front_info);
+}
+
+/*
+ * xenbus .otherend_changed callback: drive the frontend state machine in
+ * response to backend state transitions. The interesting transitions are
+ * InitWait (read config, publish event channels), Connected (create the
+ * DRM device) and Initialising/Closed (recover after an unexpected
+ * backend closure by disconnecting).
+ */
+static void displback_changed(struct xenbus_device *xb_dev,
+                             enum xenbus_state backend_state)
+{
+       struct xen_drm_front_info *front_info = dev_get_drvdata(&xb_dev->dev);
+       int ret;
+
+       DRM_DEBUG("Backend state is %s, front is %s\n",
+                 xenbus_strstate(backend_state),
+                 xenbus_strstate(xb_dev->state));
+
+       switch (backend_state) {
+       case XenbusStateReconfiguring:
+               /* fall through */
+       case XenbusStateReconfigured:
+               /* fall through */
+       case XenbusStateInitialised:
+               break;
+
+       case XenbusStateInitialising:
+               if (xb_dev->state == XenbusStateReconfiguring)
+                       break;
+
+               /* recovering after backend unexpected closure */
+               displback_disconnect(front_info);
+               break;
+
+       case XenbusStateInitWait:
+               if (xb_dev->state == XenbusStateReconfiguring)
+                       break;
+
+               /* recovering after backend unexpected closure */
+               displback_disconnect(front_info);
+               if (xb_dev->state != XenbusStateInitialising)
+                       break;
+
+               ret = displback_initwait(front_info);
+               if (ret < 0)
+                       xenbus_dev_fatal(xb_dev, ret, "initializing frontend");
+               else
+                       xenbus_switch_state(xb_dev, XenbusStateInitialised);
+               break;
+
+       case XenbusStateConnected:
+               if (xb_dev->state != XenbusStateInitialised)
+                       break;
+
+               ret = displback_connect(front_info);
+               if (ret < 0) {
+                       displback_disconnect(front_info);
+                       xenbus_dev_fatal(xb_dev, ret, "connecting backend");
+               } else {
+                       xenbus_switch_state(xb_dev, XenbusStateConnected);
+               }
+               break;
+
+       case XenbusStateClosing:
+               /*
+                * in this state backend starts freeing resources,
+                * so let it go into closed state, so we can also
+                * remove ours
+                */
+               break;
+
+       case XenbusStateUnknown:
+               /* fall through */
+       case XenbusStateClosed:
+               if (xb_dev->state == XenbusStateClosed)
+                       break;
+
+               displback_disconnect(front_info);
+               break;
+       }
+}
+
+/*
+ * xenbus probe: set up DMA ops (the device has no device-tree node, see
+ * comment below), allocate per-connection state and announce ourselves
+ * to the backend by switching to Initialising.
+ */
+static int xen_drv_probe(struct xenbus_device *xb_dev,
+                        const struct xenbus_device_id *id)
+{
+       struct xen_drm_front_info *front_info;
+       struct device *dev = &xb_dev->dev;
+       int ret;
+
+       /*
+        * The device is not spawn from a device tree, so arch_setup_dma_ops
+        * is not called, thus leaving the device with dummy DMA ops.
+        * This makes the device return error on PRIME buffer import, which
+        * is not correct: to fix this call of_dma_configure() with a NULL
+        * node to set default DMA ops.
+        */
+       dev->bus->force_dma = true;
+       dev->coherent_dma_mask = DMA_BIT_MASK(32);
+       ret = of_dma_configure(dev, NULL);
+       if (ret < 0) {
+               DRM_ERROR("Cannot setup DMA ops, ret %d", ret);
+               return ret;
+       }
+
+       front_info = devm_kzalloc(&xb_dev->dev,
+                                 sizeof(*front_info), GFP_KERNEL);
+       if (!front_info)
+               return -ENOMEM;
+
+       front_info->xb_dev = xb_dev;
+       spin_lock_init(&front_info->io_lock);
+       INIT_LIST_HEAD(&front_info->dbuf_list);
+       dev_set_drvdata(&xb_dev->dev, front_info);
+
+       return xenbus_switch_state(xb_dev, XenbusStateInitialising);
+}
+
+/*
+ * xenbus remove: signal Closing, wait (bounded, ~1s) for the backend to
+ * reach InitWait, then tear down the DRM device and complete the close
+ * handshake.
+ */
+static int xen_drv_remove(struct xenbus_device *dev)
+{
+       struct xen_drm_front_info *front_info = dev_get_drvdata(&dev->dev);
+       int to = 100;
+
+       xenbus_switch_state(dev, XenbusStateClosing);
+
+       /*
+        * On driver removal it is disconnected from XenBus,
+        * so no backend state change events come via .otherend_changed
+        * callback. This prevents us from exiting gracefully, e.g.
+        * signaling the backend to free event channels, waiting for its
+        * state to change to XenbusStateClosed and cleaning at our end.
+        * Normally when front driver removed backend will finally go into
+        * XenbusStateInitWait state.
+        *
+        * Workaround: read backend's state manually and wait with time-out.
+        */
+       while ((xenbus_read_unsigned(front_info->xb_dev->otherend, "state",
+                                    XenbusStateUnknown) != XenbusStateInitWait) &&
+                                    --to)
+               msleep(10);
+
+       /*
+        * Use pre-decrement above: with the original post-decrement the
+        * counter ended up at -1 on timeout, so this check never fired
+        * and the timeout was silently ignored.
+        */
+       if (!to) {
+               unsigned int state;
+
+               state = xenbus_read_unsigned(front_info->xb_dev->otherend,
+                                            "state", XenbusStateUnknown);
+               DRM_ERROR("Backend state is %s while removing driver\n",
+                         xenbus_strstate(state));
+       }
+
+       xen_drm_drv_fini(front_info);
+       xenbus_frontend_closed(dev);
+       return 0;
+}
+
+/* XenBus device IDs this frontend binds to (terminated by empty entry) */
+static const struct xenbus_device_id xen_driver_ids[] = {
+       { XENDISPL_DRIVER_NAME },
+       { "" }
+};
+
+/* XenBus frontend driver glue */
+static struct xenbus_driver xen_driver = {
+       .ids = xen_driver_ids,
+       .probe = xen_drv_probe,
+       .remove = xen_drv_remove,
+       .otherend_changed = displback_changed,
+};
+
+/*
+ * Module init: sanity-check page sizes and environment (must run in a
+ * Xen domain with PV devices), then register the xenbus frontend.
+ */
+static int __init xen_drv_init(void)
+{
+       /* At the moment we only support case with XEN_PAGE_SIZE == PAGE_SIZE */
+       if (XEN_PAGE_SIZE != PAGE_SIZE) {
+               DRM_ERROR(XENDISPL_DRIVER_NAME ": different kernel and Xen page sizes are not supported: XEN_PAGE_SIZE (%lu) != PAGE_SIZE (%lu)\n",
+                         XEN_PAGE_SIZE, PAGE_SIZE);
+               return -ENODEV;
+       }
+
+       if (!xen_domain())
+               return -ENODEV;
+
+       if (!xen_has_pv_devices())
+               return -ENODEV;
+
+       DRM_INFO("Registering XEN PV " XENDISPL_DRIVER_NAME "\n");
+       return xenbus_register_frontend(&xen_driver);
+}
+
+/* Module exit: unregister the xenbus frontend */
+static void __exit xen_drv_fini(void)
+{
+       DRM_INFO("Unregistering XEN PV " XENDISPL_DRIVER_NAME "\n");
+       xenbus_unregister_driver(&xen_driver);
+}
+
+module_init(xen_drv_init);
+module_exit(xen_drv_fini);
+
+MODULE_DESCRIPTION("Xen para-virtualized display device frontend");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("xen:" XENDISPL_DRIVER_NAME);
diff --git a/drivers/gpu/drm/xen/xen_drm_front.h b/drivers/gpu/drm/xen/xen_drm_front.h
new file mode 100644 (file)
index 0000000..2c2479b
--- /dev/null
@@ -0,0 +1,158 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+
+/*
+ *  Xen para-virtual DRM device
+ *
+ * Copyright (C) 2016-2018 EPAM Systems Inc.
+ *
+ * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
+ */
+
+#ifndef __XEN_DRM_FRONT_H_
+#define __XEN_DRM_FRONT_H_
+
+#include <drm/drmP.h>
+#include <drm/drm_simple_kms_helper.h>
+
+#include <linux/scatterlist.h>
+
+#include "xen_drm_front_cfg.h"
+
+/**
+ * DOC: Driver modes of operation in terms of display buffers used
+ *
+ * Depending on the requirements for the para-virtualized environment, namely
+ * requirements dictated by the accompanying DRM/(v)GPU drivers running in both
+ * host and guest environments, display buffers can be allocated by either
+ * frontend driver or backend.
+ */
+
+/**
+ * DOC: Buffers allocated by the frontend driver
+ *
+ * In this mode of operation driver allocates buffers from system memory.
+ *
+ * Note! If used with accompanying DRM/(v)GPU drivers this mode of operation
+ * may require IOMMU support on the platform, so accompanying DRM/vGPU
+ * hardware can still reach display buffer memory while importing PRIME
+ * buffers from the frontend driver.
+ */
+
+/**
+ * DOC: Buffers allocated by the backend
+ *
+ * This mode of operation is run-time configured via guest domain configuration
+ * through XenStore entries.
+ *
+ * For systems which do not provide IOMMU support, but having specific
+ * requirements for display buffers it is possible to allocate such buffers
+ * at backend side and share those with the frontend.
+ * For example, if host domain is 1:1 mapped and has DRM/GPU hardware expecting
+ * physically contiguous memory, this allows implementing zero-copying
+ * use-cases.
+ *
+ * Note, while using this scenario the following should be considered:
+ *
+ * #. If guest domain dies then pages/grants received from the backend
+ *    cannot be claimed back
+ *
+ * #. Misbehaving guest may send too many requests to the
+ *    backend exhausting its grant references and memory
+ *    (consider this from security POV)
+ */
+
+/**
+ * DOC: Driver limitations
+ *
+ * #. Only primary plane without additional properties is supported.
+ *
+ * #. Only one video mode per connector supported which is configured
+ *    via XenStore.
+ *
+ * #. All CRTCs operate at fixed frequency of 60Hz.
+ */
+
+/* timeout in ms to wait for backend to respond */
+#define XEN_DRM_FRONT_WAIT_BACK_MS     3000
+
+#ifndef GRANT_INVALID_REF
+/*
+ * Note on usage of grant reference 0 as invalid grant reference:
+ * grant reference 0 is valid, but never exposed to a PV driver,
+ * because of the fact it is already in use/reserved by the PV console.
+ */
+#define GRANT_INVALID_REF      0
+#endif
+
+/* Per xenbus-connection state, lives as long as the frontend is probed */
+struct xen_drm_front_info {
+       struct xenbus_device *xb_dev;
+       /* DRM device state; NULL until the backend is connected */
+       struct xen_drm_front_drm_info *drm_info;
+
+       /* to protect data between backend IO code and interrupt handler */
+       spinlock_t io_lock;
+
+       int num_evt_pairs;
+       struct xen_drm_front_evtchnl_pair *evt_pairs;
+       struct xen_drm_front_cfg cfg;
+
+       /* display buffers */
+       struct list_head dbuf_list;
+};
+
+/* One display pipeline: simple display pipe + connector per configured CRTC */
+struct xen_drm_front_drm_pipeline {
+       struct xen_drm_front_drm_info *drm_info;
+
+       /* index into xen_drm_front_drm_info.pipeline and event channel pair */
+       int index;
+
+       struct drm_simple_display_pipe pipe;
+
+       struct drm_connector conn;
+       /* These are only for connector mode checking */
+       int width, height;
+
+       struct drm_pending_vblank_event *pending_event;
+
+       /* worker used to time out page-flip requests not answered by backend */
+       struct delayed_work pflip_to_worker;
+
+       bool conn_connected;
+};
+
+/* DRM device state, allocated when the backend connects */
+struct xen_drm_front_drm_info {
+       struct xen_drm_front_info *front_info;
+       struct drm_device *drm_dev;
+
+       struct xen_drm_front_drm_pipeline pipeline[XEN_DRM_FRONT_MAX_CRTCS];
+};
+
+/*
+ * Framebuffers and display buffers are referenced in frontend <-> backend
+ * requests by opaque 64-bit cookies derived from the kernel pointers.
+ * Cast through uintptr_t: a direct pointer-to-u64 cast triggers a
+ * "cast from pointer to integer of different size" warning on 32-bit
+ * builds.
+ */
+static inline u64 xen_drm_front_fb_to_cookie(struct drm_framebuffer *fb)
+{
+       return (uintptr_t)fb;
+}
+
+static inline u64 xen_drm_front_dbuf_to_cookie(struct drm_gem_object *gem_obj)
+{
+       return (uintptr_t)gem_obj;
+}
+
+int xen_drm_front_mode_set(struct xen_drm_front_drm_pipeline *pipeline,
+                          u32 x, u32 y, u32 width, u32 height,
+                          u32 bpp, u64 fb_cookie);
+
+int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
+                             u64 dbuf_cookie, u32 width, u32 height,
+                             u32 bpp, u64 size, struct page **pages);
+
+int xen_drm_front_fb_attach(struct xen_drm_front_info *front_info,
+                           u64 dbuf_cookie, u64 fb_cookie, u32 width,
+                           u32 height, u32 pixel_format);
+
+int xen_drm_front_fb_detach(struct xen_drm_front_info *front_info,
+                           u64 fb_cookie);
+
+int xen_drm_front_page_flip(struct xen_drm_front_info *front_info,
+                           int conn_idx, u64 fb_cookie);
+
+void xen_drm_front_on_frame_done(struct xen_drm_front_info *front_info,
+                                int conn_idx, u64 fb_cookie);
+
+#endif /* __XEN_DRM_FRONT_H_ */
diff --git a/drivers/gpu/drm/xen/xen_drm_front_cfg.c b/drivers/gpu/drm/xen/xen_drm_front_cfg.c
new file mode 100644 (file)
index 0000000..5baf2b9
--- /dev/null
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+/*
+ *  Xen para-virtual DRM device
+ *
+ * Copyright (C) 2016-2018 EPAM Systems Inc.
+ *
+ * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
+ */
+
+#include <drm/drmP.h>
+
+#include <linux/device.h>
+
+#include <xen/interface/io/displif.h>
+#include <xen/xenbus.h>
+
+#include "xen_drm_front.h"
+#include "xen_drm_front_cfg.h"
+
+/*
+ * Read one connector's configuration ("<path>/<index>") from XenStore:
+ * currently only the resolution (XENDISPL_FIELD_RESOLUTION, formatted as
+ * "<width>x<height>"). Returns 0 on success, -ENOMEM on allocation
+ * failure, -EINVAL if the entry is missing or malformed.
+ */
+static int cfg_connector(struct xen_drm_front_info *front_info,
+                        struct xen_drm_front_cfg_connector *connector,
+                        const char *path, int index)
+{
+       char *connector_path;
+
+       connector_path = devm_kasprintf(&front_info->xb_dev->dev,
+                                       GFP_KERNEL, "%s/%d", path, index);
+       if (!connector_path)
+               return -ENOMEM;
+
+       if (xenbus_scanf(XBT_NIL, connector_path, XENDISPL_FIELD_RESOLUTION,
+                        "%d" XENDISPL_RESOLUTION_SEPARATOR "%d",
+                        &connector->width, &connector->height) < 0) {
+               /* either no entry configured or wrong resolution set */
+               connector->width = 0;
+               connector->height = 0;
+               return -EINVAL;
+       }
+
+       connector->xenstore_path = connector_path;
+
+       DRM_INFO("Connector %s: resolution %dx%d\n",
+                connector_path, connector->width, connector->height);
+       return 0;
+}
+
+/*
+ * Read the whole card configuration from XenStore: whether the backend
+ * allocates display buffers (XENDISPL_FIELD_BE_ALLOC) and the list of
+ * connectors (reading stops at the first missing/invalid entry, so
+ * connectors must be configured with consecutive indices starting at 0).
+ * Returns 0 on success, -ENODEV if no connector is configured.
+ */
+int xen_drm_front_cfg_card(struct xen_drm_front_info *front_info,
+                          struct xen_drm_front_cfg *cfg)
+{
+       struct xenbus_device *xb_dev = front_info->xb_dev;
+       int ret, i;
+
+       if (xenbus_read_unsigned(front_info->xb_dev->nodename,
+                                XENDISPL_FIELD_BE_ALLOC, 0)) {
+               DRM_INFO("Backend can provide display buffers\n");
+               cfg->be_alloc = true;
+       }
+
+       cfg->num_connectors = 0;
+       for (i = 0; i < ARRAY_SIZE(cfg->connectors); i++) {
+               ret = cfg_connector(front_info, &cfg->connectors[i],
+                                   xb_dev->nodename, i);
+               if (ret < 0)
+                       break;
+               cfg->num_connectors++;
+       }
+
+       if (!cfg->num_connectors) {
+               DRM_ERROR("No connector(s) configured at %s\n",
+                         xb_dev->nodename);
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
diff --git a/drivers/gpu/drm/xen/xen_drm_front_cfg.h b/drivers/gpu/drm/xen/xen_drm_front_cfg.h
new file mode 100644 (file)
index 0000000..aa8490b
--- /dev/null
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+
+/*
+ *  Xen para-virtual DRM device
+ *
+ * Copyright (C) 2016-2018 EPAM Systems Inc.
+ *
+ * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
+ */
+
+#ifndef __XEN_DRM_FRONT_CFG_H_
+#define __XEN_DRM_FRONT_CFG_H_
+
+#include <linux/types.h>
+
+#define XEN_DRM_FRONT_MAX_CRTCS        4
+
+/* Per-connector configuration read from XenStore */
+struct xen_drm_front_cfg_connector {
+       /* fixed resolution of the single supported mode */
+       int width;
+       int height;
+       /* XenStore path of this connector's configuration node */
+       char *xenstore_path;
+};
+
+/* Card configuration read from XenStore at backend InitWait */
+struct xen_drm_front_cfg {
+       struct xen_drm_front_info *front_info;
+       /* number of connectors in this configuration */
+       int num_connectors;
+       /* connector configurations */
+       struct xen_drm_front_cfg_connector connectors[XEN_DRM_FRONT_MAX_CRTCS];
+       /* set if dumb buffers are allocated externally on backend side */
+       bool be_alloc;
+};
+
+int xen_drm_front_cfg_card(struct xen_drm_front_info *front_info,
+                          struct xen_drm_front_cfg *cfg);
+
+#endif /* __XEN_DRM_FRONT_CFG_H_ */
diff --git a/drivers/gpu/drm/xen/xen_drm_front_conn.c b/drivers/gpu/drm/xen/xen_drm_front_conn.c
new file mode 100644 (file)
index 0000000..c91ae53
--- /dev/null
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+/*
+ *  Xen para-virtual DRM device
+ *
+ * Copyright (C) 2016-2018 EPAM Systems Inc.
+ *
+ * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
+ */
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+
+#include <video/videomode.h>
+
+#include "xen_drm_front.h"
+#include "xen_drm_front_conn.h"
+#include "xen_drm_front_kms.h"
+
+/* Map an embedded drm_connector back to its owning pipeline */
+static struct xen_drm_front_drm_pipeline *
+to_xen_drm_pipeline(struct drm_connector *connector)
+{
+       return container_of(connector, struct xen_drm_front_drm_pipeline, conn);
+}
+
+/* Pixel formats supported by the primary plane */
+static const u32 plane_formats[] = {
+       DRM_FORMAT_RGB565,
+       DRM_FORMAT_RGB888,
+       DRM_FORMAT_XRGB8888,
+       DRM_FORMAT_ARGB8888,
+       DRM_FORMAT_XRGB4444,
+       DRM_FORMAT_ARGB4444,
+       DRM_FORMAT_XRGB1555,
+       DRM_FORMAT_ARGB1555,
+};
+
+/* Return the supported plane formats and their count via @format_count */
+const u32 *xen_drm_front_conn_get_formats(int *format_count)
+{
+       *format_count = ARRAY_SIZE(plane_formats);
+       return plane_formats;
+}
+
+/*
+ * drm_connector_helper_funcs.detect_ctx: the virtual connector is
+ * "connected" from init until the DRM device is unplugged, after which
+ * it stays disconnected.
+ */
+static int connector_detect(struct drm_connector *connector,
+                           struct drm_modeset_acquire_ctx *ctx,
+                           bool force)
+{
+       struct xen_drm_front_drm_pipeline *pipeline =
+                       to_xen_drm_pipeline(connector);
+
+       if (drm_dev_is_unplugged(connector->dev))
+               pipeline->conn_connected = false;
+
+       return pipeline->conn_connected ? connector_status_connected :
+                       connector_status_disconnected;
+}
+
+/* All CRTCs operate at a fixed 60Hz (see "Driver limitations" doc) */
+#define XEN_DRM_CRTC_VREFRESH_HZ       60
+
+/*
+ * drm_connector_helper_funcs.get_modes: expose exactly one mode, built
+ * from the fixed width/height configured in XenStore. Only the active
+ * area is set (all porches/sync lengths stay zero) and the pixel clock
+ * is derived for a 60Hz refresh. Returns the number of modes added
+ * (1, or 0 on allocation failure).
+ */
+static int connector_get_modes(struct drm_connector *connector)
+{
+       struct xen_drm_front_drm_pipeline *pipeline =
+                       to_xen_drm_pipeline(connector);
+       struct drm_display_mode *mode;
+       struct videomode videomode;
+       int width, height;
+
+       mode = drm_mode_create(connector->dev);
+       if (!mode)
+               return 0;
+
+       memset(&videomode, 0, sizeof(videomode));
+       videomode.hactive = pipeline->width;
+       videomode.vactive = pipeline->height;
+       width = videomode.hactive + videomode.hfront_porch +
+                       videomode.hback_porch + videomode.hsync_len;
+       height = videomode.vactive + videomode.vfront_porch +
+                       videomode.vback_porch + videomode.vsync_len;
+       videomode.pixelclock = width * height * XEN_DRM_CRTC_VREFRESH_HZ;
+       mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
+
+       drm_display_mode_from_videomode(&videomode, mode);
+       drm_mode_probed_add(connector, mode);
+       return 1;
+}
+
+static const struct drm_connector_helper_funcs connector_helper_funcs = {
+       .get_modes = connector_get_modes,
+       .detect_ctx = connector_detect,
+};
+
+static const struct drm_connector_funcs connector_funcs = {
+       .dpms = drm_helper_connector_dpms,
+       .fill_modes = drm_helper_probe_single_connector_modes,
+       .destroy = drm_connector_cleanup,
+       .reset = drm_atomic_helper_connector_reset,
+       .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+/*
+ * Initialize the (virtual) connector embedded in a pipeline: install the
+ * helper funcs, mark it connected and enable connect/disconnect polling
+ * (the driver has no hotplug interrupt). Returns 0 or a negative errno
+ * from drm_connector_init().
+ */
+int xen_drm_front_conn_init(struct xen_drm_front_drm_info *drm_info,
+                           struct drm_connector *connector)
+{
+       struct xen_drm_front_drm_pipeline *pipeline =
+                       to_xen_drm_pipeline(connector);
+
+       drm_connector_helper_add(connector, &connector_helper_funcs);
+
+       pipeline->conn_connected = true;
+
+       connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+                       DRM_CONNECTOR_POLL_DISCONNECT;
+
+       return drm_connector_init(drm_info->drm_dev, connector,
+                                 &connector_funcs, DRM_MODE_CONNECTOR_VIRTUAL);
+}
diff --git a/drivers/gpu/drm/xen/xen_drm_front_conn.h b/drivers/gpu/drm/xen/xen_drm_front_conn.h
new file mode 100644 (file)
index 0000000..39de7cf
--- /dev/null
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+
+/*
+ *  Xen para-virtual DRM device
+ *
+ * Copyright (C) 2016-2018 EPAM Systems Inc.
+ *
+ * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
+ */
+
+#ifndef __XEN_DRM_FRONT_CONN_H_
+#define __XEN_DRM_FRONT_CONN_H_
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_encoder.h>
+
+#include <linux/wait.h>
+
+struct xen_drm_front_drm_info;
+
+int xen_drm_front_conn_init(struct xen_drm_front_drm_info *drm_info,
+                           struct drm_connector *connector);
+
+const u32 *xen_drm_front_conn_get_formats(int *format_count);
+
+#endif /* __XEN_DRM_FRONT_CONN_H_ */
diff --git a/drivers/gpu/drm/xen/xen_drm_front_evtchnl.c b/drivers/gpu/drm/xen/xen_drm_front_evtchnl.c
new file mode 100644 (file)
index 0000000..945226a
--- /dev/null
@@ -0,0 +1,387 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+/*
+ *  Xen para-virtual DRM device
+ *
+ * Copyright (C) 2016-2018 EPAM Systems Inc.
+ *
+ * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
+ */
+
+#include <drm/drmP.h>
+
+#include <linux/errno.h>
+#include <linux/irq.h>
+
+#include <xen/xenbus.h>
+#include <xen/events.h>
+#include <xen/grant_table.h>
+
+#include "xen_drm_front.h"
+#include "xen_drm_front_evtchnl.h"
+
+/*
+ * IRQ handler for a request (control) ring event channel: consume all
+ * queued responses, and for each response matching the currently expected
+ * request id store its status and wake the synchronous waiter. Runs under
+ * io_lock to serialize against request submission.
+ */
+static irqreturn_t evtchnl_interrupt_ctrl(int irq, void *dev_id)
+{
+       struct xen_drm_front_evtchnl *evtchnl = dev_id;
+       struct xen_drm_front_info *front_info = evtchnl->front_info;
+       struct xendispl_resp *resp;
+       RING_IDX i, rp;
+       unsigned long flags;
+
+       if (unlikely(evtchnl->state != EVTCHNL_STATE_CONNECTED))
+               return IRQ_HANDLED;
+
+       spin_lock_irqsave(&front_info->io_lock, flags);
+
+again:
+       rp = evtchnl->u.req.ring.sring->rsp_prod;
+       /* ensure we see queued responses up to rp */
+       virt_rmb();
+
+       for (i = evtchnl->u.req.ring.rsp_cons; i != rp; i++) {
+               resp = RING_GET_RESPONSE(&evtchnl->u.req.ring, i);
+               /* stale response for an already abandoned request: skip */
+               if (unlikely(resp->id != evtchnl->evt_id))
+                       continue;
+
+               switch (resp->operation) {
+               case XENDISPL_OP_PG_FLIP:
+               case XENDISPL_OP_FB_ATTACH:
+               case XENDISPL_OP_FB_DETACH:
+               case XENDISPL_OP_DBUF_CREATE:
+               case XENDISPL_OP_DBUF_DESTROY:
+               case XENDISPL_OP_SET_CONFIG:
+                       evtchnl->u.req.resp_status = resp->status;
+                       complete(&evtchnl->u.req.completion);
+                       break;
+
+               default:
+                       DRM_ERROR("Operation %d is not supported\n",
+                                 resp->operation);
+                       break;
+               }
+       }
+
+       evtchnl->u.req.ring.rsp_cons = i;
+
+       /* standard ring-buffer re-check to avoid missing late responses */
+       if (i != evtchnl->u.req.ring.req_prod_pvt) {
+               int more_to_do;
+
+               RING_FINAL_CHECK_FOR_RESPONSES(&evtchnl->u.req.ring,
+                                              more_to_do);
+               if (more_to_do)
+                       goto again;
+       } else {
+               evtchnl->u.req.ring.sring->rsp_event = i + 1;
+       }
+
+       spin_unlock_irqrestore(&front_info->io_lock, flags);
+       return IRQ_HANDLED;
+}
+
+/*
+ * IRQ handler for an event ring channel: consume backend-originated
+ * events. Currently only page-flip completion events are handled; they
+ * are forwarded to the KMS code via xen_drm_front_on_frame_done().
+ * Events are expected to carry sequentially increasing ids.
+ */
+static irqreturn_t evtchnl_interrupt_evt(int irq, void *dev_id)
+{
+       struct xen_drm_front_evtchnl *evtchnl = dev_id;
+       struct xen_drm_front_info *front_info = evtchnl->front_info;
+       struct xendispl_event_page *page = evtchnl->u.evt.page;
+       u32 cons, prod;
+       unsigned long flags;
+
+       if (unlikely(evtchnl->state != EVTCHNL_STATE_CONNECTED))
+               return IRQ_HANDLED;
+
+       spin_lock_irqsave(&front_info->io_lock, flags);
+
+       prod = page->in_prod;
+       /* ensure we see ring contents up to prod */
+       virt_rmb();
+       if (prod == page->in_cons)
+               goto out;
+
+       for (cons = page->in_cons; cons != prod; cons++) {
+               struct xendispl_evt *event;
+
+               event = &XENDISPL_IN_RING_REF(page, cons);
+               /* out-of-sequence event: skip (expected id still advances) */
+               if (unlikely(event->id != evtchnl->evt_id++))
+                       continue;
+
+               switch (event->type) {
+               case XENDISPL_EVT_PG_FLIP:
+                       xen_drm_front_on_frame_done(front_info, evtchnl->index,
+                                                   event->op.pg_flip.fb_cookie);
+                       break;
+               }
+       }
+       page->in_cons = cons;
+       /* ensure ring contents */
+       virt_wmb();
+
+out:
+       spin_unlock_irqrestore(&front_info->io_lock, flags);
+       return IRQ_HANDLED;
+}
+
+/*
+ * Tear down one event channel: mark it disconnected, wake any waiter
+ * with -EIO (request channels only), unbind the IRQ, free the Xen event
+ * channel and revoke/free the shared ring page. No-op if the channel was
+ * never set up (no page allocated).
+ */
+static void evtchnl_free(struct xen_drm_front_info *front_info,
+                        struct xen_drm_front_evtchnl *evtchnl)
+{
+       unsigned long page = 0;
+
+       if (evtchnl->type == EVTCHNL_TYPE_REQ)
+               page = (unsigned long)evtchnl->u.req.ring.sring;
+       else if (evtchnl->type == EVTCHNL_TYPE_EVT)
+               page = (unsigned long)evtchnl->u.evt.page;
+       if (!page)
+               return;
+
+       evtchnl->state = EVTCHNL_STATE_DISCONNECTED;
+
+       if (evtchnl->type == EVTCHNL_TYPE_REQ) {
+               /* release all who still waits for response if any */
+               evtchnl->u.req.resp_status = -EIO;
+               complete_all(&evtchnl->u.req.completion);
+       }
+
+       if (evtchnl->irq)
+               unbind_from_irqhandler(evtchnl->irq, evtchnl);
+
+       if (evtchnl->port)
+               xenbus_free_evtchn(front_info->xb_dev, evtchnl->port);
+
+       /* end access and free the page */
+       if (evtchnl->gref != GRANT_INVALID_REF)
+               gnttab_end_foreign_access(evtchnl->gref, 0, page);
+
+       memset(evtchnl, 0, sizeof(*evtchnl));
+}
+
+/*
+ * Allocate and wire up one event channel of the given @type for
+ * connector @index: allocate a shared page, grant it to the backend
+ * (as a shared ring for request channels, as a raw event page for event
+ * channels), allocate a Xen event channel and bind the matching IRQ
+ * handler. Returns 0 or a negative errno; on failure the caller is
+ * expected to clean up via evtchnl_free() (partially initialized state
+ * is tracked in *evtchnl).
+ */
+static int evtchnl_alloc(struct xen_drm_front_info *front_info, int index,
+                        struct xen_drm_front_evtchnl *evtchnl,
+                        enum xen_drm_front_evtchnl_type type)
+{
+       struct xenbus_device *xb_dev = front_info->xb_dev;
+       unsigned long page;
+       grant_ref_t gref;
+       irq_handler_t handler;
+       int ret;
+
+       memset(evtchnl, 0, sizeof(*evtchnl));
+       evtchnl->type = type;
+       evtchnl->index = index;
+       evtchnl->front_info = front_info;
+       evtchnl->state = EVTCHNL_STATE_DISCONNECTED;
+       evtchnl->gref = GRANT_INVALID_REF;
+
+       page = get_zeroed_page(GFP_NOIO | __GFP_HIGH);
+       if (!page) {
+               ret = -ENOMEM;
+               goto fail;
+       }
+
+       if (type == EVTCHNL_TYPE_REQ) {
+               struct xen_displif_sring *sring;
+
+               init_completion(&evtchnl->u.req.completion);
+               mutex_init(&evtchnl->u.req.req_io_lock);
+               sring = (struct xen_displif_sring *)page;
+               SHARED_RING_INIT(sring);
+               FRONT_RING_INIT(&evtchnl->u.req.ring, sring, XEN_PAGE_SIZE);
+
+               ret = xenbus_grant_ring(xb_dev, sring, 1, &gref);
+               if (ret < 0) {
+                       evtchnl->u.req.ring.sring = NULL;
+                       free_page(page);
+                       goto fail;
+               }
+
+               handler = evtchnl_interrupt_ctrl;
+       } else {
+               ret = gnttab_grant_foreign_access(xb_dev->otherend_id,
+                                                 virt_to_gfn((void *)page), 0);
+               if (ret < 0) {
+                       free_page(page);
+                       goto fail;
+               }
+
+               evtchnl->u.evt.page = (struct xendispl_event_page *)page;
+               gref = ret;
+               handler = evtchnl_interrupt_evt;
+       }
+       evtchnl->gref = gref;
+
+       ret = xenbus_alloc_evtchn(xb_dev, &evtchnl->port);
+       if (ret < 0)
+               goto fail;
+
+       ret = bind_evtchn_to_irqhandler(evtchnl->port,
+                                       handler, 0, xb_dev->devicetype,
+                                       evtchnl);
+       if (ret < 0)
+               goto fail;
+
+       evtchnl->irq = ret;
+       return 0;
+
+fail:
+       DRM_ERROR("Failed to allocate ring: %d\n", ret);
+       return ret;
+}
+
+/*
+ * Allocate one request/event channel pair per configured connector.
+ * On any failure every channel allocated so far is released via
+ * xen_drm_front_evtchnl_free_all().  Returns 0 or a negative errno.
+ */
+int xen_drm_front_evtchnl_create_all(struct xen_drm_front_info *front_info)
+{
+       struct xen_drm_front_cfg *cfg;
+       int ret, conn;
+
+       cfg = &front_info->cfg;
+
+       front_info->evt_pairs =
+                       kcalloc(cfg->num_connectors,
+                               sizeof(struct xen_drm_front_evtchnl_pair),
+                               GFP_KERNEL);
+       if (!front_info->evt_pairs) {
+               ret = -ENOMEM;
+               goto fail;
+       }
+
+       for (conn = 0; conn < cfg->num_connectors; conn++) {
+               ret = evtchnl_alloc(front_info, conn,
+                                   &front_info->evt_pairs[conn].req,
+                                   EVTCHNL_TYPE_REQ);
+               if (ret < 0) {
+                       DRM_ERROR("Error allocating control channel\n");
+                       goto fail;
+               }
+
+               ret = evtchnl_alloc(front_info, conn,
+                                   &front_info->evt_pairs[conn].evt,
+                                   EVTCHNL_TYPE_EVT);
+               if (ret < 0) {
+                       DRM_ERROR("Error allocating in-event channel\n");
+                       goto fail;
+               }
+       }
+       /* only set the count once all pairs are fully initialized */
+       front_info->num_evt_pairs = cfg->num_connectors;
+       return 0;
+
+fail:
+       xen_drm_front_evtchnl_free_all(front_info);
+       return ret;
+}
+
+/*
+ * Publish one channel's grant reference and event-channel port into
+ * XenStore under @path using the given node names, within the caller's
+ * transaction @xbt.  Returns 0 or a negative errno (the error is also
+ * reported via xenbus_dev_error()).
+ */
+static int evtchnl_publish(struct xenbus_transaction xbt,
+                          struct xen_drm_front_evtchnl *evtchnl,
+                          const char *path, const char *node_ring,
+                          const char *node_chnl)
+{
+       struct xenbus_device *xb_dev = evtchnl->front_info->xb_dev;
+       int ret;
+
+       /* write control channel ring reference */
+       ret = xenbus_printf(xbt, path, node_ring, "%u", evtchnl->gref);
+       if (ret < 0) {
+               xenbus_dev_error(xb_dev, ret, "writing ring-ref");
+               return ret;
+       }
+
+       /* write event channel ring reference */
+       ret = xenbus_printf(xbt, path, node_chnl, "%u", evtchnl->port);
+       if (ret < 0) {
+               xenbus_dev_error(xb_dev, ret, "writing event channel");
+               return ret;
+       }
+
+       return 0;
+}
+
+/*
+ * Publish all channel pairs into XenStore inside a single transaction
+ * so the backend sees a consistent view.  The transaction is retried
+ * when xenbus_transaction_end() reports -EAGAIN (standard xenbus
+ * retry protocol); on any other failure the transaction is aborted.
+ * Returns 0 or a negative errno.
+ */
+int xen_drm_front_evtchnl_publish_all(struct xen_drm_front_info *front_info)
+{
+       struct xenbus_transaction xbt;
+       struct xen_drm_front_cfg *plat_data;
+       int ret, conn;
+
+       plat_data = &front_info->cfg;
+
+again:
+       ret = xenbus_transaction_start(&xbt);
+       if (ret < 0) {
+               xenbus_dev_fatal(front_info->xb_dev, ret,
+                                "starting transaction");
+               return ret;
+       }
+
+       for (conn = 0; conn < plat_data->num_connectors; conn++) {
+               ret = evtchnl_publish(xbt, &front_info->evt_pairs[conn].req,
+                                     plat_data->connectors[conn].xenstore_path,
+                                     XENDISPL_FIELD_REQ_RING_REF,
+                                     XENDISPL_FIELD_REQ_CHANNEL);
+               if (ret < 0)
+                       goto fail;
+
+               ret = evtchnl_publish(xbt, &front_info->evt_pairs[conn].evt,
+                                     plat_data->connectors[conn].xenstore_path,
+                                     XENDISPL_FIELD_EVT_RING_REF,
+                                     XENDISPL_FIELD_EVT_CHANNEL);
+               if (ret < 0)
+                       goto fail;
+       }
+
+       ret = xenbus_transaction_end(xbt, 0);
+       if (ret < 0) {
+               /* -EAGAIN means the transaction raced and must be retried */
+               if (ret == -EAGAIN)
+                       goto again;
+
+               xenbus_dev_fatal(front_info->xb_dev, ret,
+                                "completing transaction");
+               goto fail_to_end;
+       }
+
+       return 0;
+
+fail:
+       /* abort: second argument of 1 discards the pending writes */
+       xenbus_transaction_end(xbt, 1);
+
+fail_to_end:
+       xenbus_dev_fatal(front_info->xb_dev, ret, "writing Xen store");
+       return ret;
+}
+
+/*
+ * Advance the private request producer index, push queued requests to
+ * the shared ring and kick the backend via the channel's IRQ when the
+ * ring macros say a notification is needed.
+ */
+void xen_drm_front_evtchnl_flush(struct xen_drm_front_evtchnl *evtchnl)
+{
+       int notify;
+
+       evtchnl->u.req.ring.req_prod_pvt++;
+       RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&evtchnl->u.req.ring, notify);
+       if (notify)
+               notify_remote_via_irq(evtchnl->irq);
+}
+
+/*
+ * Atomically (under io_lock) switch every request and event channel to
+ * @state.  No-op when no channel pairs have been allocated yet.
+ */
+void xen_drm_front_evtchnl_set_state(struct xen_drm_front_info *front_info,
+                                    enum xen_drm_front_evtchnl_state state)
+{
+       unsigned long flags;
+       int i;
+
+       if (!front_info->evt_pairs)
+               return;
+
+       spin_lock_irqsave(&front_info->io_lock, flags);
+       for (i = 0; i < front_info->num_evt_pairs; i++) {
+               front_info->evt_pairs[i].req.state = state;
+               front_info->evt_pairs[i].evt.state = state;
+       }
+       spin_unlock_irqrestore(&front_info->io_lock, flags);
+}
+
+/*
+ * Free every allocated channel pair and the pair array itself.
+ * Safe to call multiple times: evt_pairs is reset to NULL so a second
+ * call returns immediately.
+ */
+void xen_drm_front_evtchnl_free_all(struct xen_drm_front_info *front_info)
+{
+       int i;
+
+       if (!front_info->evt_pairs)
+               return;
+
+       for (i = 0; i < front_info->num_evt_pairs; i++) {
+               evtchnl_free(front_info, &front_info->evt_pairs[i].req);
+               evtchnl_free(front_info, &front_info->evt_pairs[i].evt);
+       }
+
+       kfree(front_info->evt_pairs);
+       front_info->evt_pairs = NULL;
+}
diff --git a/drivers/gpu/drm/xen/xen_drm_front_evtchnl.h b/drivers/gpu/drm/xen/xen_drm_front_evtchnl.h
new file mode 100644 (file)
index 0000000..b0af699
--- /dev/null
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+
+/*
+ *  Xen para-virtual DRM device
+ *
+ * Copyright (C) 2016-2018 EPAM Systems Inc.
+ *
+ * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
+ */
+
+#ifndef __XEN_DRM_FRONT_EVTCHNL_H_
+#define __XEN_DRM_FRONT_EVTCHNL_H_
+
+#include <linux/completion.h>
+#include <linux/types.h>
+
+#include <xen/interface/io/ring.h>
+#include <xen/interface/io/displif.h>
+
+/*
+ * All operations which are not connector oriented use this ctrl event channel,
+ * e.g. fb_attach/destroy which belong to a DRM device, not to a CRTC.
+ */
+#define GENERIC_OP_EVT_CHNL    0
+
+/* connection state of a single event channel */
+enum xen_drm_front_evtchnl_state {
+       EVTCHNL_STATE_DISCONNECTED,
+       EVTCHNL_STATE_CONNECTED,
+};
+
+/* REQ: front->back request ring; EVT: back->front event page */
+enum xen_drm_front_evtchnl_type {
+       EVTCHNL_TYPE_REQ,
+       EVTCHNL_TYPE_EVT,
+};
+
+struct xen_drm_front_drm_info;
+
+/*
+ * One Xen event channel: grant reference of the shared page, event
+ * channel port and bound IRQ, plus per-type state in the union.
+ */
+struct xen_drm_front_evtchnl {
+       struct xen_drm_front_info *front_info;
+       /* grant reference of the shared ring/event page */
+       int gref;
+       /* Xen event channel port */
+       int port;
+       /* IRQ bound to the event channel */
+       int irq;
+       /* connector index this channel belongs to */
+       int index;
+       enum xen_drm_front_evtchnl_state state;
+       enum xen_drm_front_evtchnl_type type;
+       /* either response id or incoming event id */
+       u16 evt_id;
+       /* next request id or next expected event id */
+       u16 evt_next_id;
+       union {
+               struct {
+                       struct xen_displif_front_ring ring;
+                       struct completion completion;
+                       /* latest response status */
+                       int resp_status;
+                       /* serializer for backend IO: request/response */
+                       struct mutex req_io_lock;
+               } req;
+               struct {
+                       struct xendispl_event_page *page;
+               } evt;
+       } u;
+};
+
+/* request/event channel pair, one per connector */
+struct xen_drm_front_evtchnl_pair {
+       struct xen_drm_front_evtchnl req;
+       struct xen_drm_front_evtchnl evt;
+};
+
+int xen_drm_front_evtchnl_create_all(struct xen_drm_front_info *front_info);
+
+int xen_drm_front_evtchnl_publish_all(struct xen_drm_front_info *front_info);
+
+void xen_drm_front_evtchnl_flush(struct xen_drm_front_evtchnl *evtchnl);
+
+void xen_drm_front_evtchnl_set_state(struct xen_drm_front_info *front_info,
+                                    enum xen_drm_front_evtchnl_state state);
+
+void xen_drm_front_evtchnl_free_all(struct xen_drm_front_info *front_info);
+
+#endif /* __XEN_DRM_FRONT_EVTCHNL_H_ */
diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c
new file mode 100644 (file)
index 0000000..c85bfe7
--- /dev/null
@@ -0,0 +1,308 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+/*
+ *  Xen para-virtual DRM device
+ *
+ * Copyright (C) 2016-2018 EPAM Systems Inc.
+ *
+ * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
+ */
+
+#include "xen_drm_front_gem.h"
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem.h>
+
+#include <linux/dma-buf.h>
+#include <linux/scatterlist.h>
+#include <linux/shmem_fs.h>
+
+#include <xen/balloon.h>
+
+#include "xen_drm_front.h"
+#include "xen_drm_front_shbuf.h"
+
+/* GEM object backed by an array of pages shared with the Xen backend */
+struct xen_gem_object {
+       struct drm_gem_object base;
+
+       /* number of entries in the pages array below */
+       size_t num_pages;
+       struct page **pages;
+
+       /* set for buffers allocated by the backend */
+       bool be_alloc;
+
+       /* this is for imported PRIME buffer */
+       struct sg_table *sgt_imported;
+};
+
+/* convert a base drm_gem_object pointer to its enclosing xen_gem_object */
+static inline struct xen_gem_object *
+to_xen_gem_obj(struct drm_gem_object *gem_obj)
+{
+       return container_of(gem_obj, struct xen_gem_object, base);
+}
+
+/*
+ * Allocate the array of page pointers (not the pages themselves) for a
+ * buffer of @buf_size bytes.  Returns 0 or -ENOMEM.
+ */
+static int gem_alloc_pages_array(struct xen_gem_object *xen_obj,
+                                size_t buf_size)
+{
+       xen_obj->num_pages = DIV_ROUND_UP(buf_size, PAGE_SIZE);
+       xen_obj->pages = kvmalloc_array(xen_obj->num_pages,
+                                       sizeof(struct page *), GFP_KERNEL);
+       return !xen_obj->pages ? -ENOMEM : 0;
+}
+
+/* free the page-pointer array (pages themselves are freed elsewhere) */
+static void gem_free_pages_array(struct xen_gem_object *xen_obj)
+{
+       kvfree(xen_obj->pages);
+       xen_obj->pages = NULL;
+}
+
+/*
+ * Allocate and initialize a bare xen_gem_object of @size bytes.
+ * Returns the new object or an ERR_PTR on failure; never returns NULL.
+ */
+static struct xen_gem_object *gem_create_obj(struct drm_device *dev,
+                                            size_t size)
+{
+       struct xen_gem_object *xen_obj;
+       int ret;
+
+       xen_obj = kzalloc(sizeof(*xen_obj), GFP_KERNEL);
+       if (!xen_obj)
+               return ERR_PTR(-ENOMEM);
+
+       ret = drm_gem_object_init(dev, &xen_obj->base, size);
+       if (ret < 0) {
+               kfree(xen_obj);
+               return ERR_PTR(ret);
+       }
+
+       return xen_obj;
+}
+
+/*
+ * Create a GEM object of @size bytes (rounded up to page size).
+ * If the backend allocates the buffer (cfg.be_alloc), only ballooned
+ * placeholder pages are allocated here to later map backend grants;
+ * otherwise real backing pages are allocated via drm_gem_get_pages()
+ * so they can be shared with the backend.
+ * Returns the object or an ERR_PTR; never returns NULL.
+ */
+static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size)
+{
+       struct xen_drm_front_drm_info *drm_info = dev->dev_private;
+       struct xen_gem_object *xen_obj;
+       int ret;
+
+       size = round_up(size, PAGE_SIZE);
+       xen_obj = gem_create_obj(dev, size);
+       /* gem_create_obj() returns a valid pointer or ERR_PTR, never NULL */
+       if (IS_ERR(xen_obj))
+               return xen_obj;
+
+       if (drm_info->front_info->cfg.be_alloc) {
+               /*
+                * backend will allocate space for this buffer, so
+                * only allocate array of pointers to pages
+                */
+               ret = gem_alloc_pages_array(xen_obj, size);
+               if (ret < 0)
+                       goto fail;
+
+               /*
+                * allocate ballooned pages which will be used to map
+                * grant references provided by the backend
+                */
+               ret = alloc_xenballooned_pages(xen_obj->num_pages,
+                                              xen_obj->pages);
+               if (ret < 0) {
+                       DRM_ERROR("Cannot allocate %zu ballooned pages: %d\n",
+                                 xen_obj->num_pages, ret);
+                       gem_free_pages_array(xen_obj);
+                       goto fail;
+               }
+
+               xen_obj->be_alloc = true;
+               return xen_obj;
+       }
+       /*
+        * need to allocate backing pages now, so we can share those
+        * with the backend
+        */
+       xen_obj->num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
+       xen_obj->pages = drm_gem_get_pages(&xen_obj->base);
+       /*
+        * drm_gem_get_pages() returns a page array or ERR_PTR, never NULL:
+        * checking IS_ERR_OR_NULL here could turn NULL into PTR_ERR(NULL)
+        * == 0 and make this function return ERR_PTR(0) == NULL, which
+        * IS_ERR() in the callers would not catch
+        */
+       if (IS_ERR(xen_obj->pages)) {
+               ret = PTR_ERR(xen_obj->pages);
+               xen_obj->pages = NULL;
+               goto fail;
+       }
+
+       return xen_obj;
+
+fail:
+       DRM_ERROR("Failed to allocate buffer with size %zu\n", size);
+       return ERR_PTR(ret);
+}
+
+/*
+ * Public GEM create entry point: allocate a buffer of @size bytes and
+ * return its base drm_gem_object, or an ERR_PTR on failure.
+ */
+struct drm_gem_object *xen_drm_front_gem_create(struct drm_device *dev,
+                                               size_t size)
+{
+       struct xen_gem_object *xen_obj;
+
+       xen_obj = gem_create(dev, size);
+       /* gem_create() returns a valid object or ERR_PTR, never NULL */
+       if (IS_ERR(xen_obj))
+               return ERR_CAST(xen_obj);
+
+       return &xen_obj->base;
+}
+
+/*
+ * GEM free callback.  Three ownership cases:
+ *  - imported PRIME buffer: destroy the PRIME attachment and free the
+ *    page-pointer array (pages belong to the exporter),
+ *  - backend-allocated buffer: return the ballooned placeholder pages
+ *    and free the pointer array,
+ *  - locally allocated buffer: put the shmem-backed pages (marked dirty
+ *    per the drm_gem_put_pages() arguments: dirty=true, accessed=false).
+ */
+void xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj)
+{
+       struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);
+
+       if (xen_obj->base.import_attach) {
+               drm_prime_gem_destroy(&xen_obj->base, xen_obj->sgt_imported);
+               gem_free_pages_array(xen_obj);
+       } else {
+               if (xen_obj->pages) {
+                       if (xen_obj->be_alloc) {
+                               free_xenballooned_pages(xen_obj->num_pages,
+                                                       xen_obj->pages);
+                               gem_free_pages_array(xen_obj);
+                       } else {
+                               drm_gem_put_pages(&xen_obj->base,
+                                                 xen_obj->pages, true, false);
+                       }
+               }
+       }
+       drm_gem_object_release(gem_obj);
+       kfree(xen_obj);
+}
+
+/* return the buffer's page-pointer array (may be NULL if not allocated) */
+struct page **xen_drm_front_gem_get_pages(struct drm_gem_object *gem_obj)
+{
+       struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);
+
+       return xen_obj->pages;
+}
+
+/*
+ * Build a scatter-gather table from the buffer's pages for PRIME
+ * export.  Returns NULL when the buffer has no pages yet.
+ */
+struct sg_table *xen_drm_front_gem_get_sg_table(struct drm_gem_object *gem_obj)
+{
+       struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);
+
+       if (!xen_obj->pages)
+               return NULL;
+
+       return drm_prime_pages_to_sg(xen_obj->pages, xen_obj->num_pages);
+}
+
+/*
+ * PRIME import callback: wrap the imported sg table in a GEM object,
+ * extract its page pointers and register the buffer with the backend
+ * via xen_drm_front_dbuf_create().
+ *
+ * NOTE(review): the error paths after gem_create_obj() succeeds return
+ * ERR_PTR without releasing xen_obj or its pages array — looks like a
+ * leak; confirm whether a later cleanup covers this.
+ */
+struct drm_gem_object *
+xen_drm_front_gem_import_sg_table(struct drm_device *dev,
+                                 struct dma_buf_attachment *attach,
+                                 struct sg_table *sgt)
+{
+       struct xen_drm_front_drm_info *drm_info = dev->dev_private;
+       struct xen_gem_object *xen_obj;
+       size_t size;
+       int ret;
+
+       size = attach->dmabuf->size;
+       xen_obj = gem_create_obj(dev, size);
+       if (IS_ERR_OR_NULL(xen_obj))
+               return ERR_CAST(xen_obj);
+
+       ret = gem_alloc_pages_array(xen_obj, size);
+       if (ret < 0)
+               return ERR_PTR(ret);
+
+       xen_obj->sgt_imported = sgt;
+
+       ret = drm_prime_sg_to_page_addr_arrays(sgt, xen_obj->pages,
+                                              NULL, xen_obj->num_pages);
+       if (ret < 0)
+               return ERR_PTR(ret);
+
+       /* 0 width/height/bpp: only the size and pages matter for import */
+       ret = xen_drm_front_dbuf_create(drm_info->front_info,
+                                       xen_drm_front_dbuf_to_cookie(&xen_obj->base),
+                                       0, 0, 0, size, xen_obj->pages);
+       if (ret < 0)
+               return ERR_PTR(ret);
+
+       DRM_DEBUG("Imported buffer of size %zu with nents %u\n",
+                 size, sgt->nents);
+
+       return &xen_obj->base;
+}
+
+/*
+ * Map all of the buffer's pages into @vma with write-combine caching.
+ * Pages are inserted eagerly via vm_insert_page() instead of relying
+ * on a .fault handler (see FIXME below).  Returns 0 or a negative
+ * errno from vm_insert_page().
+ */
+static int gem_mmap_obj(struct xen_gem_object *xen_obj,
+                       struct vm_area_struct *vma)
+{
+       unsigned long addr = vma->vm_start;
+       int i;
+
+       /*
+        * clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
+        * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
+        * the whole buffer.
+        */
+       vma->vm_flags &= ~VM_PFNMAP;
+       vma->vm_flags |= VM_MIXEDMAP;
+       vma->vm_pgoff = 0;
+       vma->vm_page_prot =
+                       pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+
+       /*
+        * vm_operations_struct.fault handler will be called if CPU access
+        * to VM is here. For GPUs this isn't the case, because CPU
+        * doesn't touch the memory. Insert pages now, so both CPU and GPU are
+        * happy.
+        * FIXME: as we insert all the pages now then no .fault handler must
+        * be called, so don't provide one
+        */
+       for (i = 0; i < xen_obj->num_pages; i++) {
+               int ret;
+
+               ret = vm_insert_page(vma, addr, xen_obj->pages[i]);
+               if (ret < 0) {
+                       DRM_ERROR("Failed to insert pages into vma: %d\n", ret);
+                       return ret;
+               }
+
+               addr += PAGE_SIZE;
+       }
+       return 0;
+}
+
+/*
+ * file_operations.mmap handler: let the DRM core resolve the GEM
+ * object from the fake offset, then perform the actual page mapping.
+ */
+int xen_drm_front_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+       struct xen_gem_object *xen_obj;
+       struct drm_gem_object *gem_obj;
+       int ret;
+
+       ret = drm_gem_mmap(filp, vma);
+       if (ret < 0)
+               return ret;
+
+       /* drm_gem_mmap() stored the looked-up object in vm_private_data */
+       gem_obj = vma->vm_private_data;
+       xen_obj = to_xen_gem_obj(gem_obj);
+       return gem_mmap_obj(xen_obj, vma);
+}
+
+/*
+ * PRIME vmap: map the buffer's pages into a contiguous kernel virtual
+ * range with write-combine caching.  Returns the mapping or NULL.
+ */
+void *xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj)
+{
+       struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);
+
+       if (!xen_obj->pages)
+               return NULL;
+
+       return vmap(xen_obj->pages, xen_obj->num_pages,
+                   VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+}
+
+/* PRIME vunmap: undo xen_drm_front_gem_prime_vmap() */
+void xen_drm_front_gem_prime_vunmap(struct drm_gem_object *gem_obj,
+                                   void *vaddr)
+{
+       vunmap(vaddr);
+}
+
+/*
+ * PRIME mmap: set up the vma via the DRM helper, then map the pages
+ * the same way as the regular mmap path.
+ */
+int xen_drm_front_gem_prime_mmap(struct drm_gem_object *gem_obj,
+                                struct vm_area_struct *vma)
+{
+       struct xen_gem_object *xen_obj;
+       int ret;
+
+       ret = drm_gem_mmap_obj(gem_obj, gem_obj->size, vma);
+       if (ret < 0)
+               return ret;
+
+       xen_obj = to_xen_gem_obj(gem_obj);
+       return gem_mmap_obj(xen_obj, vma);
+}
diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.h b/drivers/gpu/drm/xen/xen_drm_front_gem.h
new file mode 100644 (file)
index 0000000..d5ab734
--- /dev/null
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+
+/*
+ *  Xen para-virtual DRM device
+ *
+ * Copyright (C) 2016-2018 EPAM Systems Inc.
+ *
+ * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
+ */
+
+#ifndef __XEN_DRM_FRONT_GEM_H
+#define __XEN_DRM_FRONT_GEM_H
+
+#include <drm/drmP.h>
+
+/* allocate a GEM buffer of the given size; ERR_PTR on failure */
+struct drm_gem_object *xen_drm_front_gem_create(struct drm_device *dev,
+                                               size_t size);
+
+/* PRIME import: wrap an sg table and register the buffer with the backend */
+struct drm_gem_object *
+xen_drm_front_gem_import_sg_table(struct drm_device *dev,
+                                 struct dma_buf_attachment *attach,
+                                 struct sg_table *sgt);
+
+/* PRIME export: build an sg table from the buffer's pages */
+struct sg_table *xen_drm_front_gem_get_sg_table(struct drm_gem_object *gem_obj);
+
+/* accessor for the buffer's page-pointer array */
+struct page **xen_drm_front_gem_get_pages(struct drm_gem_object *obj);
+
+void xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj);
+
+int xen_drm_front_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+
+void *xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj);
+
+void xen_drm_front_gem_prime_vunmap(struct drm_gem_object *gem_obj,
+                                   void *vaddr);
+
+int xen_drm_front_gem_prime_mmap(struct drm_gem_object *gem_obj,
+                                struct vm_area_struct *vma);
+
+#endif /* __XEN_DRM_FRONT_GEM_H */
diff --git a/drivers/gpu/drm/xen/xen_drm_front_kms.c b/drivers/gpu/drm/xen/xen_drm_front_kms.c
new file mode 100644 (file)
index 0000000..a3479eb
--- /dev/null
@@ -0,0 +1,366 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+/*
+ *  Xen para-virtual DRM device
+ *
+ * Copyright (C) 2016-2018 EPAM Systems Inc.
+ *
+ * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
+ */
+
+#include "xen_drm_front_kms.h"
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+
+#include "xen_drm_front.h"
+#include "xen_drm_front_conn.h"
+
+/*
+ * Timeout in ms to wait for frame done event from the backend:
+ * must be a bit more than IO time-out
+ */
+#define FRAME_DONE_TO_MS       (XEN_DRM_FRONT_WAIT_BACK_MS + 100)
+
+/* convert a simple display pipe pointer to its enclosing pipeline */
+static struct xen_drm_front_drm_pipeline *
+to_xen_drm_pipeline(struct drm_simple_display_pipe *pipe)
+{
+       return container_of(pipe, struct xen_drm_front_drm_pipeline, pipe);
+}
+
+/*
+ * Framebuffer destroy callback: detach the fb from the backend first
+ * (only while the DRM device is still alive, guarded by
+ * drm_dev_enter/exit), then run the generic GEM fb destroy.
+ */
+static void fb_destroy(struct drm_framebuffer *fb)
+{
+       struct xen_drm_front_drm_info *drm_info = fb->dev->dev_private;
+       int idx;
+
+       if (drm_dev_enter(fb->dev, &idx)) {
+               xen_drm_front_fb_detach(drm_info->front_info,
+                                       xen_drm_front_fb_to_cookie(fb));
+               drm_dev_exit(idx);
+       }
+       drm_gem_fb_destroy(fb);
+}
+
+static struct drm_framebuffer_funcs fb_funcs = {
+       .destroy = fb_destroy,
+};
+
+/*
+ * mode_config.fb_create callback: build a GEM-backed framebuffer and
+ * attach it to the backend.  If the backend attach fails the fb is
+ * destroyed again and an ERR_PTR is returned.
+ */
+static struct drm_framebuffer *
+fb_create(struct drm_device *dev, struct drm_file *filp,
+         const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+       struct xen_drm_front_drm_info *drm_info = dev->dev_private;
+       /*
+        * must be a plain local: this callback can run concurrently for
+        * different clients, a function-static pointer here would be
+        * shared mutable state and race
+        */
+       struct drm_framebuffer *fb;
+       struct drm_gem_object *gem_obj;
+       int ret;
+
+       fb = drm_gem_fb_create_with_funcs(dev, filp, mode_cmd, &fb_funcs);
+       /* the helper returns a valid fb or ERR_PTR, never NULL */
+       if (IS_ERR(fb))
+               return fb;
+
+       gem_obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
+       if (!gem_obj) {
+               DRM_ERROR("Failed to lookup GEM object\n");
+               ret = -ENOENT;
+               goto fail;
+       }
+
+       /* only needed the object pointer for the cookie below */
+       drm_gem_object_put_unlocked(gem_obj);
+
+       ret = xen_drm_front_fb_attach(drm_info->front_info,
+                                     xen_drm_front_dbuf_to_cookie(gem_obj),
+                                     xen_drm_front_fb_to_cookie(fb),
+                                     fb->width, fb->height,
+                                     fb->format->format);
+       if (ret < 0) {
+               DRM_ERROR("Back failed to attach FB %p: %d\n", fb, ret);
+               goto fail;
+       }
+
+       return fb;
+
+fail:
+       drm_gem_fb_destroy(fb);
+       return ERR_PTR(ret);
+}
+
+/* mode config hooks: custom fb_create, generic atomic check/commit */
+static const struct drm_mode_config_funcs mode_config_funcs = {
+       .fb_create = fb_create,
+       .atomic_check = drm_atomic_helper_check,
+       .atomic_commit = drm_atomic_helper_commit,
+};
+
+/*
+ * Deliver the cached page-flip vblank event, if any, under the DRM
+ * event lock and clear it.  Safe to call when no event is pending.
+ */
+static void send_pending_event(struct xen_drm_front_drm_pipeline *pipeline)
+{
+       struct drm_crtc *crtc = &pipeline->pipe.crtc;
+       struct drm_device *dev = crtc->dev;
+       unsigned long flags;
+
+       spin_lock_irqsave(&dev->event_lock, flags);
+       if (pipeline->pending_event)
+               drm_crtc_send_vblank_event(crtc, pipeline->pending_event);
+       pipeline->pending_event = NULL;
+       spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+/*
+ * Simple pipe .enable callback: send the current mode/framebuffer to
+ * the backend.  On failure the connector is marked disconnected so the
+ * failure is visible to user space via connector state.
+ */
+static void display_enable(struct drm_simple_display_pipe *pipe,
+                          struct drm_crtc_state *crtc_state,
+                          struct drm_plane_state *plane_state)
+{
+       struct xen_drm_front_drm_pipeline *pipeline =
+                       to_xen_drm_pipeline(pipe);
+       struct drm_crtc *crtc = &pipe->crtc;
+       struct drm_framebuffer *fb = plane_state->fb;
+       int ret, idx;
+
+       /* bail out if the DRM device is already gone */
+       if (!drm_dev_enter(pipe->crtc.dev, &idx))
+               return;
+
+       ret = xen_drm_front_mode_set(pipeline, crtc->x, crtc->y,
+                                    fb->width, fb->height,
+                                    fb->format->cpp[0] * 8,
+                                    xen_drm_front_fb_to_cookie(fb));
+
+       if (ret) {
+               DRM_ERROR("Failed to enable display: %d\n", ret);
+               pipeline->conn_connected = false;
+       }
+
+       drm_dev_exit(idx);
+}
+
+/*
+ * Simple pipe .disable callback: tell the backend to drop the current
+ * mode (all-zero mode set with a NULL fb cookie) and release any
+ * stalled page-flip event so user space is not blocked forever.
+ */
+static void display_disable(struct drm_simple_display_pipe *pipe)
+{
+       struct xen_drm_front_drm_pipeline *pipeline =
+                       to_xen_drm_pipeline(pipe);
+       int ret = 0, idx;
+
+       if (drm_dev_enter(pipe->crtc.dev, &idx)) {
+               ret = xen_drm_front_mode_set(pipeline, 0, 0, 0, 0, 0,
+                                            xen_drm_front_fb_to_cookie(NULL));
+               drm_dev_exit(idx);
+       }
+       if (ret)
+               DRM_ERROR("Failed to disable display: %d\n", ret);
+
+       /* Make sure we can restart with enabled connector next time */
+       pipeline->conn_connected = true;
+
+       /* release stalled event if any */
+       send_pending_event(pipeline);
+}
+
+/*
+ * Called when the backend reports a frame-done event: cancel the
+ * page-flip timeout worker and deliver the pending vblank event.
+ */
+void xen_drm_front_kms_on_frame_done(struct xen_drm_front_drm_pipeline *pipeline,
+                                    u64 fb_cookie)
+{
+       /*
+        * This runs in interrupt context, e.g. under
+        * drm_info->front_info->io_lock, so we cannot call _sync version
+        * to cancel the work
+        */
+       cancel_delayed_work(&pipeline->pflip_to_worker);
+
+       send_pending_event(pipeline);
+}
+
+/*
+ * Page-flip timeout worker: runs when the backend did not deliver a
+ * frame-done event within FRAME_DONE_TO_MS; releases the pending
+ * vblank event so user space is not blocked forever.
+ */
+static void pflip_to_worker(struct work_struct *work)
+{
+       struct delayed_work *delayed_work = to_delayed_work(work);
+       struct xen_drm_front_drm_pipeline *pipeline =
+                       container_of(delayed_work,
+                                    struct xen_drm_front_drm_pipeline,
+                                    pflip_to_worker);
+
+       /* kernel log messages must be newline terminated */
+       DRM_ERROR("Frame done timed-out, releasing\n");
+       send_pending_event(pipeline);
+}
+
+/*
+ * Send a page-flip request to the backend for a real flip (both old
+ * and new plane state have a framebuffer).  Returns true when the flip
+ * was handed to the backend (the pending event will be sent on frame
+ * done), false when no flip was sent and the caller must deliver the
+ * event itself.
+ */
+static bool display_send_page_flip(struct drm_simple_display_pipe *pipe,
+                                  struct drm_plane_state *old_plane_state)
+{
+       struct drm_plane_state *plane_state =
+                       drm_atomic_get_new_plane_state(old_plane_state->state,
+                                                      &pipe->plane);
+
+       /*
+        * If old_plane_state->fb is NULL and plane_state->fb is not,
+        * then this is an atomic commit which will enable display.
+        * If old_plane_state->fb is not NULL and plane_state->fb is,
+        * then this is an atomic commit which will disable display.
+        * Ignore these and do not send page flip as this framebuffer will be
+        * sent to the backend as a part of display_set_config call.
+        */
+       if (old_plane_state->fb && plane_state->fb) {
+               struct xen_drm_front_drm_pipeline *pipeline =
+                               to_xen_drm_pipeline(pipe);
+               struct xen_drm_front_drm_info *drm_info = pipeline->drm_info;
+               int ret;
+
+               /* arm the timeout before the request so it cannot be missed */
+               schedule_delayed_work(&pipeline->pflip_to_worker,
+                                     msecs_to_jiffies(FRAME_DONE_TO_MS));
+
+               ret = xen_drm_front_page_flip(drm_info->front_info,
+                                             pipeline->index,
+                                             xen_drm_front_fb_to_cookie(plane_state->fb));
+               if (ret) {
+                       DRM_ERROR("Failed to send page flip request to backend: %d\n", ret);
+
+                       pipeline->conn_connected = false;
+                       /*
+                        * Report the flip not handled, so pending event is
+                        * sent, unblocking user-space.
+                        */
+                       return false;
+               }
+               /*
+                * Signal that page flip was handled, pending event will be sent
+                * on frame done event from the backend.
+                */
+               return true;
+       }
+
+       return false;
+}
+
+/*
+ * Simple pipe .update callback: cache the CRTC's vblank event, then
+ * either hand the flip to the backend (event delivered later on frame
+ * done) or deliver the event immediately when no flip was sent or the
+ * device is gone.
+ */
+static void display_update(struct drm_simple_display_pipe *pipe,
+                          struct drm_plane_state *old_plane_state)
+{
+       struct xen_drm_front_drm_pipeline *pipeline =
+                       to_xen_drm_pipeline(pipe);
+       struct drm_crtc *crtc = &pipe->crtc;
+       struct drm_pending_vblank_event *event;
+       int idx;
+
+       event = crtc->state->event;
+       if (event) {
+               struct drm_device *dev = crtc->dev;
+               unsigned long flags;
+
+               /* a previous event should have been delivered by now */
+               WARN_ON(pipeline->pending_event);
+
+               spin_lock_irqsave(&dev->event_lock, flags);
+               crtc->state->event = NULL;
+
+               pipeline->pending_event = event;
+               spin_unlock_irqrestore(&dev->event_lock, flags);
+       }
+
+       if (!drm_dev_enter(pipe->crtc.dev, &idx)) {
+               send_pending_event(pipeline);
+               return;
+       }
+
+       /*
+        * Send page flip request to the backend *after* we have event cached
+        * above, so on page flip done event from the backend we can
+        * deliver it and there is no race condition between this code and
+        * event from the backend.
+        * If this is not a page flip, e.g. no flip done event from the backend
+        * is expected, then send now.
+        */
+       if (!display_send_page_flip(pipe, old_plane_state))
+               send_pending_event(pipeline);
+
+       drm_dev_exit(idx);
+}
+
+/*
+ * CRTC .mode_valid callback: only the single fixed resolution
+ * configured for this pipeline (cfg width/height) is accepted.
+ */
+static enum drm_mode_status
+display_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode)
+{
+       struct xen_drm_front_drm_pipeline *pipeline =
+                       container_of(crtc, struct xen_drm_front_drm_pipeline,
+                                    pipe.crtc);
+
+       if (mode->hdisplay != pipeline->width)
+               return MODE_ERROR;
+
+       if (mode->vdisplay != pipeline->height)
+               return MODE_ERROR;
+
+       return MODE_OK;
+}
+
+/* simple display pipe hooks for one connector pipeline */
+static const struct drm_simple_display_pipe_funcs display_funcs = {
+       .mode_valid = display_mode_valid,
+       .enable = display_enable,
+       .disable = display_disable,
+       .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
+       .update = display_update,
+};
+
+/*
+ * Initialize one display pipeline for connector @index: set up the
+ * timeout worker, the connector and the simple display pipe with the
+ * formats supported by the connector.  Returns 0 or a negative errno.
+ */
+static int display_pipe_init(struct xen_drm_front_drm_info *drm_info,
+                            int index, struct xen_drm_front_cfg_connector *cfg,
+                            struct xen_drm_front_drm_pipeline *pipeline)
+{
+       struct drm_device *dev = drm_info->drm_dev;
+       const u32 *formats;
+       int format_count;
+       int ret;
+
+       pipeline->drm_info = drm_info;
+       pipeline->index = index;
+       pipeline->height = cfg->height;
+       pipeline->width = cfg->width;
+
+       INIT_DELAYED_WORK(&pipeline->pflip_to_worker, pflip_to_worker);
+
+       ret = xen_drm_front_conn_init(drm_info, &pipeline->conn);
+       if (ret)
+               return ret;
+
+       formats = xen_drm_front_conn_get_formats(&format_count);
+
+       return drm_simple_display_pipe_init(dev, &pipeline->pipe,
+                                           &display_funcs, formats,
+                                           format_count, NULL,
+                                           &pipeline->conn);
+}
+
+/*
+ * Initialize KMS: set up mode config limits and hooks, create one
+ * display pipeline per configured connector, then reset mode state and
+ * start connector polling.  Returns 0 or a negative errno (mode config
+ * is cleaned up on pipeline init failure).
+ */
+int xen_drm_front_kms_init(struct xen_drm_front_drm_info *drm_info)
+{
+       struct drm_device *dev = drm_info->drm_dev;
+       int i, ret;
+
+       drm_mode_config_init(dev);
+
+       dev->mode_config.min_width = 0;
+       dev->mode_config.min_height = 0;
+       dev->mode_config.max_width = 4095;
+       dev->mode_config.max_height = 2047;
+       dev->mode_config.funcs = &mode_config_funcs;
+
+       for (i = 0; i < drm_info->front_info->cfg.num_connectors; i++) {
+               struct xen_drm_front_cfg_connector *cfg =
+                               &drm_info->front_info->cfg.connectors[i];
+               struct xen_drm_front_drm_pipeline *pipeline =
+                               &drm_info->pipeline[i];
+
+               ret = display_pipe_init(drm_info, i, cfg, pipeline);
+               if (ret) {
+                       drm_mode_config_cleanup(dev);
+                       return ret;
+               }
+       }
+
+       drm_mode_config_reset(dev);
+       drm_kms_helper_poll_init(dev);
+       return 0;
+}
+
+/*
+ * Tear down KMS state: stop every pipeline's page-flip timeout worker
+ * (synchronously, so no worker runs after this) and deliver any still
+ * pending vblank events.
+ */
+void xen_drm_front_kms_fini(struct xen_drm_front_drm_info *drm_info)
+{
+       int i;
+
+       for (i = 0; i < drm_info->front_info->cfg.num_connectors; i++) {
+               struct xen_drm_front_drm_pipeline *pipeline =
+                               &drm_info->pipeline[i];
+
+               cancel_delayed_work_sync(&pipeline->pflip_to_worker);
+
+               send_pending_event(pipeline);
+       }
+}
diff --git a/drivers/gpu/drm/xen/xen_drm_front_kms.h b/drivers/gpu/drm/xen/xen_drm_front_kms.h
new file mode 100644 (file)
index 0000000..ab2fbad
--- /dev/null
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+
+/*
+ *  Xen para-virtual DRM device
+ *
+ * Copyright (C) 2016-2018 EPAM Systems Inc.
+ *
+ * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
+ */
+
+#ifndef __XEN_DRM_FRONT_KMS_H_
+#define __XEN_DRM_FRONT_KMS_H_
+
+#include <linux/types.h>
+
+struct xen_drm_front_drm_info;
+struct xen_drm_front_drm_pipeline;
+
+int xen_drm_front_kms_init(struct xen_drm_front_drm_info *drm_info);
+
+void xen_drm_front_kms_fini(struct xen_drm_front_drm_info *drm_info);
+
+void xen_drm_front_kms_on_frame_done(struct xen_drm_front_drm_pipeline *pipeline,
+                                    u64 fb_cookie);
+
+#endif /* __XEN_DRM_FRONT_KMS_H_ */
diff --git a/drivers/gpu/drm/xen/xen_drm_front_shbuf.c b/drivers/gpu/drm/xen/xen_drm_front_shbuf.c
new file mode 100644 (file)
index 0000000..d570525
--- /dev/null
@@ -0,0 +1,414 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+/*
+ *  Xen para-virtual DRM device
+ *
+ * Copyright (C) 2016-2018 EPAM Systems Inc.
+ *
+ * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
+ */
+
+#include <drm/drmP.h>
+
+#if defined(CONFIG_X86)
+#include <drm/drm_cache.h>
+#endif
+#include <linux/errno.h>
+#include <linux/mm.h>
+
+#include <asm/xen/hypervisor.h>
+#include <xen/balloon.h>
+#include <xen/xen.h>
+#include <xen/xenbus.h>
+#include <xen/interface/io/ring.h>
+#include <xen/interface/io/displif.h>
+
+#include "xen_drm_front.h"
+#include "xen_drm_front_shbuf.h"
+
+struct xen_drm_front_shbuf_ops {
+       /*
+        * Calculate number of grefs required to handle this buffer,
+        * e.g. if grefs are required for page directory only or the buffer
+        * pages as well.
+        */
+       void (*calc_num_grefs)(struct xen_drm_front_shbuf *buf);
+       /* Fill page directory according to para-virtual display protocol. */
+       void (*fill_page_dir)(struct xen_drm_front_shbuf *buf);
+       /* Claim grant references for the pages of the buffer. */
+       int (*grant_refs_for_buffer)(struct xen_drm_front_shbuf *buf,
+                                    grant_ref_t *priv_gref_head, int gref_idx);
+       /* Map grant references of the buffer. */
+       int (*map)(struct xen_drm_front_shbuf *buf);
+       /* Unmap grant references of the buffer. */
+       int (*unmap)(struct xen_drm_front_shbuf *buf);
+};
+
+grant_ref_t xen_drm_front_shbuf_get_dir_start(struct xen_drm_front_shbuf *buf)
+{
+       if (!buf->grefs)
+               return GRANT_INVALID_REF;
+
+       return buf->grefs[0];
+}
+
+int xen_drm_front_shbuf_map(struct xen_drm_front_shbuf *buf)
+{
+       if (buf->ops->map)
+               return buf->ops->map(buf);
+
+       /* no need to map own grant references */
+       return 0;
+}
+
+int xen_drm_front_shbuf_unmap(struct xen_drm_front_shbuf *buf)
+{
+       if (buf->ops->unmap)
+               return buf->ops->unmap(buf);
+
+       /* no need to unmap own grant references */
+       return 0;
+}
+
+void xen_drm_front_shbuf_flush(struct xen_drm_front_shbuf *buf)
+{
+#if defined(CONFIG_X86)
+       drm_clflush_pages(buf->pages, buf->num_pages);
+#endif
+}
+
+void xen_drm_front_shbuf_free(struct xen_drm_front_shbuf *buf)
+{
+       if (buf->grefs) {
+               int i;
+
+               for (i = 0; i < buf->num_grefs; i++)
+                       if (buf->grefs[i] != GRANT_INVALID_REF)
+                               gnttab_end_foreign_access(buf->grefs[i],
+                                                         0, 0UL);
+       }
+       kfree(buf->grefs);
+       kfree(buf->directory);
+       kfree(buf);
+}
+
+/*
+ * number of grefs a page can hold with respect to the
+ * struct xendispl_page_directory header
+ */
+#define XEN_DRM_NUM_GREFS_PER_PAGE ((PAGE_SIZE - \
+               offsetof(struct xendispl_page_directory, gref)) / \
+               sizeof(grant_ref_t))
+
+static int get_num_pages_dir(struct xen_drm_front_shbuf *buf)
+{
+       /* number of pages the page directory consumes itself */
+       return DIV_ROUND_UP(buf->num_pages, XEN_DRM_NUM_GREFS_PER_PAGE);
+}
+
+static void backend_calc_num_grefs(struct xen_drm_front_shbuf *buf)
+{
+       /* only for pages the page directory consumes itself */
+       buf->num_grefs = get_num_pages_dir(buf);
+}
+
+static void guest_calc_num_grefs(struct xen_drm_front_shbuf *buf)
+{
+       /*
+        * number of pages the page directory consumes itself
+        * plus grefs for the buffer pages
+        */
+       buf->num_grefs = get_num_pages_dir(buf) + buf->num_pages;
+}
+
+#define xen_page_to_vaddr(page) \
+               ((phys_addr_t)pfn_to_kaddr(page_to_xen_pfn(page)))
+
+static int backend_unmap(struct xen_drm_front_shbuf *buf)
+{
+       struct gnttab_unmap_grant_ref *unmap_ops;
+       int i, ret;
+
+       if (!buf->pages || !buf->backend_map_handles || !buf->grefs)
+               return 0;
+
+       unmap_ops = kcalloc(buf->num_pages, sizeof(*unmap_ops),
+                           GFP_KERNEL);
+       if (!unmap_ops) {
+               DRM_ERROR("Failed to get memory while unmapping\n");
+               return -ENOMEM;
+       }
+
+       for (i = 0; i < buf->num_pages; i++) {
+               phys_addr_t addr;
+
+               addr = xen_page_to_vaddr(buf->pages[i]);
+               gnttab_set_unmap_op(&unmap_ops[i], addr, GNTMAP_host_map,
+                                   buf->backend_map_handles[i]);
+       }
+
+       ret = gnttab_unmap_refs(unmap_ops, NULL, buf->pages,
+                               buf->num_pages);
+
+       for (i = 0; i < buf->num_pages; i++) {
+               if (unlikely(unmap_ops[i].status != GNTST_okay))
+                       DRM_ERROR("Failed to unmap page %d: %d\n",
+                                 i, unmap_ops[i].status);
+       }
+
+       if (ret)
+               DRM_ERROR("Failed to unmap grant references, ret %d", ret);
+
+       kfree(unmap_ops);
+       kfree(buf->backend_map_handles);
+       buf->backend_map_handles = NULL;
+       return ret;
+}
+
+static int backend_map(struct xen_drm_front_shbuf *buf)
+{
+       struct gnttab_map_grant_ref *map_ops = NULL;
+       unsigned char *ptr;
+       int ret, cur_gref, cur_dir_page, cur_page, grefs_left;
+
+       map_ops = kcalloc(buf->num_pages, sizeof(*map_ops), GFP_KERNEL);
+       if (!map_ops)
+               return -ENOMEM;
+
+       buf->backend_map_handles = kcalloc(buf->num_pages,
+                                          sizeof(*buf->backend_map_handles),
+                                          GFP_KERNEL);
+       if (!buf->backend_map_handles) {
+               kfree(map_ops);
+               return -ENOMEM;
+       }
+
+       /*
+        * read page directory to get grefs from the backend: for external
+        * buffer we only allocate buf->grefs for the page directory,
+        * so buf->num_grefs has number of pages in the page directory itself
+        */
+       ptr = buf->directory;
+       grefs_left = buf->num_pages;
+       cur_page = 0;
+       for (cur_dir_page = 0; cur_dir_page < buf->num_grefs; cur_dir_page++) {
+               struct xendispl_page_directory *page_dir =
+                               (struct xendispl_page_directory *)ptr;
+               int to_copy = XEN_DRM_NUM_GREFS_PER_PAGE;
+
+               if (to_copy > grefs_left)
+                       to_copy = grefs_left;
+
+               for (cur_gref = 0; cur_gref < to_copy; cur_gref++) {
+                       phys_addr_t addr;
+
+                       addr = xen_page_to_vaddr(buf->pages[cur_page]);
+                       gnttab_set_map_op(&map_ops[cur_page], addr,
+                                         GNTMAP_host_map,
+                                         page_dir->gref[cur_gref],
+                                         buf->xb_dev->otherend_id);
+                       cur_page++;
+               }
+
+               grefs_left -= to_copy;
+               ptr += PAGE_SIZE;
+       }
+       ret = gnttab_map_refs(map_ops, NULL, buf->pages, buf->num_pages);
+
+       /* save handles even if error, so we can unmap */
+       for (cur_page = 0; cur_page < buf->num_pages; cur_page++) {
+               buf->backend_map_handles[cur_page] = map_ops[cur_page].handle;
+               if (unlikely(map_ops[cur_page].status != GNTST_okay))
+                       DRM_ERROR("Failed to map page %d: %d\n",
+                                 cur_page, map_ops[cur_page].status);
+       }
+
+       if (ret) {
+               DRM_ERROR("Failed to map grant references, ret %d", ret);
+               backend_unmap(buf);
+       }
+
+       kfree(map_ops);
+       return ret;
+}
+
+static void backend_fill_page_dir(struct xen_drm_front_shbuf *buf)
+{
+       struct xendispl_page_directory *page_dir;
+       unsigned char *ptr;
+       int i, num_pages_dir;
+
+       ptr = buf->directory;
+       num_pages_dir = get_num_pages_dir(buf);
+
+       /* fill only grefs for the page directory itself */
+       for (i = 0; i < num_pages_dir - 1; i++) {
+               page_dir = (struct xendispl_page_directory *)ptr;
+
+               page_dir->gref_dir_next_page = buf->grefs[i + 1];
+               ptr += PAGE_SIZE;
+       }
+       /* last page must say there is no more pages */
+       page_dir = (struct xendispl_page_directory *)ptr;
+       page_dir->gref_dir_next_page = GRANT_INVALID_REF;
+}
+
+static void guest_fill_page_dir(struct xen_drm_front_shbuf *buf)
+{
+       unsigned char *ptr;
+       int cur_gref, grefs_left, to_copy, i, num_pages_dir;
+
+       ptr = buf->directory;
+       num_pages_dir = get_num_pages_dir(buf);
+
+       /*
+        * while copying, skip grefs at start, they are for pages
+        * granted for the page directory itself
+        */
+       cur_gref = num_pages_dir;
+       grefs_left = buf->num_pages;
+       for (i = 0; i < num_pages_dir; i++) {
+               struct xendispl_page_directory *page_dir =
+                               (struct xendispl_page_directory *)ptr;
+
+               if (grefs_left <= XEN_DRM_NUM_GREFS_PER_PAGE) {
+                       to_copy = grefs_left;
+                       page_dir->gref_dir_next_page = GRANT_INVALID_REF;
+               } else {
+                       to_copy = XEN_DRM_NUM_GREFS_PER_PAGE;
+                       page_dir->gref_dir_next_page = buf->grefs[i + 1];
+               }
+               memcpy(&page_dir->gref, &buf->grefs[cur_gref],
+                      to_copy * sizeof(grant_ref_t));
+               ptr += PAGE_SIZE;
+               grefs_left -= to_copy;
+               cur_gref += to_copy;
+       }
+}
+
+static int guest_grant_refs_for_buffer(struct xen_drm_front_shbuf *buf,
+                                      grant_ref_t *priv_gref_head,
+                                      int gref_idx)
+{
+       int i, cur_ref, otherend_id;
+
+       otherend_id = buf->xb_dev->otherend_id;
+       for (i = 0; i < buf->num_pages; i++) {
+               cur_ref = gnttab_claim_grant_reference(priv_gref_head);
+               if (cur_ref < 0)
+                       return cur_ref;
+
+               gnttab_grant_foreign_access_ref(cur_ref, otherend_id,
+                                               xen_page_to_gfn(buf->pages[i]),
+                                               0);
+               buf->grefs[gref_idx++] = cur_ref;
+       }
+       return 0;
+}
+
+static int grant_references(struct xen_drm_front_shbuf *buf)
+{
+       grant_ref_t priv_gref_head;
+       int ret, i, j, cur_ref;
+       int otherend_id, num_pages_dir;
+
+       ret = gnttab_alloc_grant_references(buf->num_grefs, &priv_gref_head);
+       if (ret < 0) {
+               DRM_ERROR("Cannot allocate grant references\n");
+               return ret;
+       }
+
+       otherend_id = buf->xb_dev->otherend_id;
+       j = 0;
+       num_pages_dir = get_num_pages_dir(buf);
+       for (i = 0; i < num_pages_dir; i++) {
+               unsigned long frame;
+
+               cur_ref = gnttab_claim_grant_reference(&priv_gref_head);
+               if (cur_ref < 0)
+                       return cur_ref;
+
+               frame = xen_page_to_gfn(virt_to_page(buf->directory +
+                                       PAGE_SIZE * i));
+               gnttab_grant_foreign_access_ref(cur_ref, otherend_id, frame, 0);
+               buf->grefs[j++] = cur_ref;
+       }
+
+       if (buf->ops->grant_refs_for_buffer) {
+               ret = buf->ops->grant_refs_for_buffer(buf, &priv_gref_head, j);
+               if (ret)
+                       return ret;
+       }
+
+       gnttab_free_grant_references(priv_gref_head);
+       return 0;
+}
+
+static int alloc_storage(struct xen_drm_front_shbuf *buf)
+{
+       buf->grefs = kcalloc(buf->num_grefs, sizeof(*buf->grefs), GFP_KERNEL);
+       if (!buf->grefs)
+               return -ENOMEM;
+
+       buf->directory = kcalloc(get_num_pages_dir(buf), PAGE_SIZE, GFP_KERNEL);
+       if (!buf->directory)
+               return -ENOMEM;
+
+       return 0;
+}
+
+/*
+ * For be allocated buffers we don't need grant_refs_for_buffer as those
+ * grant references are allocated at backend side
+ */
+static const struct xen_drm_front_shbuf_ops backend_ops = {
+       .calc_num_grefs = backend_calc_num_grefs,
+       .fill_page_dir = backend_fill_page_dir,
+       .map = backend_map,
+       .unmap = backend_unmap
+};
+
+/* For locally granted references we do not need to map/unmap the references */
+static const struct xen_drm_front_shbuf_ops local_ops = {
+       .calc_num_grefs = guest_calc_num_grefs,
+       .fill_page_dir = guest_fill_page_dir,
+       .grant_refs_for_buffer = guest_grant_refs_for_buffer,
+};
+
+struct xen_drm_front_shbuf *
+xen_drm_front_shbuf_alloc(struct xen_drm_front_shbuf_cfg *cfg)
+{
+       struct xen_drm_front_shbuf *buf;
+       int ret;
+
+       buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+       if (!buf)
+               return NULL;
+
+       if (cfg->be_alloc)
+               buf->ops = &backend_ops;
+       else
+               buf->ops = &local_ops;
+
+       buf->xb_dev = cfg->xb_dev;
+       buf->num_pages = DIV_ROUND_UP(cfg->size, PAGE_SIZE);
+       buf->pages = cfg->pages;
+
+       buf->ops->calc_num_grefs(buf);
+
+       ret = alloc_storage(buf);
+       if (ret)
+               goto fail;
+
+       ret = grant_references(buf);
+       if (ret)
+               goto fail;
+
+       buf->ops->fill_page_dir(buf);
+
+       return buf;
+
+fail:
+       xen_drm_front_shbuf_free(buf);
+       return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/xen/xen_drm_front_shbuf.h b/drivers/gpu/drm/xen/xen_drm_front_shbuf.h
new file mode 100644 (file)
index 0000000..7545c69
--- /dev/null
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+
+/*
+ *  Xen para-virtual DRM device
+ *
+ * Copyright (C) 2016-2018 EPAM Systems Inc.
+ *
+ * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
+ */
+
+#ifndef __XEN_DRM_FRONT_SHBUF_H_
+#define __XEN_DRM_FRONT_SHBUF_H_
+
+#include <linux/kernel.h>
+#include <linux/scatterlist.h>
+
+#include <xen/grant_table.h>
+
+struct xen_drm_front_shbuf {
+       /*
+        * number of references granted for the backend use:
+        *  - for allocated/imported dma-buf's this holds number of grant
+        *    references for the page directory and pages of the buffer
+        *  - for the buffer provided by the backend this holds number of
+        *    grant references for the page directory as grant references for
+        *    the buffer will be provided by the backend
+        */
+       int num_grefs;
+       grant_ref_t *grefs;
+       unsigned char *directory;
+
+       int num_pages;
+       struct page **pages;
+
+       struct xenbus_device *xb_dev;
+
+       /* these are the ops used internally depending on be_alloc mode */
+       const struct xen_drm_front_shbuf_ops *ops;
+
+       /* Xen map handles for the buffer allocated by the backend */
+       grant_handle_t *backend_map_handles;
+};
+
+struct xen_drm_front_shbuf_cfg {
+       struct xenbus_device *xb_dev;
+       size_t size;
+       struct page **pages;
+       bool be_alloc;
+};
+
+struct xen_drm_front_shbuf *
+xen_drm_front_shbuf_alloc(struct xen_drm_front_shbuf_cfg *cfg);
+
+grant_ref_t xen_drm_front_shbuf_get_dir_start(struct xen_drm_front_shbuf *buf);
+
+int xen_drm_front_shbuf_map(struct xen_drm_front_shbuf *buf);
+
+int xen_drm_front_shbuf_unmap(struct xen_drm_front_shbuf *buf);
+
+void xen_drm_front_shbuf_flush(struct xen_drm_front_shbuf *buf);
+
+void xen_drm_front_shbuf_free(struct xen_drm_front_shbuf *buf);
+
+#endif /* __XEN_DRM_FRONT_SHBUF_H_ */
index 94545ad..d1931f5 100644 (file)
@@ -268,7 +268,7 @@ static void zx_plane_atomic_disable(struct drm_plane *plane,
        struct zx_plane *zplane = to_zx_plane(plane);
        void __iomem *hbsc = zplane->hbsc;
 
-       zx_vou_layer_disable(plane);
+       zx_vou_layer_disable(plane, old_state);
 
        /* Disable HBSC block */
        zx_writel_mask(hbsc + HBSC_CTRL0, HBSC_CTRL_EN, 0);
index 7491813..442311d 100644 (file)
@@ -627,9 +627,10 @@ void zx_vou_layer_enable(struct drm_plane *plane)
        zx_writel_mask(vou->osd + OSD_CTRL0, bits->enable, bits->enable);
 }
 
-void zx_vou_layer_disable(struct drm_plane *plane)
+void zx_vou_layer_disable(struct drm_plane *plane,
+                         struct drm_plane_state *old_state)
 {
-       struct zx_crtc *zcrtc = to_zx_crtc(plane->crtc);
+       struct zx_crtc *zcrtc = to_zx_crtc(old_state->crtc);
        struct zx_vou_hw *vou = zcrtc->vou;
        struct zx_plane *zplane = to_zx_plane(plane);
        const struct vou_layer_bits *bits = zplane->bits;
index 97d72bf..5b7f84f 100644 (file)
@@ -62,6 +62,7 @@ void zx_vou_config_dividers(struct drm_crtc *crtc,
                            struct vou_div_config *configs, int num);
 
 void zx_vou_layer_enable(struct drm_plane *plane);
-void zx_vou_layer_disable(struct drm_plane *plane);
+void zx_vou_layer_disable(struct drm_plane *plane,
+                         struct drm_plane_state *old_state);
 
 #endif /* __ZX_VOU_H__ */
index e18642e..f6d26be 100644 (file)
@@ -242,7 +242,7 @@ static struct drm_driver driver = {
        .minor = DRIVER_MINOR,
        .patchlevel = DRIVER_PATCHLEVEL,
 
-       .gem_free_object = vbox_gem_free_object,
+       .gem_free_object_unlocked = vbox_gem_free_object,
        .dumb_create = vbox_dumb_create,
        .dumb_map_offset = vbox_dumb_mmap_offset,
        .dumb_destroy = drm_gem_dumb_destroy,
index e9a1116..475b706 100644 (file)
@@ -33,7 +33,8 @@ struct analogix_dp_plat_data {
        struct drm_connector *connector;
        bool skip_connector;
 
-       int (*power_on)(struct analogix_dp_plat_data *);
+       int (*power_on_start)(struct analogix_dp_plat_data *);
+       int (*power_on_end)(struct analogix_dp_plat_data *);
        int (*power_off)(struct analogix_dp_plat_data *);
        int (*attach)(struct analogix_dp_plat_data *, struct drm_bridge *,
                      struct drm_connector *);
index c6666cd..f5099c1 100644 (file)
@@ -95,14 +95,6 @@ struct dma_buf_attachment;
 struct pci_dev;
 struct pci_controller;
 
-/***********************************************************************/
-/** \name DRM template customization defaults */
-/*@{*/
-
-/***********************************************************************/
-/** \name Internal types and structures */
-/*@{*/
-
 #define DRM_IF_VERSION(maj, min) (maj << 16 | min)
 
 /**
@@ -123,27 +115,13 @@ static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev)
 #define DRM_SWITCH_POWER_CHANGING 2
 #define DRM_SWITCH_POWER_DYNAMIC_OFF 3
 
-static __inline__ int drm_core_check_feature(struct drm_device *dev,
-                                            int feature)
+static inline bool drm_core_check_feature(struct drm_device *dev, int feature)
 {
-       return ((dev->driver->driver_features & feature) ? 1 : 0);
+       return dev->driver->driver_features & feature;
 }
 
-/******************************************************************/
-/** \name Internal function definitions */
-/*@{*/
-
-                               /* Driver support (drm_drv.h) */
-
-/*
- * These are exported to drivers so that they can implement fencing using
- * DMA quiscent + idle. DMA quiescent usually requires the hardware lock.
- */
-
-/*@}*/
-
 /* returns true if currently okay to sleep */
-static __inline__ bool drm_can_sleep(void)
+static inline bool drm_can_sleep(void)
 {
        if (in_atomic() || in_dbg_master() || irqs_disabled())
                return false;
index 1760602..330c561 100644 (file)
@@ -36,6 +36,9 @@ static inline bool drm_rotation_90_or_270(unsigned int rotation)
        return rotation & (DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270);
 }
 
+#define DRM_BLEND_ALPHA_OPAQUE         0xffff
+
+int drm_plane_create_alpha_property(struct drm_plane *plane);
 int drm_plane_create_rotation_property(struct drm_plane *plane,
                                       unsigned int rotation,
                                       unsigned int supported_rotations);
index 7c4fa32..3a0eac2 100644 (file)
@@ -46,7 +46,14 @@ struct drm_device {
        /* currently active master for this device. Protected by master_mutex */
        struct drm_master *master;
 
-       atomic_t unplugged;                     /**< Flag whether dev is dead */
+       /**
+        * @unplugged:
+        *
+        * Flag to tell if the device has been unplugged.
+        * See drm_dev_enter() and drm_dev_is_unplugged().
+        */
+       bool unplugged;
+
        struct inode *anon_inode;               /**< inode for private address-space */
        char *unique;                           /**< unique name of the device */
        /*@} */
index d23dcdd..7e545f5 100644 (file)
@@ -624,6 +624,8 @@ void drm_dev_get(struct drm_device *dev);
 void drm_dev_put(struct drm_device *dev);
 void drm_dev_unref(struct drm_device *dev);
 void drm_put_dev(struct drm_device *dev);
+bool drm_dev_enter(struct drm_device *dev, int *idx);
+void drm_dev_exit(int idx);
 void drm_dev_unplug(struct drm_device *dev);
 
 /**
@@ -635,11 +637,16 @@ void drm_dev_unplug(struct drm_device *dev);
  * unplugged, these two functions guarantee that any store before calling
  * drm_dev_unplug() is visible to callers of this function after it completes
  */
-static inline int drm_dev_is_unplugged(struct drm_device *dev)
+static inline bool drm_dev_is_unplugged(struct drm_device *dev)
 {
-       int ret = atomic_read(&dev->unplugged);
-       smp_rmb();
-       return ret;
+       int idx;
+
+       if (drm_dev_enter(dev, &idx)) {
+               drm_dev_exit(idx);
+               return false;
+       }
+
+       return true;
 }
 
 
index 8d89a9c..b25d12e 100644 (file)
@@ -465,8 +465,6 @@ struct edid *drm_get_edid(struct drm_connector *connector,
 struct edid *drm_get_edid_switcheroo(struct drm_connector *connector,
                                     struct i2c_adapter *adapter);
 struct edid *drm_edid_duplicate(const struct edid *edid);
-void drm_reset_display_info(struct drm_connector *connector);
-u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edid);
 int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
 
 u8 drm_match_cea_mode(const struct drm_display_mode *to_match);
index 5ca7cdc..a38de7e 100644 (file)
@@ -10,6 +10,7 @@ struct drm_gem_object;
 struct drm_mode_fb_cmd2;
 struct drm_plane;
 struct drm_plane_state;
+struct drm_simple_display_pipe;
 
 struct drm_gem_object *drm_gem_fb_get_obj(struct drm_framebuffer *fb,
                                          unsigned int plane);
@@ -27,6 +28,8 @@ drm_gem_fb_create(struct drm_device *dev, struct drm_file *file,
 
 int drm_gem_fb_prepare_fb(struct drm_plane *plane,
                          struct drm_plane_state *state);
+int drm_gem_fb_simple_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
+                                             struct drm_plane_state *plane_state);
 
 struct drm_framebuffer *
 drm_gem_fbdev_fb_create(struct drm_device *dev,
index cf0e7d8..8fad66f 100644 (file)
@@ -194,8 +194,8 @@ void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev);
 void drm_legacy_ioremap_wc(struct drm_local_map *map, struct drm_device *dev);
 void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev);
 
-static __inline__ struct drm_local_map *drm_legacy_findmap(struct drm_device *dev,
-                                                          unsigned int token)
+static inline struct drm_local_map *drm_legacy_findmap(struct drm_device *dev,
+                                                      unsigned int token)
 {
        struct drm_map_list *_entry;
        list_for_each_entry(_entry, &dev->maplist, head)
index 7569f22..33b3a96 100644 (file)
@@ -796,6 +796,14 @@ struct drm_mode_config {
        bool allow_fb_modifiers;
 
        /**
+        * @normalize_zpos:
+        *
+        * If true the drm core will call drm_atomic_normalize_zpos() as part of
+        * atomic mode checking from drm_atomic_helper_check()
+        */
+       bool normalize_zpos;
+
+       /**
         * @modifiers_property: Plane property to list support modifier/format
         * combination.
         */
index 3e76ca8..35e2a3a 100644 (file)
@@ -1004,11 +1004,14 @@ struct drm_plane_helper_funcs {
         * This function must not block for outstanding rendering, since it is
         * called in the context of the atomic IOCTL even for async commits to
         * be able to return any errors to userspace. Instead the recommended
-        * way is to fill out the fence member of the passed-in
+        * way is to fill out the &drm_plane_state.fence of the passed-in
         * &drm_plane_state. If the driver doesn't support native fences then
         * equivalent functionality should be implemented through private
         * members in the plane structure.
         *
+        * Drivers which always have their buffers pinned should use
+        * drm_gem_fb_prepare_fb() for this hook.
+        *
         * The helpers will call @cleanup_fb with matching arguments for every
         * successful call to this hook.
         *
index f7bf4a4..26fa50c 100644 (file)
@@ -43,6 +43,7 @@ struct drm_modeset_acquire_ctx;
  *     plane (in 16.16)
  * @src_w: width of visible portion of plane (in 16.16)
  * @src_h: height of visible portion of plane (in 16.16)
+ * @alpha: opacity of the plane
  * @rotation: rotation of the plane
  * @zpos: priority of the given plane on crtc (optional)
  *     Note that multiple active planes on the same crtc can have an identical
@@ -51,8 +52,8 @@ struct drm_modeset_acquire_ctx;
  *     plane with a lower ID.
  * @normalized_zpos: normalized value of zpos: unique, range from 0 to N-1
  *     where N is the number of active planes for given crtc. Note that
- *     the driver must call drm_atomic_normalize_zpos() to update this before
- *     it can be trusted.
+ *     the driver must set drm_mode_config.normalize_zpos or call
+ *     drm_atomic_normalize_zpos() to update this before it can be trusted.
  * @src: clipped source coordinates of the plane (in 16.16)
  * @dst: clipped destination coordinates of the plane
  * @state: backpointer to global drm_atomic_state
@@ -79,8 +80,15 @@ struct drm_plane_state {
        /**
         * @fence:
         *
-        * Optional fence to wait for before scanning out @fb. Do not write this
-        * directly, use drm_atomic_set_fence_for_plane()
+        * Optional fence to wait for before scanning out @fb. The core atomic
+        * code will set this when userspace is using explicit fencing. Do not
+        * write this directly for a driver's implicit fence, use
+        * drm_atomic_set_fence_for_plane() to ensure that an explicit fence is
+        * preserved.
+        *
+        * Drivers should store any implicit fence in this from their
+        * &drm_plane_helper.prepare_fb callback. See drm_gem_fb_prepare_fb()
+        * and drm_gem_fb_simple_display_pipe_prepare_fb() for suitable helpers.
         */
        struct dma_fence *fence;
 
@@ -106,6 +114,9 @@ struct drm_plane_state {
        uint32_t src_x, src_y;
        uint32_t src_h, src_w;
 
+       /* Plane opacity */
+       u16 alpha;
+
        /* Plane rotation */
        unsigned int rotation;
 
@@ -496,6 +507,7 @@ enum drm_plane_type {
  * @funcs: helper functions
  * @properties: property tracking for this plane
  * @type: type of plane (overlay, primary, cursor)
+ * @alpha_property: alpha property for this plane
  * @zpos_property: zpos property for this plane
  * @rotation_property: rotation property for this plane
  * @helper_private: mid-layer private data
@@ -571,6 +583,7 @@ struct drm_plane {
         */
        struct drm_plane_state *state;
 
+       struct drm_property *alpha_property;
        struct drm_property *zpos_property;
        struct drm_property *rotation_property;
 
index d1423c7..ab8167b 100644 (file)
@@ -281,32 +281,6 @@ struct drm_property_blob *drm_property_blob_get(struct drm_property_blob *blob);
 void drm_property_blob_put(struct drm_property_blob *blob);
 
 /**
- * drm_property_reference_blob - acquire a blob property reference
- * @blob: DRM blob property
- *
- * This is a compatibility alias for drm_property_blob_get() and should not be
- * used by new code.
- */
-static inline struct drm_property_blob *
-drm_property_reference_blob(struct drm_property_blob *blob)
-{
-       return drm_property_blob_get(blob);
-}
-
-/**
- * drm_property_unreference_blob - release a blob property reference
- * @blob: DRM blob property
- *
- * This is a compatibility alias for drm_property_blob_put() and should not be
- * used by new code.
- */
-static inline void
-drm_property_unreference_blob(struct drm_property_blob *blob)
-{
-       drm_property_blob_put(blob);
-}
-
-/**
  * drm_property_find - find property object
  * @dev: DRM device
  * @file_priv: drm file to check for lease against.
index 1b4e352..4519604 100644 (file)
@@ -64,7 +64,8 @@ struct drm_simple_display_pipe_funcs {
         * This hook is optional.
         */
        void (*enable)(struct drm_simple_display_pipe *pipe,
-                      struct drm_crtc_state *crtc_state);
+                      struct drm_crtc_state *crtc_state,
+                      struct drm_plane_state *plane_state);
        /**
         * @disable:
         *
@@ -115,6 +116,9 @@ struct drm_simple_display_pipe_funcs {
         * Optional, called by &drm_plane_helper_funcs.prepare_fb.  Please read
         * the documentation for the &drm_plane_helper_funcs.prepare_fb hook for
         * more details.
+        *
+        * Drivers which always have their buffers pinned should use
+        * drm_gem_fb_simple_display_pipe_prepare_fb() for this hook.
         */
        int (*prepare_fb)(struct drm_simple_display_pipe *pipe,
                          struct drm_plane_state *plane_state);
index 44e824a..b8ba588 100644 (file)
@@ -67,7 +67,9 @@ int mipi_dbi_init(struct device *dev, struct mipi_dbi *mipi,
                  const struct drm_simple_display_pipe_funcs *pipe_funcs,
                  struct drm_driver *driver,
                  const struct drm_display_mode *mode, unsigned int rotation);
-void mipi_dbi_enable_flush(struct mipi_dbi *mipi);
+void mipi_dbi_enable_flush(struct mipi_dbi *mipi,
+                          struct drm_crtc_state *crtc_state,
+                          struct drm_plane_state *plan_state);
 void mipi_dbi_pipe_disable(struct drm_simple_display_pipe *pipe);
 void mipi_dbi_hw_reset(struct mipi_dbi *mipi);
 bool mipi_dbi_display_is_on(struct mipi_dbi *mipi);
index 0a4ddbc..5b96f0b 100644 (file)
@@ -36,6 +36,11 @@ static inline bool tinydrm_machine_little_endian(void)
 bool tinydrm_merge_clips(struct drm_clip_rect *dst,
                         struct drm_clip_rect *src, unsigned int num_clips,
                         unsigned int flags, u32 max_width, u32 max_height);
+int tinydrm_fb_dirty(struct drm_framebuffer *fb,
+                    struct drm_file *file_priv,
+                    unsigned int flags, unsigned int color,
+                    struct drm_clip_rect *clips,
+                    unsigned int num_clips);
 void tinydrm_memcpy(void *dst, void *vaddr, struct drm_framebuffer *fb,
                    struct drm_clip_rect *clip);
 void tinydrm_swab16(u16 *dst, void *vaddr, struct drm_framebuffer *fb,
index 07a9a11..56e4a91 100644 (file)
@@ -26,6 +26,10 @@ struct tinydrm_device {
        struct drm_simple_display_pipe pipe;
        struct mutex dirty_lock;
        const struct drm_framebuffer_funcs *fb_funcs;
+       int (*fb_dirty)(struct drm_framebuffer *framebuffer,
+                       struct drm_file *file_priv, unsigned flags,
+                       unsigned color, struct drm_clip_rect *clips,
+                       unsigned num_clips);
 };
 
 static inline struct tinydrm_device *
@@ -41,7 +45,7 @@ pipe_to_tinydrm(struct drm_simple_display_pipe *pipe)
  * the &drm_driver structure.
  */
 #define TINYDRM_GEM_DRIVER_OPS \
-       .gem_free_object        = tinydrm_gem_cma_free_object, \
+       .gem_free_object_unlocked = tinydrm_gem_cma_free_object, \
        .gem_print_info         = drm_gem_cma_print_info, \
        .gem_vm_ops             = &drm_gem_cma_vm_ops, \
        .prime_handle_to_fd     = drm_gem_prime_handle_to_fd, \
@@ -91,8 +95,6 @@ void tinydrm_shutdown(struct tinydrm_device *tdev);
 
 void tinydrm_display_pipe_update(struct drm_simple_display_pipe *pipe,
                                 struct drm_plane_state *old_state);
-int tinydrm_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
-                                   struct drm_plane_state *plane_state);
 int
 tinydrm_display_pipe_init(struct tinydrm_device *tdev,
                          const struct drm_simple_display_pipe_funcs *funcs,
index ceb71ea..3a09c97 100644 (file)
@@ -40,12 +40,6 @@ expression object;
 - drm_gem_object_unreference_unlocked(object)
 + drm_gem_object_put_unlocked(object)
 |
-- drm_property_reference_blob(object)
-+ drm_property_blob_get(object)
-|
-- drm_property_unreference_blob(object)
-+ drm_property_blob_put(object)
-|
 - drm_dev_unref(object)
 + drm_dev_put(object)
 )
@@ -72,10 +66,6 @@ __drm_gem_object_unreference(object)
 |
 drm_gem_object_unreference_unlocked(object)
 |
-drm_property_unreference_blob@p(object)
-|
-drm_property_reference_blob@p(object)
-|
 drm_dev_unref@p(object)
 )