properties:
compatible:
- const: mediatek,mt7621-memc
+ items:
+ - const: mediatek,mt7621-memc
+ - const: syscon
reg:
maxItems: 1
examples:
- |
memory-controller@5000 {
- compatible = "mediatek,mt7621-memc";
+ compatible = "mediatek,mt7621-memc", "syscon";
reg = <0x5000 0x1000>;
};
gpio-controller:
type: boolean
description:
- If defined, MT7530's LED controller will run on GPIO mode.
+ If defined, the LED controller of the MT7530 switch will run in GPIO mode.
+
+ There are 15 controllable pins.
+ port 0 LED 0..2 as GPIO 0..2
+ port 1 LED 0..2 as GPIO 3..5
+ port 2 LED 0..2 as GPIO 6..8
+ port 3 LED 0..2 as GPIO 9..11
+ port 4 LED 0..2 as GPIO 12..14
"#interrupt-cells":
const: 1
then:
$ref: "#/$defs/mt7531-dsa-port"
properties:
+ gpio-controller: false
mediatek,mcm: false
- if:
#address-cells = <1>;
#size-cells = <0>;
- switch@0 {
+ switch@1f {
compatible = "mediatek,mt7530";
- reg = <0>;
+ reg = <0x1f>;
reset-gpios = <&pio 33 0>;
#address-cells = <1>;
#size-cells = <0>;
- switch@0 {
+ switch@1f {
compatible = "mediatek,mt7530";
- reg = <0>;
+ reg = <0x1f>;
mediatek,mcm;
resets = <&ethsys MT2701_ETHSYS_MCM_RST>;
#address-cells = <1>;
#size-cells = <0>;
- switch@0 {
+ switch@1f {
compatible = "mediatek,mt7621";
- reg = <0>;
+ reg = <0x1f>;
mediatek,mcm;
resets = <&sysc MT7621_RST_MCM>;
reg = <4>;
};
- switch@0 {
+ switch@1f {
compatible = "mediatek,mt7621";
- reg = <0>;
+ reg = <0x1f>;
mediatek,mcm;
resets = <&sysc MT7621_RST_MCM>;
phy-mode = "rgmii";
};
- switch@0 {
+ switch@1f {
compatible = "mediatek,mt7621";
- reg = <0>;
+ reg = <0x1f>;
mediatek,mcm;
resets = <&sysc MT7621_RST_MCM>;
phy-mode = "rgmii";
};
- switch@0 {
+ switch@1f {
compatible = "mediatek,mt7621";
- reg = <0>;
+ reg = <0x1f>;
mediatek,mcm;
resets = <&sysc MT7621_RST_MCM>;
+++ /dev/null
-Mediatek Gigabit Switch
-=======================
-
-The mediatek gigabit switch can be found on Mediatek SoCs (mt7620, mt7621).
-
-Required properties:
-- compatible: Should be "mediatek,mt7620-gsw" or "mediatek,mt7621-gsw"
-- reg: Address and length of the register set for the device
-- interrupts: Should contain the gigabit switches interrupt
-- resets: Should contain the gigabit switches resets
-- reset-names: Should contain the reset names "gsw"
-
-Example:
-
-gsw@10110000 {
- compatible = "ralink,mt7620-gsw";
- reg = <0x10110000 8000>;
-
- resets = <&rstctrl 23>;
- reset-names = "gsw";
-
- interrupt-parent = <&intc>;
- interrupts = <17>;
-};
+++ /dev/null
-Ralink Frame Engine Ethernet controller
-=======================================
-
-The Ralink frame engine ethernet controller can be found on Ralink and
-Mediatek SoCs (RT288x, RT3x5x, RT366x, RT388x, rt5350, mt7620, mt7621, mt76x8).
-
-Depending on the SoC, there is a number of ports connected to the CPU port
-directly and/or via a (gigabit-)switch.
-
-* Ethernet controller node
-
-Required properties:
-- compatible: Should be one of "ralink,rt2880-eth", "ralink,rt3050-eth",
- "ralink,rt3050-eth", "ralink,rt3883-eth", "ralink,rt5350-eth",
- "mediatek,mt7620-eth", "mediatek,mt7621-eth"
-- reg: Address and length of the register set for the device
-- interrupts: Should contain the frame engines interrupt
-- resets: Should contain the frame engines resets
-- reset-names: Should contain the reset names "fe". If a switch is present
- "esw" is also required.
-
-
-* Ethernet port node
-
-Required properties:
-- compatible: Should be "ralink,eth-port"
-- reg: The number of the physical port
-- phy-handle: reference to the node describing the phy
-
-Example:
-
-mdio-bus {
- ...
- phy0: ethernet-phy@0 {
- phy-mode = "mii";
- reg = <0>;
- };
-};
-
-ethernet@400000 {
- compatible = "ralink,rt2880-eth";
- reg = <0x00400000 10000>;
-
- #address-cells = <1>;
- #size-cells = <0>;
-
- resets = <&rstctrl 18>;
- reset-names = "fe";
-
- interrupt-parent = <&cpuintc>;
- interrupts = <5>;
-
- port@0 {
- compatible = "ralink,eth-port";
- reg = <0>;
- phy-handle = <&phy0>;
- };
-
-};
+++ /dev/null
-Ralink Fast Ethernet Embedded Switch
-====================================
-
-The ralink fast ethernet embedded switch can be found on Ralink and Mediatek
-SoCs (RT3x5x, RT5350, MT76x8).
-
-Required properties:
-- compatible: Should be "ralink,rt3050-esw"
-- reg: Address and length of the register set for the device
-- interrupts: Should contain the embedded switches interrupt
-- resets: Should contain the embedded switches resets
-- reset-names: Should contain the reset names "esw"
-
-Optional properties:
-- ralink,portmap: can be used to choose if the default switch setup is
- llllw or wllll
-- ralink,led_polarity: override the active high/low settings of the leds
-
-Example:
-
-esw@10110000 {
- compatible = "ralink,rt3050-esw";
- reg = <0x10110000 8000>;
-
- resets = <&rstctrl 23>;
- reset-names = "esw";
-
- interrupt-parent = <&intc>;
- interrupts = <17>;
-};
``ETHTOOL_A_LINKMODES_DUPLEX`` u8 duplex mode
``ETHTOOL_A_LINKMODES_MASTER_SLAVE_CFG`` u8 Master/slave port mode
``ETHTOOL_A_LINKMODES_MASTER_SLAVE_STATE`` u8 Master/slave port state
+ ``ETHTOOL_A_LINKMODES_RATE_MATCHING`` u8 PHY rate matching
========================================== ====== ==========================
For ``ETHTOOL_A_LINKMODES_OURS``, value represents advertised modes and mask
``ETHTOOL_A_LINKMODES_SPEED`` u32 link speed (Mb/s)
``ETHTOOL_A_LINKMODES_DUPLEX`` u8 duplex mode
``ETHTOOL_A_LINKMODES_MASTER_SLAVE_CFG`` u8 Master/slave port mode
+ ``ETHTOOL_A_LINKMODES_RATE_MATCHING`` u8 PHY rate matching
``ETHTOOL_A_LINKMODES_LANES`` u32 lanes
========================================== ====== ==========================
ARM/Microchip Sparx5 SoC support
M: Lars Povlsen <lars.povlsen@microchip.com>
M: Steen Hegelund <Steen.Hegelund@microchip.com>
+M: Daniel Machon <daniel.machon@microchip.com>
M: UNGLinuxDriver@microchip.com
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Supported
bootargs = "console=ttyS0,57600";
};
- palmbus: palmbus@1e000000 {
- i2c@900 {
- status = "okay";
- };
- };
-
gpio-keys {
compatible = "gpio-keys";
};
};
-&sdhci {
+&mmc {
status = "okay";
};
bootargs = "console=ttyS0,57600";
};
- palmbus: palmbus@1e000000 {
- i2c@900 {
- status = "okay";
- };
- };
-
gpio-keys {
compatible = "gpio-keys";
linux,code = <KEY_RESTART>;
};
};
+
+ gpio-leds {
+ compatible = "gpio-leds";
+
+ ethblack-green {
+ label = "green:ethblack";
+ gpios = <&gpio 3 GPIO_ACTIVE_LOW>;
+ };
+
+ ethblue-green {
+ label = "green:ethblue";
+ gpios = <&gpio 4 GPIO_ACTIVE_LOW>;
+ };
+
+ ethyellow-green {
+ label = "green:ethyellow";
+ gpios = <&gpio 15 GPIO_ACTIVE_LOW>;
+ };
+
+ ethyellow-orange {
+ label = "orange:ethyellow";
+ gpios = <&gpio 13 GPIO_ACTIVE_LOW>;
+ };
+
+ power {
+ label = "green:power";
+ gpios = <&gpio 6 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "default-on";
+ };
+
+ system {
+ label = "green:system";
+ gpios = <&gpio 8 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "disk-activity";
+ };
+ };
};
-&sdhci {
+&mmc {
status = "okay";
};
&gmac1 {
status = "okay";
- phy-handle = <&ethphy7>;
+ phy-handle = <&ethphy5>;
};
&mdio {
- ethphy7: ethernet-phy@7 {
- reg = <7>;
+ ethphy5: ethernet-phy@5 {
+ reg = <5>;
phy-mode = "rgmii-rxid";
};
};
compatible = "mti,cpu-interrupt-controller";
};
- aliases {
- serial0 = &uartlite;
- };
-
-
mmc_fixed_3v3: regulator-3v3 {
compatible = "regulator-fixed";
regulator-name = "mmc_power";
pinctrl-0 = <&i2c_pins>;
};
- memc: syscon@5000 {
+ memc: memory-controller@5000 {
compatible = "mediatek,mt7621-memc", "syscon";
reg = <0x5000 0x1000>;
};
- uartlite: uartlite@c00 {
+ serial0: serial@c00 {
compatible = "ns16550a";
reg = <0xc00 0x100>;
clocks = <&sysc MT7621_CLK_UART1>;
- clock-names = "uart1";
interrupt-parent = <&gic>;
interrupts = <GIC_SHARED 26 IRQ_TYPE_LEVEL_HIGH>;
};
};
- sdhci: sdhci@1e130000 {
+ mmc: mmc@1e130000 {
status = "disabled";
compatible = "mediatek,mt7620-mmc";
interrupts = <GIC_SHARED 20 IRQ_TYPE_LEVEL_HIGH>;
};
- xhci: xhci@1e1c0000 {
- compatible = "mediatek,mt8173-xhci";
+ usb: usb@1e1c0000 {
+ compatible = "mediatek,mt8173-xhci", "mediatek,mtk-xhci";
reg = <0x1e1c0000 0x1000
0x1e1d0700 0x0100>;
reg-names = "mac", "ippc";
gmac1: mac@1 {
compatible = "mediatek,eth-mac";
reg = <1>;
- status = "off";
- phy-mode = "rgmii-rxid";
+ status = "disabled";
+ phy-mode = "rgmii";
};
mdio: mdio-bus {
#address-cells = <1>;
#size-cells = <0>;
- switch0: switch0@0 {
+ switch0: switch@1f {
compatible = "mediatek,mt7621";
- reg = <0>;
+ reg = <0x1f>;
mediatek,mcm;
resets = <&sysc MT7621_RST_MCM>;
reset-names = "mcm";
interrupt-controller;
#interrupt-cells = <1>;
- interrupt-parent = <&gic>;
interrupts = <GIC_SHARED 23 IRQ_TYPE_LEVEL_HIGH>;
ports {
#size-cells = <0>;
port@0 {
- status = "off";
+ status = "disabled";
reg = <0>;
label = "lan0";
};
port@1 {
- status = "off";
+ status = "disabled";
reg = <1>;
label = "lan1";
};
port@2 {
- status = "off";
+ status = "disabled";
reg = <2>;
label = "lan2";
};
port@3 {
- status = "off";
+ status = "disabled";
reg = <3>;
label = "lan3";
};
port@4 {
- status = "off";
+ status = "disabled";
reg = <4>;
label = "lan4";
};
case PHY_INTERFACE_MODE_NA:
case PHY_INTERFACE_MODE_1000BASEX:
case PHY_INTERFACE_MODE_2500BASEX:
- if (phylink_autoneg_inband(mode))
- return -EINVAL;
-
return mt7531_sgmii_setup_mode_force(priv, port, interface);
default:
return -EINVAL;
return;
}
- if (phylink_autoneg_inband(mode) &&
- state->interface != PHY_INTERFACE_MODE_SGMII) {
- dev_err(ds->dev, "%s: in-band negotiation unsupported\n",
- __func__);
- return;
- }
-
mcr_cur = mt7530_read(priv, MT7530_PMCR_P(port));
mcr_new = mcr_cur;
mcr_new &= ~PMCR_LINK_SETTINGS_MASK;
config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10 | MAC_100 | MAC_1000FD;
+ if ((priv->id == ID_MT7531) && mt753x_is_mac_port(port))
+ config->mac_capabilities |= MAC_2500FD;
+
/* This driver does not make use of the speed, duplex, pause or the
* advertisement in its mac_config, so it is safe to mark this driver
* as non-legacy.
status = mt7530_read(priv, MT7531_PCS_CONTROL_1(port));
state->link = !!(status & MT7531_SGMII_LINK_STATUS);
+ state->an_complete = !!(status & MT7531_SGMII_AN_COMPLETE);
if (state->interface == PHY_INTERFACE_MODE_SGMII &&
(status & MT7531_SGMII_AN_ENABLE)) {
val = mt7530_read(priv, MT7531_PCS_SPEED_ABILITY(port));
return 0;
}
+static void
+mt7531_sgmii_pcs_get_state_inband(struct mt7530_priv *priv, int port,
+ struct phylink_link_state *state)
+{
+ unsigned int val;
+
+ val = mt7530_read(priv, MT7531_PCS_CONTROL_1(port));
+ state->link = !!(val & MT7531_SGMII_LINK_STATUS);
+ if (!state->link)
+ return;
+
+ state->an_complete = state->link;
+
+ if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
+ state->speed = SPEED_2500;
+ else
+ state->speed = SPEED_1000;
+
+ state->duplex = DUPLEX_FULL;
+ state->pause = MLO_PAUSE_NONE;
+}
+
static void mt7531_pcs_get_state(struct phylink_pcs *pcs,
struct phylink_link_state *state)
{
struct mt7530_priv *priv = pcs_to_mt753x_pcs(pcs)->priv;
int port = pcs_to_mt753x_pcs(pcs)->port;
- if (state->interface == PHY_INTERFACE_MODE_SGMII)
+ if (state->interface == PHY_INTERFACE_MODE_SGMII) {
mt7531_sgmii_pcs_get_state_an(priv, port, state);
- else
- state->link = false;
+ return;
+ } else if ((state->interface == PHY_INTERFACE_MODE_1000BASEX) ||
+ (state->interface == PHY_INTERFACE_MODE_2500BASEX)) {
+ mt7531_sgmii_pcs_get_state_inband(priv, port, state);
+ return;
+ }
+
+ state->link = false;
}
static int mt753x_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
priv->pcs[i].pcs.ops = priv->info->pcs_ops;
priv->pcs[i].priv = priv;
priv->pcs[i].port = i;
+ if (mt753x_is_mac_port(i))
+ priv->pcs[i].pcs.poll = 1;
}
ret = priv->info->sw_setup(ds);
#define MT7531_SGMII_LINK_STATUS BIT(18)
#define MT7531_SGMII_AN_ENABLE BIT(12)
#define MT7531_SGMII_AN_RESTART BIT(9)
+#define MT7531_SGMII_AN_COMPLETE BIT(21)
/* Register for SGMII PCS_SPPED_ABILITY */
#define MT7531_PCS_SPEED_ABILITY(p) MT7531_SGMII_REG(p, 0x08)
struct aq_nic_s *nic = netdev_priv(ctx->netdev);
int ret = 0;
- if (ctx->prepare)
- return 0;
-
if (netif_carrier_ok(nic->ndev))
ret = aq_apply_secy_cfg(nic, ctx->secy);
struct aq_nic_s *nic = netdev_priv(ctx->netdev);
int i;
- if (ctx->prepare)
- return 0;
-
for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
if (nic->macsec_cfg->txsc_idx_busy & BIT(i))
aq_clear_secy(nic, nic->macsec_cfg->aq_txsc[i].sw_secy,
if (txsc_idx == AQ_MACSEC_MAX_SC)
return -ENOSPC;
- if (ctx->prepare)
- return 0;
-
cfg->sc_sa = sc_sa;
cfg->aq_txsc[txsc_idx].hw_sc_idx = aq_to_hw_sc_idx(txsc_idx, sc_sa);
cfg->aq_txsc[txsc_idx].sw_secy = secy;
if (txsc_idx < 0)
return -ENOENT;
- if (ctx->prepare)
- return 0;
-
if (netif_carrier_ok(nic->ndev) && netif_running(secy->netdev))
ret = aq_set_txsc(nic, txsc_idx);
struct aq_nic_s *nic = netdev_priv(ctx->netdev);
int ret = 0;
- if (ctx->prepare)
- return 0;
-
if (!nic->macsec_cfg)
return 0;
if (txsc_idx < 0)
return -EINVAL;
- if (ctx->prepare)
- return 0;
-
aq_txsc = &cfg->aq_txsc[txsc_idx];
set_bit(ctx->sa.assoc_num, &aq_txsc->tx_sa_idx_busy);
if (txsc_idx < 0)
return -EINVAL;
- if (ctx->prepare)
- return 0;
-
aq_txsc = &cfg->aq_txsc[txsc_idx];
if (netif_carrier_ok(nic->ndev) && netif_running(secy->netdev))
ret = aq_update_txsa(nic, aq_txsc->hw_sc_idx, secy,
if (txsc_idx < 0)
return -EINVAL;
- if (ctx->prepare)
- return 0;
-
ret = aq_clear_txsa(nic, &cfg->aq_txsc[txsc_idx], ctx->sa.assoc_num,
AQ_CLEAR_ALL);
if (rxsc_idx >= rxsc_idx_max)
return -ENOSPC;
- if (ctx->prepare)
- return 0;
-
cfg->aq_rxsc[rxsc_idx].hw_sc_idx = aq_to_hw_sc_idx(rxsc_idx,
cfg->sc_sa);
cfg->aq_rxsc[rxsc_idx].sw_secy = ctx->secy;
if (rxsc_idx < 0)
return -ENOENT;
- if (ctx->prepare)
- return 0;
-
if (netif_carrier_ok(nic->ndev) && netif_running(ctx->secy->netdev))
ret = aq_set_rxsc(nic, rxsc_idx);
if (rxsc_idx < 0)
return -ENOENT;
- if (ctx->prepare)
- return 0;
-
if (netif_carrier_ok(nic->ndev))
clear_type = AQ_CLEAR_ALL;
if (rxsc_idx < 0)
return -EINVAL;
- if (ctx->prepare)
- return 0;
-
aq_rxsc = &nic->macsec_cfg->aq_rxsc[rxsc_idx];
set_bit(ctx->sa.assoc_num, &aq_rxsc->rx_sa_idx_busy);
if (rxsc_idx < 0)
return -EINVAL;
- if (ctx->prepare)
- return 0;
-
if (netif_carrier_ok(nic->ndev) && netif_running(secy->netdev))
ret = aq_update_rxsa(nic, cfg->aq_rxsc[rxsc_idx].hw_sc_idx,
secy, ctx->sa.rx_sa, NULL,
if (rxsc_idx < 0)
return -EINVAL;
- if (ctx->prepare)
- return 0;
-
ret = aq_clear_rxsa(nic, &cfg->aq_rxsc[rxsc_idx], ctx->sa.assoc_num,
AQ_CLEAR_ALL);
struct aq_macsec_common_stats *stats = &nic->macsec_cfg->stats;
struct aq_hw_s *hw = nic->aq_hw;
- if (ctx->prepare)
- return 0;
-
aq_get_macsec_common_stats(hw, stats);
ctx->stats.dev_stats->OutPktsUntagged = stats->out.untagged_pkts;
if (txsc_idx < 0)
return -ENOENT;
- if (ctx->prepare)
- return 0;
-
aq_txsc = &nic->macsec_cfg->aq_txsc[txsc_idx];
stats = &aq_txsc->stats;
aq_get_txsc_stats(hw, aq_txsc->hw_sc_idx, stats);
if (txsc_idx < 0)
return -EINVAL;
- if (ctx->prepare)
- return 0;
-
aq_txsc = &cfg->aq_txsc[txsc_idx];
sa_idx = aq_txsc->hw_sc_idx | ctx->sa.assoc_num;
stats = &aq_txsc->tx_sa_stats[ctx->sa.assoc_num];
if (rxsc_idx < 0)
return -ENOENT;
- if (ctx->prepare)
- return 0;
-
aq_rxsc = &cfg->aq_rxsc[rxsc_idx];
for (i = 0; i < MACSEC_NUM_AN; i++) {
if (!test_bit(i, &aq_rxsc->rx_sa_idx_busy))
if (rxsc_idx < 0)
return -EINVAL;
- if (ctx->prepare)
- return 0;
-
aq_rxsc = &cfg->aq_rxsc[rxsc_idx];
stats = &aq_rxsc->rx_sa_stats[ctx->sa.assoc_num];
sa_idx = aq_rxsc->hw_sc_idx | ctx->sa.assoc_num;
struct mlx5e_macsec *macsec;
int err = 0;
- if (ctx->prepare)
- return 0;
-
mutex_lock(&priv->macsec->lock);
macsec = priv->macsec;
struct net_device *netdev;
int err = 0;
- if (ctx->prepare)
- return 0;
-
mutex_lock(&priv->macsec->lock);
macsec = priv->macsec;
struct mlx5e_macsec *macsec;
int err = 0;
- if (ctx->prepare)
- return 0;
-
mutex_lock(&priv->macsec->lock);
macsec = priv->macsec;
macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
struct mlx5e_macsec *macsec;
int err = 0;
- if (ctx->prepare)
- return 0;
-
mutex_lock(&priv->macsec->lock);
macsec = priv->macsec;
macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
int i;
int err = 0;
- if (ctx->prepare)
- return 0;
-
mutex_lock(&priv->macsec->lock);
macsec = priv->macsec;
int err = 0;
int i;
- if (ctx->prepare)
- return 0;
-
mutex_lock(&priv->macsec->lock);
macsec = priv->macsec;
struct list_head *list;
int err = 0;
- if (ctx->prepare)
- return 0;
-
mutex_lock(&priv->macsec->lock);
macsec = priv->macsec;
struct list_head *list;
int err = 0;
- if (ctx->prepare)
- return 0;
-
mutex_lock(&priv->macsec->lock);
macsec = priv->macsec;
struct list_head *list;
int err = 0;
- if (ctx->prepare)
- return 0;
-
mutex_lock(&priv->macsec->lock);
macsec = priv->macsec;
struct mlx5e_macsec *macsec;
int err = 0;
- if (ctx->prepare)
- return 0;
-
if (!mlx5e_macsec_secy_features_validate(ctx))
return -EINVAL;
struct mlx5e_macsec *macsec;
int i, err = 0;
- if (ctx->prepare)
- return 0;
-
if (!mlx5e_macsec_secy_features_validate(ctx))
return -EINVAL;
int err = 0;
int i;
- if (ctx->prepare)
- return 0;
-
mutex_lock(&priv->macsec->lock);
macsec = priv->macsec;
macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
lan966x-switch-objs := lan966x_main.o lan966x_phylink.o lan966x_port.o \
lan966x_mac.o lan966x_ethtool.o lan966x_switchdev.o \
lan966x_vlan.o lan966x_fdb.o lan966x_mdb.o \
- lan966x_ptp.o lan966x_fdma.o lan966x_lag.o
+ lan966x_ptp.o lan966x_fdma.o lan966x_lag.o \
+ lan966x_tc.o lan966x_mqprio.o lan966x_taprio.o
.ndo_set_mac_address = lan966x_port_set_mac_address,
.ndo_get_port_parent_id = lan966x_port_get_parent_id,
.ndo_eth_ioctl = lan966x_port_ioctl,
+ .ndo_setup_tc = lan966x_tc_setup,
};
bool lan966x_netdevice_check(const struct net_device *dev)
return -EINVAL;
dev = devm_alloc_etherdev_mqs(lan966x->dev,
- sizeof(struct lan966x_port), 8, 1);
+ sizeof(struct lan966x_port),
+ NUM_PRIO_QUEUES, 1);
if (!dev)
return -ENOMEM;
dev->netdev_ops = &lan966x_port_netdev_ops;
dev->ethtool_ops = &lan966x_ethtool_ops;
dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_STAG_TX;
+ NETIF_F_HW_VLAN_STAG_TX |
+ NETIF_F_HW_TC;
+ dev->hw_features |= NETIF_F_HW_TC;
dev->needed_headroom = IFH_LEN * sizeof(u32);
eth_hw_addr_gen(dev, lan966x->base_mac, p + 1);
lan966x, ANA_ANAINTR);
spin_lock_init(&lan966x->tx_lock);
+
+ lan966x_taprio_init(lan966x);
}
static int lan966x_ram_init(struct lan966x *lan966x)
{
struct lan966x *lan966x = platform_get_drvdata(pdev);
+ lan966x_taprio_deinit(lan966x);
lan966x_fdma_deinit(lan966x);
lan966x_cleanup_ports(lan966x);
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/ptp_clock_kernel.h>
+#include <net/pkt_sched.h>
#include <net/switchdev.h>
#include "lan966x_regs.h"
#define NUM_PHYS_PORTS 8
#define CPU_PORT 8
+#define NUM_PRIO_QUEUES 8
/* Reserved PGIDs */
#define PGID_CPU (PGID_AGGR - 6)
struct sk_buff *skb);
irqreturn_t lan966x_ptp_irq_handler(int irq, void *args);
irqreturn_t lan966x_ptp_ext_irq_handler(int irq, void *args);
+u32 lan966x_ptp_get_period_ps(void);
+int lan966x_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts);
int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev);
int lan966x_fdma_change_mtu(struct lan966x *lan966x);
unsigned long ageing_clock_t);
void lan966x_update_fwd_mask(struct lan966x *lan966x);
+int lan966x_tc_setup(struct net_device *dev, enum tc_setup_type type,
+ void *type_data);
+
+int lan966x_mqprio_add(struct lan966x_port *port, u8 num_tc);
+int lan966x_mqprio_del(struct lan966x_port *port);
+
+void lan966x_taprio_init(struct lan966x *lan966x);
+void lan966x_taprio_deinit(struct lan966x *lan966x);
+int lan966x_taprio_add(struct lan966x_port *port,
+ struct tc_taprio_qopt_offload *qopt);
+int lan966x_taprio_del(struct lan966x_port *port);
+int lan966x_taprio_speed_set(struct lan966x_port *port, int speed);
+
static inline void __iomem *lan_addr(void __iomem *base[],
int id, int tinst, int tcnt,
int gbase, int ginst,
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
+int lan966x_mqprio_add(struct lan966x_port *port, u8 num_tc)
+{
+ u8 i;
+
+ if (num_tc != NUM_PRIO_QUEUES) {
+ netdev_err(port->dev, "Only %d traffic classes supported\n",
+ NUM_PRIO_QUEUES);
+ return -EINVAL;
+ }
+
+ netdev_set_num_tc(port->dev, num_tc);
+
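+ /* Map each traffic class to exactly one queue, at the same offset */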
+ for (i = 0; i < num_tc; ++i)
+ netdev_set_tc_queue(port->dev, i, 1, i);
+
+ return 0;
+}
+
+int lan966x_mqprio_del(struct lan966x_port *port)
+{
+ netdev_reset_tc(port->dev);
+
+ return 0;
+}
break;
}
+ lan966x_taprio_speed_set(port, config->speed);
+
/* Also the GIGA_MODE_ENA(1) needs to be set regardless of the
* port speed for QSGMII ports.
*/
return 0;
}
-static int lan966x_ptp_gettime64(struct ptp_clock_info *ptp,
- struct timespec64 *ts)
+int lan966x_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info);
struct lan966x *lan966x = phc->lan966x;
shhwtstamps = skb_hwtstamps(skb);
shhwtstamps->hwtstamp = full_ts_in_ns;
}
+
+u32 lan966x_ptp_get_period_ps(void)
+{
+ /* This represents the system clock period in picoseconds */
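+ /* (15125 ps corresponds to a system clock of roughly 66 MHz) */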
+ return 15125;
+}
/* QSYS:RES_CTRL:RES_CFG */
#define QSYS_RES_CFG(g) __REG(TARGET_QSYS, 0, 1, 32768, g, 1024, 8, 0, 0, 1, 4)
+/* QSYS:TAS_CONFIG:TAS_CFG_CTRL */
+#define QSYS_TAS_CFG_CTRL __REG(TARGET_QSYS, 0, 1, 57372, 0, 1, 12, 0, 0, 1, 4)
+
+#define QSYS_TAS_CFG_CTRL_LIST_NUM_MAX GENMASK(27, 23)
+#define QSYS_TAS_CFG_CTRL_LIST_NUM_MAX_SET(x)\
+ FIELD_PREP(QSYS_TAS_CFG_CTRL_LIST_NUM_MAX, x)
+#define QSYS_TAS_CFG_CTRL_LIST_NUM_MAX_GET(x)\
+ FIELD_GET(QSYS_TAS_CFG_CTRL_LIST_NUM_MAX, x)
+
+#define QSYS_TAS_CFG_CTRL_LIST_NUM GENMASK(22, 18)
+#define QSYS_TAS_CFG_CTRL_LIST_NUM_SET(x)\
+ FIELD_PREP(QSYS_TAS_CFG_CTRL_LIST_NUM, x)
+#define QSYS_TAS_CFG_CTRL_LIST_NUM_GET(x)\
+ FIELD_GET(QSYS_TAS_CFG_CTRL_LIST_NUM, x)
+
+#define QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q BIT(17)
+#define QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q_SET(x)\
+ FIELD_PREP(QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q, x)
+#define QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q_GET(x)\
+ FIELD_GET(QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q, x)
+
+#define QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM GENMASK(16, 5)
+#define QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM_SET(x)\
+ FIELD_PREP(QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM, x)
+#define QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM_GET(x)\
+ FIELD_GET(QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM, x)
+
+/* QSYS:TAS_CONFIG:TAS_GATE_STATE_CTRL */
+#define QSYS_TAS_GS_CTRL __REG(TARGET_QSYS, 0, 1, 57372, 0, 1, 12, 4, 0, 1, 4)
+
+#define QSYS_TAS_GS_CTRL_HSCH_POS GENMASK(2, 0)
+#define QSYS_TAS_GS_CTRL_HSCH_POS_SET(x)\
+ FIELD_PREP(QSYS_TAS_GS_CTRL_HSCH_POS, x)
+#define QSYS_TAS_GS_CTRL_HSCH_POS_GET(x)\
+ FIELD_GET(QSYS_TAS_GS_CTRL_HSCH_POS, x)
+
+/* QSYS:TAS_CONFIG:TAS_STATEMACHINE_CFG */
+#define QSYS_TAS_STM_CFG __REG(TARGET_QSYS, 0, 1, 57372, 0, 1, 12, 8, 0, 1, 4)
+
+#define QSYS_TAS_STM_CFG_REVISIT_DLY GENMASK(7, 0)
+#define QSYS_TAS_STM_CFG_REVISIT_DLY_SET(x)\
+ FIELD_PREP(QSYS_TAS_STM_CFG_REVISIT_DLY, x)
+#define QSYS_TAS_STM_CFG_REVISIT_DLY_GET(x)\
+ FIELD_GET(QSYS_TAS_STM_CFG_REVISIT_DLY, x)
+
+/* QSYS:TAS_PROFILE_CFG:TAS_PROFILE_CONFIG */
+#define QSYS_TAS_PROFILE_CFG(g) __REG(TARGET_QSYS, 0, 1, 30720, g, 16, 64, 32, 0, 1, 4)
+
+#define QSYS_TAS_PROFILE_CFG_PORT_NUM GENMASK(21, 19)
+#define QSYS_TAS_PROFILE_CFG_PORT_NUM_SET(x)\
+ FIELD_PREP(QSYS_TAS_PROFILE_CFG_PORT_NUM, x)
+#define QSYS_TAS_PROFILE_CFG_PORT_NUM_GET(x)\
+ FIELD_GET(QSYS_TAS_PROFILE_CFG_PORT_NUM, x)
+
+#define QSYS_TAS_PROFILE_CFG_LINK_SPEED GENMASK(18, 16)
+#define QSYS_TAS_PROFILE_CFG_LINK_SPEED_SET(x)\
+ FIELD_PREP(QSYS_TAS_PROFILE_CFG_LINK_SPEED, x)
+#define QSYS_TAS_PROFILE_CFG_LINK_SPEED_GET(x)\
+ FIELD_GET(QSYS_TAS_PROFILE_CFG_LINK_SPEED, x)
+
+/* QSYS:TAS_LIST_CFG:TAS_BASE_TIME_NSEC */
+#define QSYS_TAS_BT_NSEC __REG(TARGET_QSYS, 0, 1, 27904, 0, 1, 64, 0, 0, 1, 4)
+
+#define QSYS_TAS_BT_NSEC_NSEC GENMASK(29, 0)
+#define QSYS_TAS_BT_NSEC_NSEC_SET(x)\
+ FIELD_PREP(QSYS_TAS_BT_NSEC_NSEC, x)
+#define QSYS_TAS_BT_NSEC_NSEC_GET(x)\
+ FIELD_GET(QSYS_TAS_BT_NSEC_NSEC, x)
+
+/* QSYS:TAS_LIST_CFG:TAS_BASE_TIME_SEC_LSB */
+#define QSYS_TAS_BT_SEC_LSB __REG(TARGET_QSYS, 0, 1, 27904, 0, 1, 64, 4, 0, 1, 4)
+
+/* QSYS:TAS_LIST_CFG:TAS_BASE_TIME_SEC_MSB */
+#define QSYS_TAS_BT_SEC_MSB __REG(TARGET_QSYS, 0, 1, 27904, 0, 1, 64, 8, 0, 1, 4)
+
+#define QSYS_TAS_BT_SEC_MSB_SEC_MSB GENMASK(15, 0)
+#define QSYS_TAS_BT_SEC_MSB_SEC_MSB_SET(x)\
+ FIELD_PREP(QSYS_TAS_BT_SEC_MSB_SEC_MSB, x)
+#define QSYS_TAS_BT_SEC_MSB_SEC_MSB_GET(x)\
+ FIELD_GET(QSYS_TAS_BT_SEC_MSB_SEC_MSB, x)
+
+/* QSYS:TAS_LIST_CFG:TAS_CYCLE_TIME_CFG */
+#define QSYS_TAS_CT_CFG __REG(TARGET_QSYS, 0, 1, 27904, 0, 1, 64, 24, 0, 1, 4)
+
+/* QSYS:TAS_LIST_CFG:TAS_STARTUP_CFG */
+#define QSYS_TAS_STARTUP_CFG __REG(TARGET_QSYS, 0, 1, 27904, 0, 1, 64, 28, 0, 1, 4)
+
+#define QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX GENMASK(27, 23)
+#define QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX_SET(x)\
+ FIELD_PREP(QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX, x)
+#define QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX_GET(x)\
+ FIELD_GET(QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX, x)
+
+/* QSYS:TAS_LIST_CFG:TAS_LIST_CFG */
+#define QSYS_TAS_LIST_CFG __REG(TARGET_QSYS, 0, 1, 27904, 0, 1, 64, 32, 0, 1, 4)
+
+#define QSYS_TAS_LIST_CFG_LIST_BASE_ADDR GENMASK(11, 0)
+#define QSYS_TAS_LIST_CFG_LIST_BASE_ADDR_SET(x)\
+ FIELD_PREP(QSYS_TAS_LIST_CFG_LIST_BASE_ADDR, x)
+#define QSYS_TAS_LIST_CFG_LIST_BASE_ADDR_GET(x)\
+ FIELD_GET(QSYS_TAS_LIST_CFG_LIST_BASE_ADDR, x)
+
+/* QSYS:TAS_LIST_CFG:TAS_LIST_STATE */
+#define QSYS_TAS_LST __REG(TARGET_QSYS, 0, 1, 27904, 0, 1, 64, 36, 0, 1, 4)
+
+#define QSYS_TAS_LST_LIST_STATE GENMASK(2, 0)
+#define QSYS_TAS_LST_LIST_STATE_SET(x)\
+ FIELD_PREP(QSYS_TAS_LST_LIST_STATE, x)
+#define QSYS_TAS_LST_LIST_STATE_GET(x)\
+ FIELD_GET(QSYS_TAS_LST_LIST_STATE, x)
+
+/* QSYS:TAS_GCL_CFG:TAS_GCL_CTRL_CFG */
+#define QSYS_TAS_GCL_CT_CFG __REG(TARGET_QSYS, 0, 1, 27968, 0, 1, 16, 0, 0, 1, 4)
+
+#define QSYS_TAS_GCL_CT_CFG_HSCH_POS GENMASK(12, 10)
+#define QSYS_TAS_GCL_CT_CFG_HSCH_POS_SET(x)\
+ FIELD_PREP(QSYS_TAS_GCL_CT_CFG_HSCH_POS, x)
+#define QSYS_TAS_GCL_CT_CFG_HSCH_POS_GET(x)\
+ FIELD_GET(QSYS_TAS_GCL_CT_CFG_HSCH_POS, x)
+
+#define QSYS_TAS_GCL_CT_CFG_GATE_STATE GENMASK(9, 2)
+#define QSYS_TAS_GCL_CT_CFG_GATE_STATE_SET(x)\
+ FIELD_PREP(QSYS_TAS_GCL_CT_CFG_GATE_STATE, x)
+#define QSYS_TAS_GCL_CT_CFG_GATE_STATE_GET(x)\
+ FIELD_GET(QSYS_TAS_GCL_CT_CFG_GATE_STATE, x)
+
+#define QSYS_TAS_GCL_CT_CFG_OP_TYPE GENMASK(1, 0)
+#define QSYS_TAS_GCL_CT_CFG_OP_TYPE_SET(x)\
+ FIELD_PREP(QSYS_TAS_GCL_CT_CFG_OP_TYPE, x)
+#define QSYS_TAS_GCL_CT_CFG_OP_TYPE_GET(x)\
+ FIELD_GET(QSYS_TAS_GCL_CT_CFG_OP_TYPE, x)
+
+/* QSYS:TAS_GCL_CFG:TAS_GCL_CTRL_CFG2 */
+#define QSYS_TAS_GCL_CT_CFG2 __REG(TARGET_QSYS, 0, 1, 27968, 0, 1, 16, 4, 0, 1, 4)
+
+#define QSYS_TAS_GCL_CT_CFG2_PORT_PROFILE GENMASK(15, 12)
+#define QSYS_TAS_GCL_CT_CFG2_PORT_PROFILE_SET(x)\
+ FIELD_PREP(QSYS_TAS_GCL_CT_CFG2_PORT_PROFILE, x)
+#define QSYS_TAS_GCL_CT_CFG2_PORT_PROFILE_GET(x)\
+ FIELD_GET(QSYS_TAS_GCL_CT_CFG2_PORT_PROFILE, x)
+
+#define QSYS_TAS_GCL_CT_CFG2_NEXT_GCL GENMASK(11, 0)
+#define QSYS_TAS_GCL_CT_CFG2_NEXT_GCL_SET(x)\
+ FIELD_PREP(QSYS_TAS_GCL_CT_CFG2_NEXT_GCL, x)
+#define QSYS_TAS_GCL_CT_CFG2_NEXT_GCL_GET(x)\
+ FIELD_GET(QSYS_TAS_GCL_CT_CFG2_NEXT_GCL, x)
+
+/* QSYS:TAS_GCL_CFG:TAS_GCL_TIME_CFG */
+#define QSYS_TAS_GCL_TM_CFG __REG(TARGET_QSYS, 0, 1, 27968, 0, 1, 16, 8, 0, 1, 4)
+
+/* QSYS:HSCH_TAS_STATE:TAS_GATE_STATE */
+#define QSYS_TAS_GATE_STATE __REG(TARGET_QSYS, 0, 1, 28004, 0, 1, 4, 0, 0, 1, 4)
+
+#define QSYS_TAS_GATE_STATE_TAS_GATE_STATE GENMASK(7, 0)
+#define QSYS_TAS_GATE_STATE_TAS_GATE_STATE_SET(x)\
+ FIELD_PREP(QSYS_TAS_GATE_STATE_TAS_GATE_STATE, x)
+#define QSYS_TAS_GATE_STATE_TAS_GATE_STATE_GET(x)\
+ FIELD_GET(QSYS_TAS_GATE_STATE_TAS_GATE_STATE, x)
+
/* REW:PORT:PORT_VLAN_CFG */
#define REW_PORT_VLAN_CFG(g) __REG(TARGET_REW, 0, 1, 0, g, 10, 128, 0, 0, 1, 4)
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
+#define LAN966X_TAPRIO_TIMEOUT_MS 1000
+#define LAN966X_TAPRIO_ENTRIES_PER_PORT 2
+
+/* Minimum supported cycle time in nanoseconds */
+#define LAN966X_TAPRIO_MIN_CYCLE_TIME_NS NSEC_PER_USEC
+
+/* Maximum supported cycle time in nanoseconds */
+#define LAN966X_TAPRIO_MAX_CYCLE_TIME_NS (NSEC_PER_SEC - 1)
+
+/* Total number of TAS GCL entries */
+#define LAN966X_TAPRIO_NUM_GCL 256
+
+/* TAPRIO link speeds for calculation of guard band */
+enum lan966x_taprio_link_speed {
+ LAN966X_TAPRIO_SPEED_NO_GB,
+ LAN966X_TAPRIO_SPEED_10,
+ LAN966X_TAPRIO_SPEED_100,
+ LAN966X_TAPRIO_SPEED_1000,
+ LAN966X_TAPRIO_SPEED_2500,
+};
+
+/* TAPRIO list states */
+enum lan966x_taprio_state {
+ LAN966X_TAPRIO_STATE_ADMIN,
+ LAN966X_TAPRIO_STATE_ADVANCING,
+ LAN966X_TAPRIO_STATE_PENDING,
+ LAN966X_TAPRIO_STATE_OPERATING,
+ LAN966X_TAPRIO_STATE_TERMINATING,
+ LAN966X_TAPRIO_STATE_MAX,
+};
+
+/* TAPRIO GCL command */
+enum lan966x_taprio_gcl_cmd {
+ LAN966X_TAPRIO_GCL_CMD_SET_GATE_STATES = 0,
+};
+
+static u32 lan966x_taprio_list_index(struct lan966x_port *port, u8 entry)
+{
+ return port->chip_port * LAN966X_TAPRIO_ENTRIES_PER_PORT + entry;
+}
+
+static u32 lan966x_taprio_list_state_get(struct lan966x_port *port)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u32 val;
+
+ val = lan_rd(lan966x, QSYS_TAS_LST);
+ return QSYS_TAS_LST_LIST_STATE_GET(val);
+}
+
+static u32 lan966x_taprio_list_index_state_get(struct lan966x_port *port,
+ u32 list)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ lan_rmw(QSYS_TAS_CFG_CTRL_LIST_NUM_SET(list),
+ QSYS_TAS_CFG_CTRL_LIST_NUM,
+ lan966x, QSYS_TAS_CFG_CTRL);
+
+ return lan966x_taprio_list_state_get(port);
+}
+
+static void lan966x_taprio_list_state_set(struct lan966x_port *port,
+ u32 state)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ lan_rmw(QSYS_TAS_LST_LIST_STATE_SET(state),
+ QSYS_TAS_LST_LIST_STATE,
+ lan966x, QSYS_TAS_LST);
+}
+
+static int lan966x_taprio_list_shutdown(struct lan966x_port *port,
+ u32 list)
+{
+ struct lan966x *lan966x = port->lan966x;
+ bool pending, operating;
+ unsigned long end;
+ u32 state;
+
+ end = jiffies + msecs_to_jiffies(LAN966X_TAPRIO_TIMEOUT_MS);
+ /* It is required to try multiple times to set the state of the list,
+ * because the HW can overwrite it.
+ */
+ do {
+ state = lan966x_taprio_list_state_get(port);
+
+ pending = false;
+ operating = false;
+
+ if (state == LAN966X_TAPRIO_STATE_ADVANCING ||
+ state == LAN966X_TAPRIO_STATE_PENDING) {
+ lan966x_taprio_list_state_set(port,
+ LAN966X_TAPRIO_STATE_ADMIN);
+ pending = true;
+ }
+
+ if (state == LAN966X_TAPRIO_STATE_OPERATING) {
+ lan966x_taprio_list_state_set(port,
+ LAN966X_TAPRIO_STATE_TERMINATING);
+ operating = true;
+ }
+
+ /* If the list was pending and is now in admin, there is nothing
+ * else to do, so just bail out
+ */
+ state = lan966x_taprio_list_state_get(port);
+ if (pending &&
+ state == LAN966X_TAPRIO_STATE_ADMIN)
+ return 0;
+
+ /* If the list was operating and is now terminating or admin, it
+ * is OK to exit, but we still need to wait until the list reaches
+ * the admin state. It is not required to set the state again.
+ */
+ if (operating &&
+ (state == LAN966X_TAPRIO_STATE_TERMINATING ||
+ state == LAN966X_TAPRIO_STATE_ADMIN))
+ break;
+
+ } while (!time_after(jiffies, end));
+
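+ /* Wait for the list to actually reach the admin state */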
+ end = jiffies + msecs_to_jiffies(LAN966X_TAPRIO_TIMEOUT_MS);
+ do {
+ state = lan966x_taprio_list_state_get(port);
+ if (state == LAN966X_TAPRIO_STATE_ADMIN)
+ break;
+
+ } while (!time_after(jiffies, end));
+
+ /* If the list was in operating mode, it could have been stopped while
+ * some queues were closed, so make sure to restore "all-queues-open"
+ */
+ if (operating) {
+ lan_wr(QSYS_TAS_GS_CTRL_HSCH_POS_SET(port->chip_port),
+ lan966x, QSYS_TAS_GS_CTRL);
+
+ lan_wr(QSYS_TAS_GATE_STATE_TAS_GATE_STATE_SET(0xff),
+ lan966x, QSYS_TAS_GATE_STATE);
+ }
+
+ return 0;
+}
+
+static int lan966x_taprio_shutdown(struct lan966x_port *port)
+{
+ u32 i, list, state;
+ int err;
+
+ for (i = 0; i < LAN966X_TAPRIO_ENTRIES_PER_PORT; ++i) {
+ list = lan966x_taprio_list_index(port, i);
+ state = lan966x_taprio_list_index_state_get(port, list);
+ if (state == LAN966X_TAPRIO_STATE_ADMIN)
+ continue;
+
+ err = lan966x_taprio_list_shutdown(port, list);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/* Find a suitable list for a new schedule. First priority is a list in state
+ * pending. Second priority is a list in state admin.
+ */
+static int lan966x_taprio_find_list(struct lan966x_port *port,
+ struct tc_taprio_qopt_offload *qopt,
+ int *new_list, int *obs_list)
+{
+ int state[LAN966X_TAPRIO_ENTRIES_PER_PORT];
+ int list[LAN966X_TAPRIO_ENTRIES_PER_PORT];
+ int err, oper = -1;
+ u32 i;
+
+ *new_list = -1;
+ *obs_list = -1;
+
+ /* If there is already an entry in operating mode, return this list in
+ * obs_list, so that when the new list gets activated the operating
+ * list is stopped. In this way it is possible to have smooth
+ * transitions between the lists
+ */
+ for (i = 0; i < LAN966X_TAPRIO_ENTRIES_PER_PORT; ++i) {
+ list[i] = lan966x_taprio_list_index(port, i);
+ state[i] = lan966x_taprio_list_index_state_get(port, list[i]);
+ if (state[i] == LAN966X_TAPRIO_STATE_OPERATING)
+ oper = list[i];
+ }
+
+ for (i = 0; i < LAN966X_TAPRIO_ENTRIES_PER_PORT; ++i) {
+ if (state[i] == LAN966X_TAPRIO_STATE_PENDING) {
+ err = lan966x_taprio_shutdown(port);
+ if (err)
+ return err;
+
+ *new_list = list[i];
+ *obs_list = (oper == -1) ? *new_list : oper;
+ return 0;
+ }
+ }
+
+ for (i = 0; i < LAN966X_TAPRIO_ENTRIES_PER_PORT; ++i) {
+ if (state[i] == LAN966X_TAPRIO_STATE_ADMIN) {
+ *new_list = list[i];
+ *obs_list = (oper == -1) ? *new_list : oper;
+ return 0;
+ }
+ }
+
+ return -ENOSPC;
+}
+
+static int lan966x_taprio_check(struct tc_taprio_qopt_offload *qopt)
+{
+ u64 total_time = 0;
+ u32 i;
+
+ /* This is not supported by the HW */
+ if (qopt->cycle_time_extension)
+ return -EOPNOTSUPP;
+
+ /* There is a limited number of gcl entries that can be used; they are
+ * shared by all ports
+ */
+ if (qopt->num_entries > LAN966X_TAPRIO_NUM_GCL)
+ return -EINVAL;
+
+ /* Don't allow cycle times bigger than 1 sec or smaller than 1 usec */
+ if (qopt->cycle_time < LAN966X_TAPRIO_MIN_CYCLE_TIME_NS ||
+ qopt->cycle_time > LAN966X_TAPRIO_MAX_CYCLE_TIME_NS)
+ return -EINVAL;
+
+ for (i = 0; i < qopt->num_entries; ++i) {
+ struct tc_taprio_sched_entry *entry = &qopt->entries[i];
+
+ /* Don't allow intervals bigger than 1 sec or smaller than 1
+ * usec
+ */
+ if (entry->interval < LAN966X_TAPRIO_MIN_CYCLE_TIME_NS ||
+ entry->interval > LAN966X_TAPRIO_MAX_CYCLE_TIME_NS)
+ return -EINVAL;
+
+ if (qopt->entries[i].command != TC_TAPRIO_CMD_SET_GATES)
+ return -EINVAL;
+
+ total_time += qopt->entries[i].interval;
+ }
+
+ /* Don't allow the total time of the intervals to be bigger than 1 sec */
+ if (total_time > LAN966X_TAPRIO_MAX_CYCLE_TIME_NS)
+ return -EINVAL;
+
+ /* The HW expects the cycle time to be at least as big as the sum of
+ * the gcl intervals
+ */
+ if (qopt->cycle_time < total_time)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int lan966x_taprio_gcl_free_get(struct lan966x_port *port,
+ unsigned long *free_list)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u32 num_free, state, list;
+ u32 base, next, max_list;
+
+ /* By default everything is free */
+ bitmap_fill(free_list, LAN966X_TAPRIO_NUM_GCL);
+ num_free = LAN966X_TAPRIO_NUM_GCL;
+
+ /* Iterate over all lists in use and mark their gcl entries as not
+ * free; each list's entries form a chain that wraps back to its base
+ * entry.
+ */
+ max_list = lan966x->num_phys_ports * LAN966X_TAPRIO_ENTRIES_PER_PORT;
+ for (list = 0; list < max_list; ++list) {
+ state = lan966x_taprio_list_index_state_get(port, list);
+ if (state == LAN966X_TAPRIO_STATE_ADMIN)
+ continue;
+
+ base = lan_rd(lan966x, QSYS_TAS_LIST_CFG);
+ base = QSYS_TAS_LIST_CFG_LIST_BASE_ADDR_GET(base);
+ next = base;
+
+ do {
+ clear_bit(next, free_list);
+ num_free--;
+
+ lan_rmw(QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM_SET(next),
+ QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM,
+ lan966x, QSYS_TAS_CFG_CTRL);
+
+ next = lan_rd(lan966x, QSYS_TAS_GCL_CT_CFG2);
+ next = QSYS_TAS_GCL_CT_CFG2_NEXT_GCL_GET(next);
+ } while (base != next);
+ }
+
+ return num_free;
+}
+
+static void lan966x_taprio_gcl_setup_entry(struct lan966x_port *port,
+ struct tc_taprio_sched_entry *entry,
+ u32 next_entry)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ /* Setup a single gcl entry */
+ lan_wr(QSYS_TAS_GCL_CT_CFG_GATE_STATE_SET(entry->gate_mask) |
+ QSYS_TAS_GCL_CT_CFG_HSCH_POS_SET(port->chip_port) |
+ QSYS_TAS_GCL_CT_CFG_OP_TYPE_SET(LAN966X_TAPRIO_GCL_CMD_SET_GATE_STATES),
+ lan966x, QSYS_TAS_GCL_CT_CFG);
+
+ lan_wr(QSYS_TAS_GCL_CT_CFG2_PORT_PROFILE_SET(port->chip_port) |
+ QSYS_TAS_GCL_CT_CFG2_NEXT_GCL_SET(next_entry),
+ lan966x, QSYS_TAS_GCL_CT_CFG2);
+
+ lan_wr(entry->interval, lan966x, QSYS_TAS_GCL_TM_CFG);
+}
+
+static int lan966x_taprio_gcl_setup(struct lan966x_port *port,
+ struct tc_taprio_qopt_offload *qopt,
+ int list)
+{
+ DECLARE_BITMAP(free_list, LAN966X_TAPRIO_NUM_GCL);
+ struct lan966x *lan966x = port->lan966x;
+ u32 i, base, next;
+
+ if (lan966x_taprio_gcl_free_get(port, free_list) < qopt->num_entries)
+ return -ENOSPC;
+
+ /* Select list */
+ lan_rmw(QSYS_TAS_CFG_CTRL_LIST_NUM_SET(list),
+ QSYS_TAS_CFG_CTRL_LIST_NUM,
+ lan966x, QSYS_TAS_CFG_CTRL);
+
+ /* Setup the address of the first gcl entry */
+ base = find_first_bit(free_list, LAN966X_TAPRIO_NUM_GCL);
+ lan_rmw(QSYS_TAS_LIST_CFG_LIST_BASE_ADDR_SET(base),
+ QSYS_TAS_LIST_CFG_LIST_BASE_ADDR,
+ lan966x, QSYS_TAS_LIST_CFG);
+
+ /* Iterate over entries and add them to the gcl list */
+ next = base;
+ for (i = 0; i < qopt->num_entries; ++i) {
+ lan_rmw(QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM_SET(next),
+ QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM,
+ lan966x, QSYS_TAS_CFG_CTRL);
+
+ /* If the entry is last, point back to the start of the list */
+ if (i == qopt->num_entries - 1)
+ next = base;
+ else
+ next = find_next_bit(free_list, LAN966X_TAPRIO_NUM_GCL,
+ next + 1);
+
+ lan966x_taprio_gcl_setup_entry(port, &qopt->entries[i], next);
+ }
+
+ return 0;
+}
+
+/* Calculate a new base_time based on cycle_time. The HW recommends the new
+ * base time to be at least current time + 2 * cycle_time
+ */
+static void lan966x_taprio_new_base_time(struct lan966x *lan966x,
+ const u32 cycle_time,
+ const ktime_t org_base_time,
+ ktime_t *new_base_time)
+{
+ ktime_t current_time, threshold_time;
+ struct timespec64 ts;
+
+ /* Get the current time and calculate the threshold_time */
+ lan966x_ptp_gettime64(&lan966x->phc[LAN966X_PHC_PORT].info, &ts);
+ current_time = timespec64_to_ktime(ts);
+ threshold_time = current_time + (2 * cycle_time);
+
+ /* If org_base_time is far enough in the future, just use it */
+ if (org_base_time >= threshold_time) {
+ *new_base_time = org_base_time;
+ return;
+ }
+
+ /* If the org_base_time is smaller than current_time, calculate the new
+ * base time as follows.
+ */
+ if (org_base_time <= current_time) {
+ u64 tmp = current_time - org_base_time;
+ u32 rem = 0;
+
+ if (tmp > cycle_time)
+ div_u64_rem(tmp, cycle_time, &rem);
+ rem = cycle_time - rem;
+ *new_base_time = threshold_time + rem;
+ return;
+ }
+
+ /* The only remaining case is org_base_time between current_time and
+ * threshold_time. In this case the new_base_time is calculated as
+ * org_base_time + 2 * cycle_time
+ */
+ *new_base_time = org_base_time + 2 * cycle_time;
+}
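+
+/* Worked example with illustrative numbers (not from the datasheet): with
+ * cycle_time = 1 ms, org_base_time = 3.0 ms and current_time = 10.3 ms,
+ * threshold_time = 12.3 ms and tmp = 7.3 ms, so rem = 0.3 ms and
+ * cycle_time - rem = 0.7 ms, giving new_base_time = 12.3 + 0.7 = 13.0 ms,
+ * i.e. org_base_time plus a whole number of cycles, which keeps the
+ * schedule phase-aligned with the original base time.
+ */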
+
+int lan966x_taprio_speed_set(struct lan966x_port *port, int speed)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u8 taprio_speed;
+
+ switch (speed) {
+ case SPEED_10:
+ taprio_speed = LAN966X_TAPRIO_SPEED_10;
+ break;
+ case SPEED_100:
+ taprio_speed = LAN966X_TAPRIO_SPEED_100;
+ break;
+ case SPEED_1000:
+ taprio_speed = LAN966X_TAPRIO_SPEED_1000;
+ break;
+ case SPEED_2500:
+ taprio_speed = LAN966X_TAPRIO_SPEED_2500;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ lan_rmw(QSYS_TAS_PROFILE_CFG_LINK_SPEED_SET(taprio_speed),
+ QSYS_TAS_PROFILE_CFG_LINK_SPEED,
+ lan966x, QSYS_TAS_PROFILE_CFG(port->chip_port));
+
+ return 0;
+}
+
+int lan966x_taprio_add(struct lan966x_port *port,
+ struct tc_taprio_qopt_offload *qopt)
+{
+ struct lan966x *lan966x = port->lan966x;
+ int err, new_list, obs_list;
+ struct timespec64 ts;
+ ktime_t base_time;
+
+ err = lan966x_taprio_check(qopt);
+ if (err)
+ return err;
+
+ err = lan966x_taprio_find_list(port, qopt, &new_list, &obs_list);
+ if (err)
+ return err;
+
+ err = lan966x_taprio_gcl_setup(port, qopt, new_list);
+ if (err)
+ return err;
+
+ lan966x_taprio_new_base_time(lan966x, qopt->cycle_time,
+ qopt->base_time, &base_time);
+
+ ts = ktime_to_timespec64(base_time);
+ lan_wr(QSYS_TAS_BT_NSEC_NSEC_SET(ts.tv_nsec),
+ lan966x, QSYS_TAS_BT_NSEC);
+
+ lan_wr(lower_32_bits(ts.tv_sec),
+ lan966x, QSYS_TAS_BT_SEC_LSB);
+
+ lan_wr(QSYS_TAS_BT_SEC_MSB_SEC_MSB_SET(upper_32_bits(ts.tv_sec)),
+ lan966x, QSYS_TAS_BT_SEC_MSB);
+
+ lan_wr(qopt->cycle_time, lan966x, QSYS_TAS_CT_CFG);
+
+ lan_rmw(QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX_SET(obs_list),
+ QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX,
+ lan966x, QSYS_TAS_STARTUP_CFG);
+
+ /* Start list processing */
+ lan_rmw(QSYS_TAS_LST_LIST_STATE_SET(LAN966X_TAPRIO_STATE_ADVANCING),
+ QSYS_TAS_LST_LIST_STATE,
+ lan966x, QSYS_TAS_LST);
+
+ return err;
+}
+
+int lan966x_taprio_del(struct lan966x_port *port)
+{
+ return lan966x_taprio_shutdown(port);
+}
+
+void lan966x_taprio_init(struct lan966x *lan966x)
+{
+ int num_taprio_lists;
+ int p;
+
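+ /* Program the state machine revisit delay: (256 * 1000) ps, i.e.
+ * 256 ns, expressed in system clock periods.
+ */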
+ lan_wr(QSYS_TAS_STM_CFG_REVISIT_DLY_SET((256 * 1000) /
+ lan966x_ptp_get_period_ps()),
+ lan966x, QSYS_TAS_STM_CFG);
+
+ num_taprio_lists = lan966x->num_phys_ports *
+ LAN966X_TAPRIO_ENTRIES_PER_PORT;
+
+ /* For now we always use guard band on all queues */
+ lan_rmw(QSYS_TAS_CFG_CTRL_LIST_NUM_MAX_SET(num_taprio_lists) |
+ QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q_SET(1),
+ QSYS_TAS_CFG_CTRL_LIST_NUM_MAX |
+ QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q,
+ lan966x, QSYS_TAS_CFG_CTRL);
+
+ for (p = 0; p < lan966x->num_phys_ports; p++)
+ lan_rmw(QSYS_TAS_PROFILE_CFG_PORT_NUM_SET(p),
+ QSYS_TAS_PROFILE_CFG_PORT_NUM,
+ lan966x, QSYS_TAS_PROFILE_CFG(p));
+}
+
+void lan966x_taprio_deinit(struct lan966x *lan966x)
+{
+ int p;
+
+ for (p = 0; p < lan966x->num_phys_ports; ++p) {
+ if (!lan966x->ports[p])
+ continue;
+
+ lan966x_taprio_del(lan966x->ports[p]);
+ }
+}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <net/pkt_cls.h>
+
+#include "lan966x_main.h"
+
+static int lan966x_tc_setup_qdisc_mqprio(struct lan966x_port *port,
+ struct tc_mqprio_qopt_offload *mqprio)
+{
+ u8 num_tc = mqprio->qopt.num_tc;
+
+ mqprio->qopt.hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+
+ return num_tc ? lan966x_mqprio_add(port, num_tc) :
+ lan966x_mqprio_del(port);
+}
+
+static int lan966x_tc_setup_qdisc_taprio(struct lan966x_port *port,
+ struct tc_taprio_qopt_offload *taprio)
+{
+ return taprio->enable ? lan966x_taprio_add(port, taprio) :
+ lan966x_taprio_del(port);
+}
+
+int lan966x_tc_setup(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+
+ switch (type) {
+ case TC_SETUP_QDISC_MQPRIO:
+ return lan966x_tc_setup_qdisc_mqprio(port, type_data);
+ case TC_SETUP_QDISC_TAPRIO:
+ return lan966x_tc_setup_qdisc_taprio(port, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
sparx5-switch-objs := sparx5_main.o sparx5_packet.o \
sparx5_netdev.o sparx5_phylink.o sparx5_port.o sparx5_mactable.o sparx5_vlan.o \
sparx5_switchdev.o sparx5_calendar.o sparx5_ethtool.o sparx5_fdma.o \
- sparx5_ptp.o sparx5_pgid.o
+ sparx5_ptp.o sparx5_pgid.o sparx5_tc.o sparx5_qos.o
#include "sparx5_main_regs.h"
#include "sparx5_main.h"
#include "sparx5_port.h"
+#include "sparx5_qos.h"
#define QLIM_WM(fraction) \
((SPX5_BUFFER_MEMORY / SPX5_BUFFER_CELL_SZ - 100) * (fraction) / 100)
goto cleanup_ports;
}
+ err = sparx5_qos_init(sparx5);
+ if (err) {
+ dev_err(sparx5->dev, "Failed to initialize QoS\n");
+ goto cleanup_ports;
+ }
+
err = sparx5_ptp_init(sparx5);
if (err) {
dev_err(sparx5->dev, "PTP failed\n");
#define GCB_SIO_CLOCK_SYS_CLK_PERIOD_GET(x)\
FIELD_GET(GCB_SIO_CLOCK_SYS_CLK_PERIOD, x)
+/* HSCH:HSCH_CFG:CIR_CFG */
+#define HSCH_CIR_CFG(g) __REG(TARGET_HSCH, 0, 1, 0, g, 5040, 32, 0, 0, 1, 4)
+
+#define HSCH_CIR_CFG_CIR_RATE GENMASK(22, 6)
+#define HSCH_CIR_CFG_CIR_RATE_SET(x)\
+ FIELD_PREP(HSCH_CIR_CFG_CIR_RATE, x)
+#define HSCH_CIR_CFG_CIR_RATE_GET(x)\
+ FIELD_GET(HSCH_CIR_CFG_CIR_RATE, x)
+
+#define HSCH_CIR_CFG_CIR_BURST GENMASK(5, 0)
+#define HSCH_CIR_CFG_CIR_BURST_SET(x)\
+ FIELD_PREP(HSCH_CIR_CFG_CIR_BURST, x)
+#define HSCH_CIR_CFG_CIR_BURST_GET(x)\
+ FIELD_GET(HSCH_CIR_CFG_CIR_BURST, x)
+
+/* HSCH:HSCH_CFG:EIR_CFG */
+#define HSCH_EIR_CFG(g) __REG(TARGET_HSCH, 0, 1, 0, g, 5040, 32, 4, 0, 1, 4)
+
+#define HSCH_EIR_CFG_EIR_RATE GENMASK(22, 6)
+#define HSCH_EIR_CFG_EIR_RATE_SET(x)\
+ FIELD_PREP(HSCH_EIR_CFG_EIR_RATE, x)
+#define HSCH_EIR_CFG_EIR_RATE_GET(x)\
+ FIELD_GET(HSCH_EIR_CFG_EIR_RATE, x)
+
+#define HSCH_EIR_CFG_EIR_BURST GENMASK(5, 0)
+#define HSCH_EIR_CFG_EIR_BURST_SET(x)\
+ FIELD_PREP(HSCH_EIR_CFG_EIR_BURST, x)
+#define HSCH_EIR_CFG_EIR_BURST_GET(x)\
+ FIELD_GET(HSCH_EIR_CFG_EIR_BURST, x)
+
+/* HSCH:HSCH_CFG:SE_CFG */
+#define HSCH_SE_CFG(g) __REG(TARGET_HSCH, 0, 1, 0, g, 5040, 32, 8, 0, 1, 4)
+
+#define HSCH_SE_CFG_SE_DWRR_CNT GENMASK(12, 6)
+#define HSCH_SE_CFG_SE_DWRR_CNT_SET(x)\
+ FIELD_PREP(HSCH_SE_CFG_SE_DWRR_CNT, x)
+#define HSCH_SE_CFG_SE_DWRR_CNT_GET(x)\
+ FIELD_GET(HSCH_SE_CFG_SE_DWRR_CNT, x)
+
+#define HSCH_SE_CFG_SE_AVB_ENA BIT(5)
+#define HSCH_SE_CFG_SE_AVB_ENA_SET(x)\
+ FIELD_PREP(HSCH_SE_CFG_SE_AVB_ENA, x)
+#define HSCH_SE_CFG_SE_AVB_ENA_GET(x)\
+ FIELD_GET(HSCH_SE_CFG_SE_AVB_ENA, x)
+
+#define HSCH_SE_CFG_SE_FRM_MODE GENMASK(4, 3)
+#define HSCH_SE_CFG_SE_FRM_MODE_SET(x)\
+ FIELD_PREP(HSCH_SE_CFG_SE_FRM_MODE, x)
+#define HSCH_SE_CFG_SE_FRM_MODE_GET(x)\
+ FIELD_GET(HSCH_SE_CFG_SE_FRM_MODE, x)
+
+#define HSCH_SE_CFG_SE_DWRR_FRM_MODE GENMASK(2, 1)
+#define HSCH_SE_CFG_SE_DWRR_FRM_MODE_SET(x)\
+ FIELD_PREP(HSCH_SE_CFG_SE_DWRR_FRM_MODE, x)
+#define HSCH_SE_CFG_SE_DWRR_FRM_MODE_GET(x)\
+ FIELD_GET(HSCH_SE_CFG_SE_DWRR_FRM_MODE, x)
+
+#define HSCH_SE_CFG_SE_STOP BIT(0)
+#define HSCH_SE_CFG_SE_STOP_SET(x)\
+ FIELD_PREP(HSCH_SE_CFG_SE_STOP, x)
+#define HSCH_SE_CFG_SE_STOP_GET(x)\
+ FIELD_GET(HSCH_SE_CFG_SE_STOP, x)
+
+/* HSCH:HSCH_CFG:SE_CONNECT */
+#define HSCH_SE_CONNECT(g) __REG(TARGET_HSCH, 0, 1, 0, g, 5040, 32, 12, 0, 1, 4)
+
+#define HSCH_SE_CONNECT_SE_LEAK_LINK GENMASK(15, 0)
+#define HSCH_SE_CONNECT_SE_LEAK_LINK_SET(x)\
+ FIELD_PREP(HSCH_SE_CONNECT_SE_LEAK_LINK, x)
+#define HSCH_SE_CONNECT_SE_LEAK_LINK_GET(x)\
+ FIELD_GET(HSCH_SE_CONNECT_SE_LEAK_LINK, x)
+
+/* HSCH:HSCH_CFG:SE_DLB_SENSE */
+#define HSCH_SE_DLB_SENSE(g) __REG(TARGET_HSCH, 0, 1, 0, g, 5040, 32, 16, 0, 1, 4)
+
+#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO GENMASK(12, 10)
+#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO_SET(x)\
+ FIELD_PREP(HSCH_SE_DLB_SENSE_SE_DLB_PRIO, x)
+#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO_GET(x)\
+ FIELD_GET(HSCH_SE_DLB_SENSE_SE_DLB_PRIO, x)
+
+#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT GENMASK(9, 3)
+#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT_SET(x)\
+ FIELD_PREP(HSCH_SE_DLB_SENSE_SE_DLB_DPORT, x)
+#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT_GET(x)\
+ FIELD_GET(HSCH_SE_DLB_SENSE_SE_DLB_DPORT, x)
+
+#define HSCH_SE_DLB_SENSE_SE_DLB_SE_ENA BIT(2)
+#define HSCH_SE_DLB_SENSE_SE_DLB_SE_ENA_SET(x)\
+ FIELD_PREP(HSCH_SE_DLB_SENSE_SE_DLB_SE_ENA, x)
+#define HSCH_SE_DLB_SENSE_SE_DLB_SE_ENA_GET(x)\
+ FIELD_GET(HSCH_SE_DLB_SENSE_SE_DLB_SE_ENA, x)
+
+#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO_ENA BIT(1)
+#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO_ENA_SET(x)\
+ FIELD_PREP(HSCH_SE_DLB_SENSE_SE_DLB_PRIO_ENA, x)
+#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO_ENA_GET(x)\
+ FIELD_GET(HSCH_SE_DLB_SENSE_SE_DLB_PRIO_ENA, x)
+
+#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT_ENA BIT(0)
+#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT_ENA_SET(x)\
+ FIELD_PREP(HSCH_SE_DLB_SENSE_SE_DLB_DPORT_ENA, x)
+#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT_ENA_GET(x)\
+ FIELD_GET(HSCH_SE_DLB_SENSE_SE_DLB_DPORT_ENA, x)
+
+/* HSCH:HSCH_DWRR:DWRR_ENTRY */
+#define HSCH_DWRR_ENTRY(g) __REG(TARGET_HSCH, 0, 1, 162816, g, 72, 4, 0, 0, 1, 4)
+
+#define HSCH_DWRR_ENTRY_DWRR_COST GENMASK(24, 20)
+#define HSCH_DWRR_ENTRY_DWRR_COST_SET(x)\
+ FIELD_PREP(HSCH_DWRR_ENTRY_DWRR_COST, x)
+#define HSCH_DWRR_ENTRY_DWRR_COST_GET(x)\
+ FIELD_GET(HSCH_DWRR_ENTRY_DWRR_COST, x)
+
+#define HSCH_DWRR_ENTRY_DWRR_BALANCE GENMASK(19, 0)
+#define HSCH_DWRR_ENTRY_DWRR_BALANCE_SET(x)\
+ FIELD_PREP(HSCH_DWRR_ENTRY_DWRR_BALANCE, x)
+#define HSCH_DWRR_ENTRY_DWRR_BALANCE_GET(x)\
+ FIELD_GET(HSCH_DWRR_ENTRY_DWRR_BALANCE, x)
+
+/* HSCH:HSCH_MISC:HSCH_CFG_CFG */
+#define HSCH_HSCH_CFG_CFG __REG(TARGET_HSCH, 0, 1, 163104, 0, 1, 648, 284, 0, 1, 4)
+
+#define HSCH_HSCH_CFG_CFG_CFG_SE_IDX GENMASK(26, 14)
+#define HSCH_HSCH_CFG_CFG_CFG_SE_IDX_SET(x)\
+ FIELD_PREP(HSCH_HSCH_CFG_CFG_CFG_SE_IDX, x)
+#define HSCH_HSCH_CFG_CFG_CFG_SE_IDX_GET(x)\
+ FIELD_GET(HSCH_HSCH_CFG_CFG_CFG_SE_IDX, x)
+
+#define HSCH_HSCH_CFG_CFG_HSCH_LAYER GENMASK(13, 12)
+#define HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(x)\
+ FIELD_PREP(HSCH_HSCH_CFG_CFG_HSCH_LAYER, x)
+#define HSCH_HSCH_CFG_CFG_HSCH_LAYER_GET(x)\
+ FIELD_GET(HSCH_HSCH_CFG_CFG_HSCH_LAYER, x)
+
+#define HSCH_HSCH_CFG_CFG_CSR_GRANT GENMASK(11, 0)
+#define HSCH_HSCH_CFG_CFG_CSR_GRANT_SET(x)\
+ FIELD_PREP(HSCH_HSCH_CFG_CFG_CSR_GRANT, x)
+#define HSCH_HSCH_CFG_CFG_CSR_GRANT_GET(x)\
+ FIELD_GET(HSCH_HSCH_CFG_CFG_CSR_GRANT, x)
+
/* HSCH:HSCH_MISC:SYS_CLK_PER */
#define HSCH_SYS_CLK_PER __REG(TARGET_HSCH, 0, 1, 163104, 0, 1, 648, 640, 0, 1, 4)
#define HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS_GET(x)\
FIELD_GET(HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS, x)
+/* HSCH:HSCH_LEAK_LISTS:HSCH_TIMER_CFG */
+#define HSCH_HSCH_TIMER_CFG(g, r) __REG(TARGET_HSCH, 0, 1, 161664, g, 4, 32, 0, r, 4, 4)
+
+#define HSCH_HSCH_TIMER_CFG_LEAK_TIME GENMASK(17, 0)
+#define HSCH_HSCH_TIMER_CFG_LEAK_TIME_SET(x)\
+ FIELD_PREP(HSCH_HSCH_TIMER_CFG_LEAK_TIME, x)
+#define HSCH_HSCH_TIMER_CFG_LEAK_TIME_GET(x)\
+ FIELD_GET(HSCH_HSCH_TIMER_CFG_LEAK_TIME, x)
+
+/* HSCH:HSCH_LEAK_LISTS:HSCH_LEAK_CFG */
+#define HSCH_HSCH_LEAK_CFG(g, r) __REG(TARGET_HSCH, 0, 1, 161664, g, 4, 32, 16, r, 4, 4)
+
+#define HSCH_HSCH_LEAK_CFG_LEAK_FIRST GENMASK(16, 1)
+#define HSCH_HSCH_LEAK_CFG_LEAK_FIRST_SET(x)\
+ FIELD_PREP(HSCH_HSCH_LEAK_CFG_LEAK_FIRST, x)
+#define HSCH_HSCH_LEAK_CFG_LEAK_FIRST_GET(x)\
+ FIELD_GET(HSCH_HSCH_LEAK_CFG_LEAK_FIRST, x)
+
+#define HSCH_HSCH_LEAK_CFG_LEAK_ERR BIT(0)
+#define HSCH_HSCH_LEAK_CFG_LEAK_ERR_SET(x)\
+ FIELD_PREP(HSCH_HSCH_LEAK_CFG_LEAK_ERR, x)
+#define HSCH_HSCH_LEAK_CFG_LEAK_ERR_GET(x)\
+ FIELD_GET(HSCH_HSCH_LEAK_CFG_LEAK_ERR, x)
+
/* HSCH:SYSTEM:FLUSH_CTRL */
#define HSCH_FLUSH_CTRL __REG(TARGET_HSCH, 0, 1, 184000, 0, 1, 312, 4, 0, 1, 4)
#include "sparx5_main_regs.h"
#include "sparx5_main.h"
#include "sparx5_port.h"
+#include "sparx5_tc.h"
/* The IFH bit position of the first VSTAX bit. This is because the
* VSTAX bit positions in Data sheet is starting from zero.
.ndo_get_stats64 = sparx5_get_stats64,
.ndo_get_port_parent_id = sparx5_get_port_parent_id,
.ndo_eth_ioctl = sparx5_port_ioctl,
+ .ndo_setup_tc = sparx5_port_setup_tc,
};
bool sparx5_netdevice_check(const struct net_device *dev)
struct sparx5_port *spx5_port;
struct net_device *ndev;
- ndev = devm_alloc_etherdev(sparx5->dev, sizeof(struct sparx5_port));
+ ndev = devm_alloc_etherdev_mqs(sparx5->dev, sizeof(struct sparx5_port),
+ SPX5_PRIOS, 1);
if (!ndev)
return ERR_PTR(-ENOMEM);
+ ndev->hw_features |= NETIF_F_HW_TC;
+ ndev->features |= NETIF_F_HW_TC;
+
SET_NETDEV_DEV(ndev, sparx5->dev);
spx5_port = netdev_priv(ndev);
spx5_port->ndev = ndev;
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include <net/pkt_cls.h>
+
+#include "sparx5_main.h"
+#include "sparx5_qos.h"
+
+/* Max rates for leak groups */
+static const u32 spx5_hsch_max_group_rate[SPX5_HSCH_LEAK_GRP_CNT] = {
+ 1048568, /* 1.049 Gbps */
+ 2621420, /* 2.621 Gbps */
+ 10485680, /* 10.486 Gbps */
+ 26214200 /* 26.214 Gbps */
+};
+
+static struct sparx5_layer layers[SPX5_HSCH_LAYER_CNT];
+
+static u32 sparx5_lg_get_leak_time(struct sparx5 *sparx5, u32 layer, u32 group)
+{
+ u32 value;
+
+ value = spx5_rd(sparx5, HSCH_HSCH_TIMER_CFG(layer, group));
+ return HSCH_HSCH_TIMER_CFG_LEAK_TIME_GET(value);
+}
+
+static void sparx5_lg_set_leak_time(struct sparx5 *sparx5, u32 layer, u32 group,
+ u32 leak_time)
+{
+ spx5_wr(HSCH_HSCH_TIMER_CFG_LEAK_TIME_SET(leak_time), sparx5,
+ HSCH_HSCH_TIMER_CFG(layer, group));
+}
+
+static u32 sparx5_lg_get_first(struct sparx5 *sparx5, u32 layer, u32 group)
+{
+ u32 value;
+
+ value = spx5_rd(sparx5, HSCH_HSCH_LEAK_CFG(layer, group));
+ return HSCH_HSCH_LEAK_CFG_LEAK_FIRST_GET(value);
+}
+
+static u32 sparx5_lg_get_next(struct sparx5 *sparx5, u32 layer, u32 group,
+ u32 idx)
+{
+ u32 value;
+
+ value = spx5_rd(sparx5, HSCH_SE_CONNECT(idx));
+ return HSCH_SE_CONNECT_SE_LEAK_LINK_GET(value);
+}
+
+static u32 sparx5_lg_get_last(struct sparx5 *sparx5, u32 layer, u32 group)
+{
+ u32 itr, next;
+
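+ /* Walk the chain; the last element links back to itself */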
+ itr = sparx5_lg_get_first(sparx5, layer, group);
+
+ for (;;) {
+ next = sparx5_lg_get_next(sparx5, layer, group, itr);
+ if (itr == next)
+ return itr;
+
+ itr = next;
+ }
+}
+
+static bool sparx5_lg_is_last(struct sparx5 *sparx5, u32 layer, u32 group,
+ u32 idx)
+{
+ return idx == sparx5_lg_get_next(sparx5, layer, group, idx);
+}
+
+static bool sparx5_lg_is_first(struct sparx5 *sparx5, u32 layer, u32 group,
+ u32 idx)
+{
+ return idx == sparx5_lg_get_first(sparx5, layer, group);
+}
+
+static bool sparx5_lg_is_empty(struct sparx5 *sparx5, u32 layer, u32 group)
+{
+ return sparx5_lg_get_leak_time(sparx5, layer, group) == 0;
+}
+
+static bool sparx5_lg_is_singular(struct sparx5 *sparx5, u32 layer, u32 group)
+{
+ if (sparx5_lg_is_empty(sparx5, layer, group))
+ return false;
+
+ return sparx5_lg_get_first(sparx5, layer, group) ==
+ sparx5_lg_get_last(sparx5, layer, group);
+}
+
+static void sparx5_lg_enable(struct sparx5 *sparx5, u32 layer, u32 group,
+ u32 leak_time)
+{
+ sparx5_lg_set_leak_time(sparx5, layer, group, leak_time);
+}
+
+static void sparx5_lg_disable(struct sparx5 *sparx5, u32 layer, u32 group)
+{
+ sparx5_lg_set_leak_time(sparx5, layer, group, 0);
+}
+
+static int sparx5_lg_get_group_by_index(struct sparx5 *sparx5, u32 layer,
+ u32 idx, u32 *group)
+{
+ u32 itr, next;
+ int i;
+
+ for (i = 0; i < SPX5_HSCH_LEAK_GRP_CNT; i++) {
+ if (sparx5_lg_is_empty(sparx5, layer, i))
+ continue;
+
+ itr = sparx5_lg_get_first(sparx5, layer, i);
+
+ for (;;) {
+ next = sparx5_lg_get_next(sparx5, layer, i, itr);
+
+ if (itr == idx) {
+ *group = i;
+ return 0; /* Found it */
+ }
+ if (itr == next)
+ break; /* Was not found */
+
+ itr = next;
+ }
+ }
+
+ return -1;
+}
+
+static int sparx5_lg_get_group_by_rate(u32 layer, u32 rate, u32 *group)
+{
+ struct sparx5_layer *l = &layers[layer];
+ struct sparx5_lg *lg;
+ u32 i;
+
+ for (i = 0; i < SPX5_HSCH_LEAK_GRP_CNT; i++) {
+ lg = &l->leak_groups[i];
+ if (rate <= lg->max_rate) {
+ *group = i;
+ return 0;
+ }
+ }
+
+ return -1;
+}
+
+static int sparx5_lg_get_adjacent(struct sparx5 *sparx5, u32 layer, u32 group,
+ u32 idx, u32 *prev, u32 *next, u32 *first)
+{
+ u32 itr;
+
+ *first = sparx5_lg_get_first(sparx5, layer, group);
+ *prev = *first;
+ *next = *first;
+ itr = *first;
+
+ for (;;) {
+ *next = sparx5_lg_get_next(sparx5, layer, group, itr);
+
+ if (itr == idx)
+ return 0; /* Found it */
+
+ if (itr == *next)
+ return -1; /* Was not found */
+
+ *prev = itr;
+ itr = *next;
+ }
+
+ return -1;
+}
+
+static int sparx5_lg_conf_set(struct sparx5 *sparx5, u32 layer, u32 group,
+ u32 se_first, u32 idx, u32 idx_next, bool empty)
+{
+ u32 leak_time = layers[layer].leak_groups[group].leak_time;
+
+ /* Stop leaking */
+ sparx5_lg_disable(sparx5, layer, group);
+
+ if (empty)
+ return 0;
+
+ /* Select layer */
+ spx5_rmw(HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(layer),
+ HSCH_HSCH_CFG_CFG_HSCH_LAYER, sparx5, HSCH_HSCH_CFG_CFG);
+
+ /* Link elements */
+ spx5_wr(HSCH_SE_CONNECT_SE_LEAK_LINK_SET(idx_next), sparx5,
+ HSCH_SE_CONNECT(idx));
+
+ /* Set the first element. */
+ spx5_rmw(HSCH_HSCH_LEAK_CFG_LEAK_FIRST_SET(se_first),
+ HSCH_HSCH_LEAK_CFG_LEAK_FIRST, sparx5,
+ HSCH_HSCH_LEAK_CFG(layer, group));
+
+ /* Start leaking */
+ sparx5_lg_enable(sparx5, layer, group, leak_time);
+
+ return 0;
+}
+
+static int sparx5_lg_del(struct sparx5 *sparx5, u32 layer, u32 group, u32 idx)
+{
+ u32 first, next, prev;
+ bool empty = false;
+
+ /* idx *must* be present in the leak group */
+ WARN_ON(sparx5_lg_get_adjacent(sparx5, layer, group, idx, &prev, &next,
+ &first) < 0);
+
+ if (sparx5_lg_is_singular(sparx5, layer, group)) {
+ empty = true;
+ } else if (sparx5_lg_is_last(sparx5, layer, group, idx)) {
+ /* idx is removed, prev is now last */
+ idx = prev;
+ next = prev;
+ } else if (sparx5_lg_is_first(sparx5, layer, group, idx)) {
+ /* idx is removed and points to itself, first is next */
+ first = next;
+ next = idx;
+ } else {
+ /* Next is not touched */
+ idx = prev;
+ }
+
+ return sparx5_lg_conf_set(sparx5, layer, group, first, idx, next,
+ empty);
+}
+
+static int sparx5_lg_add(struct sparx5 *sparx5, u32 layer, u32 new_group,
+ u32 idx)
+{
+ u32 first, next, old_group;
+
+ pr_debug("ADD: layer: %d, new_group: %d, idx: %d", layer, new_group,
+ idx);
+
+ /* Is this SE already shaping? */
+ if (sparx5_lg_get_group_by_index(sparx5, layer, idx, &old_group) >= 0) {
+ if (old_group != new_group) {
+ /* Delete from old group */
+ sparx5_lg_del(sparx5, layer, old_group, idx);
+ } else {
+ /* Nothing to do here */
+ return 0;
+ }
+ }
+
+ /* We always add to head of the list */
+ first = idx;
+
+ if (sparx5_lg_is_empty(sparx5, layer, new_group))
+ next = idx;
+ else
+ next = sparx5_lg_get_first(sparx5, layer, new_group);
+
+ return sparx5_lg_conf_set(sparx5, layer, new_group, first, idx, next,
+ false);
+}
+
+static int sparx5_shaper_conf_set(struct sparx5_port *port,
+ const struct sparx5_shaper *sh, u32 layer,
+ u32 idx, u32 group)
+{
+ int (*sparx5_lg_action)(struct sparx5 *, u32, u32, u32);
+ struct sparx5 *sparx5 = port->sparx5;
+
+ if (!sh->rate && !sh->burst)
+ sparx5_lg_action = &sparx5_lg_del;
+ else
+ sparx5_lg_action = &sparx5_lg_add;
+
+ /* Select layer */
+ spx5_rmw(HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(layer),
+ HSCH_HSCH_CFG_CFG_HSCH_LAYER, sparx5, HSCH_HSCH_CFG_CFG);
+
+ /* Set frame mode */
+ spx5_rmw(HSCH_SE_CFG_SE_FRM_MODE_SET(sh->mode), HSCH_SE_CFG_SE_FRM_MODE,
+ sparx5, HSCH_SE_CFG(idx));
+
+ /* Set committed rate and burst */
+ spx5_wr(HSCH_CIR_CFG_CIR_RATE_SET(sh->rate) |
+ HSCH_CIR_CFG_CIR_BURST_SET(sh->burst),
+ sparx5, HSCH_CIR_CFG(idx));
+
+ /* This has to be done after the shaper configuration has been set */
+ sparx5_lg_action(sparx5, layer, group, idx);
+
+ return 0;
+}
+
+static u32 sparx5_weight_to_hw_cost(u32 weight_min, u32 weight)
+{
+ return ((((SPX5_DWRR_COST_MAX << 4) * weight_min / weight) + 8) >> 4) -
+ 1;
+}
+
+static int sparx5_dwrr_conf_set(struct sparx5_port *port,
+ struct sparx5_dwrr *dwrr)
+{
+ int i;
+
+ spx5_rmw(HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(2) |
+ HSCH_HSCH_CFG_CFG_CFG_SE_IDX_SET(port->portno),
+ HSCH_HSCH_CFG_CFG_HSCH_LAYER | HSCH_HSCH_CFG_CFG_CFG_SE_IDX,
+ port->sparx5, HSCH_HSCH_CFG_CFG);
+
+ /* Number of *lower* indexes that are arbitrated dwrr */
+ spx5_rmw(HSCH_SE_CFG_SE_DWRR_CNT_SET(dwrr->count),
+ HSCH_SE_CFG_SE_DWRR_CNT, port->sparx5,
+ HSCH_SE_CFG(port->portno));
+
+ for (i = 0; i < dwrr->count; i++) {
+ spx5_rmw(HSCH_DWRR_ENTRY_DWRR_COST_SET(dwrr->cost[i]),
+ HSCH_DWRR_ENTRY_DWRR_COST, port->sparx5,
+ HSCH_DWRR_ENTRY(i));
+ }
+
+ return 0;
+}
+
+static int sparx5_leak_groups_init(struct sparx5 *sparx5)
+{
+ struct sparx5_layer *layer;
+ u32 sys_clk_per_100ps;
+ struct sparx5_lg *lg;
+ u32 leak_time_us;
+ int i, ii;
+
+ sys_clk_per_100ps = spx5_rd(sparx5, HSCH_SYS_CLK_PER);
+
+ for (i = 0; i < SPX5_HSCH_LAYER_CNT; i++) {
+ layer = &layers[i];
+ for (ii = 0; ii < SPX5_HSCH_LEAK_GRP_CNT; ii++) {
+ lg = &layer->leak_groups[ii];
+ lg->max_rate = spx5_hsch_max_group_rate[ii];
+
+ /* Calculate the leak time in us, to serve a maximum
+ * rate of 'max_rate' for this group
+ */
+ leak_time_us = (SPX5_SE_RATE_MAX * 1000) / lg->max_rate;
+
+ /* Hardware wants leak time in ns */
+ lg->leak_time = 1000 * leak_time_us;
+
+ /* Calculate resolution */
+ lg->resolution = 1000 / leak_time_us;
+
+ /* Maximum number of shapers that can be served by
+ * this leak group
+ */
+ lg->max_ses = (1000 * leak_time_us) / sys_clk_per_100ps;
+
+ /* Example:
+ * Wanted bandwidth is 100Mbit:
+ *
+ * 100 mbps can be served by leak group zero.
+ *
+ * leak_time is 125000 ns.
+ * resolution is: 8
+ *
+ * cir = 100000 / 8 = 12500
+ * leaks_pr_sec = 10^9 / 125000 = 8000
+ * bw = 12500 * 8000 = 10^8 (100 Mbit)
+ */
+
+ /* Disable by default - this also indicates an empty
+ * leak group
+ */
+ sparx5_lg_disable(sparx5, i, ii);
+ }
+ }
+
+ return 0;
+}
+
+int sparx5_qos_init(struct sparx5 *sparx5)
+{
+ int ret;
+
+ ret = sparx5_leak_groups_init(sparx5);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+int sparx5_tc_mqprio_add(struct net_device *ndev, u8 num_tc)
+{
+ int i;
+
+ if (num_tc != SPX5_PRIOS) {
+ netdev_err(ndev, "Only %d traffic classes supported\n",
+ SPX5_PRIOS);
+ return -EINVAL;
+ }
+
+ netdev_set_num_tc(ndev, num_tc);
+
+ for (i = 0; i < num_tc; i++)
+ netdev_set_tc_queue(ndev, i, 1, i);
+
+ netdev_dbg(ndev, "dev->num_tc %u dev->real_num_tx_queues %u\n",
+ ndev->num_tc, ndev->real_num_tx_queues);
+
+ return 0;
+}
+
+int sparx5_tc_mqprio_del(struct net_device *ndev)
+{
+ netdev_reset_tc(ndev);
+
+ netdev_dbg(ndev, "dev->num_tc %u dev->real_num_tx_queues %u\n",
+ ndev->num_tc, ndev->real_num_tx_queues);
+
+ return 0;
+}
+
+int sparx5_tc_tbf_add(struct sparx5_port *port,
+ struct tc_tbf_qopt_offload_replace_params *params,
+ u32 layer, u32 idx)
+{
+ struct sparx5_shaper sh = {
+ .mode = SPX5_SE_MODE_DATARATE,
+ .rate = div_u64(params->rate.rate_bytes_ps, 1000) * 8,
+ .burst = params->max_size,
+ };
+ struct sparx5_lg *lg;
+ u32 group;
+
+ /* Find suitable group for this se */
+ if (sparx5_lg_get_group_by_rate(layer, sh.rate, &group) < 0) {
+ pr_debug("Could not find leak group for se with rate: %d",
+ sh.rate);
+ return -EINVAL;
+ }
+
+ lg = &layers[layer].leak_groups[group];
+
+ pr_debug("Found matching group (speed: %d)\n", lg->max_rate);
+
+ if (sh.rate < SPX5_SE_RATE_MIN || sh.burst < SPX5_SE_BURST_MIN)
+ return -EINVAL;
+
+ /* Calculate committed rate and burst */
+ sh.rate = DIV_ROUND_UP(sh.rate, lg->resolution);
+ sh.burst = DIV_ROUND_UP(sh.burst, SPX5_SE_BURST_UNIT);
+
+ if (sh.rate > SPX5_SE_RATE_MAX || sh.burst > SPX5_SE_BURST_MAX)
+ return -EINVAL;
+
+ return sparx5_shaper_conf_set(port, &sh, layer, idx, group);
+}
+
+int sparx5_tc_tbf_del(struct sparx5_port *port, u32 layer, u32 idx)
+{
+ struct sparx5_shaper sh = {0};
+ u32 group;
+
+ sparx5_lg_get_group_by_index(port->sparx5, layer, idx, &group);
+
+ return sparx5_shaper_conf_set(port, &sh, layer, idx, group);
+}
+
+int sparx5_tc_ets_add(struct sparx5_port *port,
+ struct tc_ets_qopt_offload_replace_params *params)
+{
+ struct sparx5_dwrr dwrr = {0};
+ /* Minimum weight for each iteration */
+ unsigned int w_min = 100;
+ int i;
+
+ /* Find minimum weight for all dwrr bands */
+ for (i = 0; i < SPX5_PRIOS; i++) {
+ if (params->quanta[i] == 0)
+ continue;
+ w_min = min(w_min, params->weights[i]);
+ }
+
+ for (i = 0; i < SPX5_PRIOS; i++) {
+ /* Strict band; skip */
+ if (params->quanta[i] == 0)
+ continue;
+
+ dwrr.count++;
+
+ /* On the sparx5, bands with higher indexes are preferred and
+ * arbitrated strict. Strict bands are put in the lower indexes by
+ * tc, so we reverse the bands here.
+ *
+ * Also convert the weight to something the hardware
+ * understands.
+ */
+ dwrr.cost[SPX5_PRIOS - i - 1] =
+ sparx5_weight_to_hw_cost(w_min, params->weights[i]);
+ }
+
+ return sparx5_dwrr_conf_set(port, &dwrr);
+}
+
+int sparx5_tc_ets_del(struct sparx5_port *port)
+{
+ struct sparx5_dwrr dwrr = {0};
+
+ return sparx5_dwrr_conf_set(port, &dwrr);
+}
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#ifndef __SPARX5_QOS_H__
+#define __SPARX5_QOS_H__
+
+#include <linux/netdevice.h>
+
+/* Number of Layers */
+#define SPX5_HSCH_LAYER_CNT 3
+
+/* Scheduling elements per layer */
+#define SPX5_HSCH_L0_SE_CNT 5040
+#define SPX5_HSCH_L1_SE_CNT 64
+#define SPX5_HSCH_L2_SE_CNT 64
+
+/* Calculate Layer 0 Scheduler Element when using normal hierarchy */
+#define SPX5_HSCH_L0_GET_IDX(port, queue) ((64 * (port)) + (8 * (queue)))
+
+/* Number of leak groups */
+#define SPX5_HSCH_LEAK_GRP_CNT 4
+
+/* Scheduler modes */
+#define SPX5_SE_MODE_LINERATE 0
+#define SPX5_SE_MODE_DATARATE 1
+
+/* Rate and burst */
+#define SPX5_SE_RATE_MAX 262143
+#define SPX5_SE_BURST_MAX 127
+#define SPX5_SE_RATE_MIN 1
+#define SPX5_SE_BURST_MIN 1
+#define SPX5_SE_BURST_UNIT 4096
+
+/* Dwrr */
+#define SPX5_DWRR_COST_MAX 63
+
+struct sparx5_shaper {
+ u32 mode;
+ u32 rate;
+ u32 burst;
+};
+
+struct sparx5_lg {
+ u32 max_rate;
+ u32 resolution;
+ u32 leak_time;
+ u32 max_ses;
+};
+
+struct sparx5_layer {
+ struct sparx5_lg leak_groups[SPX5_HSCH_LEAK_GRP_CNT];
+};
+
+struct sparx5_dwrr {
+ u32 count; /* Number of inputs running dwrr */
+ u8 cost[SPX5_PRIOS];
+};
+
+int sparx5_qos_init(struct sparx5 *sparx5);
+
+/* Multi-Queue Priority */
+int sparx5_tc_mqprio_add(struct net_device *ndev, u8 num_tc);
+int sparx5_tc_mqprio_del(struct net_device *ndev);
+
+/* Token Bucket Filter */
+struct tc_tbf_qopt_offload_replace_params;
+int sparx5_tc_tbf_add(struct sparx5_port *port,
+ struct tc_tbf_qopt_offload_replace_params *params,
+ u32 layer, u32 idx);
+int sparx5_tc_tbf_del(struct sparx5_port *port, u32 layer, u32 idx);
+
+/* Enhanced Transmission Selection */
+struct tc_ets_qopt_offload_replace_params;
+int sparx5_tc_ets_add(struct sparx5_port *port,
+ struct tc_ets_qopt_offload_replace_params *params);
+
+int sparx5_tc_ets_del(struct sparx5_port *port);
+
+#endif /* __SPARX5_QOS_H__ */
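
As a quick illustration of the index macro above: the layer-0 scheduler element
for a port/queue pair is 64 * port + 8 * queue, so (hypothetical values) port 2,
queue 3 resolves to element 152:

	u32 se_idx = SPX5_HSCH_L0_GET_IDX(2, 3);	/* 64 * 2 + 8 * 3 = 152 */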
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include <net/pkt_cls.h>
+
+#include "sparx5_tc.h"
+#include "sparx5_main.h"
+#include "sparx5_qos.h"
+
+static void sparx5_tc_get_layer_and_idx(u32 parent, u32 portno, u32 *layer,
+ u32 *idx)
+{
+ if (parent == TC_H_ROOT) {
+ *layer = 2;
+ *idx = portno;
+ } else {
+ u32 queue = TC_H_MIN(parent) - 1;
+
+ *layer = 0;
+ *idx = SPX5_HSCH_L0_GET_IDX(portno, queue);
+ }
+}
+
+static int sparx5_tc_setup_qdisc_mqprio(struct net_device *ndev,
+ struct tc_mqprio_qopt_offload *m)
+{
+ m->qopt.hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+
+ if (m->qopt.num_tc == 0)
+ return sparx5_tc_mqprio_del(ndev);
+ else
+ return sparx5_tc_mqprio_add(ndev, m->qopt.num_tc);
+}
+
+static int sparx5_tc_setup_qdisc_tbf(struct net_device *ndev,
+ struct tc_tbf_qopt_offload *qopt)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ u32 layer, se_idx;
+
+ sparx5_tc_get_layer_and_idx(qopt->parent, port->portno, &layer,
+ &se_idx);
+
+ switch (qopt->command) {
+ case TC_TBF_REPLACE:
+ return sparx5_tc_tbf_add(port, &qopt->replace_params, layer,
+ se_idx);
+ case TC_TBF_DESTROY:
+ return sparx5_tc_tbf_del(port, layer, se_idx);
+ case TC_TBF_STATS:
+ return -EOPNOTSUPP;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int sparx5_tc_setup_qdisc_ets(struct net_device *ndev,
+ struct tc_ets_qopt_offload *qopt)
+{
+ struct tc_ets_qopt_offload_replace_params *params =
+ &qopt->replace_params;
+ struct sparx5_port *port = netdev_priv(ndev);
+ int i;
+
+ /* Only allow ets on ports */
+ if (qopt->parent != TC_H_ROOT)
+ return -EOPNOTSUPP;
+
+ switch (qopt->command) {
+ case TC_ETS_REPLACE:
+
+ /* We support eight priorities */
+ if (params->bands != SPX5_PRIOS)
+ return -EOPNOTSUPP;
+
+ /* Sanity checks */
+ for (i = 0; i < SPX5_PRIOS; ++i) {
+ /* Priority map is *always* reverse e.g: 7 6 5 .. 0 */
+ if (params->priomap[i] != (7 - i))
+ return -EOPNOTSUPP;
+ /* Throw an error if we receive zero weights by tc */
+ if (params->quanta[i] && params->weights[i] == 0) {
+ pr_err("Invalid ets configuration; band %d has weight zero",
+ i);
+ return -EINVAL;
+ }
+ }
+
+ return sparx5_tc_ets_add(port, params);
+ case TC_ETS_DESTROY:
+
+ return sparx5_tc_ets_del(port);
+ case TC_ETS_GRAFT:
+ return -EOPNOTSUPP;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+int sparx5_port_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_QDISC_MQPRIO:
+ return sparx5_tc_setup_qdisc_mqprio(ndev, type_data);
+ case TC_SETUP_QDISC_TBF:
+ return sparx5_tc_setup_qdisc_tbf(ndev, type_data);
+ case TC_SETUP_QDISC_ETS:
+ return sparx5_tc_setup_qdisc_ets(ndev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#ifndef __SPARX5_TC_H__
+#define __SPARX5_TC_H__
+
+#include <linux/netdevice.h>
+
+int sparx5_port_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data);
+
+#endif /* __SPARX5_TC_H__ */
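
The entry point declared above is meant to be plugged into the port's
net_device_ops. A minimal sketch, with the ops table name assumed rather than
taken from this series:

	static const struct net_device_ops sparx5_port_netdev_ops = {
		/* ...existing ndo_* callbacks... */
		.ndo_setup_tc	= sparx5_port_setup_tc,	/* qdisc offload dispatch */
	};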
if (ctx->offload == MACSEC_OFFLOAD_PHY)
mutex_lock(&ctx->phydev->lock);
- /* Phase I: prepare. The drive should fail here if there are going to be
- * issues in the commit phase.
- */
- ctx->prepare = true;
- ret = (*func)(ctx);
- if (ret)
- goto phy_unlock;
-
- /* Phase II: commit. This step cannot fail. */
- ctx->prepare = false;
ret = (*func)(ctx);
- /* This should never happen: commit is not allowed to fail */
- if (unlikely(ret))
- WARN(1, "MACsec offloading commit failed (%d)\n", ret);
-phy_unlock:
if (ctx->offload == MACSEC_OFFLOAD_PHY)
mutex_unlock(&ctx->phydev->lock);
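
With the prepare phase removed, the dispatcher above invokes each driver
operation exactly once, so a callback must do its validation and its hardware
writes in the same call. A minimal sketch of the resulting call shape (the
variable names and the choice of operation are illustrative):

	/* One call, no ctx->prepare round trip; any error is final. */
	ret = ops->mdo_add_rxsa(&ctx);
	if (ret)
		return ret;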
#define MDIO_PHYXS_VEND_IF_STATUS 0xe812
#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_MASK GENMASK(7, 3)
#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_KR 0
+#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_KX 1
#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_XFI 2
#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_USXGMII 3
+#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_XAUI 4
#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_SGMII 6
+#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_RXAUI 7
#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_OCSGMII 10
#define MDIO_AN_VEND_PROV 0xc400
#define VEND1_GLOBAL_GEN_STAT2 0xc831
#define VEND1_GLOBAL_GEN_STAT2_OP_IN_PROG BIT(15)
+/* The following registers all have similar layouts; first the registers... */
+#define VEND1_GLOBAL_CFG_10M 0x0310
+#define VEND1_GLOBAL_CFG_100M 0x031b
+#define VEND1_GLOBAL_CFG_1G 0x031c
+#define VEND1_GLOBAL_CFG_2_5G 0x031d
+#define VEND1_GLOBAL_CFG_5G 0x031e
+#define VEND1_GLOBAL_CFG_10G 0x031f
+/* ...and now the fields */
+#define VEND1_GLOBAL_CFG_RATE_ADAPT GENMASK(8, 7)
+#define VEND1_GLOBAL_CFG_RATE_ADAPT_NONE 0
+#define VEND1_GLOBAL_CFG_RATE_ADAPT_USX 1
+#define VEND1_GLOBAL_CFG_RATE_ADAPT_PAUSE 2
+
#define VEND1_GLOBAL_RSVD_STAT1 0xc885
#define VEND1_GLOBAL_RSVD_STAT1_FW_BUILD_ID GENMASK(7, 4)
#define VEND1_GLOBAL_RSVD_STAT1_PROV_ID GENMASK(3, 0)
static int aqr107_read_rate(struct phy_device *phydev)
{
+ u32 config_reg;
int val;
val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_TX_VEND_STATUS1);
if (val < 0)
return val;
+ if (val & MDIO_AN_TX_VEND_STATUS1_FULL_DUPLEX)
+ phydev->duplex = DUPLEX_FULL;
+ else
+ phydev->duplex = DUPLEX_HALF;
+
switch (FIELD_GET(MDIO_AN_TX_VEND_STATUS1_RATE_MASK, val)) {
case MDIO_AN_TX_VEND_STATUS1_10BASET:
phydev->speed = SPEED_10;
+ config_reg = VEND1_GLOBAL_CFG_10M;
break;
case MDIO_AN_TX_VEND_STATUS1_100BASETX:
phydev->speed = SPEED_100;
+ config_reg = VEND1_GLOBAL_CFG_100M;
break;
case MDIO_AN_TX_VEND_STATUS1_1000BASET:
phydev->speed = SPEED_1000;
+ config_reg = VEND1_GLOBAL_CFG_1G;
break;
case MDIO_AN_TX_VEND_STATUS1_2500BASET:
phydev->speed = SPEED_2500;
+ config_reg = VEND1_GLOBAL_CFG_2_5G;
break;
case MDIO_AN_TX_VEND_STATUS1_5000BASET:
phydev->speed = SPEED_5000;
+ config_reg = VEND1_GLOBAL_CFG_5G;
break;
case MDIO_AN_TX_VEND_STATUS1_10GBASET:
phydev->speed = SPEED_10000;
+ config_reg = VEND1_GLOBAL_CFG_10G;
break;
default:
phydev->speed = SPEED_UNKNOWN;
- break;
+ return 0;
}
- if (val & MDIO_AN_TX_VEND_STATUS1_FULL_DUPLEX)
- phydev->duplex = DUPLEX_FULL;
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND1, config_reg);
+ if (val < 0)
+ return val;
+
+ if (FIELD_GET(VEND1_GLOBAL_CFG_RATE_ADAPT, val) ==
+ VEND1_GLOBAL_CFG_RATE_ADAPT_PAUSE)
+ phydev->rate_matching = RATE_MATCH_PAUSE;
else
- phydev->duplex = DUPLEX_HALF;
+ phydev->rate_matching = RATE_MATCH_NONE;
return 0;
}
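
The per-speed VEND1_GLOBAL_CFG registers share one layout, with bits 8:7
selecting the rate adaptation method. A small worked decode, using a
hypothetical register value:

	u16 val = 0x0100;	/* bits 8:7 = 0b10 */

	if (FIELD_GET(VEND1_GLOBAL_CFG_RATE_ADAPT, val) ==
	    VEND1_GLOBAL_CFG_RATE_ADAPT_PAUSE)	/* (0x0100 & 0x180) >> 7 = 2 */
		phydev->rate_matching = RATE_MATCH_PAUSE;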
case MDIO_PHYXS_VEND_IF_STATUS_TYPE_KR:
phydev->interface = PHY_INTERFACE_MODE_10GKR;
break;
+ case MDIO_PHYXS_VEND_IF_STATUS_TYPE_KX:
+ phydev->interface = PHY_INTERFACE_MODE_1000BASEKX;
+ break;
case MDIO_PHYXS_VEND_IF_STATUS_TYPE_XFI:
phydev->interface = PHY_INTERFACE_MODE_10GBASER;
break;
case MDIO_PHYXS_VEND_IF_STATUS_TYPE_USXGMII:
phydev->interface = PHY_INTERFACE_MODE_USXGMII;
break;
+ case MDIO_PHYXS_VEND_IF_STATUS_TYPE_XAUI:
+ phydev->interface = PHY_INTERFACE_MODE_XAUI;
+ break;
case MDIO_PHYXS_VEND_IF_STATUS_TYPE_SGMII:
phydev->interface = PHY_INTERFACE_MODE_SGMII;
break;
+ case MDIO_PHYXS_VEND_IF_STATUS_TYPE_RXAUI:
+ phydev->interface = PHY_INTERFACE_MODE_RXAUI;
+ break;
case MDIO_PHYXS_VEND_IF_STATUS_TYPE_OCSGMII:
phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
break;
/* Check that the PHY interface type is compatible */
if (phydev->interface != PHY_INTERFACE_MODE_SGMII &&
+ phydev->interface != PHY_INTERFACE_MODE_1000BASEKX &&
phydev->interface != PHY_INTERFACE_MODE_2500BASEX &&
phydev->interface != PHY_INTERFACE_MODE_XGMII &&
phydev->interface != PHY_INTERFACE_MODE_USXGMII &&
phydev->interface != PHY_INTERFACE_MODE_10GKR &&
- phydev->interface != PHY_INTERFACE_MODE_10GBASER)
+ phydev->interface != PHY_INTERFACE_MODE_10GBASER &&
+ phydev->interface != PHY_INTERFACE_MODE_XAUI &&
+ phydev->interface != PHY_INTERFACE_MODE_RXAUI)
return -ENODEV;
WARN(phydev->interface == PHY_INTERFACE_MODE_XGMII,
return 0;
}
+static int aqr107_get_rate_matching(struct phy_device *phydev,
+ phy_interface_t iface)
+{
+ if (iface == PHY_INTERFACE_MODE_10GBASER ||
+ iface == PHY_INTERFACE_MODE_2500BASEX ||
+ iface == PHY_INTERFACE_MODE_NA)
+ return RATE_MATCH_PAUSE;
+ return RATE_MATCH_NONE;
+}
+
static int aqr107_suspend(struct phy_device *phydev)
{
int err;
PHY_ID_MATCH_MODEL(PHY_ID_AQR107),
.name = "Aquantia AQR107",
.probe = aqr107_probe,
+ .get_rate_matching = aqr107_get_rate_matching,
.config_init = aqr107_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
PHY_ID_MATCH_MODEL(PHY_ID_AQCS109),
.name = "Aquantia AQCS109",
.probe = aqr107_probe,
+ .get_rate_matching = aqr107_get_rate_matching,
.config_init = aqcs109_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
PHY_ID_MATCH_MODEL(PHY_ID_AQR113C),
.name = "Aquantia AQR113C",
.probe = aqr107_probe,
+ .get_rate_matching = aqr107_get_rate_matching,
.config_init = aqr107_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
struct phy_device *phydev = ctx->phydev;
struct vsc8531_private *priv = phydev->priv;
- if (!flow) {
- flow = vsc8584_macsec_alloc_flow(priv, MACSEC_INGR);
- if (IS_ERR(flow))
- return PTR_ERR(flow);
-
- memcpy(flow->key, ctx->sa.key, priv->secy->key_len);
- }
-
flow->assoc_num = ctx->sa.assoc_num;
flow->rx_sa = ctx->sa.rx_sa;
static int __vsc8584_macsec_add_txsa(struct macsec_context *ctx,
struct macsec_flow *flow, bool update)
{
- struct phy_device *phydev = ctx->phydev;
- struct vsc8531_private *priv = phydev->priv;
-
- if (!flow) {
- flow = vsc8584_macsec_alloc_flow(priv, MACSEC_EGR);
- if (IS_ERR(flow))
- return PTR_ERR(flow);
-
- memcpy(flow->key, ctx->sa.key, priv->secy->key_len);
- }
-
flow->assoc_num = ctx->sa.assoc_num;
flow->tx_sa = ctx->sa.tx_sa;
/* Always match untagged packets on egress */
flow->match.untagged = 1;
- return vsc8584_macsec_add_flow(phydev, flow, update);
+ return vsc8584_macsec_add_flow(ctx->phydev, flow, update);
}
static int vsc8584_macsec_dev_open(struct macsec_context *ctx)
struct vsc8531_private *priv = ctx->phydev->priv;
struct macsec_flow *flow, *tmp;
- /* No operation to perform before the commit step */
- if (ctx->prepare)
- return 0;
-
list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list)
vsc8584_macsec_flow_enable(ctx->phydev, flow);
struct vsc8531_private *priv = ctx->phydev->priv;
struct macsec_flow *flow, *tmp;
- /* No operation to perform before the commit step */
- if (ctx->prepare)
- return 0;
-
list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list)
vsc8584_macsec_flow_disable(ctx->phydev, flow);
struct vsc8531_private *priv = ctx->phydev->priv;
struct macsec_secy *secy = ctx->secy;
- if (ctx->prepare) {
- if (priv->secy)
- return -EEXIST;
-
- return 0;
- }
+ if (priv->secy)
+ return -EEXIST;
priv->secy = secy;
struct vsc8531_private *priv = ctx->phydev->priv;
struct macsec_flow *flow, *tmp;
- /* No operation to perform before the commit step */
- if (ctx->prepare)
- return 0;
-
list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list)
vsc8584_macsec_del_flow(ctx->phydev, flow);
static int vsc8584_macsec_upd_secy(struct macsec_context *ctx)
{
- /* No operation to perform before the commit step */
- if (ctx->prepare)
- return 0;
-
vsc8584_macsec_del_secy(ctx);
return vsc8584_macsec_add_secy(ctx);
}
struct vsc8531_private *priv = ctx->phydev->priv;
struct macsec_flow *flow, *tmp;
- /* No operation to perform before the commit step */
- if (ctx->prepare)
- return 0;
-
list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list) {
if (flow->bank == MACSEC_INGR && flow->rx_sa &&
flow->rx_sa->sc->sci == ctx->rx_sc->sci)
static int vsc8584_macsec_add_rxsa(struct macsec_context *ctx)
{
- struct macsec_flow *flow = NULL;
-
- if (ctx->prepare)
- return __vsc8584_macsec_add_rxsa(ctx, flow, false);
+ struct phy_device *phydev = ctx->phydev;
+ struct vsc8531_private *priv = phydev->priv;
+ struct macsec_flow *flow;
+ int ret;
- flow = vsc8584_macsec_find_flow(ctx, MACSEC_INGR);
+ flow = vsc8584_macsec_alloc_flow(priv, MACSEC_INGR);
if (IS_ERR(flow))
return PTR_ERR(flow);
- vsc8584_macsec_flow_enable(ctx->phydev, flow);
+ memcpy(flow->key, ctx->sa.key, priv->secy->key_len);
+
+ ret = __vsc8584_macsec_add_rxsa(ctx, flow, false);
+ if (ret)
+ return ret;
+
+ vsc8584_macsec_flow_enable(phydev, flow);
return 0;
}
static int vsc8584_macsec_upd_rxsa(struct macsec_context *ctx)
{
struct macsec_flow *flow;
+ int ret;
flow = vsc8584_macsec_find_flow(ctx, MACSEC_INGR);
if (IS_ERR(flow))
return PTR_ERR(flow);
- if (ctx->prepare) {
- /* Make sure the flow is disabled before updating it */
- vsc8584_macsec_flow_disable(ctx->phydev, flow);
+ /* Make sure the flow is disabled before updating it */
+ vsc8584_macsec_flow_disable(ctx->phydev, flow);
- return __vsc8584_macsec_add_rxsa(ctx, flow, true);
- }
+ ret = __vsc8584_macsec_add_rxsa(ctx, flow, true);
+ if (ret)
+ return ret;
vsc8584_macsec_flow_enable(ctx->phydev, flow);
return 0;
struct macsec_flow *flow;
flow = vsc8584_macsec_find_flow(ctx, MACSEC_INGR);
-
if (IS_ERR(flow))
return PTR_ERR(flow);
- if (ctx->prepare)
- return 0;
vsc8584_macsec_del_flow(ctx->phydev, flow);
return 0;
static int vsc8584_macsec_add_txsa(struct macsec_context *ctx)
{
- struct macsec_flow *flow = NULL;
-
- if (ctx->prepare)
- return __vsc8584_macsec_add_txsa(ctx, flow, false);
+ struct phy_device *phydev = ctx->phydev;
+ struct vsc8531_private *priv = phydev->priv;
+ struct macsec_flow *flow;
+ int ret;
- flow = vsc8584_macsec_find_flow(ctx, MACSEC_EGR);
+ flow = vsc8584_macsec_alloc_flow(priv, MACSEC_EGR);
if (IS_ERR(flow))
return PTR_ERR(flow);
- vsc8584_macsec_flow_enable(ctx->phydev, flow);
+ memcpy(flow->key, ctx->sa.key, priv->secy->key_len);
+
+ ret = __vsc8584_macsec_add_txsa(ctx, flow, false);
+ if (ret)
+ return ret;
+
+ vsc8584_macsec_flow_enable(phydev, flow);
return 0;
}
static int vsc8584_macsec_upd_txsa(struct macsec_context *ctx)
{
struct macsec_flow *flow;
+ int ret;
flow = vsc8584_macsec_find_flow(ctx, MACSEC_EGR);
if (IS_ERR(flow))
return PTR_ERR(flow);
- if (ctx->prepare) {
- /* Make sure the flow is disabled before updating it */
- vsc8584_macsec_flow_disable(ctx->phydev, flow);
+ /* Make sure the flow is disabled before updating it */
+ vsc8584_macsec_flow_disable(ctx->phydev, flow);
- return __vsc8584_macsec_add_txsa(ctx, flow, true);
- }
+ ret = __vsc8584_macsec_add_txsa(ctx, flow, true);
+ if (ret)
+ return ret;
vsc8584_macsec_flow_enable(ctx->phydev, flow);
return 0;
struct macsec_flow *flow;
flow = vsc8584_macsec_find_flow(ctx, MACSEC_EGR);
-
if (IS_ERR(flow))
return PTR_ERR(flow);
- if (ctx->prepare)
- return 0;
vsc8584_macsec_del_flow(ctx->phydev, flow);
return 0;
EXPORT_SYMBOL_GPL(phy_duplex_to_str);
/**
+ * phy_rate_matching_to_str - Return a string describing the rate matching
+ *
+ * @rate_matching: Type of rate matching to describe
+ */
+const char *phy_rate_matching_to_str(int rate_matching)
+{
+ switch (rate_matching) {
+ case RATE_MATCH_NONE:
+ return "none";
+ case RATE_MATCH_PAUSE:
+ return "pause";
+ case RATE_MATCH_CRS:
+ return "crs";
+ case RATE_MATCH_OPEN_LOOP:
+ return "open-loop";
+ }
+ return "Unsupported (update phy-core.c)";
+}
+EXPORT_SYMBOL_GPL(phy_rate_matching_to_str);
+
+/**
* phy_interface_num_ports - Return the number of links that can be carried by
* a given MAC-PHY physical link. Returns 0 if this is
* unknown, the number of links else.
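
A one-line usage sketch for the new helper (the surrounding context is
assumed), e.g. when logging link state from a PHY driver:

	phydev_info(phydev, "rate matching: %s\n",
		    phy_rate_matching_to_str(phydev->rate_matching));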
EXPORT_SYMBOL(phy_print_status);
/**
+ * phy_get_rate_matching - determine if rate matching is supported
+ * @phydev: The phy device to return rate matching for
+ * @iface: The interface mode to use
+ *
+ * This determines the type of rate matching (if any) that @phydev supports
+ * using @iface. @iface may be %PHY_INTERFACE_MODE_NA to determine if any
+ * interface supports rate matching.
+ *
+ * Return: The type of rate matching @phydev supports for @iface, or
+ * %RATE_MATCH_NONE.
+ */
+int phy_get_rate_matching(struct phy_device *phydev,
+ phy_interface_t iface)
+{
+ int ret = RATE_MATCH_NONE;
+
+ if (phydev->drv->get_rate_matching) {
+ mutex_lock(&phydev->lock);
+ ret = phydev->drv->get_rate_matching(phydev, iface);
+ mutex_unlock(&phydev->lock);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(phy_get_rate_matching);
+
+/**
* phy_config_interrupt - configure the PHY device for the requested interrupts
* @phydev: the phy_device struct
* @interrupts: interrupt flags to configure for this @phydev
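
A consumer-side sketch (variable names assumed): before deciding which link
modes to advertise, a MAC driver can ask whether the PHY rate-matches on its
interface:

	int rm = phy_get_rate_matching(phydev, PHY_INTERFACE_MODE_10GBASER);

	if (rm == RATE_MATCH_PAUSE)
		advertise_lower_speeds = true;	/* hypothetical flag */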
cmd->base.duplex = phydev->duplex;
cmd->base.master_slave_cfg = phydev->master_slave_get;
cmd->base.master_slave_state = phydev->master_slave_state;
+ cmd->base.rate_matching = phydev->rate_matching;
if (phydev->interface == PHY_INTERFACE_MODE_MOCA)
cmd->base.port = PORT_BNC;
else
return mode < ARRAY_SIZE(modestr) ? modestr[mode] : "unknown";
}
-static void phylink_caps_to_linkmodes(unsigned long *linkmodes,
- unsigned long caps)
+/**
+ * phylink_interface_max_speed() - get the maximum speed of a phy interface
+ * @interface: phy interface mode defined by &typedef phy_interface_t
+ *
+ * Determine the maximum speed of a phy interface. This is intended to help
+ * determine the correct speed to pass to the MAC when the phy is performing
+ * rate matching.
+ *
+ * Return: The maximum speed of @interface
+ */
+static int phylink_interface_max_speed(phy_interface_t interface)
+{
+ switch (interface) {
+ case PHY_INTERFACE_MODE_100BASEX:
+ case PHY_INTERFACE_MODE_REVRMII:
+ case PHY_INTERFACE_MODE_RMII:
+ case PHY_INTERFACE_MODE_SMII:
+ case PHY_INTERFACE_MODE_REVMII:
+ case PHY_INTERFACE_MODE_MII:
+ return SPEED_100;
+
+ case PHY_INTERFACE_MODE_TBI:
+ case PHY_INTERFACE_MODE_MOCA:
+ case PHY_INTERFACE_MODE_RTBI:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_1000BASEKX:
+ case PHY_INTERFACE_MODE_TRGMII:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_GMII:
+ return SPEED_1000;
+
+ case PHY_INTERFACE_MODE_2500BASEX:
+ return SPEED_2500;
+
+ case PHY_INTERFACE_MODE_5GBASER:
+ return SPEED_5000;
+
+ case PHY_INTERFACE_MODE_XGMII:
+ case PHY_INTERFACE_MODE_RXAUI:
+ case PHY_INTERFACE_MODE_XAUI:
+ case PHY_INTERFACE_MODE_10GBASER:
+ case PHY_INTERFACE_MODE_10GKR:
+ case PHY_INTERFACE_MODE_USXGMII:
+ case PHY_INTERFACE_MODE_QUSGMII:
+ return SPEED_10000;
+
+ case PHY_INTERFACE_MODE_25GBASER:
+ return SPEED_25000;
+
+ case PHY_INTERFACE_MODE_XLGMII:
+ return SPEED_40000;
+
+ case PHY_INTERFACE_MODE_INTERNAL:
+ case PHY_INTERFACE_MODE_NA:
+ case PHY_INTERFACE_MODE_MAX:
+ /* No idea! Garbage in, unknown out */
+ return SPEED_UNKNOWN;
+ }
+
+ /* If we get here, someone forgot to add an interface mode above */
+ WARN_ON_ONCE(1);
+ return SPEED_UNKNOWN;
+}
+
+/**
+ * phylink_caps_to_linkmodes() - Convert capabilities to ethtool link modes
+ * @linkmodes: ethtool linkmode mask (must be already initialised)
+ * @caps: bitmask of MAC capabilities
+ *
+ * Set all possible pause, speed and duplex linkmodes in @linkmodes that are
+ * supported by the @caps. @linkmodes must have been initialised previously.
+ */
+void phylink_caps_to_linkmodes(unsigned long *linkmodes, unsigned long caps)
{
if (caps & MAC_SYM_PAUSE)
__set_bit(ETHTOOL_LINK_MODE_Pause_BIT, linkmodes);
__set_bit(ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT, linkmodes);
}
}
+EXPORT_SYMBOL_GPL(phylink_caps_to_linkmodes);
+
+static struct {
+ unsigned long mask;
+ int speed;
+ unsigned int duplex;
+} phylink_caps_params[] = {
+ { MAC_400000FD, SPEED_400000, DUPLEX_FULL },
+ { MAC_200000FD, SPEED_200000, DUPLEX_FULL },
+ { MAC_100000FD, SPEED_100000, DUPLEX_FULL },
+ { MAC_56000FD, SPEED_56000, DUPLEX_FULL },
+ { MAC_50000FD, SPEED_50000, DUPLEX_FULL },
+ { MAC_40000FD, SPEED_40000, DUPLEX_FULL },
+ { MAC_25000FD, SPEED_25000, DUPLEX_FULL },
+ { MAC_20000FD, SPEED_20000, DUPLEX_FULL },
+ { MAC_10000FD, SPEED_10000, DUPLEX_FULL },
+ { MAC_5000FD, SPEED_5000, DUPLEX_FULL },
+ { MAC_2500FD, SPEED_2500, DUPLEX_FULL },
+ { MAC_1000FD, SPEED_1000, DUPLEX_FULL },
+ { MAC_1000HD, SPEED_1000, DUPLEX_HALF },
+ { MAC_100FD, SPEED_100, DUPLEX_FULL },
+ { MAC_100HD, SPEED_100, DUPLEX_HALF },
+ { MAC_10FD, SPEED_10, DUPLEX_FULL },
+ { MAC_10HD, SPEED_10, DUPLEX_HALF },
+};
/**
- * phylink_get_linkmodes() - get acceptable link modes
- * @linkmodes: ethtool linkmode mask (must be already initialised)
+ * phylink_cap_from_speed_duplex - Get mac capability from speed/duplex
+ * @speed: the speed to search for
+ * @duplex: the duplex to search for
+ *
+ * Find the mac capability for a given speed and duplex.
+ *
+ * Return: A mask with the mac capability matching @speed and @duplex, or 0 if
+ * there were no matches.
+ */
+static unsigned long phylink_cap_from_speed_duplex(int speed,
+ unsigned int duplex)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(phylink_caps_params); i++) {
+ if (speed == phylink_caps_params[i].speed &&
+ duplex == phylink_caps_params[i].duplex)
+ return phylink_caps_params[i].mask;
+ }
+
+ return 0;
+}
+
+/**
+ * phylink_get_capabilities() - get capabilities for a given MAC
* @interface: phy interface mode defined by &typedef phy_interface_t
* @mac_capabilities: bitmask of MAC capabilities
+ * @rate_matching: type of rate matching being performed
*
- * Set all possible pause, speed and duplex linkmodes in @linkmodes that
- * are supported by the @interface mode and @mac_capabilities. @linkmodes
- * must have been initialised previously.
+ * Get the MAC capabilities that are supported by the @interface mode and
+ * @mac_capabilities.
*/
-void phylink_get_linkmodes(unsigned long *linkmodes, phy_interface_t interface,
- unsigned long mac_capabilities)
+unsigned long phylink_get_capabilities(phy_interface_t interface,
+ unsigned long mac_capabilities,
+ int rate_matching)
{
+ int max_speed = phylink_interface_max_speed(interface);
unsigned long caps = MAC_SYM_PAUSE | MAC_ASYM_PAUSE;
+ unsigned long matched_caps = 0;
switch (interface) {
case PHY_INTERFACE_MODE_USXGMII:
break;
}
- phylink_caps_to_linkmodes(linkmodes, caps & mac_capabilities);
+ switch (rate_matching) {
+ case RATE_MATCH_OPEN_LOOP:
+ /* TODO */
+ fallthrough;
+ case RATE_MATCH_NONE:
+ matched_caps = 0;
+ break;
+ case RATE_MATCH_PAUSE: {
+ /* The MAC must support asymmetric pause towards the local
+ * device for this. We could allow just symmetric pause, but
+ * then we might have to renegotiate if the link partner
+ * doesn't support pause. This is because there's no way to
+ * accept pause frames without transmitting them if we only
+ * support symmetric pause.
+ */
+ if (!(mac_capabilities & MAC_SYM_PAUSE) ||
+ !(mac_capabilities & MAC_ASYM_PAUSE))
+ break;
+
+ /* We can't adapt if the MAC doesn't support the interface's
+ * max speed at full duplex.
+ */
+ if (mac_capabilities &
+ phylink_cap_from_speed_duplex(max_speed, DUPLEX_FULL)) {
+ /* Although a duplex-matching phy might exist, we
+ * conservatively remove these modes because the MAC
+ * will not be aware of the half-duplex nature of the
+ * link.
+ */
+ matched_caps = GENMASK(__fls(caps), __fls(MAC_10HD));
+ matched_caps &= ~(MAC_1000HD | MAC_100HD | MAC_10HD);
+ }
+ break;
+ }
+ case RATE_MATCH_CRS:
+ /* The MAC must support half duplex at the interface's max
+ * speed.
+ */
+ if (mac_capabilities &
+ phylink_cap_from_speed_duplex(max_speed, DUPLEX_HALF)) {
+ matched_caps = GENMASK(__fls(caps), __fls(MAC_10HD));
+ matched_caps &= mac_capabilities;
+ }
+ break;
+ }
+
+ return (caps & mac_capabilities) | matched_caps;
}
-EXPORT_SYMBOL_GPL(phylink_get_linkmodes);
+EXPORT_SYMBOL_GPL(phylink_get_capabilities);
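
A worked call of the renamed helper (the capability mix is illustrative): a MAC
limited to 10G full duplex on 10GBASE-R, fronted by a PHY doing pause-based
rate matching, now also picks up the slower full-duplex capabilities:

	unsigned long caps;

	caps = phylink_get_capabilities(PHY_INTERFACE_MODE_10GBASER,
					MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
					MAC_10000FD, RATE_MATCH_PAUSE);
	/* caps now spans MAC_10FD through MAC_10000FD; the half-duplex bits
	 * stay clear because the MAC cannot see the half-duplex medium.
	 */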
/**
* phylink_generic_validate() - generic validate() callback implementation
struct phylink_link_state *state)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+ unsigned long caps;
phylink_set_port_modes(mask);
phylink_set(mask, Autoneg);
- phylink_get_linkmodes(mask, state->interface, config->mac_capabilities);
+ caps = phylink_get_capabilities(state->interface,
+ config->mac_capabilities,
+ state->rate_matching);
+ phylink_caps_to_linkmodes(mask, caps);
linkmode_and(supported, supported, mask);
linkmode_and(state->advertising, state->advertising, mask);
const struct phylink_link_state *state)
{
phylink_dbg(pl,
- "%s: mode=%s/%s/%s/%s adv=%*pb pause=%02x link=%u an=%u\n",
+ "%s: mode=%s/%s/%s/%s/%s adv=%*pb pause=%02x link=%u an=%u\n",
__func__, phylink_an_mode_str(pl->cur_link_an_mode),
phy_modes(state->interface),
phy_speed_to_str(state->speed),
phy_duplex_to_str(state->duplex),
+ phy_rate_matching_to_str(state->rate_matching),
__ETHTOOL_LINK_MODE_MASK_NBITS, state->advertising,
state->pause, state->link, state->an_enabled);
linkmode_zero(state->lp_advertising);
state->interface = pl->link_config.interface;
state->an_enabled = pl->link_config.an_enabled;
- if (state->an_enabled) {
+ state->rate_matching = pl->link_config.rate_matching;
+ if (state->an_enabled) {
state->speed = SPEED_UNKNOWN;
state->duplex = DUPLEX_UNKNOWN;
state->pause = MLO_PAUSE_NONE;
struct phylink_link_state link_state)
{
struct net_device *ndev = pl->netdev;
+ int speed, duplex;
+ bool rx_pause;
+
+ speed = link_state.speed;
+ duplex = link_state.duplex;
+ rx_pause = !!(link_state.pause & MLO_PAUSE_RX);
+
+ switch (link_state.rate_matching) {
+ case RATE_MATCH_PAUSE:
+ /* The PHY is doing rate matching from the media rate (in
+ * the link_state) to the interface speed, and will send
+ * pause frames to the MAC to limit its transmission speed.
+ */
+ speed = phylink_interface_max_speed(link_state.interface);
+ duplex = DUPLEX_FULL;
+ rx_pause = true;
+ break;
+
+ case RATE_MATCH_CRS:
+ /* The PHY is doing rate matching from the media rate (in
+ * the link_state) to the interface speed, and will cause
+ * collisions to the MAC to limit its transmission speed.
+ */
+ speed = phylink_interface_max_speed(link_state.interface);
+ duplex = DUPLEX_HALF;
+ break;
+ }
pl->cur_interface = link_state.interface;
if (pl->pcs && pl->pcs->ops->pcs_link_up)
pl->pcs->ops->pcs_link_up(pl->pcs, pl->cur_link_an_mode,
- pl->cur_interface,
- link_state.speed, link_state.duplex);
+ pl->cur_interface, speed, duplex);
- pl->mac_ops->mac_link_up(pl->config, pl->phydev,
- pl->cur_link_an_mode, pl->cur_interface,
- link_state.speed, link_state.duplex,
- !!(link_state.pause & MLO_PAUSE_TX),
- !!(link_state.pause & MLO_PAUSE_RX));
+ pl->mac_ops->mac_link_up(pl->config, pl->phydev, pl->cur_link_an_mode,
+ pl->cur_interface, speed, duplex,
+ !!(link_state.pause & MLO_PAUSE_TX), rx_pause);
if (ndev)
netif_carrier_on(ndev);
}
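
Concretely (a sketch with assumed variable names): for a 1 Gbit/s medium link
carried over 10GBASE-R with pause rate matching, the MAC is brought up at the
interface speed, full duplex, with receive pause forced on:

	pl->mac_ops->mac_link_up(pl->config, pl->phydev, pl->cur_link_an_mode,
				 PHY_INTERFACE_MODE_10GBASER, SPEED_10000,
				 DUPLEX_FULL, tx_pause, true);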
link_state.interface = pl->phy_state.interface;
+ /* If we are doing rate matching, then the
+ * link speed/duplex comes from the PHY
+ */
+ if (pl->phy_state.rate_matching) {
+ link_state.rate_matching =
+ pl->phy_state.rate_matching;
+ link_state.speed = pl->phy_state.speed;
+ link_state.duplex =
+ pl->phy_state.duplex;
+ }
+
/* If we have a PHY, we need to update with
* the PHY flow control bits.
*/
mutex_lock(&pl->state_mutex);
pl->phy_state.speed = phydev->speed;
pl->phy_state.duplex = phydev->duplex;
+ pl->phy_state.rate_matching = phydev->rate_matching;
pl->phy_state.pause = MLO_PAUSE_NONE;
if (tx_pause)
pl->phy_state.pause |= MLO_PAUSE_TX;
phylink_run_resolve(pl);
- phylink_dbg(pl, "phy link %s %s/%s/%s/%s\n", up ? "up" : "down",
+ phylink_dbg(pl, "phy link %s %s/%s/%s/%s/%s\n", up ? "up" : "down",
phy_modes(phydev->interface),
phy_speed_to_str(phydev->speed),
phy_duplex_to_str(phydev->duplex),
+ phy_rate_matching_to_str(phydev->rate_matching),
phylink_pause_to_str(pl->phy_state.pause));
}
config.interface = PHY_INTERFACE_MODE_NA;
else
config.interface = interface;
+ config.rate_matching = phy_get_rate_matching(phy, config.interface);
ret = phylink_validate(pl, supported, &config);
if (ret) {
pl->phy_state.pause = MLO_PAUSE_NONE;
pl->phy_state.speed = SPEED_UNKNOWN;
pl->phy_state.duplex = DUPLEX_UNKNOWN;
+ pl->phy_state.rate_matching = RATE_MATCH_NONE;
linkmode_copy(pl->supported, supported);
linkmode_copy(pl->link_config.advertising, config.advertising);
{
phylink_merge_link_mode(kset->link_modes.advertising, state->advertising);
linkmode_copy(kset->link_modes.lp_advertising, state->lp_advertising);
- kset->base.speed = state->speed;
- kset->base.duplex = state->duplex;
+ if (kset->base.rate_matching == RATE_MATCH_NONE) {
+ kset->base.speed = state->speed;
+ kset->base.duplex = state->duplex;
+ }
kset->base.autoneg = state->an_enabled ? AUTONEG_ENABLE :
AUTONEG_DISABLE;
}
* queue-N.
*/
if (num_queues == 1) {
- xspath = kzalloc(strlen(dev->otherend) + 1, GFP_KERNEL);
+ xspath = kstrdup(dev->otherend, GFP_KERNEL);
if (!xspath) {
xenbus_dev_fatal(dev, -ENOMEM,
"reading ring references");
return -ENOMEM;
}
- strcpy(xspath, dev->otherend);
} else {
xspathsize = strlen(dev->otherend) + xenstore_path_ext_size;
xspath = kzalloc(xspathsize, GFP_KERNEL);
}
}
-
#define PHY_INIT_TIMEOUT 100000
#define PHY_FORCE_TIMEOUT 10
* @lp_advertising: Current link partner advertised linkmodes
* @eee_broken_modes: Energy efficient ethernet modes which should be prohibited
* @autoneg: Flag autoneg being used
+ * @rate_matching: Current rate matching mode
* @link: Current link state
* @autoneg_complete: Flag auto negotiation of the link has completed
* @mdix: Current crossover
unsigned irq_suspended:1;
unsigned irq_rerun:1;
+ int rate_matching;
+
enum phy_state state;
u32 dev_flags;
*/
int (*get_features)(struct phy_device *phydev);
+ /**
+ * @get_rate_matching: Get the supported type of rate matching for a
+ * particular phy interface. This is used by phy consumers to determine
+ * whether to advertise lower-speed modes for that interface. It is
+ * assumed that if a rate matching mode is supported on an interface,
+ * then that interface's rate can be adapted to all slower link speeds
+ * supported by the phy. If iface is %PHY_INTERFACE_MODE_NA, and the phy
+ * supports any kind of rate matching for any interface, then it must
+ * return that rate matching mode (preferring %RATE_MATCH_PAUSE to
+ * %RATE_MATCH_CRS). If the interface is not supported, this should
+ * return %RATE_MATCH_NONE.
+ */
+ int (*get_rate_matching)(struct phy_device *phydev,
+ phy_interface_t iface);
+
/* PHY Power Management */
/** @suspend: Suspend the hardware, saving state if needed */
int (*suspend)(struct phy_device *phydev);
const char *phy_speed_to_str(int speed);
const char *phy_duplex_to_str(unsigned int duplex);
+const char *phy_rate_matching_to_str(int rate_matching);
int phy_interface_num_ports(phy_interface_t interface);
void phy_request_interrupt(struct phy_device *phydev);
void phy_free_interrupt(struct phy_device *phydev);
void phy_print_status(struct phy_device *phydev);
+int phy_get_rate_matching(struct phy_device *phydev,
+ phy_interface_t iface);
void phy_set_max_speed(struct phy_device *phydev, u32 max_speed);
void phy_remove_link_mode(struct phy_device *phydev, u32 link_mode);
void phy_advertise_supported(struct phy_device *phydev);
MLO_AN_FIXED, /* Fixed-link mode */
MLO_AN_INBAND, /* In-band protocol */
+ /* MAC_SYM_PAUSE and MAC_ASYM_PAUSE are used when configuring our
+ * autonegotiation advertisement. They correspond to the PAUSE and
+ * ASM_DIR bits defined by 802.3, respectively.
+ *
+ * The following table lists the values of tx_pause and rx_pause which
+ * might be requested in mac_link_up. The exact values depend on either
+ * the results of autonegotiation (if MLO_PAUSE_AN is set) or user
+ * configuration (if MLO_PAUSE_AN is not set).
+ *
+ * MAC_SYM_PAUSE MAC_ASYM_PAUSE MLO_PAUSE_AN tx_pause/rx_pause
+ * ============= ============== ============ ==================
+ * 0 0 0 0/0
+ * 0 0 1 0/0
+ * 0 1 0 0/0, 0/1, 1/0, 1/1
+ * 0 1 1 0/0, 1/0
+ * 1 0 0 0/0, 1/1
+ * 1 0 1 0/0, 1/1
+ * 1 1 0 0/0, 0/1, 1/0, 1/1
+ * 1 1 1 0/0, 0/1, 1/1
+ *
+ * If you set MAC_ASYM_PAUSE, the user may request any combination of
+ * tx_pause and rx_pause. You do not have to support these
+ * combinations.
+ *
+ * However, you should support combinations of tx_pause and rx_pause
+ * which might be the result of autonegotiation. For example, don't set
+ * MAC_SYM_PAUSE unless your device can support tx_pause and rx_pause
+ * at the same time.
+ */
MAC_SYM_PAUSE = BIT(0),
MAC_ASYM_PAUSE = BIT(1),
MAC_10HD = BIT(2),
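
Reading the table above: a MAC that can enable tx and rx pause independently
should set both bits, while one that can only do symmetric flow control sets
MAC_SYM_PAUSE alone. A minimal sketch (the capability mix is illustrative):

	/* MAC with independent tx/rx pause plus 10/100/1000 support. */
	config->mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
				   MAC_10 | MAC_100 | MAC_1000FD;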
* @speed: link speed, one of the SPEED_* constants.
* @duplex: link duplex mode, one of DUPLEX_* constants.
* @pause: link pause state, described by MLO_PAUSE_* constants.
+ * @rate_matching: rate matching being performed, one of the RATE_MATCH_*
+ * constants. If rate matching is taking place, then the speed/duplex of
+ * the medium link mode (@speed and @duplex) and the speed/duplex of the phy
+ * interface mode (@interface) are different.
* @link: true if the link is up.
* @an_enabled: true if autonegotiation is enabled/desired.
* @an_complete: true if autonegotiation has completed.
int speed;
int duplex;
int pause;
+ int rate_matching;
unsigned int link:1;
unsigned int an_enabled:1;
unsigned int an_complete:1;
phy_interface_t interface, int speed, int duplex);
#endif
-void phylink_get_linkmodes(unsigned long *linkmodes, phy_interface_t interface,
- unsigned long mac_capabilities);
+void phylink_caps_to_linkmodes(unsigned long *linkmodes, unsigned long caps);
+unsigned long phylink_get_capabilities(phy_interface_t interface,
+ unsigned long mac_capabilities,
+ int rate_matching);
void phylink_generic_validate(struct phylink_config *config,
unsigned long *supported,
struct phylink_link_state *state);
struct macsec_rx_sa_stats *rx_sa_stats;
struct macsec_dev_stats *dev_stats;
} stats;
-
- u8 prepare:1;
};
/**
#define MASTER_SLAVE_STATE_SLAVE 3
#define MASTER_SLAVE_STATE_ERR 4
+/* These are used to throttle the rate of data on the phy interface when the
+ * native speed of the interface is higher than the link speed. These should
+ * not be used for phy interfaces which natively support multiple speeds (e.g.
+ * MII or SGMII).
+ */
+/* No rate matching performed. */
+#define RATE_MATCH_NONE 0
+/* The phy sends pause frames to throttle the MAC. */
+#define RATE_MATCH_PAUSE 1
+/* The phy asserts CRS to prevent the MAC from transmitting. */
+#define RATE_MATCH_CRS 2
+/* The MAC is programmed with a sufficiently-large IPG. */
+#define RATE_MATCH_OPEN_LOOP 3
+
/* Which connector port. */
#define PORT_TP 0x00
#define PORT_AUI 0x01
* reported consistently by PHYLIB. Read-only.
* @master_slave_cfg: Master/slave port mode.
* @master_slave_state: Master/slave port state.
+ * @rate_matching: Rate adaptation performed by the PHY
* @reserved: Reserved for future use; see the note on reserved space.
- * @reserved1: Reserved for future use; see the note on reserved space.
* @link_mode_masks: Variable length bitmaps.
*
* If autonegotiation is disabled, the speed and @duplex represent the
__u8 transceiver;
__u8 master_slave_cfg;
__u8 master_slave_state;
- __u8 reserved1[1];
+ __u8 rate_matching;
__u32 reserved[7];
__u32 link_mode_masks[];
/* layout of link_mode_masks fields:
ETHTOOL_A_LINKMODES_MASTER_SLAVE_CFG, /* u8 */
ETHTOOL_A_LINKMODES_MASTER_SLAVE_STATE, /* u8 */
ETHTOOL_A_LINKMODES_LANES, /* u32 */
+ ETHTOOL_A_LINKMODES_RATE_MATCHING, /* u8 */
/* add new constants above here */
__ETHTOOL_A_LINKMODES_CNT,
= __ETHTOOL_LINK_MODE_MASK_NU32;
link_ksettings.base.master_slave_cfg = MASTER_SLAVE_CFG_UNSUPPORTED;
link_ksettings.base.master_slave_state = MASTER_SLAVE_STATE_UNSUPPORTED;
+ link_ksettings.base.rate_matching = RATE_MATCH_NONE;
return store_link_ksettings_for_user(useraddr, &link_ksettings);
}
+ nla_total_size(sizeof(u32)) /* LINKMODES_SPEED */
+ nla_total_size(sizeof(u32)) /* LINKMODES_LANES */
+ nla_total_size(sizeof(u8)) /* LINKMODES_DUPLEX */
+ + nla_total_size(sizeof(u8)) /* LINKMODES_RATE_MATCHING */
+ 0;
ret = ethnl_bitset_size(ksettings->link_modes.advertising,
ksettings->link_modes.supported,
lsettings->master_slave_state))
return -EMSGSIZE;
+ if (nla_put_u8(skb, ETHTOOL_A_LINKMODES_RATE_MATCHING,
+ lsettings->rate_matching))
+ return -EMSGSIZE;
+
return 0;
}
TEST_PROGS := \
bond-arp-interval-causes-panic.sh \
bond-break-lacpdu-tx.sh \
+ bond-lladdr-target.sh \
dev_addr_lists.sh
TEST_FILES := lag_lib.sh