Add support for offloading ets qdisc to sparx5 switch.
The ets qdisc makes it possible to configure a mix of strict and
bandwidth-sharing bands. The ets qdisc must be attached as a root qdisc.
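For example, with four strict and four bandwidth-sharing bands, an
offloadable configuration could look like this (device name and quanta
values are illustrative):

  tc qdisc replace dev eth0 root handle 1: ets bands 8 strict 4 \
     quanta 1000 2000 3000 4000 priomap 7 6 5 4 3 2 1 0

The driver accepts exactly eight bands and the reverse priomap shown
above.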
Signed-off-by: Daniel Machon <daniel.machon@microchip.com>
Signed-off-by: Steen Hegelund <steen.hegelund@microchip.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT_ENA_GET(x)\
FIELD_GET(HSCH_SE_DLB_SENSE_SE_DLB_DPORT_ENA, x)
+/* HSCH:HSCH_DWRR:DWRR_ENTRY */
+#define HSCH_DWRR_ENTRY(g) __REG(TARGET_HSCH, 0, 1, 162816, g, 72, 4, 0, 0, 1, 4)
+
+#define HSCH_DWRR_ENTRY_DWRR_COST GENMASK(24, 20)
+#define HSCH_DWRR_ENTRY_DWRR_COST_SET(x)\
+ FIELD_PREP(HSCH_DWRR_ENTRY_DWRR_COST, x)
+#define HSCH_DWRR_ENTRY_DWRR_COST_GET(x)\
+ FIELD_GET(HSCH_DWRR_ENTRY_DWRR_COST, x)
+
+#define HSCH_DWRR_ENTRY_DWRR_BALANCE GENMASK(19, 0)
+#define HSCH_DWRR_ENTRY_DWRR_BALANCE_SET(x)\
+ FIELD_PREP(HSCH_DWRR_ENTRY_DWRR_BALANCE, x)
+#define HSCH_DWRR_ENTRY_DWRR_BALANCE_GET(x)\
+ FIELD_GET(HSCH_DWRR_ENTRY_DWRR_BALANCE, x)
+
/* HSCH:HSCH_MISC:HSCH_CFG_CFG */
#define HSCH_HSCH_CFG_CFG __REG(TARGET_HSCH, 0, 1, 163104, 0, 1, 648, 284, 0, 1, 4)
return 0;
}
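+/* Convert a relative dwrr weight into a hardware cost. A worked example:
+ * with SPX5_DWRR_COST_MAX = 63 (so 63 << 4 = 1008), a band whose weight
+ * equals weight_min gets cost ((1008 + 8) >> 4) - 1 = 62, while a band with
+ * twice the minimum weight gets ((504 + 8) >> 4) - 1 = 31; the cost scales
+ * inversely with the weight.
+ */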
+static u32 sparx5_weight_to_hw_cost(u32 weight_min, u32 weight)
+{
+ return ((((SPX5_DWRR_COST_MAX << 4) * weight_min / weight) + 8) >> 4) -
+ 1;
+}
+
+static int sparx5_dwrr_conf_set(struct sparx5_port *port,
+ struct sparx5_dwrr *dwrr)
+{
+ int i;
+
+ spx5_rmw(HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(2) |
+ HSCH_HSCH_CFG_CFG_CFG_SE_IDX_SET(port->portno),
+ HSCH_HSCH_CFG_CFG_HSCH_LAYER | HSCH_HSCH_CFG_CFG_CFG_SE_IDX,
+ port->sparx5, HSCH_HSCH_CFG_CFG);
+
+ /* Number of *lower* indexes that are arbitrated dwrr */
+ spx5_rmw(HSCH_SE_CFG_SE_DWRR_CNT_SET(dwrr->count),
+ HSCH_SE_CFG_SE_DWRR_CNT, port->sparx5,
+ HSCH_SE_CFG(port->portno));
+
+ for (i = 0; i < dwrr->count; i++) {
+ spx5_rmw(HSCH_DWRR_ENTRY_DWRR_COST_SET(dwrr->cost[i]),
+ HSCH_DWRR_ENTRY_DWRR_COST, port->sparx5,
+ HSCH_DWRR_ENTRY(i));
+ }
+
+ return 0;
+}
+
static int sparx5_leak_groups_init(struct sparx5 *sparx5)
{
struct sparx5_layer *layer;
return sparx5_shaper_conf_set(port, &sh, layer, idx, group);
}
+
+int sparx5_tc_ets_add(struct sparx5_port *port,
+ struct tc_ets_qopt_offload_replace_params *params)
+{
+ struct sparx5_dwrr dwrr = {0};
+ /* Smallest weight among the dwrr bands */
+ unsigned int w_min = 100;
+ int i;
+
+ /* Find minimum weight for all dwrr bands */
+ for (i = 0; i < SPX5_PRIOS; i++) {
+ if (params->quanta[i] == 0)
+ continue;
+ w_min = min(w_min, params->weights[i]);
+ }
+
+ for (i = 0; i < SPX5_PRIOS; i++) {
+ /* Strict band; skip */
+ if (params->quanta[i] == 0)
+ continue;
+
+ dwrr.count++;
+
+ /* On the sparx5, bands with higher indexes are preferred and
+ * arbitrated strict. Strict bands are put in the lower indexes
+ * by tc, so we reverse the bands here.
+ *
+ * Also convert the weight to something the hardware
+ * understands.
+ */
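+ /* For example: with four strict tc bands (0..3) and four dwrr bands
+ * (4..7), tc band 4 lands on hw input 3 and tc band 7 on hw input 0;
+ * dwrr.count ends up as 4, so hw inputs 0..3 are the dwrr-arbitrated
+ * ones.
+ */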
+ dwrr.cost[SPX5_PRIOS - i - 1] =
+ sparx5_weight_to_hw_cost(w_min, params->weights[i]);
+ }
+
+ return sparx5_dwrr_conf_set(port, &dwrr);
+}
+
+int sparx5_tc_ets_del(struct sparx5_port *port)
+{
+ struct sparx5_dwrr dwrr = {0};
+
+ return sparx5_dwrr_conf_set(port, &dwrr);
+}
#define SPX5_SE_BURST_MIN 1
#define SPX5_SE_BURST_UNIT 4096
+/* Dwrr */
+#define SPX5_DWRR_COST_MAX 63
+
struct sparx5_shaper {
u32 mode;
u32 rate;
struct sparx5_lg leak_groups[SPX5_HSCH_LEAK_GRP_CNT];
};
+struct sparx5_dwrr {
+ u32 count; /* Number of inputs running dwrr */
+ u8 cost[SPX5_PRIOS];
+};
+
int sparx5_qos_init(struct sparx5 *sparx5);
/* Multi-Queue Priority */
u32 layer, u32 idx);
int sparx5_tc_tbf_del(struct sparx5_port *port, u32 layer, u32 idx);
+/* Enhanced Transmission Selection */
+struct tc_ets_qopt_offload_replace_params;
+int sparx5_tc_ets_add(struct sparx5_port *port,
+ struct tc_ets_qopt_offload_replace_params *params);
+
+int sparx5_tc_ets_del(struct sparx5_port *port);
+
#endif /* __SPARX5_QOS_H__ */
return -EOPNOTSUPP;
}
+static int sparx5_tc_setup_qdisc_ets(struct net_device *ndev,
+ struct tc_ets_qopt_offload *qopt)
+{
+ struct tc_ets_qopt_offload_replace_params *params =
+ &qopt->replace_params;
+ struct sparx5_port *port = netdev_priv(ndev);
+ int i;
+
+ /* Only allow ets on ports */
+ if (qopt->parent != TC_H_ROOT)
+ return -EOPNOTSUPP;
+
+ switch (qopt->command) {
+ case TC_ETS_REPLACE:
+
+ /* We support eight priorities */
+ if (params->bands != SPX5_PRIOS)
+ return -EOPNOTSUPP;
+
+ /* Sanity checks */
+ for (i = 0; i < SPX5_PRIOS; ++i) {
+ /* Priority map is *always* reverse e.g: 7 6 5 .. 0 */
+ if (params->priomap[i] != (7 - i))
+ return -EOPNOTSUPP;
+ /* Throw an error if tc passes us a zero weight for a dwrr band */
+ if (params->quanta[i] && params->weights[i] == 0) {
+ pr_err("Invalid ets configuration; band %d has weight zero",
+ i);
+ return -EINVAL;
+ }
+ }
+
+ sparx5_tc_ets_add(port, params);
+ break;
+ case TC_ETS_DESTROY:
+
+ sparx5_tc_ets_del(port);
+
+ break;
+ case TC_ETS_GRAFT:
+ return -EOPNOTSUPP;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
int sparx5_port_setup_tc(struct net_device *ndev, enum tc_setup_type type,
void *type_data)
{
return sparx5_tc_setup_qdisc_mqprio(ndev, type_data);
case TC_SETUP_QDISC_TBF:
return sparx5_tc_setup_qdisc_tbf(ndev, type_data);
+ case TC_SETUP_QDISC_ETS:
+ return sparx5_tc_setup_qdisc_ets(ndev, type_data);
default:
return -EOPNOTSUPP;
}