1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2019 Chelsio Communications. All rights reserved. */
5 #include "cxgb4_tc_matchall.h"
8 #include "cxgb4_filter.h"
9 #include "cxgb4_tc_flower.h"
/* Validate an egress MATCHALL offload request before committing HW
 * resources.  Visible checks: at least one action, exactly one action,
 * no shared TC block, policing must be byte-rate based (packets-per-
 * second rejected), the policed rate must not exceed the link speed,
 * and none of the port's TX queues may already be bound to a scheduling
 * class of a level other than SCHED_CLASS_LEVEL_CH_RL.
 *
 * NOTE(review): this extract is missing interior lines (declarations of
 * ret/i/speed/max_link_rate, return statements, switch/brace scaffolding)
 * — comments below describe only the logic that is visible here.
 */
11 static int cxgb4_matchall_egress_validate(struct net_device *dev,
12 struct tc_cls_matchall_offload *cls)
14 struct netlink_ext_ack *extack = cls->common.extack;
15 struct flow_action *actions = &cls->rule->action;
16 struct port_info *pi = netdev2pinfo(dev);
17 struct flow_action_entry *entry;
18 struct ch_sched_queue qe;
19 struct sched_class *e;
/* Reject empty, multi-action, and shared-block requests up front. */
24 if (!flow_action_has_entries(actions)) {
25 NL_SET_ERR_MSG_MOD(extack,
26 "Egress MATCHALL offload needs at least 1 policing action");
28 } else if (!flow_offload_has_one_action(actions)) {
29 NL_SET_ERR_MSG_MOD(extack,
30 "Egress MATCHALL offload only supports 1 policing action");
32 } else if (pi->tc_block_shared) {
33 NL_SET_ERR_MSG_MOD(extack,
34 "Egress MATCHALL offload not supported with shared blocks");
/* Query the link's maximum speed so the policing rate can be bounded. */
38 ret = t4_get_link_params(pi, NULL, &speed, NULL);
40 NL_SET_ERR_MSG_MOD(extack,
41 "Failed to get max speed supported by the link");
45 /* Convert from Mbps to bps */
46 max_link_rate = (u64)speed * 1000 * 1000;
48 flow_action_for_each(i, entry, actions) {
50 case FLOW_ACTION_POLICE:
/* Hardware policing here is byte-rate only; pps policing is rejected. */
51 if (entry->police.rate_pkt_ps) {
52 NL_SET_ERR_MSG_MOD(extack,
53 "QoS offload not support packets per second");
56 /* Convert bytes per second to bits per second */
/* NOTE(review): rate_bytes_ps * 8 could overflow u64 for extreme
 * rates — presumably bounded by upper layers; confirm.
 */
57 if (entry->police.rate_bytes_ps * 8 > max_link_rate) {
58 NL_SET_ERR_MSG_MOD(extack,
59 "Specified policing max rate is larger than underlying link speed");
64 NL_SET_ERR_MSG_MOD(extack,
65 "Only policing action supported with Egress MATCHALL offload");
/* Ensure no queue of this port is already bound to an incompatible
 * (non channel-rate-limit) scheduling class.
 */
70 for (i = 0; i < pi->nqsets; i++) {
71 memset(&qe, 0, sizeof(qe));
74 e = cxgb4_sched_queue_lookup(dev, &qe);
75 if (e && e->info.u.params.level != SCHED_CLASS_LEVEL_CH_RL) {
76 NL_SET_ERR_MSG_MOD(extack,
77 "Some queues are already bound to different class");
/* Bind all of the port's TX queue sets to hardware traffic class @tc.
 * On a bind failure the visible unwind path re-binds already-bound
 * queues back to SCHED_CLS_NONE.
 * NOTE(review): interior lines (qe setup, error check, loop bounds of
 * the unwind) are missing from this extract.
 */
85 static int cxgb4_matchall_tc_bind_queues(struct net_device *dev, u32 tc)
87 struct port_info *pi = netdev2pinfo(dev);
88 struct ch_sched_queue qe;
92 for (i = 0; i < pi->nqsets; i++) {
95 ret = cxgb4_sched_class_bind(dev, &qe, SCHED_QUEUE);
/* Unwind: detach queues bound so far from any scheduling class. */
105 qe.class = SCHED_CLS_NONE;
106 cxgb4_sched_class_unbind(dev, &qe, SCHED_QUEUE);
/* Unbind every TX queue set of the port from its scheduling class by
 * re-binding to SCHED_CLS_NONE.  Counterpart of
 * cxgb4_matchall_tc_bind_queues().
 */
112 static void cxgb4_matchall_tc_unbind_queues(struct net_device *dev)
114 struct port_info *pi = netdev2pinfo(dev);
115 struct ch_sched_queue qe;
118 for (i = 0; i < pi->nqsets; i++) {
120 qe.class = SCHED_CLS_NONE;
121 cxgb4_sched_class_unbind(dev, &qe, SCHED_QUEUE);
/* Allocate a hardware traffic class for an egress MATCHALL policing
 * rule and bind the port's queues to it.  The class is a channel
 * rate-limit (CH_RL) class with an absolute bit-rate cap taken from
 * the rule's police action.  On success the class index, rule cookie
 * and ENABLED state are recorded in the per-port egress matchall state.
 * NOTE(review): interior lines (error checks, returns, braces) are
 * missing from this extract.
 */
125 static int cxgb4_matchall_alloc_tc(struct net_device *dev,
126 struct tc_cls_matchall_offload *cls)
128 struct ch_sched_params p = {
129 .type = SCHED_CLASS_TYPE_PACKET,
130 .u.params.level = SCHED_CLASS_LEVEL_CH_RL,
131 .u.params.mode = SCHED_CLASS_MODE_CLASS,
132 .u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS,
133 .u.params.ratemode = SCHED_CLASS_RATEMODE_ABS,
134 .u.params.class = SCHED_CLS_NONE,
135 .u.params.minrate = 0,
136 .u.params.weight = 0,
137 .u.params.pktsize = dev->mtu,
139 struct netlink_ext_ack *extack = cls->common.extack;
140 struct cxgb4_tc_port_matchall *tc_port_matchall;
141 struct port_info *pi = netdev2pinfo(dev);
142 struct adapter *adap = netdev2adap(dev);
143 struct flow_action_entry *entry;
144 struct sched_class *e;
148 tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
/* Locate the police action; validation already guaranteed its shape. */
150 flow_action_for_each(i, entry, &cls->rule->action)
151 if (entry->id == FLOW_ACTION_POLICE)
153 if (entry->police.rate_pkt_ps) {
154 NL_SET_ERR_MSG_MOD(extack,
155 "QoS offload not support packets per second");
158 /* Convert from bytes per second to Kbps */
159 p.u.params.maxrate = div_u64(entry->police.rate_bytes_ps * 8, 1000);
160 p.u.params.channel = pi->tx_chan;
161 e = cxgb4_sched_class_alloc(dev, &p);
163 NL_SET_ERR_MSG_MOD(extack,
164 "No free traffic class available for policing action");
168 ret = cxgb4_matchall_tc_bind_queues(dev, e->idx);
170 NL_SET_ERR_MSG_MOD(extack,
171 "Could not bind queues to traffic class");
/* Commit: remember the HW class, the TC cookie and enabled state. */
175 tc_port_matchall->egress.hwtc = e->idx;
176 tc_port_matchall->egress.cookie = cls->cookie;
177 tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_ENABLED;
/* Error path: release the freshly allocated scheduling class. */
181 cxgb4_sched_class_free(dev, e->idx);
/* Tear down the egress MATCHALL offload: unbind all queues, free the
 * hardware traffic class, and reset the per-port egress state to
 * DISABLED.  Inverse of cxgb4_matchall_alloc_tc().
 */
185 static void cxgb4_matchall_free_tc(struct net_device *dev)
187 struct cxgb4_tc_port_matchall *tc_port_matchall;
188 struct port_info *pi = netdev2pinfo(dev);
189 struct adapter *adap = netdev2adap(dev);
191 tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
192 cxgb4_matchall_tc_unbind_queues(dev);
193 cxgb4_sched_class_free(dev, tc_port_matchall->egress.hwtc);
195 tc_port_matchall->egress.hwtc = SCHED_CLS_NONE;
196 tc_port_matchall->egress.cookie = 0;
197 tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_DISABLED;
/* If the ingress MATCHALL rule contains a mirred (mirror) action,
 * allocate a port mirror VI and record its id in the per-port ingress
 * state.  Rules without a mirred action presumably succeed without
 * allocating anything — interior return lines are missing from this
 * extract, so confirm against the full source.
 */
200 static int cxgb4_matchall_mirror_alloc(struct net_device *dev,
201 struct tc_cls_matchall_offload *cls)
203 struct netlink_ext_ack *extack = cls->common.extack;
204 struct cxgb4_tc_port_matchall *tc_port_matchall;
205 struct port_info *pi = netdev2pinfo(dev);
206 struct adapter *adap = netdev2adap(dev);
207 struct flow_action_entry *act;
211 tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
212 flow_action_for_each(i, act, &cls->rule->action) {
213 if (act->id == FLOW_ACTION_MIRRED) {
214 ret = cxgb4_port_mirror_alloc(dev);
216 NL_SET_ERR_MSG_MOD(extack,
217 "Couldn't allocate mirror");
/* Remember the mirror VI id so teardown can detect/release it. */
221 tc_port_matchall->ingress.viid_mirror = pi->viid_mirror;
/* Release the port mirror VI if one was allocated for the ingress
 * MATCHALL rule; no-op when viid_mirror is 0 (none allocated).
 */
229 static void cxgb4_matchall_mirror_free(struct net_device *dev)
231 struct cxgb4_tc_port_matchall *tc_port_matchall;
232 struct port_info *pi = netdev2pinfo(dev);
233 struct adapter *adap = netdev2adap(dev);
235 tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
236 if (!tc_port_matchall->ingress.viid_mirror)
239 cxgb4_port_mirror_free(dev);
240 tc_port_matchall->ingress.viid_mirror = 0;
/* Delete the hardware filter installed for @filter_type (one entry per
 * IPv4/IPv6 type) and clear its recorded TID.  NOTE(review): the error
 * check between the delete and the TID reset is missing from this
 * extract.
 */
243 static int cxgb4_matchall_del_filter(struct net_device *dev, u8 filter_type)
245 struct cxgb4_tc_port_matchall *tc_port_matchall;
246 struct port_info *pi = netdev2pinfo(dev);
247 struct adapter *adap = netdev2adap(dev);
250 tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
251 ret = cxgb4_del_filter(dev, tc_port_matchall->ingress.tid[filter_type],
252 &tc_port_matchall->ingress.fs[filter_type]);
256 tc_port_matchall->ingress.tid[filter_type] = 0;
/* Install one hardware (LETCAM) filter for the ingress MATCHALL rule.
 * @filter_type selects the address family: non-zero maps to PF_INET6,
 * zero to PF_INET.  Builds a ch_filter_specification matching this
 * PF/VF, copies the rule's actions into it, programs the filter, and
 * records the allocated TID.  NOTE(review): interior lines (fidx error
 * check, hitcnts/prio setup, returns) are missing from this extract.
 */
260 static int cxgb4_matchall_add_filter(struct net_device *dev,
261 struct tc_cls_matchall_offload *cls,
264 struct netlink_ext_ack *extack = cls->common.extack;
265 struct cxgb4_tc_port_matchall *tc_port_matchall;
266 struct port_info *pi = netdev2pinfo(dev);
267 struct adapter *adap = netdev2adap(dev);
268 struct ch_filter_specification *fs;
271 /* Get a free filter entry TID, where we can insert this new
272 * rule. Only insert rule if its prio doesn't conflict with
275 fidx = cxgb4_get_free_ftid(dev, filter_type ? PF_INET6 : PF_INET,
276 false, cls->common.prio);
278 NL_SET_ERR_MSG_MOD(extack,
279 "No free LETCAM index available");
283 tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
284 fs = &tc_port_matchall->ingress.fs[filter_type];
285 memset(fs, 0, sizeof(*fs));
287 if (fidx < adap->tids.nhpftids)
289 fs->tc_prio = cls->common.prio;
290 fs->tc_cookie = cls->cookie;
291 fs->type = filter_type;
/* Match only traffic belonging to this port's PF/VF. */
294 fs->val.pfvf_vld = 1;
295 fs->val.pf = adap->pf;
296 fs->val.vf = pi->vin;
298 cxgb4_process_flow_actions(dev, &cls->rule->action, fs);
300 ret = cxgb4_set_filter(dev, fidx, fs);
304 tc_port_matchall->ingress.tid[filter_type] = fidx;
/* Set up the full ingress MATCHALL offload: allocate a mirror VI if the
 * rule needs one, then install one filter per filter type (IPv4/IPv6),
 * and mark the ingress state ENABLED.  The visible unwind path deletes
 * the filters added so far and frees the mirror.
 * NOTE(review): loop error checks and unwind loop bounds are missing
 * from this extract.
 */
308 static int cxgb4_matchall_alloc_filter(struct net_device *dev,
309 struct tc_cls_matchall_offload *cls)
311 struct cxgb4_tc_port_matchall *tc_port_matchall;
312 struct port_info *pi = netdev2pinfo(dev);
313 struct adapter *adap = netdev2adap(dev);
316 tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
318 ret = cxgb4_matchall_mirror_alloc(dev, cls);
322 for (i = 0; i < CXGB4_FILTER_TYPE_MAX; i++) {
323 ret = cxgb4_matchall_add_filter(dev, cls, i);
328 tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_ENABLED;
/* Error path: remove already-installed filters, then the mirror. */
333 cxgb4_matchall_del_filter(dev, i);
335 cxgb4_matchall_mirror_free(dev);
/* Tear down the ingress MATCHALL offload: delete every installed filter
 * type, free the mirror VI, zero the cached stats, and mark the ingress
 * state DISABLED.  Inverse of cxgb4_matchall_alloc_filter().
 * NOTE(review): per-iteration error handling is missing from this
 * extract.
 */
339 static int cxgb4_matchall_free_filter(struct net_device *dev)
341 struct cxgb4_tc_port_matchall *tc_port_matchall;
342 struct port_info *pi = netdev2pinfo(dev);
343 struct adapter *adap = netdev2adap(dev);
347 tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
349 for (i = 0; i < CXGB4_FILTER_TYPE_MAX; i++) {
350 ret = cxgb4_matchall_del_filter(dev, i);
355 cxgb4_matchall_mirror_free(dev);
/* Reset cached counters so a future offload starts from zero. */
357 tc_port_matchall->ingress.packets = 0;
358 tc_port_matchall->ingress.bytes = 0;
359 tc_port_matchall->ingress.last_used = 0;
360 tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_DISABLED;
/* Entry point for offloading a new MATCHALL rule on this port.  The
 * code visibly splits into an ingress path (validate flow actions, then
 * install filters) and an egress path (validate, then allocate a
 * policing traffic class); the ingress/egress selector itself is in
 * lines missing from this extract — presumably a third parameter.
 * Only one rule per direction may be offloaded at a time.
 */
364 int cxgb4_tc_matchall_replace(struct net_device *dev,
365 struct tc_cls_matchall_offload *cls_matchall,
368 struct netlink_ext_ack *extack = cls_matchall->common.extack;
369 struct cxgb4_tc_port_matchall *tc_port_matchall;
370 struct port_info *pi = netdev2pinfo(dev);
371 struct adapter *adap = netdev2adap(dev);
374 tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
/* Ingress path: refuse a second rule, validate, then install. */
376 if (tc_port_matchall->ingress.state ==
377 CXGB4_MATCHALL_STATE_ENABLED) {
378 NL_SET_ERR_MSG_MOD(extack,
379 "Only 1 Ingress MATCHALL can be offloaded");
383 ret = cxgb4_validate_flow_actions(dev,
384 &cls_matchall->rule->action,
389 return cxgb4_matchall_alloc_filter(dev, cls_matchall);
/* Egress path: refuse a second rule, validate, then allocate TC. */
392 if (tc_port_matchall->egress.state == CXGB4_MATCHALL_STATE_ENABLED) {
393 NL_SET_ERR_MSG_MOD(extack,
394 "Only 1 Egress MATCHALL can be offloaded");
398 ret = cxgb4_matchall_egress_validate(dev, cls_matchall);
402 return cxgb4_matchall_alloc_tc(dev, cls_matchall);
/* Entry point for removing an offloaded MATCHALL rule.  Matches the
 * request's cookie against the stored ingress or egress cookie before
 * tearing down the corresponding offload; a mismatched cookie is
 * presumably rejected in lines missing from this extract.
 */
405 int cxgb4_tc_matchall_destroy(struct net_device *dev,
406 struct tc_cls_matchall_offload *cls_matchall,
409 struct cxgb4_tc_port_matchall *tc_port_matchall;
410 struct port_info *pi = netdev2pinfo(dev);
411 struct adapter *adap = netdev2adap(dev);
413 tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
415 /* All the filter types of this matchall rule save the
416 * same cookie. So, checking for the first one is
419 if (cls_matchall->cookie !=
420 tc_port_matchall->ingress.fs[0].tc_cookie)
423 return cxgb4_matchall_free_filter(dev);
426 if (cls_matchall->cookie != tc_port_matchall->egress.cookie)
429 cxgb4_matchall_free_tc(dev);
/* Report packet/byte counters for the offloaded ingress MATCHALL rule.
 * Sums the hardware counters of every installed filter type and pushes
 * the delta since the last call to the TC core via flow_stats_update(),
 * then caches the new totals and timestamp.  Returns early when no
 * ingress rule is offloaded.  NOTE(review): error handling on the
 * counter read and the bytes accumulation line are missing from this
 * extract.
 */
433 int cxgb4_tc_matchall_stats(struct net_device *dev,
434 struct tc_cls_matchall_offload *cls_matchall)
436 u64 tmp_packets, tmp_bytes, packets = 0, bytes = 0;
437 struct cxgb4_tc_port_matchall *tc_port_matchall;
438 struct cxgb4_matchall_ingress_entry *ingress;
439 struct port_info *pi = netdev2pinfo(dev);
440 struct adapter *adap = netdev2adap(dev);
444 tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
445 if (tc_port_matchall->ingress.state == CXGB4_MATCHALL_STATE_DISABLED)
/* Accumulate HW counters across all filter types (IPv4 + IPv6). */
448 ingress = &tc_port_matchall->ingress;
449 for (i = 0; i < CXGB4_FILTER_TYPE_MAX; i++) {
450 ret = cxgb4_get_filter_counters(dev, ingress->tid[i],
451 &tmp_packets, &tmp_bytes,
452 ingress->fs[i].hash);
456 packets += tmp_packets;
/* Only report to TC when the counters actually moved. */
460 if (tc_port_matchall->ingress.packets != packets) {
461 flow_stats_update(&cls_matchall->stats,
462 bytes - tc_port_matchall->ingress.bytes,
463 packets - tc_port_matchall->ingress.packets,
464 0, tc_port_matchall->ingress.last_used,
465 FLOW_ACTION_HW_STATS_IMMEDIATE);
467 tc_port_matchall->ingress.packets = packets;
468 tc_port_matchall->ingress.bytes = bytes;
469 tc_port_matchall->ingress.last_used = jiffies;
/* Tear down whichever MATCHALL offloads (egress traffic class and/or
 * ingress filters) are currently enabled on this port.  Used during
 * cleanup.
 */
475 static void cxgb4_matchall_disable_offload(struct net_device *dev)
477 struct cxgb4_tc_port_matchall *tc_port_matchall;
478 struct port_info *pi = netdev2pinfo(dev);
479 struct adapter *adap = netdev2adap(dev);
481 tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
482 if (tc_port_matchall->egress.state == CXGB4_MATCHALL_STATE_ENABLED)
483 cxgb4_matchall_free_tc(dev);
485 if (tc_port_matchall->ingress.state == CXGB4_MATCHALL_STATE_ENABLED)
486 cxgb4_matchall_free_filter(dev);
/* Allocate the adapter-wide matchall state: one cxgb4_tc_matchall
 * container plus a zeroed per-port array sized by the number of ports.
 * The container is attached to the adapter on success; the visible
 * error path frees it.  NOTE(review): allocation-failure checks and
 * return statements are missing from this extract.
 */
489 int cxgb4_init_tc_matchall(struct adapter *adap)
491 struct cxgb4_tc_port_matchall *tc_port_matchall;
492 struct cxgb4_tc_matchall *tc_matchall;
495 tc_matchall = kzalloc(sizeof(*tc_matchall), GFP_KERNEL);
499 tc_port_matchall = kcalloc(adap->params.nports,
500 sizeof(*tc_port_matchall),
502 if (!tc_port_matchall) {
504 goto out_free_matchall;
507 tc_matchall->port_matchall = tc_port_matchall;
508 adap->tc_matchall = tc_matchall;
/* Free the adapter-wide matchall state.  First disables any active
 * offloads on every port (so HW resources are released before the
 * bookkeeping is freed), then frees the per-port array and the
 * container.  Null checks make this safe when init never ran.
 */
516 void cxgb4_cleanup_tc_matchall(struct adapter *adap)
520 if (adap->tc_matchall) {
521 if (adap->tc_matchall->port_matchall) {
522 for (i = 0; i < adap->params.nports; i++) {
523 struct net_device *dev = adap->port[i];
526 cxgb4_matchall_disable_offload(dev);
528 kfree(adap->tc_matchall->port_matchall);
530 kfree(adap->tc_matchall);