// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
 * Driver for Microsemi VSC85xx PHYs - MACsec support
 *
 * Author: Antoine Tenart
 * License: Dual MIT/GPL
 * Copyright (c) 2020 Microsemi Corporation
 */

#include <linux/phy.h>
#include <dt-bindings/net/mscc-phy-vsc8531.h>

#include <crypto/aes.h>

#include <net/macsec.h>

#include "mscc.h"
#include "mscc_mac.h"
#include "mscc_macsec.h"
#include "mscc_fc_buffer.h"

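/* The MACsec CSRs are not mapped directly: they are reached through the
 * extended MACsec page, using MSCC_EXT_PAGE_MACSEC_20 to select the target
 * block, MSCC_EXT_PAGE_MACSEC_19 to issue a read or write command for a given
 * register address, and MSCC_EXT_PAGE_MACSEC_17/18 to carry the low and high
 * 16-bit halves of the 32-bit data word.
 */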
static u32 vsc8584_macsec_phy_read(struct phy_device *phydev,
				   enum macsec_bank bank, u32 reg)
{
	u32 val, val_l = 0, val_h = 0;
	unsigned long deadline;
	int rc;

	rc = phy_select_page(phydev, MSCC_PHY_PAGE_MACSEC);
	if (rc < 0)
		goto failed;

	__phy_write(phydev, MSCC_EXT_PAGE_MACSEC_20,
		    MSCC_PHY_MACSEC_20_TARGET(bank >> 2));

	if (bank >> 2 == 0x1)
		/* non-MACsec access */
		bank &= 0x3;
	else
		bank = 0;

	__phy_write(phydev, MSCC_EXT_PAGE_MACSEC_19,
		    MSCC_PHY_MACSEC_19_CMD | MSCC_PHY_MACSEC_19_READ |
		    MSCC_PHY_MACSEC_19_REG_ADDR(reg) |
		    MSCC_PHY_MACSEC_19_TARGET(bank));

	deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
	do {
		val = __phy_read(phydev, MSCC_EXT_PAGE_MACSEC_19);
	} while (time_before(jiffies, deadline) && !(val & MSCC_PHY_MACSEC_19_CMD));

	val_l = __phy_read(phydev, MSCC_EXT_PAGE_MACSEC_17);
	val_h = __phy_read(phydev, MSCC_EXT_PAGE_MACSEC_18);

failed:
	phy_restore_page(phydev, rc, rc);

	return (val_h << 16) | val_l;
}

static void vsc8584_macsec_phy_write(struct phy_device *phydev,
				     enum macsec_bank bank, u32 reg, u32 val)
{
	unsigned long deadline;
	int rc;

	rc = phy_select_page(phydev, MSCC_PHY_PAGE_MACSEC);
	if (rc < 0)
		goto failed;

	__phy_write(phydev, MSCC_EXT_PAGE_MACSEC_20,
		    MSCC_PHY_MACSEC_20_TARGET(bank >> 2));

	if ((bank >> 2 == 0x1) || (bank >> 2 == 0x3))
		bank &= 0x3;
	else
		/* MACsec access */
		bank = 0;

	__phy_write(phydev, MSCC_EXT_PAGE_MACSEC_17, (u16)val);
	__phy_write(phydev, MSCC_EXT_PAGE_MACSEC_18, (u16)(val >> 16));

	__phy_write(phydev, MSCC_EXT_PAGE_MACSEC_19,
		    MSCC_PHY_MACSEC_19_CMD | MSCC_PHY_MACSEC_19_REG_ADDR(reg) |
		    MSCC_PHY_MACSEC_19_TARGET(bank));

	deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
	do {
		val = __phy_read(phydev, MSCC_EXT_PAGE_MACSEC_19);
	} while (time_before(jiffies, deadline) && !(val & MSCC_PHY_MACSEC_19_CMD));

failed:
	phy_restore_page(phydev, rc, rc);
}

static void vsc8584_macsec_classification(struct phy_device *phydev,
					  enum macsec_bank bank)
{
	/* enable VLAN tag parsing */
	vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_CP_TAG,
				 MSCC_MS_SAM_CP_TAG_PARSE_STAG |
				 MSCC_MS_SAM_CP_TAG_PARSE_QTAG |
				 MSCC_MS_SAM_CP_TAG_PARSE_QINQ);
}

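/* Program the default (non-matching) flow actions for control and non-control
 * packets. When "block" is set, frames that hit no SA match are dropped
 * instead of being bypassed to the uncontrolled/common port.
 */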
static void vsc8584_macsec_flow_default_action(struct phy_device *phydev,
					       enum macsec_bank bank,
					       bool block)
{
	u32 port = (bank == MACSEC_INGR) ?
		   MSCC_MS_PORT_UNCONTROLLED : MSCC_MS_PORT_COMMON;
	u32 action = MSCC_MS_FLOW_BYPASS;

	if (block)
		action = MSCC_MS_FLOW_DROP;

	vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_NM_FLOW_NCP,
				 /* MACsec untagged */
				 MSCC_MS_SAM_NM_FLOW_NCP_UNTAGGED_FLOW_TYPE(action) |
				 MSCC_MS_SAM_NM_FLOW_NCP_UNTAGGED_DROP_ACTION(MSCC_MS_ACTION_DROP) |
				 MSCC_MS_SAM_NM_FLOW_NCP_UNTAGGED_DEST_PORT(port) |
				 /* MACsec tagged */
				 MSCC_MS_SAM_NM_FLOW_NCP_TAGGED_FLOW_TYPE(action) |
				 MSCC_MS_SAM_NM_FLOW_NCP_TAGGED_DROP_ACTION(MSCC_MS_ACTION_DROP) |
				 MSCC_MS_SAM_NM_FLOW_NCP_TAGGED_DEST_PORT(port) |
				 /* Bad tag */
				 MSCC_MS_SAM_NM_FLOW_NCP_BADTAG_FLOW_TYPE(action) |
				 MSCC_MS_SAM_NM_FLOW_NCP_BADTAG_DROP_ACTION(MSCC_MS_ACTION_DROP) |
				 MSCC_MS_SAM_NM_FLOW_NCP_BADTAG_DEST_PORT(port) |
				 /* Kay tag */
				 MSCC_MS_SAM_NM_FLOW_NCP_KAY_FLOW_TYPE(action) |
				 MSCC_MS_SAM_NM_FLOW_NCP_KAY_DROP_ACTION(MSCC_MS_ACTION_DROP) |
				 MSCC_MS_SAM_NM_FLOW_NCP_KAY_DEST_PORT(port));
	vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_NM_FLOW_CP,
				 /* MACsec untagged */
				 MSCC_MS_SAM_NM_FLOW_NCP_UNTAGGED_FLOW_TYPE(action) |
				 MSCC_MS_SAM_NM_FLOW_CP_UNTAGGED_DROP_ACTION(MSCC_MS_ACTION_DROP) |
				 MSCC_MS_SAM_NM_FLOW_CP_UNTAGGED_DEST_PORT(port) |
				 /* MACsec tagged */
				 MSCC_MS_SAM_NM_FLOW_NCP_TAGGED_FLOW_TYPE(action) |
				 MSCC_MS_SAM_NM_FLOW_CP_TAGGED_DROP_ACTION(MSCC_MS_ACTION_DROP) |
				 MSCC_MS_SAM_NM_FLOW_CP_TAGGED_DEST_PORT(port) |
				 /* Bad tag */
				 MSCC_MS_SAM_NM_FLOW_NCP_BADTAG_FLOW_TYPE(action) |
				 MSCC_MS_SAM_NM_FLOW_CP_BADTAG_DROP_ACTION(MSCC_MS_ACTION_DROP) |
				 MSCC_MS_SAM_NM_FLOW_CP_BADTAG_DEST_PORT(port) |
				 /* Kay tag */
				 MSCC_MS_SAM_NM_FLOW_NCP_KAY_FLOW_TYPE(action) |
				 MSCC_MS_SAM_NM_FLOW_CP_KAY_DROP_ACTION(MSCC_MS_ACTION_DROP) |
				 MSCC_MS_SAM_NM_FLOW_CP_KAY_DEST_PORT(port));
}

static void vsc8584_macsec_integrity_checks(struct phy_device *phydev,
					    enum macsec_bank bank)
{
	u32 val;

	if (bank != MACSEC_INGR)
		return;

	/* Set default rules to pass unmatched frames */
	val = vsc8584_macsec_phy_read(phydev, bank,
				      MSCC_MS_PARAMS2_IG_CC_CONTROL);
	val |= MSCC_MS_PARAMS2_IG_CC_CONTROL_NON_MATCH_CTRL_ACT |
	       MSCC_MS_PARAMS2_IG_CC_CONTROL_NON_MATCH_ACT;
	vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_PARAMS2_IG_CC_CONTROL,
				 val);

	vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_PARAMS2_IG_CP_TAG,
				 MSCC_MS_PARAMS2_IG_CP_TAG_PARSE_STAG |
				 MSCC_MS_PARAMS2_IG_CP_TAG_PARSE_QTAG |
				 MSCC_MS_PARAMS2_IG_CP_TAG_PARSE_QINQ);
}

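/* Bring one MACsec engine (ingress or egress) to a known state: software
 * reset, clock enable, latency and record-size tuning, counter reset, MTU
 * checks and default bypass flows, then enable the block.
 */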
static void vsc8584_macsec_block_init(struct phy_device *phydev,
				      enum macsec_bank bank)
{
	u32 val;
	int i;

	vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_ENA_CFG,
				 MSCC_MS_ENA_CFG_SW_RST |
				 MSCC_MS_ENA_CFG_MACSEC_BYPASS_ENA);

	/* Set the MACsec block out of s/w reset and enable clocks */
	vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_ENA_CFG,
				 MSCC_MS_ENA_CFG_CLK_ENA);

	vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_STATUS_CONTEXT_CTRL,
				 bank == MACSEC_INGR ? 0xe5880214 : 0xe5880218);
	vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_MISC_CONTROL,
				 MSCC_MS_MISC_CONTROL_MC_LATENCY_FIX(bank == MACSEC_INGR ? 57 : 40) |
				 MSCC_MS_MISC_CONTROL_XFORM_REC_SIZE(bank == MACSEC_INGR ? 1 : 2));

	/* Clear the counters */
	val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MS_COUNT_CONTROL);
	val |= MSCC_MS_COUNT_CONTROL_AUTO_CNTR_RESET;
	vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_COUNT_CONTROL, val);

	/* Enable octet increment mode */
	vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_PP_CTRL,
				 MSCC_MS_PP_CTRL_MACSEC_OCTET_INCR_MODE);

	vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_BLOCK_CTX_UPDATE, 0x3);

	val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MS_COUNT_CONTROL);
	val |= MSCC_MS_COUNT_CONTROL_RESET_ALL;
	vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_COUNT_CONTROL, val);

	/* Set the MTU */
	vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_NON_VLAN_MTU_CHECK,
				 MSCC_MS_NON_VLAN_MTU_CHECK_NV_MTU_COMPARE(32761) |
				 MSCC_MS_NON_VLAN_MTU_CHECK_NV_MTU_COMP_DROP);

	for (i = 0; i < 8; i++)
		vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_VLAN_MTU_CHECK(i),
					 MSCC_MS_VLAN_MTU_CHECK_MTU_COMPARE(32761) |
					 MSCC_MS_VLAN_MTU_CHECK_MTU_COMP_DROP);

	if (bank == MACSEC_EGR) {
		val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MS_INTR_CTRL_STATUS);
		val &= ~MSCC_MS_INTR_CTRL_STATUS_INTR_ENABLE_M;
		vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_INTR_CTRL_STATUS, val);

		vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_FC_CFG,
					 MSCC_MS_FC_CFG_FCBUF_ENA |
					 MSCC_MS_FC_CFG_LOW_THRESH(0x1) |
					 MSCC_MS_FC_CFG_HIGH_THRESH(0x4) |
					 MSCC_MS_FC_CFG_LOW_BYTES_VAL(0x4) |
					 MSCC_MS_FC_CFG_HIGH_BYTES_VAL(0x6));
	}

	vsc8584_macsec_classification(phydev, bank);
	vsc8584_macsec_flow_default_action(phydev, bank, false);
	vsc8584_macsec_integrity_checks(phydev, bank);

	/* Enable the MACsec block */
	vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_ENA_CFG,
				 MSCC_MS_ENA_CFG_CLK_ENA |
				 MSCC_MS_ENA_CFG_MACSEC_ENA |
				 MSCC_MS_ENA_CFG_MACSEC_SPEED_MODE(0x5));
}

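/* Configure the host-side or line-side MAC surrounding the MACsec engines:
 * clear the statistics counters, set up pause frame handling, FCS and
 * preamble stripping/insertion, a 10240 byte maximum frame length, disable
 * link-fault signalling, then enable the Rx/Tx paths.
 */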
static void vsc8584_macsec_mac_init(struct phy_device *phydev,
				    enum macsec_bank bank)
{
	u32 val;
	int i;

	/* Clear host & line stats */
	for (i = 0; i < 36; i++)
		vsc8584_macsec_phy_write(phydev, bank, 0x1c + i, 0);

	val = vsc8584_macsec_phy_read(phydev, bank,
				      MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL);
	val &= ~MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_PAUSE_MODE_M;
	val |= MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_PAUSE_MODE(2) |
	       MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_PAUSE_VALUE(0xffff);
	vsc8584_macsec_phy_write(phydev, bank,
				 MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL, val);

	val = vsc8584_macsec_phy_read(phydev, bank,
				      MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_2);
	val |= 0xffff;
	vsc8584_macsec_phy_write(phydev, bank,
				 MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_2, val);

	val = vsc8584_macsec_phy_read(phydev, bank,
				      MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL);
	if (bank == HOST_MAC)
		val |= MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_TIMER_ENA |
		       MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_FRAME_DROP_ENA;
	else
		val |= MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_REACT_ENA |
		       MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_FRAME_DROP_ENA |
		       MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_MODE |
		       MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_EARLY_PAUSE_DETECT_ENA;
	vsc8584_macsec_phy_write(phydev, bank,
				 MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL, val);

	vsc8584_macsec_phy_write(phydev, bank, MSCC_MAC_CFG_PKTINF_CFG,
				 MSCC_MAC_CFG_PKTINF_CFG_STRIP_FCS_ENA |
				 MSCC_MAC_CFG_PKTINF_CFG_INSERT_FCS_ENA |
				 MSCC_MAC_CFG_PKTINF_CFG_LPI_RELAY_ENA |
				 MSCC_MAC_CFG_PKTINF_CFG_STRIP_PREAMBLE_ENA |
				 MSCC_MAC_CFG_PKTINF_CFG_INSERT_PREAMBLE_ENA |
				 (bank == HOST_MAC ?
				  MSCC_MAC_CFG_PKTINF_CFG_ENABLE_TX_PADDING : 0) |
				 (IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING) ?
				  MSCC_MAC_CFG_PKTINF_CFG_MACSEC_BYPASS_NUM_PTP_STALL_CLKS(0x8) : 0));

	val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MAC_CFG_MODE_CFG);
	val &= ~MSCC_MAC_CFG_MODE_CFG_DISABLE_DIC;
	vsc8584_macsec_phy_write(phydev, bank, MSCC_MAC_CFG_MODE_CFG, val);

	val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MAC_CFG_MAXLEN_CFG);
	val &= ~MSCC_MAC_CFG_MAXLEN_CFG_MAX_LEN_M;
	val |= MSCC_MAC_CFG_MAXLEN_CFG_MAX_LEN(10240);
	vsc8584_macsec_phy_write(phydev, bank, MSCC_MAC_CFG_MAXLEN_CFG, val);

	vsc8584_macsec_phy_write(phydev, bank, MSCC_MAC_CFG_ADV_CHK_CFG,
				 MSCC_MAC_CFG_ADV_CHK_CFG_SFD_CHK_ENA |
				 MSCC_MAC_CFG_ADV_CHK_CFG_PRM_CHK_ENA |
				 MSCC_MAC_CFG_ADV_CHK_CFG_OOR_ERR_ENA |
				 MSCC_MAC_CFG_ADV_CHK_CFG_INR_ERR_ENA);

	val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MAC_CFG_LFS_CFG);
	val &= ~MSCC_MAC_CFG_LFS_CFG_LFS_MODE_ENA;
	vsc8584_macsec_phy_write(phydev, bank, MSCC_MAC_CFG_LFS_CFG, val);

	vsc8584_macsec_phy_write(phydev, bank, MSCC_MAC_CFG_ENA_CFG,
				 MSCC_MAC_CFG_ENA_CFG_RX_CLK_ENA |
				 MSCC_MAC_CFG_ENA_CFG_TX_CLK_ENA |
				 MSCC_MAC_CFG_ENA_CFG_RX_ENA |
				 MSCC_MAC_CFG_ENA_CFG_TX_ENA);
}

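/* One-time MACsec datapath setup for a port: initialize the ingress and
 * egress engines, the host and line MACs, the flow control buffer sitting in
 * between, and set the protocol mode of the 1588 processor block associated
 * with this port.
 */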
/* Must be called with mdio_lock taken */
static int __vsc8584_macsec_init(struct phy_device *phydev)
{
	struct vsc8531_private *priv = phydev->priv;
	enum macsec_bank proc_bank;
	u32 val;

	vsc8584_macsec_block_init(phydev, MACSEC_INGR);
	vsc8584_macsec_block_init(phydev, MACSEC_EGR);
	vsc8584_macsec_mac_init(phydev, HOST_MAC);
	vsc8584_macsec_mac_init(phydev, LINE_MAC);

	vsc8584_macsec_phy_write(phydev, FC_BUFFER,
				 MSCC_FCBUF_FC_READ_THRESH_CFG,
				 MSCC_FCBUF_FC_READ_THRESH_CFG_TX_THRESH(4) |
				 MSCC_FCBUF_FC_READ_THRESH_CFG_RX_THRESH(5));

	val = vsc8584_macsec_phy_read(phydev, FC_BUFFER, MSCC_FCBUF_MODE_CFG);
	val |= MSCC_FCBUF_MODE_CFG_PAUSE_GEN_ENA |
	       MSCC_FCBUF_MODE_CFG_RX_PPM_RATE_ADAPT_ENA |
	       MSCC_FCBUF_MODE_CFG_TX_PPM_RATE_ADAPT_ENA;
	vsc8584_macsec_phy_write(phydev, FC_BUFFER, MSCC_FCBUF_MODE_CFG, val);

	vsc8584_macsec_phy_write(phydev, FC_BUFFER, MSCC_FCBUF_PPM_RATE_ADAPT_THRESH_CFG,
				 MSCC_FCBUF_PPM_RATE_ADAPT_THRESH_CFG_TX_THRESH(8) |
				 MSCC_FCBUF_PPM_RATE_ADAPT_THRESH_CFG_TX_OFFSET(9));

	val = vsc8584_macsec_phy_read(phydev, FC_BUFFER,
				      MSCC_FCBUF_TX_DATA_QUEUE_CFG);
	val &= ~(MSCC_FCBUF_TX_DATA_QUEUE_CFG_START_M |
		 MSCC_FCBUF_TX_DATA_QUEUE_CFG_END_M);
	val |= MSCC_FCBUF_TX_DATA_QUEUE_CFG_START(0) |
	       MSCC_FCBUF_TX_DATA_QUEUE_CFG_END(5119);
	vsc8584_macsec_phy_write(phydev, FC_BUFFER,
				 MSCC_FCBUF_TX_DATA_QUEUE_CFG, val);

	val = vsc8584_macsec_phy_read(phydev, FC_BUFFER, MSCC_FCBUF_ENA_CFG);
	val |= MSCC_FCBUF_ENA_CFG_TX_ENA | MSCC_FCBUF_ENA_CFG_RX_ENA;
	vsc8584_macsec_phy_write(phydev, FC_BUFFER, MSCC_FCBUF_ENA_CFG, val);

	proc_bank = (priv->addr < 2) ? PROC_0 : PROC_2;

	val = vsc8584_macsec_phy_read(phydev, proc_bank,
				      MSCC_PROC_IP_1588_TOP_CFG_STAT_MODE_CTL);
	val &= ~MSCC_PROC_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE_M;
	val |= MSCC_PROC_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE(4);
	vsc8584_macsec_phy_write(phydev, proc_bank,
				 MSCC_PROC_IP_1588_TOP_CFG_STAT_MODE_CTL, val);

	return 0;
}

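/* Program one SAM (SA matching) entry: build the match value and mask (tag
 * type, AN, SCI, ethertype, priority), then set the flow control word that
 * decides whether matching frames are dropped, bypassed or sent through the
 * MACsec transformation, honouring the SecY configuration.
 */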
static void vsc8584_macsec_flow(struct phy_device *phydev,
				struct macsec_flow *flow)
{
	struct vsc8531_private *priv = phydev->priv;
	enum macsec_bank bank = flow->bank;
	u32 val, match = 0, mask = 0, action = 0, idx = flow->index;

	if (flow->match.tagged)
		match |= MSCC_MS_SAM_MISC_MATCH_TAGGED;
	if (flow->match.untagged)
		match |= MSCC_MS_SAM_MISC_MATCH_UNTAGGED;

	if (bank == MACSEC_INGR && flow->assoc_num >= 0) {
		match |= MSCC_MS_SAM_MISC_MATCH_AN(flow->assoc_num);
		mask |= MSCC_MS_SAM_MASK_AN_MASK(0x3);
	}

	if (bank == MACSEC_INGR && flow->match.sci && flow->rx_sa->sc->sci) {
		u64 sci = (__force u64)flow->rx_sa->sc->sci;

		match |= MSCC_MS_SAM_MISC_MATCH_TCI(BIT(3));
		mask |= MSCC_MS_SAM_MASK_TCI_MASK(BIT(3)) |
			MSCC_MS_SAM_MASK_SCI_MASK;

		vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_MATCH_SCI_LO(idx),
					 lower_32_bits(sci));
		vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_MATCH_SCI_HI(idx),
					 upper_32_bits(sci));
	}

	if (flow->match.etype) {
		mask |= MSCC_MS_SAM_MASK_MAC_ETYPE_MASK;

		vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_MAC_SA_MATCH_HI(idx),
					 MSCC_MS_SAM_MAC_SA_MATCH_HI_ETYPE((__force u32)htons(flow->etype)));
	}

	match |= MSCC_MS_SAM_MISC_MATCH_PRIORITY(flow->priority);

	vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_MISC_MATCH(idx), match);
	vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_MASK(idx), mask);

	/* Action for matching packets */
	if (flow->action.drop)
		action = MSCC_MS_FLOW_DROP;
	else if (flow->action.bypass || flow->port == MSCC_MS_PORT_UNCONTROLLED)
		action = MSCC_MS_FLOW_BYPASS;
	else
		action = (bank == MACSEC_INGR) ?
			 MSCC_MS_FLOW_INGRESS : MSCC_MS_FLOW_EGRESS;

	val = MSCC_MS_SAM_FLOW_CTRL_FLOW_TYPE(action) |
	      MSCC_MS_SAM_FLOW_CTRL_DROP_ACTION(MSCC_MS_ACTION_DROP) |
	      MSCC_MS_SAM_FLOW_CTRL_DEST_PORT(flow->port);

	if (action == MSCC_MS_FLOW_BYPASS)
		goto write_ctrl;

	if (bank == MACSEC_INGR) {
		if (priv->secy->replay_protect)
			val |= MSCC_MS_SAM_FLOW_CTRL_REPLAY_PROTECT;
		if (priv->secy->validate_frames == MACSEC_VALIDATE_STRICT)
			val |= MSCC_MS_SAM_FLOW_CTRL_VALIDATE_FRAMES(MSCC_MS_VALIDATE_STRICT);
		else if (priv->secy->validate_frames == MACSEC_VALIDATE_CHECK)
			val |= MSCC_MS_SAM_FLOW_CTRL_VALIDATE_FRAMES(MSCC_MS_VALIDATE_CHECK);
	} else if (bank == MACSEC_EGR) {
		if (priv->secy->protect_frames)
			val |= MSCC_MS_SAM_FLOW_CTRL_PROTECT_FRAME;
		if (priv->secy->tx_sc.encrypt)
			val |= MSCC_MS_SAM_FLOW_CTRL_CONF_PROTECT;
		if (priv->secy->tx_sc.send_sci)
			val |= MSCC_MS_SAM_FLOW_CTRL_INCLUDE_SCI;
	}

write_ctrl:
	vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_FLOW_CTRL(idx), val);
}

static struct macsec_flow *vsc8584_macsec_find_flow(struct macsec_context *ctx,
						    enum macsec_bank bank)
{
	struct vsc8531_private *priv = ctx->phydev->priv;
	struct macsec_flow *pos, *tmp;

	list_for_each_entry_safe(pos, tmp, &priv->macsec_flows, list)
		if (pos->assoc_num == ctx->sa.assoc_num && pos->bank == bank)
			return pos;

	return ERR_PTR(-ENOENT);
}

static void vsc8584_macsec_flow_enable(struct phy_device *phydev,
				       struct macsec_flow *flow)
{
	enum macsec_bank bank = flow->bank;
	u32 val, idx = flow->index;

	if ((flow->bank == MACSEC_INGR && flow->rx_sa && !flow->rx_sa->active) ||
	    (flow->bank == MACSEC_EGR && flow->tx_sa && !flow->tx_sa->active))
		return;

	/* Enable */
	vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_ENTRY_SET1, BIT(idx));

	/* Set in-use */
	val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MS_SAM_FLOW_CTRL(idx));
	val |= MSCC_MS_SAM_FLOW_CTRL_SA_IN_USE;
	vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_FLOW_CTRL(idx), val);
}

static void vsc8584_macsec_flow_disable(struct phy_device *phydev,
					struct macsec_flow *flow)
{
	enum macsec_bank bank = flow->bank;
	u32 val, idx = flow->index;

	/* Disable */
	vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_ENTRY_CLEAR1, BIT(idx));

	/* Clear in-use */
	val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MS_SAM_FLOW_CTRL(idx));
	val &= ~MSCC_MS_SAM_FLOW_CTRL_SA_IN_USE;
	vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_FLOW_CTRL(idx), val);
}

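/* Each transform record needs a unique context ID: offset ingress flow
 * indices by MSCC_MS_MAX_FLOWS so they never collide with egress ones.
 */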
static u32 vsc8584_macsec_flow_context_id(struct macsec_flow *flow)
{
	if (flow->bank == MACSEC_INGR)
		return flow->index + MSCC_MS_MAX_FLOWS;

	return flow->index;
}

/* Derive the AES key to get a key for the hash authentication */
static int vsc8584_macsec_derive_key(const u8 *key, u16 key_len, u8 hkey[16])
{
	const u8 input[AES_BLOCK_SIZE] = {0};
	struct crypto_aes_ctx ctx;
	int ret;

	ret = aes_expandkey(&ctx, key, key_len);
	if (ret)
		return ret;

	aes_encrypt(&ctx, hkey, input);
	memzero_explicit(&ctx, sizeof(ctx));
	return 0;
}

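/* Fill the hardware transform record of a flow. The record is written word by
 * word: control word, context ID, the AES key, the derived hash key, the
 * initial sequence number, the replay window (ingress only) and the SCI used
 * to build the IV; any remaining words are zeroed.
 */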
static int vsc8584_macsec_transformation(struct phy_device *phydev,
					 struct macsec_flow *flow,
					 const u8 *key)
{
	struct vsc8531_private *priv = phydev->priv;
	enum macsec_bank bank = flow->bank;
	int i, ret, index = flow->index;
	u32 rec = 0, control = 0;
	u8 hkey[16];
	u64 sci;

	ret = vsc8584_macsec_derive_key(key, priv->secy->key_len, hkey);
	if (ret)
		return ret;

	switch (priv->secy->key_len) {
	case 16:
		control |= CONTROL_CRYPTO_ALG(CTRYPTO_ALG_AES_CTR_128);
		break;
	case 32:
		control |= CONTROL_CRYPTO_ALG(CTRYPTO_ALG_AES_CTR_256);
		break;
	default:
		return -EINVAL;
	}

	control |= (bank == MACSEC_EGR) ?
		   (CONTROL_TYPE_EGRESS | CONTROL_AN(priv->secy->tx_sc.encoding_sa)) :
		   (CONTROL_TYPE_INGRESS | CONTROL_SEQ_MASK);

	control |= CONTROL_UPDATE_SEQ | CONTROL_ENCRYPT_AUTH | CONTROL_KEY_IN_CTX |
		   CONTROL_IV0 | CONTROL_IV1 | CONTROL_IV_IN_SEQ |
		   CONTROL_DIGEST_TYPE(0x2) | CONTROL_SEQ_TYPE(0x1) |
		   CONTROL_AUTH_ALG(AUTH_ALG_AES_GHAS) | CONTROL_CONTEXT_ID;

	/* Set the control word */
	vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++),
				 control);

	/* Set the context ID. Must be unique. */
	vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++),
				 vsc8584_macsec_flow_context_id(flow));

	/* Set the encryption/decryption key */
	for (i = 0; i < priv->secy->key_len / sizeof(u32); i++)
		vsc8584_macsec_phy_write(phydev, bank,
					 MSCC_MS_XFORM_REC(index, rec++),
					 ((u32 *)key)[i]);

	/* Set the authentication key */
	for (i = 0; i < 4; i++)
		vsc8584_macsec_phy_write(phydev, bank,
					 MSCC_MS_XFORM_REC(index, rec++),
					 ((u32 *)hkey)[i]);

	/* Initial sequence number */
	vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++),
				 bank == MACSEC_INGR ?
				 flow->rx_sa->next_pn : flow->tx_sa->next_pn);

	if (bank == MACSEC_INGR)
		/* Set the mask (replay window size) */
		vsc8584_macsec_phy_write(phydev, bank,
					 MSCC_MS_XFORM_REC(index, rec++),
					 priv->secy->replay_window);

	/* Set the input vectors */
	sci = (__force u64)(bank == MACSEC_INGR ? flow->rx_sa->sc->sci : priv->secy->sci);
	vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++),
				 lower_32_bits(sci));
	vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++),
				 upper_32_bits(sci));

	while (rec < 20)
		vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++),
					 0);

	flow->has_transformation = true;
	return 0;
}

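/* Flow (SAM entry) bookkeeping: entries are allocated from a small
 * per-direction bitmap and tracked on the macsec_flows list of the PHY
 * private data.
 */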
static struct macsec_flow *vsc8584_macsec_alloc_flow(struct vsc8531_private *priv,
						     enum macsec_bank bank)
{
	unsigned long *bitmap = bank == MACSEC_INGR ?
				&priv->ingr_flows : &priv->egr_flows;
	struct macsec_flow *flow;
	int index;

	index = find_first_zero_bit(bitmap, MSCC_MS_MAX_FLOWS);

	if (index == MSCC_MS_MAX_FLOWS)
		return ERR_PTR(-ENOMEM);

	flow = kzalloc(sizeof(*flow), GFP_KERNEL);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	set_bit(index, bitmap);
	flow->index = index;
	flow->bank = bank;
	flow->priority = 8;
	flow->assoc_num = -1;

	list_add_tail(&flow->list, &priv->macsec_flows);
	return flow;
}

static void vsc8584_macsec_free_flow(struct vsc8531_private *priv,
				     struct macsec_flow *flow)
{
	unsigned long *bitmap = flow->bank == MACSEC_INGR ?
				&priv->ingr_flows : &priv->egr_flows;

	list_del(&flow->list);
	clear_bit(flow->index, bitmap);
	kfree(flow);
}

static void vsc8584_macsec_add_flow(struct phy_device *phydev,
				    struct macsec_flow *flow)
{
	flow->port = MSCC_MS_PORT_CONTROLLED;
	vsc8584_macsec_flow(phydev, flow);
}

static int vsc8584_macsec_default_flows(struct phy_device *phydev)
{
	struct macsec_flow *flow;

	/* Add a rule to let the MKA traffic go through, ingress */
	flow = vsc8584_macsec_alloc_flow(phydev->priv, MACSEC_INGR);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	flow->priority = 15;
	flow->port = MSCC_MS_PORT_UNCONTROLLED;
	flow->match.tagged = 1;
	flow->match.untagged = 1;
	flow->match.etype = 1;
	flow->etype = ETH_P_PAE;
	flow->action.bypass = 1;

	vsc8584_macsec_flow(phydev, flow);
	vsc8584_macsec_flow_enable(phydev, flow);

	/* Add a rule to let the MKA traffic go through, egress */
	flow = vsc8584_macsec_alloc_flow(phydev->priv, MACSEC_EGR);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	flow->priority = 15;
	flow->port = MSCC_MS_PORT_COMMON;
	flow->match.untagged = 1;
	flow->match.etype = 1;
	flow->etype = ETH_P_PAE;
	flow->action.bypass = 1;

	vsc8584_macsec_flow(phydev, flow);
	vsc8584_macsec_flow_enable(phydev, flow);

	return 0;
}

static void vsc8584_macsec_del_flow(struct phy_device *phydev,
				    struct macsec_flow *flow)
{
	vsc8584_macsec_flow_disable(phydev, flow);
	vsc8584_macsec_free_flow(phydev->priv, flow);
}

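/* Common helper for installing or updating an Rx SA. With "update" set, the
 * existing transformation record is kept and only the matching flow is
 * reprogrammed.
 */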
static int __vsc8584_macsec_add_rxsa(struct macsec_context *ctx,
				     struct macsec_flow *flow, bool update)
{
	struct phy_device *phydev = ctx->phydev;
	struct vsc8531_private *priv = phydev->priv;
	int ret;

	flow->assoc_num = ctx->sa.assoc_num;
	flow->rx_sa = ctx->sa.rx_sa;

	/* Always match tagged packets on ingress */
	flow->match.tagged = 1;
	flow->match.sci = 1;

	if (priv->secy->validate_frames != MACSEC_VALIDATE_DISABLED)
		flow->match.untagged = 1;

	vsc8584_macsec_add_flow(phydev, flow);

	if (update)
		return 0;

	ret = vsc8584_macsec_transformation(phydev, flow, ctx->sa.key);
	if (ret)
		vsc8584_macsec_free_flow(phydev->priv, flow);

	return ret;
}

static int __vsc8584_macsec_add_txsa(struct macsec_context *ctx,
				     struct macsec_flow *flow, bool update)
{
	int ret;

	flow->assoc_num = ctx->sa.assoc_num;
	flow->tx_sa = ctx->sa.tx_sa;

	/* Always match untagged packets on egress */
	flow->match.untagged = 1;

	vsc8584_macsec_add_flow(ctx->phydev, flow);

	if (update)
		return 0;

	ret = vsc8584_macsec_transformation(ctx->phydev, flow, ctx->sa.key);
	if (ret)
		vsc8584_macsec_free_flow(ctx->phydev->priv, flow);

	return ret;
}

static int vsc8584_macsec_dev_open(struct macsec_context *ctx)
{
	struct vsc8531_private *priv = ctx->phydev->priv;
	struct macsec_flow *flow, *tmp;

	list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list)
		vsc8584_macsec_flow_enable(ctx->phydev, flow);

	return 0;
}

static int vsc8584_macsec_dev_stop(struct macsec_context *ctx)
{
	struct vsc8531_private *priv = ctx->phydev->priv;
	struct macsec_flow *flow, *tmp;

	list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list)
		vsc8584_macsec_flow_disable(ctx->phydev, flow);

	return 0;
}

static int vsc8584_macsec_add_secy(struct macsec_context *ctx)
{
	struct vsc8531_private *priv = ctx->phydev->priv;
	struct macsec_secy *secy = ctx->secy;

	if (priv->secy)
		return -EEXIST;

	priv->secy = secy;

	vsc8584_macsec_flow_default_action(ctx->phydev, MACSEC_EGR,
					   secy->validate_frames != MACSEC_VALIDATE_DISABLED);
	vsc8584_macsec_flow_default_action(ctx->phydev, MACSEC_INGR,
					   secy->validate_frames != MACSEC_VALIDATE_DISABLED);

	return vsc8584_macsec_default_flows(ctx->phydev);
}

static int vsc8584_macsec_del_secy(struct macsec_context *ctx)
{
	struct vsc8531_private *priv = ctx->phydev->priv;
	struct macsec_flow *flow, *tmp;

	list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list)
		vsc8584_macsec_del_flow(ctx->phydev, flow);

	vsc8584_macsec_flow_default_action(ctx->phydev, MACSEC_EGR, false);
	vsc8584_macsec_flow_default_action(ctx->phydev, MACSEC_INGR, false);

	priv->secy = NULL;
	return 0;
}

static int vsc8584_macsec_upd_secy(struct macsec_context *ctx)
{
	vsc8584_macsec_del_secy(ctx);
	return vsc8584_macsec_add_secy(ctx);
}

static int vsc8584_macsec_add_rxsc(struct macsec_context *ctx)
{
	/* Nothing to do */
	return 0;
}

static int vsc8584_macsec_upd_rxsc(struct macsec_context *ctx)
{
	return 0;
}

static int vsc8584_macsec_del_rxsc(struct macsec_context *ctx)
{
	struct vsc8531_private *priv = ctx->phydev->priv;
	struct macsec_flow *flow, *tmp;

	list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list) {
		if (flow->bank == MACSEC_INGR && flow->rx_sa &&
		    flow->rx_sa->sc->sci == ctx->rx_sc->sci)
			vsc8584_macsec_del_flow(ctx->phydev, flow);
	}

	return 0;
}

static int vsc8584_macsec_add_rxsa(struct macsec_context *ctx)
{
	struct phy_device *phydev = ctx->phydev;
	struct vsc8531_private *priv = phydev->priv;
	struct macsec_flow *flow;
	int ret;

	flow = vsc8584_macsec_alloc_flow(priv, MACSEC_INGR);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	ret = __vsc8584_macsec_add_rxsa(ctx, flow, false);
	if (ret)
		return ret;

	vsc8584_macsec_flow_enable(phydev, flow);
	return 0;
}

static int vsc8584_macsec_upd_rxsa(struct macsec_context *ctx)
{
	struct macsec_flow *flow;
	int ret;

	if (ctx->sa.update_pn)
		return -EINVAL;

	flow = vsc8584_macsec_find_flow(ctx, MACSEC_INGR);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	/* Make sure the flow is disabled before updating it */
	vsc8584_macsec_flow_disable(ctx->phydev, flow);

	ret = __vsc8584_macsec_add_rxsa(ctx, flow, true);
	if (ret)
		return ret;

	vsc8584_macsec_flow_enable(ctx->phydev, flow);
	return 0;
}

static int vsc8584_macsec_del_rxsa(struct macsec_context *ctx)
{
	struct macsec_flow *flow;

	flow = vsc8584_macsec_find_flow(ctx, MACSEC_INGR);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	vsc8584_macsec_del_flow(ctx->phydev, flow);
	return 0;
}

static int vsc8584_macsec_add_txsa(struct macsec_context *ctx)
{
	struct phy_device *phydev = ctx->phydev;
	struct vsc8531_private *priv = phydev->priv;
	struct macsec_flow *flow;
	int ret;

	flow = vsc8584_macsec_alloc_flow(priv, MACSEC_EGR);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	ret = __vsc8584_macsec_add_txsa(ctx, flow, false);
	if (ret)
		return ret;

	vsc8584_macsec_flow_enable(phydev, flow);
	return 0;
}

static int vsc8584_macsec_upd_txsa(struct macsec_context *ctx)
{
	struct macsec_flow *flow;
	int ret;

	if (ctx->sa.update_pn)
		return -EINVAL;

	flow = vsc8584_macsec_find_flow(ctx, MACSEC_EGR);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	/* Make sure the flow is disabled before updating it */
	vsc8584_macsec_flow_disable(ctx->phydev, flow);

	ret = __vsc8584_macsec_add_txsa(ctx, flow, true);
	if (ret)
		return ret;

	vsc8584_macsec_flow_enable(ctx->phydev, flow);
	return 0;
}

static int vsc8584_macsec_del_txsa(struct macsec_context *ctx)
{
	struct macsec_flow *flow;

	flow = vsc8584_macsec_find_flow(ctx, MACSEC_EGR);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	vsc8584_macsec_del_flow(ctx->phydev, flow);
	return 0;
}

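/* Offload callbacks registered with the MACsec core through
 * phydev->macsec_ops.
 */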
static const struct macsec_ops vsc8584_macsec_ops = {
	.mdo_dev_open = vsc8584_macsec_dev_open,
	.mdo_dev_stop = vsc8584_macsec_dev_stop,
	.mdo_add_secy = vsc8584_macsec_add_secy,
	.mdo_upd_secy = vsc8584_macsec_upd_secy,
	.mdo_del_secy = vsc8584_macsec_del_secy,
	.mdo_add_rxsc = vsc8584_macsec_add_rxsc,
	.mdo_upd_rxsc = vsc8584_macsec_upd_rxsc,
	.mdo_del_rxsc = vsc8584_macsec_del_rxsc,
	.mdo_add_rxsa = vsc8584_macsec_add_rxsa,
	.mdo_upd_rxsa = vsc8584_macsec_upd_rxsa,
	.mdo_del_rxsa = vsc8584_macsec_del_rxsa,
	.mdo_add_txsa = vsc8584_macsec_add_txsa,
	.mdo_upd_txsa = vsc8584_macsec_upd_txsa,
	.mdo_del_txsa = vsc8584_macsec_del_txsa,
};

int vsc8584_macsec_init(struct phy_device *phydev)
{
	struct vsc8531_private *vsc8531 = phydev->priv;

	switch (phydev->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_VSC856X:
	case PHY_ID_VSC8575:
	case PHY_ID_VSC8582:
	case PHY_ID_VSC8584:
		INIT_LIST_HEAD(&vsc8531->macsec_flows);
		vsc8531->secy = NULL;

		phydev->macsec_ops = &vsc8584_macsec_ops;

		return __vsc8584_macsec_init(phydev);
	default:
		return 0;
	}
}

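/* Egress interrupt handler for packet number rollover. The sequence number
 * lives at word 6 + key_len / 4 of the transform record (after the control
 * word, context ID, key and hash key); a value of 0xffffffff means the PN is
 * about to wrap, so the flow is disabled and the MACsec core is notified
 * through macsec_pn_wrapped().
 */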
void vsc8584_handle_macsec_interrupt(struct phy_device *phydev)
{
	struct vsc8531_private *priv = phydev->priv;
	struct macsec_flow *flow, *tmp;
	u32 cause, rec;

	/* Check MACsec PN rollover */
	cause = vsc8584_macsec_phy_read(phydev, MACSEC_EGR,
					MSCC_MS_INTR_CTRL_STATUS);
	cause &= MSCC_MS_INTR_CTRL_STATUS_INTR_CLR_STATUS_M;
	if (!(cause & MACSEC_INTR_CTRL_STATUS_ROLLOVER))
		return;

	rec = 6 + priv->secy->key_len / sizeof(u32);
	list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list) {
		u32 val;

		if (flow->bank != MACSEC_EGR || !flow->has_transformation)
			continue;

		val = vsc8584_macsec_phy_read(phydev, MACSEC_EGR,
					      MSCC_MS_XFORM_REC(flow->index, rec));
		if (val == 0xffffffff) {
			vsc8584_macsec_flow_disable(phydev, flow);
			macsec_pn_wrapped(priv->secy, flow->tx_sa);
			return;
		}
	}
}

void vsc8584_config_macsec_intr(struct phy_device *phydev)
{
	phy_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED_2);
	phy_write(phydev, MSCC_PHY_EXTENDED_INT, MSCC_PHY_EXTENDED_INT_MS_EGR);
	phy_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);

	vsc8584_macsec_phy_write(phydev, MACSEC_EGR, MSCC_MS_AIC_CTRL, 0xf);
	vsc8584_macsec_phy_write(phydev, MACSEC_EGR, MSCC_MS_INTR_CTRL_STATUS,
				 MSCC_MS_INTR_CTRL_STATUS_INTR_ENABLE(MACSEC_INTR_CTRL_STATUS_ROLLOVER));
}