// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2019-2021 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/bitfield.h>
#include <linux/if_rmnet.h>
#include <linux/dma-direction.h>

#include "gsi.h"
#include "gsi_trans.h"
#include "ipa.h"
#include "ipa_data.h"
#include "ipa_endpoint.h"
#include "ipa_cmd.h"
#include "ipa_mem.h"
#include "ipa_modem.h"
#include "ipa_table.h"
#include "ipa_gsi.h"
#include "ipa_power.h"

#define atomic_dec_not_zero(v)	atomic_add_unless((v), -1, 0)

/* Hardware is told about receive buffers once a "batch" has been queued */
#define IPA_REPLENISH_BATCH	16		/* Must be non-zero */

/* The amount of RX buffer space consumed by standard skb overhead */
#define IPA_RX_BUFFER_OVERHEAD	(PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))
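
/* For reference: that overhead is the NET_SKB_PAD headroom reserved at the
 * front of the buffer plus the aligned struct skb_shared_info that
 * build_skb() places at the end.  With 4 KB pages and a typical kernel
 * configuration this comes to a few hundred bytes, but the exact value
 * depends on the configuration.
 */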

/* Where to find the QMAP mux_id for a packet within modem-supplied metadata */
#define IPA_ENDPOINT_QMAP_METADATA_MASK		0x000000ff /* host byte order */

#define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX	3
#define IPA_AGGR_TIME_LIMIT			500	/* microseconds */

/** enum ipa_status_opcode - status element opcode hardware values */
enum ipa_status_opcode {
	IPA_STATUS_OPCODE_PACKET		= 0x01,
	IPA_STATUS_OPCODE_DROPPED_PACKET	= 0x04,
	IPA_STATUS_OPCODE_SUSPENDED_PACKET	= 0x08,
	IPA_STATUS_OPCODE_PACKET_2ND_PASS	= 0x40,
};

/** enum ipa_status_exception - status element exception type */
enum ipa_status_exception {
	/* 0 means no exception */
	IPA_STATUS_EXCEPTION_DEAGGR		= 0x01,
};

/* Status element provided by hardware */
struct ipa_status {
	u8 opcode;		/* enum ipa_status_opcode */
	u8 exception;		/* enum ipa_status_exception */
	__le16 mask;
	__le16 pkt_len;
	u8 endp_src_idx;
	u8 endp_dst_idx;
	__le32 metadata;
	__le32 flags1;
	__le64 flags2;
	__le32 flags3;
	__le32 flags4;
};

/* Field masks for struct ipa_status structure fields */
#define IPA_STATUS_MASK_TAG_VALID_FMASK		GENMASK(4, 4)
#define IPA_STATUS_SRC_IDX_FMASK		GENMASK(4, 0)
#define IPA_STATUS_DST_IDX_FMASK		GENMASK(4, 0)
#define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK	GENMASK(31, 22)
#define IPA_STATUS_FLAGS2_TAG_FMASK		GENMASK_ULL(63, 16)
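
/* The source and destination index fields above are five bits wide, so a
 * status element can name endpoint IDs 0 through 31.  They are extracted
 * below with u8_get_bits(), for example:
 *
 *	u8_get_bits(status->endp_dst_idx, IPA_STATUS_DST_IDX_FMASK);
 */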

static u32 aggr_byte_limit_max(enum ipa_version version)
{
	if (version < IPA_VERSION_4_5)
		return field_max(aggr_byte_limit_fmask(true));

	return field_max(aggr_byte_limit_fmask(false));
}

static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
					const struct ipa_gsi_endpoint_data *all_data,
					const struct ipa_gsi_endpoint_data *data)
{
	const struct ipa_gsi_endpoint_data *other_data;
	struct device *dev = &ipa->pdev->dev;
	enum ipa_endpoint_name other_name;

	if (ipa_gsi_endpoint_data_empty(data))
		return true;

	if (!data->toward_ipa) {
		u32 buffer_size;
		u32 limit;

		if (data->endpoint.filter_support) {
			dev_err(dev, "filtering not supported for RX endpoint %u\n",
				data->endpoint_id);
			return false;
		}

		/* Nothing more to check for non-AP RX */
		if (data->ee_id != GSI_EE_AP)
			return true;

		buffer_size = data->endpoint.config.rx.buffer_size;
		/* The buffer size must hold an MTU plus overhead */
		limit = IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
		if (buffer_size < limit) {
			dev_err(dev, "RX buffer size too small for RX endpoint %u (%u < %u)\n",
				data->endpoint_id, buffer_size, limit);
			return false;
		}

		/* For an endpoint supporting receive aggregation, the
		 * aggregation byte limit defines the point at which an
		 * aggregation window will close.  It is programmed into the
		 * IPA hardware as a number of KB.  We don't use "hard byte
		 * limit" aggregation, so we need to supply enough space in
		 * a receive buffer to hold a complete MTU plus normal skb
		 * overhead *after* that aggregation byte limit has been
		 * crossed.
		 *
		 * This check just ensures the receive buffer size doesn't
		 * exceed what's representable in the aggregation limit field.
		 */
		if (data->endpoint.config.aggregation) {
			limit += SZ_1K * aggr_byte_limit_max(ipa->version);
			if (buffer_size - NET_SKB_PAD > limit) {
				dev_err(dev, "RX buffer size too large for aggregated RX endpoint %u (%u > %u)\n",
					data->endpoint_id,
					buffer_size - NET_SKB_PAD, limit);

				return false;
			}
		}

		return true;	/* Nothing more to check for RX */
	}

	if (data->endpoint.config.status_enable) {
		other_name = data->endpoint.config.tx.status_endpoint;
		if (other_name >= count) {
			dev_err(dev, "status endpoint name %u out of range for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		/* Status endpoint must be defined... */
		other_data = &all_data[other_name];
		if (ipa_gsi_endpoint_data_empty(other_data)) {
			dev_err(dev, "status endpoint name %u undefined for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		/* ...and has to be an RX endpoint... */
		if (other_data->toward_ipa) {
			dev_err(dev,
				"status endpoint for endpoint %u not RX\n",
				data->endpoint_id);
			return false;
		}

		/* ...and if it's to be an AP endpoint... */
		if (other_data->ee_id == GSI_EE_AP) {
			/* ...make sure it has status enabled. */
			if (!other_data->endpoint.config.status_enable) {
				dev_err(dev,
					"status not enabled for endpoint %u\n",
					other_data->endpoint_id);
				return false;
			}
		}
	}

	if (data->endpoint.config.dma_mode) {
		other_name = data->endpoint.config.dma_endpoint;
		if (other_name >= count) {
			dev_err(dev, "DMA endpoint name %u out of range for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		other_data = &all_data[other_name];
		if (ipa_gsi_endpoint_data_empty(other_data)) {
			dev_err(dev, "DMA endpoint name %u undefined for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}
	}

	return true;
}

static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
				    const struct ipa_gsi_endpoint_data *data)
{
	const struct ipa_gsi_endpoint_data *dp = data;
	struct device *dev = &ipa->pdev->dev;
	enum ipa_endpoint_name name;

	if (count > IPA_ENDPOINT_COUNT) {
		dev_err(dev, "too many endpoints specified (%u > %u)\n",
			count, IPA_ENDPOINT_COUNT);
		return false;
	}

	/* Make sure needed endpoints have defined data */
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
		dev_err(dev, "command TX endpoint not defined\n");
		return false;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_LAN_RX])) {
		dev_err(dev, "LAN RX endpoint not defined\n");
		return false;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_TX])) {
		dev_err(dev, "AP->modem TX endpoint not defined\n");
		return false;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_RX])) {
		dev_err(dev, "AP<-modem RX endpoint not defined\n");
		return false;
	}

	for (name = 0; name < count; name++, dp++)
		if (!ipa_endpoint_data_valid_one(ipa, count, data, dp))
			return false;

	return true;
}

/* Allocate a transaction to use on a non-command endpoint */
static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
						  u32 tre_count)
{
	struct gsi *gsi = &endpoint->ipa->gsi;
	u32 channel_id = endpoint->channel_id;
	enum dma_data_direction direction;

	direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

	return gsi_channel_trans_alloc(gsi, channel_id, tre_count, direction);
}

/* suspend_delay represents suspend for RX, delay for TX endpoints.
 * Note that suspend is not supported starting with IPA v4.0, and
 * delay mode should not be used starting with IPA v4.2.
 */
static bool
ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
{
	u32 offset = IPA_REG_ENDP_INIT_CTRL_N_OFFSET(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	bool state;
	u32 mask;
	u32 val;

	if (endpoint->toward_ipa)
		WARN_ON(ipa->version >= IPA_VERSION_4_2);
	else
		WARN_ON(ipa->version >= IPA_VERSION_4_0);

	mask = endpoint->toward_ipa ? ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK;

	val = ioread32(ipa->reg_virt + offset);
	state = !!(val & mask);

	/* Don't bother if it's already in the requested state */
	if (suspend_delay != state) {
		val ^= mask;
		iowrite32(val, ipa->reg_virt + offset);
	}

	return state;
}

/* We don't care what the previous state was for delay mode */
static void
ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
{
	/* Delay mode should not be used for IPA v4.2+ */
	WARN_ON(endpoint->ipa->version >= IPA_VERSION_4_2);
	WARN_ON(!endpoint->toward_ipa);

	(void)ipa_endpoint_init_ctrl(endpoint, enable);
}

static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
{
	u32 mask = BIT(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	u32 offset;
	u32 val;

	WARN_ON(!(mask & ipa->available));

	offset = ipa_reg_state_aggr_active_offset(ipa->version);
	val = ioread32(ipa->reg_virt + offset);

	return !!(val & mask);
}

static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
{
	u32 mask = BIT(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;

	WARN_ON(!(mask & ipa->available));

	iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET);
}

/**
 * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
 * @endpoint:	Endpoint on which to emulate a suspend
 *
 * Emulate suspend IPA interrupt to unsuspend an endpoint suspended
 * with an open aggregation frame.  This is to work around a hardware
 * issue in IPA version 3.5.1 where the suspend interrupt will not be
 * generated when it should be.
 */
static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
{
	struct ipa *ipa = endpoint->ipa;

	if (!endpoint->data->aggregation)
		return;

	/* Nothing to do if the endpoint doesn't have aggregation open */
	if (!ipa_endpoint_aggr_active(endpoint))
		return;

	/* Force close aggregation */
	ipa_endpoint_force_close(endpoint);

	ipa_interrupt_simulate_suspend(ipa->interrupt);
}

/* Returns previous suspend state (true means suspend was enabled) */
static bool
ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
{
	bool suspended;

	if (endpoint->ipa->version >= IPA_VERSION_4_0)
		return enable;	/* For IPA v4.0+, no change made */

	WARN_ON(endpoint->toward_ipa);

	suspended = ipa_endpoint_init_ctrl(endpoint, enable);

	/* A client suspended with an open aggregation frame will not
	 * generate a SUSPEND IPA interrupt.  If enabling suspend, have
	 * ipa_endpoint_suspend_aggr() handle this.
	 */
	if (enable && !suspended)
		ipa_endpoint_suspend_aggr(endpoint);

	return suspended;
}

/* Put all modem RX endpoints into suspend mode, and stop transmission
 * on all modem TX endpoints.  Prior to IPA v4.2, endpoint DELAY mode is
 * used for TX endpoints; starting with IPA v4.2 we use GSI channel flow
 * control instead.
 */
void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
{
	u32 endpoint_id;

	for (endpoint_id = 0; endpoint_id < IPA_ENDPOINT_MAX; endpoint_id++) {
		struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id];

		if (endpoint->ee_id != GSI_EE_MODEM)
			continue;

		if (!endpoint->toward_ipa)
			(void)ipa_endpoint_program_suspend(endpoint, enable);
		else if (ipa->version < IPA_VERSION_4_2)
			ipa_endpoint_program_delay(endpoint, enable);
		else
			gsi_modem_channel_flow_control(&ipa->gsi,
						       endpoint->channel_id,
						       enable);
	}
}

/* Reset all modem endpoints to use the default exception endpoint */
int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
{
	u32 initialized = ipa->initialized;
	struct gsi_trans *trans;
	u32 count;

	/* We need one command per modem TX endpoint.  We can get an upper
	 * bound on that by assuming all initialized endpoints are modem->IPA.
	 * That won't happen, and we could be more precise, but this is fine
	 * for now.  End the transaction with commands to clear the pipeline.
	 */
	count = hweight32(initialized) + ipa_cmd_pipeline_clear_count();
	trans = ipa_cmd_trans_alloc(ipa, count);
	if (!trans) {
		dev_err(&ipa->pdev->dev,
			"no transaction to reset modem exception endpoints\n");
		return -EBUSY;
	}

	while (initialized) {
		u32 endpoint_id = __ffs(initialized);
		struct ipa_endpoint *endpoint;
		u32 offset;

		initialized ^= BIT(endpoint_id);

		/* We only reset modem TX endpoints */
		endpoint = &ipa->endpoint[endpoint_id];
		if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
			continue;

		offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);

		/* Value written is 0, and all bits are updated.  That
		 * means status is disabled on the endpoint, and as a
		 * result all other fields in the register are ignored.
		 */
		ipa_cmd_register_write_add(trans, offset, 0, ~0, false);
	}

	ipa_cmd_pipeline_clear_add(trans);

	/* XXX This should have a 1 second timeout */
	gsi_trans_commit_wait(trans);

	ipa_cmd_pipeline_clear_wait(ipa);

	return 0;
}

static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_CFG_N_OFFSET(endpoint->endpoint_id);
	enum ipa_cs_offload_en enabled;
	u32 val = 0;

	/* FRAG_OFFLOAD_EN is 0 */
	if (endpoint->data->checksum) {
		enum ipa_version version = endpoint->ipa->version;

		if (endpoint->toward_ipa) {
			u32 checksum_offset;

			/* Checksum header offset is in 4-byte units */
			checksum_offset = sizeof(struct rmnet_map_header);
			checksum_offset /= sizeof(u32);
			val |= u32_encode_bits(checksum_offset,
					       CS_METADATA_HDR_OFFSET_FMASK);

			enabled = version < IPA_VERSION_4_5
					? IPA_CS_OFFLOAD_UL
					: IPA_CS_OFFLOAD_INLINE;
		} else {
			enabled = version < IPA_VERSION_4_5
					? IPA_CS_OFFLOAD_DL
					: IPA_CS_OFFLOAD_INLINE;
		}
	} else {
		enabled = IPA_CS_OFFLOAD_NONE;
	}
	val |= u32_encode_bits(enabled, CS_OFFLOAD_EN_FMASK);
	/* CS_GEN_QMB_MASTER_SEL is 0 */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static void ipa_endpoint_init_nat(struct ipa_endpoint *endpoint)
{
	u32 offset;
	u32 val;

	if (!endpoint->toward_ipa)
		return;

	offset = IPA_REG_ENDP_INIT_NAT_N_OFFSET(endpoint->endpoint_id);
	val = u32_encode_bits(IPA_NAT_BYPASS, NAT_EN_FMASK);

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static u32
ipa_qmap_header_size(enum ipa_version version, struct ipa_endpoint *endpoint)
{
	u32 header_size = sizeof(struct rmnet_map_header);

	/* Without checksum offload, we just have the MAP header */
	if (!endpoint->data->checksum)
		return header_size;

	if (version < IPA_VERSION_4_5) {
		/* Checksum header inserted for AP TX endpoints only */
		if (endpoint->toward_ipa)
			header_size += sizeof(struct rmnet_map_ul_csum_header);
	} else {
		/* Checksum header is used in both directions */
		header_size += sizeof(struct rmnet_map_v5_csum_header);
	}

	return header_size;
}

/**
 * ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
 * @endpoint:	Endpoint pointer
 *
 * We program QMAP endpoints so each packet received is preceded by a QMAP
 * header structure.  The QMAP header contains a 1-byte mux_id and 2-byte
 * packet size field, and we have the IPA hardware populate both for each
 * received packet.  The header is configured (in the HDR_EXT register)
 * to use big endian format.
 *
 * The packet size is written into the QMAP header's pkt_len field.  That
 * location is defined here using the HDR_OFST_PKT_SIZE field.
 *
 * The mux_id comes from a 4-byte metadata value supplied with each packet
 * by the modem.  It is *not* a QMAP header, but it does contain the mux_id
 * value that we want, in its low-order byte.  A bitmask defined in the
 * endpoint's METADATA_MASK register defines which byte within the modem
 * metadata contains the mux_id.  And the OFST_METADATA field programmed
 * here indicates where the extracted byte should be placed within the QMAP
 * header.
 */
static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_HDR_N_OFFSET(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	u32 val = 0;

	if (endpoint->data->qmap) {
		enum ipa_version version = ipa->version;
		size_t header_size;

		header_size = ipa_qmap_header_size(version, endpoint);
		val = ipa_header_size_encoded(version, header_size);

		/* Define how to fill fields in a received QMAP header */
		if (!endpoint->toward_ipa) {
			u32 offset;	/* Field offset within header */

			/* Where IPA will write the metadata value */
			offset = offsetof(struct rmnet_map_header, mux_id);
			val |= ipa_metadata_offset_encoded(version, offset);

			/* Where IPA will write the length */
			offset = offsetof(struct rmnet_map_header, pkt_len);
			/* Upper bits are stored in HDR_EXT with IPA v4.5 */
			if (version >= IPA_VERSION_4_5)
				offset &= field_mask(HDR_OFST_PKT_SIZE_FMASK);

			val |= HDR_OFST_PKT_SIZE_VALID_FMASK;
			val |= u32_encode_bits(offset, HDR_OFST_PKT_SIZE_FMASK);
		}
		/* For QMAP TX, metadata offset is 0 (modem assumes this) */
		val |= HDR_OFST_METADATA_VALID_FMASK;

		/* HDR_ADDITIONAL_CONST_LEN is 0; (RX only) */
		/* HDR_A5_MUX is 0 */
		/* HDR_LEN_INC_DEAGG_HDR is 0 */
		/* HDR_METADATA_REG_VALID is 0 (TX only, version < v4.5) */
	}

	iowrite32(val, ipa->reg_virt + offset);
}
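
/* For illustration, the rmnet_map_header layout (from <linux/if_rmnet.h>)
 * that the offsets used above refer to looks roughly like this:
 *
 *	struct rmnet_map_header {
 *		u8 flags;	// pad length (6 bits), next header, command
 *		u8 mux_id;	// offset 1: filled via OFST_METADATA
 *		__be16 pkt_len;	// offsets 2-3: filled via HDR_OFST_PKT_SIZE
 *	};
 */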

static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(endpoint->endpoint_id);
	u32 pad_align = endpoint->data->rx.pad_align;
	struct ipa *ipa = endpoint->ipa;
	u32 val = 0;

	val |= HDR_ENDIANNESS_FMASK;		/* big endian */

	/* A QMAP header contains a 6 bit pad field at offset 0.  The RMNet
	 * driver assumes this field is meaningful in packets it receives,
	 * and assumes the header's payload length includes that padding.
	 * The RMNet driver does *not* pad packets it sends, however, so
	 * the pad field (although 0) should be ignored.
	 */
	if (endpoint->data->qmap && !endpoint->toward_ipa) {
		val |= HDR_TOTAL_LEN_OR_PAD_VALID_FMASK;
		/* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
		val |= HDR_PAYLOAD_LEN_INC_PADDING_FMASK;
		/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
	}

	/* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
	if (!endpoint->toward_ipa)
		val |= u32_encode_bits(pad_align, HDR_PAD_TO_ALIGNMENT_FMASK);

	/* IPA v4.5 adds some most-significant bits to a few fields,
	 * two of which are defined in the HDR (not HDR_EXT) register.
	 */
	if (ipa->version >= IPA_VERSION_4_5) {
		/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0, so MSB is 0 */
		if (endpoint->data->qmap && !endpoint->toward_ipa) {
			u32 offset;

			offset = offsetof(struct rmnet_map_header, pkt_len);
			offset >>= hweight32(HDR_OFST_PKT_SIZE_FMASK);
			val |= u32_encode_bits(offset,
					       HDR_OFST_PKT_SIZE_MSB_FMASK);
			/* HDR_ADDITIONAL_CONST_LEN is 0 so MSB is 0 */
		}
	}

	iowrite32(val, ipa->reg_virt + offset);
}

static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	u32 val = 0;
	u32 offset;

	if (endpoint->toward_ipa)
		return;		/* Register not valid for TX endpoints */

	offset = IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(endpoint_id);

	/* Note that HDR_ENDIANNESS indicates big endian header fields */
	if (endpoint->data->qmap)
		val = (__force u32)cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(endpoint->endpoint_id);
	u32 val;

	if (!endpoint->toward_ipa)
		return;		/* Register not valid for RX endpoints */

	if (endpoint->data->dma_mode) {
		enum ipa_endpoint_name name = endpoint->data->dma_endpoint;
		u32 dma_endpoint_id;

		dma_endpoint_id = endpoint->ipa->name_map[name]->endpoint_id;

		val = u32_encode_bits(IPA_DMA, MODE_FMASK);
		val |= u32_encode_bits(dma_endpoint_id, DEST_PIPE_INDEX_FMASK);
	} else {
		val = u32_encode_bits(IPA_BASIC, MODE_FMASK);
	}
	/* All other bits unspecified (and 0) */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

/* Compute the aggregation size value to use for a given buffer size */
static u32 ipa_aggr_size_kb(u32 rx_buffer_size)
{
	/* We don't use "hard byte limit" aggregation, so we define the
	 * aggregation limit such that our buffer has enough space *after*
	 * that limit to receive a full MTU of data, plus overhead.
	 */
	rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;

	return rx_buffer_size / SZ_1K;
}
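
/* Rough example, assuming a 1500-byte IPA_MTU and typical skb overhead:
 * called with an 8192-byte value, a bit over 6 KB remains after reserving
 * the MTU plus overhead, so the aggregation size used would be 6 (KB).
 */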

/* Encoded values for AGGR endpoint register fields */
static u32 aggr_byte_limit_encoded(enum ipa_version version, u32 limit)
{
	if (version < IPA_VERSION_4_5)
		return u32_encode_bits(limit, aggr_byte_limit_fmask(true));

	return u32_encode_bits(limit, aggr_byte_limit_fmask(false));
}

/* Encode the aggregation timer limit (microseconds) based on IPA version */
static u32 aggr_time_limit_encoded(enum ipa_version version, u32 limit)
{
	u32 gran_sel;
	u32 fmask;
	u32 val;

	if (version < IPA_VERSION_4_5) {
		/* We set aggregation granularity in ipa_hardware_config() */
		limit = DIV_ROUND_CLOSEST(limit, IPA_AGGR_GRANULARITY);

		return u32_encode_bits(limit, aggr_time_limit_fmask(true));
	}

	/* IPA v4.5 expresses the time limit using Qtime.  The AP has
	 * pulse generators 0 and 1 available, which were configured
	 * in ipa_qtime_config() to have granularity 100 usec and
	 * 1 msec, respectively.  Use pulse generator 0 if possible,
	 * otherwise fall back to pulse generator 1.
	 */
	fmask = aggr_time_limit_fmask(false);
	val = DIV_ROUND_CLOSEST(limit, 100);
	if (val > field_max(fmask)) {
		/* Have to use pulse generator 1 (millisecond granularity) */
		gran_sel = AGGR_GRAN_SEL_FMASK;
		val = DIV_ROUND_CLOSEST(limit, 1000);
	} else {
		/* We can use pulse generator 0 (100 usec granularity) */
		gran_sel = 0;
	}

	return gran_sel | u32_encode_bits(val, fmask);
}
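
/* With the 500 microsecond IPA_AGGR_TIME_LIMIT used below, IPA v4.5+
 * would encode DIV_ROUND_CLOSEST(500, 100) = 5 and select pulse
 * generator 0 (assuming, as expected, that 5 fits the field); earlier
 * versions instead divide by the aggregation granularity set up in
 * ipa_hardware_config().
 */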

static u32 aggr_sw_eof_active_encoded(enum ipa_version version, bool enabled)
{
	u32 val = enabled ? 1 : 0;

	if (version < IPA_VERSION_4_5)
		return u32_encode_bits(val, aggr_sw_eof_active_fmask(true));

	return u32_encode_bits(val, aggr_sw_eof_active_fmask(false));
}

static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_AGGR_N_OFFSET(endpoint->endpoint_id);
	enum ipa_version version = endpoint->ipa->version;
	u32 val = 0;

	if (endpoint->data->aggregation) {
		if (!endpoint->toward_ipa) {
			const struct ipa_endpoint_rx_data *rx_data;
			u32 buffer_size;
			bool close_eof;
			u32 limit;

			rx_data = &endpoint->data->rx;
			val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
			val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);

			buffer_size = rx_data->buffer_size;
			limit = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD);
			val |= aggr_byte_limit_encoded(version, limit);

			limit = IPA_AGGR_TIME_LIMIT;
			val |= aggr_time_limit_encoded(version, limit);

			/* AGGR_PKT_LIMIT is 0 (unlimited) */

			close_eof = rx_data->aggr_close_eof;
			val |= aggr_sw_eof_active_encoded(version, close_eof);

			/* AGGR_HARD_BYTE_LIMIT_ENABLE is 0 */
		} else {
			val |= u32_encode_bits(IPA_ENABLE_DEAGGR,
					       AGGR_EN_FMASK);
			val |= u32_encode_bits(IPA_QCMAP, AGGR_TYPE_FMASK);
			/* other fields ignored */
		}
		/* AGGR_FORCE_CLOSE is 0 */
		/* AGGR_GRAN_SEL is 0 for IPA v4.5 */
	} else {
		val |= u32_encode_bits(IPA_BYPASS_AGGR, AGGR_EN_FMASK);
		/* other fields ignored */
	}

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

/* Return the Qtime-based head-of-line blocking timer value that
 * represents the given number of microseconds.  The result
 * includes both the timer value and the selected timer granularity.
 */
static u32 hol_block_timer_qtime_val(struct ipa *ipa, u32 microseconds)
{
	u32 gran_sel;
	u32 val;

	/* IPA v4.5 expresses time limits using Qtime.  The AP has
	 * pulse generators 0 and 1 available, which were configured
	 * in ipa_qtime_config() to have granularity 100 usec and
	 * 1 msec, respectively.  Use pulse generator 0 if possible,
	 * otherwise fall back to pulse generator 1.
	 */
	val = DIV_ROUND_CLOSEST(microseconds, 100);
	if (val > field_max(TIME_LIMIT_FMASK)) {
		/* Have to use pulse generator 1 (millisecond granularity) */
		gran_sel = GRAN_SEL_FMASK;
		val = DIV_ROUND_CLOSEST(microseconds, 1000);
	} else {
		/* We can use pulse generator 0 (100 usec granularity) */
		gran_sel = 0;
	}

	return gran_sel | u32_encode_bits(val, TIME_LIMIT_FMASK);
}

/* The head-of-line blocking timer is defined as a tick count.  For
 * IPA version 4.5 the tick count is based on the Qtimer, which is
 * derived from the 19.2 MHz SoC XO clock.  For older IPA versions
 * each tick represents 128 cycles of the IPA core clock.
 *
 * Return the encoded value that should be written to that register
 * that represents the timeout period provided.  For IPA v4.2 this
 * encodes a base and scale value, while for earlier versions the
 * value is a simple tick count.
 */
static u32 hol_block_timer_val(struct ipa *ipa, u32 microseconds)
{
	u32 width;
	u32 scale;
	u64 ticks;
	u64 rate;
	u32 high;
	u32 val;

	if (!microseconds)
		return 0;	/* Nothing to compute if timer period is 0 */

	if (ipa->version >= IPA_VERSION_4_5)
		return hol_block_timer_qtime_val(ipa, microseconds);

	/* Use 64 bit arithmetic to avoid overflow... */
	rate = ipa_core_clock_rate(ipa);
	ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC);
	/* ...but we still need to fit into a 32-bit register */
	WARN_ON(ticks > U32_MAX);

	/* IPA v3.5.1 through v4.1 just record the tick count */
	if (ipa->version < IPA_VERSION_4_2)
		return (u32)ticks;

	/* For IPA v4.2, the tick count is represented by base and
	 * scale fields within the 32-bit timer register, where:
	 *     ticks = base << scale;
	 * The best precision is achieved when the base value is as
	 * large as possible.  Find the highest set bit in the tick
	 * count, and extract the number of bits in the base field
	 * such that high bit is included.
	 */
	high = fls(ticks);		/* 1..32 */
	width = HWEIGHT32(BASE_VALUE_FMASK);
	scale = high > width ? high - width : 0;
	if (scale) {
		/* If we're scaling, round up to get a closer result */
		ticks += 1 << (scale - 1);
		/* High bit was set, so rounding might have affected it */
		if (fls(ticks) != high)
			scale++;
	}

	val = u32_encode_bits(scale, SCALE_FMASK);
	val |= u32_encode_bits(ticks >> scale, BASE_VALUE_FMASK);

	return val;
}
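
/* Worked example of the IPA v4.2 base/scale encoding above, using a
 * hypothetical field width: if the tick count required 17 bits but the
 * BASE_VALUE field held only 8, scale would be 9 and the base written
 * would be roughly ticks >> 9, after the 1 << 8 rounding adjustment
 * (with scale bumped once more if rounding carries into a new high bit).
 */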

/* If microseconds is 0, timeout is immediate */
static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
					      u32 microseconds)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	u32 offset;
	u32 val;

	/* This should only be changed when HOL_BLOCK_EN is disabled */
	offset = IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(endpoint_id);
	val = hol_block_timer_val(ipa, microseconds);
	iowrite32(val, ipa->reg_virt + offset);
}

static void
ipa_endpoint_init_hol_block_en(struct ipa_endpoint *endpoint, bool enable)
{
	u32 endpoint_id = endpoint->endpoint_id;
	u32 offset;
	u32 val;

	val = enable ? HOL_BLOCK_EN_FMASK : 0;
	offset = IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(endpoint_id);
	iowrite32(val, endpoint->ipa->reg_virt + offset);
	/* When enabling, the register must be written twice for IPA v4.5+ */
	if (enable && endpoint->ipa->version >= IPA_VERSION_4_5)
		iowrite32(val, endpoint->ipa->reg_virt + offset);
}

/* Assumes HOL_BLOCK is in disabled state */
static void ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint,
					       u32 microseconds)
{
	ipa_endpoint_init_hol_block_timer(endpoint, microseconds);
	ipa_endpoint_init_hol_block_en(endpoint, true);
}

static void ipa_endpoint_init_hol_block_disable(struct ipa_endpoint *endpoint)
{
	ipa_endpoint_init_hol_block_en(endpoint, false);
}

void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
{
	u32 i;

	for (i = 0; i < IPA_ENDPOINT_MAX; i++) {
		struct ipa_endpoint *endpoint = &ipa->endpoint[i];

		if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM)
			continue;

		ipa_endpoint_init_hol_block_disable(endpoint);
		ipa_endpoint_init_hol_block_enable(endpoint, 0);
	}
}

static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(endpoint->endpoint_id);
	u32 val = 0;

	if (!endpoint->toward_ipa)
		return;		/* Register not valid for RX endpoints */

	/* DEAGGR_HDR_LEN is 0 */
	/* PACKET_OFFSET_VALID is 0 */
	/* PACKET_OFFSET_LOCATION is ignored (not valid) */
	/* MAX_PACKET_LEN is 0 (not enforced) */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_RSRC_GRP_N_OFFSET(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	u32 val;

	val = rsrc_grp_encoded(ipa->version, endpoint->data->resource_group);
	iowrite32(val, ipa->reg_virt + offset);
}

static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint->endpoint_id);
	u32 val = 0;

	if (!endpoint->toward_ipa)
		return;		/* Register not valid for RX endpoints */

	/* Low-order byte configures primary packet processing */
	val |= u32_encode_bits(endpoint->data->tx.seq_type, SEQ_TYPE_FMASK);

	/* Second byte configures replicated packet processing */
	val |= u32_encode_bits(endpoint->data->tx.seq_rep_type,
			       SEQ_REP_TYPE_FMASK);

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

/**
 * ipa_endpoint_skb_tx() - Transmit a socket buffer
 * @endpoint:	Endpoint pointer
 * @skb:	Socket buffer to send
 *
 * Returns:	0 if successful, or a negative error code
 */
int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb)
{
	struct gsi_trans *trans;
	u32 nr_frags;
	int ret;

	/* Make sure source endpoint's TLV FIFO has enough entries to
	 * hold the linear portion of the skb and all its fragments.
	 * If not, see if we can linearize it before giving up.
	 */
	nr_frags = skb_shinfo(skb)->nr_frags;
	if (1 + nr_frags > endpoint->trans_tre_max) {
		if (skb_linearize(skb))
			return -E2BIG;
		nr_frags = 0;
	}

	trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags);
	if (!trans)
		return -EBUSY;

	ret = gsi_trans_skb_add(trans, skb);
	if (ret)
		goto err_trans_free;
	trans->data = skb;	/* transaction owns skb now */

	gsi_trans_commit(trans, !netdev_xmit_more());

	return 0;

err_trans_free:
	gsi_trans_free(trans);

	return -ENOMEM;
}
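
/* Example: a socket buffer with a linear area plus two page fragments
 * needs 1 + 2 = 3 TRE entries.  If that exceeds the channel's TLV FIFO
 * size, ipa_endpoint_skb_tx() above linearizes the skb so it fits in a
 * single TRE instead.
 */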

static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	u32 val = 0;
	u32 offset;

	offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);

	if (endpoint->data->status_enable) {
		val |= STATUS_EN_FMASK;
		if (endpoint->toward_ipa) {
			enum ipa_endpoint_name name;
			u32 status_endpoint_id;

			name = endpoint->data->tx.status_endpoint;
			status_endpoint_id = ipa->name_map[name]->endpoint_id;

			val |= u32_encode_bits(status_endpoint_id,
					       STATUS_ENDP_FMASK);
		}
		/* STATUS_LOCATION is 0, meaning status element precedes
		 * packet (not present for IPA v4.5)
		 */
		/* STATUS_PKT_SUPPRESS_FMASK is 0 (not present for v3.5.1) */
	}

	iowrite32(val, ipa->reg_virt + offset);
}

static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint,
				      struct gsi_trans *trans)
{
	struct page *page;
	u32 buffer_size;
	u32 offset;
	u32 len;
	int ret;

	buffer_size = endpoint->data->rx.buffer_size;
	page = dev_alloc_pages(get_order(buffer_size));
	if (!page)
		return -ENOMEM;

	/* Offset the buffer to make space for skb headroom */
	offset = NET_SKB_PAD;
	len = buffer_size - offset;

	ret = gsi_trans_page_add(trans, page, len, offset);
	if (ret)
		__free_pages(page, get_order(buffer_size));
	else
		trans->data = page;	/* transaction owns page now */

	return ret;
}

/**
 * ipa_endpoint_replenish() - Replenish endpoint receive buffers
 * @endpoint:	Endpoint to be replenished
 *
 * The IPA hardware can hold a fixed number of receive buffers for an RX
 * endpoint, based on the number of entries in the underlying channel ring
 * buffer.  If an endpoint's "backlog" is non-zero, it indicates how many
 * more receive buffers can be supplied to the hardware.  Replenishing for
 * an endpoint can be disabled, in which case buffers are not queued to
 * the hardware.
 */
static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint)
{
	struct gsi_trans *trans;

	if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags))
		return;

	/* Skip it if it's already active */
	if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags))
		return;

	while ((trans = ipa_endpoint_trans_alloc(endpoint, 1))) {
		bool doorbell;

		if (ipa_endpoint_replenish_one(endpoint, trans))
			goto try_again_later;

		/* Ring the doorbell if we've got a full batch */
		doorbell = !(++endpoint->replenish_count % IPA_REPLENISH_BATCH);
		gsi_trans_commit(trans, doorbell);
	}

	clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);

	return;

try_again_later:
	gsi_trans_free(trans);
	clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);

	/* Whenever a receive buffer transaction completes we'll try to
	 * replenish again.  It's unlikely, but if we fail to supply even
	 * one buffer, nothing will trigger another replenish attempt.
	 * If the hardware has no receive buffers queued, schedule work to
	 * try replenishing again.
	 */
	if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
		schedule_delayed_work(&endpoint->replenish_work,
				      msecs_to_jiffies(1));
}
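
/* With IPA_REPLENISH_BATCH set to 16, the doorbell is rung on every 16th
 * buffer committed above; the buffers in between are queued without
 * notifying the hardware, which keeps doorbell writes infrequent.
 */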

static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
{
	set_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);

	/* Start replenishing if hardware currently has no buffers */
	if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
		ipa_endpoint_replenish(endpoint);
}

static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
{
	clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
}

static void ipa_endpoint_replenish_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct ipa_endpoint *endpoint;

	endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);

	ipa_endpoint_replenish(endpoint);
}

static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
				  void *data, u32 len, u32 extra)
{
	struct sk_buff *skb;

	if (!endpoint->netdev)
		return;

	skb = __dev_alloc_skb(len, GFP_ATOMIC);
	if (skb) {
		/* Copy the data into the socket buffer and receive it */
		skb_put(skb, len);
		memcpy(skb->data, data, len);
		skb->truesize += extra;
	}

	ipa_modem_skb_rx(endpoint->netdev, skb);
}

static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
				   struct page *page, u32 len)
{
	u32 buffer_size = endpoint->data->rx.buffer_size;
	struct sk_buff *skb;

	/* Nothing to do if there's no netdev */
	if (!endpoint->netdev)
		return false;

	WARN_ON(len > SKB_WITH_OVERHEAD(buffer_size - NET_SKB_PAD));

	skb = build_skb(page_address(page), buffer_size);
	if (skb) {
		/* Reserve the headroom and account for the data */
		skb_reserve(skb, NET_SKB_PAD);
		skb_put(skb, len);
	}

	/* Receive the buffer (or record drop if unable to build it) */
	ipa_modem_skb_rx(endpoint->netdev, skb);

	return skb != NULL;
}

/* The format of a packet status element is the same for several status
 * types (opcodes).  Other types aren't currently supported.
 */
static bool ipa_status_format_packet(enum ipa_status_opcode opcode)
{
	switch (opcode) {
	case IPA_STATUS_OPCODE_PACKET:
	case IPA_STATUS_OPCODE_DROPPED_PACKET:
	case IPA_STATUS_OPCODE_SUSPENDED_PACKET:
	case IPA_STATUS_OPCODE_PACKET_2ND_PASS:
		return true;
	default:
		return false;
	}
}

static bool ipa_endpoint_status_skip(struct ipa_endpoint *endpoint,
				     const struct ipa_status *status)
{
	u32 endpoint_id;

	if (!ipa_status_format_packet(status->opcode))
		return true;
	if (!status->pkt_len)
		return true;
	endpoint_id = u8_get_bits(status->endp_dst_idx,
				  IPA_STATUS_DST_IDX_FMASK);
	if (endpoint_id != endpoint->endpoint_id)
		return true;

	return false;	/* Don't skip this packet, process it */
}

static bool ipa_endpoint_status_tag(struct ipa_endpoint *endpoint,
				    const struct ipa_status *status)
{
	struct ipa_endpoint *command_endpoint;
	struct ipa *ipa = endpoint->ipa;
	u32 endpoint_id;

	if (!le16_get_bits(status->mask, IPA_STATUS_MASK_TAG_VALID_FMASK))
		return false;	/* No valid tag */

	/* The status contains a valid tag.  We know the packet was sent to
	 * this endpoint (already verified by ipa_endpoint_status_skip()).
	 * If the packet came from the AP->command TX endpoint we know
	 * this packet was sent as part of the pipeline clear process.
	 */
	endpoint_id = u8_get_bits(status->endp_src_idx,
				  IPA_STATUS_SRC_IDX_FMASK);
	command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
	if (endpoint_id == command_endpoint->endpoint_id) {
		complete(&ipa->completion);
	} else {
		dev_err(&ipa->pdev->dev,
			"unexpected tagged packet from endpoint %u\n",
			endpoint_id);
	}

	return true;
}

/* Return whether the status indicates the packet should be dropped */
static bool ipa_endpoint_status_drop(struct ipa_endpoint *endpoint,
				     const struct ipa_status *status)
{
	u32 val;

	/* If the status indicates a tagged transfer, we'll drop the packet */
	if (ipa_endpoint_status_tag(endpoint, status))
		return true;

	/* Deaggregation exceptions we drop; all other types we consume */
	if (status->exception)
		return status->exception == IPA_STATUS_EXCEPTION_DEAGGR;

	/* Drop the packet if it fails to match a routing rule; otherwise no */
	val = le32_get_bits(status->flags1, IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);

	return val == field_max(IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
}
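
/* A rule ID of all ones (field_max() of the 10-bit RT_RULE_ID field, i.e.
 * 0x3ff) is how the hardware reports that no routing rule matched, which
 * is why such packets are dropped above.
 */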

static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
				      struct page *page, u32 total_len)
{
	u32 buffer_size = endpoint->data->rx.buffer_size;
	void *data = page_address(page) + NET_SKB_PAD;
	u32 unused = buffer_size - total_len;
	u32 resid = total_len;

	while (resid) {
		const struct ipa_status *status = data;
		u32 align;
		u32 len;

		if (resid < sizeof(*status)) {
			dev_err(&endpoint->ipa->pdev->dev,
				"short message (%u bytes < %zu byte status)\n",
				resid, sizeof(*status));
			break;
		}

		/* Skip over status packets that lack packet data */
		if (ipa_endpoint_status_skip(endpoint, status)) {
			data += sizeof(*status);
			resid -= sizeof(*status);
			continue;
		}

		/* Compute the amount of buffer space consumed by the packet,
		 * including the status element.  If the hardware is configured
		 * to pad packet data to an aligned boundary, account for that.
		 * And if checksum offload is enabled a trailer containing
		 * computed checksum information will be appended.
		 */
		align = endpoint->data->rx.pad_align ? : 1;
		len = le16_to_cpu(status->pkt_len);
		len = sizeof(*status) + ALIGN(len, align);
		if (endpoint->data->checksum)
			len += sizeof(struct rmnet_map_dl_csum_trailer);

		if (!ipa_endpoint_status_drop(endpoint, status)) {
			void *data2;
			u32 extra;
			u32 len2;

			/* Client receives only packet data (no status) */
			data2 = data + sizeof(*status);
			len2 = le16_to_cpu(status->pkt_len);

			/* Have the true size reflect the extra unused space in
			 * the original receive buffer.  Distribute the "cost"
			 * proportionately across all aggregated packets in the
			 * buffer.
			 */
			extra = DIV_ROUND_CLOSEST(unused * len, total_len);
			ipa_endpoint_skb_copy(endpoint, data2, len2, extra);
		}

		/* Consume status and the full packet it describes */
		data += len;
		resid -= len;
	}
}

/* Complete a TX transaction, command or from ipa_endpoint_skb_tx() */
static void ipa_endpoint_tx_complete(struct ipa_endpoint *endpoint,
				     struct gsi_trans *trans)
{
}

/* Complete transaction initiated in ipa_endpoint_replenish_one() */
static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint,
				     struct gsi_trans *trans)
{
	struct page *page;

	if (trans->cancelled)
		goto done;

	/* Parse or build a socket buffer using the actual received length */
	page = trans->data;
	if (endpoint->data->status_enable)
		ipa_endpoint_status_parse(endpoint, page, trans->len);
	else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
		trans->data = NULL;	/* Pages have been consumed */
done:
	ipa_endpoint_replenish(endpoint);
}

void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
				 struct gsi_trans *trans)
{
	if (endpoint->toward_ipa)
		ipa_endpoint_tx_complete(endpoint, trans);
	else
		ipa_endpoint_rx_complete(endpoint, trans);
}

void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
				struct gsi_trans *trans)
{
	if (endpoint->toward_ipa) {
		struct ipa *ipa = endpoint->ipa;

		/* Nothing to do for command transactions */
		if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) {
			struct sk_buff *skb = trans->data;

			if (skb)
				dev_kfree_skb_any(skb);
		}
	} else {
		struct page *page = trans->data;

		if (page) {
			u32 buffer_size = endpoint->data->rx.buffer_size;

			__free_pages(page, get_order(buffer_size));
		}
	}
}

void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
{
	u32 val;

	/* ROUTE_DIS is 0 */
	val = u32_encode_bits(endpoint_id, ROUTE_DEF_PIPE_FMASK);
	val |= ROUTE_DEF_HDR_TABLE_FMASK;
	val |= u32_encode_bits(0, ROUTE_DEF_HDR_OFST_FMASK);
	val |= u32_encode_bits(endpoint_id, ROUTE_FRAG_DEF_PIPE_FMASK);
	val |= ROUTE_DEF_RETAIN_HDR_FMASK;

	iowrite32(val, ipa->reg_virt + IPA_REG_ROUTE_OFFSET);
}

void ipa_endpoint_default_route_clear(struct ipa *ipa)
{
	ipa_endpoint_default_route_set(ipa, 0);
}

/**
 * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
 * @endpoint:	Endpoint to be reset
 *
 * If aggregation is active on an RX endpoint when a reset is performed
 * on its underlying GSI channel, a special sequence of actions must be
 * taken to ensure the IPA pipeline is properly cleared.
 *
 * Return:	0 if successful, or a negative error code
 */
static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
{
	struct device *dev = &endpoint->ipa->pdev->dev;
	struct ipa *ipa = endpoint->ipa;
	struct gsi *gsi = &ipa->gsi;
	bool suspended = false;
	dma_addr_t addr;
	u32 retries;
	u32 len = 1;
	void *virt;
	int ret;

	virt = kzalloc(len, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	addr = dma_map_single(dev, virt, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, addr)) {
		ret = -ENOMEM;
		goto out_kfree;
	}

	/* Force close aggregation before issuing the reset */
	ipa_endpoint_force_close(endpoint);

	/* Reset and reconfigure the channel with the doorbell engine
	 * disabled.  Then poll until we know aggregation is no longer
	 * active.  We'll re-enable the doorbell (if appropriate) when
	 * we reset again below.
	 */
	gsi_channel_reset(gsi, endpoint->channel_id, false);

	/* Make sure the channel isn't suspended */
	suspended = ipa_endpoint_program_suspend(endpoint, false);

	/* Start channel and do a 1 byte read */
	ret = gsi_channel_start(gsi, endpoint->channel_id);
	if (ret)
		goto out_suspend_again;

	ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr);
	if (ret)
		goto err_endpoint_stop;

	/* Wait for aggregation to be closed on the channel */
	retries = IPA_ENDPOINT_RESET_AGGR_RETRY_MAX;
	do {
		if (!ipa_endpoint_aggr_active(endpoint))
			break;
		usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
	} while (retries--);

	/* Check one last time */
	if (ipa_endpoint_aggr_active(endpoint))
		dev_err(dev, "endpoint %u still active during reset\n",
			endpoint->endpoint_id);

	gsi_trans_read_byte_done(gsi, endpoint->channel_id);

	ret = gsi_channel_stop(gsi, endpoint->channel_id);
	if (ret)
		goto out_suspend_again;

	/* Finally, reset and reconfigure the channel again (re-enabling
	 * the doorbell engine if appropriate).  Sleep for 1 millisecond to
	 * complete the channel reset sequence.  Finish by suspending the
	 * channel again (if necessary).
	 */
	gsi_channel_reset(gsi, endpoint->channel_id, true);

	usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);

	goto out_suspend_again;

err_endpoint_stop:
	(void)gsi_channel_stop(gsi, endpoint->channel_id);
out_suspend_again:
	if (suspended)
		(void)ipa_endpoint_program_suspend(endpoint, true);
	dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
out_kfree:
	kfree(virt);

	return ret;
}

static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
{
	u32 channel_id = endpoint->channel_id;
	struct ipa *ipa = endpoint->ipa;
	bool special;
	int ret = 0;

	/* On IPA v3.5.1, if an RX endpoint is reset while aggregation
	 * is active, we need to handle things specially to recover.
	 * All other cases just need to reset the underlying GSI channel.
	 */
	special = ipa->version < IPA_VERSION_4_0 && !endpoint->toward_ipa &&
			endpoint->data->aggregation;
	if (special && ipa_endpoint_aggr_active(endpoint))
		ret = ipa_endpoint_reset_rx_aggr(endpoint);
	else
		gsi_channel_reset(&ipa->gsi, channel_id, true);

	if (ret)
		dev_err(&ipa->pdev->dev,
			"error %d resetting channel %u for endpoint %u\n",
			ret, endpoint->channel_id, endpoint->endpoint_id);
}

static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
{
	if (endpoint->toward_ipa) {
		/* Newer versions of IPA use GSI channel flow control
		 * instead of endpoint DELAY mode to prevent sending data.
		 * Flow control is disabled for newly-allocated channels,
		 * and we can assume flow control is not (ever) enabled
		 * for AP TX channels.
		 */
		if (endpoint->ipa->version < IPA_VERSION_4_2)
			ipa_endpoint_program_delay(endpoint, false);
	} else {
		/* Ensure suspend mode is off on all AP RX endpoints */
		(void)ipa_endpoint_program_suspend(endpoint, false);
	}
	ipa_endpoint_init_cfg(endpoint);
	ipa_endpoint_init_nat(endpoint);
	ipa_endpoint_init_hdr(endpoint);
	ipa_endpoint_init_hdr_ext(endpoint);
	ipa_endpoint_init_hdr_metadata_mask(endpoint);
	ipa_endpoint_init_mode(endpoint);
	ipa_endpoint_init_aggr(endpoint);
	if (!endpoint->toward_ipa)
		ipa_endpoint_init_hol_block_disable(endpoint);
	ipa_endpoint_init_deaggr(endpoint);
	ipa_endpoint_init_rsrc_grp(endpoint);
	ipa_endpoint_init_seq(endpoint);
	ipa_endpoint_status(endpoint);
}

int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
{
	struct ipa *ipa = endpoint->ipa;
	struct gsi *gsi = &ipa->gsi;
	int ret;

	ret = gsi_channel_start(gsi, endpoint->channel_id);
	if (ret) {
		dev_err(&ipa->pdev->dev,
			"error %d starting %cX channel %u for endpoint %u\n",
			ret, endpoint->toward_ipa ? 'T' : 'R',
			endpoint->channel_id, endpoint->endpoint_id);
		return ret;
	}

	if (!endpoint->toward_ipa) {
		ipa_interrupt_suspend_enable(ipa->interrupt,
					     endpoint->endpoint_id);
		ipa_endpoint_replenish_enable(endpoint);
	}

	ipa->enabled |= BIT(endpoint->endpoint_id);

	return 0;
}

void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
{
	u32 mask = BIT(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	struct gsi *gsi = &ipa->gsi;
	int ret;

	if (!(ipa->enabled & mask))
		return;

	ipa->enabled ^= mask;

	if (!endpoint->toward_ipa) {
		ipa_endpoint_replenish_disable(endpoint);
		ipa_interrupt_suspend_disable(ipa->interrupt,
					      endpoint->endpoint_id);
	}

	/* Note that if stop fails, the channel's state is not well-defined */
	ret = gsi_channel_stop(gsi, endpoint->channel_id);
	if (ret)
		dev_err(&ipa->pdev->dev,
			"error %d attempting to stop endpoint %u\n", ret,
			endpoint->endpoint_id);
}

void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
{
	struct device *dev = &endpoint->ipa->pdev->dev;
	struct gsi *gsi = &endpoint->ipa->gsi;
	int ret;

	if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
		return;

	if (!endpoint->toward_ipa) {
		ipa_endpoint_replenish_disable(endpoint);
		(void)ipa_endpoint_program_suspend(endpoint, true);
	}

	ret = gsi_channel_suspend(gsi, endpoint->channel_id);
	if (ret)
		dev_err(dev, "error %d suspending channel %u\n", ret,
			endpoint->channel_id);
}

void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
{
	struct device *dev = &endpoint->ipa->pdev->dev;
	struct gsi *gsi = &endpoint->ipa->gsi;
	int ret;

	if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
		return;

	if (!endpoint->toward_ipa)
		(void)ipa_endpoint_program_suspend(endpoint, false);

	ret = gsi_channel_resume(gsi, endpoint->channel_id);
	if (ret)
		dev_err(dev, "error %d resuming channel %u\n", ret,
			endpoint->channel_id);
	else if (!endpoint->toward_ipa)
		ipa_endpoint_replenish_enable(endpoint);
}

void ipa_endpoint_suspend(struct ipa *ipa)
{
	if (!ipa->setup_complete)
		return;

	if (ipa->modem_netdev)
		ipa_modem_suspend(ipa->modem_netdev);

	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
}

void ipa_endpoint_resume(struct ipa *ipa)
{
	if (!ipa->setup_complete)
		return;

	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);

	if (ipa->modem_netdev)
		ipa_modem_resume(ipa->modem_netdev);
}

static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
{
	struct gsi *gsi = &endpoint->ipa->gsi;
	u32 channel_id = endpoint->channel_id;

	/* Only AP endpoints get set up */
	if (endpoint->ee_id != GSI_EE_AP)
		return;

	endpoint->trans_tre_max = gsi_channel_trans_tre_max(gsi, channel_id);
	if (!endpoint->toward_ipa) {
		/* RX transactions require a single TRE, so the maximum
		 * backlog is the same as the maximum outstanding TREs.
		 */
		clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
		clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
		INIT_DELAYED_WORK(&endpoint->replenish_work,
				  ipa_endpoint_replenish_work);
	}

	ipa_endpoint_program(endpoint);

	endpoint->ipa->set_up |= BIT(endpoint->endpoint_id);
}

static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint)
{
	endpoint->ipa->set_up &= ~BIT(endpoint->endpoint_id);

	if (!endpoint->toward_ipa)
		cancel_delayed_work_sync(&endpoint->replenish_work);

	ipa_endpoint_reset(endpoint);
}

void ipa_endpoint_setup(struct ipa *ipa)
{
	u32 initialized = ipa->initialized;

	ipa->set_up = 0;
	while (initialized) {
		u32 endpoint_id = __ffs(initialized);

		initialized ^= BIT(endpoint_id);

		ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]);
	}
}

void ipa_endpoint_teardown(struct ipa *ipa)
{
	u32 set_up = ipa->set_up;

	while (set_up) {
		u32 endpoint_id = __fls(set_up);

		set_up ^= BIT(endpoint_id);

		ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]);
	}
	ipa->set_up = 0;
}
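
/* Example of the mask computation in ipa_endpoint_config() below, using
 * hypothetical FLAVOR_0 contents: with 16 consumer (TX) pipes, and 8
 * producer (RX) pipes starting at pipe 16, tx_mask is GENMASK(15, 0) =
 * 0x0000ffff and rx_mask is GENMASK(23, 16) = 0x00ff0000, so the
 * "available" mask would be 0x00ffffff.
 */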

int ipa_endpoint_config(struct ipa *ipa)
{
	struct device *dev = &ipa->pdev->dev;
	u32 initialized;
	u32 rx_base;
	u32 rx_mask;
	u32 tx_mask;
	int ret = 0;
	u32 max;
	u32 val;

	/* Prior to IPA v3.5, the FLAVOR_0 register was not supported.
	 * Furthermore, the endpoints were not grouped such that TX
	 * endpoint numbers started with 0 and RX endpoints had numbers
	 * higher than all TX endpoints, so we can't do the simple
	 * direction check used for newer hardware below.
	 *
	 * For hardware that doesn't support the FLAVOR_0 register,
	 * just set the available mask to support any endpoint, and
	 * assume the configuration is valid.
	 */
	if (ipa->version < IPA_VERSION_3_5) {
		ipa->available = ~0;
		return 0;
	}

	/* Find out about the endpoints supplied by the hardware, and ensure
	 * the highest one doesn't exceed the number we support.
	 */
	val = ioread32(ipa->reg_virt + IPA_REG_FLAVOR_0_OFFSET);

	/* Our RX is an IPA producer */
	rx_base = u32_get_bits(val, IPA_PROD_LOWEST_FMASK);
	max = rx_base + u32_get_bits(val, IPA_MAX_PROD_PIPES_FMASK);
	if (max > IPA_ENDPOINT_MAX) {
		dev_err(dev, "too many endpoints (%u > %u)\n",
			max, IPA_ENDPOINT_MAX);
		return -EINVAL;
	}
	rx_mask = GENMASK(max - 1, rx_base);

	/* Our TX is an IPA consumer */
	max = u32_get_bits(val, IPA_MAX_CONS_PIPES_FMASK);
	tx_mask = GENMASK(max - 1, 0);

	ipa->available = rx_mask | tx_mask;

	/* Check for initialized endpoints not supported by the hardware */
	if (ipa->initialized & ~ipa->available) {
		dev_err(dev, "unavailable endpoint id(s) 0x%08x\n",
			ipa->initialized & ~ipa->available);
		ret = -EINVAL;		/* Report other errors too */
	}

	initialized = ipa->initialized;
	while (initialized) {
		u32 endpoint_id = __ffs(initialized);
		struct ipa_endpoint *endpoint;

		initialized ^= BIT(endpoint_id);

		/* Make sure it's pointing in the right direction */
		endpoint = &ipa->endpoint[endpoint_id];
		if ((endpoint_id < rx_base) != endpoint->toward_ipa) {
			dev_err(dev, "endpoint id %u wrong direction\n",
				endpoint_id);
			ret = -EINVAL;
		}
	}

	return ret;
}

void ipa_endpoint_deconfig(struct ipa *ipa)
{
	ipa->available = 0;	/* Nothing more to do */
}

static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
				  const struct ipa_gsi_endpoint_data *data)
{
	struct ipa_endpoint *endpoint;

	endpoint = &ipa->endpoint[data->endpoint_id];

	if (data->ee_id == GSI_EE_AP)
		ipa->channel_map[data->channel_id] = endpoint;
	ipa->name_map[name] = endpoint;

	endpoint->ipa = ipa;
	endpoint->ee_id = data->ee_id;
	endpoint->channel_id = data->channel_id;
	endpoint->endpoint_id = data->endpoint_id;
	endpoint->toward_ipa = data->toward_ipa;
	endpoint->data = &data->endpoint.config;

	ipa->initialized |= BIT(endpoint->endpoint_id);
}

static void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint)
{
	endpoint->ipa->initialized &= ~BIT(endpoint->endpoint_id);

	memset(endpoint, 0, sizeof(*endpoint));
}

void ipa_endpoint_exit(struct ipa *ipa)
{
	u32 initialized = ipa->initialized;

	while (initialized) {
		u32 endpoint_id = __fls(initialized);

		initialized ^= BIT(endpoint_id);

		ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]);
	}
	memset(ipa->name_map, 0, sizeof(ipa->name_map));
	memset(ipa->channel_map, 0, sizeof(ipa->channel_map));
}

/* Returns a bitmask of endpoints that support filtering, or 0 on error */
u32 ipa_endpoint_init(struct ipa *ipa, u32 count,
		      const struct ipa_gsi_endpoint_data *data)
{
	enum ipa_endpoint_name name;
	u32 filter_map;

	BUILD_BUG_ON(!IPA_REPLENISH_BATCH);

	if (!ipa_endpoint_data_valid(ipa, count, data))
		return 0;	/* Error */

	ipa->initialized = 0;

	filter_map = 0;
	for (name = 0; name < count; name++, data++) {
		if (ipa_gsi_endpoint_data_empty(data))
			continue;	/* Skip over empty slots */

		ipa_endpoint_init_one(ipa, name, data);

		if (data->endpoint.filter_support)
			filter_map |= BIT(data->endpoint_id);
	}

	if (!ipa_filter_map_valid(ipa, filter_map))
		goto err_endpoint_exit;

	return filter_map;	/* Non-zero bitmask */

err_endpoint_exit:
	ipa_endpoint_exit(ipa);

	return 0;	/* Error */
}