static bool rswitch_is_queue_rxed(struct rswitch_gwca_queue *gq)
{
- struct rswitch_ext_ts_desc *desc = &gq->ts_ring[gq->dirty];
+ struct rswitch_ext_ts_desc *desc = &gq->rx_ring[gq->dirty];
if ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY)
return true;
if (gq->gptp) {
dma_free_coherent(ndev->dev.parent,
sizeof(struct rswitch_ext_ts_desc) *
- (gq->ring_size + 1), gq->ts_ring, gq->ring_dma);
- gq->ts_ring = NULL;
+ (gq->ring_size + 1), gq->rx_ring, gq->ring_dma);
+ gq->rx_ring = NULL;
} else {
dma_free_coherent(ndev->dev.parent,
sizeof(struct rswitch_ext_desc) *
- (gq->ring_size + 1), gq->ring, gq->ring_dma);
- gq->ring = NULL;
+ (gq->ring_size + 1), gq->tx_ring, gq->ring_dma);
+ gq->tx_ring = NULL;
}
if (!gq->dir_tx) {
rswitch_gwca_queue_alloc_skb(gq, 0, gq->ring_size);
if (gptp)
- gq->ts_ring = dma_alloc_coherent(ndev->dev.parent,
+ gq->rx_ring = dma_alloc_coherent(ndev->dev.parent,
sizeof(struct rswitch_ext_ts_desc) *
(gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
else
- gq->ring = dma_alloc_coherent(ndev->dev.parent,
- sizeof(struct rswitch_ext_desc) *
- (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
- if (!gq->ts_ring && !gq->ring)
+ gq->tx_ring = dma_alloc_coherent(ndev->dev.parent,
+ sizeof(struct rswitch_ext_desc) *
+ (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
+ if (!gq->rx_ring && !gq->tx_ring)
goto out;
i = gq->index / 32;
struct rswitch_private *priv,
struct rswitch_gwca_queue *gq)
{
- int tx_ring_size = sizeof(struct rswitch_ext_desc) * gq->ring_size;
+ int ring_size = sizeof(struct rswitch_ext_desc) * gq->ring_size;
struct rswitch_ext_desc *desc;
struct rswitch_desc *linkfix;
dma_addr_t dma_addr;
int i;
- memset(gq->ring, 0, tx_ring_size);
- for (i = 0, desc = gq->ring; i < gq->ring_size; i++, desc++) {
+ memset(gq->tx_ring, 0, ring_size);
+ for (i = 0, desc = gq->tx_ring; i < gq->ring_size; i++, desc++) {
if (!gq->dir_tx) {
dma_addr = dma_map_single(ndev->dev.parent,
gq->skbs[i]->data, PKT_BUF_SZ,
err:
if (!gq->dir_tx) {
- for (i--, desc = gq->ring; i >= 0; i--, desc++) {
+ for (i--, desc = gq->tx_ring; i >= 0; i--, desc++) {
dma_addr = rswitch_desc_get_dptr(&desc->desc);
dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ,
DMA_FROM_DEVICE);
return -ENOMEM;
}
-static int rswitch_gwca_queue_ts_fill(struct net_device *ndev,
- struct rswitch_gwca_queue *gq,
- int start_index, int num)
+static int rswitch_gwca_queue_ext_ts_fill(struct net_device *ndev,
+ struct rswitch_gwca_queue *gq,
+ int start_index, int num)
{
struct rswitch_device *rdev = netdev_priv(ndev);
struct rswitch_ext_ts_desc *desc;
for (i = 0; i < num; i++) {
index = (i + start_index) % gq->ring_size;
- desc = &gq->ts_ring[index];
+ desc = &gq->rx_ring[index];
if (!gq->dir_tx) {
dma_addr = dma_map_single(ndev->dev.parent,
gq->skbs[index]->data, PKT_BUF_SZ,
if (!gq->dir_tx) {
for (i--; i >= 0; i--) {
index = (i + start_index) % gq->ring_size;
- desc = &gq->ts_ring[index];
+ desc = &gq->rx_ring[index];
dma_addr = rswitch_desc_get_dptr(&desc->desc);
dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ,
DMA_FROM_DEVICE);
return -ENOMEM;
}
-static int rswitch_gwca_queue_ts_format(struct net_device *ndev,
- struct rswitch_private *priv,
- struct rswitch_gwca_queue *gq)
+static int rswitch_gwca_queue_ext_ts_format(struct net_device *ndev,
+ struct rswitch_private *priv,
+ struct rswitch_gwca_queue *gq)
{
- int tx_ts_ring_size = sizeof(struct rswitch_ext_ts_desc) * gq->ring_size;
+ int ring_size = sizeof(struct rswitch_ext_ts_desc) * gq->ring_size;
struct rswitch_ext_ts_desc *desc;
struct rswitch_desc *linkfix;
int err;
- memset(gq->ts_ring, 0, tx_ts_ring_size);
- err = rswitch_gwca_queue_ts_fill(ndev, gq, 0, gq->ring_size);
+ memset(gq->rx_ring, 0, ring_size);
+ err = rswitch_gwca_queue_ext_ts_fill(ndev, gq, 0, gq->ring_size);
if (err < 0)
return err;
- desc = &gq->ts_ring[gq->ring_size]; /* Last */
+ desc = &gq->rx_ring[gq->ring_size]; /* Last */
rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
desc->desc.die_dt = DT_LINKFIX;
struct rswitch_device *rdev = priv->rdev[index];
struct net_device *ndev = rdev->ndev;
- return rswitch_gwca_queue_ts_format(ndev, priv, rdev->rx_queue);
+ return rswitch_gwca_queue_ext_ts_format(ndev, priv, rdev->rx_queue);
}
static int rswitch_gwca_hw_init(struct rswitch_private *priv)
boguscnt = min_t(int, gq->ring_size, *quota);
limit = boguscnt;
- desc = &gq->ts_ring[gq->cur];
+ desc = &gq->rx_ring[gq->cur];
while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY) {
if (--boguscnt < 0)
break;
rdev->ndev->stats.rx_bytes += pkt_len;
gq->cur = rswitch_next_queue_index(gq, true, 1);
- desc = &gq->ts_ring[gq->cur];
+ desc = &gq->rx_ring[gq->cur];
}
num = rswitch_get_num_cur_queues(gq);
ret = rswitch_gwca_queue_alloc_skb(gq, gq->dirty, num);
if (ret < 0)
goto err;
- ret = rswitch_gwca_queue_ts_fill(ndev, gq, gq->dirty, num);
+ ret = rswitch_gwca_queue_ext_ts_fill(ndev, gq, gq->dirty, num);
if (ret < 0)
goto err;
gq->dirty = rswitch_next_queue_index(gq, false, num);
for (; rswitch_get_num_cur_queues(gq) > 0;
gq->dirty = rswitch_next_queue_index(gq, false, 1)) {
- desc = &gq->ring[gq->dirty];
+ desc = &gq->tx_ring[gq->dirty];
if (free_txed_only && (desc->desc.die_dt & DT_MASK) != DT_FEMPTY)
break;
}
gq->skbs[gq->cur] = skb;
- desc = &gq->ring[gq->cur];
+ desc = &gq->tx_ring[gq->cur];
rswitch_desc_set_dptr(&desc->desc, dma_addr);
desc->desc.info_ds = cpu_to_le16(skb->len);
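
Not part of the patch: below is a minimal, compilable sketch of the descriptor-polling pattern that rswitch_is_queue_rxed() applies to the renamed rx_ring. It uses simplified stand-in types and placeholder DT_* values, not the real definitions from the driver headers, and flattens the nested desc member into a single byte field; it only illustrates the "descriptor no longer FEMPTY means the hardware wrote it back" check.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder values; the real DT_* encodings live in the rswitch headers. */
#define DT_MASK   0xf0
#define DT_FEMPTY 0x60

struct ext_ts_desc {                   /* simplified stand-in for struct rswitch_ext_ts_desc */
	uint8_t die_dt;
};

struct gwca_queue {                    /* simplified stand-in for struct rswitch_gwca_queue */
	struct ext_ts_desc *rx_ring;   /* the member called ts_ring before the rename */
	unsigned int ring_size;
	unsigned int dirty;
};

/* Mirrors the rswitch_is_queue_rxed() check: the RX queue has something to
 * process once the descriptor at the dirty index is no longer FEMPTY. */
static bool queue_rxed(const struct gwca_queue *gq)
{
	const struct ext_ts_desc *desc = &gq->rx_ring[gq->dirty];

	return (desc->die_dt & DT_MASK) != DT_FEMPTY;
}

int main(void)
{
	struct ext_ts_desc ring[4] = { [0] = { .die_dt = DT_FEMPTY } };
	struct gwca_queue gq = { .rx_ring = ring, .ring_size = 4, .dirty = 0 };

	printf("rxed: %d\n", queue_rxed(&gq));	/* 0: descriptor still empty */
	ring[0].die_dt = 0x70;			/* pretend the hardware completed it */
	printf("rxed: %d\n", queue_rxed(&gq));	/* 1 */
	return 0;
}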