#include "sh_eth.h"
+/* CPU <-> EDMAC endian convert */
+static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x)
+{
+ switch (mdp->edmac_endian) {
+ case EDMAC_LITTLE_ENDIAN:
+ return cpu_to_le32(x);
+ case EDMAC_BIG_ENDIAN:
+ return cpu_to_be32(x);
+ }
+ return x;
+}
+
+static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
+{
+ switch (mdp->edmac_endian) {
+ case EDMAC_LITTLE_ENDIAN:
+ return le32_to_cpu(x);
+ case EDMAC_BIG_ENDIAN:
+ return be32_to_cpu(x);
+ }
+ return x;
+}
+
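+/*
+ * Descriptor words are stored in whatever endianness the EDMAC has been
+ * configured for, so every access to rxdesc->status / txdesc->status
+ * below goes through these helpers, e.g.:
+ *	rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
+ *	desc_status = edmac_to_cpu(mdp, rxdesc->status);
+ */
+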
/*
* Program the hardware MAC address from dev->dev_addr.
*/
/* RX descriptor */
rxdesc = &mdp->rx_ring[i];
rxdesc->addr = (u32)skb->data & ~0x3UL;
- rxdesc->status = cpu_to_le32(RD_RACT | RD_RFP);
+ rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
/* The buffer length must be a multiple of 16 bytes. */
rxdesc->buffer_length = (mdp->rx_buf_sz + 16) & ~0x0F;
mdp->dirty_rx = (u32) (i - RX_RING_SIZE);
/* Mark the last entry as wrapping the ring. */
- rxdesc->status |= cpu_to_le32(RD_RDEL);
+ rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);
memset(mdp->tx_ring, 0, tx_ringsize);
for (i = 0; i < TX_RING_SIZE; i++) {
mdp->tx_skbuff[i] = NULL;
txdesc = &mdp->tx_ring[i];
- txdesc->status = cpu_to_le32(TD_TFP);
+ txdesc->status = cpu_to_edmac(mdp, TD_TFP);
txdesc->buffer_length = 0;
if (i == 0) {
- /* Rx descriptor address set */
+ /* Tx descriptor address set */
ctrl_outl((u32)txdesc, ioaddr + TDLAR);
#if defined(CONFIG_CPU_SUBTYPE_SH7763)
ctrl_outl((u32)txdesc, ioaddr + TDFAR);
#endif
}
}
- /* Rx descriptor address set */
+ /* Tx descriptor address set */
#if defined(CONFIG_CPU_SUBTYPE_SH7763)
ctrl_outl((u32)txdesc, ioaddr + TDFXR);
ctrl_outl(0x1, ioaddr + TDFFR);
#endif
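+ /* Mark the last entry as wrapping the ring. */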
- txdesc->status |= cpu_to_le32(TD_TDLE);
+ txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
}
/* Get skb and descriptor buffer */
for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
entry = mdp->dirty_tx % TX_RING_SIZE;
txdesc = &mdp->tx_ring[entry];
- if (txdesc->status & cpu_to_le32(TD_TACT))
+ if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
break;
/* Free the original skb. */
if (mdp->tx_skbuff[entry]) {
mdp->tx_skbuff[entry] = NULL;
freeNum++;
}
- txdesc->status = cpu_to_le32(TD_TFP);
+ txdesc->status = cpu_to_edmac(mdp, TD_TFP);
if (entry >= TX_RING_SIZE - 1)
- txdesc->status |= cpu_to_le32(TD_TDLE);
+ txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
mdp->stats.tx_packets++;
mdp->stats.tx_bytes += txdesc->buffer_length;
u32 desc_status, reserve = 0;
rxdesc = &mdp->rx_ring[entry];
- while (!(rxdesc->status & cpu_to_le32(RD_RACT))) {
- desc_status = le32_to_cpu(rxdesc->status);
+ while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
+ desc_status = edmac_to_cpu(mdp, rxdesc->status);
pkt_len = rxdesc->frame_length;
if (--boguscnt < 0)
mdp->stats.rx_packets++;
mdp->stats.rx_bytes += pkt_len;
}
- rxdesc->status |= cpu_to_le32(RD_RACT);
+ rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
entry = (++mdp->cur_rx) % RX_RING_SIZE;
}
}
if (entry >= RX_RING_SIZE - 1)
rxdesc->status |=
- cpu_to_le32(RD_RACT | RD_RFP | RD_RDEL);
+ cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
else
rxdesc->status |=
- cpu_to_le32(RD_RACT | RD_RFP);
+ cpu_to_edmac(mdp, RD_RACT | RD_RFP);
}
/* Restart Rx engine if stopped. */
txdesc->buffer_length = skb->len;
if (entry >= TX_RING_SIZE - 1)
- txdesc->status |= cpu_to_le32(TD_TACT | TD_TDLE);
+ txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
else
- txdesc->status |= cpu_to_le32(TD_TACT);
+ txdesc->status |= cpu_to_edmac(mdp, TD_TACT);
mdp->cur_tx++;
struct resource *res;
struct net_device *ndev = NULL;
struct sh_eth_private *mdp;
+ struct sh_eth_plat_data *pd;
/* get base addr */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mdp = netdev_priv(ndev);
spin_lock_init(&mdp->lock);
+ pd = (struct sh_eth_plat_data *)(pdev->dev.platform_data);
/* get PHY ID */
- mdp->phy_id = (int)pdev->dev.platform_data;
+ mdp->phy_id = pd->phy;
+ /* EDMAC endian */
+ mdp->edmac_endian = pd->edmac_endian;
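+ /*
+ * Illustrative only: the board setup code is expected to pass something
+ * like
+ *
+ *	static struct sh_eth_plat_data sh_eth_plat = {
+ *		.phy = 1,			(hypothetical PHY address)
+ *		.edmac_endian = EDMAC_LITTLE_ENDIAN,
+ *	};
+ *
+ * via pdev->dev.platform_data; .phy and .edmac_endian are the fields
+ * consumed above.
+ */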
/* set function */
ndev->open = sh_eth_open;
/* First device only init */
if (!devno) {
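+ /*
+ * ARSTR and SH_TSU_ADDR are only defined for CPUs that actually have
+ * the software reset register and the TSU block (SH7619, for example,
+ * defines neither), so both init steps are guarded on the defines.
+ */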
+#if defined(ARSTR)
/* reset device */
ctrl_outl(ARSTR_ARSTR, ARSTR);
mdelay(1);
+#endif
+#if defined(SH_TSU_ADDR)
/* TSU init (Init only) */
sh_eth_tsu_init(SH_TSU_ADDR);
+#endif
}
/* network device register */
ndev->name, CARDNAME, (u32) ndev->base_addr);
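+ /*
+ * The MAC address bytes are printed as a continuation of the line
+ * above, so the per-byte printk()s below must not start with their
+ * own KERN_INFO prefix.
+ */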
for (i = 0; i < 5; i++)
- printk(KERN_INFO "%02X:", ndev->dev_addr[i]);
- printk(KERN_INFO "%02X, IRQ %d.\n", ndev->dev_addr[i], ndev->irq);
+ printk("%02X:", ndev->dev_addr[i]);
+ printk("%02X, IRQ %d.\n", ndev->dev_addr[i], ndev->irq);
platform_set_drvdata(pdev, ndev);
#include <linux/netdevice.h>
#include <linux/phy.h>
+#include <asm/sh_eth.h>
+
#define CARDNAME "sh-eth"
#define TX_TIMEOUT (5*HZ)
#define TX_RING_SIZE 64 /* Tx ring size */
#else /* CONFIG_CPU_SUBTYPE_SH7763 */
# define RX_OFFSET 2 /* skb offset */
+#ifndef CONFIG_CPU_SUBTYPE_SH7619
/* Chip base address */
# define SH_TSU_ADDR 0xA7000804
# define ARSTR 0xA7000800
-
+#endif
/* Chip Registers */
/* E-DMAC */
# define EDMR 0x0000
FCFTR_RFD1 = 0x00000002, FCFTR_RFD0 = 0x00000001,
};
#define FIFO_F_D_RFF (FCFTR_RFF2|FCFTR_RFF1|FCFTR_RFF0)
+#ifndef CONFIG_CPU_SUBTYPE_SH7619
#define FIFO_F_D_RFD (FCFTR_RFD2|FCFTR_RFD1|FCFTR_RFD0)
+#else
+#define FIFO_F_D_RFD (FCFTR_RFD0)
+#endif
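+/*
+ * SH7619 appears to have smaller Tx/Rx FIFOs, hence the single RFD bit
+ * here and the reduced FIFO_SIZE_T/FIFO_SIZE_R values further below.
+ */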
/* Transfer descriptor bit */
enum TD_STS_BIT {
#ifdef CONFIG_CPU_SUBTYPE_SH7763
#define ECMR_CHG_DM (ECMR_TRCCM | ECMR_RZPF | ECMR_ZPF |\
ECMR_PFR | ECMR_RXF | ECMR_TXF | ECMR_MCT)
+#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
+#define ECMR_CHG_DM (ECMR_ZPF | ECMR_PFR | ECMR_RXF | ECMR_TXF)
#else
-#define ECMR_CHG_DM (ECMR_ZPF | ECMR_PFR ECMR_RXF | ECMR_TXF | ECMR_MCT)
+#define ECMR_CHG_DM (ECMR_ZPF | ECMR_PFR | ECMR_RXF | ECMR_TXF | ECMR_MCT)
#endif
/* ECSR */
/* FDR */
enum FIFO_SIZE_BIT {
+#ifndef CONFIG_CPU_SUBTYPE_SH7619
FIFO_SIZE_T = 0x00000700, FIFO_SIZE_R = 0x00000007,
+#else
+ FIFO_SIZE_T = 0x00000100, FIFO_SIZE_R = 0x00000001,
+#endif
};
enum phy_offsets {
PHY_CTRL = 0, PHY_STAT = 1, PHY_IDT1 = 2, PHY_IDT2 = 3,
#endif
u32 addr; /* TD2 */
u32 pad1; /* padding data */
-};
+} __attribute__((aligned(2), packed));
/*
* The sh ether Rx buffer descriptors.
#endif
u32 addr; /* RD2 */
u32 pad0; /* padding data */
-};
+} __attribute__((aligned(2), packed));
struct sh_eth_private {
dma_addr_t rx_desc_dma;
u32 cur_rx, dirty_rx; /* Producer/consumer ring indices */
u32 cur_tx, dirty_tx;
u32 rx_buf_sz; /* Based on MTU+slack. */
+ int edmac_endian;
/* MII transceiver section. */
u32 phy_id; /* PHY ID */
struct mii_bus *mii_bus; /* MDIO bus control */