4 * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
5 * Copyright (c) 2008 Qumranet
6 * Based on work done by:
7 * Copyright (c) 2007 Dan Aloni
8 * Copyright (c) 2004 Antony T Curtis
10 * This library is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2 of the License, or (at your option) any later version.
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with this library; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
/* Debug tracing: one bit per message category, tested by DBGOUT().
 * NOTE(review): extraction dropped lines here — the enum's opening
 * "enum {" / closing "};" and the #ifdef DEBUG_E1000 / #else / #endif
 * that selects between the two DBGOUT definitions are missing. */
36 DEBUG_GENERAL, DEBUG_IO, DEBUG_MMIO, DEBUG_INTERRUPT,
37 DEBUG_RX, DEBUG_TX, DEBUG_MDIC, DEBUG_EEPROM,
38 DEBUG_UNKNOWN, DEBUG_TXSUM, DEBUG_TXERR, DEBUG_RXERR,
39 DEBUG_RXFILTER, DEBUG_NOTYET,
41 #define DBGBIT(x) (1<<DEBUG_##x)
/* Default: trace only TX errors and general messages. */
42 static int debugflags = DBGBIT(TXERR) | DBGBIT(GENERAL);
/* Debug-enabled variant: print to stderr when the category bit is set. */
44 #define DBGOUT(what, fmt, ...) do { \
45 if (debugflags & DBGBIT(what)) \
46 fprintf(stderr, "e1000: " fmt, ## __VA_ARGS__); \
49 #define DBGOUT(what, fmt, ...) do {} while (0)
/* PCI BAR sizes: 0x40 bytes of I/O ports, 128KB of MMIO registers. */
52 #define IOPORT_SIZE 0x40
53 #define PNPMMIO_SIZE 0x20000
/* Which 8254x variant to present to the guest (see notes below). */
57 * E1000_DEV_ID_82540EM works with Windows and Linux
58 * E1000_DEV_ID_82573L OK with windoze and Linux 2.6.22,
59 * appears to perform better than 82540EM, but breaks with Linux 2.6.18
60 * E1000_DEV_ID_82544GC_COPPER appears to work; not well tested
63 enum { E1000_DEVID = E1000_DEV_ID_82540EM };
/* PHY ID2 must agree with the chosen device ID or Intel's Windows
 * driver refuses to bring the link up.
 * NOTE(review): the enum wrapper around PHY_ID2_INIT was dropped by
 * extraction. */
66 * May need to specify additional MAC-to-PHY entries --
67 * Intel's Windows driver refuses to initialize unless they match
70 PHY_ID2_INIT = E1000_DEVID == E1000_DEV_ID_82573L ? 0xcc2 :
71 E1000_DEVID == E1000_DEV_ID_82544GC_COPPER ? 0xc30 :
72 /* default to E1000_DEV_ID_82540EM */ 0xc20
/* Per-device emulation state.
 * NOTE(review): extraction dropped many members (the embedded line
 * numbers jump 75→80, 82→85, 85→88, 94→108, 108→112) — the PCIDevice
 * base, VLANClientState pointer, mmio_index, and the struct wrappers
 * for the tx-context ("tx") and EEPROM-shift ("eecd_state") sub-state
 * are not visible here; confirm against the full file. */
75 typedef struct E1000State_st {
/* MAC registers, indexed by (register offset >> 2) — see defreg(). */
80 uint32_t mac_reg[0x8000];
81 uint16_t phy_reg[0x20];
82 uint16_t eeprom_data[64];
85 uint32_t rxbuf_min_shift;
/* TX context-descriptor scratch: assembled frame header and payload. */
88 unsigned char header[256];
89 unsigned char vlan_header[4];
/* vlan[] sits directly before data[] so a VLAN-tagged frame can be
 * sent as one contiguous buffer (see xmit_seg) — layout-dependent. */
90 unsigned char vlan[4];
91 unsigned char data[0x10000];
93 unsigned char sum_needed;
94 unsigned char vlan_needed;
108 char cptse; // current packet tse bit
/* EEPROM bit-banging state, per the Microwire serial protocol. */
112 uint32_t val_in; // shifted in from guest driver
/* Map each E1000_<reg> byte offset to a mac_reg[] word index.
 * NOTE(review): the "enum {" opener and "};" closer around this list
 * were dropped by extraction. */
120 #define defreg(x) x = (E1000_##x>>2)
122 defreg(CTRL), defreg(EECD), defreg(EERD), defreg(GPRC),
123 defreg(GPTC), defreg(ICR), defreg(ICS), defreg(IMC),
124 defreg(IMS), defreg(LEDCTL), defreg(MANC), defreg(MDIC),
125 defreg(MPC), defreg(PBA), defreg(RCTL), defreg(RDBAH),
126 defreg(RDBAL), defreg(RDH), defreg(RDLEN), defreg(RDT),
127 defreg(STATUS), defreg(SWSM), defreg(TCTL), defreg(TDBAH),
128 defreg(TDBAL), defreg(TDH), defreg(TDLEN), defreg(TDT),
129 defreg(TORH), defreg(TORL), defreg(TOTH), defreg(TOTL),
130 defreg(TPR), defreg(TPT), defreg(TXDCTL), defreg(WUFC),
131 defreg(RA), defreg(MTA), defreg(CRCERRS),defreg(VFTA),
/* Per-PHY-register access rights, consulted by set_mdic(): readable,
 * writable, or both.  Registers absent from the table are neither.
 * NOTE(review): the table's closing "};" was dropped by extraction. */
135 enum { PHY_R = 1, PHY_W = 2, PHY_RW = PHY_R | PHY_W };
136 static const char phy_regcap[0x20] = {
137 [PHY_STATUS] = PHY_R, [M88E1000_EXT_PHY_SPEC_CTRL] = PHY_RW,
138 [PHY_ID1] = PHY_R, [M88E1000_PHY_SPEC_CTRL] = PHY_RW,
139 [PHY_CTRL] = PHY_RW, [PHY_1000T_CTRL] = PHY_RW,
140 [PHY_LP_ABILITY] = PHY_R, [PHY_1000T_STATUS] = PHY_R,
141 [PHY_AUTONEG_ADV] = PHY_RW, [M88E1000_RX_ERR_CNTR] = PHY_R,
142 [PHY_ID2] = PHY_R, [M88E1000_PHY_SPEC_STATUS] = PHY_R
/* PCI I/O-port BAR map callback.  Only logs; the visible code does not
 * register any ioport handlers (legacy I/O space is effectively
 * unimplemented).  NOTE(review): the return-type line and braces were
 * dropped by extraction. */
146 ioport_map(PCIDevice *pci_dev, int region_num, uint32_t addr,
147 uint32_t size, int type)
149 DBGOUT(IO, "e1000_ioport_map addr=0x%04x size=0x%08x\n", addr, size);
/* Drive the PCI INTx line: asserted iff any cause bit in ICR is also
 * enabled in IMS.  NOTE(review): return-type line/braces dropped by
 * extraction. */
153 update_irqs(E1000State *s)
155 qemu_set_irq(s->dev.irq[0], (s->mac_reg[IMS] & s->mac_reg[ICR]) != 0);
/* Replace ICR with val and refresh the IRQ line.  The INT_ASSERTED
 * bit is OR'd in — presumably only when val is non-zero; the guard
 * line appears to have been dropped by extraction (160-161 missing),
 * along with the trailing update_irqs() call — verify in full file. */
159 set_interrupt_cause(E1000State *s, int index, uint32_t val)
162 val |= E1000_ICR_INT_ASSERTED;
163 s->mac_reg[ICR] = val;
/* ICS write handler: raising cause bits ORs them into the pending ICR
 * via set_interrupt_cause().  Also used internally to signal events. */
168 set_ics(E1000State *s, int index, uint32_t val)
170 DBGOUT(INTERRUPT, "set_ics %x, ICR %x, IMR %x\n", val, s->mac_reg[ICR],
172 set_interrupt_cause(s, 0, val | s->mac_reg[ICR]);
/* Decode the RCTL buffer-size field (BSEX + SZ bits) into a byte
 * count.  NOTE(review): the "switch (v) {" opener and each case's
 * "return <size>;" line were dropped by extraction — only the case
 * labels survive.  Expected mapping per the 8254x spec: BSEX cases
 * give 16384/8192/4096, plain cases 2048/1024/512/256. */
176 rxbufsize(uint32_t v)
178 v &= E1000_RCTL_BSEX | E1000_RCTL_SZ_16384 | E1000_RCTL_SZ_8192 |
179 E1000_RCTL_SZ_4096 | E1000_RCTL_SZ_2048 | E1000_RCTL_SZ_1024 |
180 E1000_RCTL_SZ_512 | E1000_RCTL_SZ_256;
182 case E1000_RCTL_BSEX | E1000_RCTL_SZ_16384:
184 case E1000_RCTL_BSEX | E1000_RCTL_SZ_8192:
186 case E1000_RCTL_BSEX | E1000_RCTL_SZ_4096:
188 case E1000_RCTL_SZ_1024:
190 case E1000_RCTL_SZ_512:
192 case E1000_RCTL_SZ_256:
/* CTRL write handler: store the value but never latch RST, since a
 * real device's reset bit reads back as zero once reset completes. */
199 set_ctrl(E1000State *s, int index, uint32_t val)
201 /* RST is self clearing */
202 s->mac_reg[CTRL] = val & ~E1000_CTRL_RST;
/* RCTL write handler: cache the decoded RX buffer size and the
 * RDMTS (descriptor-minimum-threshold) shift for e1000_receive(). */
206 set_rx_control(E1000State *s, int index, uint32_t val)
208 s->mac_reg[RCTL] = val;
209 s->rxbuf_size = rxbufsize(val);
/* RDMTS field selects 1/2, 1/4 or 1/8 of RDLEN as the low-water mark;
 * stored as a right-shift amount. */
210 s->rxbuf_min_shift = ((val / E1000_RCTL_RDMTS_QUAT) & 3) + 1;
211 DBGOUT(RX, "RCTL: %d, mac_reg[RCTL] = 0x%x\n", s->mac_reg[RDT],
/* MDIC write handler: emulate MDIO access to the single emulated PHY
 * (address 1).  Reads merge the PHY register into the data field;
 * writes store into phy_reg[], both gated by phy_regcap[].  Always
 * sets READY and raises the MDAC interrupt cause when done.
 * NOTE(review): several closing-brace/else lines were dropped by
 * extraction (228, 235, 237). */
216 set_mdic(E1000State *s, int index, uint32_t val)
218 uint32_t data = val & E1000_MDIC_DATA_MASK;
219 uint32_t addr = ((val & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);
/* Only PHY address 1 exists; anything else reports MDIC_ERROR. */
221 if ((val & E1000_MDIC_PHY_MASK) >> E1000_MDIC_PHY_SHIFT != 1) // phy #
222 val = s->mac_reg[MDIC] | E1000_MDIC_ERROR;
223 else if (val & E1000_MDIC_OP_READ) {
224 DBGOUT(MDIC, "MDIC read reg 0x%x\n", addr);
225 if (!(phy_regcap[addr] & PHY_R)) {
226 DBGOUT(MDIC, "MDIC read reg %x unhandled\n", addr);
227 val |= E1000_MDIC_ERROR;
/* Clear the stale data bits then merge in the PHY register value. */
229 val = (val ^ data) | s->phy_reg[addr];
230 } else if (val & E1000_MDIC_OP_WRITE) {
231 DBGOUT(MDIC, "MDIC write reg 0x%x, value 0x%x\n", addr, data);
232 if (!(phy_regcap[addr] & PHY_W)) {
233 DBGOUT(MDIC, "MDIC write reg %x unhandled\n", addr);
234 val |= E1000_MDIC_ERROR;
236 s->phy_reg[addr] = data;
238 s->mac_reg[MDIC] = val | E1000_MDIC_READY;
/* Signal MDI/O access complete to the guest driver. */
239 set_ics(s, 0, E1000_ICR_MDAC);
/* EECD read handler: present EEPROM-present + grant plus the bits the
 * guest last wrote, and drive DO with the current output bit of the
 * Microwire read in progress.  NOTE(review): the "return ret;" line
 * and braces were dropped by extraction. */
243 get_eecd(E1000State *s, int index)
245 uint32_t ret = E1000_EECD_PRES|E1000_EECD_GNT | s->eecd_state.old_eecd;
247 DBGOUT(EEPROM, "reading eeprom bit %d (reading %d)\n",
248 s->eecd_state.bitnum_out, s->eecd_state.reading);
/* When idle DO floats high; when reading, emit bit (15 - bitnum_out&15)
 * of word (bitnum_out >> 4), MSB first per Microwire framing. */
249 if (!s->eecd_state.reading ||
250 ((s->eeprom_data[(s->eecd_state.bitnum_out >> 4) & 0x3f] >>
251 ((s->eecd_state.bitnum_out & 0xf) ^ 0xf))) & 1)
252 ret |= E1000_EECD_DO;
/* EECD write handler: bit-bang the Microwire EEPROM protocol.  The
 * guest toggles SK with data on DI; after 9 clocked-in bits (start bit
 * + 3-bit opcode + 6-bit address, low 6 bits = word address) a READ
 * opcode switches the state machine to shifting data out via
 * get_eecd().  NOTE(review): several "return;" / brace lines were
 * dropped by extraction (264, 267-268, 271-272, 280). */
257 set_eecd(E1000State *s, int index, uint32_t val)
259 uint32_t oldval = s->eecd_state.old_eecd;
261 s->eecd_state.old_eecd = val & (E1000_EECD_SK | E1000_EECD_CS |
262 E1000_EECD_DI|E1000_EECD_FWE_MASK|E1000_EECD_REQ);
263 if (!(E1000_EECD_SK & (val ^ oldval))) // no clock edge
/* Output bit advances on the falling edge of SK. */
265 if (!(E1000_EECD_SK & val)) { // falling edge
266 s->eecd_state.bitnum_out++;
269 if (!(val & E1000_EECD_CS)) { // rising, no CS (EEPROM reset)
270 memset(&s->eecd_state, 0, sizeof s->eecd_state);
/* Rising edge with CS: shift DI into the command/address register. */
273 s->eecd_state.val_in <<= 1;
274 if (val & E1000_EECD_DI)
275 s->eecd_state.val_in |= 1;
276 if (++s->eecd_state.bitnum_in == 9 && !s->eecd_state.reading) {
/* -1 because the first falling edge pre-increments bitnum_out. */
277 s->eecd_state.bitnum_out = ((s->eecd_state.val_in & 0x3f)<<4)-1;
278 s->eecd_state.reading = (((s->eecd_state.val_in >> 6) & 7) ==
279 EEPROM_READ_OPCODE_MICROWIRE);
281 DBGOUT(EEPROM, "eeprom bitnum in %d out %d, reading %d\n",
282 s->eecd_state.bitnum_in, s->eecd_state.bitnum_out,
283 s->eecd_state.reading);
/* EERD read handler: the "fast" EEPROM read path.  Returns the word
 * addressed by the last EERD write with the DONE bit set; out-of-range
 * addresses fall through (the early-return body at original line 292
 * was dropped by extraction — verify in full file). */
287 flash_eerd_read(E1000State *s, int x)
289 unsigned int index, r = s->mac_reg[EERD] & ~E1000_EEPROM_RW_REG_START;
291 if ((index = r >> E1000_EEPROM_RW_ADDR_SHIFT) > EEPROM_CHECKSUM_REG)
293 return (s->eeprom_data[index] << E1000_EEPROM_RW_REG_DATA) |
294 E1000_EEPROM_RW_REG_DONE | r;
/* Compute an Internet checksum over data[css..) and store it
 * big-endian at data+sloc.  NOTE(review): the lines clamping the
 * range with cse and declaring 'sum' (original 299-304) were dropped
 * by extraction, so the cse parameter's handling is not visible. */
298 putsum(uint8_t *data, uint32_t n, uint32_t sloc, uint32_t css, uint32_t cse)
305 sum = net_checksum_add(n-css, data+css);
306 cpu_to_be16wu((uint16_t *)(data + sloc),
307 net_checksum_finish(sum));
/* Small VLAN predicates (return-type lines/braces dropped by
 * extraction).  vlan_enabled: CTRL.VME set (tag stripping/insertion
 * active). */
312 vlan_enabled(E1000State *s)
314 return ((s->mac_reg[CTRL] & E1000_CTRL_VME) != 0);
/* RCTL.VFE set: filter received frames against the VFTA table. */
318 vlan_rx_filter_enabled(E1000State *s)
320 return ((s->mac_reg[RCTL] & E1000_RCTL_VFE) != 0);
/* True when the Ethertype at offset 12 equals the configured VET. */
324 is_vlan_packet(E1000State *s, const uint8_t *buf)
326 return (be16_to_cpup((uint16_t *)(buf + 12)) ==
327 le16_to_cpup((uint16_t *)(s->mac_reg + VET)));
/* True when a TX data descriptor requests VLAN insertion (VLE). */
331 is_vlan_txd(uint32_t txd_lower)
333 return ((txd_lower & E1000_TXD_CMD_VLE) != 0);
/* Emit one frame (or one TSO segment) from the tx scratch buffer:
 * patch IP total-length/ID and TCP seq/flags for TSO, fold the
 * pseudo-header length into the TCP checksum, apply requested IP/TCP
 * checksums, optionally splice in the VLAN tag, hand the frame to the
 * VLAN client, and bump the TX statistics counters.
 * NOTE(review): many interior lines were dropped by extraction
 * (variable declarations, several closing braces, the TPT/GPTC counter
 * updates around original 384-389) — statement order here is partial. */
337 xmit_seg(E1000State *s)
340 unsigned int frames = s->tx.tso_frames, css, sofar, n;
341 struct e1000_tx *tp = &s->tx;
343 if (tp->tse && tp->cptse) {
345 DBGOUT(TXSUM, "frames %d size %d ipcss %d\n",
346 frames, tp->size, css);
347 if (tp->ip) { // IPv4
/* Fix IP total length (at css+2) and IP ID (at css+4, +frames). */
348 cpu_to_be16wu((uint16_t *)(tp->data+css+2),
350 cpu_to_be16wu((uint16_t *)(tp->data+css+4),
351 be16_to_cpup((uint16_t *)(tp->data+css+4))+frames);
353 cpu_to_be16wu((uint16_t *)(tp->data+css+4),
356 len = tp->size - css;
357 DBGOUT(TXSUM, "tcp %d tucss %d len %d\n", tp->tcp, css, len);
359 sofar = frames * tp->mss;
/* Advance the TCP sequence number by bytes already segmented. */
360 cpu_to_be32wu((uint32_t *)(tp->data+css+4), // seq
361 be32_to_cpupu((uint32_t *)(tp->data+css+4))+sofar);
/* Suppress PSH/FIN on every segment but the last. */
362 if (tp->paylen - sofar > tp->mss)
363 tp->data[css + 13] &= ~9; // PSH, FIN
365 cpu_to_be16wu((uint16_t *)(tp->data+css+4), len);
366 if (tp->sum_needed & E1000_TXD_POPTS_TXSM) {
367 // add pseudo-header length before checksum calculation
368 sp = (uint16_t *)(tp->data + tp->tucso);
369 cpu_to_be16wu(sp, be16_to_cpup(sp) + len);
374 if (tp->sum_needed & E1000_TXD_POPTS_TXSM)
375 putsum(tp->data, tp->size, tp->tucso, tp->tucss, tp->tucse);
376 if (tp->sum_needed & E1000_TXD_POPTS_IXSM)
377 putsum(tp->data, tp->size, tp->ipcso, tp->ipcse, tp->ipcse);
378 if (tp->vlan_needed) {
/* Relies on vlan[] abutting data[]: copy the 12-byte MAC header into
 * vlan[] (spilling into data[0..7]), insert the 4-byte tag at
 * data+8, then send the contiguous tagged frame from vlan[]. */
379 memmove(tp->vlan, tp->data, 12);
380 memcpy(tp->data + 8, tp->vlan_header, 4);
381 qemu_send_packet(s->vc, tp->vlan, tp->size + 4);
383 qemu_send_packet(s->vc, tp->data, tp->size);
/* 64-bit TOTL/TOTH octet counter: carry into TOTH on wrap (the TOTH
 * increment line was dropped by extraction). */
386 n = s->mac_reg[TOTL];
387 if ((s->mac_reg[TOTL] += s->tx.size) < n)
/* Consume one TX descriptor: a context descriptor loads the offload
 * parameters into s->tx; a data descriptor appends its buffer to the
 * scratch frame, segmenting at the TSO boundary, and triggers
 * xmit_seg() at end-of-packet.
 * NOTE(review): extraction dropped interior lines (the legacy-
 * descriptor branch around 426-428, the TSO hdr/msh setup around
 * 441-444, the per-segment xmit_seg() calls and size resets around
 * 450-456, and the tail reset at 469-475) — the do/while structure is
 * only partially visible. */
392 process_tx_desc(E1000State *s, struct e1000_tx_desc *dp)
394 uint32_t txd_lower = le32_to_cpu(dp->lower.data);
395 uint32_t dtype = txd_lower & (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D);
396 unsigned int split_size = txd_lower & 0xffff, bytes, sz, op;
397 unsigned int msh = 0xfffff, hdr = 0;
399 struct e1000_context_desc *xp = (struct e1000_context_desc *)dp;
400 struct e1000_tx *tp = &s->tx;
402 if (dtype == E1000_TXD_CMD_DEXT) { // context descriptor
/* Latch all checksum/TSO offload fields for subsequent data descs. */
403 op = le32_to_cpu(xp->cmd_and_length);
404 tp->ipcss = xp->lower_setup.ip_fields.ipcss;
405 tp->ipcso = xp->lower_setup.ip_fields.ipcso;
406 tp->ipcse = le16_to_cpu(xp->lower_setup.ip_fields.ipcse);
407 tp->tucss = xp->upper_setup.tcp_fields.tucss;
408 tp->tucso = xp->upper_setup.tcp_fields.tucso;
409 tp->tucse = le16_to_cpu(xp->upper_setup.tcp_fields.tucse);
410 tp->paylen = op & 0xfffff;
411 tp->hdr_len = xp->tcp_seg_setup.fields.hdr_len;
412 tp->mss = le16_to_cpu(xp->tcp_seg_setup.fields.mss);
413 tp->ip = (op & E1000_TXD_CMD_IP) ? 1 : 0;
414 tp->tcp = (op & E1000_TXD_CMD_TCP) ? 1 : 0;
415 tp->tse = (op & E1000_TXD_CMD_TSE) ? 1 : 0;
/* Guess a checksum-offset when the guest left tucso zero. */
417 if (tp->tucso == 0) { // this is probably wrong
418 DBGOUT(TXSUM, "TCP/UDP: cso 0!\n")
419 tp->tucso = tp->tucss + (tp->tcp ? 16 : 6);
422 } else if (dtype == (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D)) {
424 tp->sum_needed = le32_to_cpu(dp->upper.data) >> 8;
425 tp->cptse = ( txd_lower & E1000_TXD_CMD_TSE ) ? 1 : 0;
/* Capture the VLAN tag once per packet (TSO start or single EOP). */
430 if (vlan_enabled(s) && is_vlan_txd(txd_lower) &&
431 (tp->cptse || txd_lower & E1000_TXD_CMD_EOP)) {
433 cpu_to_be16wu((uint16_t *)(tp->vlan_header),
434 le16_to_cpup((uint16_t *)(s->mac_reg + VET)));
435 cpu_to_be16wu((uint16_t *)(tp->vlan_header + 2),
436 le16_to_cpu(dp->upper.fields.special));
439 addr = le64_to_cpu(dp->buffer_addr);
440 if (tp->tse && tp->cptse) {
/* TSO path: copy up to the segment high-water mark (msh), remember
 * the header bytes, and loop per segment. */
445 if (tp->size + bytes > msh)
446 bytes = msh - tp->size;
447 cpu_physical_memory_read(addr, tp->data + tp->size, bytes);
448 if ((sz = tp->size + bytes) >= hdr && tp->size < hdr)
449 memmove(tp->header, tp->data, hdr);
454 memmove(tp->data, tp->header, hdr);
457 } while (split_size -= bytes);
458 } else if (!tp->tse && tp->cptse) {
459 // context descriptor TSE is not set, while data descriptor TSE is set
460 DBGOUT(TXERR, "TCP segmentaion Error\n");
462 cpu_physical_memory_read(addr, tp->data + tp->size, split_size);
463 tp->size += split_size;
/* Accumulate until EOP; then emit the final (or only) segment. */
466 if (!(txd_lower & E1000_TXD_CMD_EOP))
468 if (!(tp->tse && tp->cptse && tp->size < hdr))
/* Write descriptor status back to guest memory when the descriptor
 * requested it (RS/RPS): set DD, clear error bits, and store only the
 * 'upper' dword in place.  Returns the interrupt cause to accumulate
 * (TXDW), or falls through to 0 for descriptors without RS/RPS (the
 * early-return body at original 483 was dropped by extraction). */
478 txdesc_writeback(target_phys_addr_t base, struct e1000_tx_desc *dp)
480 uint32_t txd_upper, txd_lower = le32_to_cpu(dp->lower.data);
482 if (!(txd_lower & (E1000_TXD_CMD_RS|E1000_TXD_CMD_RPS)))
484 txd_upper = (le32_to_cpu(dp->upper.data) | E1000_TXD_STAT_DD) &
485 ~(E1000_TXD_STAT_EC | E1000_TXD_STAT_LC | E1000_TXD_STAT_TU);
486 dp->upper.data = cpu_to_le32(txd_upper);
/* Only the status dword is written back, at its offset within the
 * descriptor. */
487 cpu_physical_memory_write(base + ((char *)&dp->upper - (char *)dp),
488 (void *)&dp->upper, sizeof(dp->upper));
489 return E1000_ICR_TXDW;
/* Drain the TX ring: walk descriptors from TDH to TDT, processing and
 * writing back each one, wrapping TDH at TDLEN, and finally raise the
 * accumulated interrupt causes.  NOTE(review): extraction dropped the
 * early return after the "tx disabled" log, the TDH wrap line after
 * original 516, and the break on full-ring wraparound. */
493 start_xmit(E1000State *s)
495 target_phys_addr_t base;
496 struct e1000_tx_desc desc;
497 uint32_t tdh_start = s->mac_reg[TDH], cause = E1000_ICS_TXQE;
499 if (!(s->mac_reg[TCTL] & E1000_TCTL_EN)) {
500 DBGOUT(TX, "tx disabled\n");
504 while (s->mac_reg[TDH] != s->mac_reg[TDT]) {
/* Descriptor base = TDBAH:TDBAL + TDH * sizeof(desc). */
505 base = ((uint64_t)s->mac_reg[TDBAH] << 32) + s->mac_reg[TDBAL] +
506 sizeof(struct e1000_tx_desc) * s->mac_reg[TDH];
507 cpu_physical_memory_read(base, (void *)&desc, sizeof(desc));
509 DBGOUT(TX, "index %d: %p : %x %x\n", s->mac_reg[TDH],
510 (void *)(intptr_t)desc.buffer_addr, desc.lower.data,
513 process_tx_desc(s, &desc);
514 cause |= txdesc_writeback(base, &desc);
516 if (++s->mac_reg[TDH] * sizeof(desc) >= s->mac_reg[TDLEN])
519 * the following could happen only if guest sw assigns
520 * bogus values to TDT/TDLEN.
521 * there's nothing too intelligent we could do about this.
523 if (s->mac_reg[TDH] == tdh_start) {
524 DBGOUT(TXERR, "TDH wraparound @%x, TDT %x, TDLEN %x\n",
525 tdh_start, s->mac_reg[TDT], s->mac_reg[TDLEN]);
/* Report all accumulated causes (TXQE, TXDW, ...) in one shot. */
529 set_ics(s, 0, cause);
/* Decide whether an incoming frame should be accepted: VLAN VFTA
 * filter, promiscuous bits, broadcast, exact unicast match against the
 * RA array, then the multicast hash against MTA.  Returns non-zero to
 * accept.  NOTE(review): extraction dropped the "return 0/1" lines
 * after each test and several braces — only the conditions survive. */
533 receive_filter(E1000State *s, const uint8_t *buf, int size)
535 static uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
/* RCTL.MO selects which 12 bits of the address feed the MTA hash. */
536 static int mta_shift[] = {4, 3, 2, 0};
537 uint32_t f, rctl = s->mac_reg[RCTL], ra[2], *rp;
539 if (is_vlan_packet(s, buf) && vlan_rx_filter_enabled(s)) {
540 uint16_t vid = be16_to_cpup((uint16_t *)(buf + 14));
541 uint32_t vfta = le32_to_cpup((uint32_t *)(s->mac_reg + VFTA) +
542 ((vid >> 5) & 0x7f));
543 if ((vfta & (1 << (vid & 0x1f))) == 0)
547 if (rctl & E1000_RCTL_UPE) // promiscuous
550 if ((buf[0] & 1) && (rctl & E1000_RCTL_MPE)) // promiscuous mcast
553 if ((rctl & E1000_RCTL_BAM) && !memcmp(buf, bcast, sizeof bcast))
/* Exact match against each valid (RAH.AV) receive-address pair. */
556 for (rp = s->mac_reg + RA; rp < s->mac_reg + RA + 32; rp += 2) {
557 if (!(rp[1] & E1000_RAH_AV))
559 ra[0] = cpu_to_le32(rp[0]);
560 ra[1] = cpu_to_le32(rp[1]);
561 if (!memcmp(buf, (uint8_t *)ra, 6)) {
563 "unicast match[%d]: %02x:%02x:%02x:%02x:%02x:%02x\n",
564 (int)(rp - s->mac_reg - RA)/2,
565 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
569 DBGOUT(RXFILTER, "unicast mismatch: %02x:%02x:%02x:%02x:%02x:%02x\n",
570 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
/* Imperfect (hash) multicast filter: 12-bit index into the 4096-bit
 * MTA bit array. */
572 f = mta_shift[(rctl >> E1000_RCTL_MO_SHIFT) & 3];
573 f = (((buf[5] << 8) | buf[4]) >> f) & 0xfff;
574 if (s->mac_reg[MTA + (f >> 5)] & (1 << (f & 0x1f)))
577 "dropping, inexact filter mismatch: %02x:%02x:%02x:%02x:%02x:%02x MO %d MTA[%d] %x\n",
578 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5],
579 (rctl >> E1000_RCTL_MO_SHIFT) & 3, f >> 5,
580 s->mac_reg[MTA + (f >> 5)]);
/* VLAN-client callback: mirror the host-side link state into
 * STATUS.LU and raise LSC when it actually changed.  NOTE(review):
 * the "if (vc->link_down)"-style condition between lines 589 and 592
 * was dropped by extraction. */
586 e1000_set_link_status(VLANClientState *vc)
588 E1000State *s = vc->opaque;
589 uint32_t old_status = s->mac_reg[STATUS];
592 s->mac_reg[STATUS] &= ~E1000_STATUS_LU;
594 s->mac_reg[STATUS] |= E1000_STATUS_LU;
596 if (s->mac_reg[STATUS] != old_status)
597 set_ics(s, 0, E1000_ICR_LSC);
/* Packet-layer gate: receive only while RCTL.EN is set. */
601 e1000_can_receive(void *opaque)
603 E1000State *s = opaque;
605 return (s->mac_reg[RCTL] & E1000_RCTL_EN);
/* Deliver an incoming frame to the guest: filter it, strip the VLAN
 * tag if enabled, copy it into RX-ring descriptor buffers, advance
 * RDH, update RX statistics, and raise RXT0/RXDMT0/RXO causes as
 * appropriate.  NOTE(review): extraction dropped interior lines (the
 * size-check early returns, vlan_offset/size adjustment around 635-637,
 * the do{ opener, RDH wrap and check_rxov lines around 661-662, the
 * TORH carry and TPR/GPRC updates around 672-677, and the final
 * set_ics at the end). */
609 e1000_receive(void *opaque, const uint8_t *buf, int size)
611 E1000State *s = opaque;
612 struct e1000_rx_desc desc;
613 target_phys_addr_t base;
616 uint16_t vlan_special = 0;
617 uint8_t vlan_status = 0, vlan_offset = 0;
619 if (!(s->mac_reg[RCTL] & E1000_RCTL_EN))
622 if (size > s->rxbuf_size) {
623 DBGOUT(RX, "packet too large for buffers (%d > %d)\n", size,
628 if (!receive_filter(s, buf, size))
/* Strip the 802.1Q tag in place: save the VID for desc.special, then
 * slide the MAC header 4 bytes forward over the tag. */
631 if (vlan_enabled(s) && is_vlan_packet(s, buf)) {
632 vlan_special = cpu_to_le16(be16_to_cpup((uint16_t *)(buf + 14)));
633 memmove((void *)(buf + 4), buf, 12);
634 vlan_status = E1000_RXD_STAT_VP;
639 rdh_start = s->mac_reg[RDH];
640 size += 4; // for the header
/* Ring full (head caught tail after a wrap): report RX overrun. */
642 if (s->mac_reg[RDH] == s->mac_reg[RDT] && s->check_rxov) {
643 set_ics(s, 0, E1000_ICS_RXO);
646 base = ((uint64_t)s->mac_reg[RDBAH] << 32) + s->mac_reg[RDBAL] +
647 sizeof(desc) * s->mac_reg[RDH];
648 cpu_physical_memory_read(base, (void *)&desc, sizeof(desc));
649 desc.special = vlan_special;
650 desc.status |= (vlan_status | E1000_RXD_STAT_DD);
651 if (desc.buffer_addr) {
652 cpu_physical_memory_write(le64_to_cpu(desc.buffer_addr),
653 (void *)(buf + vlan_offset), size);
654 desc.length = cpu_to_le16(size);
655 desc.status |= E1000_RXD_STAT_EOP|E1000_RXD_STAT_IXSM;
656 } else // as per intel docs; skip descriptors with null buf addr
657 DBGOUT(RX, "Null RX descriptor!!\n");
658 cpu_physical_memory_write(base, (void *)&desc, sizeof(desc));
660 if (++s->mac_reg[RDH] * sizeof(desc) >= s->mac_reg[RDLEN])
663 /* see comment in start_xmit; same here */
664 if (s->mac_reg[RDH] == rdh_start) {
665 DBGOUT(RXERR, "RDH wraparound @%x, RDT %x, RDLEN %x\n",
666 rdh_start, s->mac_reg[RDT], s->mac_reg[RDLEN]);
667 set_ics(s, 0, E1000_ICS_RXO);
/* Keep consuming descriptors until one with a usable buffer is found. */
670 } while (desc.buffer_addr == 0);
/* 64-bit TORL/TORH octet counter with carry on wrap. */
674 n = s->mac_reg[TORL];
675 if ((s->mac_reg[TORL] += size) < n)
/* Raise RXDMT0 when free descriptors fall below the RDMTS threshold. */
679 if ((rdt = s->mac_reg[RDT]) < s->mac_reg[RDH])
680 rdt += s->mac_reg[RDLEN] / sizeof(desc);
681 if (((rdt - s->mac_reg[RDH]) * sizeof(desc)) <= s->mac_reg[RDLEN] >>
683 n |= E1000_ICS_RXDMT0;
/* Generic MAC register accessors used by the dispatch tables below.
 * NOTE(review): return-type lines, braces and several "return" lines
 * were dropped by extraction throughout this run of small functions. */
/* Plain read of a mac_reg slot. */
689 mac_readreg(E1000State *s, int index)
691 return s->mac_reg[index];
/* ICR read: returns pending causes and clears them (read-to-clear). */
695 mac_icr_read(E1000State *s, int index)
697 uint32_t ret = s->mac_reg[ICR];
699 DBGOUT(INTERRUPT, "ICR read: %x\n", ret);
700 set_interrupt_cause(s, 0, 0);
/* Read-to-clear 32-bit statistics counter. */
705 mac_read_clr4(E1000State *s, int index)
707 uint32_t ret = s->mac_reg[index];
709 s->mac_reg[index] = 0;
/* Read-to-clear high half of a 64-bit counter pair; clears the low
 * word (index-1) too. */
714 mac_read_clr8(E1000State *s, int index)
716 uint32_t ret = s->mac_reg[index];
718 s->mac_reg[index] = 0;
719 s->mac_reg[index-1] = 0;
/* Plain write of a mac_reg slot. */
724 mac_writereg(E1000State *s, int index, uint32_t val)
726 s->mac_reg[index] = val;
/* RDT write: 16-bit tail update — presumably also kicks check_rxov /
 * flushes queued packets; the extra line(s) at original 731-732 were
 * dropped by extraction. */
730 set_rdt(E1000State *s, int index, uint32_t val)
733 s->mac_reg[index] = val & 0xffff;
/* Generic 16-bit register write (TDH/RDH). */
737 set_16bit(E1000State *s, int index, uint32_t val)
739 s->mac_reg[index] = val & 0xffff;
/* Descriptor-ring length: 128-byte aligned, max 0xfff80. */
743 set_dlen(E1000State *s, int index, uint32_t val)
745 s->mac_reg[index] = val & 0xfff80;
/* TCTL (and TDT, via the table) write: mask TDT to 16 bits and start
 * transmitting. */
749 set_tctl(E1000State *s, int index, uint32_t val)
751 s->mac_reg[index] = val;
752 s->mac_reg[TDT] &= 0xffff;
/* ICR write: writing 1s clears those cause bits. */
757 set_icr(E1000State *s, int index, uint32_t val)
759 DBGOUT(INTERRUPT, "set_icr %x\n", val);
760 set_interrupt_cause(s, 0, s->mac_reg[ICR] & ~val);
/* IMC write: clear interrupt mask bits. */
764 set_imc(E1000State *s, int index, uint32_t val)
766 s->mac_reg[IMS] &= ~val;
/* IMS write: set interrupt mask bits. */
771 set_ims(E1000State *s, int index, uint32_t val)
773 s->mac_reg[IMS] |= val;
/* MMIO dispatch tables, indexed by register word offset: a NULL slot
 * means the register is unimplemented.  Designated ranges cover the
 * RA/MTA/VFTA arrays and the statistics block. */
777 #define getreg(x) [x] = mac_readreg
778 static uint32_t (*macreg_readops[])(E1000State *, int) = {
779 getreg(PBA), getreg(RCTL), getreg(TDH), getreg(TXDCTL),
780 getreg(WUFC), getreg(TDT), getreg(CTRL), getreg(LEDCTL),
781 getreg(MANC), getreg(MDIC), getreg(SWSM), getreg(STATUS),
782 getreg(TORL), getreg(TOTL), getreg(IMS), getreg(TCTL),
783 getreg(RDH), getreg(RDT), getreg(VET),
/* Read-to-clear statistics and the special ICR/EECD/EERD readers. */
785 [TOTH] = mac_read_clr8, [TORH] = mac_read_clr8, [GPRC] = mac_read_clr4,
786 [GPTC] = mac_read_clr4, [TPR] = mac_read_clr4, [TPT] = mac_read_clr4,
787 [ICR] = mac_icr_read, [EECD] = get_eecd, [EERD] = flash_eerd_read,
788 [CRCERRS ... MPC] = &mac_readreg,
789 [RA ... RA+31] = &mac_readreg,
790 [MTA ... MTA+127] = &mac_readreg,
791 [VFTA ... VFTA+127] = &mac_readreg,
793 enum { NREADOPS = ARRAY_SIZE(macreg_readops) };
/* Write dispatch: registers with side effects get dedicated setters. */
795 #define putreg(x) [x] = mac_writereg
796 static void (*macreg_writeops[])(E1000State *, int, uint32_t) = {
797 putreg(PBA), putreg(EERD), putreg(SWSM), putreg(WUFC),
798 putreg(TDBAL), putreg(TDBAH), putreg(TXDCTL), putreg(RDBAH),
799 putreg(RDBAL), putreg(LEDCTL), putreg(VET),
800 [TDLEN] = set_dlen, [RDLEN] = set_dlen, [TCTL] = set_tctl,
801 [TDT] = set_tctl, [MDIC] = set_mdic, [ICS] = set_ics,
802 [TDH] = set_16bit, [RDH] = set_16bit, [RDT] = set_rdt,
803 [IMC] = set_imc, [IMS] = set_ims, [ICR] = set_icr,
804 [EECD] = set_eecd, [RCTL] = set_rx_control, [CTRL] = set_ctrl,
805 [RA ... RA+31] = &mac_writereg,
806 [MTA ... MTA+127] = &mac_writereg,
807 [VFTA ... VFTA+127] = &mac_writereg,
809 enum { NWRITEOPS = ARRAY_SIZE(macreg_writeops) };
/* MMIO access layer: dword accesses dispatch through the tables above;
 * byte/word accesses are widened to dword-aligned accesses (the real
 * hardware has no byte enables, so no read-modify-write is emulated).
 * NOTE(review): the TARGET_WORDS_BIGENDIAN byte-swap bodies, the
 * readl return lines and several braces were dropped by extraction. */
812 e1000_mmio_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
814 E1000State *s = opaque;
815 unsigned int index = (addr & 0x1ffff) >> 2;
817 #ifdef TARGET_WORDS_BIGENDIAN
820 if (index < NWRITEOPS && macreg_writeops[index])
821 macreg_writeops[index](s, index, val);
/* Readable-but-not-writable registers: log, don't store. */
822 else if (index < NREADOPS && macreg_readops[index])
823 DBGOUT(MMIO, "e1000_mmio_writel RO %x: 0x%04x\n", index<<2, val);
825 DBGOUT(UNKNOWN, "MMIO unknown write addr=0x%08x,val=0x%08x\n",
830 e1000_mmio_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
832 // emulate hw without byte enables: no RMW
833 e1000_mmio_writel(opaque, addr & ~3,
834 (val & 0xffff) << (8*(addr & 3)));
838 e1000_mmio_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
840 // emulate hw without byte enables: no RMW
841 e1000_mmio_writel(opaque, addr & ~3,
842 (val & 0xff) << (8*(addr & 3)));
846 e1000_mmio_readl(void *opaque, target_phys_addr_t addr)
848 E1000State *s = opaque;
849 unsigned int index = (addr & 0x1ffff) >> 2;
851 if (index < NREADOPS && macreg_readops[index])
853 uint32_t val = macreg_readops[index](s, index);
854 #ifdef TARGET_WORDS_BIGENDIAN
859 DBGOUT(UNKNOWN, "MMIO unknown read addr=0x%08x\n", index<<2);
/* Narrow reads extract the addressed byte/word from the dword read. */
864 e1000_mmio_readb(void *opaque, target_phys_addr_t addr)
866 return ((e1000_mmio_readl(opaque, addr & ~3)) >>
867 (8 * (addr & 3))) & 0xff;
871 e1000_mmio_readw(void *opaque, target_phys_addr_t addr)
873 return ((e1000_mmio_readl(opaque, addr & ~3)) >>
874 (8 * (addr & 3))) & 0xffff;
/* Lists of MAC registers and register arrays to include in savevm
 * state; must stay in sync between nic_save() and nic_load(). */
877 static const int mac_regtosave[] = {
878 CTRL, EECD, EERD, GPRC, GPTC, ICR, ICS, IMC, IMS,
879 LEDCTL, MANC, MDIC, MPC, PBA, RCTL, RDBAH, RDBAL, RDH,
880 RDLEN, RDT, STATUS, SWSM, TCTL, TDBAH, TDBAL, TDH, TDLEN,
881 TDT, TORH, TORL, TOTH, TOTL, TPR, TPT, TXDCTL, WUFC,
884 enum { MAC_NSAVE = ARRAY_SIZE(mac_regtosave) };
/* Register arrays: {element count, starting index}.
 * NOTE(review): the struct's member declarations (887-888) were
 * dropped by extraction. */
886 static const struct {
889 } mac_regarraystosave[] = { {32, RA}, {128, MTA}, {128, VFTA} };
890 enum { MAC_NARRAYS = ARRAY_SIZE(mac_regarraystosave) };
/* savevm handler: serialize PCI config, RX config, EEPROM bit-bang
 * state, the TX offload context, EEPROM/PHY contents, and the selected
 * MAC registers.  Field order must match nic_load() exactly. */
893 nic_save(QEMUFile *f, void *opaque)
895 E1000State *s = (E1000State *)opaque;
898 pci_device_save(&s->dev, f);
900 qemu_put_be32s(f, &s->rxbuf_size);
901 qemu_put_be32s(f, &s->rxbuf_min_shift);
/* EEPROM Microwire shift-register state. */
902 qemu_put_be32s(f, &s->eecd_state.val_in);
903 qemu_put_be16s(f, &s->eecd_state.bitnum_in);
904 qemu_put_be16s(f, &s->eecd_state.bitnum_out);
905 qemu_put_be16s(f, &s->eecd_state.reading);
906 qemu_put_be32s(f, &s->eecd_state.old_eecd);
/* TX checksum/TSO context carried across descriptors. */
907 qemu_put_8s(f, &s->tx.ipcss);
908 qemu_put_8s(f, &s->tx.ipcso);
909 qemu_put_be16s(f, &s->tx.ipcse);
910 qemu_put_8s(f, &s->tx.tucss);
911 qemu_put_8s(f, &s->tx.tucso);
912 qemu_put_be16s(f, &s->tx.tucse);
913 qemu_put_be32s(f, &s->tx.paylen);
914 qemu_put_8s(f, &s->tx.hdr_len);
915 qemu_put_be16s(f, &s->tx.mss);
916 qemu_put_be16s(f, &s->tx.size);
917 qemu_put_be16s(f, &s->tx.tso_frames);
918 qemu_put_8s(f, &s->tx.sum_needed);
919 qemu_put_s8s(f, &s->tx.ip);
920 qemu_put_s8s(f, &s->tx.tcp);
921 qemu_put_buffer(f, s->tx.header, sizeof s->tx.header);
922 qemu_put_buffer(f, s->tx.data, sizeof s->tx.data);
923 for (i = 0; i < 64; i++)
924 qemu_put_be16s(f, s->eeprom_data + i);
925 for (i = 0; i < 0x20; i++)
926 qemu_put_be16s(f, s->phy_reg + i);
927 for (i = 0; i < MAC_NSAVE; i++)
928 qemu_put_be32s(f, s->mac_reg + mac_regtosave[i]);
929 for (i = 0; i < MAC_NARRAYS; i++)
930 for (j = 0; j < mac_regarraystosave[i].size; j++)
932 s->mac_reg + mac_regarraystosave[i].array0 + j);
/* savevm restore handler: mirror of nic_save(), same field order.
 * Two legacy fields (an unused instance id and the old mmio_base) are
 * read and discarded for compatibility with older stream versions. */
936 nic_load(QEMUFile *f, void *opaque, int version_id)
938 E1000State *s = (E1000State *)opaque;
941 if ((ret = pci_device_load(&s->dev, f)) < 0)
944 qemu_get_sbe32s(f, &i); /* once some unused instance id */
945 qemu_get_be32(f); /* Ignored. Was mmio_base. */
946 qemu_get_be32s(f, &s->rxbuf_size);
947 qemu_get_be32s(f, &s->rxbuf_min_shift);
948 qemu_get_be32s(f, &s->eecd_state.val_in);
949 qemu_get_be16s(f, &s->eecd_state.bitnum_in);
950 qemu_get_be16s(f, &s->eecd_state.bitnum_out);
951 qemu_get_be16s(f, &s->eecd_state.reading);
952 qemu_get_be32s(f, &s->eecd_state.old_eecd);
953 qemu_get_8s(f, &s->tx.ipcss);
954 qemu_get_8s(f, &s->tx.ipcso);
955 qemu_get_be16s(f, &s->tx.ipcse);
956 qemu_get_8s(f, &s->tx.tucss);
957 qemu_get_8s(f, &s->tx.tucso);
958 qemu_get_be16s(f, &s->tx.tucse);
959 qemu_get_be32s(f, &s->tx.paylen);
960 qemu_get_8s(f, &s->tx.hdr_len);
961 qemu_get_be16s(f, &s->tx.mss);
962 qemu_get_be16s(f, &s->tx.size);
963 qemu_get_be16s(f, &s->tx.tso_frames);
964 qemu_get_8s(f, &s->tx.sum_needed);
965 qemu_get_s8s(f, &s->tx.ip);
966 qemu_get_s8s(f, &s->tx.tcp);
967 qemu_get_buffer(f, s->tx.header, sizeof s->tx.header);
968 qemu_get_buffer(f, s->tx.data, sizeof s->tx.data);
969 for (i = 0; i < 64; i++)
970 qemu_get_be16s(f, s->eeprom_data + i);
971 for (i = 0; i < 0x20; i++)
972 qemu_get_be16s(f, s->phy_reg + i);
973 for (i = 0; i < MAC_NSAVE; i++)
974 qemu_get_be32s(f, s->mac_reg + mac_regtosave[i]);
975 for (i = 0; i < MAC_NARRAYS; i++)
976 for (j = 0; j < mac_regarraystosave[i].size; j++)
978 s->mac_reg + mac_regarraystosave[i].array0 + j);
/* Reset-time contents.  The EEPROM template's first three words are
 * overwritten with the MAC address at init; word 0x3f is the checksum
 * slot, recomputed in pci_e1000_init().  Words 11-14 carry the
 * subsystem/vendor IDs (E1000_DEVID / 0x8086). */
983 static const uint16_t e1000_eeprom_template[64] = {
984 0x0000, 0x0000, 0x0000, 0x0000, 0xffff, 0x0000, 0x0000, 0x0000,
985 0x3000, 0x1000, 0x6403, E1000_DEVID, 0x8086, E1000_DEVID, 0x8086, 0x3040,
986 0x0008, 0x2000, 0x7e14, 0x0048, 0x1000, 0x00d8, 0x0000, 0x2700,
987 0x6cc9, 0x3150, 0x0722, 0x040b, 0x0984, 0x0000, 0xc000, 0x0706,
988 0x1008, 0x0000, 0x0f04, 0x7fff, 0x4d01, 0xffff, 0xffff, 0xffff,
989 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
990 0x0100, 0x4000, 0x121c, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
991 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x0000,
/* PHY register reset values: link up, autoneg complete, ID matching
 * PHY_ID2_INIT (closing "};" dropped by extraction). */
994 static const uint16_t phy_reg_init[] = {
995 [PHY_CTRL] = 0x1140, [PHY_STATUS] = 0x796d, // link initially up
996 [PHY_ID1] = 0x141, [PHY_ID2] = PHY_ID2_INIT,
997 [PHY_1000T_CTRL] = 0x0e00, [M88E1000_PHY_SPEC_CTRL] = 0x360,
998 [M88E1000_EXT_PHY_SPEC_CTRL] = 0x0d60, [PHY_AUTONEG_ADV] = 0xde1,
999 [PHY_LP_ABILITY] = 0x1e0, [PHY_1000T_STATUS] = 0x3c00,
1000 [M88E1000_PHY_SPEC_STATUS] = 0xac00,
/* MAC register reset values: 1000 Mb/s full duplex, link up.
 * NOTE(review): some initializers (e.g. PBA, 1004-1005, and lines
 * 1011/1014-1016) were dropped by extraction. */
1003 static const uint32_t mac_reg_init[] = {
1006 [CTRL] = E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN0 |
1007 E1000_CTRL_SPD_1000 | E1000_CTRL_SLU,
1008 [STATUS] = 0x80000000 | E1000_STATUS_GIO_MASTER_ENABLE |
1009 E1000_STATUS_ASDV | E1000_STATUS_MTXCKOK |
1010 E1000_STATUS_SPEED_1000 | E1000_STATUS_FD |
1012 [MANC] = E1000_MANC_EN_MNG2HOST | E1000_MANC_RCV_TCO_EN |
1013 E1000_MANC_ARP_EN | E1000_MANC_0298_EN |
/* MMIO callback tables for cpu_register_io_memory: indexed by access
 * size (byte, word, long). */
1019 static CPUWriteMemoryFunc *e1000_mmio_write[] = {
1020 e1000_mmio_writeb, e1000_mmio_writew, e1000_mmio_writel
1023 static CPUReadMemoryFunc *e1000_mmio_read[] = {
1024 e1000_mmio_readb, e1000_mmio_readw, e1000_mmio_readl
/* PCI MMIO BAR map callback: register the 128KB region and mark it
 * coalesced-MMIO except for registers with read/write side effects
 * (MDIC, ICR, ICS, IMS, IMC, TCTL, TDT), which must trap immediately.
 * excluded_regs ends with PNPMMIO_SIZE as a sentinel so the loop also
 * covers the span after the last excluded register. */
1028 e1000_mmio_map(PCIDevice *pci_dev, int region_num,
1029 uint32_t addr, uint32_t size, int type)
1031 E1000State *d = (E1000State *)pci_dev;
1033 const uint32_t excluded_regs[] = {
1034 E1000_MDIC, E1000_ICR, E1000_ICS, E1000_IMS,
1035 E1000_IMC, E1000_TCTL, E1000_TDT, PNPMMIO_SIZE
1039 DBGOUT(MMIO, "e1000_mmio_map addr=0x%08x 0x%08x\n", addr, size);
1041 cpu_register_physical_memory(addr, PNPMMIO_SIZE, d->mmio_index);
/* Coalesce everything up to the first excluded register... */
1042 qemu_register_coalesced_mmio(addr, excluded_regs[0]);
/* ...then each gap between consecutive excluded registers. */
1044 for (i = 0; excluded_regs[i] != PNPMMIO_SIZE; i++)
1045 qemu_register_coalesced_mmio(addr + excluded_regs[i] + 4,
1046 excluded_regs[i + 1] -
1047 excluded_regs[i] - 4);
/* VLAN-client teardown: drop the savevm registration (further cleanup
 * lines, if any, were dropped by extraction). */
1051 e1000_cleanup(VLANClientState *vc)
1053 E1000State *d = vc->opaque;
1055 unregister_savevm("e1000", d);
/* PCI hot-unplug teardown: release the MMIO region. */
1059 pci_e1000_uninit(PCIDevice *dev)
1061 E1000State *d = (E1000State *) dev;
1063 cpu_unregister_io_memory(d->mmio_index);
/* System reset: reload PHY/MAC registers from their init tables and
 * clear the TX context.  memmove here copies the (shorter) init
 * arrays over the zeroed full-size arrays — memcpy would do; regions
 * don't overlap. */
1068 static void e1000_reset(void *opaque)
1070 E1000State *d = opaque;
1072 memset(d->phy_reg, 0, sizeof d->phy_reg);
1073 memmove(d->phy_reg, phy_reg_init, sizeof phy_reg_init);
1074 memset(d->mac_reg, 0, sizeof d->mac_reg);
1075 memmove(d->mac_reg, mac_reg_init, sizeof mac_reg_init);
/* Default RX low-water mark (also set via RCTL writes). */
1076 d->rxbuf_min_shift = 1;
1077 memset(&d->tx, 0, sizeof d->tx);
/* qdev init: fill in PCI config space, register the MMIO and I/O-port
 * BARs, seed the EEPROM from the template plus the configured MAC
 * address, fix up the EEPROM checksum, attach to the VLAN, and hook
 * savevm/reset/uninit.  NOTE(review): extraction dropped the local
 * declarations (pci_conf, macaddr, i) and some argument lines
 * (1123, 1125). */
1081 static void pci_e1000_init(PCIDevice *pci_dev)
1083 E1000State *d = (E1000State *)pci_dev;
1085 uint16_t checksum = 0;
1086 static const char info_str[] = "e1000";
1090 pci_conf = d->dev.config;
1092 pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_INTEL);
1093 pci_config_set_device_id(pci_conf, E1000_DEVID);
/* Command (I/O+mem+bus-master) and status registers. */
1094 *(uint16_t *)(pci_conf+0x04) = cpu_to_le16(0x0407);
1095 *(uint16_t *)(pci_conf+0x06) = cpu_to_le16(0x0010);
1096 pci_conf[0x08] = 0x03;
1097 pci_config_set_class(pci_conf, PCI_CLASS_NETWORK_ETHERNET);
/* Cache line size. */
1098 pci_conf[0x0c] = 0x10;
1100 pci_conf[0x3d] = 1; // interrupt pin 0
1102 d->mmio_index = cpu_register_io_memory(0, e1000_mmio_read,
1103 e1000_mmio_write, d);
1105 pci_register_io_region((PCIDevice *)d, 0, PNPMMIO_SIZE,
1106 PCI_ADDRESS_SPACE_MEM, e1000_mmio_map);
1108 pci_register_io_region((PCIDevice *)d, 1, IOPORT_SIZE,
1109 PCI_ADDRESS_SPACE_IO, ioport_map);
1111 memmove(d->eeprom_data, e1000_eeprom_template,
1112 sizeof e1000_eeprom_template);
/* MAC address occupies EEPROM words 0-2, little-endian byte pairs. */
1113 qdev_get_macaddr(&d->dev.qdev, macaddr);
1114 for (i = 0; i < 3; i++)
1115 d->eeprom_data[i] = (macaddr[2*i+1]<<8) | macaddr[2*i];
/* Recompute the checksum word so all 64 words sum to EEPROM_SUM. */
1116 for (i = 0; i < EEPROM_CHECKSUM_REG; i++)
1117 checksum += d->eeprom_data[i];
1118 checksum = (uint16_t) EEPROM_SUM - checksum;
1119 d->eeprom_data[EEPROM_CHECKSUM_REG] = checksum;
1121 d->vc = qdev_get_vlan_client(&d->dev.qdev,
1122 e1000_receive, e1000_can_receive,
1124 d->vc->link_status_changed = e1000_set_link_status;
1126 qemu_format_nic_info_str(d->vc, macaddr);
1128 register_savevm(info_str, -1, 2, nic_save, nic_load, d);
1129 d->dev.unregister = pci_e1000_uninit;
1130 qemu_register_reset(e1000_reset, 0, d);
/* Register the "e1000" qdev device type at startup. */
1134 static void e1000_register_devices(void)
1136 pci_qdev_register("e1000", sizeof(E1000State), pci_e1000_init);
1139 device_init(e1000_register_devices)