4 * Software developer's manual:
5 * http://download.intel.com/design/network/manuals/8254x_GBe_SDM.pdf
7 * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
8 * Copyright (c) 2008 Qumranet
9 * Based on work done by:
10 * Copyright (c) 2007 Dan Aloni
11 * Copyright (c) 2004 Antony T Curtis
13 * This library is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU Lesser General Public
15 * License as published by the Free Software Foundation; either
16 * version 2 of the License, or (at your option) any later version.
18 * This library is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 * Lesser General Public License for more details.
23 * You should have received a copy of the GNU Lesser General Public
24 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
31 #include "net/checksum.h"
/* Debug message categories; each enumerator is a bit index used by DBGBIT(). */
42 DEBUG_GENERAL, DEBUG_IO, DEBUG_MMIO, DEBUG_INTERRUPT,
43 DEBUG_RX, DEBUG_TX, DEBUG_MDIC, DEBUG_EEPROM,
44 DEBUG_UNKNOWN, DEBUG_TXSUM, DEBUG_TXERR, DEBUG_RXERR,
45 DEBUG_RXFILTER, DEBUG_PHY, DEBUG_NOTYET,
/* Convert a category name into its bit mask. */
47 #define DBGBIT(x) (1<<DEBUG_##x)
/* Default set of enabled categories: TX errors and general messages. */
48 static int debugflags = DBGBIT(TXERR) | DBGBIT(GENERAL);
/* Conditional debug printf: emits to stderr only when the category bit is set
 * in debugflags; compiles to a no-op in the disabled build (second variant). */
50 #define DBGOUT(what, fmt, ...) do { \
51 if (debugflags & DBGBIT(what)) \
52 fprintf(stderr, "e1000: " fmt, ## __VA_ARGS__); \
55 #define DBGOUT(what, fmt, ...) do {} while (0)
/* Size of the PCI I/O BAR region. */
58 #define IOPORT_SIZE 0x40
/* Size of the PCI MMIO BAR region (128 KiB register window). */
59 #define PNPMMIO_SIZE 0x20000
60 #define MIN_BUF_SIZE 60 /* Min. octets in an ethernet frame sans FCS */
64 * E1000_DEV_ID_82540EM works with Windows and Linux
65 * E1000_DEV_ID_82573L OK with Windows and Linux 2.6.22,
66 * appears to perform better than 82540EM, but breaks with Linux 2.6.18
67 * E1000_DEV_ID_82544GC_COPPER appears to work; not well tested
/* PCI device ID the emulated NIC advertises; 82540EM is the compatibility default. */
70 enum { E1000_DEVID = E1000_DEV_ID_82540EM };
73 * May need to specify additional MAC-to-PHY entries --
74 * Intel's Windows driver refuses to initialize unless they match
/* PHY identifier register 2 value, selected to match the chosen device ID. */
77 PHY_ID2_INIT = E1000_DEVID == E1000_DEV_ID_82573L ? 0xcc2 :
78 E1000_DEVID == E1000_DEV_ID_82544GC_COPPER ? 0xc30 :
79 /* default to E1000_DEV_ID_82540EM */ 0xc20
/* Per-device emulation state: MAC/PHY/EEPROM register files plus TX staging
 * buffers and EEPROM bit-bang state.  (Only a subset of fields is shown here.) */
82 typedef struct E1000State_st {
/* MAC register file, indexed by (register offset >> 2). */
89 uint32_t mac_reg[0x8000];
90 uint16_t phy_reg[0x20];
91 uint16_t eeprom_data[64];
94 uint32_t rxbuf_min_shift;
/* Saved packet header for TSO re-prepending on each segment. */
97 unsigned char header[256];
98 unsigned char vlan_header[4];
99 /* Fields vlan and data must not be reordered or separated. */
100 unsigned char vlan[4];
101 unsigned char data[0x10000];
103 unsigned char sum_needed;
104 unsigned char vlan_needed;
118 char cptse; // current packet tse bit
122 uint32_t val_in; // shifted in from guest driver
/* Timer that completes link auto-negotiation after a delay. */
129 QEMUTimer *autoneg_timer;
/* Map each E1000_<reg> byte offset to a word index into mac_reg[]. */
132 #define defreg(x) x = (E1000_##x>>2)
134 defreg(CTRL), defreg(EECD), defreg(EERD), defreg(GPRC),
135 defreg(GPTC), defreg(ICR), defreg(ICS), defreg(IMC),
136 defreg(IMS), defreg(LEDCTL), defreg(MANC), defreg(MDIC),
137 defreg(MPC), defreg(PBA), defreg(RCTL), defreg(RDBAH),
138 defreg(RDBAL), defreg(RDH), defreg(RDLEN), defreg(RDT),
139 defreg(STATUS), defreg(SWSM), defreg(TCTL), defreg(TDBAH),
140 defreg(TDBAL), defreg(TDH), defreg(TDLEN), defreg(TDT),
141 defreg(TORH), defreg(TORL), defreg(TOTH), defreg(TOTL),
142 defreg(TPR), defreg(TPT), defreg(TXDCTL), defreg(WUFC),
143 defreg(RA), defreg(MTA), defreg(CRCERRS),defreg(VFTA),
/* Mark link down in both the MAC STATUS register and the PHY status register. */
148 e1000_link_down(E1000State *s)
150 s->mac_reg[STATUS] &= ~E1000_STATUS_LU;
151 s->phy_reg[PHY_STATUS] &= ~MII_SR_LINK_STATUS;
/* Mark link up in both the MAC STATUS register and the PHY status register. */
155 e1000_link_up(E1000State *s)
157 s->mac_reg[STATUS] |= E1000_STATUS_LU;
158 s->phy_reg[PHY_STATUS] |= MII_SR_LINK_STATUS;
/* PHY_CTRL write handler.  When the guest requests (re)starting
 * auto-negotiation, take the link down, clear the autoneg-complete bit,
 * and arm a 500 ms timer that will bring the link back up. */
162 set_phy_ctrl(E1000State *s, int index, uint16_t val)
164 if ((val & MII_CR_AUTO_NEG_EN) && (val & MII_CR_RESTART_AUTO_NEG)) {
165 s->nic->nc.link_down = true;
167 s->phy_reg[PHY_STATUS] &= ~MII_SR_AUTONEG_COMPLETE;
168 DBGOUT(PHY, "Start link auto negotiation\n");
169 qemu_mod_timer(s->autoneg_timer, qemu_get_clock_ms(vm_clock) + 500);
/* Auto-negotiation timer callback: restore the link and report
 * auto-negotiation completion in the PHY status register. */
174 e1000_autoneg_timer(void *opaque)
176 E1000State *s = opaque;
177 s->nic->nc.link_down = false;
179 s->phy_reg[PHY_STATUS] |= MII_SR_AUTONEG_COMPLETE;
180 DBGOUT(PHY, "Auto negotiation is completed\n");
/* Per-PHY-register write hooks; only PHY_CTRL has special behavior. */
183 static void (*phyreg_writeops[])(E1000State *, int, uint16_t) = {
184 [PHY_CTRL] = set_phy_ctrl,
187 enum { NPHYWRITEOPS = ARRAY_SIZE(phyreg_writeops) };
/* Access capability flags for each PHY register (read/write/both). */
189 enum { PHY_R = 1, PHY_W = 2, PHY_RW = PHY_R | PHY_W };
/* Which PHY registers the guest may read and/or write via MDIC. */
190 static const char phy_regcap[0x20] = {
191 [PHY_STATUS] = PHY_R, [M88E1000_EXT_PHY_SPEC_CTRL] = PHY_RW,
192 [PHY_ID1] = PHY_R, [M88E1000_PHY_SPEC_CTRL] = PHY_RW,
193 [PHY_CTRL] = PHY_RW, [PHY_1000T_CTRL] = PHY_RW,
194 [PHY_LP_ABILITY] = PHY_R, [PHY_1000T_STATUS] = PHY_R,
195 [PHY_AUTONEG_ADV] = PHY_RW, [M88E1000_RX_ERR_CNTR] = PHY_R,
196 [PHY_ID2] = PHY_R, [M88E1000_PHY_SPEC_STATUS] = PHY_R
/* Reset-time PHY register values (copied into phy_reg[] on device reset). */
199 static const uint16_t phy_reg_init[] = {
201 [PHY_STATUS] = 0x794d, /* link initially up with not completed autoneg */
202 [PHY_ID1] = 0x141, [PHY_ID2] = PHY_ID2_INIT,
203 [PHY_1000T_CTRL] = 0x0e00, [M88E1000_PHY_SPEC_CTRL] = 0x360,
204 [M88E1000_EXT_PHY_SPEC_CTRL] = 0x0d60, [PHY_AUTONEG_ADV] = 0xde1,
205 [PHY_LP_ABILITY] = 0x1e0, [PHY_1000T_STATUS] = 0x3c00,
206 [M88E1000_PHY_SPEC_STATUS] = 0xac00,
/* Reset-time MAC register values (copied into mac_reg[] on device reset). */
209 static const uint32_t mac_reg_init[] = {
212 [CTRL] = E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN0 |
213 E1000_CTRL_SPD_1000 | E1000_CTRL_SLU,
214 [STATUS] = 0x80000000 | E1000_STATUS_GIO_MASTER_ENABLE |
215 E1000_STATUS_ASDV | E1000_STATUS_MTXCKOK |
216 E1000_STATUS_SPEED_1000 | E1000_STATUS_FD |
218 [MANC] = E1000_MANC_EN_MNG2HOST | E1000_MANC_RCV_TCO_EN |
219 E1000_MANC_ARP_EN | E1000_MANC_0298_EN |
/* Latch a new interrupt cause into ICR/ICS and (de)assert the PCI IRQ line
 * according to ICR masked by IMS.  On 82547EI-and-later device IDs a nonzero
 * cause also sets the INT_ASSERTED bit, as newer chips report it. */
224 set_interrupt_cause(E1000State *s, int index, uint32_t val)
226 if (val && (E1000_DEVID >= E1000_DEV_ID_82547EI_MOBILE)) {
228 val |= E1000_ICR_INT_ASSERTED;
230 s->mac_reg[ICR] = val;
231 s->mac_reg[ICS] = val;
232 qemu_set_irq(s->dev.irq[0], (s->mac_reg[IMS] & s->mac_reg[ICR]) != 0);
/* ICS write handler: OR the new cause bits into the pending ICR set. */
236 set_ics(E1000State *s, int index, uint32_t val)
238 DBGOUT(INTERRUPT, "set_ics %x, ICR %x, IMR %x\n", val, s->mac_reg[ICR],
240 set_interrupt_cause(s, 0, val | s->mac_reg[ICR]);
/* Decode the RX buffer size from the RCTL BSIZE/BSEX field combination. */
244 rxbufsize(uint32_t v)
246 v &= E1000_RCTL_BSEX | E1000_RCTL_SZ_16384 | E1000_RCTL_SZ_8192 |
247 E1000_RCTL_SZ_4096 | E1000_RCTL_SZ_2048 | E1000_RCTL_SZ_1024 |
248 E1000_RCTL_SZ_512 | E1000_RCTL_SZ_256;
/* With BSEX set, sizes are the extended (16K/8K/4K) variants. */
250 case E1000_RCTL_BSEX | E1000_RCTL_SZ_16384:
252 case E1000_RCTL_BSEX | E1000_RCTL_SZ_8192:
254 case E1000_RCTL_BSEX | E1000_RCTL_SZ_4096:
256 case E1000_RCTL_SZ_1024:
258 case E1000_RCTL_SZ_512:
260 case E1000_RCTL_SZ_256:
/* Full device reset: cancel the autoneg timer, reinitialize the PHY and MAC
 * register files from their reset templates, and clear the TX staging state. */
266 static void e1000_reset(void *opaque)
268 E1000State *d = opaque;
270 qemu_del_timer(d->autoneg_timer);
271 memset(d->phy_reg, 0, sizeof d->phy_reg);
272 memmove(d->phy_reg, phy_reg_init, sizeof phy_reg_init);
273 memset(d->mac_reg, 0, sizeof d->mac_reg);
274 memmove(d->mac_reg, mac_reg_init, sizeof mac_reg_init);
275 d->rxbuf_min_shift = 1;
276 memset(&d->tx, 0, sizeof d->tx);
/* Preserve the current backend link state across the reset. */
278 if (d->nic->nc.link_down) {
/* CTRL write handler. */
284 set_ctrl(E1000State *s, int index, uint32_t val)
286 /* RST is self clearing */
287 s->mac_reg[CTRL] = val & ~E1000_CTRL_RST;
/* RCTL write handler: store the register and derive the cached RX buffer
 * size and the receive-descriptor-minimum-threshold shift from it. */
291 set_rx_control(E1000State *s, int index, uint32_t val)
293 s->mac_reg[RCTL] = val;
294 s->rxbuf_size = rxbufsize(val);
295 s->rxbuf_min_shift = ((val / E1000_RCTL_RDMTS_QUAT) & 3) + 1;
296 DBGOUT(RX, "RCTL: %d, mac_reg[RCTL] = 0x%x\n", s->mac_reg[RDT],
/* MDIC write handler: perform the MDIO read or write the guest encoded in
 * val against the PHY register file, honoring phy_regcap permissions.
 * Sets E1000_MDIC_ERROR on bad PHY number or disallowed access, always sets
 * READY on completion, and raises an MDAC interrupt if INT_EN was requested. */
301 set_mdic(E1000State *s, int index, uint32_t val)
303 uint32_t data = val & E1000_MDIC_DATA_MASK;
304 uint32_t addr = ((val & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);
306 if ((val & E1000_MDIC_PHY_MASK) >> E1000_MDIC_PHY_SHIFT != 1) // phy #
307 val = s->mac_reg[MDIC] | E1000_MDIC_ERROR;
308 else if (val & E1000_MDIC_OP_READ) {
309 DBGOUT(MDIC, "MDIC read reg 0x%x\n", addr);
310 if (!(phy_regcap[addr] & PHY_R)) {
311 DBGOUT(MDIC, "MDIC read reg %x unhandled\n", addr);
312 val |= E1000_MDIC_ERROR;
/* Clear the data field and merge in the PHY register value. */
314 val = (val ^ data) | s->phy_reg[addr];
315 } else if (val & E1000_MDIC_OP_WRITE) {
316 DBGOUT(MDIC, "MDIC write reg 0x%x, value 0x%x\n", addr, data);
317 if (!(phy_regcap[addr] & PHY_W)) {
318 DBGOUT(MDIC, "MDIC write reg %x unhandled\n", addr);
319 val |= E1000_MDIC_ERROR;
/* Registers with a dedicated hook get it; others are stored directly. */
321 if (addr < NPHYWRITEOPS && phyreg_writeops[addr]) {
322 phyreg_writeops[addr](s, index, data);
324 s->phy_reg[addr] = data;
327 s->mac_reg[MDIC] = val | E1000_MDIC_READY;
329 if (val & E1000_MDIC_INT_EN) {
330 set_ics(s, 0, E1000_ICR_MDAC);
/* EECD read handler: report EEPROM present/grant plus the last-written
 * control bits, and drive the DO (data out) bit from the current serial
 * read position in eeprom_data (16 bits per word, MSB first). */
335 get_eecd(E1000State *s, int index)
337 uint32_t ret = E1000_EECD_PRES|E1000_EECD_GNT | s->eecd_state.old_eecd;
339 DBGOUT(EEPROM, "reading eeprom bit %d (reading %d)\n",
340 s->eecd_state.bitnum_out, s->eecd_state.reading);
341 if (!s->eecd_state.reading ||
342 ((s->eeprom_data[(s->eecd_state.bitnum_out >> 4) & 0x3f] >>
343 ((s->eecd_state.bitnum_out & 0xf) ^ 0xf))) & 1)
344 ret |= E1000_EECD_DO;
/* EECD write handler: bit-bang Microwire EEPROM protocol emulation.
 * Tracks chip-select and clock edges; shifts DI bits into val_in, and after
 * 9 bits (opcode + address) decides whether a read has started and where
 * bitnum_out should begin for get_eecd() to serve data from. */
349 set_eecd(E1000State *s, int index, uint32_t val)
351 uint32_t oldval = s->eecd_state.old_eecd;
353 s->eecd_state.old_eecd = val & (E1000_EECD_SK | E1000_EECD_CS |
354 E1000_EECD_DI|E1000_EECD_FWE_MASK|E1000_EECD_REQ);
355 if (!(E1000_EECD_CS & val)) // CS inactive; nothing to do
357 if (E1000_EECD_CS & (val ^ oldval)) { // CS rise edge; reset state
358 s->eecd_state.val_in = 0;
359 s->eecd_state.bitnum_in = 0;
360 s->eecd_state.bitnum_out = 0;
361 s->eecd_state.reading = 0;
363 if (!(E1000_EECD_SK & (val ^ oldval))) // no clock edge
365 if (!(E1000_EECD_SK & val)) { // falling edge
366 s->eecd_state.bitnum_out++;
/* Rising clock edge: sample DI into the input shift register. */
369 s->eecd_state.val_in <<= 1;
370 if (val & E1000_EECD_DI)
371 s->eecd_state.val_in |= 1;
372 if (++s->eecd_state.bitnum_in == 9 && !s->eecd_state.reading) {
373 s->eecd_state.bitnum_out = ((s->eecd_state.val_in & 0x3f)<<4)-1;
374 s->eecd_state.reading = (((s->eecd_state.val_in >> 6) & 7) ==
375 EEPROM_READ_OPCODE_MICROWIRE);
377 DBGOUT(EEPROM, "eeprom bitnum in %d out %d, reading %d\n",
378 s->eecd_state.bitnum_in, s->eecd_state.bitnum_out,
379 s->eecd_state.reading);
/* EERD read handler: word-at-a-time EEPROM access.  If no read was started,
 * echo the register; out-of-range addresses just report DONE; otherwise
 * return DONE plus the addressed EEPROM word in the data field. */
383 flash_eerd_read(E1000State *s, int x)
385 unsigned int index, r = s->mac_reg[EERD] & ~E1000_EEPROM_RW_REG_START;
387 if ((s->mac_reg[EERD] & E1000_EEPROM_RW_REG_START) == 0)
388 return (s->mac_reg[EERD]);
390 if ((index = r >> E1000_EEPROM_RW_ADDR_SHIFT) > EEPROM_CHECKSUM_REG)
391 return (E1000_EEPROM_RW_REG_DONE | r);
393 return ((s->eeprom_data[index] << E1000_EEPROM_RW_REG_DATA) |
394 E1000_EEPROM_RW_REG_DONE | r);
/* Compute an Internet checksum over data[css..cse-or-end) and store it
 * big-endian at offset sloc within the packet buffer. */
398 putsum(uint8_t *data, uint32_t n, uint32_t sloc, uint32_t css, uint32_t cse)
405 sum = net_checksum_add(n-css, data+css);
406 cpu_to_be16wu((uint16_t *)(data + sloc),
407 net_checksum_finish(sum));
/* True when VLAN tag handling is enabled (CTRL.VME). */
412 vlan_enabled(E1000State *s)
414 return ((s->mac_reg[CTRL] & E1000_CTRL_VME) != 0);
/* True when VLAN receive filtering is enabled (RCTL.VFE). */
418 vlan_rx_filter_enabled(E1000State *s)
420 return ((s->mac_reg[RCTL] & E1000_RCTL_VFE) != 0);
/* True when the frame's EtherType (offset 12) matches the configured VET. */
424 is_vlan_packet(E1000State *s, const uint8_t *buf)
426 return (be16_to_cpup((uint16_t *)(buf + 12)) ==
427 le16_to_cpup((uint16_t *)(s->mac_reg + VET)));
/* True when a TX descriptor requests VLAN tag insertion (CMD.VLE). */
431 is_vlan_txd(uint32_t txd_lower)
433 return ((txd_lower & E1000_TXD_CMD_VLE) != 0);
436 /* FCS aka Ethernet CRC-32. We don't get it from backends and can't
437 * fill it in, just pad descriptor length by 4 bytes unless guest
438 * told us to strip it off the packet. */
440 fcs_len(E1000State *s)
442 return (s->mac_reg[RCTL] & E1000_RCTL_SECRC) ? 0 : 4;
/* Transmit a frame: loop it straight back to our own receive path when the
 * PHY is in loopback mode, otherwise hand it to the network backend. */
446 e1000_send_packet(E1000State *s, const uint8_t *buf, int size)
448 if (s->phy_reg[PHY_CTRL] & MII_CR_LOOPBACK) {
449 s->nic->nc.info->receive(&s->nic->nc, buf, size);
451 qemu_send_packet(&s->nic->nc, buf, size);
/* Emit one segment of the packet staged in s->tx.  For TSO segments, patch
 * the per-segment IP length/ID, TCP sequence number or UDP length, and fold
 * the pseudo-header length into the checksum seed; then apply any requested
 * TCP/UDP and IP checksum insertion, optionally re-insert the VLAN tag, send
 * the frame, and update the TOTL/TOTH octet counters. */
456 xmit_seg(E1000State *s)
459 unsigned int frames = s->tx.tso_frames, css, sofar, n;
460 struct e1000_tx *tp = &s->tx;
462 if (tp->tse && tp->cptse) {
464 DBGOUT(TXSUM, "frames %d size %d ipcss %d\n",
465 frames, tp->size, css);
466 if (tp->ip) { // IPv4
/* Patch IP total length and increment the IP ID per segment. */
467 cpu_to_be16wu((uint16_t *)(tp->data+css+2),
469 cpu_to_be16wu((uint16_t *)(tp->data+css+4),
470 be16_to_cpup((uint16_t *)(tp->data+css+4))+frames);
472 cpu_to_be16wu((uint16_t *)(tp->data+css+4),
475 len = tp->size - css;
476 DBGOUT(TXSUM, "tcp %d tucss %d len %d\n", tp->tcp, css, len);
/* TCP: advance the sequence number by the bytes already sent. */
478 sofar = frames * tp->mss;
479 cpu_to_be32wu((uint32_t *)(tp->data+css+4), // seq
480 be32_to_cpupu((uint32_t *)(tp->data+css+4))+sofar);
481 if (tp->paylen - sofar > tp->mss)
482 tp->data[css + 13] &= ~9; // PSH, FIN
/* UDP: patch the datagram length field instead. */
484 cpu_to_be16wu((uint16_t *)(tp->data+css+4), len);
485 if (tp->sum_needed & E1000_TXD_POPTS_TXSM) {
487 // add pseudo-header length before checksum calculation
488 sp = (uint16_t *)(tp->data + tp->tucso);
489 phsum = be16_to_cpup(sp) + len;
490 phsum = (phsum >> 16) + (phsum & 0xffff);
491 cpu_to_be16wu(sp, phsum);
496 if (tp->sum_needed & E1000_TXD_POPTS_TXSM)
497 putsum(tp->data, tp->size, tp->tucso, tp->tucss, tp->tucse);
498 if (tp->sum_needed & E1000_TXD_POPTS_IXSM)
499 putsum(tp->data, tp->size, tp->ipcso, tp->ipcss, tp->ipcse);
500 if (tp->vlan_needed) {
/* Rebuild the frame with the 4-byte 802.1Q tag after the MAC addresses. */
501 memmove(tp->vlan, tp->data, 4);
502 memmove(tp->data, tp->data + 4, 8);
503 memcpy(tp->data + 8, tp->vlan_header, 4);
504 e1000_send_packet(s, tp->vlan, tp->size + 4);
506 e1000_send_packet(s, tp->data, tp->size);
/* TOTL/TOTH: 64-bit total-octets counter with carry into the high word. */
509 n = s->mac_reg[TOTL];
510 if ((s->mac_reg[TOTL] += s->tx.size) < n)
/* Process one TX descriptor.  Context descriptors load the checksum/TSO
 * offload parameters into s->tx; data descriptors DMA the guest buffer into
 * the staging area, segmenting at mss+hdr boundaries when TSO is active, and
 * emit segments via xmit_seg() once a complete frame (EOP) is accumulated. */
515 process_tx_desc(E1000State *s, struct e1000_tx_desc *dp)
517 uint32_t txd_lower = le32_to_cpu(dp->lower.data);
518 uint32_t dtype = txd_lower & (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D);
519 unsigned int split_size = txd_lower & 0xffff, bytes, sz, op;
520 unsigned int msh = 0xfffff, hdr = 0;
522 struct e1000_context_desc *xp = (struct e1000_context_desc *)dp;
523 struct e1000_tx *tp = &s->tx;
525 if (dtype == E1000_TXD_CMD_DEXT) { // context descriptor
/* Latch checksum start/offset/end and TSO parameters for later frames. */
526 op = le32_to_cpu(xp->cmd_and_length);
527 tp->ipcss = xp->lower_setup.ip_fields.ipcss;
528 tp->ipcso = xp->lower_setup.ip_fields.ipcso;
529 tp->ipcse = le16_to_cpu(xp->lower_setup.ip_fields.ipcse);
530 tp->tucss = xp->upper_setup.tcp_fields.tucss;
531 tp->tucso = xp->upper_setup.tcp_fields.tucso;
532 tp->tucse = le16_to_cpu(xp->upper_setup.tcp_fields.tucse);
533 tp->paylen = op & 0xfffff;
534 tp->hdr_len = xp->tcp_seg_setup.fields.hdr_len;
535 tp->mss = le16_to_cpu(xp->tcp_seg_setup.fields.mss);
536 tp->ip = (op & E1000_TXD_CMD_IP) ? 1 : 0;
537 tp->tcp = (op & E1000_TXD_CMD_TCP) ? 1 : 0;
538 tp->tse = (op & E1000_TXD_CMD_TSE) ? 1 : 0;
540 if (tp->tucso == 0) { // this is probably wrong
541 DBGOUT(TXSUM, "TCP/UDP: cso 0!\n");
542 tp->tucso = tp->tucss + (tp->tcp ? 16 : 6);
545 } else if (dtype == (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D)) {
/* Extended data descriptor: checksum options live in the upper dword. */
548 tp->sum_needed = le32_to_cpu(dp->upper.data) >> 8;
550 tp->cptse = ( txd_lower & E1000_TXD_CMD_TSE ) ? 1 : 0;
/* Record VLAN tag insertion request on the first/only descriptor of a frame. */
556 if (vlan_enabled(s) && is_vlan_txd(txd_lower) &&
557 (tp->cptse || txd_lower & E1000_TXD_CMD_EOP)) {
559 cpu_to_be16wu((uint16_t *)(tp->vlan_header),
560 le16_to_cpup((uint16_t *)(s->mac_reg + VET)));
561 cpu_to_be16wu((uint16_t *)(tp->vlan_header + 2),
562 le16_to_cpu(dp->upper.fields.special));
565 addr = le64_to_cpu(dp->buffer_addr);
566 if (tp->tse && tp->cptse) {
/* TSO path: fill the staging buffer up to header+mss, emit, repeat. */
571 if (tp->size + bytes > msh)
572 bytes = msh - tp->size;
574 bytes = MIN(sizeof(tp->data) - tp->size, bytes);
575 pci_dma_read(&s->dev, addr, tp->data + tp->size, bytes);
576 if ((sz = tp->size + bytes) >= hdr && tp->size < hdr)
577 memmove(tp->header, tp->data, hdr);
/* Re-prepend the saved header for each subsequent segment. */
582 memmove(tp->data, tp->header, hdr);
585 } while (split_size -= bytes);
586 } else if (!tp->tse && tp->cptse) {
587 // context descriptor TSE is not set, while data descriptor TSE is set
588 DBGOUT(TXERR, "TCP segmentation error\n");
/* Non-TSO path: append the buffer, clamped to the staging area size. */
590 split_size = MIN(sizeof(tp->data) - tp->size, split_size);
591 pci_dma_read(&s->dev, addr, tp->data + tp->size, split_size);
592 tp->size += split_size;
595 if (!(txd_lower & E1000_TXD_CMD_EOP))
597 if (!(tp->tse && tp->cptse && tp->size < hdr))
/* Write back descriptor status (DD set, error bits cleared) to guest memory
 * when the descriptor requested report-status; returns the TXDW interrupt
 * cause bit to accumulate, or nothing when no writeback was requested. */
607 txdesc_writeback(E1000State *s, dma_addr_t base, struct e1000_tx_desc *dp)
609 uint32_t txd_upper, txd_lower = le32_to_cpu(dp->lower.data);
611 if (!(txd_lower & (E1000_TXD_CMD_RS|E1000_TXD_CMD_RPS)))
613 txd_upper = (le32_to_cpu(dp->upper.data) | E1000_TXD_STAT_DD) &
614 ~(E1000_TXD_STAT_EC | E1000_TXD_STAT_LC | E1000_TXD_STAT_TU);
615 dp->upper.data = cpu_to_le32(txd_upper);
/* Only the upper dword of the descriptor is written back. */
616 pci_dma_write(&s->dev, base + ((char *)&dp->upper - (char *)dp),
617 &dp->upper, sizeof(dp->upper));
618 return E1000_ICR_TXDW;
/* Guest-physical base address of the TX descriptor ring (TDBAH:TDBAL,
 * low 4 bits of TDBAL masked off per hardware alignment). */
621 static uint64_t tx_desc_base(E1000State *s)
623 uint64_t bah = s->mac_reg[TDBAH];
624 uint64_t bal = s->mac_reg[TDBAL] & ~0xf;
626 return (bah << 32) + bal;
/* Drain the TX descriptor ring: walk TDH toward TDT, processing and writing
 * back each descriptor, wrapping at TDLEN, and finally raise the accumulated
 * interrupt causes.  Bails out with a diagnostic if TDH wraps all the way
 * around to its starting point (bogus guest TDT/TDLEN values). */
630 start_xmit(E1000State *s)
633 struct e1000_tx_desc desc;
634 uint32_t tdh_start = s->mac_reg[TDH], cause = E1000_ICS_TXQE;
636 if (!(s->mac_reg[TCTL] & E1000_TCTL_EN)) {
637 DBGOUT(TX, "tx disabled\n");
641 while (s->mac_reg[TDH] != s->mac_reg[TDT]) {
642 base = tx_desc_base(s) +
643 sizeof(struct e1000_tx_desc) * s->mac_reg[TDH];
644 pci_dma_read(&s->dev, base, &desc, sizeof(desc));
646 DBGOUT(TX, "index %d: %p : %x %x\n", s->mac_reg[TDH],
647 (void *)(intptr_t)desc.buffer_addr, desc.lower.data,
650 process_tx_desc(s, &desc);
651 cause |= txdesc_writeback(s, base, &desc);
/* Advance TDH and wrap when the ring end (TDLEN) is reached. */
653 if (++s->mac_reg[TDH] * sizeof(desc) >= s->mac_reg[TDLEN])
656 * the following could happen only if guest sw assigns
657 * bogus values to TDT/TDLEN.
658 * there's nothing too intelligent we could do about this.
660 if (s->mac_reg[TDH] == tdh_start) {
661 DBGOUT(TXERR, "TDH wraparound @%x, TDT %x, TDLEN %x\n",
662 tdh_start, s->mac_reg[TDT], s->mac_reg[TDLEN]);
666 set_ics(s, 0, cause);
/* Decide whether an incoming frame should be accepted, applying in order:
 * VLAN filter (VFTA lookup), unicast/multicast promiscuous modes, broadcast
 * acceptance, exact unicast match against the RA table, and finally the
 * multicast table array (MTA) hash lookup selected by RCTL.MO. */
670 receive_filter(E1000State *s, const uint8_t *buf, int size)
672 static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
673 static const int mta_shift[] = {4, 3, 2, 0};
674 uint32_t f, rctl = s->mac_reg[RCTL], ra[2], *rp;
676 if (is_vlan_packet(s, buf) && vlan_rx_filter_enabled(s)) {
677 uint16_t vid = be16_to_cpup((uint16_t *)(buf + 14));
678 uint32_t vfta = le32_to_cpup((uint32_t *)(s->mac_reg + VFTA) +
679 ((vid >> 5) & 0x7f));
680 if ((vfta & (1 << (vid & 0x1f))) == 0)
684 if (rctl & E1000_RCTL_UPE) // promiscuous
687 if ((buf[0] & 1) && (rctl & E1000_RCTL_MPE)) // promiscuous mcast
690 if ((rctl & E1000_RCTL_BAM) && !memcmp(buf, bcast, sizeof bcast))
/* Exact-match unicast filter: RA table entries are (low, high) pairs
 * with the address-valid bit in the high word. */
693 for (rp = s->mac_reg + RA; rp < s->mac_reg + RA + 32; rp += 2) {
694 if (!(rp[1] & E1000_RAH_AV))
696 ra[0] = cpu_to_le32(rp[0]);
697 ra[1] = cpu_to_le32(rp[1]);
698 if (!memcmp(buf, (uint8_t *)ra, 6)) {
700 "unicast match[%d]: %02x:%02x:%02x:%02x:%02x:%02x\n",
701 (int)(rp - s->mac_reg - RA)/2,
702 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
706 DBGOUT(RXFILTER, "unicast mismatch: %02x:%02x:%02x:%02x:%02x:%02x\n",
707 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
/* Multicast hash: RCTL.MO selects which 12 bits of the address to use. */
709 f = mta_shift[(rctl >> E1000_RCTL_MO_SHIFT) & 3];
710 f = (((buf[5] << 8) | buf[4]) >> f) & 0xfff;
711 if (s->mac_reg[MTA + (f >> 5)] & (1 << (f & 0x1f)))
714 "dropping, inexact filter mismatch: %02x:%02x:%02x:%02x:%02x:%02x MO %d MTA[%d] %x\n",
715 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5],
716 (rctl >> E1000_RCTL_MO_SHIFT) & 3, f >> 5,
717 s->mac_reg[MTA + (f >> 5)]);
/* Backend link-status callback: mirror the backend state into STATUS.LU and
 * raise a Link Status Change interrupt if it actually changed. */
723 e1000_set_link_status(VLANClientState *nc)
725 E1000State *s = DO_UPCAST(NICState, nc, nc)->opaque;
726 uint32_t old_status = s->mac_reg[STATUS];
734 if (s->mac_reg[STATUS] != old_status)
735 set_ics(s, 0, E1000_ICR_LSC);
/* Check whether the RX ring currently has enough free descriptors to hold
 * total_size bytes, accounting for ring wraparound via check_rxov. */
738 static bool e1000_has_rxbufs(E1000State *s, size_t total_size)
741 /* Fast-path short packets */
742 if (total_size <= s->rxbuf_size) {
743 return s->mac_reg[RDH] != s->mac_reg[RDT] || !s->check_rxov;
745 if (s->mac_reg[RDH] < s->mac_reg[RDT]) {
746 bufs = s->mac_reg[RDT] - s->mac_reg[RDH];
747 } else if (s->mac_reg[RDH] > s->mac_reg[RDT] || !s->check_rxov) {
/* Head is past the tail: free space wraps around the end of the ring. */
748 bufs = s->mac_reg[RDLEN] / sizeof(struct e1000_rx_desc) +
749 s->mac_reg[RDT] - s->mac_reg[RDH];
753 return total_size <= bufs * s->rxbuf_size;
/* Backend poll callback: accept packets only when RX is enabled and at
 * least one receive buffer is available. */
757 e1000_can_receive(VLANClientState *nc)
759 E1000State *s = DO_UPCAST(NICState, nc, nc)->opaque;
761 return (s->mac_reg[RCTL] & E1000_RCTL_EN) && e1000_has_rxbufs(s, 1);
/* Guest-physical base address of the RX descriptor ring (RDBAH:RDBAL,
 * low 4 bits of RDBAL masked off per hardware alignment). */
764 static uint64_t rx_desc_base(E1000State *s)
766 uint64_t bah = s->mac_reg[RDBAH];
767 uint64_t bal = s->mac_reg[RDBAL] & ~0xf;
769 return (bah << 32) + bal;
/* Receive path: pad runt frames to the 60-byte minimum, apply the receive
 * filter, strip the VLAN tag into the descriptor's "special" field when VLAN
 * handling is enabled, then DMA the payload into one or more RX descriptors
 * (clamped to rxbuf_size each), update TORL/TORH statistics, and raise the
 * appropriate RXT0/RXO/RXDMT0 interrupt causes. */
773 e1000_receive(VLANClientState *nc, const uint8_t *buf, size_t size)
775 E1000State *s = DO_UPCAST(NICState, nc, nc)->opaque;
776 struct e1000_rx_desc desc;
780 uint16_t vlan_special = 0;
781 uint8_t vlan_status = 0, vlan_offset = 0;
782 uint8_t min_buf[MIN_BUF_SIZE];
787 if (!(s->mac_reg[RCTL] & E1000_RCTL_EN))
790 /* Pad to minimum Ethernet frame length */
791 if (size < sizeof(min_buf)) {
792 memcpy(min_buf, buf, size);
793 memset(&min_buf[size], 0, sizeof(min_buf) - size);
795 size = sizeof(min_buf);
798 if (!receive_filter(s, buf, size))
/* VLAN-tagged frame: record the tag, shift the MAC header over it. */
801 if (vlan_enabled(s) && is_vlan_packet(s, buf)) {
802 vlan_special = cpu_to_le16(be16_to_cpup((uint16_t *)(buf + 14)));
803 memmove((uint8_t *)buf + 4, buf, 12);
804 vlan_status = E1000_RXD_STAT_VP;
809 rdh_start = s->mac_reg[RDH];
811 total_size = size + fcs_len(s);
812 if (!e1000_has_rxbufs(s, total_size)) {
813 set_ics(s, 0, E1000_ICS_RXO);
/* Fill descriptors one at a time until the whole frame is stored. */
817 desc_size = total_size - desc_offset;
818 if (desc_size > s->rxbuf_size) {
819 desc_size = s->rxbuf_size;
821 base = rx_desc_base(s) + sizeof(desc) * s->mac_reg[RDH];
822 pci_dma_read(&s->dev, base, &desc, sizeof(desc));
823 desc.special = vlan_special;
824 desc.status |= (vlan_status | E1000_RXD_STAT_DD);
825 if (desc.buffer_addr) {
826 if (desc_offset < size) {
827 size_t copy_size = size - desc_offset;
828 if (copy_size > s->rxbuf_size) {
829 copy_size = s->rxbuf_size;
831 pci_dma_write(&s->dev, le64_to_cpu(desc.buffer_addr),
832 buf + desc_offset + vlan_offset, copy_size);
834 desc_offset += desc_size;
835 desc.length = cpu_to_le16(desc_size);
836 if (desc_offset >= total_size) {
837 desc.status |= E1000_RXD_STAT_EOP | E1000_RXD_STAT_IXSM;
839 /* Guest zeroing out status is not a hardware requirement.
840 Clear EOP in case guest didn't do it. */
841 desc.status &= ~E1000_RXD_STAT_EOP;
843 } else { // as per intel docs; skip descriptors with null buf addr
844 DBGOUT(RX, "Null RX descriptor!!\n");
846 pci_dma_write(&s->dev, base, &desc, sizeof(desc));
/* Advance RDH and wrap at RDLEN; detect a full wrap (bogus RDT/RDLEN). */
848 if (++s->mac_reg[RDH] * sizeof(desc) >= s->mac_reg[RDLEN])
851 /* see comment in start_xmit; same here */
852 if (s->mac_reg[RDH] == rdh_start) {
853 DBGOUT(RXERR, "RDH wraparound @%x, RDT %x, RDLEN %x\n",
854 rdh_start, s->mac_reg[RDT], s->mac_reg[RDLEN]);
855 set_ics(s, 0, E1000_ICS_RXO);
858 } while (desc_offset < total_size);
862 /* TOR - Total Octets Received:
863 * This register includes bytes received in a packet from the <Destination
864 * Address> field through the <CRC> field, inclusively.
866 n = s->mac_reg[TORL] + size + /* Always include FCS length. */ 4;
867 if (n < s->mac_reg[TORL])
869 s->mac_reg[TORL] = n;
/* Raise RXDMT0 when free descriptors fall below the RDMTS threshold. */
872 if ((rdt = s->mac_reg[RDT]) < s->mac_reg[RDH])
873 rdt += s->mac_reg[RDLEN] / sizeof(desc);
874 if (((rdt - s->mac_reg[RDH]) * sizeof(desc)) <= s->mac_reg[RDLEN] >>
876 n |= E1000_ICS_RXDMT0;
/* Plain MAC register read. */
884 mac_readreg(E1000State *s, int index)
886 return s->mac_reg[index];
/* ICR read: reading the interrupt cause register clears all pending causes. */
890 mac_icr_read(E1000State *s, int index)
892 uint32_t ret = s->mac_reg[ICR];
894 DBGOUT(INTERRUPT, "ICR read: %x\n", ret);
895 set_interrupt_cause(s, 0, 0);
/* Read-to-clear for 32-bit statistics counters. */
900 mac_read_clr4(E1000State *s, int index)
902 uint32_t ret = s->mac_reg[index];
904 s->mac_reg[index] = 0;
/* Read-to-clear for 64-bit counters: reading the high word clears both halves. */
909 mac_read_clr8(E1000State *s, int index)
911 uint32_t ret = s->mac_reg[index];
913 s->mac_reg[index] = 0;
914 s->mac_reg[index-1] = 0;
/* Plain MAC register write. */
919 mac_writereg(E1000State *s, int index, uint32_t val)
921 s->mac_reg[index] = val;
/* RDT write: store the 16-bit tail pointer (new buffers become available). */
925 set_rdt(E1000State *s, int index, uint32_t val)
928 s->mac_reg[index] = val & 0xffff;
/* Generic 16-bit register write (TDH/RDH). */
932 set_16bit(E1000State *s, int index, uint32_t val)
934 s->mac_reg[index] = val & 0xffff;
/* Ring length write: hardware only honors bits 19:7 (128-byte granularity). */
938 set_dlen(E1000State *s, int index, uint32_t val)
940 s->mac_reg[index] = val & 0xfff80;
/* TCTL/TDT write: store value, mask TDT to 16 bits, and kick transmission. */
944 set_tctl(E1000State *s, int index, uint32_t val)
946 s->mac_reg[index] = val;
947 s->mac_reg[TDT] &= 0xffff;
/* ICR write: writing 1s clears the corresponding pending cause bits. */
952 set_icr(E1000State *s, int index, uint32_t val)
954 DBGOUT(INTERRUPT, "set_icr %x\n", val);
955 set_interrupt_cause(s, 0, s->mac_reg[ICR] & ~val);
/* IMC write: clear the given bits from the interrupt mask. */
959 set_imc(E1000State *s, int index, uint32_t val)
961 s->mac_reg[IMS] &= ~val;
/* IMS write: set the given bits in the interrupt mask. */
966 set_ims(E1000State *s, int index, uint32_t val)
968 s->mac_reg[IMS] |= val;
/* Dispatch table: per-register read handlers, indexed by mac_reg word index. */
972 #define getreg(x) [x] = mac_readreg
973 static uint32_t (*macreg_readops[])(E1000State *, int) = {
974 getreg(PBA), getreg(RCTL), getreg(TDH), getreg(TXDCTL),
975 getreg(WUFC), getreg(TDT), getreg(CTRL), getreg(LEDCTL),
976 getreg(MANC), getreg(MDIC), getreg(SWSM), getreg(STATUS),
977 getreg(TORL), getreg(TOTL), getreg(IMS), getreg(TCTL),
978 getreg(RDH), getreg(RDT), getreg(VET), getreg(ICS),
979 getreg(TDBAL), getreg(TDBAH), getreg(RDBAH), getreg(RDBAL),
980 getreg(TDLEN), getreg(RDLEN),
/* Registers with read side effects or ranges of plain-read registers. */
982 [TOTH] = mac_read_clr8, [TORH] = mac_read_clr8, [GPRC] = mac_read_clr4,
983 [GPTC] = mac_read_clr4, [TPR] = mac_read_clr4, [TPT] = mac_read_clr4,
984 [ICR] = mac_icr_read, [EECD] = get_eecd, [EERD] = flash_eerd_read,
985 [CRCERRS ... MPC] = &mac_readreg,
986 [RA ... RA+31] = &mac_readreg,
987 [MTA ... MTA+127] = &mac_readreg,
988 [VFTA ... VFTA+127] = &mac_readreg,
990 enum { NREADOPS = ARRAY_SIZE(macreg_readops) };
/* Dispatch table: per-register write handlers, indexed by mac_reg word index. */
992 #define putreg(x) [x] = mac_writereg
993 static void (*macreg_writeops[])(E1000State *, int, uint32_t) = {
994 putreg(PBA), putreg(EERD), putreg(SWSM), putreg(WUFC),
995 putreg(TDBAL), putreg(TDBAH), putreg(TXDCTL), putreg(RDBAH),
996 putreg(RDBAL), putreg(LEDCTL), putreg(VET),
997 [TDLEN] = set_dlen, [RDLEN] = set_dlen, [TCTL] = set_tctl,
998 [TDT] = set_tctl, [MDIC] = set_mdic, [ICS] = set_ics,
999 [TDH] = set_16bit, [RDH] = set_16bit, [RDT] = set_rdt,
1000 [IMC] = set_imc, [IMS] = set_ims, [ICR] = set_icr,
1001 [EECD] = set_eecd, [RCTL] = set_rx_control, [CTRL] = set_ctrl,
1002 [RA ... RA+31] = &mac_writereg,
1003 [MTA ... MTA+127] = &mac_writereg,
1004 [VFTA ... VFTA+127] = &mac_writereg,
1007 enum { NWRITEOPS = ARRAY_SIZE(macreg_writeops) };
/* MMIO write: dispatch to the register's write handler; log writes to
 * read-only registers and writes to unknown offsets. */
1010 e1000_mmio_write(void *opaque, target_phys_addr_t addr, uint64_t val,
1013 E1000State *s = opaque;
1014 unsigned int index = (addr & 0x1ffff) >> 2;
1016 if (index < NWRITEOPS && macreg_writeops[index]) {
1017 macreg_writeops[index](s, index, val);
1018 } else if (index < NREADOPS && macreg_readops[index]) {
1019 DBGOUT(MMIO, "e1000_mmio_writel RO %x: 0x%04"PRIx64"\n", index<<2, val);
1021 DBGOUT(UNKNOWN, "MMIO unknown write addr=0x%08x,val=0x%08"PRIx64"\n",
/* MMIO read: dispatch to the register's read handler; log unknown offsets. */
1027 e1000_mmio_read(void *opaque, target_phys_addr_t addr, unsigned size)
1029 E1000State *s = opaque;
1030 unsigned int index = (addr & 0x1ffff) >> 2;
1032 if (index < NREADOPS && macreg_readops[index])
1034 return macreg_readops[index](s, index);
1036 DBGOUT(UNKNOWN, "MMIO unknown read addr=0x%08x\n", index<<2);
/* MMIO region callbacks: 32-bit little-endian register accesses only. */
1040 static const MemoryRegionOps e1000_mmio_ops = {
1041 .read = e1000_mmio_read,
1042 .write = e1000_mmio_write,
1043 .endianness = DEVICE_LITTLE_ENDIAN,
1045 .min_access_size = 4,
1046 .max_access_size = 4,
/* I/O port read handler (legacy I/O BAR). */
1050 static uint64_t e1000_io_read(void *opaque, target_phys_addr_t addr,
1053 E1000State *s = opaque;
/* I/O port write handler (legacy I/O BAR). */
1059 static void e1000_io_write(void *opaque, target_phys_addr_t addr,
1060 uint64_t val, unsigned size)
1062 E1000State *s = opaque;
/* I/O port region callbacks. */
1067 static const MemoryRegionOps e1000_io_ops = {
1068 .read = e1000_io_read,
1069 .write = e1000_io_write,
1070 .endianness = DEVICE_LITTLE_ENDIAN,
/* VMState field test: true only when restoring a version-1 snapshot. */
1073 static bool is_version_1(void *opaque, int version_id)
1075 return version_id == 1;
/* Migration/savevm description: serializes PCI config, EEPROM bit-bang
 * state, TX offload staging state, the PHY register file, and the subset of
 * MAC registers that carry device state.  Field order is ABI; do not reorder. */
1078 static const VMStateDescription vmstate_e1000 = {
1081 .minimum_version_id = 1,
1082 .minimum_version_id_old = 1,
1083 .fields = (VMStateField []) {
1084 VMSTATE_PCI_DEVICE(dev, E1000State),
1085 VMSTATE_UNUSED_TEST(is_version_1, 4), /* was instance id */
1086 VMSTATE_UNUSED(4), /* Was mmio_base. */
1087 VMSTATE_UINT32(rxbuf_size, E1000State),
1088 VMSTATE_UINT32(rxbuf_min_shift, E1000State),
1089 VMSTATE_UINT32(eecd_state.val_in, E1000State),
1090 VMSTATE_UINT16(eecd_state.bitnum_in, E1000State),
1091 VMSTATE_UINT16(eecd_state.bitnum_out, E1000State),
1092 VMSTATE_UINT16(eecd_state.reading, E1000State),
1093 VMSTATE_UINT32(eecd_state.old_eecd, E1000State),
1094 VMSTATE_UINT8(tx.ipcss, E1000State),
1095 VMSTATE_UINT8(tx.ipcso, E1000State),
1096 VMSTATE_UINT16(tx.ipcse, E1000State),
1097 VMSTATE_UINT8(tx.tucss, E1000State),
1098 VMSTATE_UINT8(tx.tucso, E1000State),
1099 VMSTATE_UINT16(tx.tucse, E1000State),
1100 VMSTATE_UINT32(tx.paylen, E1000State),
1101 VMSTATE_UINT8(tx.hdr_len, E1000State),
1102 VMSTATE_UINT16(tx.mss, E1000State),
1103 VMSTATE_UINT16(tx.size, E1000State),
1104 VMSTATE_UINT16(tx.tso_frames, E1000State),
1105 VMSTATE_UINT8(tx.sum_needed, E1000State),
1106 VMSTATE_INT8(tx.ip, E1000State),
1107 VMSTATE_INT8(tx.tcp, E1000State),
1108 VMSTATE_BUFFER(tx.header, E1000State),
1109 VMSTATE_BUFFER(tx.data, E1000State),
1110 VMSTATE_UINT16_ARRAY(eeprom_data, E1000State, 64),
1111 VMSTATE_UINT16_ARRAY(phy_reg, E1000State, 0x20),
1112 VMSTATE_UINT32(mac_reg[CTRL], E1000State),
1113 VMSTATE_UINT32(mac_reg[EECD], E1000State),
1114 VMSTATE_UINT32(mac_reg[EERD], E1000State),
1115 VMSTATE_UINT32(mac_reg[GPRC], E1000State),
1116 VMSTATE_UINT32(mac_reg[GPTC], E1000State),
1117 VMSTATE_UINT32(mac_reg[ICR], E1000State),
1118 VMSTATE_UINT32(mac_reg[ICS], E1000State),
1119 VMSTATE_UINT32(mac_reg[IMC], E1000State),
1120 VMSTATE_UINT32(mac_reg[IMS], E1000State),
1121 VMSTATE_UINT32(mac_reg[LEDCTL], E1000State),
1122 VMSTATE_UINT32(mac_reg[MANC], E1000State),
1123 VMSTATE_UINT32(mac_reg[MDIC], E1000State),
1124 VMSTATE_UINT32(mac_reg[MPC], E1000State),
1125 VMSTATE_UINT32(mac_reg[PBA], E1000State),
1126 VMSTATE_UINT32(mac_reg[RCTL], E1000State),
1127 VMSTATE_UINT32(mac_reg[RDBAH], E1000State),
1128 VMSTATE_UINT32(mac_reg[RDBAL], E1000State),
1129 VMSTATE_UINT32(mac_reg[RDH], E1000State),
1130 VMSTATE_UINT32(mac_reg[RDLEN], E1000State),
1131 VMSTATE_UINT32(mac_reg[RDT], E1000State),
1132 VMSTATE_UINT32(mac_reg[STATUS], E1000State),
1133 VMSTATE_UINT32(mac_reg[SWSM], E1000State),
1134 VMSTATE_UINT32(mac_reg[TCTL], E1000State),
1135 VMSTATE_UINT32(mac_reg[TDBAH], E1000State),
1136 VMSTATE_UINT32(mac_reg[TDBAL], E1000State),
1137 VMSTATE_UINT32(mac_reg[TDH], E1000State),
1138 VMSTATE_UINT32(mac_reg[TDLEN], E1000State),
1139 VMSTATE_UINT32(mac_reg[TDT], E1000State),
1140 VMSTATE_UINT32(mac_reg[TORH], E1000State),
1141 VMSTATE_UINT32(mac_reg[TORL], E1000State),
1142 VMSTATE_UINT32(mac_reg[TOTH], E1000State),
1143 VMSTATE_UINT32(mac_reg[TOTL], E1000State),
1144 VMSTATE_UINT32(mac_reg[TPR], E1000State),
1145 VMSTATE_UINT32(mac_reg[TPT], E1000State),
1146 VMSTATE_UINT32(mac_reg[TXDCTL], E1000State),
1147 VMSTATE_UINT32(mac_reg[WUFC], E1000State),
1148 VMSTATE_UINT32(mac_reg[VET], E1000State),
1149 VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, RA, 32),
1150 VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, MTA, 128),
1151 VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, VFTA, 128),
1152 VMSTATE_END_OF_LIST()
/* Default EEPROM contents; the MAC address (words 0-2) and the checksum
 * word are filled in at device init time in pci_e1000_init(). */
1156 static const uint16_t e1000_eeprom_template[64] = {
1157 0x0000, 0x0000, 0x0000, 0x0000, 0xffff, 0x0000, 0x0000, 0x0000,
1158 0x3000, 0x1000, 0x6403, E1000_DEVID, 0x8086, E1000_DEVID, 0x8086, 0x3040,
1159 0x0008, 0x2000, 0x7e14, 0x0048, 0x1000, 0x00d8, 0x0000, 0x2700,
1160 0x6cc9, 0x3150, 0x0722, 0x040b, 0x0984, 0x0000, 0xc000, 0x0706,
1161 0x1008, 0x0000, 0x0f04, 0x7fff, 0x4d01, 0xffff, 0xffff, 0xffff,
1162 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
1163 0x0100, 0x4000, 0x121c, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
1164 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x0000,
/* Initialize the MMIO and I/O memory regions.  MMIO write coalescing is
 * enabled everywhere except around registers with side effects (interrupt
 * and TX kick registers listed in excluded_regs). */
1170 e1000_mmio_setup(E1000State *d)
1173 const uint32_t excluded_regs[] = {
1174 E1000_MDIC, E1000_ICR, E1000_ICS, E1000_IMS,
1175 E1000_IMC, E1000_TCTL, E1000_TDT, PNPMMIO_SIZE
1178 memory_region_init_io(&d->mmio, &e1000_mmio_ops, d, "e1000-mmio",
1180 memory_region_add_coalescing(&d->mmio, 0, excluded_regs[0]);
/* Coalesce each gap between consecutive excluded registers. */
1181 for (i = 0; excluded_regs[i] != PNPMMIO_SIZE; i++)
1182 memory_region_add_coalescing(&d->mmio, excluded_regs[i] + 4,
1183 excluded_regs[i+1] - excluded_regs[i] - 4);
1184 memory_region_init_io(&d->io, &e1000_io_ops, d, "e1000-io", IOPORT_SIZE);
/* Network backend cleanup callback. */
1188 e1000_cleanup(VLANClientState *nc)
1190 E1000State *s = DO_UPCAST(NICState, nc, nc)->opaque;
/* PCI device teardown: free the autoneg timer, destroy the MMIO/IO regions,
 * and detach from the network backend. */
1196 pci_e1000_uninit(PCIDevice *dev)
1198 E1000State *d = DO_UPCAST(E1000State, dev, dev);
1200 qemu_del_timer(d->autoneg_timer);
1201 qemu_free_timer(d->autoneg_timer);
1202 memory_region_destroy(&d->mmio);
1203 memory_region_destroy(&d->io);
1204 qemu_del_vlan_client(&d->nic->nc);
/* Network backend callbacks for the emulated NIC. */
1208 static NetClientInfo net_e1000_info = {
1209 .type = NET_CLIENT_TYPE_NIC,
1210 .size = sizeof(NICState),
1211 .can_receive = e1000_can_receive,
1212 .receive = e1000_receive,
1213 .cleanup = e1000_cleanup,
1214 .link_status_changed = e1000_set_link_status,
/* PCI device init: set up config space, register the MMIO/IO BARs, build the
 * EEPROM image (template + MAC address + checksum word), create the NIC
 * backend connection, and allocate the auto-negotiation timer. */
1217 static int pci_e1000_init(PCIDevice *pci_dev)
1219 E1000State *d = DO_UPCAST(E1000State, dev, pci_dev);
1221 uint16_t checksum = 0;
1225 pci_conf = d->dev.config;
1227 /* TODO: RST# value should be 0, PCI spec 6.2.4 */
1228 pci_conf[PCI_CACHE_LINE_SIZE] = 0x10;
1230 pci_conf[PCI_INTERRUPT_PIN] = 1; /* interrupt pin A */
1232 e1000_mmio_setup(d);
1234 pci_register_bar(&d->dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &d->mmio);
1236 pci_register_bar(&d->dev, 1, PCI_BASE_ADDRESS_SPACE_IO, &d->io);
1238 memmove(d->eeprom_data, e1000_eeprom_template,
1239 sizeof e1000_eeprom_template);
1240 qemu_macaddr_default_if_unset(&d->conf.macaddr);
1241 macaddr = d->conf.macaddr.a;
/* MAC address occupies EEPROM words 0-2, little-endian byte pairs. */
1242 for (i = 0; i < 3; i++)
1243 d->eeprom_data[i] = (macaddr[2*i+1]<<8) | macaddr[2*i];
/* EEPROM checksum: all words up to the checksum register sum to EEPROM_SUM. */
1244 for (i = 0; i < EEPROM_CHECKSUM_REG; i++)
1245 checksum += d->eeprom_data[i];
1246 checksum = (uint16_t) EEPROM_SUM - checksum;
1247 d->eeprom_data[EEPROM_CHECKSUM_REG] = checksum;
1249 d->nic = qemu_new_nic(&net_e1000_info, &d->conf,
1250 object_get_typename(OBJECT(d)), d->dev.qdev.id, d);
1252 qemu_format_nic_info_str(&d->nic->nc, macaddr);
1254 add_boot_device_path(d->conf.bootindex, &pci_dev->qdev, "/ethernet-phy@0");
1256 d->autoneg_timer = qemu_new_timer_ms(vm_clock, e1000_autoneg_timer, d);
/* qdev reset hook wrapper. */
1261 static void qdev_e1000_reset(DeviceState *dev)
1263 E1000State *d = DO_UPCAST(E1000State, dev.qdev, dev);
/* qdev properties: NIC configuration (MAC address, backend, bootindex). */
1267 static Property e1000_properties[] = {
1268 DEFINE_NIC_PROPERTIES(E1000State, conf),
1269 DEFINE_PROP_END_OF_LIST(),
/* QOM class init: wire up the PCI device callbacks, IDs, option ROM,
 * reset handler, migration description, and properties. */
1272 static void e1000_class_init(ObjectClass *klass, void *data)
1274 DeviceClass *dc = DEVICE_CLASS(klass);
1275 PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
1277 k->init = pci_e1000_init;
1278 k->exit = pci_e1000_uninit;
1279 k->romfile = "pxe-e1000.rom";
1280 k->vendor_id = PCI_VENDOR_ID_INTEL;
1281 k->device_id = E1000_DEVID;
1283 k->class_id = PCI_CLASS_NETWORK_ETHERNET;
1284 dc->desc = "Intel Gigabit Ethernet";
1285 dc->reset = qdev_e1000_reset;
1286 dc->vmsd = &vmstate_e1000;
1287 dc->props = e1000_properties;
/* QOM type registration for the e1000 PCI device. */
1290 static TypeInfo e1000_info = {
1292 .parent = TYPE_PCI_DEVICE,
1293 .instance_size = sizeof(E1000State),
1294 .class_init = e1000_class_init,
1297 static void e1000_register_types(void)
1299 type_register_static(&e1000_info);
1302 type_init(e1000_register_types)