Merge branch 'kmap_atomic' of git://github.com/congwang/linux
author     Linus Torvalds <torvalds@linux-foundation.org>
           Wed, 21 Mar 2012 16:40:26 +0000 (09:40 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Wed, 21 Mar 2012 16:40:26 +0000 (09:40 -0700)
Pull kmap_atomic cleanup from Cong Wang.

It's been in -next for a long time, and it gets rid of the (no longer
used) second argument to k[un]map_atomic().
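
For reference, the calling convention changes roughly like this (a minimal
sketch of a hypothetical helper, assuming <linux/highmem.h>; KM_USER0 stands
in for whatever km_type slot a call site used before):

  /* before: deprecated two-argument form, the caller picks a KM_* slot */
  static void copy_from_page(struct page *page, void *buf, size_t len)
  {
          void *vaddr = kmap_atomic(page, KM_USER0);

          memcpy(buf, vaddr, len);
          kunmap_atomic(vaddr, KM_USER0);
  }

  /* after: the mapping slot is managed implicitly */
  static void copy_from_page(struct page *page, void *buf, size_t len)
  {
          void *vaddr = kmap_atomic(page);

          memcpy(buf, vaddr, len);
          kunmap_atomic(vaddr);
  }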

Fix up a few trivial conflicts in various drivers, and do an "evil
merge" to catch some new uses that have come in since Cong's tree.

* 'kmap_atomic' of git://github.com/congwang/linux: (59 commits)
  feature-removal-schedule.txt: schedule the deprecated form of kmap_atomic() for removal
  highmem: kill all __kmap_atomic() [swarren@nvidia.com: highmem: Fix ARM build break due to __kmap_atomic rename]
  drbd: remove the second argument of k[un]map_atomic()
  zcache: remove the second argument of k[un]map_atomic()
  gma500: remove the second argument of k[un]map_atomic()
  dm: remove the second argument of k[un]map_atomic()
  tomoyo: remove the second argument of k[un]map_atomic()
  sunrpc: remove the second argument of k[un]map_atomic()
  rds: remove the second argument of k[un]map_atomic()
  net: remove the second argument of k[un]map_atomic()
  mm: remove the second argument of k[un]map_atomic()
  lib: remove the second argument of k[un]map_atomic()
  power: remove the second argument of k[un]map_atomic()
  kdb: remove the second argument of k[un]map_atomic()
  udf: remove the second argument of k[un]map_atomic()
  ubifs: remove the second argument of k[un]map_atomic()
  squashfs: remove the second argument of k[un]map_atomic()
  reiserfs: remove the second argument of k[un]map_atomic()
  ocfs2: remove the second argument of k[un]map_atomic()
  ntfs: remove the second argument of k[un]map_atomic()
  ...

15 files changed:
Documentation/feature-removal-schedule.txt
arch/x86/crypto/aesni-intel_glue.c
drivers/net/ethernet/intel/e1000/e1000_main.c
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/sun/cassini.c
drivers/scsi/fcoe/fcoe.c
drivers/scsi/storvsc_drv.c
drivers/staging/ramster/xvmalloc.c
drivers/staging/ramster/zcache-main.c
drivers/staging/zcache/zcache-main.c
drivers/staging/zram/zram_drv.c
fs/exec.c
fs/namei.c
net/rds/ib_recv.c
net/rds/iw_recv.c

@@@ -527,11 -527,8 +527,19 @@@ Who:     Nicolas Ferre <nicolas.ferre@atmel
  
  ----------------------------
  
 +What: Low Performance USB Block driver ("CONFIG_BLK_DEV_UB")
 +When: 3.6
 +Why:  This driver provides support for USB storage devices like "USB
 +      sticks". As of now, it is deactivated in Debian, Fedora and
 +        Ubuntu. All current users can switch over to usb-storage
 +        (CONFIG_USB_STORAGE) which only drawback is the additional SCSI
 +        stack.
 +Who:  Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
++
++----------------------------
++
+ What: kmap_atomic(page, km_type)
+ When: 3.5
+ Why:  The old kmap_atomic() with two arguments is deprecated, we only
+       keep it for backward compatibility for few cycles and then drop it.
+ Who:  Cong Wang <amwang@redhat.com>
@@@ -28,7 -28,6 +28,7 @@@
  #include <crypto/aes.h>
  #include <crypto/cryptd.h>
  #include <crypto/ctr.h>
 +#include <asm/cpu_device_id.h>
  #include <asm/i387.h>
  #include <asm/aes.h>
  #include <crypto/scatterwalk.h>
@@@ -1108,12 -1107,12 +1108,12 @@@ static int __driver_rfc4106_encrypt(str
                one_entry_in_sg = 1;
                scatterwalk_start(&src_sg_walk, req->src);
                scatterwalk_start(&assoc_sg_walk, req->assoc);
-               src = scatterwalk_map(&src_sg_walk, 0);
-               assoc = scatterwalk_map(&assoc_sg_walk, 0);
+               src = scatterwalk_map(&src_sg_walk);
+               assoc = scatterwalk_map(&assoc_sg_walk);
                dst = src;
                if (unlikely(req->src != req->dst)) {
                        scatterwalk_start(&dst_sg_walk, req->dst);
-                       dst = scatterwalk_map(&dst_sg_walk, 0);
+                       dst = scatterwalk_map(&dst_sg_walk);
                }
  
        } else {
         * back to the packet. */
        if (one_entry_in_sg) {
                if (unlikely(req->src != req->dst)) {
-                       scatterwalk_unmap(dst, 0);
+                       scatterwalk_unmap(dst);
                        scatterwalk_done(&dst_sg_walk, 0, 0);
                }
-               scatterwalk_unmap(src, 0);
-               scatterwalk_unmap(assoc, 0);
+               scatterwalk_unmap(src);
+               scatterwalk_unmap(assoc);
                scatterwalk_done(&src_sg_walk, 0, 0);
                scatterwalk_done(&assoc_sg_walk, 0, 0);
        } else {
@@@ -1190,12 -1189,12 +1190,12 @@@ static int __driver_rfc4106_decrypt(str
                one_entry_in_sg = 1;
                scatterwalk_start(&src_sg_walk, req->src);
                scatterwalk_start(&assoc_sg_walk, req->assoc);
-               src = scatterwalk_map(&src_sg_walk, 0);
-               assoc = scatterwalk_map(&assoc_sg_walk, 0);
+               src = scatterwalk_map(&src_sg_walk);
+               assoc = scatterwalk_map(&assoc_sg_walk);
                dst = src;
                if (unlikely(req->src != req->dst)) {
                        scatterwalk_start(&dst_sg_walk, req->dst);
-                       dst = scatterwalk_map(&dst_sg_walk, 0);
+                       dst = scatterwalk_map(&dst_sg_walk);
                }
  
        } else {
  
        if (one_entry_in_sg) {
                if (unlikely(req->src != req->dst)) {
-                       scatterwalk_unmap(dst, 0);
+                       scatterwalk_unmap(dst);
                        scatterwalk_done(&dst_sg_walk, 0, 0);
                }
-               scatterwalk_unmap(src, 0);
-               scatterwalk_unmap(assoc, 0);
+               scatterwalk_unmap(src);
+               scatterwalk_unmap(assoc);
                scatterwalk_done(&src_sg_walk, 0, 0);
                scatterwalk_done(&assoc_sg_walk, 0, 0);
        } else {
@@@ -1254,19 -1253,14 +1254,19 @@@ static struct crypto_alg __rfc4106_alg 
  };
  #endif
  
 +
 +static const struct x86_cpu_id aesni_cpu_id[] = {
 +      X86_FEATURE_MATCH(X86_FEATURE_AES),
 +      {}
 +};
 +MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
 +
  static int __init aesni_init(void)
  {
        int err;
  
 -      if (!cpu_has_aes) {
 -              printk(KERN_INFO "Intel AES-NI instructions are not detected.\n");
 +      if (!x86_match_cpu(aesni_cpu_id))
                return -ENODEV;
 -      }
  
        if ((err = crypto_fpu_init()))
                goto fpu_err;
@@@ -730,8 -730,10 +730,8 @@@ static void e1000_dump_eeprom(struct e1
        eeprom.offset = 0;
  
        data = kmalloc(eeprom.len, GFP_KERNEL);
 -      if (!data) {
 -              pr_err("Unable to allocate memory to dump EEPROM data\n");
 +      if (!data)
                return;
 -      }
  
        ops->get_eeprom(netdev, &eeprom, data);
  
@@@ -1067,11 -1069,8 +1067,11 @@@ static int __devinit e1000_probe(struc
           (hw->mac_type != e1000_82547))
                netdev->hw_features |= NETIF_F_TSO;
  
 +      netdev->priv_flags |= IFF_SUPP_NOFCS;
 +
        netdev->features |= netdev->hw_features;
        netdev->hw_features |= NETIF_F_RXCSUM;
 +      netdev->hw_features |= NETIF_F_RXFCS;
  
        if (pci_using_dac) {
                netdev->features |= NETIF_F_HIGHDMA;
@@@ -2695,7 -2694,6 +2695,7 @@@ set_itr_now
  #define E1000_TX_FLAGS_VLAN           0x00000002
  #define E1000_TX_FLAGS_TSO            0x00000004
  #define E1000_TX_FLAGS_IPV4           0x00000008
 +#define E1000_TX_FLAGS_NO_FCS         0x00000010
  #define E1000_TX_FLAGS_VLAN_MASK      0xffff0000
  #define E1000_TX_FLAGS_VLAN_SHIFT     16
  
@@@ -2997,9 -2995,6 +2997,9 @@@ static void e1000_tx_queue(struct e1000
                txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
        }
  
 +      if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
 +              txd_lower &= ~(E1000_TXD_CMD_IFCS);
 +
        i = tx_ring->next_to_use;
  
        while (count--) {
  
        tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
  
 +      /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
 +      if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
 +              tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
 +
        /* Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.  (Only
         * applicable for weak-ordered memory model archs,
@@@ -3233,9 -3224,6 +3233,9 @@@ static netdev_tx_t e1000_xmit_frame(str
        if (likely(skb->protocol == htons(ETH_P_IP)))
                tx_flags |= E1000_TX_FLAGS_IPV4;
  
 +      if (unlikely(skb->no_fcs))
 +              tx_flags |= E1000_TX_FLAGS_NO_FCS;
 +
        count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
                             nr_frags, mss);
  
        return NETDEV_TX_OK;
  }
  
 +#define NUM_REGS 38 /* 1 based count */
 +static void e1000_regdump(struct e1000_adapter *adapter)
 +{
 +      struct e1000_hw *hw = &adapter->hw;
 +      u32 regs[NUM_REGS];
 +      u32 *regs_buff = regs;
 +      int i = 0;
 +
 +      static const char * const reg_name[] = {
 +              "CTRL",  "STATUS",
 +              "RCTL", "RDLEN", "RDH", "RDT", "RDTR",
 +              "TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT",
 +              "TIDV", "TXDCTL", "TADV", "TARC0",
 +              "TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1",
 +              "TXDCTL1", "TARC1",
 +              "CTRL_EXT", "ERT", "RDBAL", "RDBAH",
 +              "TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC",
 +              "RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC"
 +      };
 +
 +      regs_buff[0]  = er32(CTRL);
 +      regs_buff[1]  = er32(STATUS);
 +
 +      regs_buff[2]  = er32(RCTL);
 +      regs_buff[3]  = er32(RDLEN);
 +      regs_buff[4]  = er32(RDH);
 +      regs_buff[5]  = er32(RDT);
 +      regs_buff[6]  = er32(RDTR);
 +
 +      regs_buff[7]  = er32(TCTL);
 +      regs_buff[8]  = er32(TDBAL);
 +      regs_buff[9]  = er32(TDBAH);
 +      regs_buff[10] = er32(TDLEN);
 +      regs_buff[11] = er32(TDH);
 +      regs_buff[12] = er32(TDT);
 +      regs_buff[13] = er32(TIDV);
 +      regs_buff[14] = er32(TXDCTL);
 +      regs_buff[15] = er32(TADV);
 +      regs_buff[16] = er32(TARC0);
 +
 +      regs_buff[17] = er32(TDBAL1);
 +      regs_buff[18] = er32(TDBAH1);
 +      regs_buff[19] = er32(TDLEN1);
 +      regs_buff[20] = er32(TDH1);
 +      regs_buff[21] = er32(TDT1);
 +      regs_buff[22] = er32(TXDCTL1);
 +      regs_buff[23] = er32(TARC1);
 +      regs_buff[24] = er32(CTRL_EXT);
 +      regs_buff[25] = er32(ERT);
 +      regs_buff[26] = er32(RDBAL0);
 +      regs_buff[27] = er32(RDBAH0);
 +      regs_buff[28] = er32(TDFH);
 +      regs_buff[29] = er32(TDFT);
 +      regs_buff[30] = er32(TDFHS);
 +      regs_buff[31] = er32(TDFTS);
 +      regs_buff[32] = er32(TDFPC);
 +      regs_buff[33] = er32(RDFH);
 +      regs_buff[34] = er32(RDFT);
 +      regs_buff[35] = er32(RDFHS);
 +      regs_buff[36] = er32(RDFTS);
 +      regs_buff[37] = er32(RDFPC);
 +
 +      pr_info("Register dump\n");
 +      for (i = 0; i < NUM_REGS; i++)
 +              pr_info("%-15s  %08x\n", reg_name[i], regs_buff[i]);
 +}
 +
 +/*
 + * e1000_dump: Print registers, tx ring and rx ring
 + */
 +static void e1000_dump(struct e1000_adapter *adapter)
 +{
 +      /* this code doesn't handle multiple rings */
 +      struct e1000_tx_ring *tx_ring = adapter->tx_ring;
 +      struct e1000_rx_ring *rx_ring = adapter->rx_ring;
 +      int i;
 +
 +      if (!netif_msg_hw(adapter))
 +              return;
 +
 +      /* Print Registers */
 +      e1000_regdump(adapter);
 +
 +      /*
 +       * transmit dump
 +       */
 +      pr_info("TX Desc ring0 dump\n");
 +
 +      /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
 +       *
 +       * Legacy Transmit Descriptor
 +       *   +--------------------------------------------------------------+
 +       * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
 +       *   +--------------------------------------------------------------+
 +       * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
 +       *   +--------------------------------------------------------------+
 +       *   63       48 47        36 35    32 31     24 23    16 15        0
 +       *
 +       * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
 +       *   63      48 47    40 39       32 31             16 15    8 7      0
 +       *   +----------------------------------------------------------------+
 +       * 0 |  TUCSE  | TUCS0  |   TUCSS   |     IPCSE       | IPCS0 | IPCSS |
 +       *   +----------------------------------------------------------------+
 +       * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
 +       *   +----------------------------------------------------------------+
 +       *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
 +       *
 +       * Extended Data Descriptor (DTYP=0x1)
 +       *   +----------------------------------------------------------------+
 +       * 0 |                     Buffer Address [63:0]                      |
 +       *   +----------------------------------------------------------------+
 +       * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
 +       *   +----------------------------------------------------------------+
 +       *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
 +       */
 +      pr_info("Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
 +      pr_info("Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
 +
 +      if (!netif_msg_tx_done(adapter))
 +              goto rx_ring_summary;
 +
 +      for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
 +              struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
 +              struct e1000_buffer *buffer_info = &tx_ring->buffer_info[i];
 +              struct my_u { u64 a; u64 b; };
 +              struct my_u *u = (struct my_u *)tx_desc;
 +              const char *type;
 +
 +              if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
 +                      type = "NTC/U";
 +              else if (i == tx_ring->next_to_use)
 +                      type = "NTU";
 +              else if (i == tx_ring->next_to_clean)
 +                      type = "NTC";
 +              else
 +                      type = "";
 +
 +              pr_info("T%c[0x%03X]    %016llX %016llX %016llX %04X  %3X %016llX %p %s\n",
 +                      ((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i,
 +                      le64_to_cpu(u->a), le64_to_cpu(u->b),
 +                      (u64)buffer_info->dma, buffer_info->length,
 +                      buffer_info->next_to_watch,
 +                      (u64)buffer_info->time_stamp, buffer_info->skb, type);
 +      }
 +
 +rx_ring_summary:
 +      /*
 +       * receive dump
 +       */
 +      pr_info("\nRX Desc ring dump\n");
 +
 +      /* Legacy Receive Descriptor Format
 +       *
 +       * +-----------------------------------------------------+
 +       * |                Buffer Address [63:0]                |
 +       * +-----------------------------------------------------+
 +       * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
 +       * +-----------------------------------------------------+
 +       * 63       48 47    40 39      32 31         16 15      0
 +       */
 +      pr_info("R[desc]      [address 63:0  ] [vl er S cks ln] [bi->dma       ] [bi->skb]\n");
 +
 +      if (!netif_msg_rx_status(adapter))
 +              goto exit;
 +
 +      for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
 +              struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
 +              struct e1000_buffer *buffer_info = &rx_ring->buffer_info[i];
 +              struct my_u { u64 a; u64 b; };
 +              struct my_u *u = (struct my_u *)rx_desc;
 +              const char *type;
 +
 +              if (i == rx_ring->next_to_use)
 +                      type = "NTU";
 +              else if (i == rx_ring->next_to_clean)
 +                      type = "NTC";
 +              else
 +                      type = "";
 +
 +              pr_info("R[0x%03X]     %016llX %016llX %016llX %p %s\n",
 +                      i, le64_to_cpu(u->a), le64_to_cpu(u->b),
 +                      (u64)buffer_info->dma, buffer_info->skb, type);
 +      } /* for */
 +
 +      /* dump the descriptor caches */
 +      /* rx */
 +      pr_info("Rx descriptor cache in 64bit format\n");
 +      for (i = 0x6000; i <= 0x63FF ; i += 0x10) {
 +              pr_info("R%04X: %08X|%08X %08X|%08X\n",
 +                      i,
 +                      readl(adapter->hw.hw_addr + i+4),
 +                      readl(adapter->hw.hw_addr + i),
 +                      readl(adapter->hw.hw_addr + i+12),
 +                      readl(adapter->hw.hw_addr + i+8));
 +      }
 +      /* tx */
 +      pr_info("Tx descriptor cache in 64bit format\n");
 +      for (i = 0x7000; i <= 0x73FF ; i += 0x10) {
 +              pr_info("T%04X: %08X|%08X %08X|%08X\n",
 +                      i,
 +                      readl(adapter->hw.hw_addr + i+4),
 +                      readl(adapter->hw.hw_addr + i),
 +                      readl(adapter->hw.hw_addr + i+12),
 +                      readl(adapter->hw.hw_addr + i+8));
 +      }
 +exit:
 +      return;
 +}
 +
  /**
   * e1000_tx_timeout - Respond to a Tx Hang
   * @netdev: network interface device structure
@@@ -3483,7 -3262,6 +3483,7 @@@ static void e1000_reset_task(struct wor
  
        if (test_bit(__E1000_DOWN, &adapter->flags))
                return;
 +      e_err(drv, "Reset adapter\n");
        e1000_reinit_safe(adapter);
  }
  
@@@ -3901,7 -3679,6 +3901,7 @@@ static bool e1000_clean_tx_irq(struct e
                                eop,
                                jiffies,
                                eop_desc->upper.fields.status);
 +                      e1000_dump(adapter);
                        netif_stop_queue(netdev);
                }
        }
@@@ -4101,11 -3878,9 +4101,9 @@@ static bool e1000_clean_jumbo_rx_irq(st
                                if (length <= copybreak &&
                                    skb_tailroom(skb) >= length) {
                                        u8 *vaddr;
-                                       vaddr = kmap_atomic(buffer_info->page,
-                                                           KM_SKB_DATA_SOFTIRQ);
+                                       vaddr = kmap_atomic(buffer_info->page);
                                        memcpy(skb_tail_pointer(skb), vaddr, length);
-                                       kunmap_atomic(vaddr,
-                                                     KM_SKB_DATA_SOFTIRQ);
+                                       kunmap_atomic(vaddr);
                                        /* re-use the page, so don't erase
                                         * buffer_info->page */
                                        skb_put(skb, length);
                                  ((u32)(rx_desc->errors) << 24),
                                  le16_to_cpu(rx_desc->csum), skb);
  
 -              pskb_trim(skb, skb->len - 4);
 -
 -              /* probably a little skewed due to removing CRC */
 -              total_rx_bytes += skb->len;
 +              total_rx_bytes += (skb->len - 4); /* don't count FCS */
 +              if (likely(!(netdev->features & NETIF_F_RXFCS)))
 +                      pskb_trim(skb, skb->len - 4);
                total_rx_packets++;
  
                /* eth type trans needs skb->data to point to something */
@@@ -4281,15 -4057,14 +4279,15 @@@ static bool e1000_clean_rx_irq(struct e
                        }
                }
  
 -              /* adjust length to remove Ethernet CRC, this must be
 -               * done after the TBI_ACCEPT workaround above */
 -              length -= 4;
 -
 -              /* probably a little skewed due to removing CRC */
 -              total_rx_bytes += length;
 +              total_rx_bytes += (length - 4); /* don't count FCS */
                total_rx_packets++;
  
 +              if (likely(!(netdev->features & NETIF_F_RXFCS)))
 +                      /* adjust length to remove Ethernet CRC, this must be
 +                       * done after the TBI_ACCEPT workaround above
 +                       */
 +                      length -= 4;
 +
                e1000_check_copybreak(netdev, buffer_info, length, &skb);
  
                skb_put(skb, length);
@@@ -1,7 -1,7 +1,7 @@@
  /*******************************************************************************
  
    Intel PRO/1000 Linux driver
 -  Copyright(c) 1999 - 2011 Intel Corporation.
 +  Copyright(c) 1999 - 2012 Intel Corporation.
  
    This program is free software; you can redistribute it and/or modify it
    under the terms and conditions of the GNU General Public License,
@@@ -56,7 -56,7 +56,7 @@@
  
  #define DRV_EXTRAVERSION "-k"
  
 -#define DRV_VERSION "1.5.1" DRV_EXTRAVERSION
 +#define DRV_VERSION "1.9.5" DRV_EXTRAVERSION
  char e1000e_driver_name[] = "e1000e";
  const char e1000e_driver_version[] = DRV_VERSION;
  
@@@ -137,7 -137,7 +137,7 @@@ static const struct e1000_reg_info e100
        {E1000_TDFPC, "TDFPC"},
  
        /* List Terminator */
 -      {}
 +      {0, NULL}
  };
  
  /*
@@@ -183,18 -183,18 +183,18 @@@ static void e1000e_dump(struct e1000_ad
        struct e1000_ring *tx_ring = adapter->tx_ring;
        struct e1000_tx_desc *tx_desc;
        struct my_u0 {
 -              u64 a;
 -              u64 b;
 +              __le64 a;
 +              __le64 b;
        } *u0;
        struct e1000_buffer *buffer_info;
        struct e1000_ring *rx_ring = adapter->rx_ring;
        union e1000_rx_desc_packet_split *rx_desc_ps;
        union e1000_rx_desc_extended *rx_desc;
        struct my_u1 {
 -              u64 a;
 -              u64 b;
 -              u64 c;
 -              u64 d;
 +              __le64 a;
 +              __le64 b;
 +              __le64 c;
 +              __le64 d;
        } *u1;
        u32 staterr;
        int i = 0;
  
        /* Print Tx Ring Summary */
        if (!netdev || !netif_running(netdev))
 -              goto exit;
 +              return;
  
        dev_info(&adapter->pdev->dev, "Tx Ring Summary\n");
        pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
@@@ -308,7 -308,7 +308,7 @@@ rx_ring_summary
  
        /* Print Rx Ring */
        if (!netif_msg_rx_status(adapter))
 -              goto exit;
 +              return;
  
        dev_info(&adapter->pdev->dev, "Rx Ring Dump\n");
        switch (adapter->rx_ps_pages) {
                        }
                }
        }
 -
 -exit:
 -      return;
  }
  
  /**
@@@ -484,27 -487,22 +484,27 @@@ static void e1000_receive_skb(struct e1
  
  /**
   * e1000_rx_checksum - Receive Checksum Offload
 - * @adapter:     board private structure
 - * @status_err:  receive descriptor status and error fields
 - * @csum:     receive descriptor csum field
 - * @sk_buff:     socket buffer with received data
 + * @adapter: board private structure
 + * @status_err: receive descriptor status and error fields
 + * @csum: receive descriptor csum field
 + * @sk_buff: socket buffer with received data
   **/
  static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
 -                            u32 csum, struct sk_buff *skb)
 +                            __le16 csum, struct sk_buff *skb)
  {
        u16 status = (u16)status_err;
        u8 errors = (u8)(status_err >> 24);
  
        skb_checksum_none_assert(skb);
  
 +      /* Rx checksum disabled */
 +      if (!(adapter->netdev->features & NETIF_F_RXCSUM))
 +              return;
 +
        /* Ignore Checksum bit is set */
        if (status & E1000_RXD_STAT_IXSM)
                return;
 +
        /* TCP/UDP checksum error bit is set */
        if (errors & E1000_RXD_ERR_TCPE) {
                /* let the stack verify checksum errors */
                 * Hardware complements the payload checksum, so we undo it
                 * and then put the value in host order for further stack use.
                 */
 -              __sum16 sum = (__force __sum16)htons(csum);
 +              __sum16 sum = (__force __sum16)swab16((__force u16)csum);
                skb->csum = csum_unfold(~sum);
                skb->ip_summed = CHECKSUM_COMPLETE;
        }
   * which has bit 24 set while ME is accessing Host CSR registers, wait
   * if it is set and try again a number of times.
   **/
 -static inline s32 e1000e_update_tail_wa(struct e1000_hw *hw, u8 __iomem * tail,
 +static inline s32 e1000e_update_tail_wa(struct e1000_hw *hw, void __iomem *tail,
                                        unsigned int i)
  {
        unsigned int j = 0;
        return 0;
  }
  
 -static void e1000e_update_rdt_wa(struct e1000_adapter *adapter, unsigned int i)
 +static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i)
  {
 -      u8 __iomem *tail = (adapter->hw.hw_addr + adapter->rx_ring->tail);
 +      struct e1000_adapter *adapter = rx_ring->adapter;
        struct e1000_hw *hw = &adapter->hw;
  
 -      if (e1000e_update_tail_wa(hw, tail, i)) {
 +      if (e1000e_update_tail_wa(hw, rx_ring->tail, i)) {
                u32 rctl = er32(RCTL);
                ew32(RCTL, rctl & ~E1000_RCTL_EN);
                e_err("ME firmware caused invalid RDT - resetting\n");
        }
  }
  
 -static void e1000e_update_tdt_wa(struct e1000_adapter *adapter, unsigned int i)
 +static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i)
  {
 -      u8 __iomem *tail = (adapter->hw.hw_addr + adapter->tx_ring->tail);
 +      struct e1000_adapter *adapter = tx_ring->adapter;
        struct e1000_hw *hw = &adapter->hw;
  
 -      if (e1000e_update_tail_wa(hw, tail, i)) {
 +      if (e1000e_update_tail_wa(hw, tx_ring->tail, i)) {
                u32 tctl = er32(TCTL);
                ew32(TCTL, tctl & ~E1000_TCTL_EN);
                e_err("ME firmware caused invalid TDT - resetting\n");
  
  /**
   * e1000_alloc_rx_buffers - Replace used receive buffers
 - * @adapter: address of board private structure
 + * @rx_ring: Rx descriptor ring
   **/
 -static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 +static void e1000_alloc_rx_buffers(struct e1000_ring *rx_ring,
                                   int cleaned_count, gfp_t gfp)
  {
 +      struct e1000_adapter *adapter = rx_ring->adapter;
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
 -      struct e1000_ring *rx_ring = adapter->rx_ring;
        union e1000_rx_desc_extended *rx_desc;
        struct e1000_buffer *buffer_info;
        struct sk_buff *skb;
@@@ -646,9 -644,9 +646,9 @@@ map_skb
                         */
                        wmb();
                        if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
 -                              e1000e_update_rdt_wa(adapter, i);
 +                              e1000e_update_rdt_wa(rx_ring, i);
                        else
 -                              writel(i, adapter->hw.hw_addr + rx_ring->tail);
 +                              writel(i, rx_ring->tail);
                }
                i++;
                if (i == rx_ring->count)
  
  /**
   * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
 - * @adapter: address of board private structure
 + * @rx_ring: Rx descriptor ring
   **/
 -static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
 +static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring,
                                      int cleaned_count, gfp_t gfp)
  {
 +      struct e1000_adapter *adapter = rx_ring->adapter;
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
        union e1000_rx_desc_packet_split *rx_desc;
 -      struct e1000_ring *rx_ring = adapter->rx_ring;
        struct e1000_buffer *buffer_info;
        struct e1000_ps_page *ps_page;
        struct sk_buff *skb;
                         */
                        wmb();
                        if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
 -                              e1000e_update_rdt_wa(adapter, i << 1);
 +                              e1000e_update_rdt_wa(rx_ring, i << 1);
                        else
 -                              writel(i << 1,
 -                                     adapter->hw.hw_addr + rx_ring->tail);
 +                              writel(i << 1, rx_ring->tail);
                }
  
                i++;
@@@ -766,17 -765,17 +766,17 @@@ no_buffers
  
  /**
   * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
 - * @adapter: address of board private structure
 + * @rx_ring: Rx descriptor ring
   * @cleaned_count: number of buffers to allocate this pass
   **/
  
 -static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
 +static void e1000_alloc_jumbo_rx_buffers(struct e1000_ring *rx_ring,
                                         int cleaned_count, gfp_t gfp)
  {
 +      struct e1000_adapter *adapter = rx_ring->adapter;
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
        union e1000_rx_desc_extended *rx_desc;
 -      struct e1000_ring *rx_ring = adapter->rx_ring;
        struct e1000_buffer *buffer_info;
        struct sk_buff *skb;
        unsigned int i;
@@@ -835,33 -834,26 +835,33 @@@ check_page
                 * such as IA-64). */
                wmb();
                if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
 -                      e1000e_update_rdt_wa(adapter, i);
 +                      e1000e_update_rdt_wa(rx_ring, i);
                else
 -                      writel(i, adapter->hw.hw_addr + rx_ring->tail);
 +                      writel(i, rx_ring->tail);
        }
  }
  
 +static inline void e1000_rx_hash(struct net_device *netdev, __le32 rss,
 +                               struct sk_buff *skb)
 +{
 +      if (netdev->features & NETIF_F_RXHASH)
 +              skb->rxhash = le32_to_cpu(rss);
 +}
 +
  /**
 - * e1000_clean_rx_irq - Send received data up the network stack; legacy
 - * @adapter: board private structure
 + * e1000_clean_rx_irq - Send received data up the network stack
 + * @rx_ring: Rx descriptor ring
   *
   * the return value indicates whether actual cleaning was done, there
   * is no guarantee that everything was cleaned
   **/
 -static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 -                             int *work_done, int work_to_do)
 +static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
 +                             int work_to_do)
  {
 +      struct e1000_adapter *adapter = rx_ring->adapter;
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
        struct e1000_hw *hw = &adapter->hw;
 -      struct e1000_ring *rx_ring = adapter->rx_ring;
        union e1000_rx_desc_extended *rx_desc, *next_rxd;
        struct e1000_buffer *buffer_info, *next_buffer;
        u32 length, staterr;
                        goto next_desc;
                }
  
 -              if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
 +              if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
 +                           !(netdev->features & NETIF_F_RXALL))) {
                        /* recycle */
                        buffer_info->skb = skb;
                        goto next_desc;
                }
  
                /* adjust length to remove Ethernet CRC */
 -              if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
 -                      length -= 4;
 +              if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
 +                      /* If configured to store CRC, don't subtract FCS,
 +                       * but keep the FCS bytes out of the total_rx_bytes
 +                       * counter
 +                       */
 +                      if (netdev->features & NETIF_F_RXFCS)
 +                              total_rx_bytes -= 4;
 +                      else
 +                              length -= 4;
 +              }
  
                total_rx_bytes += length;
                total_rx_packets++;
  
                /* Receive Checksum Offload */
                e1000_rx_checksum(adapter, staterr,
 -                                le16_to_cpu(rx_desc->wb.lower.hi_dword.
 -                                            csum_ip.csum), skb);
 +                                rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
 +
 +              e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
  
                e1000_receive_skb(adapter, netdev, skb, staterr,
                                  rx_desc->wb.upper.vlan);
@@@ -986,7 -968,7 +986,7 @@@ next_desc
  
                /* return some buffers to hardware, one at a time is too slow */
                if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
 -                      adapter->alloc_rx_buf(adapter, cleaned_count,
 +                      adapter->alloc_rx_buf(rx_ring, cleaned_count,
                                              GFP_ATOMIC);
                        cleaned_count = 0;
                }
  
        cleaned_count = e1000_desc_unused(rx_ring);
        if (cleaned_count)
 -              adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC);
 +              adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);
  
        adapter->total_rx_bytes += total_rx_bytes;
        adapter->total_rx_packets += total_rx_packets;
        return cleaned;
  }
  
 -static void e1000_put_txbuf(struct e1000_adapter *adapter,
 -                           struct e1000_buffer *buffer_info)
 +static void e1000_put_txbuf(struct e1000_ring *tx_ring,
 +                          struct e1000_buffer *buffer_info)
  {
 +      struct e1000_adapter *adapter = tx_ring->adapter;
 +
        if (buffer_info->dma) {
                if (buffer_info->mapped_as_page)
                        dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
@@@ -1083,8 -1063,8 +1083,8 @@@ static void e1000_print_hw_hang(struct 
              "PHY 1000BASE-T Status  <%x>\n"
              "PHY Extended Status    <%x>\n"
              "PCI Status             <%x>\n",
 -            readl(adapter->hw.hw_addr + tx_ring->head),
 -            readl(adapter->hw.hw_addr + tx_ring->tail),
 +            readl(tx_ring->head),
 +            readl(tx_ring->tail),
              tx_ring->next_to_use,
              tx_ring->next_to_clean,
              tx_ring->buffer_info[eop].time_stamp,
  
  /**
   * e1000_clean_tx_irq - Reclaim resources after transmit completes
 - * @adapter: board private structure
 + * @tx_ring: Tx descriptor ring
   *
   * the return value indicates whether actual cleaning was done, there
   * is no guarantee that everything was cleaned
   **/
 -static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
 +static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring)
  {
 +      struct e1000_adapter *adapter = tx_ring->adapter;
        struct net_device *netdev = adapter->netdev;
        struct e1000_hw *hw = &adapter->hw;
 -      struct e1000_ring *tx_ring = adapter->tx_ring;
        struct e1000_tx_desc *tx_desc, *eop_desc;
        struct e1000_buffer *buffer_info;
        unsigned int i, eop;
                                }
                        }
  
 -                      e1000_put_txbuf(adapter, buffer_info);
 +                      e1000_put_txbuf(tx_ring, buffer_info);
                        tx_desc->upper.data = 0;
  
                        i++;
  
  /**
   * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
 - * @adapter: board private structure
 + * @rx_ring: Rx descriptor ring
   *
   * the return value indicates whether actual cleaning was done, there
   * is no guarantee that everything was cleaned
   **/
 -static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 -                                int *work_done, int work_to_do)
 +static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done,
 +                                int work_to_do)
  {
 +      struct e1000_adapter *adapter = rx_ring->adapter;
        struct e1000_hw *hw = &adapter->hw;
        union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
 -      struct e1000_ring *rx_ring = adapter->rx_ring;
        struct e1000_buffer *buffer_info, *next_buffer;
        struct e1000_ps_page *ps_page;
        struct sk_buff *skb;
                        goto next_desc;
                }
  
 -              if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
 +              if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
 +                           !(netdev->features & NETIF_F_RXALL))) {
                        dev_kfree_skb_irq(skb);
                        goto next_desc;
                }
                skb_put(skb, length);
  
                {
 -              /*
 -               * this looks ugly, but it seems compiler issues make it
 -               * more efficient than reusing j
 -               */
 -              int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);
 -
 -              /*
 -               * page alloc/put takes too long and effects small packet
 -               * throughput, so unsplit small packets and save the alloc/put
 -               * only valid in softirq (napi) context to call kmap_*
 -               */
 -              if (l1 && (l1 <= copybreak) &&
 -                  ((length + l1) <= adapter->rx_ps_bsize0)) {
 -                      u8 *vaddr;
 -
 -                      ps_page = &buffer_info->ps_pages[0];
 +                      /*
 +                       * this looks ugly, but it seems compiler issues make
 +                       * it more efficient than reusing j
 +                       */
 +                      int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);
  
                        /*
 -                       * there is no documentation about how to call
 -                       * kmap_atomic, so we can't hold the mapping
 -                       * very long
 +                       * page alloc/put takes too long and effects small
 +                       * packet throughput, so unsplit small packets and
 +                       * save the alloc/put only valid in softirq (napi)
 +                       * context to call kmap_*
                         */
 -                      dma_sync_single_for_cpu(&pdev->dev, ps_page->dma,
 -                                              PAGE_SIZE, DMA_FROM_DEVICE);
 -                      vaddr = kmap_atomic(ps_page->page);
 -                      memcpy(skb_tail_pointer(skb), vaddr, l1);
 -                      kunmap_atomic(vaddr);
 -                      dma_sync_single_for_device(&pdev->dev, ps_page->dma,
 -                                                 PAGE_SIZE, DMA_FROM_DEVICE);
 -
 -                      /* remove the CRC */
 -                      if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
 -                              l1 -= 4;
 -
 -                      skb_put(skb, l1);
 -                      goto copydone;
 -              } /* if */
 +                      if (l1 && (l1 <= copybreak) &&
 +                          ((length + l1) <= adapter->rx_ps_bsize0)) {
 +                              u8 *vaddr;
 +
 +                              ps_page = &buffer_info->ps_pages[0];
 +
 +                              /*
 +                               * there is no documentation about how to call
 +                               * kmap_atomic, so we can't hold the mapping
 +                               * very long
 +                               */
 +                              dma_sync_single_for_cpu(&pdev->dev,
 +                                                      ps_page->dma,
 +                                                      PAGE_SIZE,
 +                                                      DMA_FROM_DEVICE);
-                               vaddr = kmap_atomic(ps_page->page,
-                                                   KM_SKB_DATA_SOFTIRQ);
++                              vaddr = kmap_atomic(ps_page->page);
 +                              memcpy(skb_tail_pointer(skb), vaddr, l1);
-                               kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
++                              kunmap_atomic(vaddr);
 +                              dma_sync_single_for_device(&pdev->dev,
 +                                                         ps_page->dma,
 +                                                         PAGE_SIZE,
 +                                                         DMA_FROM_DEVICE);
 +
 +                              /* remove the CRC */
 +                              if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
 +                                      if (!(netdev->features & NETIF_F_RXFCS))
 +                                              l1 -= 4;
 +                              }
 +
 +                              skb_put(skb, l1);
 +                              goto copydone;
 +                      } /* if */
                }
  
                for (j = 0; j < PS_PAGE_BUFFERS; j++) {
                /* strip the ethernet crc, problem is we're using pages now so
                 * this whole operation can get a little cpu intensive
                 */
 -              if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
 -                      pskb_trim(skb, skb->len - 4);
 +              if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
 +                      if (!(netdev->features & NETIF_F_RXFCS))
 +                              pskb_trim(skb, skb->len - 4);
 +              }
  
  copydone:
                total_rx_bytes += skb->len;
                total_rx_packets++;
  
 -              e1000_rx_checksum(adapter, staterr, le16_to_cpu(
 -                      rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);
 +              e1000_rx_checksum(adapter, staterr,
 +                                rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
 +
 +              e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
  
                if (rx_desc->wb.upper.header_status &
                           cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
@@@ -1367,7 -1334,7 +1366,7 @@@ next_desc
  
                /* return some buffers to hardware, one at a time is too slow */
                if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
 -                      adapter->alloc_rx_buf(adapter, cleaned_count,
 +                      adapter->alloc_rx_buf(rx_ring, cleaned_count,
                                              GFP_ATOMIC);
                        cleaned_count = 0;
                }
  
        cleaned_count = e1000_desc_unused(rx_ring);
        if (cleaned_count)
 -              adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC);
 +              adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);
  
        adapter->total_rx_bytes += total_rx_bytes;
        adapter->total_rx_packets += total_rx_packets;
@@@ -1408,12 -1375,13 +1407,12 @@@ static void e1000_consume_page(struct e
   * the return value indicates whether actual cleaning was done, there
   * is no guarantee that everything was cleaned
   **/
 -
 -static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
 -                                     int *work_done, int work_to_do)
 +static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
 +                                   int work_to_do)
  {
 +      struct e1000_adapter *adapter = rx_ring->adapter;
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
 -      struct e1000_ring *rx_ring = adapter->rx_ring;
        union e1000_rx_desc_extended *rx_desc, *next_rxd;
        struct e1000_buffer *buffer_info, *next_buffer;
        u32 length, staterr;
  
                /* errors is only valid for DD + EOP descriptors */
                if (unlikely((staterr & E1000_RXD_STAT_EOP) &&
 -                           (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK))) {
 +                           ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
 +                            !(netdev->features & NETIF_F_RXALL)))) {
                        /* recycle both page and skb */
                        buffer_info->skb = skb;
                        /* an error means any chain goes out the window too */
                                if (length <= copybreak &&
                                    skb_tailroom(skb) >= length) {
                                        u8 *vaddr;
-                                       vaddr = kmap_atomic(buffer_info->page,
-                                                          KM_SKB_DATA_SOFTIRQ);
+                                       vaddr = kmap_atomic(buffer_info->page);
                                        memcpy(skb_tail_pointer(skb), vaddr,
                                               length);
-                                       kunmap_atomic(vaddr,
-                                                     KM_SKB_DATA_SOFTIRQ);
+                                       kunmap_atomic(vaddr);
                                        /* re-use the page, so don't erase
                                         * buffer_info->page */
                                        skb_put(skb, length);
  
                /* Receive Checksum Offload XXX recompute due to CRC strip? */
                e1000_rx_checksum(adapter, staterr,
 -                                le16_to_cpu(rx_desc->wb.lower.hi_dword.
 -                                            csum_ip.csum), skb);
 +                                rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
 +
 +              e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
  
                /* probably a little skewed due to removing CRC */
                total_rx_bytes += skb->len;
@@@ -1547,7 -1511,7 +1544,7 @@@ next_desc
  
                /* return some buffers to hardware, one at a time is too slow */
                if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
 -                      adapter->alloc_rx_buf(adapter, cleaned_count,
 +                      adapter->alloc_rx_buf(rx_ring, cleaned_count,
                                              GFP_ATOMIC);
                        cleaned_count = 0;
                }
  
        cleaned_count = e1000_desc_unused(rx_ring);
        if (cleaned_count)
 -              adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC);
 +              adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);
  
        adapter->total_rx_bytes += total_rx_bytes;
        adapter->total_rx_packets += total_rx_packets;
  
  /**
   * e1000_clean_rx_ring - Free Rx Buffers per Queue
 - * @adapter: board private structure
 + * @rx_ring: Rx descriptor ring
   **/
 -static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
 +static void e1000_clean_rx_ring(struct e1000_ring *rx_ring)
  {
 -      struct e1000_ring *rx_ring = adapter->rx_ring;
 +      struct e1000_adapter *adapter = rx_ring->adapter;
        struct e1000_buffer *buffer_info;
        struct e1000_ps_page *ps_page;
        struct pci_dev *pdev = adapter->pdev;
        rx_ring->next_to_use = 0;
        adapter->flags2 &= ~FLAG2_IS_DISCARDING;
  
 -      writel(0, adapter->hw.hw_addr + rx_ring->head);
 -      writel(0, adapter->hw.hw_addr + rx_ring->tail);
 +      writel(0, rx_ring->head);
 +      writel(0, rx_ring->tail);
  }
  
  static void e1000e_downshift_workaround(struct work_struct *work)
@@@ -1667,7 -1631,7 +1664,7 @@@ static irqreturn_t e1000_intr_msi(int i
         */
  
        if (icr & E1000_ICR_LSC) {
 -              hw->mac.get_link_status = 1;
 +              hw->mac.get_link_status = true;
                /*
                 * ICH8 workaround-- Call gig speed drop workaround on cable
                 * disconnect (LSC) before accessing any PHY registers
@@@ -1733,7 -1697,7 +1730,7 @@@ static irqreturn_t e1000_intr(int irq, 
         */
  
        if (icr & E1000_ICR_LSC) {
 -              hw->mac.get_link_status = 1;
 +              hw->mac.get_link_status = true;
                /*
                 * ICH8 workaround-- Call gig speed drop workaround on cable
                 * disconnect (LSC) before accessing any PHY registers
@@@ -1790,7 -1754,7 +1787,7 @@@ static irqreturn_t e1000_msix_other(in
        if (icr & E1000_ICR_OTHER) {
                if (!(icr & E1000_ICR_LSC))
                        goto no_link_interrupt;
 -              hw->mac.get_link_status = 1;
 +              hw->mac.get_link_status = true;
                /* guard against interrupt when we're going down */
                if (!test_bit(__E1000_DOWN, &adapter->state))
                        mod_timer(&adapter->watchdog_timer, jiffies + 1);
@@@ -1815,7 -1779,7 +1812,7 @@@ static irqreturn_t e1000_intr_msix_tx(i
        adapter->total_tx_bytes = 0;
        adapter->total_tx_packets = 0;
  
 -      if (!e1000_clean_tx_irq(adapter))
 +      if (!e1000_clean_tx_irq(tx_ring))
                /* Ring was not completely cleaned, so fire another interrupt */
                ew32(ICS, tx_ring->ims_val);
  
@@@ -1826,15 -1790,14 +1823,15 @@@ static irqreturn_t e1000_intr_msix_rx(i
  {
        struct net_device *netdev = data;
        struct e1000_adapter *adapter = netdev_priv(netdev);
 +      struct e1000_ring *rx_ring = adapter->rx_ring;
  
        /* Write the ITR value calculated at the end of the
         * previous interrupt.
         */
 -      if (adapter->rx_ring->set_itr) {
 -              writel(1000000000 / (adapter->rx_ring->itr_val * 256),
 -                     adapter->hw.hw_addr + adapter->rx_ring->itr_register);
 -              adapter->rx_ring->set_itr = 0;
 +      if (rx_ring->set_itr) {
 +              writel(1000000000 / (rx_ring->itr_val * 256),
 +                     rx_ring->itr_register);
 +              rx_ring->set_itr = 0;
        }
  
        if (napi_schedule_prep(&adapter->napi)) {
@@@ -1874,9 -1837,9 +1871,9 @@@ static void e1000_configure_msix(struc
        adapter->eiac_mask |= rx_ring->ims_val;
        if (rx_ring->itr_val)
                writel(1000000000 / (rx_ring->itr_val * 256),
 -                     hw->hw_addr + rx_ring->itr_register);
 +                     rx_ring->itr_register);
        else
 -              writel(1, hw->hw_addr + rx_ring->itr_register);
 +              writel(1, rx_ring->itr_register);
        ivar = E1000_IVAR_INT_ALLOC_VALID | vector;
  
        /* Configure Tx vector */
        vector++;
        if (tx_ring->itr_val)
                writel(1000000000 / (tx_ring->itr_val * 256),
 -                     hw->hw_addr + tx_ring->itr_register);
 +                     tx_ring->itr_register);
        else
 -              writel(1, hw->hw_addr + tx_ring->itr_register);
 +              writel(1, tx_ring->itr_register);
        adapter->eiac_mask |= tx_ring->ims_val;
        ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8);
  
@@@ -2000,9 -1963,8 +1997,9 @@@ static int e1000_request_msix(struct e1
                          e1000_intr_msix_rx, 0, adapter->rx_ring->name,
                          netdev);
        if (err)
 -              goto out;
 -      adapter->rx_ring->itr_register = E1000_EITR_82574(vector);
 +              return err;
 +      adapter->rx_ring->itr_register = adapter->hw.hw_addr +
 +          E1000_EITR_82574(vector);
        adapter->rx_ring->itr_val = adapter->itr;
        vector++;
  
                          e1000_intr_msix_tx, 0, adapter->tx_ring->name,
                          netdev);
        if (err)
 -              goto out;
 -      adapter->tx_ring->itr_register = E1000_EITR_82574(vector);
 +              return err;
 +      adapter->tx_ring->itr_register = adapter->hw.hw_addr +
 +          E1000_EITR_82574(vector);
        adapter->tx_ring->itr_val = adapter->itr;
        vector++;
  
        err = request_irq(adapter->msix_entries[vector].vector,
                          e1000_msix_other, 0, netdev->name, netdev);
        if (err)
 -              goto out;
 +              return err;
  
        e1000_configure_msix(adapter);
 +
        return 0;
 -out:
 -      return err;
  }
  
  /**
@@@ -2198,13 -2160,13 +2195,13 @@@ static int e1000_alloc_ring_dma(struct 
  
  /**
   * e1000e_setup_tx_resources - allocate Tx resources (Descriptors)
 - * @adapter: board private structure
 + * @tx_ring: Tx descriptor ring
   *
   * Return 0 on success, negative on failure
   **/
 -int e1000e_setup_tx_resources(struct e1000_adapter *adapter)
 +int e1000e_setup_tx_resources(struct e1000_ring *tx_ring)
  {
 -      struct e1000_ring *tx_ring = adapter->tx_ring;
 +      struct e1000_adapter *adapter = tx_ring->adapter;
        int err = -ENOMEM, size;
  
        size = sizeof(struct e1000_buffer) * tx_ring->count;
  
  /**
   * e1000e_setup_rx_resources - allocate Rx resources (Descriptors)
 - * @adapter: board private structure
 + * @rx_ring: Rx descriptor ring
   *
   * Returns 0 on success, negative on failure
   **/
 -int e1000e_setup_rx_resources(struct e1000_adapter *adapter)
 +int e1000e_setup_rx_resources(struct e1000_ring *rx_ring)
  {
 -      struct e1000_ring *rx_ring = adapter->rx_ring;
 +      struct e1000_adapter *adapter = rx_ring->adapter;
        struct e1000_buffer *buffer_info;
        int i, size, desc_len, err = -ENOMEM;
  
  
  /**
   * e1000_clean_tx_ring - Free Tx Buffers
 - * @adapter: board private structure
 + * @tx_ring: Tx descriptor ring
   **/
 -static void e1000_clean_tx_ring(struct e1000_adapter *adapter)
 +static void e1000_clean_tx_ring(struct e1000_ring *tx_ring)
  {
 -      struct e1000_ring *tx_ring = adapter->tx_ring;
 +      struct e1000_adapter *adapter = tx_ring->adapter;
        struct e1000_buffer *buffer_info;
        unsigned long size;
        unsigned int i;
  
        for (i = 0; i < tx_ring->count; i++) {
                buffer_info = &tx_ring->buffer_info[i];
 -              e1000_put_txbuf(adapter, buffer_info);
 +              e1000_put_txbuf(tx_ring, buffer_info);
        }
  
        netdev_reset_queue(adapter->netdev);
        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;
  
 -      writel(0, adapter->hw.hw_addr + tx_ring->head);
 -      writel(0, adapter->hw.hw_addr + tx_ring->tail);
 +      writel(0, tx_ring->head);
 +      writel(0, tx_ring->tail);
  }
  
  /**
   * e1000e_free_tx_resources - Free Tx Resources per Queue
 - * @adapter: board private structure
 + * @tx_ring: Tx descriptor ring
   *
   * Free all transmit software resources
   **/
 -void e1000e_free_tx_resources(struct e1000_adapter *adapter)
 +void e1000e_free_tx_resources(struct e1000_ring *tx_ring)
  {
 +      struct e1000_adapter *adapter = tx_ring->adapter;
        struct pci_dev *pdev = adapter->pdev;
 -      struct e1000_ring *tx_ring = adapter->tx_ring;
  
 -      e1000_clean_tx_ring(adapter);
 +      e1000_clean_tx_ring(tx_ring);
  
        vfree(tx_ring->buffer_info);
        tx_ring->buffer_info = NULL;
  
  /**
   * e1000e_free_rx_resources - Free Rx Resources
 - * @adapter: board private structure
 + * @rx_ring: Rx descriptor ring
   *
   * Free all receive software resources
   **/
 -
 -void e1000e_free_rx_resources(struct e1000_adapter *adapter)
 +void e1000e_free_rx_resources(struct e1000_ring *rx_ring)
  {
 +      struct e1000_adapter *adapter = rx_ring->adapter;
        struct pci_dev *pdev = adapter->pdev;
 -      struct e1000_ring *rx_ring = adapter->rx_ring;
        int i;
  
 -      e1000_clean_rx_ring(adapter);
 +      e1000_clean_rx_ring(rx_ring);
  
        for (i = 0; i < rx_ring->count; i++)
                kfree(rx_ring->buffer_info[i].ps_pages);
@@@ -2381,7 -2344,7 +2378,7 @@@ static unsigned int e1000_update_itr(st
        unsigned int retval = itr_setting;
  
        if (packets == 0)
 -              goto update_itr_done;
 +              return itr_setting;
  
        switch (itr_setting) {
        case lowest_latency:
                break;
        }
  
 -update_itr_done:
        return retval;
  }
  
@@@ -2498,19 -2462,13 +2495,19 @@@ set_itr_now
   **/
  static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
  {
 -      adapter->tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
 +      int size = sizeof(struct e1000_ring);
 +
 +      adapter->tx_ring = kzalloc(size, GFP_KERNEL);
        if (!adapter->tx_ring)
                goto err;
 +      adapter->tx_ring->count = adapter->tx_ring_count;
 +      adapter->tx_ring->adapter = adapter;
  
 -      adapter->rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
 +      adapter->rx_ring = kzalloc(size, GFP_KERNEL);
        if (!adapter->rx_ring)
                goto err;
 +      adapter->rx_ring->count = adapter->rx_ring_count;
 +      adapter->rx_ring->adapter = adapter;
  
        return 0;
  err:
@@@ -2538,10 -2496,10 +2535,10 @@@ static int e1000_clean(struct napi_stru
            !(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
                goto clean_rx;
  
 -      tx_cleaned = e1000_clean_tx_irq(adapter);
 +      tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring);
  
  clean_rx:
 -      adapter->clean_rx(adapter, &work_done, budget);
 +      adapter->clean_rx(adapter->rx_ring, &work_done, budget);
  
        if (!tx_cleaned)
                work_done = budget;
@@@ -2786,7 -2744,8 +2783,7 @@@ static void e1000_configure_tx(struct e
        struct e1000_hw *hw = &adapter->hw;
        struct e1000_ring *tx_ring = adapter->tx_ring;
        u64 tdba;
 -      u32 tdlen, tctl, tipg, tarc;
 -      u32 ipgr1, ipgr2;
 +      u32 tdlen, tarc;
  
        /* Setup the HW Tx Head and Tail descriptor pointers */
        tdba = tx_ring->dma;
        ew32(TDLEN, tdlen);
        ew32(TDH, 0);
        ew32(TDT, 0);
 -      tx_ring->head = E1000_TDH;
 -      tx_ring->tail = E1000_TDT;
 -
 -      /* Set the default values for the Tx Inter Packet Gap timer */
 -      tipg = DEFAULT_82543_TIPG_IPGT_COPPER;          /*  8  */
 -      ipgr1 = DEFAULT_82543_TIPG_IPGR1;               /*  8  */
 -      ipgr2 = DEFAULT_82543_TIPG_IPGR2;               /*  6  */
 -
 -      if (adapter->flags & FLAG_TIPG_MEDIUM_FOR_80003ESLAN)
 -              ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2; /*  7  */
 -
 -      tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
 -      tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
 -      ew32(TIPG, tipg);
 +      tx_ring->head = adapter->hw.hw_addr + E1000_TDH;
 +      tx_ring->tail = adapter->hw.hw_addr + E1000_TDT;
  
        /* Set the Tx Interrupt Delay register */
        ew32(TIDV, adapter->tx_int_delay);
                 */
                txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE;
                ew32(TXDCTL(0), txdctl);
 -              /* erratum work around: set txdctl the same for both queues */
 -              ew32(TXDCTL(1), txdctl);
        }
 -
 -      /* Program the Transmit Control Register */
 -      tctl = er32(TCTL);
 -      tctl &= ~E1000_TCTL_CT;
 -      tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
 -              (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
 +      /* erratum work around: set txdctl the same for both queues */
 +      ew32(TXDCTL(1), er32(TXDCTL(0)));
  
        if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) {
                tarc = er32(TARC(0));
        /* enable Report Status bit */
        adapter->txd_cmd |= E1000_TXD_CMD_RS;
  
 -      ew32(TCTL, tctl);
 -
 -      e1000e_config_collision_dist(hw);
 +      hw->mac.ops.config_collision_dist(hw);
  }
  
  /**
@@@ -2963,7 -2942,8 +2960,7 @@@ static void e1000_setup_rctl(struct e10
         * per packet.
         */
        pages = PAGE_USE_COUNT(adapter->netdev->mtu);
 -      if (!(adapter->flags & FLAG_HAS_ERT) && (pages <= 3) &&
 -          (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE))
 +      if ((pages <= 3) && (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE))
                adapter->rx_ps_pages = pages;
        else
                adapter->rx_ps_pages = 0;
                ew32(PSRCTL, psrctl);
        }
  
 +      /* This is useful for sniffing bad packets. */
 +      if (adapter->netdev->features & NETIF_F_RXALL) {
 +              /* UPE and MPE will be handled by normal PROMISC logic
 +               * in e1000e_set_rx_mode */
 +              rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
 +                       E1000_RCTL_BAM | /* RX All Bcast Pkts */
 +                       E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
 +
 +              rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
 +                        E1000_RCTL_DPF | /* Allow filtered pause */
 +                        E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
 +              /* Do not mess with E1000_CTRL_VME, it affects transmit as well,
 +               * and that breaks VLANs.
 +               */
 +      }
 +
        ew32(RFCTL, rfctl);
        ew32(RCTL, rctl);
        /* just started the receive unit, no need to restart */
@@@ -3106,8 -3070,8 +3103,8 @@@ static void e1000_configure_rx(struct e
        ew32(RDLEN, rdlen);
        ew32(RDH, 0);
        ew32(RDT, 0);
 -      rx_ring->head = E1000_RDH;
 -      rx_ring->tail = E1000_RDT;
 +      rx_ring->head = adapter->hw.hw_addr + E1000_RDH;
 +      rx_ring->tail = adapter->hw.hw_addr + E1000_RDT;
  
        /* Enable Receive Checksum Offload for TCP and UDP */
        rxcsum = er32(RXCSUM);
        }
        ew32(RXCSUM, rxcsum);
  
 -      /*
 -       * Enable early receives on supported devices, only takes effect when
 -       * packet size is equal or larger than the specified value (in 8 byte
 -       * units), e.g. using jumbo frames when setting to E1000_ERT_2048
 -       */
 -      if ((adapter->flags & FLAG_HAS_ERT) ||
 -          (adapter->hw.mac.type == e1000_pch2lan)) {
 +      if (adapter->hw.mac.type == e1000_pch2lan) {
 +              /*
 +               * With jumbo frames, excessive C-state transition
 +               * latencies result in dropped transactions.
 +               */
                if (adapter->netdev->mtu > ETH_DATA_LEN) {
                        u32 rxdctl = er32(RXDCTL(0));
                        ew32(RXDCTL(0), rxdctl | 0x3);
 -                      if (adapter->flags & FLAG_HAS_ERT)
 -                              ew32(ERT, E1000_ERT_2048 | (1 << 13));
 -                      /*
 -                       * With jumbo frames and early-receive enabled,
 -                       * excessive C-state transition latencies result in
 -                       * dropped transactions.
 -                       */
                        pm_qos_update_request(&adapter->netdev->pm_qos_req, 55);
                } else {
                        pm_qos_update_request(&adapter->netdev->pm_qos_req,
@@@ -3262,7 -3235,6 +3259,7 @@@ static void e1000e_set_rx_mode(struct n
                e1000e_vlan_filter_disable(adapter);
        } else {
                int count;
 +
                if (netdev->flags & IFF_ALLMULTI) {
                        rctl |= E1000_RCTL_MPE;
                } else {
                e1000e_vlan_strip_disable(adapter);
  }
  
 +static void e1000e_setup_rss_hash(struct e1000_adapter *adapter)
 +{
 +      struct e1000_hw *hw = &adapter->hw;
 +      u32 mrqc, rxcsum;
 +      int i;
 +      static const u32 rsskey[10] = {
 +              0xda565a6d, 0xc20e5b25, 0x3d256741, 0xb08fa343, 0xcb2bcad0,
 +              0xb4307bae, 0xa32dcb77, 0x0cf23080, 0x3bb7426a, 0xfa01acbe
 +      };
 +
 +      /* Fill out hash function seed */
 +      for (i = 0; i < 10; i++)
 +              ew32(RSSRK(i), rsskey[i]);
 +
 +      /* Direct all traffic to queue 0 */
 +      for (i = 0; i < 32; i++)
 +              ew32(RETA(i), 0);
 +
 +      /*
 +       * Disable raw packet checksumming so that RSS hash is placed in
 +       * descriptor on writeback.
 +       */
 +      rxcsum = er32(RXCSUM);
 +      rxcsum |= E1000_RXCSUM_PCSD;
 +
 +      ew32(RXCSUM, rxcsum);
 +
 +      mrqc = (E1000_MRQC_RSS_FIELD_IPV4 |
 +              E1000_MRQC_RSS_FIELD_IPV4_TCP |
 +              E1000_MRQC_RSS_FIELD_IPV6 |
 +              E1000_MRQC_RSS_FIELD_IPV6_TCP |
 +              E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
 +
 +      ew32(MRQC, mrqc);
 +}
 +
  /**
   * e1000_configure - configure the hardware for Rx and Tx
   * @adapter: private board structure
   **/
  static void e1000_configure(struct e1000_adapter *adapter)
  {
 +      struct e1000_ring *rx_ring = adapter->rx_ring;
 +
        e1000e_set_rx_mode(adapter->netdev);
  
        e1000_restore_vlan(adapter);
        e1000_init_manageability_pt(adapter);
  
        e1000_configure_tx(adapter);
 +
 +      if (adapter->netdev->features & NETIF_F_RXHASH)
 +              e1000e_setup_rss_hash(adapter);
        e1000_setup_rctl(adapter);
        e1000_configure_rx(adapter);
 -      adapter->alloc_rx_buf(adapter, e1000_desc_unused(adapter->rx_ring),
 -                            GFP_KERNEL);
 +      adapter->alloc_rx_buf(rx_ring, e1000_desc_unused(rx_ring), GFP_KERNEL);
  }
  
  /**
@@@ -3445,7 -3377,9 +3442,7 @@@ void e1000e_reset(struct e1000_adapter 
                         * if short on Rx space, Rx wins and must trump Tx
                         * adjustment or use Early Receive if available
                         */
 -                      if ((pba < min_rx_space) &&
 -                          (!(adapter->flags & FLAG_HAS_ERT)))
 -                              /* ERT enabled in e1000_configure_rx */
 +                      if (pba < min_rx_space)
                                pba = min_rx_space;
                }
  
         * (or the size used for early receive) above it in the Rx FIFO.
         * Set it to the lower of:
         * - 90% of the Rx FIFO size, and
 -       * - the full Rx FIFO size minus the early receive size (for parts
 -       *   with ERT support assuming ERT set to E1000_ERT_2048), or
         * - the full Rx FIFO size minus one full frame
         */
        if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
                fc->pause_time = 0xFFFF;
        else
                fc->pause_time = E1000_FC_PAUSE_TIME;
 -      fc->send_xon = 1;
 +      fc->send_xon = true;
        fc->current_mode = fc->requested_mode;
  
        switch (hw->mac.type) {
 +      case e1000_ich9lan:
 +      case e1000_ich10lan:
 +              if (adapter->netdev->mtu > ETH_DATA_LEN) {
 +                      pba = 14;
 +                      ew32(PBA, pba);
 +                      fc->high_water = 0x2800;
 +                      fc->low_water = fc->high_water - 8;
 +                      break;
 +              }
 +              /* fall-through */
        default:
 -              if ((adapter->flags & FLAG_HAS_ERT) &&
 -                  (adapter->netdev->mtu > ETH_DATA_LEN))
 -                      hwm = min(((pba << 10) * 9 / 10),
 -                                ((pba << 10) - (E1000_ERT_2048 << 3)));
 -              else
 -                      hwm = min(((pba << 10) * 9 / 10),
 -                                ((pba << 10) - adapter->max_frame_size));
 +              hwm = min(((pba << 10) * 9 / 10),
 +                        ((pba << 10) - adapter->max_frame_size));
  
                fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
                fc->low_water = fc->high_water - 8;
  
        /*
         * Disable Adaptive Interrupt Moderation if 2 full packets cannot
 -       * fit in receive buffer and early-receive not supported.
 +       * fit in receive buffer.
         */
        if (adapter->itr_setting & 0x3) {
 -              if (((adapter->max_frame_size * 2) > (pba << 10)) &&
 -                  !(adapter->flags & FLAG_HAS_ERT)) {
 +              if ((adapter->max_frame_size * 2) > (pba << 10)) {
                        if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) {
                                dev_info(&adapter->pdev->dev,
                                        "Interrupt Throttle Rate turned off\n");
@@@ -3659,8 -3591,8 +3656,8 @@@ void e1000e_down(struct e1000_adapter *
        spin_unlock(&adapter->stats64_lock);
  
        e1000e_flush_descriptors(adapter);
 -      e1000_clean_tx_ring(adapter);
 -      e1000_clean_rx_ring(adapter);
 +      e1000_clean_tx_ring(adapter->tx_ring);
 +      e1000_clean_rx_ring(adapter->rx_ring);
  
        adapter->link_speed = 0;
        adapter->link_duplex = 0;
@@@ -3700,8 -3632,6 +3697,8 @@@ static int __devinit e1000_sw_init(stru
        adapter->rx_ps_bsize0 = 128;
        adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
        adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
 +      adapter->tx_ring_count = E1000_DEFAULT_TXD;
 +      adapter->rx_ring_count = E1000_DEFAULT_RXD;
  
        spin_lock_init(&adapter->stats64_lock);
  
@@@ -3789,9 -3719,8 +3786,9 @@@ static int e1000_test_msi_interrupt(str
        if (adapter->flags & FLAG_MSI_TEST_FAILED) {
                adapter->int_mode = E1000E_INT_MODE_LEGACY;
                e_info("MSI interrupt test failed, using legacy interrupt.\n");
 -      } else
 +      } else {
                e_dbg("MSI interrupt test succeeded!\n");
 +      }
  
        free_irq(adapter->pdev->irq, netdev);
        pci_disable_msi(adapter->pdev);
@@@ -3861,12 -3790,12 +3858,12 @@@ static int e1000_open(struct net_devic
        netif_carrier_off(netdev);
  
        /* allocate transmit descriptors */
 -      err = e1000e_setup_tx_resources(adapter);
 +      err = e1000e_setup_tx_resources(adapter->tx_ring);
        if (err)
                goto err_setup_tx;
  
        /* allocate receive descriptors */
 -      err = e1000e_setup_rx_resources(adapter);
 +      err = e1000e_setup_rx_resources(adapter->rx_ring);
        if (err)
                goto err_setup_rx;
  
             E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
                e1000_update_mng_vlan(adapter);
  
 -      /* DMA latency requirement to workaround early-receive/jumbo issue */
 -      if ((adapter->flags & FLAG_HAS_ERT) ||
 -          (adapter->hw.mac.type == e1000_pch2lan))
 +      /* DMA latency requirement to workaround jumbo issue */
 +      if (adapter->hw.mac.type == e1000_pch2lan)
                pm_qos_add_request(&adapter->netdev->pm_qos_req,
                                   PM_QOS_CPU_DMA_LATENCY,
                                   PM_QOS_DEFAULT_VALUE);
  err_req_irq:
        e1000e_release_hw_control(adapter);
        e1000_power_down_phy(adapter);
 -      e1000e_free_rx_resources(adapter);
 +      e1000e_free_rx_resources(adapter->rx_ring);
  err_setup_rx:
 -      e1000e_free_tx_resources(adapter);
 +      e1000e_free_tx_resources(adapter->tx_ring);
  err_setup_tx:
        e1000e_reset(adapter);
        pm_runtime_put_sync(&pdev->dev);
@@@ -3979,8 -3909,8 +3976,8 @@@ static int e1000_close(struct net_devic
        }
        e1000_power_down_phy(adapter);
  
 -      e1000e_free_tx_resources(adapter);
 -      e1000e_free_rx_resources(adapter);
 +      e1000e_free_tx_resources(adapter->tx_ring);
 +      e1000e_free_rx_resources(adapter->rx_ring);
  
        /*
         * kill manageability vlan ID if supported, but not if a vlan with
            !test_bit(__E1000_TESTING, &adapter->state))
                e1000e_release_hw_control(adapter);
  
 -      if ((adapter->flags & FLAG_HAS_ERT) ||
 -          (adapter->hw.mac.type == e1000_pch2lan))
 +      if (adapter->hw.mac.type == e1000_pch2lan)
                pm_qos_remove_request(&adapter->netdev->pm_qos_req);
  
        pm_runtime_put_sync(&pdev->dev);
@@@ -4633,12 -4564,13 +4630,12 @@@ link_up
  #define E1000_TX_FLAGS_VLAN           0x00000002
  #define E1000_TX_FLAGS_TSO            0x00000004
  #define E1000_TX_FLAGS_IPV4           0x00000008
 +#define E1000_TX_FLAGS_NO_FCS         0x00000010
  #define E1000_TX_FLAGS_VLAN_MASK      0xffff0000
  #define E1000_TX_FLAGS_VLAN_SHIFT     16
  
 -static int e1000_tso(struct e1000_adapter *adapter,
 -                   struct sk_buff *skb)
 +static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
  {
 -      struct e1000_ring *tx_ring = adapter->tx_ring;
        struct e1000_context_desc *context_desc;
        struct e1000_buffer *buffer_info;
        unsigned int i;
        return 1;
  }
  
 -static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
 +static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
  {
 -      struct e1000_ring *tx_ring = adapter->tx_ring;
 +      struct e1000_adapter *adapter = tx_ring->adapter;
        struct e1000_context_desc *context_desc;
        struct e1000_buffer *buffer_info;
        unsigned int i;
  #define E1000_MAX_PER_TXD     8192
  #define E1000_MAX_TXD_PWR     12
  
 -static int e1000_tx_map(struct e1000_adapter *adapter,
 -                      struct sk_buff *skb, unsigned int first,
 -                      unsigned int max_per_txd, unsigned int nr_frags,
 -                      unsigned int mss)
 +static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
 +                      unsigned int first, unsigned int max_per_txd,
 +                      unsigned int nr_frags, unsigned int mss)
  {
 -      struct e1000_ring *tx_ring = adapter->tx_ring;
 +      struct e1000_adapter *adapter = tx_ring->adapter;
        struct pci_dev *pdev = adapter->pdev;
        struct e1000_buffer *buffer_info;
        unsigned int len = skb_headlen(skb);
@@@ -4860,15 -4793,16 +4857,15 @@@ dma_error
                        i += tx_ring->count;
                i--;
                buffer_info = &tx_ring->buffer_info[i];
 -              e1000_put_txbuf(adapter, buffer_info);
 +              e1000_put_txbuf(tx_ring, buffer_info);
        }
  
        return 0;
  }
  
 -static void e1000_tx_queue(struct e1000_adapter *adapter,
 -                         int tx_flags, int count)
 +static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count)
  {
 -      struct e1000_ring *tx_ring = adapter->tx_ring;
 +      struct e1000_adapter *adapter = tx_ring->adapter;
        struct e1000_tx_desc *tx_desc = NULL;
        struct e1000_buffer *buffer_info;
        u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
                txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
        }
  
 +      if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
 +              txd_lower &= ~(E1000_TXD_CMD_IFCS);
 +
        i = tx_ring->next_to_use;
  
        do {
  
        tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
  
 +      /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
 +      if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
 +              tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
 +
        /*
         * Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.  (Only
        tx_ring->next_to_use = i;
  
        if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
 -              e1000e_update_tdt_wa(adapter, i);
 +              e1000e_update_tdt_wa(tx_ring, i);
        else
 -              writel(i, adapter->hw.hw_addr + tx_ring->tail);
 +              writel(i, tx_ring->tail);
  
        /*
         * we need this if more than one processor can write to our tail
@@@ -4978,11 -4905,11 +4975,11 @@@ static int e1000_transfer_dhcp_info(str
        return 0;
  }
  
 -static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
 +static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
  {
 -      struct e1000_adapter *adapter = netdev_priv(netdev);
 +      struct e1000_adapter *adapter = tx_ring->adapter;
  
 -      netif_stop_queue(netdev);
 +      netif_stop_queue(adapter->netdev);
        /*
         * Herbert's original patch had:
         *  smp_mb__after_netif_stop_queue();
         * We need to check again in a case another CPU has just
         * made room available.
         */
 -      if (e1000_desc_unused(adapter->tx_ring) < size)
 +      if (e1000_desc_unused(tx_ring) < size)
                return -EBUSY;
  
        /* A reprieve! */
 -      netif_start_queue(netdev);
 +      netif_start_queue(adapter->netdev);
        ++adapter->restart_queue;
        return 0;
  }
  
 -static int e1000_maybe_stop_tx(struct net_device *netdev, int size)
 +static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
  {
 -      struct e1000_adapter *adapter = netdev_priv(netdev);
 -
 -      if (e1000_desc_unused(adapter->tx_ring) >= size)
 +      if (e1000_desc_unused(tx_ring) >= size)
                return 0;
 -      return __e1000_maybe_stop_tx(netdev, size);
 +      return __e1000_maybe_stop_tx(tx_ring, size);
  }
  
 -#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
 +#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
  static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
                                    struct net_device *netdev)
  {
                if (skb->data_len && (hdr_len == len)) {
                        unsigned int pull_size;
  
 -                      pull_size = min((unsigned int)4, skb->data_len);
 +                      pull_size = min_t(unsigned int, 4, skb->data_len);
                        if (!__pskb_pull_tail(skb, pull_size)) {
                                e_err("__pskb_pull_tail failed.\n");
                                dev_kfree_skb_any(skb);
         * need: count + 2 desc gap to keep tail from touching
         * head, otherwise try next time
         */
 -      if (e1000_maybe_stop_tx(netdev, count + 2))
 +      if (e1000_maybe_stop_tx(tx_ring, count + 2))
                return NETDEV_TX_BUSY;
  
        if (vlan_tx_tag_present(skb)) {
  
        first = tx_ring->next_to_use;
  
 -      tso = e1000_tso(adapter, skb);
 +      tso = e1000_tso(tx_ring, skb);
        if (tso < 0) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
  
        if (tso)
                tx_flags |= E1000_TX_FLAGS_TSO;
 -      else if (e1000_tx_csum(adapter, skb))
 +      else if (e1000_tx_csum(tx_ring, skb))
                tx_flags |= E1000_TX_FLAGS_CSUM;
  
        /*
        if (skb->protocol == htons(ETH_P_IP))
                tx_flags |= E1000_TX_FLAGS_IPV4;
  
 +      if (unlikely(skb->no_fcs))
 +              tx_flags |= E1000_TX_FLAGS_NO_FCS;
 +
        /* if count is 0 then mapping error has occurred */
 -      count = e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss);
 +      count = e1000_tx_map(tx_ring, skb, first, max_per_txd, nr_frags, mss);
        if (count) {
                netdev_sent_queue(netdev, skb->len);
 -              e1000_tx_queue(adapter, tx_flags, count);
 +              e1000_tx_queue(tx_ring, tx_flags, count);
                /* Make sure there is space in the ring for the next send. */
 -              e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2);
 +              e1000_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 2);
  
        } else {
                dev_kfree_skb_any(skb);
@@@ -5237,22 -5163,10 +5234,22 @@@ static int e1000_change_mtu(struct net_
        int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
  
        /* Jumbo frame support */
 -      if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
 -          !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
 -              e_err("Jumbo Frames not supported.\n");
 -              return -EINVAL;
 +      if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) {
 +              if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
 +                      e_err("Jumbo Frames not supported.\n");
 +                      return -EINVAL;
 +              }
 +
 +              /*
 +               * IP payload checksum (enabled with jumbos/packet-split when
 +               * Rx checksum is enabled) and generation of RSS hash is
 +               * mutually exclusive in the hardware.
 +               */
 +              if ((netdev->features & NETIF_F_RXCSUM) &&
 +                  (netdev->features & NETIF_F_RXHASH)) {
 +                      e_err("Jumbo frames cannot be enabled when both receive checksum offload and receive hashing are enabled.  Disable one of the receive offload features before enabling jumbos.\n");
 +                      return -EINVAL;
 +              }
        }
  
        /* Supported frame sizes */
@@@ -5406,7 -5320,7 +5403,7 @@@ static int e1000_init_phy_wakeup(struc
        /* Enable access to wakeup registers on and set page to BM_WUC_PAGE */
        retval = e1000_enable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
        if (retval)
 -              goto out;
 +              goto release;
  
        /* copy MAC MTA to PHY MTA - only needed for pchlan */
        for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
        retval = e1000_disable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
        if (retval)
                e_err("Could not set PHY Host Wakeup bit\n");
 -out:
 +release:
        hw->phy.ops.release(hw);
  
        return retval;
@@@ -5992,7 -5906,7 +5989,7 @@@ static void e1000_print_device_info(str
        ret_val = e1000_read_pba_string_generic(hw, pba_str,
                                                E1000_PBANUM_LENGTH);
        if (ret_val)
 -              strncpy((char *)pba_str, "Unknown", sizeof(pba_str) - 1);
 +              strlcpy((char *)pba_str, "Unknown", sizeof(pba_str));
        e_info("MAC: %d, PHY: %d, PBA No: %s\n",
               hw->mac.type, hw->phy.type, pba_str);
  }
@@@ -6007,8 -5921,7 +6004,8 @@@ static void e1000_eeprom_checks(struct 
                return;
  
        ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf);
 -      if (!ret_val && (!(le16_to_cpu(buf) & (1 << 0)))) {
 +      le16_to_cpus(&buf);
 +      if (!ret_val && (!(buf & (1 << 0)))) {
                /* Deep Smart Power Down (DSPD) */
                dev_warn(&adapter->pdev->dev,
                         "Warning: detected DSPD enabled in EEPROM\n");
  }
  
  static int e1000_set_features(struct net_device *netdev,
 -      netdev_features_t features)
 +                            netdev_features_t features)
  {
        struct e1000_adapter *adapter = netdev_priv(netdev);
        netdev_features_t changed = features ^ netdev->features;
                adapter->flags |= FLAG_TSO_FORCE;
  
        if (!(changed & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX |
 -                       NETIF_F_RXCSUM)))
 +                       NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_RXFCS |
 +                       NETIF_F_RXALL)))
                return 0;
  
 +      /*
 +       * IP payload checksum (enabled with jumbos/packet-split when Rx
 +       * checksum is enabled) and generation of RSS hash is mutually
 +       * exclusive in the hardware.
 +       */
 +      if (adapter->rx_ps_pages &&
 +          (features & NETIF_F_RXCSUM) && (features & NETIF_F_RXHASH)) {
 +              e_err("Enabling both receive checksum offload and receive hashing is not possible with jumbo frames.  Disable jumbos or enable only one of the receive offload features.\n");
 +              return -EINVAL;
 +      }
 +
 +      if (changed & NETIF_F_RXFCS) {
 +              if (features & NETIF_F_RXFCS) {
 +                      adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
 +              } else {
 +                      /* We need to take it back to defaults, which might mean
 +                       * stripping is still disabled at the adapter level.
 +                       */
 +                      if (adapter->flags2 & FLAG2_DFLT_CRC_STRIPPING)
 +                              adapter->flags2 |= FLAG2_CRC_STRIPPING;
 +                      else
 +                              adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
 +              }
 +      }
 +
 +      netdev->features = features;
 +
        if (netif_running(netdev))
                e1000e_reinit_locked(adapter);
        else
@@@ -6104,6 -5989,7 +6101,6 @@@ static int __devinit e1000_probe(struc
        const struct e1000_info *ei = e1000_info_tbl[ent->driver_data];
        resource_size_t mmio_start, mmio_len;
        resource_size_t flash_start, flash_len;
 -
        static int cards_found;
        u16 aspm_disable_flag = 0;
        int i, err, pci_using_dac;
        e1000e_set_ethtool_ops(netdev);
        netdev->watchdog_timeo          = 5 * HZ;
        netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
 -      strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
 +      strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
  
        netdev->mem_start = mmio_start;
        netdev->mem_end = mmio_start + mmio_len;
                adapter->hw.phy.ms_type = e1000_ms_hw_default;
        }
  
 -      if (e1000_check_reset_block(&adapter->hw))
 +      if (hw->phy.ops.check_reset_block(hw))
                e_info("PHY reset is blocked due to SOL/IDER session.\n");
  
        /* Set initial default active device features */
                            NETIF_F_HW_VLAN_TX |
                            NETIF_F_TSO |
                            NETIF_F_TSO6 |
 +                          NETIF_F_RXHASH |
                            NETIF_F_RXCSUM |
                            NETIF_F_HW_CSUM);
  
        /* Set user-changeable features (subset of all device features) */
        netdev->hw_features = netdev->features;
 +      netdev->hw_features |= NETIF_F_RXFCS;
 +      netdev->priv_flags |= IFF_SUPP_NOFCS;
 +      netdev->hw_features |= NETIF_F_RXALL;
  
        if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
                netdev->features |= NETIF_F_HW_VLAN_FILTER;
        } else if (adapter->flags & FLAG_APME_IN_CTRL3) {
                if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
                    (adapter->hw.bus.func == 1))
 -                      e1000_read_nvm(&adapter->hw,
 -                              NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
 +                      e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_B,
 +                                     1, &eeprom_data);
                else
 -                      e1000_read_nvm(&adapter->hw,
 -                              NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
 +                      e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_A,
 +                                     1, &eeprom_data);
        }
  
        /* fetch WoL from EEPROM */
        if (!(adapter->flags & FLAG_HAS_AMT))
                e1000e_get_hw_control(adapter);
  
 -      strncpy(netdev->name, "eth%d", sizeof(netdev->name) - 1);
 +      strlcpy(netdev->name, "eth%d", sizeof(netdev->name));
        err = register_netdev(netdev);
        if (err)
                goto err_register;
@@@ -6403,7 -6285,7 +6400,7 @@@ err_register
        if (!(adapter->flags & FLAG_HAS_AMT))
                e1000e_release_hw_control(adapter);
  err_eeprom:
 -      if (!e1000_check_reset_block(&adapter->hw))
 +      if (!hw->phy.ops.check_reset_block(hw))
                e1000_phy_hw_reset(&adapter->hw);
  err_hw_init:
        kfree(adapter->tx_ring);
@@@ -6565,7 -6447,7 +6562,7 @@@ static DEFINE_PCI_DEVICE_TABLE(e1000_pc
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan },
  
 -      { }     /* terminate list */
 +      { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
  };
  MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
  
@@@ -6584,9 -6466,7 +6581,9 @@@ static struct pci_driver e1000_driver 
        .probe    = e1000_probe,
        .remove   = __devexit_p(e1000_remove),
  #ifdef CONFIG_PM
 -      .driver.pm = &e1000_pm_ops,
 +      .driver   = {
 +              .pm = &e1000_pm_ops,
 +      },
  #endif
        .shutdown = e1000_shutdown,
        .err_handler = &e1000_err_handler
@@@ -6603,7 -6483,7 +6600,7 @@@ static int __init e1000_init_module(voi
        int ret;
        pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
                e1000e_driver_version);
 -      pr_info("Copyright(c) 1999 - 2011 Intel Corporation.\n");
 +      pr_info("Copyright(c) 1999 - 2012 Intel Corporation.\n");
        ret = pci_register_driver(&e1000_driver);
  
        return ret;
@@@ -6628,4 -6508,4 +6625,4 @@@ MODULE_DESCRIPTION("Intel(R) PRO/1000 N
  MODULE_LICENSE("GPL");
  MODULE_VERSION(DRV_VERSION);
  
 -/* e1000_main.c */
 +/* netdev.c */
  #include <asm/byteorder.h>
  #include <asm/uaccess.h>
  
- #define cas_page_map(x)      kmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
- #define cas_page_unmap(x)    kunmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
+ #define cas_page_map(x)      kmap_atomic((x))
+ #define cas_page_unmap(x)    kunmap_atomic((x))
  #define CAS_NCPUS            num_online_cpus()
  
  #define cas_skb_release(x)  netif_rx(x)
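Illustration (not part of the merged diff): a minimal caller-side sketch of the conversion this series applies throughout. The deprecated kmap_atomic(page, km_type) loses its KM_* slot argument, and kunmap_atomic() now takes only the mapped address. The helper name and copy logic below are hypothetical; only the k[un]map_atomic() calls reflect the real API.

        #include <linux/highmem.h>
        #include <linux/string.h>

        /* Hypothetical helper: copy a buffer into a (possibly highmem) page. */
        static void copy_into_page(struct page *page, const void *src, size_t len)
        {
                void *addr;

                /* old form: addr = kmap_atomic(page, KM_USER0); */
                addr = kmap_atomic(page);       /* slot now chosen implicitly per-CPU */
                memcpy(addr, src, len);
                /* old form: kunmap_atomic(addr, KM_USER0); */
                kunmap_atomic(addr);
        }
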
@@@ -835,6 -835,7 +835,6 @@@ static int cas_saturn_firmware_init(str
        cp->fw_data = vmalloc(cp->fw_size);
        if (!cp->fw_data) {
                err = -ENOMEM;
 -              pr_err("\"%s\" Failed %d\n", fw_name, err);
                goto out;
        }
        memcpy(cp->fw_data, &fw->data[2], cp->fw_size);
@@@ -1974,7 -1975,7 +1974,7 @@@ static int cas_rx_process_pkt(struct ca
        else
                alloclen = max(hlen, RX_COPY_MIN);
  
 -      skb = dev_alloc_skb(alloclen + swivel + cp->crc_size);
 +      skb = netdev_alloc_skb(cp->dev, alloclen + swivel + cp->crc_size);
        if (skb == NULL)
                return -1;
  
@@@ -4946,6 -4947,7 +4946,6 @@@ static int __devinit cas_init_one(struc
  
        dev = alloc_etherdev(sizeof(*cp));
        if (!dev) {
 -              dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
                err = -ENOMEM;
                goto err_out_disable_pdev;
        }
diff --combined drivers/scsi/fcoe/fcoe.c
@@@ -1498,7 -1498,7 +1498,7 @@@ static int fcoe_xmit(struct fc_lport *l
  
        /* crc offload */
        if (likely(lport->crc_offload)) {
 -              skb->ip_summed = CHECKSUM_PARTIAL;
 +              skb->ip_summed = CHECKSUM_UNNECESSARY;
                skb->csum_start = skb_headroom(skb);
                skb->csum_offset = skb->len;
                crc = 0;
                        return -ENOMEM;
                }
                frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
-               cp = kmap_atomic(skb_frag_page(frag), KM_SKB_DATA_SOFTIRQ)
+               cp = kmap_atomic(skb_frag_page(frag))
                        + frag->page_offset;
        } else {
                cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
        cp->fcoe_crc32 = cpu_to_le32(~crc);
  
        if (skb_is_nonlinear(skb)) {
-               kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ);
+               kunmap_atomic(cp);
                cp = NULL;
        }
  
  #include <scsi/scsi_devinfo.h>
  #include <scsi/scsi_dbg.h>
  
 +/*
 + * All wire protocol details (storage protocol between the guest and the host)
 + * are consolidated here.
 + *
 + * Begin protocol definitions.
 + */
  
 -#define STORVSC_MIN_BUF_NR                            64
 -#define STORVSC_RING_BUFFER_SIZE                      (20*PAGE_SIZE)
 -static int storvsc_ringbuffer_size = STORVSC_RING_BUFFER_SIZE;
 -
 -module_param(storvsc_ringbuffer_size, int, S_IRUGO);
 -MODULE_PARM_DESC(storvsc_ringbuffer_size, "Ring buffer size (bytes)");
 -
 -/* to alert the user that structure sizes may be mismatched even though the */
 -/* protocol versions match. */
 -
 -
 -#define REVISION_STRING(REVISION_) #REVISION_
 -#define FILL_VMSTOR_REVISION(RESULT_LVALUE_)                          \
 -      do {                                                            \
 -              char *revision_string                                   \
 -                      = REVISION_STRING($Rev : 6 $) + 6;              \
 -              RESULT_LVALUE_ = 0;                                     \
 -              while (*revision_string >= '0'                          \
 -                      && *revision_string <= '9') {                   \
 -                      RESULT_LVALUE_ *= 10;                           \
 -                      RESULT_LVALUE_ += *revision_string - '0';       \
 -                      revision_string++;                              \
 -              }                                                       \
 -      } while (0)
 -
 -/* Major/minor macros.  Minor version is in LSB, meaning that earlier flat */
 -/* version numbers will be interpreted as "0.x" (i.e., 1 becomes 0.1). */
 -#define VMSTOR_PROTOCOL_MAJOR(VERSION_)               (((VERSION_) >> 8) & 0xff)
 -#define VMSTOR_PROTOCOL_MINOR(VERSION_)               (((VERSION_))      & 0xff)
 -#define VMSTOR_PROTOCOL_VERSION(MAJOR_, MINOR_)       ((((MAJOR_) & 0xff) << 8) | \
 -                                               (((MINOR_) & 0xff)))
 -#define VMSTOR_INVALID_PROTOCOL_VERSION               (-1)
 -
 -/* Version history: */
 -/* V1 Beta                    0.1 */
 -/* V1 RC < 2008/1/31          1.0 */
 -/* V1 RC > 2008/1/31          2.0 */
 -#define VMSTOR_PROTOCOL_VERSION_CURRENT VMSTOR_PROTOCOL_VERSION(4, 2)
 -
 -
 -
 +/*
 + * Version history:
 + * V1 Beta: 0.1
 + * V1 RC < 2008/1/31: 1.0
 + * V1 RC > 2008/1/31:  2.0
 + * Win7: 4.2
 + */
  
 -/*  This will get replaced with the max transfer length that is possible on */
 -/*  the host adapter. */
 -/*  The max transfer length will be published when we offer a vmbus channel. */
 -#define MAX_TRANSFER_LENGTH   0x40000
 -#define DEFAULT_PACKET_SIZE (sizeof(struct vmdata_gpa_direct) +       \
 -                      sizeof(struct vstor_packet) +           \
 -                      sizesizeof(u64) * (MAX_TRANSFER_LENGTH / PAGE_SIZE)))
 +#define VMSTOR_CURRENT_MAJOR  4
 +#define VMSTOR_CURRENT_MINOR  2
  
  
  /*  Packet structure describing virtual storage requests. */
@@@ -82,31 -115,35 +82,31 @@@ enum vstor_packet_operation 
   * this remains the same across the write regardless of 32/64 bit
   * note: it's patterned off the SCSI_PASS_THROUGH structure
   */
 -#define CDB16GENERIC_LENGTH                   0x10
 -
 -#ifndef SENSE_BUFFER_SIZE
 -#define SENSE_BUFFER_SIZE                     0x12
 -#endif
 -
 -#define MAX_DATA_BUF_LEN_WITH_PADDING         0x14
 +#define STORVSC_MAX_CMD_LEN                   0x10
 +#define STORVSC_SENSE_BUFFER_SIZE             0x12
 +#define STORVSC_MAX_BUF_LEN_WITH_PADDING      0x14
  
  struct vmscsi_request {
 -      unsigned short length;
 -      unsigned char srb_status;
 -      unsigned char scsi_status;
 +      u16 length;
 +      u8 srb_status;
 +      u8 scsi_status;
  
 -      unsigned char port_number;
 -      unsigned char path_id;
 -      unsigned char target_id;
 -      unsigned char lun;
 +      u8 port_number;
 +      u8 path_id;
 +      u8 target_id;
 +      u8 lun;
  
 -      unsigned char cdb_length;
 -      unsigned char sense_info_length;
 -      unsigned char data_in;
 -      unsigned char reserved;
 +      u8 cdb_length;
 +      u8 sense_info_length;
 +      u8 data_in;
 +      u8 reserved;
  
 -      unsigned int data_transfer_length;
 +      u32 data_transfer_length;
  
        union {
 -              unsigned char cdb[CDB16GENERIC_LENGTH];
 -              unsigned char sense_data[SENSE_BUFFER_SIZE];
 -              unsigned char reserved_array[MAX_DATA_BUF_LEN_WITH_PADDING];
 +              u8 cdb[STORVSC_MAX_CMD_LEN];
 +              u8 sense_data[STORVSC_SENSE_BUFFER_SIZE];
 +              u8 reserved_array[STORVSC_MAX_BUF_LEN_WITH_PADDING];
        };
  } __attribute((packed));
  
   * properties of the channel.
   */
  struct vmstorage_channel_properties {
 -      unsigned short protocol_version;
 -      unsigned char path_id;
 -      unsigned char target_id;
 +      u16 protocol_version;
 +      u8 path_id;
 +      u8 target_id;
  
        /* Note: port number is only really known on the client side */
 -      unsigned int port_number;
 -      unsigned int flags;
 -      unsigned int max_transfer_bytes;
 +      u32  port_number;
 +      u32  flags;
 +      u32   max_transfer_bytes;
  
 -      /*  This id is unique for each channel and will correspond with */
 -      /*  vendor specific data in the inquirydata */
 -      unsigned long long unique_id;
 +      /*
 +       * This id is unique for each channel and will correspond with
 +       * vendor specific data in the inquiry data.
 +       */
 +
 +      u64  unique_id;
  } __packed;
  
  /*  This structure is sent during the storage protocol negotiations. */
  struct vmstorage_protocol_version {
        /* Major (MSW) and minor (LSW) version numbers. */
 -      unsigned short major_minor;
 +      u16 major_minor;
  
        /*
         * Revision number is auto-incremented whenever this file is changed
         * (See FILL_VMSTOR_REVISION macro above).  Mismatch does not
         * definitely indicate incompatibility--but it does indicate mismatched
         * builds.
 +       * This is only used on the windows side. Just set it to 0.
         */
 -      unsigned short revision;
 +      u16 revision;
  } __packed;
  
  /* Channel Property Flags */
@@@ -157,10 -190,10 +157,10 @@@ struct vstor_packet 
        enum vstor_packet_operation operation;
  
        /*  Flags - see below for values */
 -      unsigned int flags;
 +      u32 flags;
  
        /* Status of the request returned from the server side. */
 -      unsigned int status;
 +      u32 status;
  
        /* Data payload area */
        union {
        };
  } __packed;
  
 -/* Packet flags */
  /*
 + * Packet Flags:
 + *
   * This flag indicates that the server should send back a completion for this
   * packet.
   */
 +
  #define REQUEST_COMPLETION_FLAG       0x1
  
 -/*  This is the set of flags that the vsc can set in any packets it sends */
 -#define VSC_LEGAL_FLAGS               (REQUEST_COMPLETION_FLAG)
 +/* Matches Windows-end */
 +enum storvsc_request_type {
 +      WRITE_TYPE = 0,
 +      READ_TYPE,
 +      UNKNOWN_TYPE,
 +};
 +
 +/*
 + * SRB status codes and masks; a subset of the codes used here.
 + */
 +
 +#define SRB_STATUS_AUTOSENSE_VALID    0x80
 +#define SRB_STATUS_INVALID_LUN        0x20
 +#define SRB_STATUS_SUCCESS    0x01
 +#define SRB_STATUS_ERROR      0x04
 +
 +/*
 + * This is the end of Protocol specific defines.
 + */
 +
 +
 +/*
 + * We setup a mempool to allocate request structures for this driver
 + * on a per-lun basis. The following define specifies the number of
 + * elements in the pool.
 + */
  
 +#define STORVSC_MIN_BUF_NR                            64
 +static int storvsc_ringbuffer_size = (20 * PAGE_SIZE);
  
 -/* Defines */
 +module_param(storvsc_ringbuffer_size, int, S_IRUGO);
 +MODULE_PARM_DESC(storvsc_ringbuffer_size, "Ring buffer size (bytes)");
  
  #define STORVSC_MAX_IO_REQUESTS                               128
  
  #define STORVSC_MAX_LUNS_PER_TARGET                   64
  #define STORVSC_MAX_TARGETS                           1
  #define STORVSC_MAX_CHANNELS                          1
 -#define STORVSC_MAX_CMD_LEN                           16
  
 -/* Matches Windows-end */
 -enum storvsc_request_type {
 -      WRITE_TYPE,
 -      READ_TYPE,
 -      UNKNOWN_TYPE,
 -};
  
  
 -struct hv_storvsc_request {
 +struct storvsc_cmd_request {
 +      struct list_head entry;
 +      struct scsi_cmnd *cmd;
 +
 +      unsigned int bounce_sgl_count;
 +      struct scatterlist *bounce_sgl;
 +
        struct hv_device *device;
  
        /* Synchronize the request/response if needed */
        struct completion wait_event;
  
        unsigned char *sense_buffer;
 -      void *context;
 -      void (*on_io_completion)(struct hv_storvsc_request *request);
        struct hv_multipage_buffer data_buffer;
 -
        struct vstor_packet vstor_packet;
  };
  
@@@ -273,8 -281,8 +273,8 @@@ struct storvsc_device 
        unsigned char target_id;
  
        /* Used for vsc/vsp channel reset process */
 -      struct hv_storvsc_request init_request;
 -      struct hv_storvsc_request reset_request;
 +      struct storvsc_cmd_request init_request;
 +      struct storvsc_cmd_request reset_request;
  };
  
  struct stor_mem_pools {
@@@ -289,6 -297,16 +289,6 @@@ struct hv_host_device 
        unsigned char target;
  };
  
 -struct storvsc_cmd_request {
 -      struct list_head entry;
 -      struct scsi_cmnd *cmd;
 -
 -      unsigned int bounce_sgl_count;
 -      struct scatterlist *bounce_sgl;
 -
 -      struct hv_storvsc_request request;
 -};
 -
  struct storvsc_scan_work {
        struct work_struct work;
        struct Scsi_Host *host;
@@@ -334,34 -352,6 +334,34 @@@ done
        kfree(wrk);
  }
  
 +/*
 + * Major/minor macros.  Minor version is in LSB, meaning that earlier flat
 + * version numbers will be interpreted as "0.x" (i.e., 1 becomes 0.1).
 + */
 +
 +static inline u16 storvsc_get_version(u8 major, u8 minor)
 +{
 +      u16 version;
 +
 +      version = ((major << 8) | minor);
 +      return version;
 +}
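Illustration (not part of the merged diff): with the minor number in the LSB, as the comment above notes, the current protocol version packs as sketched below (assuming the VMSTOR_CURRENT_* values defined earlier in this file).

        /* (4 << 8) | 2 == 0x0402, i.e. protocol version 4.2 */
        u16 ver = storvsc_get_version(VMSTOR_CURRENT_MAJOR, VMSTOR_CURRENT_MINOR);
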
 +
 +/*
 + * We can get incoming messages from the host that are not in response to
 + * messages that we have sent out. An example of this would be messages
 + * received by the guest to notify dynamic addition/removal of LUNs. To
 + * deal with potential race conditions where the driver may be in the
 + * midst of being unloaded when we might receive an unsolicited message
 + * from the host, we have implemented a mechanism to gurantee sequential
 + * consistency:
 + *
 + * 1) Once the device is marked as being destroyed, we will fail all
 + *    outgoing messages.
 + * 2) We permit incoming messages when the device is being destroyed,
 + *    only to properly account for messages already sent out.
 + */
 +
  static inline struct storvsc_device *get_out_stor_device(
                                        struct hv_device *device)
  {
@@@ -408,348 -398,122 +408,348 @@@ get_in_err
  
  }
  
 -static int storvsc_channel_init(struct hv_device *device)
 +static void destroy_bounce_buffer(struct scatterlist *sgl,
 +                                unsigned int sg_count)
  {
 -      struct storvsc_device *stor_device;
 -      struct hv_storvsc_request *request;
 -      struct vstor_packet *vstor_packet;
 -      int ret, t;
 +      int i;
 +      struct page *page_buf;
  
 -      stor_device = get_out_stor_device(device);
 -      if (!stor_device)
 -              return -ENODEV;
 +      for (i = 0; i < sg_count; i++) {
 +              page_buf = sg_page((&sgl[i]));
 +              if (page_buf != NULL)
 +                      __free_page(page_buf);
 +      }
  
 -      request = &stor_device->init_request;
 -      vstor_packet = &request->vstor_packet;
 +      kfree(sgl);
 +}
  
 -      /*
 -       * Now, initiate the vsc/vsp initialization protocol on the open
 -       * channel
 -       */
 -      memset(request, 0, sizeof(struct hv_storvsc_request));
 -      init_completion(&request->wait_event);
 -      vstor_packet->operation = VSTOR_OPERATION_BEGIN_INITIALIZATION;
 -      vstor_packet->flags = REQUEST_COMPLETION_FLAG;
 +static int do_bounce_buffer(struct scatterlist *sgl, unsigned int sg_count)
 +{
 +      int i;
  
 -      ret = vmbus_sendpacket(device->channel, vstor_packet,
 -                             sizeof(struct vstor_packet),
 -                             (unsigned long)request,
 -                             VM_PKT_DATA_INBAND,
 -                             VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 -      if (ret != 0)
 -              goto cleanup;
 +      /* No need to check */
 +      if (sg_count < 2)
 +              return -1;
  
 -      t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
 -      if (t == 0) {
 -              ret = -ETIMEDOUT;
 -              goto cleanup;
 +      /* We have at least 2 sg entries */
 +      for (i = 0; i < sg_count; i++) {
 +              if (i == 0) {
 +                      /* make sure 1st one does not have hole */
 +                      if (sgl[i].offset + sgl[i].length != PAGE_SIZE)
 +                              return i;
 +              } else if (i == sg_count - 1) {
 +                      /* make sure last one does not have hole */
 +                      if (sgl[i].offset != 0)
 +                              return i;
 +              } else {
 +                      /* make sure no hole in the middle */
 +                      if (sgl[i].length != PAGE_SIZE || sgl[i].offset != 0)
 +                              return i;
 +              }
        }
 +      return -1;
 +}
  
 -      if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
 -          vstor_packet->status != 0)
 -              goto cleanup;
 -
 -
 -      /* reuse the packet for version range supported */
 -      memset(vstor_packet, 0, sizeof(struct vstor_packet));
 -      vstor_packet->operation = VSTOR_OPERATION_QUERY_PROTOCOL_VERSION;
 -      vstor_packet->flags = REQUEST_COMPLETION_FLAG;
 +static struct scatterlist *create_bounce_buffer(struct scatterlist *sgl,
 +                                              unsigned int sg_count,
 +                                              unsigned int len,
 +                                              int write)
 +{
 +      int i;
 +      int num_pages;
 +      struct scatterlist *bounce_sgl;
 +      struct page *page_buf;
 +      unsigned int buf_len = ((write == WRITE_TYPE) ? 0 : PAGE_SIZE);
  
 -      vstor_packet->version.major_minor = VMSTOR_PROTOCOL_VERSION_CURRENT;
 -      FILL_VMSTOR_REVISION(vstor_packet->version.revision);
 +      num_pages = ALIGN(len, PAGE_SIZE) >> PAGE_SHIFT;
  
 -      ret = vmbus_sendpacket(device->channel, vstor_packet,
 -                             sizeof(struct vstor_packet),
 -                             (unsigned long)request,
 -                             VM_PKT_DATA_INBAND,
 -                             VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 -      if (ret != 0)
 -              goto cleanup;
 +      bounce_sgl = kcalloc(num_pages, sizeof(struct scatterlist), GFP_ATOMIC);
 +      if (!bounce_sgl)
 +              return NULL;
  
 -      t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
 -      if (t == 0) {
 -              ret = -ETIMEDOUT;
 -              goto cleanup;
 +      for (i = 0; i < num_pages; i++) {
 +              page_buf = alloc_page(GFP_ATOMIC);
 +              if (!page_buf)
 +                      goto cleanup;
 +              sg_set_page(&bounce_sgl[i], page_buf, buf_len, 0);
        }
  
 -      if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
 -          vstor_packet->status != 0)
 -              goto cleanup;
 -
 +      return bounce_sgl;
  
 -      memset(vstor_packet, 0, sizeof(struct vstor_packet));
 -      vstor_packet->operation = VSTOR_OPERATION_QUERY_PROPERTIES;
 -      vstor_packet->flags = REQUEST_COMPLETION_FLAG;
 -      vstor_packet->storage_channel_properties.port_number =
 -                                      stor_device->port_number;
 +cleanup:
 +      destroy_bounce_buffer(bounce_sgl, num_pages);
 +      return NULL;
 +}
  
 -      ret = vmbus_sendpacket(device->channel, vstor_packet,
 -                             sizeof(struct vstor_packet),
 -                             (unsigned long)request,
 -                             VM_PKT_DATA_INBAND,
 -                             VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
++/* Disgusting wrapper functions */
++static inline unsigned long sg_kmap_atomic(struct scatterlist *sgl, int idx)
++{
++      void *addr = kmap_atomic(sg_page(sgl + idx));
++      return (unsigned long)addr;
++}
 -      if (ret != 0)
 -              goto cleanup;
++static inline void sg_kunmap_atomic(unsigned long addr)
++{
++      kunmap_atomic((void *)addr);
++}
 -      t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
 -      if (t == 0) {
 -              ret = -ETIMEDOUT;
 -              goto cleanup;
 -      }
 -      if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
 -          vstor_packet->status != 0)
 -              goto cleanup;
 +/* Assume the original sgl has enough room */
 +static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
 +                                          struct scatterlist *bounce_sgl,
 +                                          unsigned int orig_sgl_count,
 +                                          unsigned int bounce_sgl_count)
 +{
 +      int i;
 +      int j = 0;
 +      unsigned long src, dest;
 +      unsigned int srclen, destlen, copylen;
 +      unsigned int total_copied = 0;
 +      unsigned long bounce_addr = 0;
 +      unsigned long dest_addr = 0;
 +      unsigned long flags;
  
 -      stor_device->path_id = vstor_packet->storage_channel_properties.path_id;
 -      stor_device->target_id
 -              = vstor_packet->storage_channel_properties.target_id;
 +      local_irq_save(flags);
  
 -      memset(vstor_packet, 0, sizeof(struct vstor_packet));
 -      vstor_packet->operation = VSTOR_OPERATION_END_INITIALIZATION;
 -      vstor_packet->flags = REQUEST_COMPLETION_FLAG;
 +      for (i = 0; i < orig_sgl_count; i++) {
-               dest_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])),
-                                       KM_IRQ0) + orig_sgl[i].offset;
++              dest_addr = sg_kmap_atomic(orig_sgl,i) + orig_sgl[i].offset;
 +              dest = dest_addr;
 +              destlen = orig_sgl[i].length;
  
 -      ret = vmbus_sendpacket(device->channel, vstor_packet,
 -                             sizeof(struct vstor_packet),
 -                             (unsigned long)request,
 -                             VM_PKT_DATA_INBAND,
 -                             VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 +              if (bounce_addr == 0)
-                       bounce_addr =
-                       (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])),
-                                                       KM_IRQ0);
++                      bounce_addr = sg_kmap_atomic(bounce_sgl,j);
  
 -      if (ret != 0)
 -              goto cleanup;
 +              while (destlen) {
 +                      src = bounce_addr + bounce_sgl[j].offset;
 +                      srclen = bounce_sgl[j].length - bounce_sgl[j].offset;
  
 -      t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
 -      if (t == 0) {
 -              ret = -ETIMEDOUT;
 -              goto cleanup;
 -      }
 +                      copylen = min(srclen, destlen);
 +                      memcpy((void *)dest, (void *)src, copylen);
 +
 +                      total_copied += copylen;
 +                      bounce_sgl[j].offset += copylen;
 +                      destlen -= copylen;
 +                      dest += copylen;
 +
 +                      if (bounce_sgl[j].offset == bounce_sgl[j].length) {
 +                              /* full */
-                               kunmap_atomic((void *)bounce_addr, KM_IRQ0);
++                              sg_kunmap_atomic(bounce_addr);
 +                              j++;
 +
 +                              /*
 +                               * It is possible that the number of elements
 +                               * in the bounce buffer may not be equal to
 +                               * the number of elements in the original
 +                               * scatter list. Handle this correctly.
 +                               */
 +
 +                              if (j == bounce_sgl_count) {
 +                                      /*
 +                                       * We are done; cleanup and return.
 +                                       */
-                                       kunmap_atomic((void *)(dest_addr -
-                                                       orig_sgl[i].offset),
-                                                       KM_IRQ0);
++                                      sg_kunmap_atomic(dest_addr - orig_sgl[i].offset);
 +                                      local_irq_restore(flags);
 +                                      return total_copied;
 +                              }
 +
 +                              /* if we need to use another bounce buffer */
 +                              if (destlen || i != orig_sgl_count - 1)
-                                       bounce_addr =
-                                       (unsigned long)kmap_atomic(
-                                       sg_page((&bounce_sgl[j])), KM_IRQ0);
++                                      bounce_addr = sg_kmap_atomic(bounce_sgl,j);
 +                      } else if (destlen == 0 && i == orig_sgl_count - 1) {
 +                              /* unmap the last bounce that is < PAGE_SIZE */
-                               kunmap_atomic((void *)bounce_addr, KM_IRQ0);
++                              sg_kunmap_atomic(bounce_addr);
 +                      }
 +              }
 +
-               kunmap_atomic((void *)(dest_addr - orig_sgl[i].offset),
-                             KM_IRQ0);
++              sg_kunmap_atomic(dest_addr - orig_sgl[i].offset);
 +      }
 +
 +      local_irq_restore(flags);
 +
 +      return total_copied;
 +}
 +
 +/* Assume the bounce_sgl has enough room ie using the create_bounce_buffer() */
 +static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
 +                                        struct scatterlist *bounce_sgl,
 +                                        unsigned int orig_sgl_count)
 +{
 +      int i;
 +      int j = 0;
 +      unsigned long src, dest;
 +      unsigned int srclen, destlen, copylen;
 +      unsigned int total_copied = 0;
 +      unsigned long bounce_addr = 0;
 +      unsigned long src_addr = 0;
 +      unsigned long flags;
 +
 +      local_irq_save(flags);
 +
 +      for (i = 0; i < orig_sgl_count; i++) {
-               src_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])),
-                               KM_IRQ0) + orig_sgl[i].offset;
++              src_addr = sg_kmap_atomic(orig_sgl, i) + orig_sgl[i].offset;
 +              src = src_addr;
 +              srclen = orig_sgl[i].length;
 +
 +              if (bounce_addr == 0)
-                       bounce_addr =
-                       (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])),
-                                               KM_IRQ0);
++                      bounce_addr = sg_kmap_atomic(bounce_sgl, j);
 +
 +              while (srclen) {
 +                      /* assume bounce offset always == 0 */
 +                      dest = bounce_addr + bounce_sgl[j].length;
 +                      destlen = PAGE_SIZE - bounce_sgl[j].length;
 +
 +                      copylen = min(srclen, destlen);
 +                      memcpy((void *)dest, (void *)src, copylen);
 +
 +                      total_copied += copylen;
 +                      bounce_sgl[j].length += copylen;
 +                      srclen -= copylen;
 +                      src += copylen;
 +
 +                      if (bounce_sgl[j].length == PAGE_SIZE) {
 +                              /* full; move to next entry */
-                               kunmap_atomic((void *)bounce_addr, KM_IRQ0);
++                              sg_kunmap_atomic(bounce_addr);
 +                              j++;
 +
 +                              /* if we need to use another bounce buffer */
 +                              if (srclen || i != orig_sgl_count - 1)
-                                       bounce_addr =
-                                       (unsigned long)kmap_atomic(
-                                       sg_page((&bounce_sgl[j])), KM_IRQ0);
++                                      bounce_addr = sg_kmap_atomic(bounce_sgl, j);
 +
 +                      } else if (srclen == 0 && i == orig_sgl_count - 1) {
 +                              /* unmap the last bounce that is < PAGE_SIZE */
-                               kunmap_atomic((void *)bounce_addr, KM_IRQ0);
++                              sg_kunmap_atomic(bounce_addr);
 +                      }
 +              }
 +
-               kunmap_atomic((void *)(src_addr - orig_sgl[i].offset), KM_IRQ0);
++              sg_kunmap_atomic(src_addr - orig_sgl[i].offset);
 +      }
 +
 +      local_irq_restore(flags);
 +
 +      return total_copied;
 +}
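
The two copy routines are mirror images: copy_to_bounce_buffer() fills the bounce pages on the write path before the request is handed to the host, while copy_from_bounce_buffer() drains them on the read path from the completion handler (see storvsc_command_completion() further down, which calls it for READ_TYPE requests). A minimal sketch of the write-side call, assuming the bounce list was built with create_bounce_buffer(); this is an illustration, not literal driver code:

	if (vm_srb->data_in == WRITE_TYPE)
		copy_to_bounce_buffer(scsi_sglist(scmnd),
				      cmd_request->bounce_sgl,
				      scsi_sg_count(scmnd));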
 +
 +static int storvsc_channel_init(struct hv_device *device)
 +{
 +      struct storvsc_device *stor_device;
 +      struct storvsc_cmd_request *request;
 +      struct vstor_packet *vstor_packet;
 +      int ret, t;
 +
 +      stor_device = get_out_stor_device(device);
 +      if (!stor_device)
 +              return -ENODEV;
 +
 +      request = &stor_device->init_request;
 +      vstor_packet = &request->vstor_packet;
 +
 +      /*
 +       * Now, initiate the vsc/vsp initialization protocol on the open
 +       * channel
 +       */
 +      memset(request, 0, sizeof(struct storvsc_cmd_request));
 +      init_completion(&request->wait_event);
 +      vstor_packet->operation = VSTOR_OPERATION_BEGIN_INITIALIZATION;
 +      vstor_packet->flags = REQUEST_COMPLETION_FLAG;
 +
 +      ret = vmbus_sendpacket(device->channel, vstor_packet,
 +                             sizeof(struct vstor_packet),
 +                             (unsigned long)request,
 +                             VM_PKT_DATA_INBAND,
 +                             VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 +      if (ret != 0)
 +              goto cleanup;
 +
 +      t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
 +      if (t == 0) {
 +              ret = -ETIMEDOUT;
 +              goto cleanup;
 +      }
 +
 +      if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
 +          vstor_packet->status != 0)
 +              goto cleanup;
 +
 +
 +      /* reuse the packet for version range supported */
 +      memset(vstor_packet, 0, sizeof(struct vstor_packet));
 +      vstor_packet->operation = VSTOR_OPERATION_QUERY_PROTOCOL_VERSION;
 +      vstor_packet->flags = REQUEST_COMPLETION_FLAG;
 +
 +      vstor_packet->version.major_minor =
 +              storvsc_get_version(VMSTOR_CURRENT_MAJOR, VMSTOR_CURRENT_MINOR);
 +
 +      /*
 +       * The revision number is only used in Windows; set it to 0.
 +       */
 +      vstor_packet->version.revision = 0;
 +
 +      ret = vmbus_sendpacket(device->channel, vstor_packet,
 +                             sizeof(struct vstor_packet),
 +                             (unsigned long)request,
 +                             VM_PKT_DATA_INBAND,
 +                             VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 +      if (ret != 0)
 +              goto cleanup;
 +
 +      t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
 +      if (t == 0) {
 +              ret = -ETIMEDOUT;
 +              goto cleanup;
 +      }
 +
 +      if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
 +          vstor_packet->status != 0)
 +              goto cleanup;
 +
 +
 +      memset(vstor_packet, 0, sizeof(struct vstor_packet));
 +      vstor_packet->operation = VSTOR_OPERATION_QUERY_PROPERTIES;
 +      vstor_packet->flags = REQUEST_COMPLETION_FLAG;
 +      vstor_packet->storage_channel_properties.port_number =
 +                                      stor_device->port_number;
 +
 +      ret = vmbus_sendpacket(device->channel, vstor_packet,
 +                             sizeof(struct vstor_packet),
 +                             (unsigned long)request,
 +                             VM_PKT_DATA_INBAND,
 +                             VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 +
 +      if (ret != 0)
 +              goto cleanup;
 +
 +      t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
 +      if (t == 0) {
 +              ret = -ETIMEDOUT;
 +              goto cleanup;
 +      }
 +
 +      if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
 +          vstor_packet->status != 0)
 +              goto cleanup;
 +
 +      stor_device->path_id = vstor_packet->storage_channel_properties.path_id;
 +      stor_device->target_id
 +              = vstor_packet->storage_channel_properties.target_id;
 +
 +      memset(vstor_packet, 0, sizeof(struct vstor_packet));
 +      vstor_packet->operation = VSTOR_OPERATION_END_INITIALIZATION;
 +      vstor_packet->flags = REQUEST_COMPLETION_FLAG;
 +
 +      ret = vmbus_sendpacket(device->channel, vstor_packet,
 +                             sizeof(struct vstor_packet),
 +                             (unsigned long)request,
 +                             VM_PKT_DATA_INBAND,
 +                             VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 +
 +      if (ret != 0)
 +              goto cleanup;
 +
 +      t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
 +      if (t == 0) {
 +              ret = -ETIMEDOUT;
 +              goto cleanup;
 +      }
  
        if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
            vstor_packet->status != 0)
@@@ -760,84 -524,9 +760,84 @@@ cleanup
        return ret;
  }
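
storvsc_channel_init() repeats the same handshake step four times, for BEGIN_INITIALIZATION, QUERY_PROTOCOL_VERSION, QUERY_PROPERTIES and END_INITIALIZATION: fill in the vstor_packet, send it, wait up to five seconds, then check that the host answered with VSTOR_OPERATION_COMPLETE_IO and a zero status. Purely to make that pattern explicit, the common part could be factored into a helper along these lines (hypothetical, not code from the driver):

static int storvsc_send_and_wait(struct hv_device *device,
				 struct storvsc_cmd_request *request)
{
	struct vstor_packet *vstor_packet = &request->vstor_packet;
	int ret, t;

	ret = vmbus_sendpacket(device->channel, vstor_packet,
			       sizeof(struct vstor_packet),
			       (unsigned long)request,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0)
		return ret;

	t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
	if (t == 0)
		return -ETIMEDOUT;

	/* The reply is copied back into the same packet by the channel callback. */
	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
	    vstor_packet->status != 0)
		return -EINVAL;

	return 0;
}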
  
 +
 +static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request)
 +{
 +      struct scsi_cmnd *scmnd = cmd_request->cmd;
 +      struct hv_host_device *host_dev = shost_priv(scmnd->device->host);
 +      void (*scsi_done_fn)(struct scsi_cmnd *);
 +      struct scsi_sense_hdr sense_hdr;
 +      struct vmscsi_request *vm_srb;
 +      struct storvsc_scan_work *wrk;
 +      struct stor_mem_pools *memp = scmnd->device->hostdata;
 +
 +      vm_srb = &cmd_request->vstor_packet.vm_srb;
 +      if (cmd_request->bounce_sgl_count) {
 +              if (vm_srb->data_in == READ_TYPE)
 +                      copy_from_bounce_buffer(scsi_sglist(scmnd),
 +                                      cmd_request->bounce_sgl,
 +                                      scsi_sg_count(scmnd),
 +                                      cmd_request->bounce_sgl_count);
 +              destroy_bounce_buffer(cmd_request->bounce_sgl,
 +                                      cmd_request->bounce_sgl_count);
 +      }
 +
 +      /*
 +       * If there is an error, offline the device since all
 +       * error recovery strategies would have already been
 +       * deployed on the host side.
 +       */
 +      if (vm_srb->srb_status == SRB_STATUS_ERROR)
 +              scmnd->result = DID_TARGET_FAILURE << 16;
 +      else
 +              scmnd->result = vm_srb->scsi_status;
 +
 +      /*
 +       * If the LUN is invalid, remove the device.
 +       */
 +      if (vm_srb->srb_status == SRB_STATUS_INVALID_LUN) {
 +              struct storvsc_device *stor_dev;
 +              struct hv_device *dev = host_dev->dev;
 +              struct Scsi_Host *host;
 +
 +              stor_dev = get_in_stor_device(dev);
 +              host = stor_dev->host;
 +
 +              wrk = kmalloc(sizeof(struct storvsc_scan_work),
 +                              GFP_ATOMIC);
 +              if (!wrk) {
 +                      scmnd->result = DID_TARGET_FAILURE << 16;
 +              } else {
 +                      wrk->host = host;
 +                      wrk->lun = vm_srb->lun;
 +                      INIT_WORK(&wrk->work, storvsc_remove_lun);
 +                      schedule_work(&wrk->work);
 +              }
 +      }
 +
 +      if (scmnd->result) {
 +              if (scsi_normalize_sense(scmnd->sense_buffer,
 +                              SCSI_SENSE_BUFFERSIZE, &sense_hdr))
 +                      scsi_print_sense_hdr("storvsc", &sense_hdr);
 +      }
 +
 +      scsi_set_resid(scmnd,
 +              cmd_request->data_buffer.len -
 +              vm_srb->data_transfer_length);
 +
 +      scsi_done_fn = scmnd->scsi_done;
 +
 +      scmnd->host_scribble = NULL;
 +      scmnd->scsi_done = NULL;
 +
 +      scsi_done_fn(scmnd);
 +
 +      mempool_free(cmd_request, memp->request_mempool);
 +}
 +
  static void storvsc_on_io_completion(struct hv_device *device,
                                  struct vstor_packet *vstor_packet,
 -                                struct hv_storvsc_request *request)
 +                                struct storvsc_cmd_request *request)
  {
        struct storvsc_device *stor_device;
        struct vstor_packet *stor_pkt;
         */
  
        if ((stor_pkt->vm_srb.cdb[0] == INQUIRY) ||
 -              (stor_pkt->vm_srb.cdb[0] == MODE_SENSE)) {
 +         (stor_pkt->vm_srb.cdb[0] == MODE_SENSE)) {
                vstor_packet->vm_srb.scsi_status = 0;
 -              vstor_packet->vm_srb.srb_status = 0x1;
 +              vstor_packet->vm_srb.srb_status = SRB_STATUS_SUCCESS;
        }
  
  
        vstor_packet->vm_srb.sense_info_length;
  
        if (vstor_packet->vm_srb.scsi_status != 0 ||
 -              vstor_packet->vm_srb.srb_status != 1){
 +              vstor_packet->vm_srb.srb_status != SRB_STATUS_SUCCESS) {
                dev_warn(&device->device,
                         "cmd 0x%x scsi status 0x%x srb status 0x%x\n",
                         stor_pkt->vm_srb.cdb[0],
  
        if ((vstor_packet->vm_srb.scsi_status & 0xFF) == 0x02) {
                /* CHECK_CONDITION */
 -              if (vstor_packet->vm_srb.srb_status & 0x80) {
 +              if (vstor_packet->vm_srb.srb_status &
 +                      SRB_STATUS_AUTOSENSE_VALID) {
                        /* autosense data available */
                        dev_warn(&device->device,
                                 "stor pkt %p autosense data valid - len %d\n",
        stor_pkt->vm_srb.data_transfer_length =
        vstor_packet->vm_srb.data_transfer_length;
  
 -      request->on_io_completion(request);
 +      storvsc_command_completion(request);
  
        if (atomic_dec_and_test(&stor_device->num_outstanding_req) &&
                stor_device->drain_notify)
  
  static void storvsc_on_receive(struct hv_device *device,
                             struct vstor_packet *vstor_packet,
 -                           struct hv_storvsc_request *request)
 +                           struct storvsc_cmd_request *request)
  {
        struct storvsc_scan_work *work;
        struct storvsc_device *stor_device;
@@@ -943,7 -631,7 +943,7 @@@ static void storvsc_on_channel_callback
        u32 bytes_recvd;
        u64 request_id;
        unsigned char packet[ALIGN(sizeof(struct vstor_packet), 8)];
 -      struct hv_storvsc_request *request;
 +      struct storvsc_cmd_request *request;
        int ret;
  
  
                                       &bytes_recvd, &request_id);
                if (ret == 0 && bytes_recvd > 0) {
  
 -                      request = (struct hv_storvsc_request *)
 +                      request = (struct storvsc_cmd_request *)
                                        (unsigned long)request_id;
  
                        if ((request == &stor_device->init_request) ||
@@@ -986,6 -674,7 +986,6 @@@ static int storvsc_connect_to_vsp(struc
  
        memset(&props, 0, sizeof(struct vmstorage_channel_properties));
  
 -      /* Open the channel */
        ret = vmbus_open(device->channel,
                         ring_size,
                         ring_size,
@@@ -1039,7 -728,7 +1039,7 @@@ static int storvsc_dev_remove(struct hv
  }
  
  static int storvsc_do_io(struct hv_device *device,
 -                            struct hv_storvsc_request *request)
 +                            struct storvsc_cmd_request *request)
  {
        struct storvsc_device *stor_device;
        struct vstor_packet *vstor_packet;
        vstor_packet->vm_srb.length = sizeof(struct vmscsi_request);
  
  
 -      vstor_packet->vm_srb.sense_info_length = SENSE_BUFFER_SIZE;
 -
 -
 -      vstor_packet->vm_srb.data_transfer_length =
 -      request->data_buffer.len;
 -
 -      vstor_packet->operation = VSTOR_OPERATION_EXECUTE_SRB;
 -
 -      if (request->data_buffer.len) {
 -              ret = vmbus_sendpacket_multipagebuffer(device->channel,
 -                              &request->data_buffer,
 -                              vstor_packet,
 -                              sizeof(struct vstor_packet),
 -                              (unsigned long)request);
 -      } else {
 -              ret = vmbus_sendpacket(device->channel, vstor_packet,
 -                             sizeof(struct vstor_packet),
 -                             (unsigned long)request,
 -                             VM_PKT_DATA_INBAND,
 -                             VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 -      }
 -
 -      if (ret != 0)
 -              return ret;
 -
 -      atomic_inc(&stor_device->num_outstanding_req);
 -
 -      return ret;
 -}
 -
 -static void storvsc_get_ide_info(struct hv_device *dev, int *target, int *path)
 -{
 -      *target =
 -              dev->dev_instance.b[5] << 8 | dev->dev_instance.b[4];
 -
 -      *path =
 -              dev->dev_instance.b[3] << 24 |
 -              dev->dev_instance.b[2] << 16 |
 -              dev->dev_instance.b[1] << 8  | dev->dev_instance.b[0];
 -}
 -
 -
 -static int storvsc_device_alloc(struct scsi_device *sdevice)
 -{
 -      struct stor_mem_pools *memp;
 -      int number = STORVSC_MIN_BUF_NR;
 -
 -      memp = kzalloc(sizeof(struct stor_mem_pools), GFP_KERNEL);
 -      if (!memp)
 -              return -ENOMEM;
 -
 -      memp->request_pool =
 -              kmem_cache_create(dev_name(&sdevice->sdev_dev),
 -                              sizeof(struct storvsc_cmd_request), 0,
 -                              SLAB_HWCACHE_ALIGN, NULL);
 -
 -      if (!memp->request_pool)
 -              goto err0;
 -
 -      memp->request_mempool = mempool_create(number, mempool_alloc_slab,
 -                                              mempool_free_slab,
 -                                              memp->request_pool);
 -
 -      if (!memp->request_mempool)
 -              goto err1;
 -
 -      sdevice->hostdata = memp;
 -
 -      return 0;
 -
 -err1:
 -      kmem_cache_destroy(memp->request_pool);
 -
 -err0:
 -      kfree(memp);
 -      return -ENOMEM;
 -}
 -
 -static void storvsc_device_destroy(struct scsi_device *sdevice)
 -{
 -      struct stor_mem_pools *memp = sdevice->hostdata;
 -
 -      mempool_destroy(memp->request_mempool);
 -      kmem_cache_destroy(memp->request_pool);
 -      kfree(memp);
 -      sdevice->hostdata = NULL;
 -}
 -
 -static int storvsc_device_configure(struct scsi_device *sdevice)
 -{
 -      scsi_adjust_queue_depth(sdevice, MSG_SIMPLE_TAG,
 -                              STORVSC_MAX_IO_REQUESTS);
 -
 -      blk_queue_max_segment_size(sdevice->request_queue, PAGE_SIZE);
 -
 -      blk_queue_bounce_limit(sdevice->request_queue, BLK_BOUNCE_ANY);
 -
 -      return 0;
 -}
 -
 -static void destroy_bounce_buffer(struct scatterlist *sgl,
 -                                unsigned int sg_count)
 -{
 -      int i;
 -      struct page *page_buf;
 -
 -      for (i = 0; i < sg_count; i++) {
 -              page_buf = sg_page((&sgl[i]));
 -              if (page_buf != NULL)
 -                      __free_page(page_buf);
 -      }
 -
 -      kfree(sgl);
 -}
 -
 -static int do_bounce_buffer(struct scatterlist *sgl, unsigned int sg_count)
 -{
 -      int i;
 -
 -      /* No need to check */
 -      if (sg_count < 2)
 -              return -1;
 -
 -      /* We have at least 2 sg entries */
 -      for (i = 0; i < sg_count; i++) {
 -              if (i == 0) {
 -                      /* make sure 1st one does not have hole */
 -                      if (sgl[i].offset + sgl[i].length != PAGE_SIZE)
 -                              return i;
 -              } else if (i == sg_count - 1) {
 -                      /* make sure last one does not have hole */
 -                      if (sgl[i].offset != 0)
 -                              return i;
 -              } else {
 -                      /* make sure no hole in the middle */
 -                      if (sgl[i].length != PAGE_SIZE || sgl[i].offset != 0)
 -                              return i;
 -              }
 -      }
 -      return -1;
 -}
 -
 -static struct scatterlist *create_bounce_buffer(struct scatterlist *sgl,
 -                                              unsigned int sg_count,
 -                                              unsigned int len,
 -                                              int write)
 -{
 -      int i;
 -      int num_pages;
 -      struct scatterlist *bounce_sgl;
 -      struct page *page_buf;
 -      unsigned int buf_len = ((write == WRITE_TYPE) ? 0 : PAGE_SIZE);
 -
 -      num_pages = ALIGN(len, PAGE_SIZE) >> PAGE_SHIFT;
 -
 -      bounce_sgl = kcalloc(num_pages, sizeof(struct scatterlist), GFP_ATOMIC);
 -      if (!bounce_sgl)
 -              return NULL;
 -
 -      for (i = 0; i < num_pages; i++) {
 -              page_buf = alloc_page(GFP_ATOMIC);
 -              if (!page_buf)
 -                      goto cleanup;
 -              sg_set_page(&bounce_sgl[i], page_buf, buf_len, 0);
 -      }
 -
 -      return bounce_sgl;
 -
 -cleanup:
 -      destroy_bounce_buffer(bounce_sgl, num_pages);
 -      return NULL;
 -}
 -
 -
 -/* Assume the original sgl has enough room */
 -static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
 -                                          struct scatterlist *bounce_sgl,
 -                                          unsigned int orig_sgl_count,
 -                                          unsigned int bounce_sgl_count)
 -{
 -      int i;
 -      int j = 0;
 -      unsigned long src, dest;
 -      unsigned int srclen, destlen, copylen;
 -      unsigned int total_copied = 0;
 -      unsigned long bounce_addr = 0;
 -      unsigned long dest_addr = 0;
 -      unsigned long flags;
 -
 -      local_irq_save(flags);
 -
 -      for (i = 0; i < orig_sgl_count; i++) {
 -              dest_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])))
 -                                      + orig_sgl[i].offset;
 -              dest = dest_addr;
 -              destlen = orig_sgl[i].length;
 -
 -              if (bounce_addr == 0)
 -                      bounce_addr =
 -                      (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])));
 -
 -              while (destlen) {
 -                      src = bounce_addr + bounce_sgl[j].offset;
 -                      srclen = bounce_sgl[j].length - bounce_sgl[j].offset;
 -
 -                      copylen = min(srclen, destlen);
 -                      memcpy((void *)dest, (void *)src, copylen);
 -
 -                      total_copied += copylen;
 -                      bounce_sgl[j].offset += copylen;
 -                      destlen -= copylen;
 -                      dest += copylen;
 -
 -                      if (bounce_sgl[j].offset == bounce_sgl[j].length) {
 -                              /* full */
 -                              kunmap_atomic((void *)bounce_addr);
 -                              j++;
 +      vstor_packet->vm_srb.sense_info_length = STORVSC_SENSE_BUFFER_SIZE;
  
 -                              /*
 -                               * It is possible that the number of elements
 -                               * in the bounce buffer may not be equal to
 -                               * the number of elements in the original
 -                               * scatter list. Handle this correctly.
 -                               */
  
 -                              if (j == bounce_sgl_count) {
 -                                      /*
 -                                       * We are done; cleanup and return.
 -                                       */
 -                                      kunmap_atomic((void *)(dest_addr -
 -                                                      orig_sgl[i].offset),
 -                                                      KM_IRQ0);
 -                                      local_irq_restore(flags);
 -                                      return total_copied;
 -                              }
 +      vstor_packet->vm_srb.data_transfer_length =
 +      request->data_buffer.len;
  
 -                              /* if we need to use another bounce buffer */
 -                              if (destlen || i != orig_sgl_count - 1)
 -                                      bounce_addr =
 -                                      (unsigned long)kmap_atomic(
 -                                      sg_page((&bounce_sgl[j])));
 -                      } else if (destlen == 0 && i == orig_sgl_count - 1) {
 -                              /* unmap the last bounce that is < PAGE_SIZE */
 -                              kunmap_atomic((void *)bounce_addr);
 -                      }
 -              }
 +      vstor_packet->operation = VSTOR_OPERATION_EXECUTE_SRB;
  
 -              kunmap_atomic((void *)(dest_addr - orig_sgl[i].offset));
 +      if (request->data_buffer.len) {
 +              ret = vmbus_sendpacket_multipagebuffer(device->channel,
 +                              &request->data_buffer,
 +                              vstor_packet,
 +                              sizeof(struct vstor_packet),
 +                              (unsigned long)request);
 +      } else {
 +              ret = vmbus_sendpacket(device->channel, vstor_packet,
 +                             sizeof(struct vstor_packet),
 +                             (unsigned long)request,
 +                             VM_PKT_DATA_INBAND,
 +                             VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
        }
  
 -      local_irq_restore(flags);
 +      if (ret != 0)
 +              return ret;
  
 -      return total_copied;
 -}
 +      atomic_inc(&stor_device->num_outstanding_req);
  
 +      return ret;
 +}
  
 -/* Assume the bounce_sgl has enough room ie using the create_bounce_buffer() */
 -static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
 -                                        struct scatterlist *bounce_sgl,
 -                                        unsigned int orig_sgl_count)
 +static int storvsc_device_alloc(struct scsi_device *sdevice)
  {
 -      int i;
 -      int j = 0;
 -      unsigned long src, dest;
 -      unsigned int srclen, destlen, copylen;
 -      unsigned int total_copied = 0;
 -      unsigned long bounce_addr = 0;
 -      unsigned long src_addr = 0;
 -      unsigned long flags;
 -
 -      local_irq_save(flags);
 +      struct stor_mem_pools *memp;
 +      int number = STORVSC_MIN_BUF_NR;
  
 -      for (i = 0; i < orig_sgl_count; i++) {
 -              src_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])))
 -                              + orig_sgl[i].offset;
 -              src = src_addr;
 -              srclen = orig_sgl[i].length;
 +      memp = kzalloc(sizeof(struct stor_mem_pools), GFP_KERNEL);
 +      if (!memp)
 +              return -ENOMEM;
  
 -              if (bounce_addr == 0)
 -                      bounce_addr =
 -                      (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])));
 +      memp->request_pool =
 +              kmem_cache_create(dev_name(&sdevice->sdev_dev),
 +                              sizeof(struct storvsc_cmd_request), 0,
 +                              SLAB_HWCACHE_ALIGN, NULL);
  
 -              while (srclen) {
 -                      /* assume bounce offset always == 0 */
 -                      dest = bounce_addr + bounce_sgl[j].length;
 -                      destlen = PAGE_SIZE - bounce_sgl[j].length;
 +      if (!memp->request_pool)
 +              goto err0;
  
 -                      copylen = min(srclen, destlen);
 -                      memcpy((void *)dest, (void *)src, copylen);
 +      memp->request_mempool = mempool_create(number, mempool_alloc_slab,
 +                                              mempool_free_slab,
 +                                              memp->request_pool);
  
 -                      total_copied += copylen;
 -                      bounce_sgl[j].length += copylen;
 -                      srclen -= copylen;
 -                      src += copylen;
 +      if (!memp->request_mempool)
 +              goto err1;
  
 -                      if (bounce_sgl[j].length == PAGE_SIZE) {
 -                              /* full..move to next entry */
 -                              kunmap_atomic((void *)bounce_addr);
 -                              j++;
 +      sdevice->hostdata = memp;
  
 -                              /* if we need to use another bounce buffer */
 -                              if (srclen || i != orig_sgl_count - 1)
 -                                      bounce_addr =
 -                                      (unsigned long)kmap_atomic(
 -                                      sg_page((&bounce_sgl[j])));
 +      return 0;
  
 -                      } else if (srclen == 0 && i == orig_sgl_count - 1) {
 -                              /* unmap the last bounce that is < PAGE_SIZE */
 -                              kunmap_atomic((void *)bounce_addr);
 -                      }
 -              }
 +err1:
 +      kmem_cache_destroy(memp->request_pool);
  
 -              kunmap_atomic((void *)(src_addr - orig_sgl[i].offset));
 -      }
 +err0:
 +      kfree(memp);
 +      return -ENOMEM;
 +}
  
 -      local_irq_restore(flags);
 +static void storvsc_device_destroy(struct scsi_device *sdevice)
 +{
 +      struct stor_mem_pools *memp = sdevice->hostdata;
  
 -      return total_copied;
 +      mempool_destroy(memp->request_mempool);
 +      kmem_cache_destroy(memp->request_pool);
 +      kfree(memp);
 +      sdevice->hostdata = NULL;
  }
  
 -
 -static int storvsc_remove(struct hv_device *dev)
 +static int storvsc_device_configure(struct scsi_device *sdevice)
  {
 -      struct storvsc_device *stor_device = hv_get_drvdata(dev);
 -      struct Scsi_Host *host = stor_device->host;
 -
 -      scsi_remove_host(host);
 +      scsi_adjust_queue_depth(sdevice, MSG_SIMPLE_TAG,
 +                              STORVSC_MAX_IO_REQUESTS);
  
 -      scsi_host_put(host);
 +      blk_queue_max_segment_size(sdevice->request_queue, PAGE_SIZE);
  
 -      storvsc_dev_remove(dev);
 +      blk_queue_bounce_limit(sdevice->request_queue, BLK_BOUNCE_ANY);
  
        return 0;
  }
  
 -
  static int storvsc_get_chs(struct scsi_device *sdev, struct block_device * bdev,
                           sector_t capacity, int *info)
  {
        return 0;
  }
  
 -static int storvsc_host_reset(struct hv_device *device)
 +static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
  {
 +      struct hv_host_device *host_dev = shost_priv(scmnd->device->host);
 +      struct hv_device *device = host_dev->dev;
 +
        struct storvsc_device *stor_device;
 -      struct hv_storvsc_request *request;
 +      struct storvsc_cmd_request *request;
        struct vstor_packet *vstor_packet;
        int ret, t;
  
        return SUCCESS;
  }
  
 -
 -/*
 - * storvsc_host_reset_handler - Reset the scsi HBA
 - */
 -static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
 -{
 -      struct hv_host_device *host_dev = shost_priv(scmnd->device->host);
 -      struct hv_device *dev = host_dev->dev;
 -
 -      return storvsc_host_reset(dev);
 -}
 -
 -
 -/*
 - * storvsc_command_completion - Command completion processing
 - */
 -static void storvsc_command_completion(struct hv_storvsc_request *request)
 -{
 -      struct storvsc_cmd_request *cmd_request =
 -              (struct storvsc_cmd_request *)request->context;
 -      struct scsi_cmnd *scmnd = cmd_request->cmd;
 -      struct hv_host_device *host_dev = shost_priv(scmnd->device->host);
 -      void (*scsi_done_fn)(struct scsi_cmnd *);
 -      struct scsi_sense_hdr sense_hdr;
 -      struct vmscsi_request *vm_srb;
 -      struct storvsc_scan_work *wrk;
 -      struct stor_mem_pools *memp = scmnd->device->hostdata;
 -
 -      vm_srb = &request->vstor_packet.vm_srb;
 -      if (cmd_request->bounce_sgl_count) {
 -              if (vm_srb->data_in == READ_TYPE)
 -                      copy_from_bounce_buffer(scsi_sglist(scmnd),
 -                                      cmd_request->bounce_sgl,
 -                                      scsi_sg_count(scmnd),
 -                                      cmd_request->bounce_sgl_count);
 -              destroy_bounce_buffer(cmd_request->bounce_sgl,
 -                                      cmd_request->bounce_sgl_count);
 -      }
 -
 -      /*
 -       * If there is an error; offline the device since all
 -       * error recovery strategies would have already been
 -       * deployed on the host side.
 -       */
 -      if (vm_srb->srb_status == 0x4)
 -              scmnd->result = DID_TARGET_FAILURE << 16;
 -      else
 -              scmnd->result = vm_srb->scsi_status;
 -
 -      /*
 -       * If the LUN is invalid; remove the device.
 -       */
 -      if (vm_srb->srb_status == 0x20) {
 -              struct storvsc_device *stor_dev;
 -              struct hv_device *dev = host_dev->dev;
 -              struct Scsi_Host *host;
 -
 -              stor_dev = get_in_stor_device(dev);
 -              host = stor_dev->host;
 -
 -              wrk = kmalloc(sizeof(struct storvsc_scan_work),
 -                              GFP_ATOMIC);
 -              if (!wrk) {
 -                      scmnd->result = DID_TARGET_FAILURE << 16;
 -              } else {
 -                      wrk->host = host;
 -                      wrk->lun = vm_srb->lun;
 -                      INIT_WORK(&wrk->work, storvsc_remove_lun);
 -                      schedule_work(&wrk->work);
 -              }
 -      }
 -
 -      if (scmnd->result) {
 -              if (scsi_normalize_sense(scmnd->sense_buffer,
 -                              SCSI_SENSE_BUFFERSIZE, &sense_hdr))
 -                      scsi_print_sense_hdr("storvsc", &sense_hdr);
 -      }
 -
 -      scsi_set_resid(scmnd,
 -              request->data_buffer.len -
 -              vm_srb->data_transfer_length);
 -
 -      scsi_done_fn = scmnd->scsi_done;
 -
 -      scmnd->host_scribble = NULL;
 -      scmnd->scsi_done = NULL;
 -
 -      scsi_done_fn(scmnd);
 -
 -      mempool_free(cmd_request, memp->request_mempool);
 -}
 -
 -static bool storvsc_check_scsi_cmd(struct scsi_cmnd *scmnd)
 +static bool storvsc_scsi_cmd_ok(struct scsi_cmnd *scmnd)
  {
        bool allowed = true;
        u8 scsi_op = scmnd->cmnd[0];
  
        switch (scsi_op) {
 -      /* smartd sends this command, which will offline the device */
 +      /*
 +       * smartd sends this command, but the host does not handle
 +       * it, so don't pass it on.
 +       */
        case SET_WINDOW:
                scmnd->result = ILLEGAL_REQUEST << 16;
                allowed = false;
        return allowed;
  }
  
 -/*
 - * storvsc_queuecommand - Initiate command processing
 - */
  static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
  {
        int ret;
        struct hv_host_device *host_dev = shost_priv(host);
        struct hv_device *dev = host_dev->dev;
 -      struct hv_storvsc_request *request;
        struct storvsc_cmd_request *cmd_request;
        unsigned int request_size = 0;
        int i;
        struct vmscsi_request *vm_srb;
        struct stor_mem_pools *memp = scmnd->device->hostdata;
  
 -      if (storvsc_check_scsi_cmd(scmnd) == false) {
 +      if (!storvsc_scsi_cmd_ok(scmnd)) {
                scmnd->scsi_done(scmnd);
                return 0;
        }
  
 -      /* If retrying, no need to prep the cmd */
 -      if (scmnd->host_scribble) {
 -
 -              cmd_request =
 -                      (struct storvsc_cmd_request *)scmnd->host_scribble;
 -
 -              goto retry_request;
 -      }
 -
        request_size = sizeof(struct storvsc_cmd_request);
  
        cmd_request = mempool_alloc(memp->request_mempool,
                                       GFP_ATOMIC);
 +
 +      /*
 +       * We might be invoked in an interrupt context; hence
 +       * mempool_alloc() can fail.
 +       */
        if (!cmd_request)
                return SCSI_MLQUEUE_DEVICE_BUSY;
  
        memset(cmd_request, 0, sizeof(struct storvsc_cmd_request));
  
        /* Setup the cmd request */
 -      cmd_request->bounce_sgl_count = 0;
 -      cmd_request->bounce_sgl = NULL;
        cmd_request->cmd = scmnd;
  
        scmnd->host_scribble = (unsigned char *)cmd_request;
  
 -      request = &cmd_request->request;
 -      vm_srb = &request->vstor_packet.vm_srb;
 +      vm_srb = &cmd_request->vstor_packet.vm_srb;
  
  
        /* Build the SRB */
                break;
        }
  
 -      request->on_io_completion = storvsc_command_completion;
 -      request->context = cmd_request;/* scmnd; */
  
        vm_srb->port_number = host_dev->port;
        vm_srb->path_id = scmnd->device->channel;
  
        memcpy(vm_srb->cdb, scmnd->cmnd, vm_srb->cdb_length);
  
 -      request->sense_buffer = scmnd->sense_buffer;
 +      cmd_request->sense_buffer = scmnd->sense_buffer;
  
  
 -      request->data_buffer.len = scsi_bufflen(scmnd);
 +      cmd_request->data_buffer.len = scsi_bufflen(scmnd);
        if (scsi_sg_count(scmnd)) {
                sgl = (struct scatterlist *)scsi_sglist(scmnd);
                sg_count = scsi_sg_count(scmnd);
                                                     scsi_bufflen(scmnd),
                                                     vm_srb->data_in);
                        if (!cmd_request->bounce_sgl) {
 -                              scmnd->host_scribble = NULL;
 -                              mempool_free(cmd_request,
 -                                              memp->request_mempool);
 -
 -                              return SCSI_MLQUEUE_HOST_BUSY;
 +                              ret = SCSI_MLQUEUE_HOST_BUSY;
 +                              goto queue_error;
                        }
  
                        cmd_request->bounce_sgl_count =
                        sg_count = cmd_request->bounce_sgl_count;
                }
  
 -              request->data_buffer.offset = sgl[0].offset;
 +              cmd_request->data_buffer.offset = sgl[0].offset;
  
                for (i = 0; i < sg_count; i++)
 -                      request->data_buffer.pfn_array[i] =
 +                      cmd_request->data_buffer.pfn_array[i] =
                                page_to_pfn(sg_page((&sgl[i])));
  
        } else if (scsi_sglist(scmnd)) {
 -              request->data_buffer.offset =
 +              cmd_request->data_buffer.offset =
                        virt_to_phys(scsi_sglist(scmnd)) & (PAGE_SIZE-1);
 -              request->data_buffer.pfn_array[0] =
 +              cmd_request->data_buffer.pfn_array[0] =
                        virt_to_phys(scsi_sglist(scmnd)) >> PAGE_SHIFT;
        }
  
 -retry_request:
        /* Invokes the vsc to start an IO */
 -      ret = storvsc_do_io(dev, &cmd_request->request);
 +      ret = storvsc_do_io(dev, cmd_request);
  
        if (ret == -EAGAIN) {
                /* no more space */
  
 -              if (cmd_request->bounce_sgl_count)
 +              if (cmd_request->bounce_sgl_count) {
                        destroy_bounce_buffer(cmd_request->bounce_sgl,
                                        cmd_request->bounce_sgl_count);
  
 -              mempool_free(cmd_request, memp->request_mempool);
 -
 -              scmnd->host_scribble = NULL;
 -
 -              ret = SCSI_MLQUEUE_DEVICE_BUSY;
 +                      ret = SCSI_MLQUEUE_DEVICE_BUSY;
 +                      goto queue_error;
 +              }
        }
  
 +      return 0;
 +
 +queue_error:
 +      mempool_free(cmd_request, memp->request_mempool);
 +      scmnd->host_scribble = NULL;
        return ret;
  }
  
 -/* Scsi driver */
  static struct scsi_host_template scsi_driver = {
        .module =               THIS_MODULE,
        .name =                 "storvsc_host_t",
@@@ -1407,6 -1445,11 +1407,6 @@@ static const struct hv_vmbus_device_id 
  
  MODULE_DEVICE_TABLE(vmbus, id_table);
  
 -
 -/*
 - * storvsc_probe - Add a new device for this driver
 - */
 -
  static int storvsc_probe(struct hv_device *device,
                        const struct hv_vmbus_device_id *dev_id)
  {
        struct Scsi_Host *host;
        struct hv_host_device *host_dev;
        bool dev_is_ide = ((dev_id->driver_data == IDE_GUID) ? true : false);
 -      int path = 0;
        int target = 0;
        struct storvsc_device *stor_device;
  
        if (ret)
                goto err_out1;
  
 -      if (dev_is_ide)
 -              storvsc_get_ide_info(device, &target, &path);
 -
        host_dev->path = stor_device->path_id;
        host_dev->target = stor_device->target_id;
  
  
        if (!dev_is_ide) {
                scsi_scan_host(host);
 -              return 0;
 -      }
 -      ret = scsi_add_device(host, 0, target, 0);
 -      if (ret) {
 -              scsi_remove_host(host);
 -              goto err_out2;
 +      } else {
 +              target = (device->dev_instance.b[5] << 8 |
 +                       device->dev_instance.b[4]);
 +              ret = scsi_add_device(host, 0, target, 0);
 +              if (ret) {
 +                      scsi_remove_host(host);
 +                      goto err_out2;
 +              }
        }
        return 0;
  
@@@ -1494,17 -1539,7 +1494,17 @@@ err_out0
        return ret;
  }
  
 -/* The one and only one */
 +static int storvsc_remove(struct hv_device *dev)
 +{
 +      struct storvsc_device *stor_device = hv_get_drvdata(dev);
 +      struct Scsi_Host *host = stor_device->host;
 +
 +      scsi_remove_host(host);
 +      storvsc_dev_remove(dev);
 +      scsi_host_put(host);
 +
 +      return 0;
 +}
  
  static struct hv_driver storvsc_drv = {
        .name = KBUILD_MODNAME,
@@@ -56,17 -56,17 +56,17 @@@ static void clear_flag(struct block_hea
   * This is called from xv_malloc/xv_free path, so it
   * needs to be fast.
   */
- static void *get_ptr_atomic(struct page *page, u16 offset, enum km_type type)
+ static void *get_ptr_atomic(struct page *page, u16 offset)
  {
        unsigned char *base;
  
-       base = kmap_atomic(page, type);
+       base = kmap_atomic(page);
        return base + offset;
  }
  
- static void put_ptr_atomic(void *ptr, enum km_type type)
+ static void put_ptr_atomic(void *ptr)
  {
-       kunmap_atomic(ptr, type);
+       kunmap_atomic(ptr);
  }
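
This pair of wrappers shows the point of the whole series in miniature: the enum km_type argument (KM_USER0, KM_USER1, KM_IRQ0, ...) disappears and slot management moves inside the core kmap_atomic() implementation. Before and after, as a standalone illustration (the helper functions here are hypothetical):

/* Deprecated two-argument form: the caller picked a fixed kmap slot. */
static void zero_page_old(struct page *page)
{
	void *p = kmap_atomic(page, KM_USER0);

	memset(p, 0, PAGE_SIZE);
	kunmap_atomic(p, KM_USER0);
}

/* One-argument form: mappings are stacked internally, nothing to pick. */
static void zero_page_new(struct page *page)
{
	void *p = kmap_atomic(page);

	memset(p, 0, PAGE_SIZE);
	kunmap_atomic(p);
}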
  
  static u32 get_blockprev(struct block_header *block)
@@@ -202,10 -202,10 +202,10 @@@ static void insert_block(struct xv_poo
  
        if (block->link.next_page) {
                nextblock = get_ptr_atomic(block->link.next_page,
-                                       block->link.next_offset, KM_USER1);
+                                       block->link.next_offset);
                nextblock->link.prev_page = page;
                nextblock->link.prev_offset = offset;
-               put_ptr_atomic(nextblock, KM_USER1);
+               put_ptr_atomic(nextblock);
                /* If there was a next page then the free bits are set. */
                return;
        }
@@@ -225,18 -225,18 +225,18 @@@ static void remove_block(struct xv_poo
  
        if (block->link.prev_page) {
                tmpblock = get_ptr_atomic(block->link.prev_page,
-                               block->link.prev_offset, KM_USER1);
+                               block->link.prev_offset);
                tmpblock->link.next_page = block->link.next_page;
                tmpblock->link.next_offset = block->link.next_offset;
-               put_ptr_atomic(tmpblock, KM_USER1);
+               put_ptr_atomic(tmpblock);
        }
  
        if (block->link.next_page) {
                tmpblock = get_ptr_atomic(block->link.next_page,
-                               block->link.next_offset, KM_USER1);
+                               block->link.next_offset);
                tmpblock->link.prev_page = block->link.prev_page;
                tmpblock->link.prev_offset = block->link.prev_offset;
-               put_ptr_atomic(tmpblock, KM_USER1);
+               put_ptr_atomic(tmpblock);
        }
  
        /* Is this block at the head of the freelist? */
                if (pool->freelist[slindex].page) {
                        struct block_header *tmpblock;
                        tmpblock = get_ptr_atomic(pool->freelist[slindex].page,
-                                       pool->freelist[slindex].offset,
-                                       KM_USER1);
+                                       pool->freelist[slindex].offset);
                        tmpblock->link.prev_page = NULL;
                        tmpblock->link.prev_offset = 0;
-                       put_ptr_atomic(tmpblock, KM_USER1);
+                       put_ptr_atomic(tmpblock);
                } else {
                        /* This freelist bucket is empty */
                        __clear_bit(slindex % BITS_PER_LONG,
@@@ -284,7 -283,7 +283,7 @@@ static int grow_pool(struct xv_pool *po
        stat_inc(&pool->total_pages);
  
        spin_lock(&pool->lock);
-       block = get_ptr_atomic(page, 0, KM_USER0);
+       block = get_ptr_atomic(page, 0);
  
        block->size = PAGE_SIZE - XV_ALIGN;
        set_flag(block, BLOCK_FREE);
  
        insert_block(pool, page, 0, block);
  
-       put_ptr_atomic(block, KM_USER0);
+       put_ptr_atomic(block);
        spin_unlock(&pool->lock);
  
        return 0;
@@@ -375,7 -374,7 +374,7 @@@ int xv_malloc(struct xv_pool *pool, u3
                return -ENOMEM;
        }
  
-       block = get_ptr_atomic(*page, *offset, KM_USER0);
+       block = get_ptr_atomic(*page, *offset);
  
        remove_block(pool, *page, *offset, block, index);
  
        block->size = origsize;
        clear_flag(block, BLOCK_FREE);
  
-       put_ptr_atomic(block, KM_USER0);
+       put_ptr_atomic(block);
        spin_unlock(&pool->lock);
  
        *offset += XV_ALIGN;
@@@ -426,7 -425,7 +425,7 @@@ void xv_free(struct xv_pool *pool, stru
  
        spin_lock(&pool->lock);
  
-       page_start = get_ptr_atomic(page, 0, KM_USER0);
+       page_start = get_ptr_atomic(page, 0);
        block = (struct block_header *)((char *)page_start + offset);
  
        /* Catch double free bugs */
  
        /* No used objects in this page. Free it. */
        if (block->size == PAGE_SIZE - XV_ALIGN) {
-               put_ptr_atomic(page_start, KM_USER0);
+               put_ptr_atomic(page_start);
                spin_unlock(&pool->lock);
  
                __free_page(page);
                set_blockprev(tmpblock, offset);
        }
  
-       put_ptr_atomic(page_start, KM_USER0);
+       put_ptr_atomic(page_start);
        spin_unlock(&pool->lock);
  }
  EXPORT_SYMBOL_GPL(xv_free);
index 36d53ed,0000000..68b2e05
mode 100644,000000..100644
--- /dev/null
@@@ -1,3320 -1,0 +1,3320 @@@
-       to_va = kmap_atomic(page, KM_USER0);
 +/*
 + * zcache.c
 + *
 + * Copyright (c) 2010-2012, Dan Magenheimer, Oracle Corp.
 + * Copyright (c) 2010,2011, Nitin Gupta
 + *
 + * Zcache provides an in-kernel "host implementation" for transcendent memory
 + * and, thus indirectly, for cleancache and frontswap.  Zcache includes two
 + * page-accessible memory [1] interfaces, both utilizing lzo1x compression:
 + * 1) "compression buddies" ("zbud") is used for ephemeral pages
 + * 2) xvmalloc is used for persistent pages.
 + * Xvmalloc (based on the TLSF allocator) has very low fragmentation
 + * so maximizes space efficiency, while zbud allows pairs (and potentially,
 + * in the future, more than a pair of) compressed pages to be closely linked
 + * so that reclaiming can be done via the kernel's physical-page-oriented
 + * "shrinker" interface.
 + *
 + * [1] For a definition of page-accessible memory (aka PAM), see:
 + *   http://marc.info/?l=linux-mm&m=127811271605009
 + *  RAMSTER TODO:
 + *   - handle remotifying of buddied pages (see zbud_remotify_zbpg)
 + *   - kernel boot params: nocleancache/nofrontswap don't always work?!?
 + */
 +
 +#include <linux/module.h>
 +#include <linux/cpu.h>
 +#include <linux/highmem.h>
 +#include <linux/list.h>
 +#include <linux/lzo.h>
 +#include <linux/slab.h>
 +#include <linux/spinlock.h>
 +#include <linux/types.h>
 +#include <linux/atomic.h>
 +#include <linux/math64.h>
 +#include "tmem.h"
 +#include "zcache.h"
 +#include "ramster.h"
 +#include "cluster/tcp.h"
 +
 +#include "xvmalloc.h" /* temporary until change to zsmalloc */
 +
 +#define       RAMSTER_TESTING
 +
 +#if (!defined(CONFIG_CLEANCACHE) && !defined(CONFIG_FRONTSWAP))
 +#error "ramster is useless without CONFIG_CLEANCACHE or CONFIG_FRONTSWAP"
 +#endif
 +#ifdef CONFIG_CLEANCACHE
 +#include <linux/cleancache.h>
 +#endif
 +#ifdef CONFIG_FRONTSWAP
 +#include <linux/frontswap.h>
 +#endif
 +
 +enum ramster_remotify_op {
 +      RAMSTER_REMOTIFY_EPH_PUT,
 +      RAMSTER_REMOTIFY_PERS_PUT,
 +      RAMSTER_REMOTIFY_FLUSH_PAGE,
 +      RAMSTER_REMOTIFY_FLUSH_OBJ,
 +      RAMSTER_INTRANSIT_PERS
 +};
 +
 +struct ramster_remotify_hdr {
 +      enum ramster_remotify_op op;
 +      struct list_head list;
 +};
 +
 +#define ZBH_SENTINEL  0x43214321
 +#define ZBPG_SENTINEL  0xdeadbeef
 +
 +#define ZBUD_MAX_BUDS 2
 +
 +struct zbud_hdr {
 +      struct ramster_remotify_hdr rem_op;
 +      uint16_t client_id;
 +      uint16_t pool_id;
 +      struct tmem_oid oid;
 +      uint32_t index;
 +      uint16_t size; /* compressed size in bytes, zero means unused */
 +      DECL_SENTINEL
 +};
 +
 +#define ZVH_SENTINEL  0x43214321
 +static const int zv_max_page_size = (PAGE_SIZE / 8) * 7;
 +
 +struct zv_hdr {
 +      struct ramster_remotify_hdr rem_op;
 +      uint16_t client_id;
 +      uint16_t pool_id;
 +      struct tmem_oid oid;
 +      uint32_t index;
 +      DECL_SENTINEL
 +};
 +
 +struct flushlist_node {
 +      struct ramster_remotify_hdr rem_op;
 +      struct tmem_xhandle xh;
 +};
 +
 +union {
 +      struct ramster_remotify_hdr rem_op;
 +      struct zv_hdr zv;
 +      struct zbud_hdr zbud;
 +      struct flushlist_node flist;
 +} remotify_list_node;
 +
 +static LIST_HEAD(zcache_rem_op_list);
 +static DEFINE_SPINLOCK(zcache_rem_op_list_lock);
 +
 +#if 0
 +/* this is more aggressive but may cause other problems? */
 +#define ZCACHE_GFP_MASK       (GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN)
 +#else
 +#define ZCACHE_GFP_MASK \
 +      (__GFP_FS | __GFP_NORETRY | __GFP_NOWARN | __GFP_NOMEMALLOC)
 +#endif
 +
 +#define MAX_POOLS_PER_CLIENT 16
 +
 +#define MAX_CLIENTS 16
 +#define LOCAL_CLIENT ((uint16_t)-1)
 +
 +MODULE_LICENSE("GPL");
 +
 +struct zcache_client {
 +      struct tmem_pool *tmem_pools[MAX_POOLS_PER_CLIENT];
 +      struct xv_pool *xvpool;
 +      bool allocated;
 +      atomic_t refcount;
 +};
 +
 +static struct zcache_client zcache_host;
 +static struct zcache_client zcache_clients[MAX_CLIENTS];
 +
 +static inline uint16_t get_client_id_from_client(struct zcache_client *cli)
 +{
 +      BUG_ON(cli == NULL);
 +      if (cli == &zcache_host)
 +              return LOCAL_CLIENT;
 +      return cli - &zcache_clients[0];
 +}
 +
 +static inline bool is_local_client(struct zcache_client *cli)
 +{
 +      return cli == &zcache_host;
 +}
 +
 +/**********
 + * Compression buddies ("zbud") provides for packing two (or, possibly
 + * in the future, more) compressed ephemeral pages into a single "raw"
 + * (physical) page and tracking them with data structures so that
 + * the raw pages can be easily reclaimed.
 + *
 + * A zbud page ("zbpg") is an aligned page containing a list_head,
 + * a lock, and two "zbud headers".  The remainder of the physical
 + * page is divided up into aligned 64-byte "chunks" which contain
 + * the compressed data for zero, one, or two zbuds.  Each zbpg
 + * resides on: (1) an "unused list" if it has no zbuds; (2) a
 + * "buddied" list if it is fully populated  with two zbuds; or
 + * (3) one of PAGE_SIZE/64 "unbuddied" lists indexed by how many chunks
 + * the one unbuddied zbud uses.  The data inside a zbpg cannot be
 + * read or written unless the zbpg's lock is held.
 + */
 +
 +struct zbud_page {
 +      struct list_head bud_list;
 +      spinlock_t lock;
 +      struct zbud_hdr buddy[ZBUD_MAX_BUDS];
 +      DECL_SENTINEL
 +      /* followed by NUM_CHUNK aligned CHUNK_SIZE-byte chunks */
 +};
 +
 +#define CHUNK_SHIFT   6
 +#define CHUNK_SIZE    (1 << CHUNK_SHIFT)
 +#define CHUNK_MASK    (~(CHUNK_SIZE-1))
 +#define NCHUNKS               (((PAGE_SIZE - sizeof(struct zbud_page)) & \
 +                              CHUNK_MASK) >> CHUNK_SHIFT)
 +#define MAX_CHUNK     (NCHUNKS-1)
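
As a rough picture of the layout those constants describe (not to scale; the number of usable chunks depends on PAGE_SIZE and sizeof(struct zbud_page)):

/*
 *  0                                                     PAGE_SIZE
 *  +--------------------+----------------+~~~~~~+----------------+
 *  | struct zbud_page   | buddy 0 data   | free | buddy 1 data   |
 *  | (padded to a chunk | (grows upward) |      | (packed at the |
 *  |  boundary)         |                |      |  end of page)  |
 *  +--------------------+----------------+~~~~~~+----------------+
 */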
 +
 +static struct {
 +      struct list_head list;
 +      unsigned count;
 +} zbud_unbuddied[NCHUNKS];
 +/* list N contains pages with N chunks USED and NCHUNKS-N unused */
 +/* element 0 is never used but optimizing that isn't worth it */
 +static unsigned long zbud_cumul_chunk_counts[NCHUNKS];
 +
 +struct list_head zbud_buddied_list;
 +static unsigned long zcache_zbud_buddied_count;
 +
 +/* protects the buddied list and all unbuddied lists */
 +static DEFINE_SPINLOCK(zbud_budlists_spinlock);
 +
 +static atomic_t zcache_zbud_curr_raw_pages;
 +static atomic_t zcache_zbud_curr_zpages;
 +static unsigned long zcache_zbud_curr_zbytes;
 +static unsigned long zcache_zbud_cumul_zpages;
 +static unsigned long zcache_zbud_cumul_zbytes;
 +static unsigned long zcache_compress_poor;
 +static unsigned long zcache_policy_percent_exceeded;
 +static unsigned long zcache_mean_compress_poor;
 +
 +/*
 + * RAMster counters
 + * - Remote pages are pages with a local pampd but the data is remote
 + * - Foreign pages are pages stored locally but belonging to another node
 + */
 +static atomic_t ramster_remote_pers_pages = ATOMIC_INIT(0);
 +static unsigned long ramster_pers_remotify_enable;
 +static unsigned long ramster_eph_remotify_enable;
 +static unsigned long ramster_eph_pages_remoted;
 +static unsigned long ramster_eph_pages_remote_failed;
 +static unsigned long ramster_pers_pages_remoted;
 +static unsigned long ramster_pers_pages_remote_failed;
 +static unsigned long ramster_pers_pages_remote_nomem;
 +static unsigned long ramster_remote_objects_flushed;
 +static unsigned long ramster_remote_object_flushes_failed;
 +static unsigned long ramster_remote_pages_flushed;
 +static unsigned long ramster_remote_page_flushes_failed;
 +static unsigned long ramster_remote_eph_pages_succ_get;
 +static unsigned long ramster_remote_pers_pages_succ_get;
 +static unsigned long ramster_remote_eph_pages_unsucc_get;
 +static unsigned long ramster_remote_pers_pages_unsucc_get;
 +static atomic_t ramster_curr_flnode_count = ATOMIC_INIT(0);
 +static unsigned long ramster_curr_flnode_count_max;
 +static atomic_t ramster_foreign_eph_pampd_count = ATOMIC_INIT(0);
 +static unsigned long ramster_foreign_eph_pampd_count_max;
 +static atomic_t ramster_foreign_pers_pampd_count = ATOMIC_INIT(0);
 +static unsigned long ramster_foreign_pers_pampd_count_max;
 +
 +/* forward references */
 +static void *zcache_get_free_page(void);
 +static void zcache_free_page(void *p);
 +
 +/*
 + * zbud helper functions
 + */
 +
 +static inline unsigned zbud_max_buddy_size(void)
 +{
 +      return MAX_CHUNK << CHUNK_SHIFT;
 +}
 +
 +static inline unsigned zbud_size_to_chunks(unsigned size)
 +{
 +      BUG_ON(size == 0 || size > zbud_max_buddy_size());
 +      return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
 +}
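
For a concrete feel for the rounding: with CHUNK_SHIFT = 6 every buddy is charged in 64-byte chunks, so for example (sizes picked arbitrarily, arithmetic follows the function above):

/*
 *   zbud_size_to_chunks(1)   == (1   + 63) >> 6 == 1
 *   zbud_size_to_chunks(100) == (100 + 63) >> 6 == 2
 *   zbud_size_to_chunks(832) == (832 + 63) >> 6 == 13
 *
 * The largest single buddy that fits is zbud_max_buddy_size(),
 * i.e. MAX_CHUNK << CHUNK_SHIFT bytes.
 */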
 +
 +static inline int zbud_budnum(struct zbud_hdr *zh)
 +{
 +      unsigned offset = (unsigned long)zh & (PAGE_SIZE - 1);
 +      struct zbud_page *zbpg = NULL;
 +      unsigned budnum = -1U;
 +      int i;
 +
 +      for (i = 0; i < ZBUD_MAX_BUDS; i++)
 +              if (offset == offsetof(typeof(*zbpg), buddy[i])) {
 +                      budnum = i;
 +                      break;
 +              }
 +      BUG_ON(budnum == -1U);
 +      return budnum;
 +}
 +
 +static char *zbud_data(struct zbud_hdr *zh, unsigned size)
 +{
 +      struct zbud_page *zbpg;
 +      char *p;
 +      unsigned budnum;
 +
 +      ASSERT_SENTINEL(zh, ZBH);
 +      budnum = zbud_budnum(zh);
 +      BUG_ON(size == 0 || size > zbud_max_buddy_size());
 +      zbpg = container_of(zh, struct zbud_page, buddy[budnum]);
 +      ASSERT_SPINLOCK(&zbpg->lock);
 +      p = (char *)zbpg;
 +      if (budnum == 0)
 +              p += ((sizeof(struct zbud_page) + CHUNK_SIZE - 1) &
 +                                                      CHUNK_MASK);
 +      else if (budnum == 1)
 +              p += PAGE_SIZE - ((size + CHUNK_SIZE - 1) & CHUNK_MASK);
 +      return p;
 +}
 +
 +static void zbud_copy_from_pampd(char *data, size_t *size, struct zbud_hdr *zh)
 +{
 +      struct zbud_page *zbpg;
 +      char *p;
 +      unsigned budnum;
 +
 +      ASSERT_SENTINEL(zh, ZBH);
 +      budnum = zbud_budnum(zh);
 +      zbpg = container_of(zh, struct zbud_page, buddy[budnum]);
 +      spin_lock(&zbpg->lock);
 +      BUG_ON(zh->size > *size);
 +      p = (char *)zbpg;
 +      if (budnum == 0)
 +              p += ((sizeof(struct zbud_page) + CHUNK_SIZE - 1) &
 +                                                      CHUNK_MASK);
 +      else if (budnum == 1)
 +              p += PAGE_SIZE - ((zh->size + CHUNK_SIZE - 1) & CHUNK_MASK);
 +      /* client should be filled in by caller */
 +      memcpy(data, p, zh->size);
 +      *size = zh->size;
 +      spin_unlock(&zbpg->lock);
 +}
 +
 +/*
 + * zbud raw page management
 + */
 +
 +static struct zbud_page *zbud_alloc_raw_page(void)
 +{
 +      struct zbud_page *zbpg = NULL;
 +      struct zbud_hdr *zh0, *zh1;
 +
 +      zbpg = zcache_get_free_page();
 +      if (likely(zbpg != NULL)) {
 +              INIT_LIST_HEAD(&zbpg->bud_list);
 +              zh0 = &zbpg->buddy[0]; zh1 = &zbpg->buddy[1];
 +              spin_lock_init(&zbpg->lock);
 +              atomic_inc(&zcache_zbud_curr_raw_pages);
 +              INIT_LIST_HEAD(&zbpg->bud_list);
 +              SET_SENTINEL(zbpg, ZBPG);
 +              zh0->size = 0; zh1->size = 0;
 +              tmem_oid_set_invalid(&zh0->oid);
 +              tmem_oid_set_invalid(&zh1->oid);
 +      }
 +      return zbpg;
 +}
 +
 +static void zbud_free_raw_page(struct zbud_page *zbpg)
 +{
 +      struct zbud_hdr *zh0 = &zbpg->buddy[0], *zh1 = &zbpg->buddy[1];
 +
 +      ASSERT_SENTINEL(zbpg, ZBPG);
 +      BUG_ON(!list_empty(&zbpg->bud_list));
 +      ASSERT_SPINLOCK(&zbpg->lock);
 +      BUG_ON(zh0->size != 0 || tmem_oid_valid(&zh0->oid));
 +      BUG_ON(zh1->size != 0 || tmem_oid_valid(&zh1->oid));
 +      INVERT_SENTINEL(zbpg, ZBPG);
 +      spin_unlock(&zbpg->lock);
 +      atomic_dec(&zcache_zbud_curr_raw_pages);
 +      zcache_free_page(zbpg);
 +}
 +
 +/*
 + * core zbud handling routines
 + */
 +
 +static unsigned zbud_free(struct zbud_hdr *zh)
 +{
 +      unsigned size;
 +
 +      ASSERT_SENTINEL(zh, ZBH);
 +      BUG_ON(!tmem_oid_valid(&zh->oid));
 +      size = zh->size;
 +      BUG_ON(zh->size == 0 || zh->size > zbud_max_buddy_size());
 +      zh->size = 0;
 +      tmem_oid_set_invalid(&zh->oid);
 +      INVERT_SENTINEL(zh, ZBH);
 +      zcache_zbud_curr_zbytes -= size;
 +      atomic_dec(&zcache_zbud_curr_zpages);
 +      return size;
 +}
 +
 +static void zbud_free_and_delist(struct zbud_hdr *zh)
 +{
 +      unsigned chunks;
 +      struct zbud_hdr *zh_other;
 +      unsigned budnum = zbud_budnum(zh), size;
 +      struct zbud_page *zbpg =
 +              container_of(zh, struct zbud_page, buddy[budnum]);
 +
 +      /* FIXME: this should be a BUG_ON, but the pool destruction path
 +       * tmem_destroy_pool()->tmem_pampd_destroy_all_in_obj()->
 +       * tmem_objnode_node_destroy()->zcache_pampd_free() does not
 +       * disable interrupts */
 +      WARN_ON(!irqs_disabled());
 +      spin_lock(&zbpg->lock);
 +      if (list_empty(&zbpg->bud_list)) {
 +              /* ignore zombie page... see zbud_evict_pages() */
 +              spin_unlock(&zbpg->lock);
 +              return;
 +      }
 +      size = zbud_free(zh);
 +      ASSERT_SPINLOCK(&zbpg->lock);
 +      zh_other = &zbpg->buddy[(budnum == 0) ? 1 : 0];
 +      if (zh_other->size == 0) { /* was unbuddied: unlist and free */
 +              chunks = zbud_size_to_chunks(size);
 +              spin_lock(&zbud_budlists_spinlock);
 +              BUG_ON(list_empty(&zbud_unbuddied[chunks].list));
 +              list_del_init(&zbpg->bud_list);
 +              zbud_unbuddied[chunks].count--;
 +              spin_unlock(&zbud_budlists_spinlock);
 +              zbud_free_raw_page(zbpg);
 +      } else { /* was buddied: move remaining buddy to unbuddied list */
 +              chunks = zbud_size_to_chunks(zh_other->size);
 +              spin_lock(&zbud_budlists_spinlock);
 +              list_del_init(&zbpg->bud_list);
 +              zcache_zbud_buddied_count--;
 +              list_add_tail(&zbpg->bud_list, &zbud_unbuddied[chunks].list);
 +              zbud_unbuddied[chunks].count++;
 +              spin_unlock(&zbud_budlists_spinlock);
 +              spin_unlock(&zbpg->lock);
 +      }
 +}
 +
 +static struct zbud_hdr *zbud_create(uint16_t client_id, uint16_t pool_id,
 +                                      struct tmem_oid *oid,
 +                                      uint32_t index, struct page *page,
 +                                      void *cdata, unsigned size)
 +{
 +      struct zbud_hdr *zh0, *zh1, *zh = NULL;
 +      struct zbud_page *zbpg = NULL, *ztmp;
 +      unsigned nchunks;
 +      char *to;
 +      int i, found_good_buddy = 0;
 +
 +      nchunks = zbud_size_to_chunks(size);
 +      for (i = MAX_CHUNK - nchunks + 1; i > 0; i--) {
 +              spin_lock(&zbud_budlists_spinlock);
 +              if (!list_empty(&zbud_unbuddied[i].list)) {
 +                      list_for_each_entry_safe(zbpg, ztmp,
 +                                  &zbud_unbuddied[i].list, bud_list) {
 +                              if (spin_trylock(&zbpg->lock)) {
 +                                      found_good_buddy = i;
 +                                      goto found_unbuddied;
 +                              }
 +                      }
 +              }
 +              spin_unlock(&zbud_budlists_spinlock);
 +      }
 +      /* didn't find a good buddy, try allocating a new page */
 +      zbpg = zbud_alloc_raw_page();
 +      if (unlikely(zbpg == NULL))
 +              goto out;
 +      /* ok, have a page, now compress the data before taking locks */
 +      spin_lock(&zbud_budlists_spinlock);
 +      spin_lock(&zbpg->lock);
 +      list_add_tail(&zbpg->bud_list, &zbud_unbuddied[nchunks].list);
 +      zbud_unbuddied[nchunks].count++;
 +      zh = &zbpg->buddy[0];
 +      goto init_zh;
 +
 +found_unbuddied:
 +      ASSERT_SPINLOCK(&zbpg->lock);
 +      zh0 = &zbpg->buddy[0]; zh1 = &zbpg->buddy[1];
 +      BUG_ON(!((zh0->size == 0) ^ (zh1->size == 0)));
 +      if (zh0->size != 0) { /* buddy0 in use, buddy1 is vacant */
 +              ASSERT_SENTINEL(zh0, ZBH);
 +              zh = zh1;
 +      } else if (zh1->size != 0) { /* buddy1 in use, buddy0 is vacant */
 +              ASSERT_SENTINEL(zh1, ZBH);
 +              zh = zh0;
 +      } else
 +              BUG();
 +      list_del_init(&zbpg->bud_list);
 +      zbud_unbuddied[found_good_buddy].count--;
 +      list_add_tail(&zbpg->bud_list, &zbud_buddied_list);
 +      zcache_zbud_buddied_count++;
 +
 +init_zh:
 +      SET_SENTINEL(zh, ZBH);
 +      zh->size = size;
 +      zh->index = index;
 +      zh->oid = *oid;
 +      zh->pool_id = pool_id;
 +      zh->client_id = client_id;
 +      to = zbud_data(zh, size);
 +      memcpy(to, cdata, size);
 +      spin_unlock(&zbpg->lock);
 +      spin_unlock(&zbud_budlists_spinlock);
 +      zbud_cumul_chunk_counts[nchunks]++;
 +      atomic_inc(&zcache_zbud_curr_zpages);
 +      zcache_zbud_cumul_zpages++;
 +      zcache_zbud_curr_zbytes += size;
 +      zcache_zbud_cumul_zbytes += size;
 +out:
 +      return zh;
 +}
 +
 +static int zbud_decompress(struct page *page, struct zbud_hdr *zh)
 +{
 +      struct zbud_page *zbpg;
 +      unsigned budnum = zbud_budnum(zh);
 +      size_t out_len = PAGE_SIZE;
 +      char *to_va, *from_va;
 +      unsigned size;
 +      int ret = 0;
 +
 +      zbpg = container_of(zh, struct zbud_page, buddy[budnum]);
 +      spin_lock(&zbpg->lock);
 +      if (list_empty(&zbpg->bud_list)) {
 +              /* ignore zombie page... see zbud_evict_pages() */
 +              ret = -EINVAL;
 +              goto out;
 +      }
 +      ASSERT_SENTINEL(zh, ZBH);
 +      BUG_ON(zh->size == 0 || zh->size > zbud_max_buddy_size());
-       to_va = kmap_atomic(page, KM_USER0);
++      to_va = kmap_atomic(page);
 +      size = zh->size;
 +      from_va = zbud_data(zh, size);
 +      ret = lzo1x_decompress_safe(from_va, size, to_va, &out_len);
 +      BUG_ON(ret != LZO_E_OK);
 +      BUG_ON(out_len != PAGE_SIZE);
-       kunmap_atomic(to_va, KM_USER0);
++      kunmap_atomic(to_va);
 +out:
 +      spin_unlock(&zbpg->lock);
 +      return ret;
 +}
 +
 +/*
 + * The following routines handle shrinking of ephemeral pages by evicting
 + * pages "least valuable" first.
 + */
 +
 +static unsigned long zcache_evicted_raw_pages;
 +static unsigned long zcache_evicted_buddied_pages;
 +static unsigned long zcache_evicted_unbuddied_pages;
 +
 +static struct tmem_pool *zcache_get_pool_by_id(uint16_t cli_id,
 +                                              uint16_t poolid);
 +static void zcache_put_pool(struct tmem_pool *pool);
 +
 +/*
 + * Flush and free all zbuds in a zbpg, then free the pageframe
 + */
 +static void zbud_evict_zbpg(struct zbud_page *zbpg)
 +{
 +      struct zbud_hdr *zh;
 +      int i, j;
 +      uint32_t pool_id[ZBUD_MAX_BUDS], client_id[ZBUD_MAX_BUDS];
 +      uint32_t index[ZBUD_MAX_BUDS];
 +      struct tmem_oid oid[ZBUD_MAX_BUDS];
 +      struct tmem_pool *pool;
 +      unsigned long flags;
 +
 +      ASSERT_SPINLOCK(&zbpg->lock);
 +      for (i = 0, j = 0; i < ZBUD_MAX_BUDS; i++) {
 +              zh = &zbpg->buddy[i];
 +              if (zh->size) {
 +                      client_id[j] = zh->client_id;
 +                      pool_id[j] = zh->pool_id;
 +                      oid[j] = zh->oid;
 +                      index[j] = zh->index;
 +                      j++;
 +              }
 +      }
 +      spin_unlock(&zbpg->lock);
 +      for (i = 0; i < j; i++) {
 +              pool = zcache_get_pool_by_id(client_id[i], pool_id[i]);
 +              BUG_ON(pool == NULL);
 +              local_irq_save(flags);
 +              /* these flushes should dispose of any local storage */
 +              tmem_flush_page(pool, &oid[i], index[i]);
 +              local_irq_restore(flags);
 +              zcache_put_pool(pool);
 +      }
 +}
 +
 +/*
 + * Free nr pages.  This code is funky because we want to hold the locks
 + * protecting various lists for as short a time as possible, and in some
 + * circumstances the list may change asynchronously when the list lock is
 + * not held.  In some cases we also trylock not only to avoid waiting on a
 + * page in use by another cpu, but also to avoid potential deadlock due to
 + * lock inversion.
 + */
 +static void zbud_evict_pages(int nr)
 +{
 +      struct zbud_page *zbpg;
 +      int i, newly_unused_pages = 0;
 +
 +
 +      /* now try freeing unbuddied pages, starting with least space avail */
 +      for (i = 0; i < MAX_CHUNK; i++) {
 +retry_unbud_list_i:
 +              spin_lock_bh(&zbud_budlists_spinlock);
 +              if (list_empty(&zbud_unbuddied[i].list)) {
 +                      spin_unlock_bh(&zbud_budlists_spinlock);
 +                      continue;
 +              }
 +              list_for_each_entry(zbpg, &zbud_unbuddied[i].list, bud_list) {
 +                      if (unlikely(!spin_trylock(&zbpg->lock)))
 +                              continue;
 +                      zbud_unbuddied[i].count--;
 +                      spin_unlock(&zbud_budlists_spinlock);
 +                      zcache_evicted_unbuddied_pages++;
 +                      /* want budlists unlocked when doing zbpg eviction */
 +                      zbud_evict_zbpg(zbpg);
 +                      newly_unused_pages++;
 +                      local_bh_enable();
 +                      if (--nr <= 0)
 +                              goto evict_unused;
 +                      goto retry_unbud_list_i;
 +              }
 +              spin_unlock_bh(&zbud_budlists_spinlock);
 +      }
 +
 +      /* as a last resort, free buddied pages */
 +retry_bud_list:
 +      spin_lock_bh(&zbud_budlists_spinlock);
 +      if (list_empty(&zbud_buddied_list)) {
 +              spin_unlock_bh(&zbud_budlists_spinlock);
 +              goto evict_unused;
 +      }
 +      list_for_each_entry(zbpg, &zbud_buddied_list, bud_list) {
 +              if (unlikely(!spin_trylock(&zbpg->lock)))
 +                      continue;
 +              zcache_zbud_buddied_count--;
 +              spin_unlock(&zbud_budlists_spinlock);
 +              zcache_evicted_buddied_pages++;
 +              /* want budlists unlocked when doing zbpg eviction */
 +              zbud_evict_zbpg(zbpg);
 +              newly_unused_pages++;
 +              local_bh_enable();
 +              if (--nr <= 0)
 +                      goto evict_unused;
 +              goto retry_bud_list;
 +      }
 +      spin_unlock_bh(&zbud_budlists_spinlock);
 +
 +evict_unused:
 +      return;
 +}
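
The comment above zbud_evict_pages() describes the trylock-and-rescan discipline: probe each page with a trylock, unlink it while the list lock is held, drop the list lock before the heavy eviction work, then restart the scan because the list may have changed meanwhile. A minimal userspace analogue of that control flow (pthread mutexes standing in for spinlocks, all names invented) looks like this:

/*
 * Sketch of the trylock / unlink / drop-list-lock / rescan pattern; this is
 * an illustrative userspace analogue, not the driver's code.
 */
#include <pthread.h>
#include <stdio.h>

struct sk_node {
	struct sk_node *next;
	pthread_mutex_t lock;
	int id;
};

static pthread_mutex_t sk_list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct sk_node *sk_head;

/* stand-in for zbud_evict_zbpg(): entered with n->lock held, drops it */
static void sk_evict(struct sk_node *n)
{
	printf("evicting node %d\n", n->id);
	pthread_mutex_unlock(&n->lock);
}

static void sk_evict_some(int nr)
{
restart:
	pthread_mutex_lock(&sk_list_lock);
	for (struct sk_node **pp = &sk_head; *pp != NULL; pp = &(*pp)->next) {
		struct sk_node *n = *pp;

		if (pthread_mutex_trylock(&n->lock))
			continue;                      /* busy: skip this entry */
		*pp = n->next;                         /* unlink under the list lock */
		pthread_mutex_unlock(&sk_list_lock);   /* drop before the heavy work */
		sk_evict(n);
		if (--nr <= 0)
			return;
		goto restart;                          /* list may have changed */
	}
	pthread_mutex_unlock(&sk_list_lock);
}

int main(void)
{
	static struct sk_node nodes[3];

	for (int i = 2; i >= 0; i--) {
		nodes[i].id = i;
		pthread_mutex_init(&nodes[i].lock, NULL);
		nodes[i].next = sk_head;
		sk_head = &nodes[i];
	}
	sk_evict_some(2);
	return 0;
}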
 +
 +static DEFINE_PER_CPU(unsigned char *, zcache_remoteputmem);
 +
 +static int zbud_remotify_zbud(struct tmem_xhandle *xh, char *data,
 +                              size_t size)
 +{
 +      struct tmem_pool *pool;
 +      int i, remotenode, ret = -1;
 +      unsigned char cksum, *p;
 +      unsigned long flags;
 +
 +      for (p = data, cksum = 0, i = 0; i < size; i++)
 +              cksum += *p++;
 +      ret = ramster_remote_put(xh, data, size, true, &remotenode);
 +      if (ret == 0) {
 +              /* data was successfully remoted so change the local version
 +               * to point to the remote node where it landed */
 +              pool = zcache_get_pool_by_id(LOCAL_CLIENT, xh->pool_id);
 +              BUG_ON(pool == NULL);
 +              local_irq_save(flags);
 +              /* tmem_replace will also free up any local space */
 +              (void)tmem_replace(pool, &xh->oid, xh->index,
 +                      pampd_make_remote(remotenode, size, cksum));
 +              local_irq_restore(flags);
 +              zcache_put_pool(pool);
 +              ramster_eph_pages_remoted++;
 +              ret = 0;
 +      } else
 +              ramster_eph_pages_remote_failed++;
 +      return ret;
 +}
 +
 +static int zbud_remotify_zbpg(struct zbud_page *zbpg)
 +{
 +      struct zbud_hdr *zh1, *zh2 = NULL;
 +      struct tmem_xhandle xh1, xh2 = { 0 };
 +      char *data1 = NULL, *data2 = NULL;
 +      size_t size1 = 0, size2 = 0;
 +      int ret = 0;
 +      unsigned char *tmpmem = __get_cpu_var(zcache_remoteputmem);
 +
 +      ASSERT_SPINLOCK(&zbpg->lock);
 +      if (zbpg->buddy[0].size == 0)
 +              zh1 = &zbpg->buddy[1];
 +      else if (zbpg->buddy[1].size == 0)
 +              zh1 = &zbpg->buddy[0];
 +      else {
 +              zh1 = &zbpg->buddy[0];
 +              zh2 = &zbpg->buddy[1];
 +      }
 +      /* don't remotify pages that are already remotified */
 +      if (zh1->client_id != LOCAL_CLIENT)
 +              zh1 = NULL;
 +      if ((zh2 != NULL) && (zh2->client_id != LOCAL_CLIENT))
 +              zh2 = NULL;
 +
 +      /* copy the data and metadata so can release lock */
 +      if (zh1 != NULL) {
 +              xh1.client_id = zh1->client_id;
 +              xh1.pool_id = zh1->pool_id;
 +              xh1.oid = zh1->oid;
 +              xh1.index = zh1->index;
 +              size1 = zh1->size;
 +              data1 = zbud_data(zh1, size1);
 +              memcpy(tmpmem, zbud_data(zh1, size1), size1);
 +              data1 = tmpmem;
 +              tmpmem += size1;
 +      }
 +      if (zh2 != NULL) {
 +              xh2.client_id = zh2->client_id;
 +              xh2.pool_id = zh2->pool_id;
 +              xh2.oid = zh2->oid;
 +              xh2.index = zh2->index;
 +              size2 = zh2->size;
 +              memcpy(tmpmem, zbud_data(zh2, size2), size2);
 +              data2 = tmpmem;
 +      }
 +      spin_unlock(&zbpg->lock);
 +      preempt_enable();
 +
 +      /* OK, no locks held anymore, remotify one or both zbuds */
 +      if (zh1 != NULL)
 +              ret = zbud_remotify_zbud(&xh1, data1, size1);
 +      if (zh2 != NULL)
 +              ret |= zbud_remotify_zbud(&xh2, data2, size2);
 +      return ret;
 +}
 +
 +void zbud_remotify_pages(int nr)
 +{
 +      struct zbud_page *zbpg;
 +      int i, ret;
 +
 +      /*
 +       * for now just try remotifying unbuddied pages, starting with
 +       * least space avail
 +       */
 +      for (i = 0; i < MAX_CHUNK; i++) {
 +retry_unbud_list_i:
 +              preempt_disable();  /* enable in zbud_remotify_zbpg */
 +              spin_lock_bh(&zbud_budlists_spinlock);
 +              if (list_empty(&zbud_unbuddied[i].list)) {
 +                      spin_unlock_bh(&zbud_budlists_spinlock);
 +                      preempt_enable();
 +                      continue; /* next i in for loop */
 +              }
 +              list_for_each_entry(zbpg, &zbud_unbuddied[i].list, bud_list) {
 +                      if (unlikely(!spin_trylock(&zbpg->lock)))
 +                              continue; /* next list_for_each_entry */
 +                      zbud_unbuddied[i].count--;
 +                      /* want budlists unlocked when doing zbpg remotify */
 +                      spin_unlock_bh(&zbud_budlists_spinlock);
 +                      ret = zbud_remotify_zbpg(zbpg);
 +                      /* preemption is re-enabled in zbud_remotify_zbpg */
 +                      if (ret == 0) {
 +                              if (--nr <= 0)
 +                                      goto out;
 +                              goto retry_unbud_list_i;
 +                      }
 +                      /* if fail to remotify any page, quit */
 +                      pr_err("TESTING zbud_remotify_pages failed on page,"
 +                              " trying to re-add\n");
 +                      spin_lock_bh(&zbud_budlists_spinlock);
 +                      spin_lock(&zbpg->lock);
 +                      list_add_tail(&zbpg->bud_list, &zbud_unbuddied[i].list);
 +                      zbud_unbuddied[i].count++;
 +                      spin_unlock(&zbpg->lock);
 +                      spin_unlock_bh(&zbud_budlists_spinlock);
 +                      pr_err("TESTING zbud_remotify_pages failed on page,"
 +                              " finished re-add\n");
 +                      goto out;
 +              }
 +              spin_unlock_bh(&zbud_budlists_spinlock);
 +              preempt_enable();
 +      }
 +
 +next_buddied_zbpg:
 +      preempt_disable();  /* enable in zbud_remotify_zbpg */
 +      spin_lock_bh(&zbud_budlists_spinlock);
 +      if (list_empty(&zbud_buddied_list))
 +              goto unlock_out;
 +      list_for_each_entry(zbpg, &zbud_buddied_list, bud_list) {
 +              if (unlikely(!spin_trylock(&zbpg->lock)))
 +                      continue; /* next list_for_each_entry */
 +              zcache_zbud_buddied_count--;
 +              /* want budlists unlocked when doing zbpg remotify */
 +              spin_unlock_bh(&zbud_budlists_spinlock);
 +              ret = zbud_remotify_zbpg(zbpg);
 +              /* preemption is re-enabled in zbud_remotify_zbpg */
 +              if (ret == 0) {
 +                      if (--nr <= 0)
 +                              goto out;
 +                      goto next_buddied_zbpg;
 +              }
 +              /* if fail to remotify any page, quit */
 +              pr_err("TESTING zbud_remotify_pages failed on BUDDIED page,"
 +                      " trying to re-add\n");
 +              spin_lock_bh(&zbud_budlists_spinlock);
 +              spin_lock(&zbpg->lock);
 +              list_add_tail(&zbpg->bud_list, &zbud_buddied_list);
 +              zcache_zbud_buddied_count++;
 +              spin_unlock(&zbpg->lock);
 +              spin_unlock_bh(&zbud_budlists_spinlock);
 +              pr_err("TESTING zbud_remotify_pages failed on BUDDIED page,"
 +                      " finished re-add\n");
 +              goto out;
 +      }
 +unlock_out:
 +      spin_unlock_bh(&zbud_budlists_spinlock);
 +      preempt_enable();
 +out:
 +      return;
 +}
 +
 +/* the "flush list" asynchronously collects pages to remotely flush */
 +#define FLUSH_ENTIRE_OBJECT ((uint32_t)-1)
 +static void ramster_flnode_free(struct flushlist_node *,
 +                              struct tmem_pool *);
 +
 +static void zcache_remote_flush_page(struct flushlist_node *flnode)
 +{
 +      struct tmem_xhandle *xh;
 +      int remotenode, ret;
 +
 +      preempt_disable();
 +      xh = &flnode->xh;
 +      remotenode = flnode->xh.client_id;
 +      ret = ramster_remote_flush(xh, remotenode);
 +      if (ret >= 0)
 +              ramster_remote_pages_flushed++;
 +      else
 +              ramster_remote_page_flushes_failed++;
 +      preempt_enable_no_resched();
 +      ramster_flnode_free(flnode, NULL);
 +}
 +
 +static void zcache_remote_flush_object(struct flushlist_node *flnode)
 +{
 +      struct tmem_xhandle *xh;
 +      int remotenode, ret;
 +
 +      preempt_disable();
 +      xh = &flnode->xh;
 +      remotenode = flnode->xh.client_id;
 +      ret = ramster_remote_flush_object(xh, remotenode);
 +      if (ret >= 0)
 +              ramster_remote_objects_flushed++;
 +      else
 +              ramster_remote_object_flushes_failed++;
 +      preempt_enable_no_resched();
 +      ramster_flnode_free(flnode, NULL);
 +}
 +
 +static void zcache_remote_eph_put(struct zbud_hdr *zbud)
 +{
 +      /* FIXME */
 +}
 +
 +static void zcache_remote_pers_put(struct zv_hdr *zv)
 +{
 +      struct tmem_xhandle xh;
 +      uint16_t size;
 +      bool ephemeral;
 +      int remotenode, ret = -1;
 +      char *data;
 +      struct tmem_pool *pool;
 +      unsigned long flags;
 +      unsigned char cksum;
 +      char *p;
 +      int i;
 +      unsigned char *tmpmem = __get_cpu_var(zcache_remoteputmem);
 +
 +      ASSERT_SENTINEL(zv, ZVH);
 +      BUG_ON(zv->client_id != LOCAL_CLIENT);
 +      local_bh_disable();
 +      xh.client_id = zv->client_id;
 +      xh.pool_id = zv->pool_id;
 +      xh.oid = zv->oid;
 +      xh.index = zv->index;
 +      size = xv_get_object_size(zv) - sizeof(*zv);
 +      BUG_ON(size == 0 || size > zv_max_page_size);
 +      data = (char *)zv + sizeof(*zv);
 +      for (p = data, cksum = 0, i = 0; i < size; i++)
 +              cksum += *p++;
 +      memcpy(tmpmem, data, size);
 +      data = tmpmem;
 +      pool = zcache_get_pool_by_id(zv->client_id, zv->pool_id);
 +      ephemeral = is_ephemeral(pool);
 +      zcache_put_pool(pool);
 +      /* now OK to release lock set in caller */
 +      spin_unlock(&zcache_rem_op_list_lock);
 +      local_bh_enable();
 +      preempt_disable();
 +      ret = ramster_remote_put(&xh, data, size, ephemeral, &remotenode);
 +      preempt_enable_no_resched();
 +      if (ret != 0) {
 +              /*
 +               * This is some form of a memory leak... if the remote put
 +               * fails, there will never be another attempt to remotify
 +               * this page.  But since we've dropped the zv pointer,
 +               * the page may have been freed or the data replaced
 +               * so we can't just "put it back" in the remote op list.
 +               * Even if we could, not sure where to put it in the list
 +               * because there may be flushes that must be strictly
 +               * ordered vs the put.  So leave this as a FIXME for now.
 +               * But count them so we know if it becomes a problem.
 +               */
 +              ramster_pers_pages_remote_failed++;
 +              goto out;
 +      } else
 +              atomic_inc(&ramster_remote_pers_pages);
 +      ramster_pers_pages_remoted++;
 +      /*
 +       * data was successfully remoted so change the local version to
 +       * point to the remote node where it landed
 +       */
 +      local_bh_disable();
 +      pool = zcache_get_pool_by_id(LOCAL_CLIENT, xh.pool_id);
 +      local_irq_save(flags);
 +      (void)tmem_replace(pool, &xh.oid, xh.index,
 +                      pampd_make_remote(remotenode, size, cksum));
 +      local_irq_restore(flags);
 +      zcache_put_pool(pool);
 +      local_bh_enable();
 +out:
 +      return;
 +}
 +
 +static void zcache_do_remotify_ops(int nr)
 +{
 +      struct ramster_remotify_hdr *rem_op;
 +      union remotify_list_node *u;
 +
 +      while (1) {
 +              if (!nr)
 +                      goto out;
 +              spin_lock(&zcache_rem_op_list_lock);
 +              if (list_empty(&zcache_rem_op_list)) {
 +                      spin_unlock(&zcache_rem_op_list_lock);
 +                      goto out;
 +              }
 +              rem_op = list_first_entry(&zcache_rem_op_list,
 +                              struct ramster_remotify_hdr, list);
 +              list_del_init(&rem_op->list);
 +              if (rem_op->op != RAMSTER_REMOTIFY_PERS_PUT)
 +                      spin_unlock(&zcache_rem_op_list_lock);
 +              u = (union remotify_list_node *)rem_op;
 +              switch (rem_op->op) {
 +              case RAMSTER_REMOTIFY_EPH_PUT:
 +                      BUG();
 +                      zcache_remote_eph_put((struct zbud_hdr *)rem_op);
 +                      break;
 +              case RAMSTER_REMOTIFY_PERS_PUT:
 +                      zcache_remote_pers_put((struct zv_hdr *)rem_op);
 +                      break;
 +              case RAMSTER_REMOTIFY_FLUSH_PAGE:
 +                      zcache_remote_flush_page((struct flushlist_node *)u);
 +                      break;
 +              case RAMSTER_REMOTIFY_FLUSH_OBJ:
 +                      zcache_remote_flush_object((struct flushlist_node *)u);
 +                      break;
 +              default:
 +                      BUG();
 +              }
 +      }
 +out:
 +      return;
 +}
 +
 +/*
 + * Communicate interface revision with userspace
 + */
 +#include "cluster/ramster_nodemanager.h"
 +static unsigned long ramster_interface_revision  = R2NM_API_VERSION;
 +
 +/*
 + * For now, just push over a few pages every few seconds to
 + * ensure that it basically works
 + */
 +static struct workqueue_struct *ramster_remotify_workqueue;
 +static void ramster_remotify_process(struct work_struct *work);
 +static DECLARE_DELAYED_WORK(ramster_remotify_worker,
 +              ramster_remotify_process);
 +
 +static void ramster_remotify_queue_delayed_work(unsigned long delay)
 +{
 +      if (!queue_delayed_work(ramster_remotify_workqueue,
 +                              &ramster_remotify_worker, delay))
 +              pr_err("ramster_remotify: bad workqueue\n");
 +}
 +
 +
 +static int use_frontswap;
 +static int use_cleancache;
 +static int ramster_remote_target_nodenum = -1;
 +static void ramster_remotify_process(struct work_struct *work)
 +{
 +      static bool remotify_in_progress;
 +
 +      BUG_ON(irqs_disabled());
 +      if (remotify_in_progress)
 +              ramster_remotify_queue_delayed_work(HZ);
 +      else if (ramster_remote_target_nodenum != -1) {
 +              remotify_in_progress = true;
 +#ifdef CONFIG_CLEANCACHE
 +      if (use_cleancache && ramster_eph_remotify_enable)
 +              zbud_remotify_pages(5000); /* FIXME is this a good number? */
 +#endif
 +#ifdef CONFIG_FRONTSWAP
 +      if (use_frontswap && ramster_pers_remotify_enable)
 +              zcache_do_remotify_ops(500); /* FIXME is this a good number? */
 +#endif
 +              remotify_in_progress = false;
 +              ramster_remotify_queue_delayed_work(HZ);
 +      }
 +}
 +
 +static void ramster_remotify_init(void)
 +{
 +      unsigned long n = 60UL;
 +      ramster_remotify_workqueue =
 +              create_singlethread_workqueue("ramster_remotify");
 +      ramster_remotify_queue_delayed_work(n * HZ);
 +}
 +
 +
 +static void zbud_init(void)
 +{
 +      int i;
 +
 +      INIT_LIST_HEAD(&zbud_buddied_list);
 +      zcache_zbud_buddied_count = 0;
 +      for (i = 0; i < NCHUNKS; i++) {
 +              INIT_LIST_HEAD(&zbud_unbuddied[i].list);
 +              zbud_unbuddied[i].count = 0;
 +      }
 +}
 +
 +#ifdef CONFIG_SYSFS
 +/*
 + * These sysfs routines show a nice distribution of how many zbpg's are
 + * currently (and have ever been placed) in each unbuddied list.  It's fun
 + * to watch but can probably go away before final merge.
 + */
 +static int zbud_show_unbuddied_list_counts(char *buf)
 +{
 +      int i;
 +      char *p = buf;
 +
 +      for (i = 0; i < NCHUNKS; i++)
 +              p += sprintf(p, "%u ", zbud_unbuddied[i].count);
 +      return p - buf;
 +}
 +
 +static int zbud_show_cumul_chunk_counts(char *buf)
 +{
 +      unsigned long i, chunks = 0, total_chunks = 0, sum_total_chunks = 0;
 +      unsigned long total_chunks_lte_21 = 0, total_chunks_lte_32 = 0;
 +      unsigned long total_chunks_lte_42 = 0;
 +      char *p = buf;
 +
 +      for (i = 0; i < NCHUNKS; i++) {
 +              p += sprintf(p, "%lu ", zbud_cumul_chunk_counts[i]);
 +              chunks += zbud_cumul_chunk_counts[i];
 +              total_chunks += zbud_cumul_chunk_counts[i];
 +              sum_total_chunks += i * zbud_cumul_chunk_counts[i];
 +              if (i == 21)
 +                      total_chunks_lte_21 = total_chunks;
 +              if (i == 32)
 +                      total_chunks_lte_32 = total_chunks;
 +              if (i == 42)
 +                      total_chunks_lte_42 = total_chunks;
 +      }
 +      p += sprintf(p, "<=21:%lu <=32:%lu <=42:%lu, mean:%lu\n",
 +              total_chunks_lte_21, total_chunks_lte_32, total_chunks_lte_42,
 +              chunks == 0 ? 0 : sum_total_chunks / chunks);
 +      return p - buf;
 +}
 +#endif
 +
 +/**********
 + * This "zv" PAM implementation combines the TLSF-based xvMalloc
 + * with lzo1x compression to maximize the amount of data that can
 + * be packed into a physical page.
 + *
 + * Zv represents a PAM page with the index and object (plus a "size" value
 + * necessary for decompression) immediately preceding the compressed data.
 + */
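
To make the layout in the comment above concrete: a small header carrying the index, object id, pool id and a size value sits immediately in front of the compressed bytes, which the code reaches as (char *)zv + sizeof(*zv). The standalone sketch below illustrates just that layout; the field names are invented stand-ins, not the driver's struct zv_hdr.

/*
 * Sketch of the "header immediately followed by compressed payload" layout;
 * field names are illustrative stand-ins.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct sk_zv_hdr {
	uint32_t pool_id;
	uint32_t index;
	uint32_t size;          /* compressed length, needed for decompression */
};

static struct sk_zv_hdr *sk_zv_create(const void *cdata, uint32_t clen)
{
	struct sk_zv_hdr *zv = malloc(sizeof(*zv) + clen);

	if (zv == NULL)
		return NULL;
	zv->pool_id = 0;
	zv->index = 0;
	zv->size = clen;
	/* payload lives directly behind the header */
	memcpy((char *)zv + sizeof(*zv), cdata, clen);
	return zv;
}

int main(void)
{
	const char payload[] = "compressed-bytes";
	struct sk_zv_hdr *zv = sk_zv_create(payload, sizeof(payload));

	if (zv != NULL) {
		printf("payload at offset %zu: %s\n", sizeof(*zv),
		       (char *)zv + sizeof(*zv));
		free(zv);
	}
	return 0;
}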
 +
 +/* rudimentary policy limits */
 +/* total number of persistent pages may not exceed this percentage */
 +static unsigned int zv_page_count_policy_percent = 75;
 +/*
 + * byte count defining poor compression; pages with greater zsize will be
 + * rejected
 + */
 +static unsigned int zv_max_zsize = (PAGE_SIZE / 8) * 7;
 +/*
 + * byte count defining poor *mean* compression; pages with greater zsize
 + * will be rejected until sufficient better-compressed pages are accepted
 + * driving the mean below this threshold
 + */
 +static unsigned int zv_max_mean_zsize = (PAGE_SIZE / 8) * 5;
 +
 +static atomic_t zv_curr_dist_counts[NCHUNKS];
 +static atomic_t zv_cumul_dist_counts[NCHUNKS];
 +
 +
 +static struct zv_hdr *zv_create(struct zcache_client *cli, uint32_t pool_id,
 +                              struct tmem_oid *oid, uint32_t index,
 +                              void *cdata, unsigned clen)
 +{
 +      struct page *page;
 +      struct zv_hdr *zv = NULL;
 +      uint32_t offset;
 +      int alloc_size = clen + sizeof(struct zv_hdr);
 +      int chunks = (alloc_size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT;
 +      int ret;
 +
 +      BUG_ON(!irqs_disabled());
 +      BUG_ON(chunks >= NCHUNKS);
 +      ret = xv_malloc(cli->xvpool, clen + sizeof(struct zv_hdr),
 +                      &page, &offset, ZCACHE_GFP_MASK);
 +      if (unlikely(ret))
 +              goto out;
 +      atomic_inc(&zv_curr_dist_counts[chunks]);
 +      atomic_inc(&zv_cumul_dist_counts[chunks]);
-       zv = kmap_atomic(page, KM_USER0) + offset;
++      zv = kmap_atomic(page) + offset;
 +      zv->index = index;
 +      zv->oid = *oid;
 +      zv->pool_id = pool_id;
 +      SET_SENTINEL(zv, ZVH);
 +      INIT_LIST_HEAD(&zv->rem_op.list);
 +      zv->client_id = get_client_id_from_client(cli);
 +      zv->rem_op.op = RAMSTER_REMOTIFY_PERS_PUT;
 +      if (zv->client_id == LOCAL_CLIENT) {
 +              spin_lock(&zcache_rem_op_list_lock);
 +              list_add_tail(&zv->rem_op.list, &zcache_rem_op_list);
 +              spin_unlock(&zcache_rem_op_list_lock);
 +      }
 +      memcpy((char *)zv + sizeof(struct zv_hdr), cdata, clen);
-       kunmap_atomic(zv, KM_USER0);
++      kunmap_atomic(zv);
 +out:
 +      return zv;
 +}
 +
 +/* similar to zv_create, but just reserve space, no data yet */
 +static struct zv_hdr *zv_alloc(struct tmem_pool *pool,
 +                              struct tmem_oid *oid, uint32_t index,
 +                              unsigned clen)
 +{
 +      struct zcache_client *cli = pool->client;
 +      struct page *page;
 +      struct zv_hdr *zv = NULL;
 +      uint32_t offset;
 +      int ret;
 +
 +      BUG_ON(!irqs_disabled());
 +      BUG_ON(!is_local_client(pool->client));
 +      ret = xv_malloc(cli->xvpool, clen + sizeof(struct zv_hdr),
 +                      &page, &offset, ZCACHE_GFP_MASK);
 +      if (unlikely(ret))
 +              goto out;
-       zv = kmap_atomic(page, KM_USER0) + offset;
++      zv = kmap_atomic(page) + offset;
 +      SET_SENTINEL(zv, ZVH);
 +      INIT_LIST_HEAD(&zv->rem_op.list);
 +      zv->client_id = LOCAL_CLIENT;
 +      zv->rem_op.op = RAMSTER_INTRANSIT_PERS;
 +      zv->index = index;
 +      zv->oid = *oid;
 +      zv->pool_id = pool->pool_id;
-       kunmap_atomic(zv, KM_USER0);
++      kunmap_atomic(zv);
 +out:
 +      return zv;
 +}
 +
 +static void zv_free(struct xv_pool *xvpool, struct zv_hdr *zv)
 +{
 +      unsigned long flags;
 +      struct page *page;
 +      uint32_t offset;
 +      uint16_t size = xv_get_object_size(zv);
 +      int chunks = (size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT;
 +
 +      ASSERT_SENTINEL(zv, ZVH);
 +      BUG_ON(chunks >= NCHUNKS);
 +      atomic_dec(&zv_curr_dist_counts[chunks]);
 +      size -= sizeof(*zv);
 +      spin_lock(&zcache_rem_op_list_lock);
 +      size = xv_get_object_size(zv) - sizeof(*zv);
 +      BUG_ON(size == 0);
 +      INVERT_SENTINEL(zv, ZVH);
 +      if (!list_empty(&zv->rem_op.list))
 +              list_del_init(&zv->rem_op.list);
 +      spin_unlock(&zcache_rem_op_list_lock);
 +      page = virt_to_page(zv);
 +      offset = (unsigned long)zv & ~PAGE_MASK;
 +      local_irq_save(flags);
 +      xv_free(xvpool, page, offset);
 +      local_irq_restore(flags);
 +}
 +
 +static void zv_decompress(struct page *page, struct zv_hdr *zv)
 +{
 +      size_t clen = PAGE_SIZE;
 +      char *to_va;
 +      unsigned size;
 +      int ret;
 +
 +      ASSERT_SENTINEL(zv, ZVH);
 +      size = xv_get_object_size(zv) - sizeof(*zv);
 +      BUG_ON(size == 0);
-       to_va = kmap_atomic(page, KM_USER0);
++      to_va = kmap_atomic(page);
 +      ret = lzo1x_decompress_safe((char *)zv + sizeof(*zv),
 +                                      size, to_va, &clen);
-       kunmap_atomic(to_va, KM_USER0);
++      kunmap_atomic(to_va);
 +      BUG_ON(ret != LZO_E_OK);
 +      BUG_ON(clen != PAGE_SIZE);
 +}
 +
 +static void zv_copy_from_pampd(char *data, size_t *bufsize, struct zv_hdr *zv)
 +{
 +      unsigned size;
 +
 +      ASSERT_SENTINEL(zv, ZVH);
 +      size = xv_get_object_size(zv) - sizeof(*zv);
 +      BUG_ON(size == 0 || size > zv_max_page_size);
 +      BUG_ON(size > *bufsize);
 +      memcpy(data, (char *)zv + sizeof(*zv), size);
 +      *bufsize = size;
 +}
 +
 +static void zv_copy_to_pampd(struct zv_hdr *zv, char *data, size_t size)
 +{
 +      unsigned zv_size;
 +
 +      ASSERT_SENTINEL(zv, ZVH);
 +      zv_size = xv_get_object_size(zv) - sizeof(*zv);
 +      BUG_ON(zv_size != size);
 +      BUG_ON(zv_size == 0 || zv_size > zv_max_page_size);
 +      memcpy((char *)zv + sizeof(*zv), data, size);
 +}
 +
 +#ifdef CONFIG_SYSFS
 +/*
 + * show a distribution of compression stats for zv pages.
 + */
 +
 +static int zv_curr_dist_counts_show(char *buf)
 +{
 +      unsigned long i, n, chunks = 0, sum_total_chunks = 0;
 +      char *p = buf;
 +
 +      for (i = 0; i < NCHUNKS; i++) {
 +              n = atomic_read(&zv_curr_dist_counts[i]);
 +              p += sprintf(p, "%lu ", n);
 +              chunks += n;
 +              sum_total_chunks += i * n;
 +      }
 +      p += sprintf(p, "mean:%lu\n",
 +              chunks == 0 ? 0 : sum_total_chunks / chunks);
 +      return p - buf;
 +}
 +
 +static int zv_cumul_dist_counts_show(char *buf)
 +{
 +      unsigned long i, n, chunks = 0, sum_total_chunks = 0;
 +      char *p = buf;
 +
 +      for (i = 0; i < NCHUNKS; i++) {
 +              n = atomic_read(&zv_cumul_dist_counts[i]);
 +              p += sprintf(p, "%lu ", n);
 +              chunks += n;
 +              sum_total_chunks += i * n;
 +      }
 +      p += sprintf(p, "mean:%lu\n",
 +              chunks == 0 ? 0 : sum_total_chunks / chunks);
 +      return p - buf;
 +}
 +
 +/*
 + * setting zv_max_zsize via sysfs causes all persistent (e.g. swap)
 + * pages that don't compress to less than this value (including metadata
 + * overhead) to be rejected.  We don't allow the value to get too close
 + * to PAGE_SIZE.
 + */
 +static ssize_t zv_max_zsize_show(struct kobject *kobj,
 +                                  struct kobj_attribute *attr,
 +                                  char *buf)
 +{
 +      return sprintf(buf, "%u\n", zv_max_zsize);
 +}
 +
 +static ssize_t zv_max_zsize_store(struct kobject *kobj,
 +                                  struct kobj_attribute *attr,
 +                                  const char *buf, size_t count)
 +{
 +      unsigned long val;
 +      int err;
 +
 +      if (!capable(CAP_SYS_ADMIN))
 +              return -EPERM;
 +
 +      err = kstrtoul(buf, 10, &val);
 +      if (err || (val == 0) || (val > (PAGE_SIZE / 8) * 7))
 +              return -EINVAL;
 +      zv_max_zsize = val;
 +      return count;
 +}
 +
 +/*
 + * setting zv_max_mean_zsize via sysfs causes all persistent (e.g. swap)
 + * pages that don't compress to less than this value (including metadata
 + * overhead) to be rejected UNLESS the mean compression is also smaller
 + * than this value.  In other words, we are load-balancing-by-zsize the
 + * accepted pages.  Again, we don't allow the value to get too close
 + * to PAGE_SIZE.
 + */
 +static ssize_t zv_max_mean_zsize_show(struct kobject *kobj,
 +                                  struct kobj_attribute *attr,
 +                                  char *buf)
 +{
 +      return sprintf(buf, "%u\n", zv_max_mean_zsize);
 +}
 +
 +static ssize_t zv_max_mean_zsize_store(struct kobject *kobj,
 +                                  struct kobj_attribute *attr,
 +                                  const char *buf, size_t count)
 +{
 +      unsigned long val;
 +      int err;
 +
 +      if (!capable(CAP_SYS_ADMIN))
 +              return -EPERM;
 +
 +      err = kstrtoul(buf, 10, &val);
 +      if (err || (val == 0) || (val > (PAGE_SIZE / 8) * 7))
 +              return -EINVAL;
 +      zv_max_mean_zsize = val;
 +      return count;
 +}
 +
 +/*
 + * setting zv_page_count_policy_percent via sysfs sets an upper bound of
 + * persistent (e.g. swap) pages that will be retained according to:
 + *     (zv_page_count_policy_percent * totalram_pages) / 100
 + * when that limit is reached, further puts will be rejected (until
 + * some pages have been flushed).  Note that, due to compression,
 + * this number may exceed 100; it defaults to 75 and we set an
 + * arbitrary limit of 150.  A poor choice will almost certainly result
 + * in OOMs, so this value should only be changed prudently.
 + */
 +static ssize_t zv_page_count_policy_percent_show(struct kobject *kobj,
 +                                               struct kobj_attribute *attr,
 +                                               char *buf)
 +{
 +      return sprintf(buf, "%u\n", zv_page_count_policy_percent);
 +}
 +
 +static ssize_t zv_page_count_policy_percent_store(struct kobject *kobj,
 +                                                struct kobj_attribute *attr,
 +                                                const char *buf, size_t count)
 +{
 +      unsigned long val;
 +      int err;
 +
 +      if (!capable(CAP_SYS_ADMIN))
 +              return -EPERM;
 +
 +      err = kstrtoul(buf, 10, &val);
 +      if (err || (val == 0) || (val > 150))
 +              return -EINVAL;
 +      zv_page_count_policy_percent = val;
 +      return count;
 +}
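
As the comment before zv_page_count_policy_percent_show() states, the retention cap works out to (zv_page_count_policy_percent * totalram_pages) / 100, and zcache_pampd_pers_create() later rejects a put once the current persistent-page count exceeds it. A tiny standalone sketch of that arithmetic, with made-up numbers, follows.

/*
 * Sketch of the policy-percent admission check; all numbers are made up for
 * the example.
 */
#include <stdio.h>

int main(void)
{
	unsigned long totalram_pages = 1UL << 20;   /* pretend 4 GB of 4 KB pages */
	unsigned long policy_percent = 75;          /* the default shown above */
	unsigned long curr_pers_pages = 900000;
	unsigned long limit = (policy_percent * totalram_pages) / 100;

	if (curr_pers_pages > limit)
		printf("put rejected: %lu pages > limit %lu\n",
		       curr_pers_pages, limit);
	else
		printf("put allowed: %lu pages <= limit %lu\n",
		       curr_pers_pages, limit);
	return 0;
}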
 +
 +static struct kobj_attribute zcache_zv_max_zsize_attr = {
 +              .attr = { .name = "zv_max_zsize", .mode = 0644 },
 +              .show = zv_max_zsize_show,
 +              .store = zv_max_zsize_store,
 +};
 +
 +static struct kobj_attribute zcache_zv_max_mean_zsize_attr = {
 +              .attr = { .name = "zv_max_mean_zsize", .mode = 0644 },
 +              .show = zv_max_mean_zsize_show,
 +              .store = zv_max_mean_zsize_store,
 +};
 +
 +static struct kobj_attribute zcache_zv_page_count_policy_percent_attr = {
 +              .attr = { .name = "zv_page_count_policy_percent",
 +                        .mode = 0644 },
 +              .show = zv_page_count_policy_percent_show,
 +              .store = zv_page_count_policy_percent_store,
 +};
 +#endif
 +
 +/*
 + * zcache core code starts here
 + */
 +
 +/* useful stats not collected by cleancache or frontswap */
 +static unsigned long zcache_flush_total;
 +static unsigned long zcache_flush_found;
 +static unsigned long zcache_flobj_total;
 +static unsigned long zcache_flobj_found;
 +static unsigned long zcache_failed_eph_puts;
 +static unsigned long zcache_nonactive_puts;
 +static unsigned long zcache_failed_pers_puts;
 +
 +/*
 + * Tmem operations assume the poolid implies the invoking client.
 + * Zcache only has one client (the kernel itself): LOCAL_CLIENT.
 + * RAMster has each client numbered by cluster node, and a KVM version
 + * of zcache would have one client per guest and each client might
 + * have a poolid==N.
 + */
 +static struct tmem_pool *zcache_get_pool_by_id(uint16_t cli_id, uint16_t poolid)
 +{
 +      struct tmem_pool *pool = NULL;
 +      struct zcache_client *cli = NULL;
 +
 +      if (cli_id == LOCAL_CLIENT)
 +              cli = &zcache_host;
 +      else {
 +              if (cli_id >= MAX_CLIENTS)
 +                      goto out;
 +              cli = &zcache_clients[cli_id];
 +              if (cli == NULL)
 +                      goto out;
 +              atomic_inc(&cli->refcount);
 +      }
 +      if (poolid < MAX_POOLS_PER_CLIENT) {
 +              pool = cli->tmem_pools[poolid];
 +              if (pool != NULL)
 +                      atomic_inc(&pool->refcount);
 +      }
 +out:
 +      return pool;
 +}
 +
 +static void zcache_put_pool(struct tmem_pool *pool)
 +{
 +      struct zcache_client *cli = NULL;
 +
 +      if (pool == NULL)
 +              BUG();
 +      cli = pool->client;
 +      atomic_dec(&pool->refcount);
 +      atomic_dec(&cli->refcount);
 +}
 +
 +int zcache_new_client(uint16_t cli_id)
 +{
 +      struct zcache_client *cli = NULL;
 +      int ret = -1;
 +
 +      if (cli_id == LOCAL_CLIENT)
 +              cli = &zcache_host;
 +      else if ((unsigned int)cli_id < MAX_CLIENTS)
 +              cli = &zcache_clients[cli_id];
 +      if (cli == NULL)
 +              goto out;
 +      if (cli->allocated)
 +              goto out;
 +      cli->allocated = 1;
 +#ifdef CONFIG_FRONTSWAP
 +      cli->xvpool = xv_create_pool();
 +      if (cli->xvpool == NULL)
 +              goto out;
 +#endif
 +      ret = 0;
 +out:
 +      return ret;
 +}
 +
 +/* counters for debugging */
 +static unsigned long zcache_failed_get_free_pages;
 +static unsigned long zcache_failed_alloc;
 +static unsigned long zcache_put_to_flush;
 +
 +/*
 + * for now, use named slabs so we can easily track usage; later we can
 + * either just use kmalloc, or perhaps add a slab-like allocator
 + * to more carefully manage total memory utilization
 + */
 +static struct kmem_cache *zcache_objnode_cache;
 +static struct kmem_cache *zcache_obj_cache;
 +static struct kmem_cache *ramster_flnode_cache;
 +static atomic_t zcache_curr_obj_count = ATOMIC_INIT(0);
 +static unsigned long zcache_curr_obj_count_max;
 +static atomic_t zcache_curr_objnode_count = ATOMIC_INIT(0);
 +static unsigned long zcache_curr_objnode_count_max;
 +
 +/*
 + * to avoid memory allocation recursion (e.g. due to direct reclaim), we
 + * preload all necessary data structures so the hostops callbacks never
 + * actually do a malloc
 + */
 +struct zcache_preload {
 +      void *page;
 +      struct tmem_obj *obj;
 +      int nr;
 +      struct tmem_objnode *objnodes[OBJNODE_TREE_MAX_PATH];
 +      struct flushlist_node *flnode;
 +};
 +static DEFINE_PER_CPU(struct zcache_preload, zcache_preloads) = { 0, };
 +
 +static int zcache_do_preload(struct tmem_pool *pool)
 +{
 +      struct zcache_preload *kp;
 +      struct tmem_objnode *objnode;
 +      struct tmem_obj *obj;
 +      struct flushlist_node *flnode;
 +      void *page;
 +      int ret = -ENOMEM;
 +
 +      if (unlikely(zcache_objnode_cache == NULL))
 +              goto out;
 +      if (unlikely(zcache_obj_cache == NULL))
 +              goto out;
 +      preempt_disable();
 +      kp = &__get_cpu_var(zcache_preloads);
 +      while (kp->nr < ARRAY_SIZE(kp->objnodes)) {
 +              preempt_enable_no_resched();
 +              objnode = kmem_cache_alloc(zcache_objnode_cache,
 +                              ZCACHE_GFP_MASK);
 +              if (unlikely(objnode == NULL)) {
 +                      zcache_failed_alloc++;
 +                      goto out;
 +              }
 +              preempt_disable();
 +              kp = &__get_cpu_var(zcache_preloads);
 +              if (kp->nr < ARRAY_SIZE(kp->objnodes))
 +                      kp->objnodes[kp->nr++] = objnode;
 +              else
 +                      kmem_cache_free(zcache_objnode_cache, objnode);
 +      }
 +      preempt_enable_no_resched();
 +      obj = kmem_cache_alloc(zcache_obj_cache, ZCACHE_GFP_MASK);
 +      if (unlikely(obj == NULL)) {
 +              zcache_failed_alloc++;
 +              goto out;
 +      }
 +      flnode = kmem_cache_alloc(ramster_flnode_cache, ZCACHE_GFP_MASK);
 +      if (unlikely(flnode == NULL)) {
 +              zcache_failed_alloc++;
 +              goto out;
 +      }
 +      if (is_ephemeral(pool)) {
 +              page = (void *)__get_free_page(ZCACHE_GFP_MASK);
 +              if (unlikely(page == NULL)) {
 +                      zcache_failed_get_free_pages++;
 +                      kmem_cache_free(zcache_obj_cache, obj);
 +                      kmem_cache_free(ramster_flnode_cache, flnode);
 +                      goto out;
 +              }
 +      }
 +      preempt_disable();
 +      kp = &__get_cpu_var(zcache_preloads);
 +      if (kp->obj == NULL)
 +              kp->obj = obj;
 +      else
 +              kmem_cache_free(zcache_obj_cache, obj);
 +      if (kp->flnode == NULL)
 +              kp->flnode = flnode;
 +      else
 +              kmem_cache_free(ramster_flnode_cache, flnode);
 +      if (is_ephemeral(pool)) {
 +              if (kp->page == NULL)
 +                      kp->page = page;
 +              else
 +                      free_page((unsigned long)page);
 +      }
 +      ret = 0;
 +out:
 +      return ret;
 +}
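
The comment above struct zcache_preload explains the motivation for zcache_do_preload(): allocate everything the tmem hostops might need before entering the allocation-free region, so callbacks such as zcache_objnode_alloc() only ever pop from a per-CPU stash. A minimal single-threaded userspace sketch of that stash-then-consume pattern (names and sizes invented) is shown below.

/*
 * Sketch of the preload (stash-then-consume) pattern; names and sizes are
 * invented, and this is single-threaded rather than per-CPU.
 */
#include <stdio.h>
#include <stdlib.h>

#define SK_STASH_MAX 4

struct sk_preload {
	void *objnodes[SK_STASH_MAX];
	int nr;
};

static struct sk_preload sk_stash;      /* stand-in for per-CPU zcache_preloads */

static int sk_do_preload(void)
{
	while (sk_stash.nr < SK_STASH_MAX) {
		void *p = malloc(64);   /* allocation is still allowed here */

		if (p == NULL)
			return -1;
		sk_stash.objnodes[sk_stash.nr++] = p;
	}
	return 0;
}

/* analogous to zcache_objnode_alloc(): never allocates, only pops */
static void *sk_objnode_alloc(void)
{
	if (sk_stash.nr <= 0)
		return NULL;
	return sk_stash.objnodes[--sk_stash.nr];
}

int main(void)
{
	if (sk_do_preload() == 0)
		printf("popped %p from the preload stash\n", sk_objnode_alloc());
	return 0;
}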
 +
 +static int ramster_do_preload_flnode_only(struct tmem_pool *pool)
 +{
 +      struct zcache_preload *kp;
 +      struct flushlist_node *flnode;
 +      int ret = -ENOMEM;
 +
 +      BUG_ON(!irqs_disabled());
 +      if (unlikely(ramster_flnode_cache == NULL))
 +              BUG();
 +      kp = &__get_cpu_var(zcache_preloads);
 +      flnode = kmem_cache_alloc(ramster_flnode_cache, GFP_ATOMIC);
 +      if (unlikely(flnode == NULL) && kp->flnode == NULL)
 +              BUG();  /* FIXME handle more gracefully, but how??? */
 +      else if (kp->flnode == NULL)
 +              kp->flnode = flnode;
 +      else
 +              kmem_cache_free(ramster_flnode_cache, flnode);
 +      return ret;
 +}
 +
 +static void *zcache_get_free_page(void)
 +{
 +      struct zcache_preload *kp;
 +      void *page;
 +
 +      kp = &__get_cpu_var(zcache_preloads);
 +      page = kp->page;
 +      BUG_ON(page == NULL);
 +      kp->page = NULL;
 +      return page;
 +}
 +
 +static void zcache_free_page(void *p)
 +{
 +      free_page((unsigned long)p);
 +}
 +
 +/*
 + * zcache implementation for tmem host ops
 + */
 +
 +static struct tmem_objnode *zcache_objnode_alloc(struct tmem_pool *pool)
 +{
 +      struct tmem_objnode *objnode = NULL;
 +      unsigned long count;
 +      struct zcache_preload *kp;
 +
 +      kp = &__get_cpu_var(zcache_preloads);
 +      if (kp->nr <= 0)
 +              goto out;
 +      objnode = kp->objnodes[kp->nr - 1];
 +      BUG_ON(objnode == NULL);
 +      kp->objnodes[kp->nr - 1] = NULL;
 +      kp->nr--;
 +      count = atomic_inc_return(&zcache_curr_objnode_count);
 +      if (count > zcache_curr_objnode_count_max)
 +              zcache_curr_objnode_count_max = count;
 +out:
 +      return objnode;
 +}
 +
 +static void zcache_objnode_free(struct tmem_objnode *objnode,
 +                                      struct tmem_pool *pool)
 +{
 +      atomic_dec(&zcache_curr_objnode_count);
 +      BUG_ON(atomic_read(&zcache_curr_objnode_count) < 0);
 +      kmem_cache_free(zcache_objnode_cache, objnode);
 +}
 +
 +static struct tmem_obj *zcache_obj_alloc(struct tmem_pool *pool)
 +{
 +      struct tmem_obj *obj = NULL;
 +      unsigned long count;
 +      struct zcache_preload *kp;
 +
 +      kp = &__get_cpu_var(zcache_preloads);
 +      obj = kp->obj;
 +      BUG_ON(obj == NULL);
 +      kp->obj = NULL;
 +      count = atomic_inc_return(&zcache_curr_obj_count);
 +      if (count > zcache_curr_obj_count_max)
 +              zcache_curr_obj_count_max = count;
 +      return obj;
 +}
 +
 +static void zcache_obj_free(struct tmem_obj *obj, struct tmem_pool *pool)
 +{
 +      atomic_dec(&zcache_curr_obj_count);
 +      BUG_ON(atomic_read(&zcache_curr_obj_count) < 0);
 +      kmem_cache_free(zcache_obj_cache, obj);
 +}
 +
 +static struct flushlist_node *ramster_flnode_alloc(struct tmem_pool *pool)
 +{
 +      struct flushlist_node *flnode = NULL;
 +      struct zcache_preload *kp;
 +      int count;
 +
 +      kp = &__get_cpu_var(zcache_preloads);
 +      flnode = kp->flnode;
 +      BUG_ON(flnode == NULL);
 +      kp->flnode = NULL;
 +      count = atomic_inc_return(&ramster_curr_flnode_count);
 +      if (count > ramster_curr_flnode_count_max)
 +              ramster_curr_flnode_count_max = count;
 +      return flnode;
 +}
 +
 +static void ramster_flnode_free(struct flushlist_node *flnode,
 +                              struct tmem_pool *pool)
 +{
 +      atomic_dec(&ramster_curr_flnode_count);
 +      BUG_ON(atomic_read(&ramster_curr_flnode_count) < 0);
 +      kmem_cache_free(ramster_flnode_cache, flnode);
 +}
 +
 +static struct tmem_hostops zcache_hostops = {
 +      .obj_alloc = zcache_obj_alloc,
 +      .obj_free = zcache_obj_free,
 +      .objnode_alloc = zcache_objnode_alloc,
 +      .objnode_free = zcache_objnode_free,
 +};
 +
 +/*
 + * zcache implementations for PAM page descriptor ops
 + */
 +
 +
 +static inline void dec_and_check(atomic_t *pvar)
 +{
 +      atomic_dec(pvar);
 +      /* later when all accounting is fixed, make this a BUG */
 +      WARN_ON_ONCE(atomic_read(pvar) < 0);
 +}
 +
 +static atomic_t zcache_curr_eph_pampd_count = ATOMIC_INIT(0);
 +static unsigned long zcache_curr_eph_pampd_count_max;
 +static atomic_t zcache_curr_pers_pampd_count = ATOMIC_INIT(0);
 +static unsigned long zcache_curr_pers_pampd_count_max;
 +
 +/* forward reference */
 +static int zcache_compress(struct page *from, void **out_va, size_t *out_len);
 +
 +static int zcache_pampd_eph_create(char *data, size_t size, bool raw,
 +                              struct tmem_pool *pool, struct tmem_oid *oid,
 +                              uint32_t index, void **pampd)
 +{
 +      int ret = -1;
 +      void *cdata = data;
 +      size_t clen = size;
 +      struct zcache_client *cli = pool->client;
 +      uint16_t client_id = get_client_id_from_client(cli);
 +      struct page *page = NULL;
 +      unsigned long count;
 +
 +      if (!raw) {
 +              page = virt_to_page(data);
 +              ret = zcache_compress(page, &cdata, &clen);
 +              if (ret == 0)
 +                      goto out;
 +              if (clen == 0 || clen > zbud_max_buddy_size()) {
 +                      zcache_compress_poor++;
 +                      goto out;
 +              }
 +      }
 +      *pampd = (void *)zbud_create(client_id, pool->pool_id, oid,
 +                                      index, page, cdata, clen);
 +      if (*pampd == NULL) {
 +              ret = -ENOMEM;
 +              goto out;
 +      }
 +      ret = 0;
 +      count = atomic_inc_return(&zcache_curr_eph_pampd_count);
 +      if (count > zcache_curr_eph_pampd_count_max)
 +              zcache_curr_eph_pampd_count_max = count;
 +      if (client_id != LOCAL_CLIENT) {
 +              count = atomic_inc_return(&ramster_foreign_eph_pampd_count);
 +              if (count > ramster_foreign_eph_pampd_count_max)
 +                      ramster_foreign_eph_pampd_count_max = count;
 +      }
 +out:
 +      return ret;
 +}
 +
 +static int zcache_pampd_pers_create(char *data, size_t size, bool raw,
 +                              struct tmem_pool *pool, struct tmem_oid *oid,
 +                              uint32_t index, void **pampd)
 +{
 +      int ret = -1;
 +      void *cdata = data;
 +      size_t clen = size;
 +      struct zcache_client *cli = pool->client;
 +      struct page *page;
 +      unsigned long count;
 +      unsigned long zv_mean_zsize;
 +      struct zv_hdr *zv;
 +      long curr_pers_pampd_count;
 +      u64 total_zsize;
 +#ifdef RAMSTER_TESTING
 +      static bool pampd_neg_warned;
 +#endif
 +
 +      curr_pers_pampd_count = atomic_read(&zcache_curr_pers_pampd_count) -
 +                      atomic_read(&ramster_remote_pers_pages);
 +#ifdef RAMSTER_TESTING
 +      /* should always be positive, but warn if accounting is off */
 +      if (curr_pers_pampd_count < 0 && !pampd_neg_warned) {
 +              pr_warn("ramster: bad accounting for curr_pers_pampd_count\n");
 +              pampd_neg_warned = true;
 +      }
 +#endif
 +      if (curr_pers_pampd_count >
 +                  (zv_page_count_policy_percent * totalram_pages) / 100) {
 +              zcache_policy_percent_exceeded++;
 +              goto out;
 +      }
 +      if (raw)
 +              goto ok_to_create;
 +      page = virt_to_page(data);
 +      if (zcache_compress(page, &cdata, &clen) == 0)
 +              goto out;
 +      /* reject if compression is too poor */
 +      if (clen > zv_max_zsize) {
 +              zcache_compress_poor++;
 +              goto out;
 +      }
 +      /* reject if mean compression is too poor */
 +      if ((clen > zv_max_mean_zsize) && (curr_pers_pampd_count > 0)) {
 +              total_zsize = xv_get_total_size_bytes(cli->xvpool);
 +              zv_mean_zsize = div_u64(total_zsize, curr_pers_pampd_count);
 +              if (zv_mean_zsize > zv_max_mean_zsize) {
 +                      zcache_mean_compress_poor++;
 +                      goto out;
 +              }
 +      }
 +ok_to_create:
 +      *pampd = (void *)zv_create(cli, pool->pool_id, oid, index, cdata, clen);
 +      if (*pampd == NULL) {
 +              ret = -ENOMEM;
 +              goto out;
 +      }
 +      ret = 0;
 +      count = atomic_inc_return(&zcache_curr_pers_pampd_count);
 +      if (count > zcache_curr_pers_pampd_count_max)
 +              zcache_curr_pers_pampd_count_max = count;
 +      if (is_local_client(cli))
 +              goto out;
 +      zv = *(struct zv_hdr **)pampd;
 +      count = atomic_inc_return(&ramster_foreign_pers_pampd_count);
 +      if (count > ramster_foreign_pers_pampd_count_max)
 +              ramster_foreign_pers_pampd_count_max = count;
 +out:
 +      return ret;
 +}
 +
 +static void *zcache_pampd_create(char *data, size_t size, bool raw, int eph,
 +                              struct tmem_pool *pool, struct tmem_oid *oid,
 +                              uint32_t index)
 +{
 +      void *pampd = NULL;
 +      int ret;
 +      bool ephemeral;
 +
 +      BUG_ON(preemptible());
 +      ephemeral = (eph == 1) || ((eph == 0) && is_ephemeral(pool));
 +      if (ephemeral)
 +              ret = zcache_pampd_eph_create(data, size, raw, pool,
 +                                              oid, index, &pampd);
 +      else
 +              ret = zcache_pampd_pers_create(data, size, raw, pool,
 +                                              oid, index, &pampd);
 +      /* FIXME add some counters here for failed creates? */
 +      return pampd;
 +}
 +
 +/*
 + * fill the pageframe corresponding to the struct page with the data
 + * from the passed pampd
 + */
 +static int zcache_pampd_get_data(char *data, size_t *bufsize, bool raw,
 +                                      void *pampd, struct tmem_pool *pool,
 +                                      struct tmem_oid *oid, uint32_t index)
 +{
 +      int ret = 0;
 +
 +      BUG_ON(preemptible());
 +      BUG_ON(is_ephemeral(pool)); /* Fix later for shared pools? */
 +      BUG_ON(pampd_is_remote(pampd));
 +      if (raw)
 +              zv_copy_from_pampd(data, bufsize, pampd);
 +      else
 +              zv_decompress(virt_to_page(data), pampd);
 +      return ret;
 +}
 +
 +static int zcache_pampd_get_data_and_free(char *data, size_t *bufsize, bool raw,
 +                                      void *pampd, struct tmem_pool *pool,
 +                                      struct tmem_oid *oid, uint32_t index)
 +{
 +      int ret = 0;
 +      unsigned long flags;
 +      struct zcache_client *cli = pool->client;
 +
 +      BUG_ON(preemptible());
 +      BUG_ON(pampd_is_remote(pampd));
 +      if (is_ephemeral(pool)) {
 +              local_irq_save(flags);
 +              if (raw)
 +                      zbud_copy_from_pampd(data, bufsize, pampd);
 +              else
 +                      ret = zbud_decompress(virt_to_page(data), pampd);
 +              zbud_free_and_delist((struct zbud_hdr *)pampd);
 +              local_irq_restore(flags);
 +              if (!is_local_client(cli))
 +                      dec_and_check(&ramster_foreign_eph_pampd_count);
 +              dec_and_check(&zcache_curr_eph_pampd_count);
 +      } else {
 +              if (is_local_client(cli))
 +                      BUG();
 +              if (raw)
 +                      zv_copy_from_pampd(data, bufsize, pampd);
 +              else
 +                      zv_decompress(virt_to_page(data), pampd);
 +              zv_free(cli->xvpool, pampd);
 +              if (!is_local_client(cli))
 +                      dec_and_check(&ramster_foreign_pers_pampd_count);
 +              dec_and_check(&zcache_curr_pers_pampd_count);
 +              ret = 0;
 +      }
 +      return ret;
 +}
 +
 +static bool zcache_pampd_is_remote(void *pampd)
 +{
 +      return pampd_is_remote(pampd);
 +}
 +
 +/*
 + * free the pampd and remove it from any zcache lists
 + * pampd must no longer be pointed to from any tmem data structures!
 + */
 +static void zcache_pampd_free(void *pampd, struct tmem_pool *pool,
 +                            struct tmem_oid *oid, uint32_t index, bool acct)
 +{
 +      struct zcache_client *cli = pool->client;
 +      bool eph = is_ephemeral(pool);
 +      struct zv_hdr *zv;
 +
 +      BUG_ON(preemptible());
 +      if (pampd_is_remote(pampd)) {
 +              WARN_ON(acct == false);
 +              if (oid == NULL) {
 +                      /*
 +                       * a NULL oid means to ignore this pampd free
 +                       * as the remote freeing will be handled elsewhere
 +                       */
 +              } else if (eph) {
 +                      /* FIXME remote flush optional but probably good idea */
 +                      /* FIXME get these working properly again */
 +                      dec_and_check(&zcache_curr_eph_pampd_count);
 +              } else if (pampd_is_intransit(pampd)) {
 +                      /* did a pers remote get_and_free, so just free local */
 +                      pampd = pampd_mask_intransit_and_remote(pampd);
 +                      goto local_pers;
 +              } else {
 +                      struct flushlist_node *flnode =
 +                              ramster_flnode_alloc(pool);
 +
 +                      flnode->xh.client_id = pampd_remote_node(pampd);
 +                      flnode->xh.pool_id = pool->pool_id;
 +                      flnode->xh.oid = *oid;
 +                      flnode->xh.index = index;
 +                      flnode->rem_op.op = RAMSTER_REMOTIFY_FLUSH_PAGE;
 +                      spin_lock(&zcache_rem_op_list_lock);
 +                      list_add(&flnode->rem_op.list, &zcache_rem_op_list);
 +                      spin_unlock(&zcache_rem_op_list_lock);
 +                      dec_and_check(&zcache_curr_pers_pampd_count);
 +                      dec_and_check(&ramster_remote_pers_pages);
 +              }
 +      } else if (eph) {
 +              zbud_free_and_delist((struct zbud_hdr *)pampd);
 +              if (!is_local_client(pool->client))
 +                      dec_and_check(&ramster_foreign_eph_pampd_count);
 +              if (acct)
 +                      /* FIXME get these working properly again */
 +                      dec_and_check(&zcache_curr_eph_pampd_count);
 +      } else {
 +local_pers:
 +              zv = (struct zv_hdr *)pampd;
 +              if (!is_local_client(pool->client))
 +                      dec_and_check(&ramster_foreign_pers_pampd_count);
 +              zv_free(cli->xvpool, zv);
 +              if (acct)
 +                      /* FIXME get these working properly again */
 +                      dec_and_check(&zcache_curr_pers_pampd_count);
 +      }
 +}
 +
 +static void zcache_pampd_free_obj(struct tmem_pool *pool,
 +                                      struct tmem_obj *obj)
 +{
 +      struct flushlist_node *flnode;
 +
 +      BUG_ON(preemptible());
 +      if (obj->extra == NULL)
 +              return;
 +      BUG_ON(!pampd_is_remote(obj->extra));
 +      flnode = ramster_flnode_alloc(pool);
 +      flnode->xh.client_id = pampd_remote_node(obj->extra);
 +      flnode->xh.pool_id = pool->pool_id;
 +      flnode->xh.oid = obj->oid;
 +      flnode->xh.index = FLUSH_ENTIRE_OBJECT;
 +      flnode->rem_op.op = RAMSTER_REMOTIFY_FLUSH_OBJ;
 +      spin_lock(&zcache_rem_op_list_lock);
 +      list_add(&flnode->rem_op.list, &zcache_rem_op_list);
 +      spin_unlock(&zcache_rem_op_list_lock);
 +}
 +
 +void zcache_pampd_new_obj(struct tmem_obj *obj)
 +{
 +      obj->extra = NULL;
 +}
 +
 +int zcache_pampd_replace_in_obj(void *new_pampd, struct tmem_obj *obj)
 +{
 +      int ret = -1;
 +
 +      if (new_pampd != NULL) {
 +              if (obj->extra == NULL)
 +                      obj->extra = new_pampd;
 +              /* enforce that all remote pages in an object reside
 +               * in the same node! */
 +              else if (pampd_remote_node(new_pampd) !=
 +                              pampd_remote_node((void *)(obj->extra)))
 +                      BUG();
 +              ret = 0;
 +      }
 +      return ret;
 +}
 +
 +/*
 + * Called by the message handler after a (still compressed) page has been
 + * fetched from the remote machine in response to an "is_remote" tmem_get
 + * or persistent tmem_localify.  For a tmem_get, "extra" is the address of
 + * the page that is to be filled to successfully resolve the tmem_get; for
 + * a (persistent) tmem_localify, "extra" is NULL (as the data is placed only
 + * in the local zcache).  "data" points to "size" bytes of (compressed) data
 + * passed in the message.  In the case of a persistent remote get, if
 + * pre-allocation was successful (see zcache_repatriate_preload), the page
 + * is placed into both local zcache and at "extra".
 + */
 +int zcache_localify(int pool_id, struct tmem_oid *oidp,
 +                      uint32_t index, char *data, size_t size,
 +                      void *extra)
 +{
 +      int ret = -ENOENT;
 +      unsigned long flags;
 +      struct tmem_pool *pool;
 +      bool ephemeral, delete = false;
 +      size_t clen = PAGE_SIZE;
 +      void *pampd, *saved_hb;
 +      struct tmem_obj *obj;
 +
 +      pool = zcache_get_pool_by_id(LOCAL_CLIENT, pool_id);
 +      if (unlikely(pool == NULL))
 +              /* pool doesn't exist anymore */
 +              goto out;
 +      ephemeral = is_ephemeral(pool);
 +      local_irq_save(flags);  /* FIXME: maybe only disable softirqs? */
 +      pampd = tmem_localify_get_pampd(pool, oidp, index, &obj, &saved_hb);
 +      if (pampd == NULL) {
 +              /* hmmm... must have been a flush while waiting */
 +#ifdef RAMSTER_TESTING
 +              pr_err("UNTESTED pampd==NULL in zcache_localify\n");
 +#endif
 +              if (ephemeral)
 +                      ramster_remote_eph_pages_unsucc_get++;
 +              else
 +                      ramster_remote_pers_pages_unsucc_get++;
 +              obj = NULL;
 +              goto finish;
 +      } else if (unlikely(!pampd_is_remote(pampd))) {
 +              /* hmmm... must have been a dup put while waiting */
 +#ifdef RAMSTER_TESTING
 +              pr_err("UNTESTED dup while waiting in zcache_localify\n");
 +#endif
 +              if (ephemeral)
 +                      ramster_remote_eph_pages_unsucc_get++;
 +              else
 +                      ramster_remote_pers_pages_unsucc_get++;
 +              obj = NULL;
 +              pampd = NULL;
 +              ret = -EEXIST;
 +              goto finish;
 +      } else if (size == 0) {
 +              /* no remote data, delete the local is_remote pampd */
 +              pampd = NULL;
 +              if (ephemeral)
 +                      ramster_remote_eph_pages_unsucc_get++;
 +              else
 +                      BUG();
 +              delete = true;
 +              goto finish;
 +      }
 +      if (!ephemeral && pampd_is_intransit(pampd)) {
 +              /* localify to zcache */
 +              pampd = pampd_mask_intransit_and_remote(pampd);
 +              zv_copy_to_pampd(pampd, data, size);
 +      } else {
 +              pampd = NULL;
 +              obj = NULL;
 +      }
 +      if (extra != NULL) {
 +              /* decompress direct-to-memory to complete remotify */
 +              ret = lzo1x_decompress_safe((char *)data, size,
 +                                              (char *)extra, &clen);
 +              BUG_ON(ret != LZO_E_OK);
 +              BUG_ON(clen != PAGE_SIZE);
 +      }
 +      if (ephemeral)
 +              ramster_remote_eph_pages_succ_get++;
 +      else
 +              ramster_remote_pers_pages_succ_get++;
 +      ret = 0;
 +finish:
 +      tmem_localify_finish(obj, index, pampd, saved_hb, delete);
 +      zcache_put_pool(pool);
 +      local_irq_restore(flags);
 +out:
 +      return ret;
 +}
 +
 +/*
 + * Called on a remote persistent tmem_get to attempt to preallocate
 + * local storage for the data contained in the remote persistent page.
 + * If successfully preallocated, returns the pampd, marked as remote and
 + * in_transit.  Else returns NULL.  Note that the appropriate tmem data
 + * structure must be locked.
 + */
 +static void *zcache_pampd_repatriate_preload(void *pampd,
 +                                              struct tmem_pool *pool,
 +                                              struct tmem_oid *oid,
 +                                              uint32_t index,
 +                                              bool *intransit)
 +{
 +      int clen = pampd_remote_size(pampd);
 +      void *ret_pampd = NULL;
 +      unsigned long flags;
 +
 +      if (!pampd_is_remote(pampd))
 +              BUG();
 +      if (is_ephemeral(pool))
 +              BUG();
 +      if (pampd_is_intransit(pampd)) {
 +              /*
 +               * to avoid multiple allocations (and maybe a memory leak)
 +               * don't preallocate if already in the process of being
 +               * repatriated
 +               */
 +              *intransit = true;
 +              goto out;
 +      }
 +      *intransit = false;
 +      local_irq_save(flags);
 +      ret_pampd = (void *)zv_alloc(pool, oid, index, clen);
 +      if (ret_pampd != NULL) {
 +              /*
 +               *  a pampd is marked intransit if it is remote and space has
 +               *  been allocated for it locally (note, only happens for
 +               *  persistent pages, in which case the remote copy is freed)
 +               */
 +              ret_pampd = pampd_mark_intransit(ret_pampd);
 +              dec_and_check(&ramster_remote_pers_pages);
 +      } else
 +              ramster_pers_pages_remote_nomem++;
 +      local_irq_restore(flags);
 +out:
 +      return ret_pampd;
 +}
 +
 +/*
 + * Called on a remote tmem_get to invoke a message to fetch the page.
 + * Might sleep so no tmem locks can be held.  "extra" is passed
 + * all the way through the round-trip messaging to zcache_localify.
 + */
 +static int zcache_pampd_repatriate(void *fake_pampd, void *real_pampd,
 +                                 struct tmem_pool *pool,
 +                                 struct tmem_oid *oid, uint32_t index,
 +                                 bool free, void *extra)
 +{
 +      struct tmem_xhandle xh;
 +      int ret;
 +
 +      if (pampd_is_intransit(real_pampd))
 +              /* have local space pre-reserved, so free remote copy */
 +              free = true;
 +      xh = tmem_xhandle_fill(LOCAL_CLIENT, pool, oid, index);
 +      /* unreliable request/response for now */
 +      ret = ramster_remote_async_get(&xh, free,
 +                                      pampd_remote_node(fake_pampd),
 +                                      pampd_remote_size(fake_pampd),
 +                                      pampd_remote_cksum(fake_pampd),
 +                                      extra);
 +#ifdef RAMSTER_TESTING
 +      if (ret != 0 && ret != -ENOENT)
 +              pr_err("TESTING zcache_pampd_repatriate returns, ret=%d\n",
 +                      ret);
 +#endif
 +      return ret;
 +}
 +
 +static struct tmem_pamops zcache_pamops = {
 +      .create = zcache_pampd_create,
 +      .get_data = zcache_pampd_get_data,
 +      .free = zcache_pampd_free,
 +      .get_data_and_free = zcache_pampd_get_data_and_free,
 +      .free_obj = zcache_pampd_free_obj,
 +      .is_remote = zcache_pampd_is_remote,
 +      .repatriate_preload = zcache_pampd_repatriate_preload,
 +      .repatriate = zcache_pampd_repatriate,
 +      .new_obj = zcache_pampd_new_obj,
 +      .replace_in_obj = zcache_pampd_replace_in_obj,
 +};
 +
 +/*
 + * zcache compression/decompression and related per-cpu stuff
 + */
 +
 +#define LZO_WORKMEM_BYTES LZO1X_1_MEM_COMPRESS
 +#define LZO_DSTMEM_PAGE_ORDER 1
 +static DEFINE_PER_CPU(unsigned char *, zcache_workmem);
 +static DEFINE_PER_CPU(unsigned char *, zcache_dstmem);
 +
 +static int zcache_compress(struct page *from, void **out_va, size_t *out_len)
 +{
 +      int ret = 0;
 +      unsigned char *dmem = __get_cpu_var(zcache_dstmem);
 +      unsigned char *wmem = __get_cpu_var(zcache_workmem);
 +      char *from_va;
 +
 +      BUG_ON(!irqs_disabled());
 +      if (unlikely(dmem == NULL || wmem == NULL))
 +              goto out;  /* no buffer, so can't compress */
-       from_va = kmap_atomic(from, KM_USER0);
++      from_va = kmap_atomic(from);
 +      mb();
 +      ret = lzo1x_1_compress(from_va, PAGE_SIZE, dmem, out_len, wmem);
 +      BUG_ON(ret != LZO_E_OK);
 +      *out_va = dmem;
-       kunmap_atomic(from_va, KM_USER0);
++      kunmap_atomic(from_va);
 +      ret = 1;
 +out:
 +      return ret;
 +}
 +
 +
 +static int zcache_cpu_notifier(struct notifier_block *nb,
 +                              unsigned long action, void *pcpu)
 +{
 +      int cpu = (long)pcpu;
 +      struct zcache_preload *kp;
 +
 +      switch (action) {
 +      case CPU_UP_PREPARE:
 +              per_cpu(zcache_dstmem, cpu) = (void *)__get_free_pages(
 +                      GFP_KERNEL | __GFP_REPEAT,
 +                      LZO_DSTMEM_PAGE_ORDER),
 +              per_cpu(zcache_workmem, cpu) =
 +                      kzalloc(LZO1X_MEM_COMPRESS,
 +                              GFP_KERNEL | __GFP_REPEAT);
 +              per_cpu(zcache_remoteputmem, cpu) =
 +                      kzalloc(PAGE_SIZE, GFP_KERNEL | __GFP_REPEAT);
 +              break;
 +      case CPU_DEAD:
 +      case CPU_UP_CANCELED:
 +              kfree(per_cpu(zcache_remoteputmem, cpu));
 +              per_cpu(zcache_remoteputmem, cpu) = NULL;
 +              free_pages((unsigned long)per_cpu(zcache_dstmem, cpu),
 +                              LZO_DSTMEM_PAGE_ORDER);
 +              per_cpu(zcache_dstmem, cpu) = NULL;
 +              kfree(per_cpu(zcache_workmem, cpu));
 +              per_cpu(zcache_workmem, cpu) = NULL;
 +              kp = &per_cpu(zcache_preloads, cpu);
 +              while (kp->nr) {
 +                      kmem_cache_free(zcache_objnode_cache,
 +                                      kp->objnodes[kp->nr - 1]);
 +                      kp->objnodes[kp->nr - 1] = NULL;
 +                      kp->nr--;
 +              }
 +              if (kp->obj) {
 +                      kmem_cache_free(zcache_obj_cache, kp->obj);
 +                      kp->obj = NULL;
 +              }
 +              if (kp->flnode) {
 +                      kmem_cache_free(ramster_flnode_cache, kp->flnode);
 +                      kp->flnode = NULL;
 +              }
 +              if (kp->page) {
 +                      free_page((unsigned long)kp->page);
 +                      kp->page = NULL;
 +              }
 +              break;
 +      default:
 +              break;
 +      }
 +      return NOTIFY_OK;
 +}
 +
 +static struct notifier_block zcache_cpu_notifier_block = {
 +      .notifier_call = zcache_cpu_notifier
 +};
 +
 +#ifdef CONFIG_SYSFS
 +#define ZCACHE_SYSFS_RO(_name) \
 +      static ssize_t zcache_##_name##_show(struct kobject *kobj, \
 +                              struct kobj_attribute *attr, char *buf) \
 +      { \
 +              return sprintf(buf, "%lu\n", zcache_##_name); \
 +      } \
 +      static struct kobj_attribute zcache_##_name##_attr = { \
 +              .attr = { .name = __stringify(_name), .mode = 0444 }, \
 +              .show = zcache_##_name##_show, \
 +      }
 +
 +#define ZCACHE_SYSFS_RO_ATOMIC(_name) \
 +      static ssize_t zcache_##_name##_show(struct kobject *kobj, \
 +                              struct kobj_attribute *attr, char *buf) \
 +      { \
 +          return sprintf(buf, "%d\n", atomic_read(&zcache_##_name)); \
 +      } \
 +      static struct kobj_attribute zcache_##_name##_attr = { \
 +              .attr = { .name = __stringify(_name), .mode = 0444 }, \
 +              .show = zcache_##_name##_show, \
 +      }
 +
 +#define ZCACHE_SYSFS_RO_CUSTOM(_name, _func) \
 +      static ssize_t zcache_##_name##_show(struct kobject *kobj, \
 +                              struct kobj_attribute *attr, char *buf) \
 +      { \
 +          return _func(buf); \
 +      } \
 +      static struct kobj_attribute zcache_##_name##_attr = { \
 +              .attr = { .name = __stringify(_name), .mode = 0444 }, \
 +              .show = zcache_##_name##_show, \
 +      }
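
For reference, the stringify-based read-only macro above, e.g. ZCACHE_SYSFS_RO(flush_total), expands to roughly the following (a sketch for illustration, not part of the patch; zcache_flush_total is the counter declared elsewhere in this file):

        /* approximate expansion of ZCACHE_SYSFS_RO(flush_total) */
        static ssize_t zcache_flush_total_show(struct kobject *kobj,
                                struct kobj_attribute *attr, char *buf)
        {
                return sprintf(buf, "%lu\n", zcache_flush_total);
        }
        static struct kobj_attribute zcache_flush_total_attr = {
                .attr = { .name = "flush_total", .mode = 0444 },
                .show = zcache_flush_total_show,
        };

Since zcache_init() later registers the attribute group on mm_kobj under the name "zcache", this counter would surface as /sys/kernel/mm/zcache/flush_total.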
 +
 +ZCACHE_SYSFS_RO(curr_obj_count_max);
 +ZCACHE_SYSFS_RO(curr_objnode_count_max);
 +ZCACHE_SYSFS_RO(flush_total);
 +ZCACHE_SYSFS_RO(flush_found);
 +ZCACHE_SYSFS_RO(flobj_total);
 +ZCACHE_SYSFS_RO(flobj_found);
 +ZCACHE_SYSFS_RO(failed_eph_puts);
 +ZCACHE_SYSFS_RO(nonactive_puts);
 +ZCACHE_SYSFS_RO(failed_pers_puts);
 +ZCACHE_SYSFS_RO(zbud_curr_zbytes);
 +ZCACHE_SYSFS_RO(zbud_cumul_zpages);
 +ZCACHE_SYSFS_RO(zbud_cumul_zbytes);
 +ZCACHE_SYSFS_RO(zbud_buddied_count);
 +ZCACHE_SYSFS_RO(evicted_raw_pages);
 +ZCACHE_SYSFS_RO(evicted_unbuddied_pages);
 +ZCACHE_SYSFS_RO(evicted_buddied_pages);
 +ZCACHE_SYSFS_RO(failed_get_free_pages);
 +ZCACHE_SYSFS_RO(failed_alloc);
 +ZCACHE_SYSFS_RO(put_to_flush);
 +ZCACHE_SYSFS_RO(compress_poor);
 +ZCACHE_SYSFS_RO(mean_compress_poor);
 +ZCACHE_SYSFS_RO(policy_percent_exceeded);
 +ZCACHE_SYSFS_RO_ATOMIC(zbud_curr_raw_pages);
 +ZCACHE_SYSFS_RO_ATOMIC(zbud_curr_zpages);
 +ZCACHE_SYSFS_RO_ATOMIC(curr_obj_count);
 +ZCACHE_SYSFS_RO_ATOMIC(curr_objnode_count);
 +ZCACHE_SYSFS_RO_CUSTOM(zbud_unbuddied_list_counts,
 +                      zbud_show_unbuddied_list_counts);
 +ZCACHE_SYSFS_RO_CUSTOM(zbud_cumul_chunk_counts,
 +                      zbud_show_cumul_chunk_counts);
 +ZCACHE_SYSFS_RO_CUSTOM(zv_curr_dist_counts,
 +                      zv_curr_dist_counts_show);
 +ZCACHE_SYSFS_RO_CUSTOM(zv_cumul_dist_counts,
 +                      zv_cumul_dist_counts_show);
 +
 +static struct attribute *zcache_attrs[] = {
 +      &zcache_curr_obj_count_attr.attr,
 +      &zcache_curr_obj_count_max_attr.attr,
 +      &zcache_curr_objnode_count_attr.attr,
 +      &zcache_curr_objnode_count_max_attr.attr,
 +      &zcache_flush_total_attr.attr,
 +      &zcache_flobj_total_attr.attr,
 +      &zcache_flush_found_attr.attr,
 +      &zcache_flobj_found_attr.attr,
 +      &zcache_failed_eph_puts_attr.attr,
 +      &zcache_nonactive_puts_attr.attr,
 +      &zcache_failed_pers_puts_attr.attr,
 +      &zcache_policy_percent_exceeded_attr.attr,
 +      &zcache_compress_poor_attr.attr,
 +      &zcache_mean_compress_poor_attr.attr,
 +      &zcache_zbud_curr_raw_pages_attr.attr,
 +      &zcache_zbud_curr_zpages_attr.attr,
 +      &zcache_zbud_curr_zbytes_attr.attr,
 +      &zcache_zbud_cumul_zpages_attr.attr,
 +      &zcache_zbud_cumul_zbytes_attr.attr,
 +      &zcache_zbud_buddied_count_attr.attr,
 +      &zcache_evicted_raw_pages_attr.attr,
 +      &zcache_evicted_unbuddied_pages_attr.attr,
 +      &zcache_evicted_buddied_pages_attr.attr,
 +      &zcache_failed_get_free_pages_attr.attr,
 +      &zcache_failed_alloc_attr.attr,
 +      &zcache_put_to_flush_attr.attr,
 +      &zcache_zbud_unbuddied_list_counts_attr.attr,
 +      &zcache_zbud_cumul_chunk_counts_attr.attr,
 +      &zcache_zv_curr_dist_counts_attr.attr,
 +      &zcache_zv_cumul_dist_counts_attr.attr,
 +      &zcache_zv_max_zsize_attr.attr,
 +      &zcache_zv_max_mean_zsize_attr.attr,
 +      &zcache_zv_page_count_policy_percent_attr.attr,
 +      NULL,
 +};
 +
 +static struct attribute_group zcache_attr_group = {
 +      .attrs = zcache_attrs,
 +      .name = "zcache",
 +};
 +
 +#define RAMSTER_SYSFS_RO(_name) \
 +      static ssize_t ramster_##_name##_show(struct kobject *kobj, \
 +                              struct kobj_attribute *attr, char *buf) \
 +      { \
 +              return sprintf(buf, "%lu\n", ramster_##_name); \
 +      } \
 +      static struct kobj_attribute ramster_##_name##_attr = { \
 +              .attr = { .name = __stringify(_name), .mode = 0444 }, \
 +              .show = ramster_##_name##_show, \
 +      }
 +
 +#define RAMSTER_SYSFS_RW(_name) \
 +      static ssize_t ramster_##_name##_show(struct kobject *kobj, \
 +                              struct kobj_attribute *attr, char *buf) \
 +      { \
 +              return sprintf(buf, "%lu\n", ramster_##_name); \
 +      } \
 +      static ssize_t ramster_##_name##_store(struct kobject *kobj, \
 +              struct kobj_attribute *attr, const char *buf, size_t count) \
 +      { \
 +              int err; \
 +              unsigned long enable; \
 +              err = kstrtoul(buf, 10, &enable); \
 +              if (err) \
 +                      return -EINVAL; \
 +              ramster_##_name = enable; \
 +              return count; \
 +      } \
 +      static struct kobj_attribute ramster_##_name##_attr = { \
 +              .attr = { .name = __stringify(_name), .mode = 0644 }, \
 +              .show = ramster_##_name##_show, \
 +              .store = ramster_##_name##_store, \
 +      }
 +
 +#define RAMSTER_SYSFS_RO_ATOMIC(_name) \
 +      static ssize_t ramster_##_name##_show(struct kobject *kobj, \
 +                              struct kobj_attribute *attr, char *buf) \
 +      { \
 +          return sprintf(buf, "%d\n", atomic_read(&ramster_##_name)); \
 +      } \
 +      static struct kobj_attribute ramster_##_name##_attr = { \
 +              .attr = { .name = __stringify(_name), .mode = 0444 }, \
 +              .show = ramster_##_name##_show, \
 +      }
 +
 +RAMSTER_SYSFS_RO(interface_revision);
 +RAMSTER_SYSFS_RO_ATOMIC(remote_pers_pages);
 +RAMSTER_SYSFS_RW(pers_remotify_enable);
 +RAMSTER_SYSFS_RW(eph_remotify_enable);
 +RAMSTER_SYSFS_RO(eph_pages_remoted);
 +RAMSTER_SYSFS_RO(eph_pages_remote_failed);
 +RAMSTER_SYSFS_RO(pers_pages_remoted);
 +RAMSTER_SYSFS_RO(pers_pages_remote_failed);
 +RAMSTER_SYSFS_RO(pers_pages_remote_nomem);
 +RAMSTER_SYSFS_RO(remote_pages_flushed);
 +RAMSTER_SYSFS_RO(remote_page_flushes_failed);
 +RAMSTER_SYSFS_RO(remote_objects_flushed);
 +RAMSTER_SYSFS_RO(remote_object_flushes_failed);
 +RAMSTER_SYSFS_RO(remote_eph_pages_succ_get);
 +RAMSTER_SYSFS_RO(remote_eph_pages_unsucc_get);
 +RAMSTER_SYSFS_RO(remote_pers_pages_succ_get);
 +RAMSTER_SYSFS_RO(remote_pers_pages_unsucc_get);
 +RAMSTER_SYSFS_RO_ATOMIC(foreign_eph_pampd_count);
 +RAMSTER_SYSFS_RO(foreign_eph_pampd_count_max);
 +RAMSTER_SYSFS_RO_ATOMIC(foreign_pers_pampd_count);
 +RAMSTER_SYSFS_RO(foreign_pers_pampd_count_max);
 +RAMSTER_SYSFS_RO_ATOMIC(curr_flnode_count);
 +RAMSTER_SYSFS_RO(curr_flnode_count_max);
 +
 +#define MANUAL_NODES 8
 +static bool ramster_nodes_manual_up[MANUAL_NODES];
 +static ssize_t ramster_manual_node_up_show(struct kobject *kobj,
 +                              struct kobj_attribute *attr, char *buf)
 +{
 +      int i;
 +      char *p = buf;
 +      for (i = 0; i < MANUAL_NODES; i++)
 +              if (ramster_nodes_manual_up[i])
 +                      p += sprintf(p, "%d ", i);
 +      p += sprintf(p, "\n");
 +      return p - buf;
 +}
 +
 +static ssize_t ramster_manual_node_up_store(struct kobject *kobj,
 +              struct kobj_attribute *attr, const char *buf, size_t count)
 +{
 +      int err;
 +      unsigned long node_num;
 +
 +      err = kstrtoul(buf, 10, &node_num);
 +      if (err) {
 +              pr_err("ramster: bad strtoul?\n");
 +              return -EINVAL;
 +      }
 +      if (node_num >= MANUAL_NODES) {
 +              pr_err("ramster: bad node_num=%lu?\n", node_num);
 +              return -EINVAL;
 +      }
 +      if (ramster_nodes_manual_up[node_num]) {
 +              pr_err("ramster: node %d already up, ignoring\n",
 +                                                      (int)node_num);
 +      } else {
 +              ramster_nodes_manual_up[node_num] = true;
 +              r2net_hb_node_up_manual((int)node_num);
 +      }
 +      return count;
 +}
 +
 +static struct kobj_attribute ramster_manual_node_up_attr = {
 +      .attr = { .name = "manual_node_up", .mode = 0644 },
 +      .show = ramster_manual_node_up_show,
 +      .store = ramster_manual_node_up_store,
 +};
 +
 +static ssize_t ramster_remote_target_nodenum_show(struct kobject *kobj,
 +                              struct kobj_attribute *attr, char *buf)
 +{
 +      if (ramster_remote_target_nodenum == -1UL)
 +              return sprintf(buf, "unset\n");
 +      else
 +              return sprintf(buf, "%d\n", ramster_remote_target_nodenum);
 +}
 +
 +static ssize_t ramster_remote_target_nodenum_store(struct kobject *kobj,
 +              struct kobj_attribute *attr, const char *buf, size_t count)
 +{
 +      int err;
 +      unsigned long node_num;
 +
 +      err = kstrtoul(buf, 10, &node_num);
 +      if (err) {
 +              pr_err("ramster: bad strtoul?\n");
 +              return -EINVAL;
 +      } else if (node_num == -1UL) {
 +              pr_err("ramster: disabling all remotification, "
 +                      "data may still reside on remote nodes however\n");
 +              return -EINVAL;
 +      } else if (node_num >= MANUAL_NODES) {
 +              pr_err("ramster: bad node_num=%lu?\n", node_num);
 +              return -EINVAL;
 +      } else if (!ramster_nodes_manual_up[node_num]) {
 +              pr_err("ramster: node %d not up, ignoring setting "
 +                      "of remotification target\n", (int)node_num);
 +      } else if (r2net_remote_target_node_set((int)node_num) >= 0) {
 +              pr_info("ramster: node %d set as remotification target\n",
 +                              (int)node_num);
 +              ramster_remote_target_nodenum = (int)node_num;
 +      } else {
 +              pr_err("ramster: bad num to node node_num=%d?\n",
 +                              (int)node_num);
 +              return -EINVAL;
 +      }
 +      return count;
 +}
 +
 +static struct kobj_attribute ramster_remote_target_nodenum_attr = {
 +      .attr = { .name = "remote_target_nodenum", .mode = 0644 },
 +      .show = ramster_remote_target_nodenum_show,
 +      .store = ramster_remote_target_nodenum_store,
 +};
 +
 +
 +static struct attribute *ramster_attrs[] = {
 +      &ramster_interface_revision_attr.attr,
 +      &ramster_pers_remotify_enable_attr.attr,
 +      &ramster_eph_remotify_enable_attr.attr,
 +      &ramster_remote_pers_pages_attr.attr,
 +      &ramster_eph_pages_remoted_attr.attr,
 +      &ramster_eph_pages_remote_failed_attr.attr,
 +      &ramster_pers_pages_remoted_attr.attr,
 +      &ramster_pers_pages_remote_failed_attr.attr,
 +      &ramster_pers_pages_remote_nomem_attr.attr,
 +      &ramster_remote_pages_flushed_attr.attr,
 +      &ramster_remote_page_flushes_failed_attr.attr,
 +      &ramster_remote_objects_flushed_attr.attr,
 +      &ramster_remote_object_flushes_failed_attr.attr,
 +      &ramster_remote_eph_pages_succ_get_attr.attr,
 +      &ramster_remote_eph_pages_unsucc_get_attr.attr,
 +      &ramster_remote_pers_pages_succ_get_attr.attr,
 +      &ramster_remote_pers_pages_unsucc_get_attr.attr,
 +      &ramster_foreign_eph_pampd_count_attr.attr,
 +      &ramster_foreign_eph_pampd_count_max_attr.attr,
 +      &ramster_foreign_pers_pampd_count_attr.attr,
 +      &ramster_foreign_pers_pampd_count_max_attr.attr,
 +      &ramster_curr_flnode_count_attr.attr,
 +      &ramster_curr_flnode_count_max_attr.attr,
 +      &ramster_manual_node_up_attr.attr,
 +      &ramster_remote_target_nodenum_attr.attr,
 +      NULL,
 +};
 +
 +static struct attribute_group ramster_attr_group = {
 +      .attrs = ramster_attrs,
 +      .name = "ramster",
 +};
 +
 +#endif /* CONFIG_SYSFS */
 +/*
 + * When zcache is disabled ("frozen"), pools can be created and destroyed,
 + * but all puts (and thus all other operations that require memory allocation)
 + * must fail.  If zcache is unfrozen, accepts puts, then frozen again,
 + * data consistency requires all puts while frozen to be converted into
 + * flushes.
 + */
 +static bool zcache_freeze;
 +
 +/*
 + * zcache shrinker interface (only useful for ephemeral pages, so zbud only)
 + */
 +static int shrink_zcache_memory(struct shrinker *shrink,
 +                              struct shrink_control *sc)
 +{
 +      int ret = -1;
 +      int nr = sc->nr_to_scan;
 +      gfp_t gfp_mask = sc->gfp_mask;
 +
 +      if (nr >= 0) {
 +              if (!(gfp_mask & __GFP_FS))
 +                      /* does this case really need to be skipped? */
 +                      goto out;
 +              zbud_evict_pages(nr);
 +      }
 +      ret = (int)atomic_read(&zcache_zbud_curr_raw_pages);
 +out:
 +      return ret;
 +}
 +
 +static struct shrinker zcache_shrinker = {
 +      .shrink = shrink_zcache_memory,
 +      .seeks = DEFAULT_SEEKS,
 +};
 +
 +/*
 + * zcache shims between cleancache/frontswap ops and tmem
 + */
 +
 +int zcache_put(int cli_id, int pool_id, struct tmem_oid *oidp,
 +                      uint32_t index, char *data, size_t size,
 +                      bool raw, int ephemeral)
 +{
 +      struct tmem_pool *pool;
 +      int ret = -1;
 +
 +      BUG_ON(!irqs_disabled());
 +      pool = zcache_get_pool_by_id(cli_id, pool_id);
 +      if (unlikely(pool == NULL))
 +              goto out;
 +      if (!zcache_freeze && zcache_do_preload(pool) == 0) {
 +              /* preload does preempt_disable on success */
 +              ret = tmem_put(pool, oidp, index, data, size, raw, ephemeral);
 +              if (ret < 0) {
 +                      if (is_ephemeral(pool))
 +                              zcache_failed_eph_puts++;
 +                      else
 +                              zcache_failed_pers_puts++;
 +              }
 +              zcache_put_pool(pool);
 +              preempt_enable_no_resched();
 +      } else {
 +              zcache_put_to_flush++;
 +              if (atomic_read(&pool->obj_count) > 0)
 +                      /* the put fails whether the flush succeeds or not */
 +                      (void)tmem_flush_page(pool, oidp, index);
 +              zcache_put_pool(pool);
 +      }
 +out:
 +      return ret;
 +}
 +
 +int zcache_get(int cli_id, int pool_id, struct tmem_oid *oidp,
 +                      uint32_t index, char *data, size_t *sizep,
 +                      bool raw, int get_and_free)
 +{
 +      struct tmem_pool *pool;
 +      int ret = -1;
 +      bool eph;
 +
 +      if (!raw) {
 +              BUG_ON(irqs_disabled());
 +              BUG_ON(in_softirq());
 +      }
 +      pool = zcache_get_pool_by_id(cli_id, pool_id);
 +      eph = is_ephemeral(pool);
 +      if (likely(pool != NULL)) {
 +              if (atomic_read(&pool->obj_count) > 0)
 +                      ret = tmem_get(pool, oidp, index, data, sizep,
 +                                      raw, get_and_free);
 +              zcache_put_pool(pool);
 +      }
 +      WARN_ONCE((!eph && (ret != 0)), "zcache_get fails on persistent pool, "
 +                        "bad things are very likely to happen soon\n");
 +#ifdef RAMSTER_TESTING
 +      if (ret != 0 && ret != -1 && !(ret == -EINVAL && is_ephemeral(pool)))
 +              pr_err("TESTING zcache_get tmem_get returns ret=%d\n", ret);
 +#endif
 +      if (ret == -EAGAIN)
 +              BUG(); /* FIXME... don't need this anymore??? let's ensure */
 +      return ret;
 +}
 +
 +int zcache_flush(int cli_id, int pool_id,
 +                              struct tmem_oid *oidp, uint32_t index)
 +{
 +      struct tmem_pool *pool;
 +      int ret = -1;
 +      unsigned long flags;
 +
 +      local_irq_save(flags);
 +      zcache_flush_total++;
 +      pool = zcache_get_pool_by_id(cli_id, pool_id);
 +      ramster_do_preload_flnode_only(pool);
 +      if (likely(pool != NULL)) {
 +              if (atomic_read(&pool->obj_count) > 0)
 +                      ret = tmem_flush_page(pool, oidp, index);
 +              zcache_put_pool(pool);
 +      }
 +      if (ret >= 0)
 +              zcache_flush_found++;
 +      local_irq_restore(flags);
 +      return ret;
 +}
 +
 +int zcache_flush_object(int cli_id, int pool_id, struct tmem_oid *oidp)
 +{
 +      struct tmem_pool *pool;
 +      int ret = -1;
 +      unsigned long flags;
 +
 +      local_irq_save(flags);
 +      zcache_flobj_total++;
 +      pool = zcache_get_pool_by_id(cli_id, pool_id);
 +      ramster_do_preload_flnode_only(pool);
 +      if (likely(pool != NULL)) {
 +              if (atomic_read(&pool->obj_count) > 0)
 +                      ret = tmem_flush_object(pool, oidp);
 +              zcache_put_pool(pool);
 +      }
 +      if (ret >= 0)
 +              zcache_flobj_found++;
 +      local_irq_restore(flags);
 +      return ret;
 +}
 +
 +int zcache_client_destroy_pool(int cli_id, int pool_id)
 +{
 +      struct tmem_pool *pool = NULL;
 +      struct zcache_client *cli = NULL;
 +      int ret = -1;
 +
 +      if (pool_id < 0)
 +              goto out;
 +      if (cli_id == LOCAL_CLIENT)
 +              cli = &zcache_host;
 +      else if ((unsigned int)cli_id < MAX_CLIENTS)
 +              cli = &zcache_clients[cli_id];
 +      if (cli == NULL)
 +              goto out;
 +      atomic_inc(&cli->refcount);
 +      pool = cli->tmem_pools[pool_id];
 +      if (pool == NULL)
 +              goto out;
 +      cli->tmem_pools[pool_id] = NULL;
 +      /* wait for pool activity on other cpus to quiesce */
 +      while (atomic_read(&pool->refcount) != 0)
 +              ;
 +      atomic_dec(&cli->refcount);
 +      local_bh_disable();
 +      ret = tmem_destroy_pool(pool);
 +      local_bh_enable();
 +      kfree(pool);
 +      pr_info("ramster: destroyed pool id=%d cli_id=%d\n", pool_id, cli_id);
 +out:
 +      return ret;
 +}
 +
 +static int zcache_destroy_pool(int pool_id)
 +{
 +      return zcache_client_destroy_pool(LOCAL_CLIENT, pool_id);
 +}
 +
 +int zcache_new_pool(uint16_t cli_id, uint32_t flags)
 +{
 +      int poolid = -1;
 +      struct tmem_pool *pool;
 +      struct zcache_client *cli = NULL;
 +
 +      if (cli_id == LOCAL_CLIENT)
 +              cli = &zcache_host;
 +      else if ((unsigned int)cli_id < MAX_CLIENTS)
 +              cli = &zcache_clients[cli_id];
 +      if (cli == NULL)
 +              goto out;
 +      atomic_inc(&cli->refcount);
 +      pool = kmalloc(sizeof(struct tmem_pool), GFP_ATOMIC);
 +      if (pool == NULL) {
 +              pr_info("ramster: pool creation failed: out of memory\n");
 +              goto out;
 +      }
 +
 +      for (poolid = 0; poolid < MAX_POOLS_PER_CLIENT; poolid++)
 +              if (cli->tmem_pools[poolid] == NULL)
 +                      break;
 +      if (poolid >= MAX_POOLS_PER_CLIENT) {
 +              pr_info("ramster: pool creation failed: max exceeded\n");
 +              kfree(pool);
 +              poolid = -1;
 +              goto out;
 +      }
 +      atomic_set(&pool->refcount, 0);
 +      pool->client = cli;
 +      pool->pool_id = poolid;
 +      tmem_new_pool(pool, flags);
 +      cli->tmem_pools[poolid] = pool;
 +      if (cli_id == LOCAL_CLIENT)
 +              pr_info("ramster: created %s tmem pool, id=%d, local client\n",
 +                      flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral",
 +                      poolid);
 +      else
 +              pr_info("ramster: created %s tmem pool, id=%d, client=%d\n",
 +                      flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral",
 +                      poolid, cli_id);
 +out:
 +      if (cli != NULL)
 +              atomic_dec(&cli->refcount);
 +      return poolid;
 +}
 +
 +static int zcache_local_new_pool(uint32_t flags)
 +{
 +      return zcache_new_pool(LOCAL_CLIENT, flags);
 +}
 +
 +int zcache_autocreate_pool(int cli_id, int pool_id, bool ephemeral)
 +{
 +      struct tmem_pool *pool;
 +      struct zcache_client *cli = NULL;
 +      uint32_t flags = ephemeral ? 0 : TMEM_POOL_PERSIST;
 +      int ret = -1;
 +
 +      if (cli_id == LOCAL_CLIENT)
 +              goto out;
 +      if (pool_id >= MAX_POOLS_PER_CLIENT)
 +              goto out;
 +      else if ((unsigned int)cli_id < MAX_CLIENTS)
 +              cli = &zcache_clients[cli_id];
 +      if ((ephemeral && !use_cleancache) || (!ephemeral && !use_frontswap))
 +              BUG(); /* FIXME, handle more gracefully later */
 +      if (!cli->allocated) {
 +              if (zcache_new_client(cli_id))
 +                      BUG(); /* FIXME, handle more gracefully later */
 +              cli = &zcache_clients[cli_id];
 +      }
 +      atomic_inc(&cli->refcount);
 +      pool = cli->tmem_pools[pool_id];
 +      if (pool != NULL) {
 +              if (pool->persistent && ephemeral) {
 +                      pr_err("zcache_autocreate_pool: type mismatch\n");
 +                      goto out;
 +              }
 +              ret = 0;
 +              goto out;
 +      }
 +      pool = kmalloc(sizeof(struct tmem_pool), GFP_KERNEL);
 +      if (pool == NULL) {
 +              pr_info("ramster: pool creation failed: out of memory\n");
 +              goto out;
 +      }
 +      atomic_set(&pool->refcount, 0);
 +      pool->client = cli;
 +      pool->pool_id = pool_id;
 +      tmem_new_pool(pool, flags);
 +      cli->tmem_pools[pool_id] = pool;
 +      pr_info("ramster: AUTOcreated %s tmem poolid=%d, for remote client=%d\n",
 +              flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral",
 +              pool_id, cli_id);
 +      ret = 0;
 +out:
 +      if (cli == NULL)
 +              BUG(); /* FIXME, handle more gracefully later */
 +              /* pr_err("zcache_autocreate_pool: failed\n"); */
 +      if (cli != NULL)
 +              atomic_dec(&cli->refcount);
 +      return ret;
 +}
 +
 +/**********
 + * Two kernel functionalities currently can be layered on top of tmem.
 + * These are "cleancache" which is used as a second-chance cache for clean
 + * page cache pages; and "frontswap" which is used for swap pages
 + * to avoid writes to disk.  A generic "shim" is provided here for each
 + * to translate in-kernel semantics to zcache semantics.
 + */
 +
 +#ifdef CONFIG_CLEANCACHE
 +static void zcache_cleancache_put_page(int pool_id,
 +                                      struct cleancache_filekey key,
 +                                      pgoff_t index, struct page *page)
 +{
 +      u32 ind = (u32) index;
 +      struct tmem_oid oid = *(struct tmem_oid *)&key;
 +
 +#ifdef __PG_WAS_ACTIVE
 +      if (!PageWasActive(page)) {
 +              zcache_nonactive_puts++;
 +              return;
 +      }
 +#endif
 +      if (likely(ind == index)) {
 +              char *kva = page_address(page);
 +
 +              (void)zcache_put(LOCAL_CLIENT, pool_id, &oid, index,
 +                      kva, PAGE_SIZE, 0, 1);
 +      }
 +}
 +
 +static int zcache_cleancache_get_page(int pool_id,
 +                                      struct cleancache_filekey key,
 +                                      pgoff_t index, struct page *page)
 +{
 +      u32 ind = (u32) index;
 +      struct tmem_oid oid = *(struct tmem_oid *)&key;
 +      int ret = -1;
 +
 +      preempt_disable();
 +      if (likely(ind == index)) {
 +              char *kva = page_address(page);
 +              size_t size = PAGE_SIZE;
 +
 +              ret = zcache_get(LOCAL_CLIENT, pool_id, &oid, index,
 +                      kva, &size, 0, 0);
 +#ifdef __PG_WAS_ACTIVE
 +              if (ret == 0)
 +                      SetPageWasActive(page);
 +#endif
 +      }
 +      preempt_enable();
 +      return ret;
 +}
 +
 +static void zcache_cleancache_flush_page(int pool_id,
 +                                      struct cleancache_filekey key,
 +                                      pgoff_t index)
 +{
 +      u32 ind = (u32) index;
 +      struct tmem_oid oid = *(struct tmem_oid *)&key;
 +
 +      if (likely(ind == index))
 +              (void)zcache_flush(LOCAL_CLIENT, pool_id, &oid, ind);
 +}
 +
 +static void zcache_cleancache_flush_inode(int pool_id,
 +                                      struct cleancache_filekey key)
 +{
 +      struct tmem_oid oid = *(struct tmem_oid *)&key;
 +
 +      (void)zcache_flush_object(LOCAL_CLIENT, pool_id, &oid);
 +}
 +
 +static void zcache_cleancache_flush_fs(int pool_id)
 +{
 +      if (pool_id >= 0)
 +              (void)zcache_destroy_pool(pool_id);
 +}
 +
 +static int zcache_cleancache_init_fs(size_t pagesize)
 +{
 +      BUG_ON(sizeof(struct cleancache_filekey) !=
 +                              sizeof(struct tmem_oid));
 +      BUG_ON(pagesize != PAGE_SIZE);
 +      return zcache_local_new_pool(0);
 +}
 +
 +static int zcache_cleancache_init_shared_fs(char *uuid, size_t pagesize)
 +{
 +      /* shared pools are unsupported and map to private */
 +      BUG_ON(sizeof(struct cleancache_filekey) !=
 +                              sizeof(struct tmem_oid));
 +      BUG_ON(pagesize != PAGE_SIZE);
 +      return zcache_local_new_pool(0);
 +}
 +
 +static struct cleancache_ops zcache_cleancache_ops = {
 +      .put_page = zcache_cleancache_put_page,
 +      .get_page = zcache_cleancache_get_page,
 +      .invalidate_page = zcache_cleancache_flush_page,
 +      .invalidate_inode = zcache_cleancache_flush_inode,
 +      .invalidate_fs = zcache_cleancache_flush_fs,
 +      .init_shared_fs = zcache_cleancache_init_shared_fs,
 +      .init_fs = zcache_cleancache_init_fs
 +};
 +
 +struct cleancache_ops zcache_cleancache_register_ops(void)
 +{
 +      struct cleancache_ops old_ops =
 +              cleancache_register_ops(&zcache_cleancache_ops);
 +
 +      return old_ops;
 +}
 +#endif
 +
 +#ifdef CONFIG_FRONTSWAP
 +/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
 +static int zcache_frontswap_poolid = -1;
 +
 +/*
 + * Swizzling increases objects per swaptype, increasing tmem concurrency
 + * for heavy swaploads.  Later, larger nr_cpus -> larger SWIZ_BITS
 + */
 +#define SWIZ_BITS             8
 +#define SWIZ_MASK             ((1 << SWIZ_BITS) - 1)
 +#define _oswiz(_type, _ind)   ((_type << SWIZ_BITS) | (_ind & SWIZ_MASK))
 +#define iswiz(_ind)           (_ind >> SWIZ_BITS)
 +
 +static inline struct tmem_oid oswiz(unsigned type, u32 ind)
 +{
 +      struct tmem_oid oid = { .oid = { 0 } };
 +      oid.oid[0] = _oswiz(type, ind);
 +      return oid;
 +}
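
A quick worked example of the swizzling above (illustrative numbers only): with SWIZ_BITS = 8, a page at swap offset 0x1234 in swap type 1 lands in oid _oswiz(1, 0x1234) = (1 << 8) | (0x1234 & 0xff) = 0x134 at index iswiz(0x1234) = 0x12; consecutive swap offsets therefore fan out over 256 distinct tmem objects per swap type, which is what buys the extra tmem concurrency under heavy swap load.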
 +
 +static int zcache_frontswap_put_page(unsigned type, pgoff_t offset,
 +                                 struct page *page)
 +{
 +      u64 ind64 = (u64)offset;
 +      u32 ind = (u32)offset;
 +      struct tmem_oid oid = oswiz(type, ind);
 +      int ret = -1;
 +      unsigned long flags;
 +      char *kva;
 +
 +      BUG_ON(!PageLocked(page));
 +      if (likely(ind64 == ind)) {
 +              local_irq_save(flags);
 +              kva = page_address(page);
 +              ret = zcache_put(LOCAL_CLIENT, zcache_frontswap_poolid,
 +                              &oid, iswiz(ind), kva, PAGE_SIZE, 0, 0);
 +              local_irq_restore(flags);
 +      }
 +      return ret;
 +}
 +
 +/* returns 0 if the page was successfully gotten from frontswap, -1 if
 + * it was not present (should never happen!) */
 +static int zcache_frontswap_get_page(unsigned type, pgoff_t offset,
 +                                 struct page *page)
 +{
 +      u64 ind64 = (u64)offset;
 +      u32 ind = (u32)offset;
 +      struct tmem_oid oid = oswiz(type, ind);
 +      int ret = -1;
 +
 +      preempt_disable(); /* FIXME, remove this? */
 +      BUG_ON(!PageLocked(page));
 +      if (likely(ind64 == ind)) {
 +              char *kva = page_address(page);
 +              size_t size = PAGE_SIZE;
 +
 +              ret = zcache_get(LOCAL_CLIENT, zcache_frontswap_poolid,
 +                                      &oid, iswiz(ind), kva, &size, 0, -1);
 +      }
 +      preempt_enable(); /* FIXME, remove this? */
 +      return ret;
 +}
 +
 +/* flush a single page from frontswap */
 +static void zcache_frontswap_flush_page(unsigned type, pgoff_t offset)
 +{
 +      u64 ind64 = (u64)offset;
 +      u32 ind = (u32)offset;
 +      struct tmem_oid oid = oswiz(type, ind);
 +
 +      if (likely(ind64 == ind))
 +              (void)zcache_flush(LOCAL_CLIENT, zcache_frontswap_poolid,
 +                                      &oid, iswiz(ind));
 +}
 +
 +/* flush all pages from the passed swaptype */
 +static void zcache_frontswap_flush_area(unsigned type)
 +{
 +      struct tmem_oid oid;
 +      int ind;
 +
 +      for (ind = SWIZ_MASK; ind >= 0; ind--) {
 +              oid = oswiz(type, ind);
 +              (void)zcache_flush_object(LOCAL_CLIENT,
 +                                              zcache_frontswap_poolid, &oid);
 +      }
 +}
 +
 +static void zcache_frontswap_init(unsigned ignored)
 +{
 +      /* a single tmem poolid is used for all frontswap "types" (swapfiles) */
 +      if (zcache_frontswap_poolid < 0)
 +              zcache_frontswap_poolid =
 +                              zcache_local_new_pool(TMEM_POOL_PERSIST);
 +}
 +
 +static struct frontswap_ops zcache_frontswap_ops = {
 +      .put_page = zcache_frontswap_put_page,
 +      .get_page = zcache_frontswap_get_page,
 +      .invalidate_page = zcache_frontswap_flush_page,
 +      .invalidate_area = zcache_frontswap_flush_area,
 +      .init = zcache_frontswap_init
 +};
 +
 +struct frontswap_ops zcache_frontswap_register_ops(void)
 +{
 +      struct frontswap_ops old_ops =
 +              frontswap_register_ops(&zcache_frontswap_ops);
 +
 +      return old_ops;
 +}
 +#endif
 +
 +/*
 + * frontswap selfshrinking
 + */
 +
 +#ifdef CONFIG_FRONTSWAP
 +/* In HZ, controls frequency of worker invocation. */
 +static unsigned int selfshrink_interval __read_mostly = 5;
 +
 +static void selfshrink_process(struct work_struct *work);
 +static DECLARE_DELAYED_WORK(selfshrink_worker, selfshrink_process);
 +
 +/* Enable/disable with sysfs. */
 +static bool frontswap_selfshrinking __read_mostly;
 +
 +/* Enable/disable with kernel boot option. */
 +static bool use_frontswap_selfshrink __initdata = true;
 +
 +/*
 + * The default values for the following parameters were deemed reasonable
 + * by experimentation, may be workload-dependent, and can all be
 + * adjusted via sysfs.
 + */
 +
 +/* Control rate for frontswap shrinking. Higher hysteresis is slower. */
 +static unsigned int frontswap_hysteresis __read_mostly = 20;
 +
 +/*
 + * Number of selfshrink worker invocations to wait before observing that
 + * frontswap selfshrinking should commence. Note that selfshrinking does
 + * not use a separate worker thread.
 + */
 +static unsigned int frontswap_inertia __read_mostly = 3;
 +
 +/* Countdown to next invocation of frontswap_shrink() */
 +static unsigned long frontswap_inertia_counter;
 +
 +/*
 + * Invoked by the selfshrink worker thread, uses current number of pages
 + * in frontswap (frontswap_curr_pages()), previous status, and control
 + * values (hysteresis and inertia) to determine if frontswap should be
 + * shrunk and what the new frontswap size should be.  Note that
 + * frontswap_shrink is essentially a partial swapoff that immediately
 + * transfers pages from the "swap device" (frontswap) back into kernel
 + * RAM; despite the name, frontswap "shrinking" is very different from
 + * the "shrinker" interface used by the kernel MM subsystem to reclaim
 + * memory.
 + */
 +static void frontswap_selfshrink(void)
 +{
 +      static unsigned long cur_frontswap_pages;
 +      static unsigned long last_frontswap_pages;
 +      static unsigned long tgt_frontswap_pages;
 +
 +      last_frontswap_pages = cur_frontswap_pages;
 +      cur_frontswap_pages = frontswap_curr_pages();
 +      if (!cur_frontswap_pages ||
 +                      (cur_frontswap_pages > last_frontswap_pages)) {
 +              frontswap_inertia_counter = frontswap_inertia;
 +              return;
 +      }
 +      if (frontswap_inertia_counter && --frontswap_inertia_counter)
 +              return;
 +      if (cur_frontswap_pages <= frontswap_hysteresis)
 +              tgt_frontswap_pages = 0;
 +      else
 +              tgt_frontswap_pages = cur_frontswap_pages -
 +                      (cur_frontswap_pages / frontswap_hysteresis);
 +      frontswap_shrink(tgt_frontswap_pages);
 +}
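
To put rough numbers on the heuristic above (illustrative only): with the default frontswap_hysteresis of 20 and frontswap_inertia of 3, once frontswap_curr_pages() has stopped growing for three consecutive worker wakeups and currently reports, say, 1000 pages, the target becomes 1000 - 1000/20 = 950 pages, i.e. about 5% of frontswap is pushed back into kernel RAM per selfshrink_interval; if the count has fallen to 20 pages or fewer, the target drops straight to 0.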
 +
 +static int __init ramster_nofrontswap_selfshrink_setup(char *s)
 +{
 +      use_frontswap_selfshrink = false;
 +      return 1;
 +}
 +
 +__setup("noselfshrink", ramster_nofrontswap_selfshrink_setup);
 +
 +static void selfshrink_process(struct work_struct *work)
 +{
 +      if (frontswap_selfshrinking && frontswap_enabled) {
 +              frontswap_selfshrink();
 +              schedule_delayed_work(&selfshrink_worker,
 +                      selfshrink_interval * HZ);
 +      }
 +}
 +
 +static int ramster_enabled;
 +
 +static int __init ramster_selfshrink_init(void)
 +{
 +      frontswap_selfshrinking = ramster_enabled && use_frontswap_selfshrink;
 +      if (frontswap_selfshrinking)
 +              pr_info("ramster: Initializing frontswap "
 +                                      "selfshrinking driver.\n");
 +      else
 +              return -ENODEV;
 +
 +      schedule_delayed_work(&selfshrink_worker, selfshrink_interval * HZ);
 +
 +      return 0;
 +}
 +
 +subsys_initcall(ramster_selfshrink_init);
 +#endif
 +
 +/*
 + * zcache initialization
 + * NOTE FOR NOW ramster MUST BE PROVIDED AS A KERNEL BOOT PARAMETER OR
 + * NOTHING HAPPENS!
 + */
 +
 +static int ramster_enabled;
 +
 +static int __init enable_ramster(char *s)
 +{
 +      ramster_enabled = 1;
 +      return 1;
 +}
 +__setup("ramster", enable_ramster);
 +
 +/* allow independent dynamic disabling of cleancache and frontswap */
 +
 +static int use_cleancache = 1;
 +
 +static int __init no_cleancache(char *s)
 +{
 +      pr_info("INIT no_cleancache called\n");
 +      use_cleancache = 0;
 +      return 1;
 +}
 +
 +/*
 + * FIXME: need to guarantee this gets checked before zcache_init is called
 + * What is the correct way to achieve this?
 + */
 +early_param("nocleancache", no_cleancache);
 +
 +static int use_frontswap = 1;
 +
 +static int __init no_frontswap(char *s)
 +{
 +      pr_info("INIT no_frontswap called\n");
 +      use_frontswap = 0;
 +      return 1;
 +}
 +
 +__setup("nofrontswap", no_frontswap);
 +
 +static int __init zcache_init(void)
 +{
 +      int ret = 0;
 +
 +#ifdef CONFIG_SYSFS
 +      ret = sysfs_create_group(mm_kobj, &zcache_attr_group);
 +      ret = sysfs_create_group(mm_kobj, &ramster_attr_group);
 +      if (ret) {
 +              pr_err("ramster: can't create sysfs\n");
 +              goto out;
 +      }
 +#endif /* CONFIG_SYSFS */
 +#if defined(CONFIG_CLEANCACHE) || defined(CONFIG_FRONTSWAP)
 +      if (ramster_enabled) {
 +              unsigned int cpu;
 +
 +              (void)r2net_register_handlers();
 +              tmem_register_hostops(&zcache_hostops);
 +              tmem_register_pamops(&zcache_pamops);
 +              ret = register_cpu_notifier(&zcache_cpu_notifier_block);
 +              if (ret) {
 +                      pr_err("ramster: can't register cpu notifier\n");
 +                      goto out;
 +              }
 +              for_each_online_cpu(cpu) {
 +                      void *pcpu = (void *)(long)cpu;
 +                      zcache_cpu_notifier(&zcache_cpu_notifier_block,
 +                              CPU_UP_PREPARE, pcpu);
 +              }
 +      }
 +      zcache_objnode_cache = kmem_cache_create("zcache_objnode",
 +                              sizeof(struct tmem_objnode), 0, 0, NULL);
 +      zcache_obj_cache = kmem_cache_create("zcache_obj",
 +                              sizeof(struct tmem_obj), 0, 0, NULL);
 +      ramster_flnode_cache = kmem_cache_create("ramster_flnode",
 +                              sizeof(struct flushlist_node), 0, 0, NULL);
 +#endif
 +#ifdef CONFIG_CLEANCACHE
 +      pr_info("INIT ramster_enabled=%d use_cleancache=%d\n",
 +                                      ramster_enabled, use_cleancache);
 +      if (ramster_enabled && use_cleancache) {
 +              struct cleancache_ops old_ops;
 +
 +              zbud_init();
 +              register_shrinker(&zcache_shrinker);
 +              old_ops = zcache_cleancache_register_ops();
 +              pr_info("ramster: cleancache enabled using kernel "
 +                      "transcendent memory and compression buddies\n");
 +              if (old_ops.init_fs != NULL)
 +                      pr_warning("ramster: cleancache_ops overridden");
 +      }
 +#endif
 +#ifdef CONFIG_FRONTSWAP
 +      pr_info("INIT ramster_enabled=%d use_frontswap=%d\n",
 +                                      ramster_enabled, use_frontswap);
 +      if (ramster_enabled && use_frontswap) {
 +              struct frontswap_ops old_ops;
 +
 +              zcache_new_client(LOCAL_CLIENT);
 +              old_ops = zcache_frontswap_register_ops();
 +              pr_info("ramster: frontswap enabled using kernel "
 +                      "transcendent memory and xvmalloc\n");
 +              if (old_ops.init != NULL)
 +                      pr_warning("ramster: frontswap_ops overridden");
 +      }
 +      if (ramster_enabled && (use_frontswap || use_cleancache))
 +              ramster_remotify_init();
 +#endif
 +out:
 +      return ret;
 +}
 +
 +module_init(zcache_init)
@@@ -6,10 -6,9 +6,10 @@@
   *
   * Zcache provides an in-kernel "host implementation" for transcendent memory
   * and, thus indirectly, for cleancache and frontswap.  Zcache includes two
 - * page-accessible memory [1] interfaces, both utilizing lzo1x compression:
 + * page-accessible memory [1] interfaces, both utilizing the crypto compression
 + * API:
   * 1) "compression buddies" ("zbud") is used for ephemeral pages
 - * 2) xvmalloc is used for persistent pages.
 + * 2) zsmalloc is used for persistent pages.
   * Xvmalloc (based on the TLSF allocator) has very low fragmentation
   * so maximizes space efficiency, while zbud allows pairs (and potentially,
   * in the future, more than a pair of) compressed pages to be closely linked
  #include <linux/cpu.h>
  #include <linux/highmem.h>
  #include <linux/list.h>
 -#include <linux/lzo.h>
  #include <linux/slab.h>
  #include <linux/spinlock.h>
  #include <linux/types.h>
  #include <linux/atomic.h>
  #include <linux/math64.h>
 +#include <linux/crypto.h>
 +#include <linux/string.h>
  #include "tmem.h"
  
 -#include "../zram/xvmalloc.h" /* if built in drivers/staging */
 +#include "../zsmalloc/zsmalloc.h"
  
  #if (!defined(CONFIG_CLEANCACHE) && !defined(CONFIG_FRONTSWAP))
  #error "zcache is useless without CONFIG_CLEANCACHE or CONFIG_FRONTSWAP"
@@@ -62,7 -60,7 +62,7 @@@ MODULE_LICENSE("GPL")
  
  struct zcache_client {
        struct tmem_pool *tmem_pools[MAX_POOLS_PER_CLIENT];
 -      struct xv_pool *xvpool;
 +      struct zs_pool *zspool;
        bool allocated;
        atomic_t refcount;
  };
@@@ -83,38 -81,6 +83,38 @@@ static inline bool is_local_client(stru
        return cli == &zcache_host;
  }
  
 +/* crypto API for zcache  */
 +#define ZCACHE_COMP_NAME_SZ CRYPTO_MAX_ALG_NAME
 +static char zcache_comp_name[ZCACHE_COMP_NAME_SZ];
 +static struct crypto_comp * __percpu *zcache_comp_pcpu_tfms;
 +
 +enum comp_op {
 +      ZCACHE_COMPOP_COMPRESS,
 +      ZCACHE_COMPOP_DECOMPRESS
 +};
 +
 +static inline int zcache_comp_op(enum comp_op op,
 +                              const u8 *src, unsigned int slen,
 +                              u8 *dst, unsigned int *dlen)
 +{
 +      struct crypto_comp *tfm;
 +      int ret;
 +
 +      BUG_ON(!zcache_comp_pcpu_tfms);
 +      tfm = *per_cpu_ptr(zcache_comp_pcpu_tfms, get_cpu());
 +      BUG_ON(!tfm);
 +      switch (op) {
 +      case ZCACHE_COMPOP_COMPRESS:
 +              ret = crypto_comp_compress(tfm, src, slen, dst, dlen);
 +              break;
 +      case ZCACHE_COMPOP_DECOMPRESS:
 +              ret = crypto_comp_decompress(tfm, src, slen, dst, dlen);
 +              break;
 +      }
 +      put_cpu();
 +      return ret;
 +}
 +
  /**********
   * Compression buddies ("zbud") provides for packing two (or, possibly
   * in the future, more) compressed ephemeral pages into a single "raw"
@@@ -333,12 -299,10 +333,12 @@@ static void zbud_free_and_delist(struc
        struct zbud_page *zbpg =
                container_of(zh, struct zbud_page, buddy[budnum]);
  
 +      spin_lock(&zbud_budlists_spinlock);
        spin_lock(&zbpg->lock);
        if (list_empty(&zbpg->bud_list)) {
                /* ignore zombie page... see zbud_evict_pages() */
                spin_unlock(&zbpg->lock);
 +              spin_unlock(&zbud_budlists_spinlock);
                return;
        }
        size = zbud_free(zh);
        zh_other = &zbpg->buddy[(budnum == 0) ? 1 : 0];
        if (zh_other->size == 0) { /* was unbuddied: unlist and free */
                chunks = zbud_size_to_chunks(size) ;
 -              spin_lock(&zbud_budlists_spinlock);
                BUG_ON(list_empty(&zbud_unbuddied[chunks].list));
                list_del_init(&zbpg->bud_list);
                zbud_unbuddied[chunks].count--;
                zbud_free_raw_page(zbpg);
        } else { /* was buddied: move remaining buddy to unbuddied list */
                chunks = zbud_size_to_chunks(zh_other->size) ;
 -              spin_lock(&zbud_budlists_spinlock);
                list_del_init(&zbpg->bud_list);
                zcache_zbud_buddied_count--;
                list_add_tail(&zbpg->bud_list, &zbud_unbuddied[chunks].list);
@@@ -441,7 -407,7 +441,7 @@@ static int zbud_decompress(struct page 
  {
        struct zbud_page *zbpg;
        unsigned budnum = zbud_budnum(zh);
 -      size_t out_len = PAGE_SIZE;
 +      unsigned int out_len = PAGE_SIZE;
        char *to_va, *from_va;
        unsigned size;
        int ret = 0;
        }
        ASSERT_SENTINEL(zh, ZBH);
        BUG_ON(zh->size == 0 || zh->size > zbud_max_buddy_size());
-       to_va = kmap_atomic(page, KM_USER0);
+       to_va = kmap_atomic(page);
        size = zh->size;
        from_va = zbud_data(zh, size);
 -      ret = lzo1x_decompress_safe(from_va, size, to_va, &out_len);
 -      BUG_ON(ret != LZO_E_OK);
 +      ret = zcache_comp_op(ZCACHE_COMPOP_DECOMPRESS, from_va, size,
 +                              to_va, &out_len);
 +      BUG_ON(ret);
        BUG_ON(out_len != PAGE_SIZE);
-       kunmap_atomic(to_va, KM_USER0);
+       kunmap_atomic(to_va);
  out:
        spin_unlock(&zbpg->lock);
        return ret;
@@@ -657,8 -622,8 +657,8 @@@ static int zbud_show_cumul_chunk_counts
  #endif
  
  /**********
 - * This "zv" PAM implementation combines the TLSF-based xvMalloc
 - * with lzo1x compression to maximize the amount of data that can
 + * This "zv" PAM implementation combines the slab-based zsmalloc
 + * with the crypto compression API to maximize the amount of data that can
   * be packed into a physical page.
   *
   * Zv represents a PAM page with the index and object (plus a "size" value
@@@ -671,7 -636,6 +671,7 @@@ struct zv_hdr 
        uint32_t pool_id;
        struct tmem_oid oid;
        uint32_t index;
 +      size_t size;
        DECL_SENTINEL
  };
  
@@@ -693,72 -657,72 +693,72 @@@ static unsigned int zv_max_mean_zsize 
  static atomic_t zv_curr_dist_counts[NCHUNKS];
  static atomic_t zv_cumul_dist_counts[NCHUNKS];
  
 -static struct zv_hdr *zv_create(struct xv_pool *xvpool, uint32_t pool_id,
 +static struct zv_hdr *zv_create(struct zs_pool *pool, uint32_t pool_id,
                                struct tmem_oid *oid, uint32_t index,
                                void *cdata, unsigned clen)
  {
 -      struct page *page;
 -      struct zv_hdr *zv = NULL;
 -      uint32_t offset;
 -      int alloc_size = clen + sizeof(struct zv_hdr);
 -      int chunks = (alloc_size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT;
 -      int ret;
 +      struct zv_hdr *zv;
 +      u32 size = clen + sizeof(struct zv_hdr);
 +      int chunks = (size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT;
 +      void *handle = NULL;
  
        BUG_ON(!irqs_disabled());
        BUG_ON(chunks >= NCHUNKS);
 -      ret = xv_malloc(xvpool, alloc_size,
 -                      &page, &offset, ZCACHE_GFP_MASK);
 -      if (unlikely(ret))
 +      handle = zs_malloc(pool, size);
 +      if (!handle)
                goto out;
        atomic_inc(&zv_curr_dist_counts[chunks]);
        atomic_inc(&zv_cumul_dist_counts[chunks]);
 -      zv = kmap_atomic(page) + offset;
 +      zv = zs_map_object(pool, handle);
        zv->index = index;
        zv->oid = *oid;
        zv->pool_id = pool_id;
 +      zv->size = clen;
        SET_SENTINEL(zv, ZVH);
        memcpy((char *)zv + sizeof(struct zv_hdr), cdata, clen);
 -      kunmap_atomic(zv);
 +      zs_unmap_object(pool, handle);
  out:
 -      return zv;
 +      return handle;
  }
  
 -static void zv_free(struct xv_pool *xvpool, struct zv_hdr *zv)
 +static void zv_free(struct zs_pool *pool, void *handle)
  {
        unsigned long flags;
 -      struct page *page;
 -      uint32_t offset;
 -      uint16_t size = xv_get_object_size(zv);
 -      int chunks = (size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT;
 +      struct zv_hdr *zv;
 +      uint16_t size;
 +      int chunks;
  
 +      zv = zs_map_object(pool, handle);
        ASSERT_SENTINEL(zv, ZVH);
 +      size = zv->size + sizeof(struct zv_hdr);
 +      INVERT_SENTINEL(zv, ZVH);
 +      zs_unmap_object(pool, handle);
 +
 +      chunks = (size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT;
        BUG_ON(chunks >= NCHUNKS);
        atomic_dec(&zv_curr_dist_counts[chunks]);
 -      size -= sizeof(*zv);
 -      BUG_ON(size == 0);
 -      INVERT_SENTINEL(zv, ZVH);
 -      page = virt_to_page(zv);
 -      offset = (unsigned long)zv & ~PAGE_MASK;
 +
        local_irq_save(flags);
 -      xv_free(xvpool, page, offset);
 +      zs_free(pool, handle);
        local_irq_restore(flags);
  }
  
 -static void zv_decompress(struct page *page, struct zv_hdr *zv)
 +static void zv_decompress(struct page *page, void *handle)
  {
 -      size_t clen = PAGE_SIZE;
 +      unsigned int clen = PAGE_SIZE;
        char *to_va;
 -      unsigned size;
        int ret;
 +      struct zv_hdr *zv;
  
 +      zv = zs_map_object(zcache_host.zspool, handle);
 +      BUG_ON(zv->size == 0);
        ASSERT_SENTINEL(zv, ZVH);
-       to_va = kmap_atomic(page, KM_USER0);
 -      size = xv_get_object_size(zv) - sizeof(*zv);
 -      BUG_ON(size == 0);
+       to_va = kmap_atomic(page);
 -      ret = lzo1x_decompress_safe((char *)zv + sizeof(*zv),
 -                                      size, to_va, &clen);
 +      ret = zcache_comp_op(ZCACHE_COMPOP_DECOMPRESS, (char *)zv + sizeof(*zv),
 +                              zv->size, to_va, &clen);
-       kunmap_atomic(to_va, KM_USER0);
+       kunmap_atomic(to_va);
 -      BUG_ON(ret != LZO_E_OK);
 +      zs_unmap_object(zcache_host.zspool, handle);
 +      BUG_ON(ret);
        BUG_ON(clen != PAGE_SIZE);
  }
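
The zv_* rework above is an instance of the zsmalloc handle pattern: zs_malloc() returns an opaque handle rather than a pointer, and the object is only addressable between zs_map_object() and zs_unmap_object(). A rough sketch with the signatures as used by this tree's staging zsmalloc; the helper name and header path are illustrative:

#include <linux/string.h>
#include "../zsmalloc/zsmalloc.h"	/* staging location assumed here */

/* Illustrative only: store a buffer in a zs_pool and hand back its handle. */
static void *example_zs_store(struct zs_pool *pool, const void *data, size_t len)
{
	void *handle = zs_malloc(pool, len);
	void *va;

	if (!handle)
		return NULL;
	va = zs_map_object(pool, handle);	/* short-lived mapping */
	memcpy(va, data, len);
	zs_unmap_object(pool, handle);
	return handle;				/* not a dereferenceable pointer */
}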
  
@@@ -984,8 -948,8 +984,8 @@@ int zcache_new_client(uint16_t cli_id
                goto out;
        cli->allocated = 1;
  #ifdef CONFIG_FRONTSWAP
 -      cli->xvpool = xv_create_pool();
 -      if (cli->xvpool == NULL)
 +      cli->zspool = zs_create_pool("zcache", ZCACHE_GFP_MASK);
 +      if (cli->zspool == NULL)
                goto out;
  #endif
        ret = 0;
@@@ -1168,14 -1132,14 +1168,14 @@@ static atomic_t zcache_curr_pers_pampd_
  static unsigned long zcache_curr_pers_pampd_count_max;
  
  /* forward reference */
 -static int zcache_compress(struct page *from, void **out_va, size_t *out_len);
 +static int zcache_compress(struct page *from, void **out_va, unsigned *out_len);
  
  static void *zcache_pampd_create(char *data, size_t size, bool raw, int eph,
                                struct tmem_pool *pool, struct tmem_oid *oid,
                                 uint32_t index)
  {
        void *pampd = NULL, *cdata;
 -      size_t clen;
 +      unsigned clen;
        int ret;
        unsigned long count;
        struct page *page = (struct page *)(data);
                }
                /* reject if mean compression is too poor */
                if ((clen > zv_max_mean_zsize) && (curr_pers_pampd_count > 0)) {
 -                      total_zsize = xv_get_total_size_bytes(cli->xvpool);
 +                      total_zsize = zs_get_total_size_bytes(cli->zspool);
                        zv_mean_zsize = div_u64(total_zsize,
                                                curr_pers_pampd_count);
                        if (zv_mean_zsize > zv_max_mean_zsize) {
                                goto out;
                        }
                }
 -              pampd = (void *)zv_create(cli->xvpool, pool->pool_id,
 +              pampd = (void *)zv_create(cli->zspool, pool->pool_id,
                                                oid, index, cdata, clen);
                if (pampd == NULL)
                        goto out;
@@@ -1282,7 -1246,7 +1282,7 @@@ static void zcache_pampd_free(void *pam
                atomic_dec(&zcache_curr_eph_pampd_count);
                BUG_ON(atomic_read(&zcache_curr_eph_pampd_count) < 0);
        } else {
 -              zv_free(cli->xvpool, (struct zv_hdr *)pampd);
 +              zv_free(cli->zspool, pampd);
                atomic_dec(&zcache_curr_pers_pampd_count);
                BUG_ON(atomic_read(&zcache_curr_pers_pampd_count) < 0);
        }
@@@ -1321,73 -1285,55 +1321,73 @@@ static struct tmem_pamops zcache_pamop
   * zcache compression/decompression and related per-cpu stuff
   */
  
 -#define LZO_WORKMEM_BYTES LZO1X_1_MEM_COMPRESS
 -#define LZO_DSTMEM_PAGE_ORDER 1
 -static DEFINE_PER_CPU(unsigned char *, zcache_workmem);
  static DEFINE_PER_CPU(unsigned char *, zcache_dstmem);
 +#define ZCACHE_DSTMEM_ORDER 1
  
 -static int zcache_compress(struct page *from, void **out_va, size_t *out_len)
 +static int zcache_compress(struct page *from, void **out_va, unsigned *out_len)
  {
        int ret = 0;
        unsigned char *dmem = __get_cpu_var(zcache_dstmem);
 -      unsigned char *wmem = __get_cpu_var(zcache_workmem);
        char *from_va;
  
        BUG_ON(!irqs_disabled());
 -      if (unlikely(dmem == NULL || wmem == NULL))
 -              goto out;  /* no buffer, so can't compress */
 +      if (unlikely(dmem == NULL))
 +              goto out;  /* no buffer or no compressor so can't compress */
 +      *out_len = PAGE_SIZE << ZCACHE_DSTMEM_ORDER;
-       from_va = kmap_atomic(from, KM_USER0);
+       from_va = kmap_atomic(from);
        mb();
 -      ret = lzo1x_1_compress(from_va, PAGE_SIZE, dmem, out_len, wmem);
 -      BUG_ON(ret != LZO_E_OK);
 +      ret = zcache_comp_op(ZCACHE_COMPOP_COMPRESS, from_va, PAGE_SIZE, dmem,
 +                              out_len);
 +      BUG_ON(ret);
        *out_va = dmem;
-       kunmap_atomic(from_va, KM_USER0);
+       kunmap_atomic(from_va);
        ret = 1;
  out:
        return ret;
  }
  
 +static int zcache_comp_cpu_up(int cpu)
 +{
 +      struct crypto_comp *tfm;
 +
 +      tfm = crypto_alloc_comp(zcache_comp_name, 0, 0);
 +      if (IS_ERR(tfm))
 +              return NOTIFY_BAD;
 +      *per_cpu_ptr(zcache_comp_pcpu_tfms, cpu) = tfm;
 +      return NOTIFY_OK;
 +}
 +
 +static void zcache_comp_cpu_down(int cpu)
 +{
 +      struct crypto_comp *tfm;
 +
 +      tfm = *per_cpu_ptr(zcache_comp_pcpu_tfms, cpu);
 +      crypto_free_comp(tfm);
 +      *per_cpu_ptr(zcache_comp_pcpu_tfms, cpu) = NULL;
 +}
  
  static int zcache_cpu_notifier(struct notifier_block *nb,
                                unsigned long action, void *pcpu)
  {
 -      int cpu = (long)pcpu;
 +      int ret, cpu = (long)pcpu;
        struct zcache_preload *kp;
  
        switch (action) {
        case CPU_UP_PREPARE:
 +              ret = zcache_comp_cpu_up(cpu);
 +              if (ret != NOTIFY_OK) {
 +                      pr_err("zcache: can't allocate compressor transform\n");
 +                      return ret;
 +              }
                per_cpu(zcache_dstmem, cpu) = (void *)__get_free_pages(
 -                      GFP_KERNEL | __GFP_REPEAT,
 -                      LZO_DSTMEM_PAGE_ORDER),
 -              per_cpu(zcache_workmem, cpu) =
 -                      kzalloc(LZO1X_MEM_COMPRESS,
 -                              GFP_KERNEL | __GFP_REPEAT);
 +                      GFP_KERNEL | __GFP_REPEAT, ZCACHE_DSTMEM_ORDER);
                break;
        case CPU_DEAD:
        case CPU_UP_CANCELED:
 +              zcache_comp_cpu_down(cpu);
                free_pages((unsigned long)per_cpu(zcache_dstmem, cpu),
 -                              LZO_DSTMEM_PAGE_ORDER);
 +                      ZCACHE_DSTMEM_ORDER);
                per_cpu(zcache_dstmem, cpu) = NULL;
 -              kfree(per_cpu(zcache_workmem, cpu));
 -              per_cpu(zcache_workmem, cpu) = NULL;
                kp = &per_cpu(zcache_preloads, cpu);
                while (kp->nr) {
                        kmem_cache_free(zcache_objnode_cache,
@@@ -1972,44 -1918,6 +1972,44 @@@ static int __init no_frontswap(char *s
  
  __setup("nofrontswap", no_frontswap);
  
 +static int __init enable_zcache_compressor(char *s)
 +{
 +      strncpy(zcache_comp_name, s, ZCACHE_COMP_NAME_SZ);
 +      zcache_enabled = 1;
 +      return 1;
 +}
 +__setup("zcache=", enable_zcache_compressor);
 +
 +
 +static int zcache_comp_init(void)
 +{
 +      int ret = 0;
 +
 +      /* check crypto algorithm */
 +      if (*zcache_comp_name != '\0') {
 +              ret = crypto_has_comp(zcache_comp_name, 0, 0);
 +              if (!ret)
 +                      pr_info("zcache: %s not supported\n",
 +                                      zcache_comp_name);
 +      }
 +      if (!ret)
 +              strcpy(zcache_comp_name, "lzo");
 +      ret = crypto_has_comp(zcache_comp_name, 0, 0);
 +      if (!ret) {
 +              ret = 1;
 +              goto out;
 +      }
 +      pr_info("zcache: using %s compressor\n", zcache_comp_name);
 +
 +      /* alloc percpu transforms */
 +      ret = 0;
 +      zcache_comp_pcpu_tfms = alloc_percpu(struct crypto_comp *);
 +      if (!zcache_comp_pcpu_tfms)
 +              ret = 1;
 +out:
 +      return ret;
 +}
 +
  static int __init zcache_init(void)
  {
        int ret = 0;
                        pr_err("zcache: can't register cpu notifier\n");
                        goto out;
                }
 +              ret = zcache_comp_init();
 +              if (ret) {
 +                      pr_err("zcache: compressor initialization failed\n");
 +                      goto out;
 +              }
                for_each_online_cpu(cpu) {
                        void *pcpu = (void *)(long)cpu;
                        zcache_cpu_notifier(&zcache_cpu_notifier_block,
  
                old_ops = zcache_frontswap_register_ops();
                pr_info("zcache: frontswap enabled using kernel "
 -                      "transcendent memory and xvmalloc\n");
 +                      "transcendent memory and zsmalloc\n");
                if (old_ops.init != NULL)
                        pr_warning("zcache: frontswap_ops overridden");
        }
@@@ -40,7 -40,7 +40,7 @@@ static int zram_major
  struct zram *zram_devices;
  
  /* Module params (documentation at end) */
 -unsigned int zram_num_devices;
 +static unsigned int num_devices;
  
  static void zram_stat_inc(u32 *v)
  {
@@@ -135,9 -135,13 +135,9 @@@ static void zram_set_disksize(struct zr
  
  static void zram_free_page(struct zram *zram, size_t index)
  {
 -      u32 clen;
 -      void *obj;
 +      void *handle = zram->table[index].handle;
  
 -      struct page *page = zram->table[index].page;
 -      u32 offset = zram->table[index].offset;
 -
 -      if (unlikely(!page)) {
 +      if (unlikely(!handle)) {
                /*
                 * No memory is allocated for zero filled pages.
                 * Simply clear zero page flag.
        }
  
        if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
 -              clen = PAGE_SIZE;
 -              __free_page(page);
 +              __free_page(handle);
                zram_clear_flag(zram, index, ZRAM_UNCOMPRESSED);
                zram_stat_dec(&zram->stats.pages_expand);
                goto out;
        }
  
 -      obj = kmap_atomic(page) + offset;
 -      clen = xv_get_object_size(obj) - sizeof(struct zobj_header);
 -      kunmap_atomic(obj);
 +      zs_free(zram->mem_pool, handle);
  
 -      xv_free(zram->mem_pool, page, offset);
 -      if (clen <= PAGE_SIZE / 2)
 +      if (zram->table[index].size <= PAGE_SIZE / 2)
                zram_stat_dec(&zram->stats.good_compress);
  
  out:
 -      zram_stat64_sub(zram, &zram->stats.compr_size, clen);
 +      zram_stat64_sub(zram, &zram->stats.compr_size,
 +                      zram->table[index].size);
        zram_stat_dec(&zram->stats.pages_stored);
  
 -      zram->table[index].page = NULL;
 -      zram->table[index].offset = 0;
 +      zram->table[index].handle = NULL;
 +      zram->table[index].size = 0;
  }
  
  static void handle_zero_page(struct bio_vec *bvec)
        struct page *page = bvec->bv_page;
        void *user_mem;
  
-       user_mem = kmap_atomic(page, KM_USER0);
+       user_mem = kmap_atomic(page);
        memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
-       kunmap_atomic(user_mem, KM_USER0);
+       kunmap_atomic(user_mem);
  
        flush_dcache_page(page);
  }
@@@ -188,12 -195,12 +188,12 @@@ static void handle_uncompressed_page(st
        struct page *page = bvec->bv_page;
        unsigned char *user_mem, *cmem;
  
-       user_mem = kmap_atomic(page, KM_USER0);
-       cmem = kmap_atomic(zram->table[index].handle, KM_USER1);
+       user_mem = kmap_atomic(page);
 -      cmem = kmap_atomic(zram->table[index].page);
++      cmem = kmap_atomic(zram->table[index].handle);
  
        memcpy(user_mem + bvec->bv_offset, cmem + offset, bvec->bv_len);
-       kunmap_atomic(cmem, KM_USER1);
-       kunmap_atomic(user_mem, KM_USER0);
+       kunmap_atomic(cmem);
+       kunmap_atomic(user_mem);
  
        flush_dcache_page(page);
  }
@@@ -220,7 -227,7 +220,7 @@@ static int zram_bvec_read(struct zram *
        }
  
        /* Requested page is not present in compressed area */
 -      if (unlikely(!zram->table[index].page)) {
 +      if (unlikely(!zram->table[index].handle)) {
                pr_debug("Read before write: sector=%lu, size=%u",
                         (ulong)(bio->bi_sector), bio->bi_size);
                handle_zero_page(bvec);
                }
        }
  
-       user_mem = kmap_atomic(page, KM_USER0);
+       user_mem = kmap_atomic(page);
        if (!is_partial_io(bvec))
                uncmem = user_mem;
        clen = PAGE_SIZE;
  
 -      cmem = kmap_atomic(zram->table[index].page) +
 -              zram->table[index].offset;
 +      cmem = zs_map_object(zram->mem_pool, zram->table[index].handle);
  
        ret = lzo1x_decompress_safe(cmem + sizeof(*zheader),
 -                                  xv_get_object_size(cmem) - sizeof(*zheader),
 +                                  zram->table[index].size,
                                    uncmem, &clen);
  
        if (is_partial_io(bvec)) {
                kfree(uncmem);
        }
  
 -      kunmap_atomic(cmem);
 +      zs_unmap_object(zram->mem_pool, zram->table[index].handle);
-       kunmap_atomic(user_mem, KM_USER0);
+       kunmap_atomic(user_mem);
  
        /* Should NEVER happen. Return bio error if it does. */
        if (unlikely(ret != LZO_E_OK)) {
@@@ -282,24 -290,25 +282,24 @@@ static int zram_read_before_write(struc
        unsigned char *cmem;
  
        if (zram_test_flag(zram, index, ZRAM_ZERO) ||
 -          !zram->table[index].page) {
 +          !zram->table[index].handle) {
                memset(mem, 0, PAGE_SIZE);
                return 0;
        }
  
 -      cmem = kmap_atomic(zram->table[index].page) +
 -              zram->table[index].offset;
 +      cmem = zs_map_object(zram->mem_pool, zram->table[index].handle);
  
        /* Page is stored uncompressed since it's incompressible */
        if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
                memcpy(mem, cmem, PAGE_SIZE);
-               kunmap_atomic(cmem, KM_USER0);
+               kunmap_atomic(cmem);
                return 0;
        }
  
        ret = lzo1x_decompress_safe(cmem + sizeof(*zheader),
 -                                  xv_get_object_size(cmem) - sizeof(*zheader),
 +                                  zram->table[index].size,
                                    mem, &clen);
 -      kunmap_atomic(cmem);
 +      zs_unmap_object(zram->mem_pool, zram->table[index].handle);
  
        /* Should NEVER happen. Return bio error if it does. */
        if (unlikely(ret != LZO_E_OK)) {
@@@ -317,7 -326,6 +317,7 @@@ static int zram_bvec_write(struct zram 
        int ret;
        u32 store_offset;
        size_t clen;
 +      void *handle;
        struct zobj_header *zheader;
        struct page *page, *page_store;
        unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
         * System overwrites unused sectors. Free memory associated
         * with this sector now.
         */
 -      if (zram->table[index].page ||
 +      if (zram->table[index].handle ||
            zram_test_flag(zram, index, ZRAM_ZERO))
                zram_free_page(zram, index);
  
-       user_mem = kmap_atomic(page, KM_USER0);
+       user_mem = kmap_atomic(page);
  
        if (is_partial_io(bvec))
                memcpy(uncmem + offset, user_mem + bvec->bv_offset,
                uncmem = user_mem;
  
        if (page_zero_filled(uncmem)) {
-               kunmap_atomic(user_mem, KM_USER0);
+               kunmap_atomic(user_mem);
                if (is_partial_io(bvec))
                        kfree(uncmem);
                zram_stat_inc(&zram->stats.pages_zero);
        ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
                               zram->compress_workmem);
  
-       kunmap_atomic(user_mem, KM_USER0);
+       kunmap_atomic(user_mem);
        if (is_partial_io(bvec))
                        kfree(uncmem);
  
                store_offset = 0;
                zram_set_flag(zram, index, ZRAM_UNCOMPRESSED);
                zram_stat_inc(&zram->stats.pages_expand);
 -              zram->table[index].page = page_store;
 +              handle = page_store;
-               src = kmap_atomic(page, KM_USER0);
-               cmem = kmap_atomic(page_store, KM_USER1);
+               src = kmap_atomic(page);
++              cmem = kmap_atomic(page_store);
                goto memstore;
        }
  
 -      if (xv_malloc(zram->mem_pool, clen + sizeof(*zheader),
 -                    &zram->table[index].page, &store_offset,
 -                    GFP_NOIO | __GFP_HIGHMEM)) {
 +      handle = zs_malloc(zram->mem_pool, clen + sizeof(*zheader));
 +      if (!handle) {
                pr_info("Error allocating memory for compressed "
                        "page: %u, size=%zu\n", index, clen);
                ret = -ENOMEM;
                goto out;
        }
 +      cmem = zs_map_object(zram->mem_pool, handle);
  
  memstore:
 -      zram->table[index].offset = store_offset;
 -
 -      cmem = kmap_atomic(zram->table[index].page) +
 -              zram->table[index].offset;
 -
  #if 0
        /* Back-reference needed for memory defragmentation */
        if (!zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)) {
  
        memcpy(cmem, src, clen);
  
 -      kunmap_atomic(cmem);
 -      if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
 +      if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
-               kunmap_atomic(cmem, KM_USER1);
-               kunmap_atomic(src, KM_USER0);
++              kunmap_atomic(cmem);
+               kunmap_atomic(src);
 +      } else {
 +              zs_unmap_object(zram->mem_pool, handle);
 +      }
 +
 +      zram->table[index].handle = handle;
 +      zram->table[index].size = clen;
  
        /* Update stats */
        zram_stat64_add(zram, &zram->stats.compr_size, clen);
@@@ -592,20 -598,25 +592,20 @@@ void __zram_reset_device(struct zram *z
  
        /* Free all pages that are still in this zram device */
        for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
 -              struct page *page;
 -              u16 offset;
 -
 -              page = zram->table[index].page;
 -              offset = zram->table[index].offset;
 -
 -              if (!page)
 +              void *handle = zram->table[index].handle;
 +              if (!handle)
                        continue;
  
                if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
 -                      __free_page(page);
 +                      __free_page(handle);
                else
 -                      xv_free(zram->mem_pool, page, offset);
 +                      zs_free(zram->mem_pool, handle);
        }
  
        vfree(zram->table);
        zram->table = NULL;
  
 -      xv_destroy_pool(zram->mem_pool);
 +      zs_destroy_pool(zram->mem_pool);
        zram->mem_pool = NULL;
  
        /* Reset stats */
@@@ -663,7 -674,7 +663,7 @@@ int zram_init_device(struct zram *zram
        /* zram devices sort of resembles non-rotational disks */
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
  
 -      zram->mem_pool = xv_create_pool();
 +      zram->mem_pool = zs_create_pool("zram", GFP_NOIO | __GFP_HIGHMEM);
        if (!zram->mem_pool) {
                pr_err("Error creating memory pool\n");
                ret = -ENOMEM;
@@@ -779,18 -790,13 +779,18 @@@ static void destroy_device(struct zram 
                blk_cleanup_queue(zram->queue);
  }
  
 +unsigned int zram_get_num_devices(void)
 +{
 +      return num_devices;
 +}
 +
  static int __init zram_init(void)
  {
        int ret, dev_id;
  
 -      if (zram_num_devices > max_num_devices) {
 +      if (num_devices > max_num_devices) {
                pr_warning("Invalid value for num_devices: %u\n",
 -                              zram_num_devices);
 +                              num_devices);
                ret = -EINVAL;
                goto out;
        }
                goto out;
        }
  
 -      if (!zram_num_devices) {
 +      if (!num_devices) {
                pr_info("num_devices not specified. Using default: 1\n");
 -              zram_num_devices = 1;
 +              num_devices = 1;
        }
  
        /* Allocate the device array and initialize each one */
 -      pr_info("Creating %u devices ...\n", zram_num_devices);
 -      zram_devices = kzalloc(zram_num_devices * sizeof(struct zram), GFP_KERNEL);
 +      pr_info("Creating %u devices ...\n", num_devices);
 +      zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
        if (!zram_devices) {
                ret = -ENOMEM;
                goto unregister;
        }
  
 -      for (dev_id = 0; dev_id < zram_num_devices; dev_id++) {
 +      for (dev_id = 0; dev_id < num_devices; dev_id++) {
                ret = create_device(&zram_devices[dev_id], dev_id);
                if (ret)
                        goto free_devices;
@@@ -838,7 -844,7 +838,7 @@@ static void __exit zram_exit(void
        int i;
        struct zram *zram;
  
 -      for (i = 0; i < zram_num_devices; i++) {
 +      for (i = 0; i < num_devices; i++) {
                zram = &zram_devices[i];
  
                destroy_device(zram);
        pr_debug("Cleanup done!\n");
  }
  
 -module_param(zram_num_devices, uint, 0);
 -MODULE_PARM_DESC(zram_num_devices, "Number of zram devices");
 +module_param(num_devices, uint, 0);
 +MODULE_PARM_DESC(num_devices, "Number of zram devices");
  
  module_init(zram_init);
  module_exit(zram_exit);
diff --combined fs/exec.c
+++ b/fs/exec.c
@@@ -63,8 -63,6 +63,8 @@@
  #include <trace/events/task.h>
  #include "internal.h"
  
 +#include <trace/events/sched.h>
 +
  int core_uses_pid;
  char core_pattern[CORENAME_MAX_SIZE] = "core";
  unsigned int core_pipe_limit;
@@@ -850,7 -848,6 +850,7 @@@ static int exec_mmap(struct mm_struct *
        if (old_mm) {
                up_read(&old_mm->mmap_sem);
                BUG_ON(active_mm != old_mm);
 +              setmax_mm_hiwater_rss(&tsk->signal->maxrss, old_mm);
                mm_update_next_owner(old_mm);
                mmput(old_mm);
                return 0;
@@@ -978,8 -975,8 +978,8 @@@ static int de_thread(struct task_struc
        sig->notify_count = 0;
  
  no_thread_group:
 -      if (current->mm)
 -              setmax_mm_hiwater_rss(&sig->maxrss, current->mm);
 +      /* we have changed execution domain */
 +      tsk->exit_signal = SIGCHLD;
  
        exit_itimers(sig);
        flush_itimer_signals();
@@@ -1342,13 -1339,13 +1342,13 @@@ int remove_arg_zero(struct linux_binpr
                        ret = -EFAULT;
                        goto out;
                }
-               kaddr = kmap_atomic(page, KM_USER0);
+               kaddr = kmap_atomic(page);
  
                for (; offset < PAGE_SIZE && kaddr[offset];
                                offset++, bprm->p++)
                        ;
  
-               kunmap_atomic(kaddr, KM_USER0);
+               kunmap_atomic(kaddr);
                put_arg_page(page);
  
                if (offset == PAGE_SIZE)
@@@ -1405,10 -1402,9 +1405,10 @@@ int search_binary_handler(struct linux_
                         */
                        bprm->recursion_depth = depth;
                        if (retval >= 0) {
 -                              if (depth == 0)
 -                                      ptrace_event(PTRACE_EVENT_EXEC,
 -                                                      old_pid);
 +                              if (depth == 0) {
 +                                      trace_sched_process_exec(current, old_pid, bprm);
 +                                      ptrace_event(PTRACE_EVENT_EXEC, old_pid);
 +                              }
                                put_binfmt(fmt);
                                allow_write_access(bprm->file);
                                if (bprm->file)
diff --combined fs/namei.c
@@@ -1374,126 -1374,6 +1374,126 @@@ static inline int can_lookup(struct ino
        return 1;
  }
  
 +/*
 + * We can do the critical dentry name comparison and hashing
 + * operations one word at a time, but we are limited to:
 + *
 + * - Architectures with fast unaligned word accesses. We could
 + *   do a "get_unaligned()" if this helps and is sufficiently
 + *   fast.
 + *
 + * - Little-endian machines (so that we can generate the mask
 + *   of low bytes efficiently). Again, we *could* do a byte
 + *   swapping load on big-endian architectures if that is not
 + *   expensive enough to make the optimization worthless.
 + *
 + * - non-CONFIG_DEBUG_PAGEALLOC configurations (so that we
 + *   do not trap on the (extremely unlikely) case of a page
 + *   crossing operation).
 + *
 + * - Furthermore, we need an efficient 64-bit compile for the
 + *   64-bit case in order to generate the "number of bytes in
 + *   the final mask". Again, that could be replaced with a
 + *   efficient population count instruction or similar.
 + */
 +#ifdef CONFIG_DCACHE_WORD_ACCESS
 +
 +#ifdef CONFIG_64BIT
 +
 +/*
 + * Jan Achrenius on G+: microoptimized version of
 + * the simpler "(mask & ONEBYTES) * ONEBYTES >> 56"
 + * that works for the bytemasks without having to
 + * mask them first.
 + */
 +static inline long count_masked_bytes(unsigned long mask)
 +{
 +      return mask*0x0001020304050608 >> 56;
 +}
 +
 +static inline unsigned int fold_hash(unsigned long hash)
 +{
 +      hash += hash >> (8*sizeof(int));
 +      return hash;
 +}
 +
 +#else /* 32-bit case */
 +
 +/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */
 +static inline long count_masked_bytes(long mask)
 +{
 +      /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
 +      long a = (0x0ff0001+mask) >> 23;
 +      /* Fix the 1 for 00 case */
 +      return a & mask;
 +}
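
Concretely, the 32-bit version is pure arithmetic: (0x0ff0001 + 0x00ffffff) >> 23 = 3, (0x0ff0001 + 0x0000ffff) >> 23 = 2, and both 0x000000ff and 0 yield 1 at this point, which is what the "( 1 1 2 3 )" table records; the trailing "a & mask" then turns the bogus 1 for a zero mask into 0 while leaving the other results intact.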
 +
 +#define fold_hash(x) (x)
 +
 +#endif
 +
 +unsigned int full_name_hash(const unsigned char *name, unsigned int len)
 +{
 +      unsigned long a, mask;
 +      unsigned long hash = 0;
 +
 +      for (;;) {
 +              a = *(unsigned long *)name;
 +              hash *= 9;
 +              if (len < sizeof(unsigned long))
 +                      break;
 +              hash += a;
 +              name += sizeof(unsigned long);
 +              len -= sizeof(unsigned long);
 +              if (!len)
 +                      goto done;
 +      }
 +      mask = ~(~0ul << len*8);
 +      hash += mask & a;
 +done:
 +      return fold_hash(hash);
 +}
 +EXPORT_SYMBOL(full_name_hash);
 +
 +#define ONEBYTES      0x0101010101010101ul
 +#define SLASHBYTES    0x2f2f2f2f2f2f2f2ful
 +#define HIGHBITS      0x8080808080808080ul
 +
 +/* Return the high bit set in the first byte that is a zero */
 +static inline unsigned long has_zero(unsigned long a)
 +{
 +      return ((a - ONEBYTES) & ~a) & HIGHBITS;
 +}
 +
 +/*
 + * Calculate the length and hash of the path component, and
 + * return the length of the component;
 + */
 +static inline unsigned long hash_name(const char *name, unsigned int *hashp)
 +{
 +      unsigned long a, mask, hash, len;
 +
 +      hash = a = 0;
 +      len = -sizeof(unsigned long);
 +      do {
 +              hash = (hash + a) * 9;
 +              len += sizeof(unsigned long);
 +              a = *(unsigned long *)(name+len);
 +              /* Do we have any NUL or '/' bytes in this word? */
 +              mask = has_zero(a) | has_zero(a ^ SLASHBYTES);
 +      } while (!mask);
 +
 +      /* The mask *below* the first high bit set */
 +      mask = (mask - 1) & ~mask;
 +      mask >>= 7;
 +      hash += a & mask;
 +      *hashp = fold_hash(hash);
 +
 +      return len + count_masked_bytes(mask);
 +}
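
As a worked example of hash_name() on a 64-bit little-endian machine, take a path component starting "etc/": the '/' sits in byte 3 of the first word, has_zero(a ^ SLASHBYTES) sets the high bit of that byte, the mask reduction keeps the three bytes below it, and count_masked_bytes() converts that into the component length. A small user-space sketch of the same arithmetic, reusing the names and constants from above:

#include <stdio.h>
#include <string.h>

#define ONEBYTES	0x0101010101010101ul
#define SLASHBYTES	0x2f2f2f2f2f2f2f2ful
#define HIGHBITS	0x8080808080808080ul

static unsigned long has_zero(unsigned long a)
{
	return ((a - ONEBYTES) & ~a) & HIGHBITS;
}

static long count_masked_bytes(unsigned long mask)
{
	return mask * 0x0001020304050608ul >> 56;
}

int main(void)
{
	unsigned long a = 0, mask;

	memcpy(&a, "etc/pass", sizeof(a));	/* little-endian word load */
	mask = has_zero(a) | has_zero(a ^ SLASHBYTES);
	mask = (mask - 1) & ~mask;		/* bits below the first terminator */
	mask >>= 7;				/* one 0xff per byte to keep */
	printf("component length = %ld\n", count_masked_bytes(mask));	/* prints 3 */
	return 0;
}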
 +
 +#else
 +
  unsigned int full_name_hash(const unsigned char *name, unsigned int len)
  {
        unsigned long hash = init_name_hash();
@@@ -1522,8 -1402,6 +1522,8 @@@ static inline unsigned long hash_name(c
        return len;
  }
  
 +#endif
 +
  /*
   * Name resolution.
   * This is the basic name resolution function, turning a pathname into
@@@ -3493,9 -3371,9 +3493,9 @@@ retry
        if (err)
                goto fail;
  
-       kaddr = kmap_atomic(page, KM_USER0);
+       kaddr = kmap_atomic(page);
        memcpy(kaddr, symname, len-1);
-       kunmap_atomic(kaddr, KM_USER0);
+       kunmap_atomic(kaddr);
  
        err = pagecache_write_end(NULL, mapping, 0, len-1, len-1,
                                                        page, fsdata);
diff --combined net/rds/ib_recv.c
@@@ -763,7 -763,7 +763,7 @@@ static void rds_ib_cong_recv(struct rds
                to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off);
                BUG_ON(to_copy & 7); /* Must be 64bit aligned. */
  
-               addr = kmap_atomic(sg_page(&frag->f_sg), KM_SOFTIRQ0);
+               addr = kmap_atomic(sg_page(&frag->f_sg));
  
                src = addr + frag_off;
                dst = (void *)map->m_page_addrs[map_page] + map_off;
                        uncongested |= ~(*src) & *dst;
                        *dst++ = *src++;
                }
-               kunmap_atomic(addr, KM_SOFTIRQ0);
+               kunmap_atomic(addr);
  
                copied += to_copy;
  
@@@ -826,7 -826,7 +826,7 @@@ static void rds_ib_process_recv(struct 
  
        if (data_len < sizeof(struct rds_header)) {
                rds_ib_conn_error(conn, "incoming message "
 -                     "from %pI4 didn't inclue a "
 +                     "from %pI4 didn't include a "
                       "header, disconnecting and "
                       "reconnecting\n",
                       &conn->c_faddr);
                        rds_ib_cong_recv(conn, ibinc);
                else {
                        rds_recv_incoming(conn, conn->c_faddr, conn->c_laddr,
-                                         &ibinc->ii_inc, GFP_ATOMIC,
-                                         KM_SOFTIRQ0);
+                                         &ibinc->ii_inc, GFP_ATOMIC);
                        state->ack_next = be64_to_cpu(hdr->h_sequence);
                        state->ack_next_valid = 1;
                }
diff --combined net/rds/iw_recv.c
@@@ -598,7 -598,7 +598,7 @@@ static void rds_iw_cong_recv(struct rds
                to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off);
                BUG_ON(to_copy & 7); /* Must be 64bit aligned. */
  
-               addr = kmap_atomic(frag->f_page, KM_SOFTIRQ0);
+               addr = kmap_atomic(frag->f_page);
  
                src = addr + frag_off;
                dst = (void *)map->m_page_addrs[map_page] + map_off;
                        uncongested |= ~(*src) & *dst;
                        *dst++ = *src++;
                }
-               kunmap_atomic(addr, KM_SOFTIRQ0);
+               kunmap_atomic(addr);
  
                copied += to_copy;
  
@@@ -661,7 -661,7 +661,7 @@@ static void rds_iw_process_recv(struct 
  
        if (byte_len < sizeof(struct rds_header)) {
                rds_iw_conn_error(conn, "incoming message "
 -                     "from %pI4 didn't inclue a "
 +                     "from %pI4 didn't include a "
                       "header, disconnecting and "
                       "reconnecting\n",
                       &conn->c_faddr);
                        rds_iw_cong_recv(conn, iwinc);
                else {
                        rds_recv_incoming(conn, conn->c_faddr, conn->c_laddr,
-                                         &iwinc->ii_inc, GFP_ATOMIC,
-                                         KM_SOFTIRQ0);
+                                         &iwinc->ii_inc, GFP_ATOMIC);
                        state->ack_next = be64_to_cpu(hdr->h_sequence);
                        state->ack_next_valid = 1;
                }