5.3 swappiness
Similar to /proc/sys/vm/swappiness, but affecting a hierarchy of groups only.
+Please note that unlike the global swappiness, setting the memcg knob
+to 0 really prevents any swapping even if swap storage is available.
+This might lead to a memcg OOM kill if there are no file pages to
+reclaim.
The following cgroups' swappiness can't be changed:
- root cgroup (uses /proc/sys/vm/swappiness).
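For illustration, here is a minimal userspace sketch that disables swapping for one group by writing 0 to its swappiness file. The controller mount point and group name are assumptions; they depend on where the memory controller is mounted and on the group being tuned:

#include <stdio.h>
#include <stdlib.h>

/* Assumed paths: adjust to the actual memory-controller mount point
 * and cgroup name on the system being tuned. */
#define SWAPPINESS_FILE "/sys/fs/cgroup/memory/example/memory.swappiness"

int main(void)
{
	FILE *f = fopen(SWAPPINESS_FILE, "w");

	if (!f) {
		perror("fopen");
		return EXIT_FAILURE;
	}
	/* 0: never swap pages charged to this group, even if swap space
	 * exists; reclaim is then limited to file pages. */
	fprintf(f, "0\n");
	fclose(f);
	return EXIT_SUCCESS;
}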
This requests that the NIC receive all possible frames, including errored
frames (such as bad FCS, etc). This can be helpful when sniffing a link with
bad packets on it. Some NICs may receive more packets if also put into normal
-PROMISC mdoe.
+PROMISC mode.
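The rx-all text above appears to describe the NETIF_F_RXALL feature flag. A hedged sketch of how a driver might advertise and honour it follows; the status bit and function names are illustrative, not taken from any driver in this series:

#include <linux/netdevice.h>

/* Hypothetical RX status bit, for illustration only. */
#define MY_RX_FCS_ERR	0x00000001

static void my_setup_features(struct net_device *dev)
{
	/* Expose "ethtool -K <dev> rx-all on|off" to userspace. */
	dev->hw_features |= NETIF_F_RXALL;
}

static bool my_rx_accept(struct net_device *dev, u32 status)
{
	/* Deliver errored (e.g. bad FCS) frames only when rx-all is on. */
	if (status & MY_RX_FCS_ERR)
		return !!(dev->features & NETIF_F_RXALL);
	return true;
}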
high_memory = __va(max_low_pfn * PAGE_SIZE);
- reset_zone_present_pages();
for_each_online_pgdat(pgdat)
if (pgdat->bdata->node_bootmem_map)
totalram_pages += free_all_bootmem_node(pgdat);
*/
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/irqflags.h>
#include <asm/bcache.h>
{ USB_DEVICE(0x13d3, 0x3304) },
{ USB_DEVICE(0x0930, 0x0215) },
{ USB_DEVICE(0x0489, 0xE03D) },
+ { USB_DEVICE(0x0489, 0xE027) },
/* Atheros AR9285 Malbec with sflash firmware */
{ USB_DEVICE(0x03F0, 0x311D) },
{ USB_DEVICE(0x13d3, 0x3304), .driver_info = BTUSB_IGNORE },
{ USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE },
{ USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE },
+ { USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE },
/* Atheros AR9285 Malbec with sflash firmware */
{ USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },
jme_clear_pm(jme);
JME_NAPI_ENABLE(jme);
- tasklet_enable(&jme->linkch_task);
- tasklet_enable(&jme->txclean_task);
- tasklet_hi_enable(&jme->rxclean_task);
- tasklet_hi_enable(&jme->rxempty_task);
+ tasklet_init(&jme->linkch_task, jme_link_change_tasklet,
+ (unsigned long) jme);
+ tasklet_init(&jme->txclean_task, jme_tx_clean_tasklet,
+ (unsigned long) jme);
+ tasklet_init(&jme->rxclean_task, jme_rx_clean_tasklet,
+ (unsigned long) jme);
+ tasklet_init(&jme->rxempty_task, jme_rx_empty_tasklet,
+ (unsigned long) jme);
rc = jme_request_irq(jme);
if (rc)
tasklet_init(&jme->pcc_task,
jme_pcc_tasklet,
(unsigned long) jme);
- tasklet_init(&jme->linkch_task,
- jme_link_change_tasklet,
- (unsigned long) jme);
- tasklet_init(&jme->txclean_task,
- jme_tx_clean_tasklet,
- (unsigned long) jme);
- tasklet_init(&jme->rxclean_task,
- jme_rx_clean_tasklet,
- (unsigned long) jme);
- tasklet_init(&jme->rxempty_task,
- jme_rx_empty_tasklet,
- (unsigned long) jme);
- tasklet_disable_nosync(&jme->linkch_task);
- tasklet_disable_nosync(&jme->txclean_task);
- tasklet_disable_nosync(&jme->rxclean_task);
- tasklet_disable_nosync(&jme->rxempty_task);
jme->dpi.cur = PCC_P1;
jme->reg_ghc = 0;
rc = request_irq(dev->irq, netdev_intr, IRQF_SHARED, dev->name, dev);
if (rc)
return rc;
- tasklet_enable(&hw_priv->rx_tasklet);
- tasklet_enable(&hw_priv->tx_tasklet);
+ tasklet_init(&hw_priv->rx_tasklet, rx_proc_task,
+ (unsigned long) hw_priv);
+ tasklet_init(&hw_priv->tx_tasklet, tx_proc_task,
+ (unsigned long) hw_priv);
hw->promiscuous = 0;
hw->all_multi = 0;
spin_lock_init(&hw_priv->hwlock);
mutex_init(&hw_priv->lock);
- /* tasklet is enabled. */
- tasklet_init(&hw_priv->rx_tasklet, rx_proc_task,
- (unsigned long) hw_priv);
- tasklet_init(&hw_priv->tx_tasklet, tx_proc_task,
- (unsigned long) hw_priv);
-
- /* tasklet_enable will decrement the atomic counter. */
- tasklet_disable(&hw_priv->rx_tasklet);
- tasklet_disable(&hw_priv->tx_tasklet);
-
for (i = 0; i < TOTAL_PORT_NUM; i++)
init_waitqueue_head(&hw_priv->counter[i].counter);
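The jme and ksz884x hunks above apply the same idea: rather than initialising tasklets at probe time and tracking their state with tasklet_disable()/tasklet_enable() around open/close, the tasklets are simply (re)initialised in the open path, so a tasklet killed by a previous close is recreated cleanly. A condensed sketch of the resulting lifecycle, with illustrative names only:

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct my_priv {
	struct tasklet_struct rx_task;
};

static void my_rx_tasklet(unsigned long data)
{
	struct my_priv *priv = (struct my_priv *)data;

	/* ... drain completed RX work for priv ... */
	(void)priv;
}

static int my_open(struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);

	/* Initialised in the enabled state; no enable/disable bookkeeping. */
	tasklet_init(&priv->rx_task, my_rx_tasklet, (unsigned long)priv);
	return 0;
}

static int my_close(struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);

	/* Killed here; the next open re-initialises it from scratch. */
	tasklet_kill(&priv->rx_task);
	return 0;
}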
static int __devinit smsc911x_init(struct net_device *dev)
{
struct smsc911x_data *pdata = netdev_priv(dev);
- unsigned int byte_test;
+ unsigned int byte_test, mask;
unsigned int to = 100;
SMSC_TRACE(pdata, probe, "Driver Parameters:");
/*
* poll the READY bit in PMT_CTRL. Any other access to the device is
* forbidden while this bit isn't set. Try for 100ms
+ *
+ * Note that this test is done before the WORD_SWAP register is
+ * programmed. So in some configurations the READY bit is at bit 16 before
+ * WORD_SWAP is written to. This issue is worked around by waiting
+ * until either bit 0 or bit 16 gets set in PMT_CTRL.
+ *
+ * SMSC has confirmed that checking bit 16 (marked as reserved in
+ * the datasheet) is fine since these bits "will either never be set
+ * or can only go high after READY does (so also indicate the device
+ * is ready)".
*/
- while (!(smsc911x_reg_read(pdata, PMT_CTRL) & PMT_CTRL_READY_) && --to)
+
+ mask = PMT_CTRL_READY_ | swahw32(PMT_CTRL_READY_);
+ while (!(smsc911x_reg_read(pdata, PMT_CTRL) & mask) && --to)
udelay(1000);
+
if (to == 0) {
pr_err("Device not READY in 100ms aborting\n");
return -ENODEV;
ingress_irq = rc;
tile_irq_activate(ingress_irq, TILE_IRQ_PERCPU);
rc = request_irq(ingress_irq, tile_net_handle_ingress_irq,
- 0, NULL, NULL);
+ 0, "tile_net", NULL);
if (rc != 0) {
netdev_err(dev, "request_irq failed: %d\n", rc);
destroy_irq(ingress_irq);
phy_start(lp->phy_dev);
}
+ /* Set up the tasklet for Axi DMA error handling */
+ tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler,
+ (unsigned long) lp);
+
/* Enable interrupts for Axi DMA Tx */
ret = request_irq(lp->tx_irq, axienet_tx_irq, 0, ndev->name, ndev);
if (ret)
ret = request_irq(lp->rx_irq, axienet_rx_irq, 0, ndev->name, ndev);
if (ret)
goto err_rx_irq;
- /* Enable tasklets for Axi DMA error handling */
- tasklet_enable(&lp->dma_err_tasklet);
+
return 0;
err_rx_irq:
if (lp->phy_dev)
phy_disconnect(lp->phy_dev);
lp->phy_dev = NULL;
+ tasklet_kill(&lp->dma_err_tasklet);
dev_err(lp->dev, "request_irq() failed\n");
return ret;
}
goto err_iounmap_2;
}
- tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler,
- (unsigned long) lp);
- tasklet_disable(&lp->dma_err_tasklet);
-
return 0;
err_iounmap_2:
struct mdiobb_ctrl *ctrl = bus->priv;
module_put(ctrl->ops->owner);
- mdiobus_unregister(bus);
mdiobus_free(bus);
}
EXPORT_SYMBOL(free_mdio_bitbang);
(ctx->ether_desc == NULL) || (ctx->control != intf))
goto error;
- /* claim interfaces, if any */
- temp = usb_driver_claim_interface(driver, ctx->data, dev);
- if (temp)
- goto error;
+ /* claim data interface, if different from control */
+ if (ctx->data != ctx->control) {
+ temp = usb_driver_claim_interface(driver, ctx->data, dev);
+ if (temp)
+ goto error;
+ }
iface_no = ctx->data->cur_altsetting->desc.bInterfaceNumber;
tasklet_kill(&ctx->bh);
+ /* handle devices with combined control and data interface */
+ if (ctx->control == ctx->data)
+ ctx->data = NULL;
+
/* disconnect master --> disconnect slave */
if (intf == ctx->control && ctx->data) {
usb_set_intfdata(ctx->data, NULL);
.driver_info = (unsigned long) &wwan_info,
},
+ /* Huawei NCM devices disguised as vendor specific */
+ { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x16),
+ .driver_info = (unsigned long)&wwan_info,
+ },
+ { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x46),
+ .driver_info = (unsigned long)&wwan_info,
+ },
+
/* Generic CDC-NCM devices */
{ USB_INTERFACE_INFO(USB_CLASS_COMM,
USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
/* set the address, index & direction (read from PHY) */
phy_id &= dev->mii.phy_id_mask;
idx &= dev->mii.reg_num_mask;
- addr = (phy_id << 11) | (idx << 6) | MII_READ_;
+ addr = (phy_id << 11) | (idx << 6) | MII_READ_ | MII_BUSY_;
ret = smsc95xx_write_reg(dev, MII_ADDR, addr);
check_warn_goto_done(ret, "Error writing MII_ADDR");
/* set the address, index & direction (write to PHY) */
phy_id &= dev->mii.phy_id_mask;
idx &= dev->mii.reg_num_mask;
- addr = (phy_id << 11) | (idx << 6) | MII_WRITE_;
+ addr = (phy_id << 11) | (idx << 6) | MII_WRITE_ | MII_BUSY_;
ret = smsc95xx_write_reg(dev, MII_ADDR, addr);
check_warn_goto_done(ret, "Error writing MII_ADDR");
/*
- * VXLAN: Virtual eXtensiable Local Area Network
+ * VXLAN: Virtual eXtensible Local Area Network
*
* Copyright (c) 2012 Vyatta Inc.
*
#define VXLAN_N_VID (1u << 24)
#define VXLAN_VID_MASK (VXLAN_N_VID - 1)
-/* VLAN + IP header + UDP + VXLAN */
-#define VXLAN_HEADROOM (4 + 20 + 8 + 8)
+/* IP header + UDP + VXLAN + Ethernet header */
+#define VXLAN_HEADROOM (20 + 8 + 8 + 14)
#define VXLAN_FLAGS 0x08000000 /* struct vxlanhdr.vx_flags required value. */
if (!tb[IFLA_MTU])
dev->mtu = lowerdev->mtu - VXLAN_HEADROOM;
+
+ /* update header length based on lower device */
+ dev->hard_header_len = lowerdev->hard_header_len +
+ VXLAN_HEADROOM;
}
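A worked example of the headroom accounting above, assuming an Ethernet lower device with the default 1500-byte MTU and a 14-byte hard_header_len:

/*
 * VXLAN_HEADROOM = 20 (outer IPv4) + 8 (UDP) + 8 (VXLAN) + 14 (inner Ethernet)
 *                = 50 bytes
 *
 * dev->mtu             = lowerdev->mtu - VXLAN_HEADROOM             = 1500 - 50 = 1450
 * dev->hard_header_len = lowerdev->hard_header_len + VXLAN_HEADROOM = 14 + 50   = 64
 */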
if (data[IFLA_VXLAN_TOS])
static void brcmf_wiphy_pno_params(struct wiphy *wiphy)
{
-#ifndef CONFIG_BRCMFISCAN
+#ifndef CONFIG_BRCMISCAN
/* scheduled scan settings */
wiphy->max_sched_scan_ssids = BRCMF_PNO_MAX_PFN_COUNT;
wiphy->max_match_sets = BRCMF_PNO_MAX_PFN_COUNT;
ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
if (iwlagn_tx_skb(priv, control->sta, skb))
- dev_kfree_skb_any(skb);
+ ieee80211_free_txskb(hw, skb);
}
static void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
info = IEEE80211_SKB_CB(skb);
iwl_trans_free_tx_cmd(priv->trans, info->driver_data[1]);
- dev_kfree_skb_any(skb);
+ ieee80211_free_txskb(priv->hw, skb);
}
static void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
dma_map_page(trans->dev, page, 0,
PAGE_SIZE << trans_pcie->rx_page_order,
DMA_FROM_DEVICE);
+ if (dma_mapping_error(trans->dev, rxb->page_dma)) {
+ rxb->page = NULL;
+ spin_lock_irqsave(&rxq->lock, flags);
+ list_add(&rxb->list, &rxq->rx_used);
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ __free_pages(page, trans_pcie->rx_page_order);
+ return;
+ }
/* dma address must be no more than 36 bits */
BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
/* and also 256 byte aligned! */
dma_map_page(trans->dev, rxb->page, 0,
PAGE_SIZE << trans_pcie->rx_page_order,
DMA_FROM_DEVICE);
- list_add_tail(&rxb->list, &rxq->rx_free);
- rxq->free_count++;
+ if (dma_mapping_error(trans->dev, rxb->page_dma)) {
+ /*
+ * free the page(s) as well to not break
+ * the invariant that the items on the used
+ * list have no page(s)
+ */
+ __free_pages(rxb->page, trans_pcie->rx_page_order);
+ rxb->page = NULL;
+ list_add_tail(&rxb->list, &rxq->rx_used);
+ } else {
+ list_add_tail(&rxb->list, &rxq->rx_free);
+ rxq->free_count++;
+ }
} else
list_add_tail(&rxb->list, &rxq->rx_used);
spin_unlock_irqrestore(&rxq->lock, flags);
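Both iwlwifi hunks above add the canonical error check after dma_map_page(); in isolation the pattern looks like the following sketch (helper name and structure are illustrative):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/errno.h>

/* Map one freshly allocated page for device RX. On mapping failure the
 * page is released so no descriptor is ever left pointing at an
 * unmapped buffer. Returns 0 on success. */
static int map_rx_page(struct device *dev, struct page *page,
		       unsigned int order, dma_addr_t *dma)
{
	*dma = dma_map_page(dev, page, 0, PAGE_SIZE << order,
			    DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *dma)) {
		__free_pages(page, order);
		return -ENOMEM;
	}
	return 0;
}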
/**
* rio_map_inb_region -- Map inbound memory region.
* @mport: Master port.
- * @lstart: physical address of memory region to be mapped
+ * @local: physical address of memory region to be mapped
* @rbase: RIO base address assigned to this window
* @size: Size of the memory region
* @rflags: Flags for mapping.
QETH_DBF_TEXT(SETUP, 2, "qipasscb");
cmd = (struct qeth_ipa_cmd *) data;
+
+ switch (cmd->hdr.return_code) {
+ case IPA_RC_NOTSUPP:
+ case IPA_RC_L2_UNSUPPORTED_CMD:
+ QETH_DBF_TEXT(SETUP, 2, "ipaunsup");
+ card->options.ipa4.supported_funcs |= IPA_SETADAPTERPARMS;
+ card->options.ipa6.supported_funcs |= IPA_SETADAPTERPARMS;
+ return 0;
+ default:
+ if (cmd->hdr.return_code) {
+ QETH_DBF_MESSAGE(1, "%s IPA_CMD_QIPASSIST: Unhandled "
+ "rc=%d\n",
+ dev_name(&card->gdev->dev),
+ cmd->hdr.return_code);
+ return 0;
+ }
+ }
+
if (cmd->hdr.prot_version == QETH_PROT_IPV4) {
card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported;
card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
- } else {
+ } else if (cmd->hdr.prot_version == QETH_PROT_IPV6) {
card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;
card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
- }
+ } else
+ QETH_DBF_MESSAGE(1, "%s IPA_CMD_QIPASSIST: Flawed LIC detected"
+ "\n", dev_name(&card->gdev->dev));
QETH_DBF_TEXT(SETUP, 2, "suppenbl");
QETH_DBF_TEXT_(SETUP, 2, "%08x", (__u32)cmd->hdr.ipa_supported);
QETH_DBF_TEXT_(SETUP, 2, "%08x", (__u32)cmd->hdr.ipa_enabled);
QETH_DBF_TEXT(SETUP, 2, "doL2init");
QETH_DBF_TEXT_(SETUP, 2, "doL2%s", CARD_BUS_ID(card));
- rc = qeth_query_setadapterparms(card);
- if (rc) {
- QETH_DBF_MESSAGE(2, "could not query adapter parameters on "
- "device %s: x%x\n", CARD_BUS_ID(card), rc);
+ if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
+ rc = qeth_query_setadapterparms(card);
+ if (rc) {
+ QETH_DBF_MESSAGE(2, "could not query adapter "
+ "parameters on device %s: x%x\n",
+ CARD_BUS_ID(card), rc);
+ }
}
if (card->info.type == QETH_CARD_TYPE_IQD ||
return -ERESTARTSYS;
}
rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]);
- if (!rc)
+ if (!rc || (rc == IPA_RC_L2_MAC_NOT_FOUND))
rc = qeth_l2_send_setmac(card, addr->sa_data);
return rc ? -EINVAL : 0;
}
static inline bool page_is_guard(struct page *page) { return false; }
#endif /* CONFIG_DEBUG_PAGEALLOC */
-extern void reset_zone_present_pages(void);
-extern void fixup_zone_present_pages(int nid, unsigned long start_pfn,
- unsigned long end_pfn);
-
#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */
unsigned long size,
enum memmap_context context);
-extern void lruvec_init(struct lruvec *lruvec, struct zone *zone);
+extern void lruvec_init(struct lruvec *lruvec);
static inline struct zone *lruvec_zone(struct lruvec *lruvec)
{
* struct rio_net - RIO network info
* @node: Node in global list of RIO networks
* @devices: List of devices in this network
+ * @switches: List of switches in this network
* @mports: List of master ports accessing this network
* @hport: Default port for accessing this network
* @id: RIO network ID
+ * @destid_table: destID allocation table
*/
struct rio_net {
struct list_head node; /* node in list of networks */
int order = ilog2(BITS_PER_LONG);
__free_pages_bootmem(pfn_to_page(start), order);
- fixup_zone_present_pages(page_to_nid(pfn_to_page(start)),
- start, start + BITS_PER_LONG);
count += BITS_PER_LONG;
start += BITS_PER_LONG;
} else {
if (vec & 1) {
page = pfn_to_page(start + off);
__free_pages_bootmem(page, 0);
- fixup_zone_present_pages(
- page_to_nid(page),
- start + off, start + off + 1);
count++;
}
vec >>= 1;
pages = bdata->node_low_pfn - bdata->node_min_pfn;
pages = bootmem_bootmap_pages(pages);
count += pages;
- while (pages--) {
- fixup_zone_present_pages(page_to_nid(page),
- page_to_pfn(page), page_to_pfn(page) + 1);
+ while (pages--)
__free_pages_bootmem(page++, 0);
- }
bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count);
{
unsigned long addr = (unsigned long)vaddr;
- if (addr >= PKMAP_ADDR(0) && addr <= PKMAP_ADDR(LAST_PKMAP)) {
+ if (addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP)) {
int i = (addr - PKMAP_ADDR(0)) >> PAGE_SHIFT;
return pte_page(pkmap_page_table[i]);
}
struct mem_cgroup *memcg)
{
struct mem_cgroup_per_zone *mz;
+ struct lruvec *lruvec;
- if (mem_cgroup_disabled())
- return &zone->lruvec;
+ if (mem_cgroup_disabled()) {
+ lruvec = &zone->lruvec;
+ goto out;
+ }
mz = mem_cgroup_zoneinfo(memcg, zone_to_nid(zone), zone_idx(zone));
- return &mz->lruvec;
+ lruvec = &mz->lruvec;
+out:
+ /*
+ * Since a node can be onlined after the mem_cgroup was created,
+ * we have to be prepared to initialize lruvec->zone here;
+ * and if offlined then reonlined, we need to reinitialize it.
+ */
+ if (unlikely(lruvec->zone != zone))
+ lruvec->zone = zone;
+ return lruvec;
}
/*
struct mem_cgroup_per_zone *mz;
struct mem_cgroup *memcg;
struct page_cgroup *pc;
+ struct lruvec *lruvec;
- if (mem_cgroup_disabled())
- return &zone->lruvec;
+ if (mem_cgroup_disabled()) {
+ lruvec = &zone->lruvec;
+ goto out;
+ }
pc = lookup_page_cgroup(page);
memcg = pc->mem_cgroup;
pc->mem_cgroup = memcg = root_mem_cgroup;
mz = page_cgroup_zoneinfo(memcg, page);
- return &mz->lruvec;
+ lruvec = &mz->lruvec;
+out:
+ /*
+ * Since a node can be onlined after the mem_cgroup was created,
+ * we have to be prepared to initialize lruvec->zone here;
+ * and if offlined then reonlined, we need to reinitialize it.
+ */
+ if (unlikely(lruvec->zone != zone))
+ lruvec->zone = zone;
+ return lruvec;
}
/**
static u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
{
u64 limit;
- u64 memsw;
limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
- limit += total_swap_pages << PAGE_SHIFT;
- memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
/*
- * If memsw is finite and limits the amount of swap space available
- * to this memcg, return that limit.
+ * Do not consider swap space if we cannot swap due to swappiness being 0
*/
- return min(limit, memsw);
+ if (mem_cgroup_swappiness(memcg)) {
+ u64 memsw;
+
+ limit += total_swap_pages << PAGE_SHIFT;
+ memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
+
+ /*
+ * If memsw is finite and limits the amount of swap space
+ * available to this memcg, return that limit.
+ */
+ limit = min(limit, memsw);
+ }
+
+ return limit;
}
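A worked example of the new limit calculation, with illustrative numbers:

/*
 * memory.limit_in_bytes = 1G, memsw.limit_in_bytes = 2G, 4G of swap online:
 *
 *   swappiness > 0:  limit = min(1G + 4G, 2G) = 2G
 *   swappiness == 0: limit = 1G  (this memcg can never swap, so swap space
 *                                 is not counted towards what OOM can free)
 */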
void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
static bool mem_cgroup_force_empty_list(struct mem_cgroup *memcg,
int node, int zid, enum lru_list lru)
{
- struct mem_cgroup_per_zone *mz;
+ struct lruvec *lruvec;
unsigned long flags, loop;
struct list_head *list;
struct page *busy;
struct zone *zone;
zone = &NODE_DATA(node)->node_zones[zid];
- mz = mem_cgroup_zoneinfo(memcg, node, zid);
- list = &mz->lruvec.lists[lru];
+ lruvec = mem_cgroup_zone_lruvec(zone, memcg);
+ list = &lruvec->lists[lru];
- loop = mz->lru_size[lru];
+ loop = mem_cgroup_get_lru_size(lruvec, lru);
/* give some margin against EBUSY etc...*/
loop += 256;
busy = NULL;
for (zone = 0; zone < MAX_NR_ZONES; zone++) {
mz = &pn->zoneinfo[zone];
- lruvec_init(&mz->lruvec, &NODE_DATA(node)->node_zones[zone]);
+ lruvec_init(&mz->lruvec);
mz->usage_in_excess = 0;
mz->on_tree = false;
mz->memcg = memcg;
int ret = 0;
int page_mkwrite = 0;
struct page *dirty_page = NULL;
- unsigned long mmun_start; /* For mmu_notifiers */
- unsigned long mmun_end; /* For mmu_notifiers */
- bool mmun_called = false; /* For mmu_notifiers */
+ unsigned long mmun_start = 0; /* For mmu_notifiers */
+ unsigned long mmun_end = 0; /* For mmu_notifiers */
old_page = vm_normal_page(vma, address, orig_pte);
if (!old_page) {
goto oom_free_new;
mmun_start = address & PAGE_MASK;
- mmun_end = (address & PAGE_MASK) + PAGE_SIZE;
- mmun_called = true;
+ mmun_end = mmun_start + PAGE_SIZE;
mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
/*
page_cache_release(new_page);
unlock:
pte_unmap_unlock(page_table, ptl);
- if (mmun_called)
+ if (mmun_end > mmun_start)
mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
if (old_page) {
/*
void __ref put_page_bootmem(struct page *page)
{
unsigned long type;
- struct zone *zone;
type = (unsigned long) page->lru.next;
BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
set_page_private(page, 0);
INIT_LIST_HEAD(&page->lru);
__free_pages_bootmem(page, 0);
-
- zone = page_zone(page);
- zone_span_writelock(zone);
- zone->present_pages++;
- zone_span_writeunlock(zone);
- totalram_pages++;
}
}
struct vm_area_struct *vma = mm->mmap;
while (vma) {
struct anon_vma_chain *avc;
+ vma_lock_anon_vma(vma);
list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
anon_vma_interval_tree_verify(avc);
+ vma_unlock_anon_vma(vma);
vma = vma->vm_next;
i++;
}
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
-void lruvec_init(struct lruvec *lruvec, struct zone *zone)
+void lruvec_init(struct lruvec *lruvec)
{
enum lru_list lru;
for_each_lru(lru)
INIT_LIST_HEAD(&lruvec->lists[lru]);
-
-#ifdef CONFIG_MEMCG
- lruvec->zone = zone;
-#endif
}
return 0;
__free_pages_memory(start_pfn, end_pfn);
- fixup_zone_present_pages(pfn_to_nid(start >> PAGE_SHIFT),
- start_pfn, end_pfn);
return end_pfn - start_pfn;
}
phys_addr_t start, end, size;
u64 i;
- reset_zone_present_pages();
for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL)
count += __free_memory_core(start, end);
zone->zone_pgdat = pgdat;
zone_pcp_init(zone);
- lruvec_init(&zone->lruvec, zone);
+ lruvec_init(&zone->lruvec);
if (!size)
continue;
dump_page_flags(page->flags);
mem_cgroup_print_bad_page(page);
}
-
-/* reset zone->present_pages */
-void reset_zone_present_pages(void)
-{
- struct zone *z;
- int i, nid;
-
- for_each_node_state(nid, N_HIGH_MEMORY) {
- for (i = 0; i < MAX_NR_ZONES; i++) {
- z = NODE_DATA(nid)->node_zones + i;
- z->present_pages = 0;
- }
- }
-}
-
-/* calculate zone's present pages in buddy system */
-void fixup_zone_present_pages(int nid, unsigned long start_pfn,
- unsigned long end_pfn)
-{
- struct zone *z;
- unsigned long zone_start_pfn, zone_end_pfn;
- int i;
-
- for (i = 0; i < MAX_NR_ZONES; i++) {
- z = NODE_DATA(nid)->node_zones + i;
- zone_start_pfn = z->zone_start_pfn;
- zone_end_pfn = zone_start_pfn + z->spanned_pages;
-
- /* if the two regions intersect */
- if (!(zone_start_pfn >= end_pfn || zone_end_pfn <= start_pfn))
- z->present_pages += min(end_pfn, zone_end_pfn) -
- max(start_pfn, zone_start_pfn);
- }
-}
kfree(info->symlink);
simple_xattrs_free(&info->xattrs);
- BUG_ON(inode->i_blocks);
+ WARN_ON(inode->i_blocks);
shmem_free_inode(inode->i_sb);
clear_inode(inode);
}
if (!error) {
error = shmem_add_to_page_cache(page, mapping, index,
gfp, swp_to_radix_entry(swap));
- /* We already confirmed swap, and make no allocation */
- VM_BUG_ON(error);
+ /*
+ * We already confirmed swap under page lock, and make
+ * no memory allocation here, so usually no possibility
+ * of error; but free_swap_and_cache() only trylocks a
+ * page, so it is just possible that the entry has been
+ * truncated or holepunched since swap was confirmed.
+ * shmem_undo_range() will have done some of the
+ * unaccounting, now delete_from_swap_cache() will do
+ * the rest (including mem_cgroup_uncharge_swapcache).
+ * Reset swap.val? No, leave it so "failed" goes back to
+ * "repeat": reading a hole and writing should succeed.
+ */
+ if (error)
+ delete_from_swap_cache(page);
}
if (error)
goto failed;
BUG_ON(!current->mm);
pathname = getname(specialfile);
- err = PTR_ERR(pathname);
if (IS_ERR(pathname))
- goto out;
+ return PTR_ERR(pathname);
victim = file_open_name(pathname, O_RDWR|O_LARGEFILE, 0);
err = PTR_ERR(victim);
out_dput:
filp_close(victim, NULL);
out:
+ putname(pathname);
return err;
}
return false;
}
-#ifdef CONFIG_COMPACTION
-/*
- * If compaction is deferred for sc->order then scale the number of pages
- * reclaimed based on the number of consecutive allocation failures
- */
-static unsigned long scale_for_compaction(unsigned long pages_for_compaction,
- struct lruvec *lruvec, struct scan_control *sc)
-{
- struct zone *zone = lruvec_zone(lruvec);
-
- if (zone->compact_order_failed <= sc->order)
- pages_for_compaction <<= zone->compact_defer_shift;
- return pages_for_compaction;
-}
-#else
-static unsigned long scale_for_compaction(unsigned long pages_for_compaction,
- struct lruvec *lruvec, struct scan_control *sc)
-{
- return pages_for_compaction;
-}
-#endif
-
/*
* Reclaim/compaction is used for high-order allocation requests. It reclaims
* order-0 pages before compacting the zone. should_continue_reclaim() returns
* inactive lists are large enough, continue reclaiming
*/
pages_for_compaction = (2UL << sc->order);
-
- pages_for_compaction = scale_for_compaction(pages_for_compaction,
- lruvec, sc);
inactive_lru_pages = get_lru_size(lruvec, LRU_INACTIVE_FILE);
if (nr_swap_pages > 0)
inactive_lru_pages += get_lru_size(lruvec, LRU_INACTIVE_ANON);
soft_iface->last_rx = jiffies;
+ /* Let the bridge loop avoidance check the packet. If it will
+ * not handle it, we can safely push it up.
+ */
+ if (batadv_bla_rx(bat_priv, skb, vid, is_bcast))
+ goto out;
+
if (orig_node)
batadv_tt_add_temporary_global_entry(bat_priv, orig_node,
ethhdr->h_source);
if (batadv_is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest))
goto dropped;
- /* Let the bridge loop avoidance check the packet. If will
- * not handle it, we can safely push it up.
- */
- if (batadv_bla_rx(bat_priv, skb, vid, is_bcast))
- goto out;
-
netif_rx(skb);
goto out;
*/
tt_global_entry->common.flags &= ~BATADV_TT_CLIENT_TEMP;
+ /* the change can carry "attribute" flags such as
+ * TT_CLIENT_WIFI, therefore they have to be copied into the
+ * client entry
+ */
+ tt_global_entry->common.flags |= flags;
+
/* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only
* one originator left in the list and we previously received a
* delete + roaming change for this originator.
memcpy(tt_change->addr, tt_common_entry->addr,
ETH_ALEN);
- tt_change->flags = BATADV_NO_FLAGS;
+ tt_change->flags = tt_common_entry->flags;
tt_count++;
tt_change++;
{
bool ret = false;
+ /* if the originator is a backbone node (meaning it belongs to the same
+ * LAN as this node) the temporary client must not be added because to
+ * reach such a destination the node must use the LAN instead of the mesh
+ */
+ if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig))
+ goto out;
+
if (!batadv_tt_global_add(bat_priv, orig_node, addr,
BATADV_TT_CLIENT_TEMP,
atomic_read(&orig_node->last_ttvn)))
if (hdev->dev_type != HCI_AMP)
set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
- schedule_work(&hdev->power_on);
-
hci_notify(hdev, HCI_DEV_REG);
hci_dev_hold(hdev);
+ schedule_work(&hdev->power_on);
+
return id;
err_wqueue:
struct hci_dev *d;
size_t rp_len;
u16 count;
- int i, err;
+ int err;
BT_DBG("sock %p", sk);
return -ENOMEM;
}
- rp->num_controllers = cpu_to_le16(count);
-
- i = 0;
+ count = 0;
list_for_each_entry(d, &hci_dev_list, list) {
if (test_bit(HCI_SETUP, &d->dev_flags))
continue;
if (!mgmt_valid_hdev(d))
continue;
- rp->index[i++] = cpu_to_le16(d->id);
+ rp->index[count++] = cpu_to_le16(d->id);
BT_DBG("Added hci%u", d->id);
}
+ rp->num_controllers = cpu_to_le16(count);
+ rp_len = sizeof(*rp) + (2 * count);
+
read_unlock(&hci_dev_list_lock);
err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
continue;
list_del(&match->list);
+ kfree(match);
found++;
}
clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->hcon->flags);
mgmt_auth_failed(conn->hcon->hdev, conn->dst, hcon->type,
- hcon->dst_type, reason);
+ hcon->dst_type, HCI_ERROR_AUTH_FAILURE);
cancel_delayed_work_sync(&conn->security_timer);
if (unlikely(tcpu != next_cpu) &&
(tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
((int)(per_cpu(softnet_data, tcpu).input_queue_head -
- rflow->last_qtail)) >= 0))
+ rflow->last_qtail)) >= 0)) {
+ tcpu = next_cpu;
rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
+ }
if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
*rflowp = rflow;
*/
ha = list_first_entry(&dev->dev_addrs.list,
struct netdev_hw_addr, list);
- if (ha->addr == dev->dev_addr && ha->refcount == 1)
+ if (!memcmp(ha->addr, addr, dev->addr_len) &&
+ ha->type == addr_type && ha->refcount == 1)
return -ENOENT;
err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len,
struct inet_sock *inet = inet_sk(sk);
int val = 0, err;
- if (((1<<optname) & ((1<<IP_PKTINFO) | (1<<IP_RECVTTL) |
- (1<<IP_RECVOPTS) | (1<<IP_RECVTOS) |
- (1<<IP_RETOPTS) | (1<<IP_TOS) |
- (1<<IP_TTL) | (1<<IP_HDRINCL) |
- (1<<IP_MTU_DISCOVER) | (1<<IP_RECVERR) |
- (1<<IP_ROUTER_ALERT) | (1<<IP_FREEBIND) |
- (1<<IP_PASSSEC) | (1<<IP_TRANSPARENT) |
- (1<<IP_MINTTL) | (1<<IP_NODEFRAG))) ||
- optname == IP_UNICAST_IF ||
- optname == IP_MULTICAST_TTL ||
- optname == IP_MULTICAST_ALL ||
- optname == IP_MULTICAST_LOOP ||
- optname == IP_RECVORIGDSTADDR) {
+ switch (optname) {
+ case IP_PKTINFO:
+ case IP_RECVTTL:
+ case IP_RECVOPTS:
+ case IP_RECVTOS:
+ case IP_RETOPTS:
+ case IP_TOS:
+ case IP_TTL:
+ case IP_HDRINCL:
+ case IP_MTU_DISCOVER:
+ case IP_RECVERR:
+ case IP_ROUTER_ALERT:
+ case IP_FREEBIND:
+ case IP_PASSSEC:
+ case IP_TRANSPARENT:
+ case IP_MINTTL:
+ case IP_NODEFRAG:
+ case IP_UNICAST_IF:
+ case IP_MULTICAST_TTL:
+ case IP_MULTICAST_ALL:
+ case IP_MULTICAST_LOOP:
+ case IP_RECVORIGDSTADDR:
if (optlen >= sizeof(int)) {
if (get_user(val, (int __user *) optval))
return -EFAULT;
if (tunnel != NULL) {
struct pcpu_tstats *tstats;
+ if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
+ return -1;
+
tstats = this_cpu_ptr(tunnel->dev->tstats);
u64_stats_update_begin(&tstats->syncp);
tstats->rx_packets++;
tstats->rx_bytes += skb->len;
u64_stats_update_end(&tstats->syncp);
+ skb->mark = 0;
+ secpath_reset(skb);
skb->dev = tunnel->dev;
return 1;
}
wait_for_sndbuf:
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
- if (copied && likely(!tp->repair))
+ if (copied)
tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
}
out:
- if (copied && likely(!tp->repair))
+ if (copied)
tcp_push(sk, flags, mss_now, tp->nonagle);
release_sock(sk);
return copied + copied_syn;
goto discard;
}
- /* ts_recent update must be made after we are sure that the packet
- * is in window.
- */
- tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
-
/* step 3: check security and precedence [ignored] */
/* step 4: Check for a SYN
if (th->ack && tcp_ack(sk, skb, FLAG_SLOWPATH) < 0)
goto discard;
+ /* ts_recent update must be made after we are sure that the packet
+ * is in window.
+ */
+ tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
+
tcp_rcv_rtt_measure_ts(sk, skb);
/* Process urgent data. */
} else
goto discard;
+ /* ts_recent update must be made after we are sure that the packet
+ * is in window.
+ */
+ tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
+
/* step 6: check the URG bit */
tcp_urg(sk, skb, th);
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
-#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/hash.h>
#include <linux/tcp_metrics.h>
+#include <linux/vmalloc.h>
#include <net/inet_connection_sock.h>
#include <net/net_namespace.h>
net->ipv4.tcp_metrics_hash_log = order_base_2(slots);
size = sizeof(struct tcpm_hash_bucket) << net->ipv4.tcp_metrics_hash_log;
- net->ipv4.tcp_metrics_hash = kzalloc(size, GFP_KERNEL);
+ net->ipv4.tcp_metrics_hash = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
+ if (!net->ipv4.tcp_metrics_hash)
+ net->ipv4.tcp_metrics_hash = vzalloc(size);
+
if (!net->ipv4.tcp_metrics_hash)
return -ENOMEM;
tm = next;
}
}
- kfree(net->ipv4.tcp_metrics_hash);
+ if (is_vmalloc_addr(net->ipv4.tcp_metrics_hash))
+ vfree(net->ipv4.tcp_metrics_hash);
+ else
+ kfree(net->ipv4.tcp_metrics_hash);
}
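The tcp_metrics hunks above adopt the common "try kmalloc quietly, fall back to vmalloc" pattern for a possibly large hash table; a self-contained sketch of the allocate/free pair (names are illustrative):

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

/* Prefer physically contiguous memory but do not warn on failure
 * (__GFP_NOWARN); fall back to vmalloc for large sizes. */
static void *table_alloc(size_t size)
{
	void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

	if (!p)
		p = vzalloc(size);
	return p;
}

static void table_free(void *p)
{
	/* Free with whichever allocator actually provided the memory. */
	if (is_vmalloc_addr(p))
		vfree(p);
	else
		kfree(p);
}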
static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
BUG_ON(!tso_segs);
+ if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE)
+ goto repair; /* Skip network transmission */
+
cwnd_quota = tcp_cwnd_test(tp, skb);
if (!cwnd_quota)
break;
if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
break;
+repair:
/* Advance the send_head. This one is sent out.
* This call will increment packets_out.
*/
if (val < 0 || val > 255)
goto e_inval;
np->min_hopcount = val;
+ retv = 0;
break;
case IPV6_DONTFRAG:
np->dontfrag = valbool;
else
local->probe_req_reg--;
+ if (!local->open_count)
+ break;
+
ieee80211_queue_work(&local->hw, &local->reconfig_filter);
break;
default:
struct net_device *dev);
netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
struct net_device *dev);
+void ieee80211_purge_tx_queue(struct ieee80211_hw *hw,
+ struct sk_buff_head *skbs);
/* HT */
void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
local->hw.wiphy->cipher_suites,
sizeof(u32) * local->hw.wiphy->n_cipher_suites,
GFP_KERNEL);
- if (!suites)
- return -ENOMEM;
+ if (!suites) {
+ result = -ENOMEM;
+ goto fail_wiphy_register;
+ }
for (r = 0; r < local->hw.wiphy->n_cipher_suites; r++) {
u32 suite = local->hw.wiphy->cipher_suites[r];
if (suite == WLAN_CIPHER_SUITE_WEP40 ||
struct cfg80211_sched_scan_request *req)
{
struct ieee80211_local *local = sdata->local;
- struct ieee80211_sched_scan_ies sched_scan_ies;
+ struct ieee80211_sched_scan_ies sched_scan_ies = {};
int ret, i;
mutex_lock(&local->mtx);
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
local->total_ps_buffered -= skb_queue_len(&sta->ps_tx_buf[ac]);
- __skb_queue_purge(&sta->ps_tx_buf[ac]);
- __skb_queue_purge(&sta->tx_filtered[ac]);
+ ieee80211_purge_tx_queue(&local->hw, &sta->ps_tx_buf[ac]);
+ ieee80211_purge_tx_queue(&local->hw, &sta->tx_filtered[ac]);
}
#ifdef CONFIG_MAC80211_MESH
tid_tx = rcu_dereference_raw(sta->ampdu_mlme.tid_tx[i]);
if (!tid_tx)
continue;
- __skb_queue_purge(&tid_tx->pending);
+ ieee80211_purge_tx_queue(&local->hw, &tid_tx->pending);
kfree(tid_tx);
}
struct ieee80211_local *local = sdata->local;
struct sk_buff_head pending;
int filtered = 0, buffered = 0, ac;
+ unsigned long flags;
clear_sta_flag(sta, WLAN_STA_SP);
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
int count = skb_queue_len(&pending), tmp;
+ spin_lock_irqsave(&sta->tx_filtered[ac].lock, flags);
skb_queue_splice_tail_init(&sta->tx_filtered[ac], &pending);
+ spin_unlock_irqrestore(&sta->tx_filtered[ac].lock, flags);
tmp = skb_queue_len(&pending);
filtered += tmp - count;
count = tmp;
+ spin_lock_irqsave(&sta->ps_tx_buf[ac].lock, flags);
skb_queue_splice_tail_init(&sta->ps_tx_buf[ac], &pending);
+ spin_unlock_irqrestore(&sta->ps_tx_buf[ac].lock, flags);
tmp = skb_queue_len(&pending);
buffered += tmp - count;
}
dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ieee80211_free_txskb);
+
+void ieee80211_purge_tx_queue(struct ieee80211_hw *hw,
+ struct sk_buff_head *skbs)
+{
+ struct sk_buff *skb;
+
+ while ((skb = __skb_dequeue(skbs)))
+ ieee80211_free_txskb(hw, skb);
+}
if (tx->skb)
ieee80211_free_txskb(&tx->local->hw, tx->skb);
else
- __skb_queue_purge(&tx->skbs);
+ ieee80211_purge_tx_queue(&tx->local->hw, &tx->skbs);
return -1;
} else if (unlikely(res == TX_QUEUED)) {
I802_DEBUG_INC(tx->local->tx_handlers_queued);
*/
void ieee80211_clear_tx_pending(struct ieee80211_local *local)
{
+ struct sk_buff *skb;
int i;
- for (i = 0; i < local->hw.queues; i++)
- skb_queue_purge(&local->pending[i]);
+ for (i = 0; i < local->hw.queues; i++) {
+ while ((skb = skb_dequeue(&local->pending[i])) != NULL)
+ ieee80211_free_txskb(&local->hw, skb);
+ }
}
/*
list_for_each_entry(sdata, &local->interfaces, list) {
if (sdata->vif.type != NL80211_IFTYPE_STATION)
continue;
+ if (!sdata->u.mgd.associated)
+ continue;
ieee80211_send_nullfunc(local, sdata, 0);
}
.open = sctp_snmp_seq_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = single_release,
+ .release = single_release_net,
};
/* Set up the proc fs entry for 'snmp' object. */
.open = sctp_eps_seq_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = seq_release,
+ .release = seq_release_net,
};
/* Set up the proc fs entry for 'eps' object. */
.open = sctp_assocs_seq_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = seq_release,
+ .release = seq_release_net,
};
/* Set up the proc fs entry for 'assocs' object. */
.open = sctp_remaddr_seq_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = seq_release,
+ .release = seq_release_net,
};
int __net_init sctp_remaddr_proc_init(struct net *net)
.reg_rules = {
/* IEEE 802.11b/g, channels 1..11 */
REG_RULE(2412-10, 2462+10, 40, 6, 20, 0),
- /* IEEE 802.11b/g, channels 12..13. No HT40
- * channel fits here. */
- REG_RULE(2467-10, 2472+10, 20, 6, 20,
+ /* IEEE 802.11b/g, channels 12..13. */
+ REG_RULE(2467-10, 2472+10, 40, 6, 20,
NL80211_RRF_PASSIVE_SCAN |
NL80211_RRF_NO_IBSS),
/* IEEE 802.11 channel 14 - Only JP enables