Make a sweep through mm/ and convert code that uses -1 directly to using
the more appropriate NUMA_NO_NODE.
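NUMA_NO_NODE is the generic "no node" placeholder from include/linux/numa.h
(shown here for context only; this snippet is not part of the patch):

	#define NUMA_NO_NODE	(-1)

Since the values are identical, the conversion is purely cosmetic and has no
functional effect: a caller such as kmalloc_node(size, GFP_KERNEL,
NUMA_NO_NODE) still behaves exactly like plain kmalloc(size, GFP_KERNEL).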
Signed-off-by: David Rientjes <rientjes@google.com>
Reviewed-by: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
struct page *page;
unsigned long _address;
spinlock_t *ptl;
- int node = -1;
+ int node = NUMA_NO_NODE;
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
* be more sophisticated and look at more pages,
* but isn't for now.
*/
- if (node == -1)
+ if (node == NUMA_NO_NODE)
node = page_to_nid(page);
VM_BUG_ON(PageCompound(page));
if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
* the allocation to memory nodes instead
*
* preferred Try a specific node first before normal fallback.
- * As a special case node -1 here means do the allocation
+ * As a special case NUMA_NO_NODE here means do the allocation
* on the local CPU. This is normally identical to default,
* but useful to set in a VMA when you have a non default
* process policy.
if (!pol) {
node = numa_node_id();
- if (node != -1)
+ if (node != NUMA_NO_NODE)
pol = &preferred_node_policy[node];
/* preferred_node_policy is not initialised early in boot */
struct mempolicy *policy;
pr_debug("setting mode %d flags %d nodes[0] %lx\n",
- mode, flags, nodes ? nodes_addr(*nodes)[0] : -1);
+ mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
if (mode == MPOL_DEFAULT) {
if (nodes && !nodes_empty(*nodes))
pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
start, start + len, mode, mode_flags,
pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
start, start + len, mode, mode_flags,
- nmask ? nodes_addr(*nmask)[0] : -1);
+ nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
vma->vm_pgoff,
sz, npol ? npol->mode : -1,
npol ? npol->flags : -1,
- npol ? nodes_addr(npol->v.nodes)[0] : -1);
+ npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
if (npol) {
new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
{
int n, val;
int min_val = INT_MAX;
- int best_node = -1;
+ int best_node = NUMA_NO_NODE;
const struct cpumask *tmp = cpumask_of_node(0);
/* Use the local node if we haven't already */
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
unsigned long start, unsigned long end)
{
- return __get_vm_area_node(size, 1, flags, start, end, -1, GFP_KERNEL,
- __builtin_return_address(0));
+ return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
+ GFP_KERNEL, __builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(__get_vm_area);
unsigned long start, unsigned long end,
const void *caller)
{
- return __get_vm_area_node(size, 1, flags, start, end, -1, GFP_KERNEL,
- caller);
+ return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
+ GFP_KERNEL, caller);
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
- -1, GFP_KERNEL, __builtin_return_address(0));
+ NUMA_NO_NODE, GFP_KERNEL,
+ __builtin_return_address(0));
}
struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
const void *caller)
{
return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
- -1, GFP_KERNEL, caller);
+ NUMA_NO_NODE, GFP_KERNEL, caller);
* @end: vm area range end
* @gfp_mask: flags for the page level allocator
* @prot: protection mask for the allocated pages
- * @node: node to use for allocation or -1
+ * @node: node to use for allocation or NUMA_NO_NODE
* @caller: caller's return address
*
* Allocate enough pages to cover @size from the page level
* @align: desired alignment
* @gfp_mask: flags for the page level allocator
* @prot: protection mask for the allocated pages
- * @node: node to use for allocation or -1
+ * @node: node to use for allocation or NUMA_NO_NODE
* @caller: caller's return address
*
* Allocate enough pages to cover @size from the page level
void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
- return __vmalloc_node(size, 1, gfp_mask, prot, -1,
+ return __vmalloc_node(size, 1, gfp_mask, prot, NUMA_NO_NODE,
__builtin_return_address(0));
}
EXPORT_SYMBOL(__vmalloc);
*/
void *vmalloc(unsigned long size)
{
- return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
+ return __vmalloc_node_flags(size, NUMA_NO_NODE,
+ GFP_KERNEL | __GFP_HIGHMEM);
}
EXPORT_SYMBOL(vmalloc);
*/
void *vzalloc(unsigned long size)
{
- return __vmalloc_node_flags(size, -1,
+ return __vmalloc_node_flags(size, NUMA_NO_NODE,
GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
}
EXPORT_SYMBOL(vzalloc);
ret = __vmalloc_node(size, SHMLBA,
GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
- PAGE_KERNEL, -1, __builtin_return_address(0));
+ PAGE_KERNEL, NUMA_NO_NODE,
+ __builtin_return_address(0));
if (ret) {
area = find_vm_area(ret);
area->flags |= VM_USERMAP;
void *vmalloc_exec(unsigned long size)
{
return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
- -1, __builtin_return_address(0));
+ NUMA_NO_NODE, __builtin_return_address(0));
}
#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
void *vmalloc_32(unsigned long size)
{
return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
- -1, __builtin_return_address(0));
+ NUMA_NO_NODE, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_32);
void *ret;
ret = __vmalloc_node(size, 1, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
- -1, __builtin_return_address(0));
+ NUMA_NO_NODE, __builtin_return_address(0));
if (ret) {
area = find_vm_area(ret);
area->flags |= VM_USERMAP;