static int
queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		const nodemask_t *nodes, unsigned long flags, void *private)
{
err = queue_pages_pgd_range(vma, start, endvma, nodes,
flags, private);
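
With queue_pages_range() returning an int, a failure inside the vma walk can simply stop the loop; the continuation below is a sketch inferred from the new contract, not part of the quoted hunk:

			if (err)
				break;	/* stop the walk, propagate -errno */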
/*
 * Allocate a new page for page migration based on vma policy.
 * Start by assuming the page is mapped by the same vma as contains @start.
 * Search forward from there, if not.  N.B., this assumes that the
 * list of pages handed to migrate_pages()--which is how we get here--
 * is in virtual address order.
 */
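
This comment documents the lookup done by new_page(), the migration callback named in the migrate_pages() hunk below. A sketch of that forward search, reconstructed from the comment (the locals, the unused int **x callback argument, and the allocation flags are assumptions):

static struct page *new_page(struct page *page, unsigned long start, int **x)
{
	struct vm_area_struct *vma;
	unsigned long address;

	/* Assume @page is mapped by the vma containing @start ... */
	vma = find_vma(current->mm, start);
	while (vma) {
		address = page_address_in_vma(page, vma);
		if (address != -EFAULT)
			break;
		/* ... otherwise search forward, relying on address order. */
		vma = vma->vm_next;
	}
	/* Allocate the replacement page according to the vma's policy. */
	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
}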
static long do_mbind(unsigned long start, unsigned long len,
		unsigned short mode, unsigned short mode_flags,
		nodemask_t *nmask, unsigned long flags)
{
- vma = queue_pages_range(mm, start, end, nmask,
+ err = queue_pages_range(mm, start, end, nmask,
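
Since queue_pages_range() now reports 0 or -errno rather than a vma pointer, the caller can test the result directly; a sketch of the expected follow-up (the mpol_out cleanup label and the MPOL_MF_INVERT flag are assumptions about the surrounding code):

	err = queue_pages_range(mm, start, end, nmask,
				flags | MPOL_MF_INVERT, &pagelist);
	if (err)
		goto mpol_out;	/* assumed cleanup label */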
err = mbind_range(mm, start, end, new);
if (!err) {
- nr_failed = migrate_pages(&pagelist, new_vma_page,
- (unsigned long)vma,
- MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
+ nr_failed = migrate_pages(&pagelist, new_page,
+ start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
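
Passing the address start instead of a cached vma pointer means new_page() re-finds the vma at migration time, so it is not left holding a pointer staled by the vma merging done in mbind_range(). For context, a sketch of how the migration result is assumed to feed back into do_mbind()'s return value (these surrounding lines are not part of the quoted hunk):

			if (nr_failed) {
				putback_movable_pages(&pagelist);
				if (flags & MPOL_MF_STRICT)
					err = -EIO;	/* some pages were not moved */
			}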
	if (current_cpuset_is_being_rebound()) {
		nodemask_t mems = cpuset_mems_allowed(current);
		if (new->flags & MPOL_F_REBINDING)
			mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2);
		else
			mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
	}