bool is_vram = bo->tbo.resource->mem_type == TTM_PL_VRAM;
bool coherent = bo->flags & AMDGPU_GEM_CREATE_COHERENT;
bool uncached = bo->flags & AMDGPU_GEM_CREATE_UNCACHED;
- /* TODO: memory partitions struct amdgpu_vm *vm = mapping->bo_va->base.vm;*/
+ struct amdgpu_vm *vm = mapping->bo_va->base.vm;
unsigned int mtype_local, mtype;
bool snoop = false;
bool is_local;
}
is_local = (!is_vram && (adev->flags & AMD_IS_APU) &&
num_possible_nodes() <= 1) ||
- (is_vram && adev == bo_adev /* TODO: memory partitions &&
- bo->mem_id == vm->mem_id*/);
+ (is_vram && adev == bo_adev &&
+ bo->mem_id == vm->mem_id);
snoop = true;
if (uncached) {
mtype = MTYPE_UC;
return;
}
- /* TODO: memory partitions. mem_id is hard-coded to 0 for now.
- * FIXME: Only supported on native mode for now. For carve-out, the
+ /* FIXME: Only supported on native mode for now. For carve-out, the
* NUMA affinity of the GPU/VM needs to come from the PCI info because
* memory partitions are not associated with different NUMA nodes.
*/
- if (adev->gmc.is_app_apu) {
- local_node = adev->gmc.mem_partitions[/*vm->mem_id*/0].numa.node;
+ if (adev->gmc.is_app_apu && vm->mem_id >= 0) {
+ local_node = adev->gmc.mem_partitions[vm->mem_id].numa.node;
} else {
dev_dbg(adev->dev, "Only native mode APU is supported.\n");
return;
}
nid = pfn_to_nid(addr >> PAGE_SHIFT);
dev_dbg(adev->dev, "vm->mem_id=%d, local_node=%d, nid=%d\n",
- /*vm->mem_id*/0, local_node, nid);
+ vm->mem_id, local_node, nid);
if (nid == local_node) {
uint64_t old_flags = *flags;
unsigned int mtype_local = MTYPE_RW;
mapping_flags |= AMDGPU_VM_MTYPE_UC;
} else if (domain == SVM_RANGE_VRAM_DOMAIN) {
/* local HBM region close to partition */
- if (bo_node->adev == node->adev /* TODO: memory partitions &&
- bo_node->mem_id == node->mem_id*/)
+ if (bo_node->adev == node->adev &&
+ (!bo_node->xcp || !node->xcp || bo_node->xcp->mem_id == node->xcp->mem_id))
mapping_flags |= mtype_local;
/* local HBM region far from partition or remote XGMI GPU */
else if (svm_nodes_in_same_hive(bo_node, node))
(last_domain == SVM_RANGE_VRAM_DOMAIN) ? 1 : 0,
pte_flags);
- /* TODO: we still need to determine the vm_manager.vram_base_offset based on
- * the memory partition.
+ /* For dGPU mode, we use the same vm_manager to allocate VRAM for
+ * different memory partitions based on fpfn/lpfn, so we should use
+ * the same vm_manager.vram_base_offset regardless of the memory partition.
*/
r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, NULL,
last_start, prange->start + i,