goto out;
}
- ret = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm, bo->tbo.ttm->pages);
+ ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
if (ret) {
pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
goto unregister_out;
}

...

bo = mem->bo;
/* Get updated user pages */
- ret = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
- bo->tbo.ttm->pages);
+ ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
if (ret) {
pr_debug("%s: Failed to get user pages: %d\n",
__func__, ret);
return -ENOMEM;
}
...

- r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm, e->user_pages);
+ r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages);
if (r) {
kvfree(e->user_pages);
e->user_pages = NULL;
}
...

if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
- r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
- bo->tbo.ttm->pages);
+ r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
if (r)
goto release_object;
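All four call sites shown above change the same way: the helper now takes the amdgpu_bo itself instead of its ttm_tt, so it can reach the per-process notifier data hanging off the bo. A before/after sketch of the calling convention (error handling elided):

	/* Before: the helper only saw the ttm_tt. */
	r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm, bo->tbo.ttm->pages);

	/* After: passing the bo lets the helper look up bo->mn and its
	 * hmm_mirror (see the implementation change further below). */
	r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);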
#include <linux/firmware.h>
#include <linux/module.h>
-#include <linux/hmm.h>
-#include <linux/interval_tree.h>
-
#include <drm/drm.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
...

-/**
- * struct amdgpu_mn
- *
- * @adev: amdgpu device pointer
- * @mm: process address space
- * @type: type of MMU notifier
- * @work: destruction work item
- * @node: hash table node to find structure by adev and mn
- * @lock: rw semaphore protecting the notifier nodes
- * @objects: interval tree containing amdgpu_mn_nodes
- * @mirror: HMM mirror function support
- *
- * Data for each amdgpu device and process address space.
- */
-struct amdgpu_mn {
- /* constant after initialisation */
- struct amdgpu_device *adev;
- struct mm_struct *mm;
- enum amdgpu_mn_type type;
-
- /* only used on destruction */
- struct work_struct work;
-
- /* protected by adev->mn_lock */
- struct hlist_node node;
-
- /* objects protected by lock */
- struct rw_semaphore lock;
- struct rb_root_cached objects;
-
- /* HMM mirror */
- struct hmm_mirror mirror;
-};
-
/**
* struct amdgpu_mn_node
*
...

#ifndef __AMDGPU_MN_H__
#define __AMDGPU_MN_H__
-/*
- * HMM mirror
- */
-struct amdgpu_mn;
-struct hmm_range;
+#include <linux/types.h>
+#include <linux/hmm.h>
+#include <linux/rwsem.h>
+#include <linux/workqueue.h>
+#include <linux/interval_tree.h>
enum amdgpu_mn_type {
AMDGPU_MN_TYPE_GFX,
AMDGPU_MN_TYPE_HSA,
};
+/**
+ * struct amdgpu_mn
+ *
+ * @adev: amdgpu device pointer
+ * @mm: process address space
+ * @type: type of MMU notifier
+ * @work: destruction work item
+ * @node: hash table node to find structure by adev and mn
+ * @lock: rw semaphore protecting the notifier nodes
+ * @objects: interval tree containing amdgpu_mn_nodes
+ * @mirror: HMM mirror function support
+ *
+ * Data for each amdgpu device and process address space.
+ */
+struct amdgpu_mn {
+ /* constant after initialisation */
+ struct amdgpu_device *adev;
+ struct mm_struct *mm;
+ enum amdgpu_mn_type type;
+
+ /* only used on destruction */
+ struct work_struct work;
+
+ /* protected by adev->mn_lock */
+ struct hlist_node node;
+
+ /* objects protected by lock */
+ struct rw_semaphore lock;
+ struct rb_root_cached objects;
+
+#ifdef CONFIG_HMM_MIRROR
+ /* HMM mirror */
+ struct hmm_mirror mirror;
+#endif
+};
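The structure definition moves verbatim out of the .c file into this header, together with the hmm.h, rwsem.h, workqueue.h and interval_tree.h includes its members need; that is what allows amdgpu_ttm_tt_get_user_pages() below to dereference bo->mn->mirror directly. Note the hmm_mirror member is now wrapped in CONFIG_HMM_MIRROR, so the structure keeps building when HMM support is not configured.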
+
#if defined(CONFIG_HMM_MIRROR)
void amdgpu_mn_lock(struct amdgpu_mn *mn);
void amdgpu_mn_unlock(struct amdgpu_mn *mn);
...

#define MAX_RETRY_HMM_RANGE_FAULT 16
-int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
+int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
{
+ struct hmm_mirror *mirror = bo->mn ? &bo->mn->mirror : NULL;
+ struct ttm_tt *ttm = bo->tbo.ttm;
struct amdgpu_ttm_tt *gtt = (void *)ttm;
struct mm_struct *mm = gtt->usertask->mm;
unsigned long start = gtt->userptr;
if (!mm) /* Happens during process shutdown */
return -ESRCH;
+ if (unlikely(!mirror)) {
+ DRM_DEBUG_DRIVER("Failed to get hmm_mirror\n");
+ r = -EFAULT;
+ goto out;
+ }
+
vma = find_vma(mm, start);
if (unlikely(!vma || start < vma->vm_start)) {
r = -EFAULT;
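With the bo in hand, the helper resolves the registered hmm_mirror up front and gains two early-out paths: -ESRCH when the owning mm has already gone away during process shutdown, and -EFAULT when either no mirror was registered for this bo or the start address does not land inside a valid VMA.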
int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo);
#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
-int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);
+int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages);
bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm);
#else
-static inline int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
+static inline int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
+ struct page **pages)
{
return -EPERM;
}
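With CONFIG_DRM_AMDGPU_USERPTR disabled, the helper collapses to a static inline stub returning -EPERM, keeping every caller free of #ifdefs. A self-contained sketch of this compile-out pattern (illustrative names, not driver code):

	#include <errno.h>

	#ifdef FEATURE_USERPTR
	int feature_get_user_pages(void *obj, void **pages); /* real implementation */
	#else
	/* Feature compiled out: keep the symbol so callers build unchanged,
	 * but always fail with -EPERM. */
	static inline int feature_get_user_pages(void *obj, void **pages)
	{
		return -EPERM;
	}
	#endif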