)
);
+TRACE_EVENT(
+	kvm_mmu_split_huge_page,
+	TP_PROTO(u64 gfn, u64 spte, int level, int errno),
+	TP_ARGS(gfn, spte, level, errno),
+
+	TP_STRUCT__entry(
+		__field(u64, gfn)
+		__field(u64, spte)
+		__field(int, level)
+		__field(int, errno)
+	),
+
+	TP_fast_assign(
+		__entry->gfn = gfn;
+		__entry->spte = spte;
+		__entry->level = level;
+		__entry->errno = errno;
+	),
+
+	TP_printk("gfn %llx spte %llx level %d errno %d",
+		  __entry->gfn, __entry->spte, __entry->level, __entry->errno)
+);
+
#endif /* _TRACE_KVMMMU_H */
#undef TRACE_INCLUDE_PATH
*/
	ret = tdp_mmu_link_sp(kvm, iter, sp, false, shared);
	if (ret)
-		return ret;
+		goto out;
	/*
	 * tdp_mmu_link_sp_atomic() will handle subtracting the huge page we
	 */
	kvm_update_page_stats(kvm, level - 1, PT64_ENT_PER_PAGE);
-	return 0;
+out:
+	trace_kvm_mmu_split_huge_page(iter->gfn, huge_spte, level, ret);
+	return ret;
}
static int tdp_mmu_split_huge_pages_root(struct kvm *kvm,
		sp = tdp_mmu_alloc_sp_for_split(kvm, &iter, shared);
		if (!sp) {
			ret = -ENOMEM;
+			trace_kvm_mmu_split_huge_page(iter.gfn,
+						      iter.old_spte,
+						      iter.level, ret);
			break;
		}
	if (sp)
		tdp_mmu_free_sp(sp);
-
	return ret;
}
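
For reference, a quick way to exercise the new tracepoint from userspace (not part of the patch; mmutrace.h defines TRACE_SYSTEM as kvmmmu, and the paths below assume tracefs is mounted at /sys/kernel/tracing):

	# echo 1 > /sys/kernel/tracing/events/kvmmmu/kvm_mmu_split_huge_page/enable
	# cat /sys/kernel/tracing/trace_pipe

Each record follows the TP_printk() format above, e.g. "gfn 7f000 spte a6000007f0000ef7 level 2 errno 0" (values are made up for illustration). level 2 corresponds to a 2MiB mapping, and errno is 0 for a successful split or a negative error such as -ENOMEM when tdp_mmu_alloc_sp_for_split() fails, matching the two call sites added in tdp_mmu.c.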