qi_submit_sync(&desc, iommu);
}
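+
+/*
+ * Flush the context cache through the invalidation queue.  If this is a
+ * flush for a non-present entry and the hardware does not cache
+ * non-present entries (no caching mode), there is nothing to invalidate
+ * and 1 is returned.  With caching mode, non-present entries are cached
+ * under domain id 0, so the flush is redirected to domain 0.  Returns 0
+ * once the descriptor has been submitted and waited on.
+ */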
+int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
+ u64 type, int non_present_entry_flush)
+{
+ struct qi_desc desc;
+
+ if (non_present_entry_flush) {
+ if (!cap_caching_mode(iommu->cap))
+ return 1;
+ else
+ did = 0;
+ }
+
+ desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
+ | QI_CC_GRAN(type) | QI_CC_TYPE;
+ desc.high = 0;
+
+ qi_submit_sync(&desc, iommu);
+
+ return 0;
+}
+
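+/*
+ * Flush the IOTLB through the invalidation queue.  Read/write draining is
+ * requested when the hardware advertises the corresponding capabilities.
+ * Non-present entry flushes are handled as in qi_flush_context(): skipped
+ * (return 1) without caching mode, redirected to domain id 0 with it.
+ */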
+int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
+ unsigned int size_order, u64 type,
+ int non_present_entry_flush)
+{
+ u8 dw = 0, dr = 0;
+ struct qi_desc desc;
+ int ih = 0;
+
+ if (non_present_entry_flush) {
+ if (!cap_caching_mode(iommu->cap))
+ return 1;
+ else
+ did = 0;
+ }
+
+ if (cap_write_drain(iommu->cap))
+ dw = 1;
+
+ if (cap_read_drain(iommu->cap))
+ dr = 1;
+
+ desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
+ | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
+ desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
+ | QI_IOTLB_AM(size_order);
+
+ qi_submit_sync(&desc, iommu);
+
+ return 0;
+}
+
/*
* Enable Queued Invalidation interface. This is a must to support
* interrupt-remapping. Also used by DMA-remapping, which replaces
/* IOTLB_REG */
+#define DMA_TLB_FLUSH_GRANU_OFFSET 60
#define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60)
#define DMA_TLB_DSI_FLUSH (((u64)2) << 60)
#define DMA_TLB_PSI_FLUSH (((u64)3) << 60)
#define DMA_TLB_MAX_SIZE (0x3f)
/* INVALID_DESC */
+#define DMA_CCMD_INVL_GRANU_OFFSET 61
#define DMA_ID_TLB_GLOBAL_FLUSH (((u64)1) << 3)
#define DMA_ID_TLB_DSI_FLUSH (((u64)2) << 3)
#define DMA_ID_TLB_PSI_FLUSH (((u64)3) << 3)
#define QI_IWD_STATUS_DATA(d) (((u64)d) << 32)
#define QI_IWD_STATUS_WRITE (((u64)1) << 5)
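+/*
+ * QI_IOTLB_GRAN()/QI_CC_GRAN() convert the register-format granularity
+ * encodings (DMA_TLB_*_FLUSH at bit 60, the context-command granularities
+ * at bit 61) into bits 5:4 of the queued invalidation descriptor, so the
+ * same 'type' values can be passed to both the register based and the
+ * queued invalidation paths.
+ */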
+#define QI_IOTLB_DID(did) (((u64)did) << 16)
+#define QI_IOTLB_DR(dr) (((u64)dr) << 7)
+#define QI_IOTLB_DW(dw) (((u64)dw) << 6)
+#define QI_IOTLB_GRAN(gran) (((u64)gran) >> (DMA_TLB_FLUSH_GRANU_OFFSET-4))
+#define QI_IOTLB_ADDR(addr) (((u64)addr) & PAGE_MASK_4K)
+#define QI_IOTLB_IH(ih) (((u64)ih) << 6)
+#define QI_IOTLB_AM(am) (((u8)am))
+
+#define QI_CC_FM(fm) (((u64)fm) << 48)
+#define QI_CC_SID(sid) (((u64)sid) << 32)
+#define QI_CC_DID(did) (((u64)did) << 16)
+#define QI_CC_GRAN(gran) (((u64)gran) >> (DMA_CCMD_INVL_GRANU_OFFSET-4))
+
struct qi_desc {
u64 low, high;
};
extern int dmar_enable_qi(struct intel_iommu *iommu);
extern void qi_global_iec(struct intel_iommu *iommu);
+extern int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
+ u8 fm, u64 type, int non_present_entry_flush);
+extern int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
+ unsigned int size_order, u64 type,
+ int non_present_entry_flush);
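+
+/*
+ * Illustrative sketch only (not part of this patch): a domain-selective
+ * invalidation of the context cache and IOTLB for domain 'did' could be
+ * queued as
+ *
+ *	qi_flush_context(iommu, did, 0, 0, DMA_CCMD_DOMAIN_INVL, 0);
+ *	qi_flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH, 0);
+ *
+ * assuming DMA_CCMD_DOMAIN_INVL is the register-format domain-selective
+ * granularity defined alongside DMA_CCMD_INVL_GRANU_OFFSET above.
+ */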
+
extern void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
void intel_iommu_domain_exit(struct dmar_domain *domain);