intel-iommu: PMEN support
Add support for the protected memory enable (PMEN) bit by clearing it if it
is set at startup time. Future boot loaders or firmware could leave this bit
set after loading the kernel, and it needs to be cleared for DMAs to work
correctly.
Signed-off-by: mark gross <mgross@intel.com>
Acked-by: Muli Ben-Yehuda <muli@il.ibm.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
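
For reference (not part of the diff below): the new helper uses the PMEN
register and bit names defined in the companion intel-iommu.h header, and
IOMMU_WAIT_OP() is the existing poll helper in intel-iommu.c, so the call in
the patch spins until the protected region status bit reads back as zero. A
minimal sketch of the definitions involved, with values taken from the VT-d
spec and marked as assumptions rather than quoted from this patch:

/* Sketch only: assumed PMEN-related definitions (per the VT-d spec);
 * the exact header contents are not shown in this patch. */
#define DMAR_PMEN_REG	0x64			/* Protected Memory Enable register */
#define DMA_PMEN_EPM	(((u32)1) << 31)	/* Enable Protected Memory bit */
#define DMA_PMEN_PRS	(((u32)1) << 0)		/* Protected Region Status bit */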
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 31fa6c9..585e188 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -692,6 +692,23 @@
DMA_TLB_PSI_FLUSH, non_present_entry_flush);
}
+static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
+{
+ u32 pmen;
+ unsigned long flags;
+
+ spin_lock_irqsave(&iommu->register_lock, flags);
+ pmen = readl(iommu->reg + DMAR_PMEN_REG);
+ pmen &= ~DMA_PMEN_EPM;
+ writel(pmen, iommu->reg + DMAR_PMEN_REG);
+
+ /* wait for the protected region status bit to clear */
+ IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
+ readl, !(pmen & DMA_PMEN_PRS), pmen);
+
+ spin_unlock_irqrestore(&iommu->register_lock, flags);
+}
+
static int iommu_enable_translation(struct intel_iommu *iommu)
{
u32 sts;
@@ -745,7 +762,7 @@
"non-zero reserved fields in PTE",
"Unknown"
};
-#define MAX_FAULT_REASON_IDX ARRAY_SIZE(fault_reason_strings) - 1
+#define MAX_FAULT_REASON_IDX (ARRAY_SIZE(fault_reason_strings) - 1)
char *dmar_get_fault_reason(u8 fault_reason)
{
@@ -1730,6 +1747,8 @@
iommu_flush_context_global(iommu, 0);
iommu_flush_iotlb_global(iommu, 0);
+ iommu_disable_protect_mem_regions(iommu);
+
ret = iommu_enable_translation(iommu);
if (ret)
goto error;