| /* |
| * IPMMU VMSA |
| * |
| * Copyright (C) 2014 Renesas Electronics Corporation |
| * |
| * This program is free software; you can redistribute it and/or modify |
| * it under the terms of the GNU General Public License as published by |
| * the Free Software Foundation; version 2 of the License. |
| */ |
| |
| #include <linux/bitmap.h> |
| #include <linux/delay.h> |
| #include <linux/dma-iommu.h> |
| #include <linux/dma-mapping.h> |
| #include <linux/err.h> |
| #include <linux/export.h> |
| #include <linux/interrupt.h> |
| #include <linux/io.h> |
| #include <linux/iommu.h> |
| #include <linux/module.h> |
| #include <linux/of.h> |
| #include <linux/of_platform.h> |
| #include <linux/platform_device.h> |
| #include <linux/sizes.h> |
| #include <linux/slab.h> |
| |
| #if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA) |
| #include <asm/dma-iommu.h> |
| #include <asm/pgalloc.h> |
| #endif |
| |
| #include "io-pgtable.h" |
| |
#define IPMMU_CTX_MAX 8
| |
| struct ipmmu_vmsa_device { |
| struct device *dev; |
| void __iomem *base; |
| struct iommu_device iommu; |
| |
| unsigned int num_utlbs; |
| spinlock_t lock; /* Protects ctx and domains[] */ |
| DECLARE_BITMAP(ctx, IPMMU_CTX_MAX); |
| struct ipmmu_vmsa_domain *domains[IPMMU_CTX_MAX]; |
| |
| struct dma_iommu_mapping *mapping; |
| }; |
| |
| struct ipmmu_vmsa_domain { |
| struct ipmmu_vmsa_device *mmu; |
| struct iommu_domain io_domain; |
| |
| struct io_pgtable_cfg cfg; |
| struct io_pgtable_ops *iop; |
| |
| unsigned int context_id; |
| struct mutex mutex; /* Protects mappings */ |
| }; |
| |
| struct ipmmu_vmsa_iommu_priv { |
| struct ipmmu_vmsa_device *mmu; |
| struct device *dev; |
| struct list_head list; |
| }; |
| |
| static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom) |
| { |
| return container_of(dom, struct ipmmu_vmsa_domain, io_domain); |
| } |
| |
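/* Per-device private data, stored in the fwspec by of_xlate(). */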
| static struct ipmmu_vmsa_iommu_priv *to_priv(struct device *dev) |
| { |
| return dev->iommu_fwspec ? dev->iommu_fwspec->iommu_priv : NULL; |
| } |
| |
| #define TLB_LOOP_TIMEOUT 100 /* 100us */ |
| |
| /* ----------------------------------------------------------------------------- |
 * Register Definitions
| */ |
| |
| #define IM_NS_ALIAS_OFFSET 0x800 |
| |
| #define IM_CTX_SIZE 0x40 |
| |
| #define IMCTR 0x0000 |
| #define IMCTR_TRE (1 << 17) |
| #define IMCTR_AFE (1 << 16) |
| #define IMCTR_RTSEL_MASK (3 << 4) |
| #define IMCTR_RTSEL_SHIFT 4 |
| #define IMCTR_TREN (1 << 3) |
| #define IMCTR_INTEN (1 << 2) |
| #define IMCTR_FLUSH (1 << 1) |
| #define IMCTR_MMUEN (1 << 0) |
| |
| #define IMCAAR 0x0004 |
| |
| #define IMTTBCR 0x0008 |
| #define IMTTBCR_EAE (1 << 31) |
| #define IMTTBCR_PMB (1 << 30) |
| #define IMTTBCR_SH1_NON_SHAREABLE (0 << 28) |
| #define IMTTBCR_SH1_OUTER_SHAREABLE (2 << 28) |
| #define IMTTBCR_SH1_INNER_SHAREABLE (3 << 28) |
| #define IMTTBCR_SH1_MASK (3 << 28) |
| #define IMTTBCR_ORGN1_NC (0 << 26) |
| #define IMTTBCR_ORGN1_WB_WA (1 << 26) |
| #define IMTTBCR_ORGN1_WT (2 << 26) |
| #define IMTTBCR_ORGN1_WB (3 << 26) |
| #define IMTTBCR_ORGN1_MASK (3 << 26) |
| #define IMTTBCR_IRGN1_NC (0 << 24) |
| #define IMTTBCR_IRGN1_WB_WA (1 << 24) |
| #define IMTTBCR_IRGN1_WT (2 << 24) |
| #define IMTTBCR_IRGN1_WB (3 << 24) |
| #define IMTTBCR_IRGN1_MASK (3 << 24) |
| #define IMTTBCR_TSZ1_MASK (7 << 16) |
| #define IMTTBCR_TSZ1_SHIFT 16 |
| #define IMTTBCR_SH0_NON_SHAREABLE (0 << 12) |
| #define IMTTBCR_SH0_OUTER_SHAREABLE (2 << 12) |
| #define IMTTBCR_SH0_INNER_SHAREABLE (3 << 12) |
| #define IMTTBCR_SH0_MASK (3 << 12) |
| #define IMTTBCR_ORGN0_NC (0 << 10) |
| #define IMTTBCR_ORGN0_WB_WA (1 << 10) |
| #define IMTTBCR_ORGN0_WT (2 << 10) |
| #define IMTTBCR_ORGN0_WB (3 << 10) |
| #define IMTTBCR_ORGN0_MASK (3 << 10) |
| #define IMTTBCR_IRGN0_NC (0 << 8) |
| #define IMTTBCR_IRGN0_WB_WA (1 << 8) |
| #define IMTTBCR_IRGN0_WT (2 << 8) |
| #define IMTTBCR_IRGN0_WB (3 << 8) |
| #define IMTTBCR_IRGN0_MASK (3 << 8) |
| #define IMTTBCR_SL0_LVL_2 (0 << 4) |
| #define IMTTBCR_SL0_LVL_1 (1 << 4) |
| #define IMTTBCR_TSZ0_MASK (7 << 0) |
#define IMTTBCR_TSZ0_SHIFT	0
| |
| #define IMBUSCR 0x000c |
| #define IMBUSCR_DVM (1 << 2) |
| #define IMBUSCR_BUSSEL_SYS (0 << 0) |
| #define IMBUSCR_BUSSEL_CCI (1 << 0) |
| #define IMBUSCR_BUSSEL_IMCAAR (2 << 0) |
| #define IMBUSCR_BUSSEL_CCI_IMCAAR (3 << 0) |
| #define IMBUSCR_BUSSEL_MASK (3 << 0) |
| |
| #define IMTTLBR0 0x0010 |
| #define IMTTUBR0 0x0014 |
| #define IMTTLBR1 0x0018 |
| #define IMTTUBR1 0x001c |
| |
| #define IMSTR 0x0020 |
| #define IMSTR_ERRLVL_MASK (3 << 12) |
| #define IMSTR_ERRLVL_SHIFT 12 |
| #define IMSTR_ERRCODE_TLB_FORMAT (1 << 8) |
| #define IMSTR_ERRCODE_ACCESS_PERM (4 << 8) |
| #define IMSTR_ERRCODE_SECURE_ACCESS (5 << 8) |
| #define IMSTR_ERRCODE_MASK (7 << 8) |
| #define IMSTR_MHIT (1 << 4) |
| #define IMSTR_ABORT (1 << 2) |
| #define IMSTR_PF (1 << 1) |
| #define IMSTR_TF (1 << 0) |
| |
| #define IMMAIR0 0x0028 |
| #define IMMAIR1 0x002c |
| #define IMMAIR_ATTR_MASK 0xff |
| #define IMMAIR_ATTR_DEVICE 0x04 |
| #define IMMAIR_ATTR_NC 0x44 |
| #define IMMAIR_ATTR_WBRWA 0xff |
| #define IMMAIR_ATTR_SHIFT(n) ((n) << 3) |
| #define IMMAIR_ATTR_IDX_NC 0 |
| #define IMMAIR_ATTR_IDX_WBRWA 1 |
| #define IMMAIR_ATTR_IDX_DEV 2 |
| |
| #define IMEAR 0x0030 |
| |
| #define IMPCTR 0x0200 |
| #define IMPSTR 0x0208 |
| #define IMPEAR 0x020c |
| #define IMPMBA(n) (0x0280 + ((n) * 4)) |
| #define IMPMBD(n) (0x02c0 + ((n) * 4)) |
| |
| #define IMUCTR(n) (0x0300 + ((n) * 16)) |
| #define IMUCTR_FIXADDEN (1 << 31) |
| #define IMUCTR_FIXADD_MASK (0xff << 16) |
| #define IMUCTR_FIXADD_SHIFT 16 |
| #define IMUCTR_TTSEL_MMU(n) ((n) << 4) |
| #define IMUCTR_TTSEL_PMB (8 << 4) |
| #define IMUCTR_TTSEL_MASK (15 << 4) |
| #define IMUCTR_FLUSH (1 << 1) |
| #define IMUCTR_MMUEN (1 << 0) |
| |
| #define IMUASID(n) (0x0308 + ((n) * 16)) |
| #define IMUASID_ASID8_MASK (0xff << 8) |
| #define IMUASID_ASID8_SHIFT 8 |
| #define IMUASID_ASID0_MASK (0xff << 0) |
| #define IMUASID_ASID0_SHIFT 0 |
| |
| /* ----------------------------------------------------------------------------- |
| * Read/Write Access |
| */ |
| |
| static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset) |
| { |
| return ioread32(mmu->base + offset); |
| } |
| |
| static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset, |
| u32 data) |
| { |
| iowrite32(data, mmu->base + offset); |
| } |
| |
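/*
 * Context registers are banked: context n lives at an offset of
 * n * IM_CTX_SIZE bytes from the register base.
 */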
| static u32 ipmmu_ctx_read(struct ipmmu_vmsa_domain *domain, unsigned int reg) |
| { |
| return ipmmu_read(domain->mmu, domain->context_id * IM_CTX_SIZE + reg); |
| } |
| |
| static void ipmmu_ctx_write(struct ipmmu_vmsa_domain *domain, unsigned int reg, |
| u32 data) |
| { |
| ipmmu_write(domain->mmu, domain->context_id * IM_CTX_SIZE + reg, data); |
| } |
| |
| /* ----------------------------------------------------------------------------- |
| * TLB and microTLB Management |
| */ |
| |
| /* Wait for any pending TLB invalidations to complete */ |
| static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain) |
| { |
| unsigned int count = 0; |
| |
| while (ipmmu_ctx_read(domain, IMCTR) & IMCTR_FLUSH) { |
| cpu_relax(); |
| if (++count == TLB_LOOP_TIMEOUT) { |
| dev_err_ratelimited(domain->mmu->dev, |
| "TLB sync timed out -- MMU may be deadlocked\n"); |
| return; |
| } |
| udelay(1); |
| } |
| } |
| |
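/* Request a flush of the context TLB and wait for it to complete. */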
| static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain) |
| { |
| u32 reg; |
| |
| reg = ipmmu_ctx_read(domain, IMCTR); |
| reg |= IMCTR_FLUSH; |
| ipmmu_ctx_write(domain, IMCTR, reg); |
| |
| ipmmu_tlb_sync(domain); |
| } |
| |
| /* |
| * Enable MMU translation for the microTLB. |
| */ |
| static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain, |
| unsigned int utlb) |
| { |
| struct ipmmu_vmsa_device *mmu = domain->mmu; |
| |
| /* |
| * TODO: Reference-count the microTLB as several bus masters can be |
| * connected to the same microTLB. |
| */ |
| |
| /* TODO: What should we set the ASID to ? */ |
| ipmmu_write(mmu, IMUASID(utlb), 0); |
| /* TODO: Do we need to flush the microTLB ? */ |
| ipmmu_write(mmu, IMUCTR(utlb), |
| IMUCTR_TTSEL_MMU(domain->context_id) | IMUCTR_FLUSH | |
| IMUCTR_MMUEN); |
| } |
| |
| /* |
| * Disable MMU translation for the microTLB. |
| */ |
| static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain, |
| unsigned int utlb) |
| { |
| struct ipmmu_vmsa_device *mmu = domain->mmu; |
| |
| ipmmu_write(mmu, IMUCTR(utlb), 0); |
| } |
| |
| static void ipmmu_tlb_flush_all(void *cookie) |
| { |
| struct ipmmu_vmsa_domain *domain = cookie; |
| |
| ipmmu_tlb_invalidate(domain); |
| } |
| |
| static void ipmmu_tlb_add_flush(unsigned long iova, size_t size, |
| size_t granule, bool leaf, void *cookie) |
| { |
| /* The hardware doesn't support selective TLB flush. */ |
| } |
| |
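/*
 * As the hardware only supports a full TLB flush, .tlb_sync deliberately
 * points at the flush-all handler as well.
 */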
| static const struct iommu_gather_ops ipmmu_gather_ops = { |
| .tlb_flush_all = ipmmu_tlb_flush_all, |
| .tlb_add_flush = ipmmu_tlb_add_flush, |
| .tlb_sync = ipmmu_tlb_flush_all, |
| }; |
| |
| /* ----------------------------------------------------------------------------- |
| * Domain/Context Management |
| */ |
| |
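/*
 * Find a free context, mark it as used and register the domain in the
 * corresponding slot. Returns the context index, or IPMMU_CTX_MAX if
 * all contexts are in use.
 */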
| static int ipmmu_domain_allocate_context(struct ipmmu_vmsa_device *mmu, |
| struct ipmmu_vmsa_domain *domain) |
| { |
| unsigned long flags; |
| int ret; |
| |
| spin_lock_irqsave(&mmu->lock, flags); |
| |
| ret = find_first_zero_bit(mmu->ctx, IPMMU_CTX_MAX); |
| if (ret != IPMMU_CTX_MAX) { |
| mmu->domains[ret] = domain; |
| set_bit(ret, mmu->ctx); |
| } |
| |
| spin_unlock_irqrestore(&mmu->lock, flags); |
| |
| return ret; |
| } |
| |
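/* Release a context and clear its domain slot. */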
| static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu, |
| unsigned int context_id) |
| { |
| unsigned long flags; |
| |
| spin_lock_irqsave(&mmu->lock, flags); |
| |
| clear_bit(context_id, mmu->ctx); |
| mmu->domains[context_id] = NULL; |
| |
| spin_unlock_irqrestore(&mmu->lock, flags); |
| } |
| |
| static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain) |
| { |
| u64 ttbr; |
| int ret; |
| |
| /* |
| * Allocate the page table operations. |
| * |
| * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory |
| * access, Long-descriptor format" that the NStable bit being set in a |
| * table descriptor will result in the NStable and NS bits of all child |
| * entries being ignored and considered as being set. The IPMMU seems |
| * not to comply with this, as it generates a secure access page fault |
| * if any of the NStable and NS bits isn't set when running in |
| * non-secure mode. |
| */ |
| domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS; |
| domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K; |
| domain->cfg.ias = 32; |
| domain->cfg.oas = 40; |
| domain->cfg.tlb = &ipmmu_gather_ops; |
| domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32); |
| domain->io_domain.geometry.force_aperture = true; |
| /* |
| * TODO: Add support for coherent walk through CCI with DVM and remove |
| * cache handling. For now, delegate it to the io-pgtable code. |
| */ |
| domain->cfg.iommu_dev = domain->mmu->dev; |
| |
| /* |
| * Find an unused context. |
| */ |
| ret = ipmmu_domain_allocate_context(domain->mmu, domain); |
| if (ret == IPMMU_CTX_MAX) |
| return -EBUSY; |
| |
| domain->context_id = ret; |
| |
| domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg, |
| domain); |
| if (!domain->iop) { |
| ipmmu_domain_free_context(domain->mmu, domain->context_id); |
| return -EINVAL; |
| } |
| |
| /* TTBR0 */ |
| ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0]; |
| ipmmu_ctx_write(domain, IMTTLBR0, ttbr); |
| ipmmu_ctx_write(domain, IMTTUBR0, ttbr >> 32); |
| |
| /* |
| * TTBCR |
| * We use long descriptors with inner-shareable WBWA tables and allocate |
| * the whole 32-bit VA space to TTBR0. |
| */ |
| ipmmu_ctx_write(domain, IMTTBCR, IMTTBCR_EAE | |
| IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA | |
| IMTTBCR_IRGN0_WB_WA | IMTTBCR_SL0_LVL_1); |
| |
| /* MAIR0 */ |
| ipmmu_ctx_write(domain, IMMAIR0, domain->cfg.arm_lpae_s1_cfg.mair[0]); |
| |
| /* IMBUSCR */ |
| ipmmu_ctx_write(domain, IMBUSCR, |
| ipmmu_ctx_read(domain, IMBUSCR) & |
| ~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK)); |
| |
| /* |
| * IMSTR |
| * Clear all interrupt flags. |
| */ |
| ipmmu_ctx_write(domain, IMSTR, ipmmu_ctx_read(domain, IMSTR)); |
| |
| /* |
| * IMCTR |
| * Enable the MMU and interrupt generation. The long-descriptor |
| * translation table format doesn't use TEX remapping. Don't enable AF |
| * software management as we have no use for it. Flush the TLB as |
| * required when modifying the context registers. |
| */ |
| ipmmu_ctx_write(domain, IMCTR, IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN); |
| |
| return 0; |
| } |
| |
| static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain) |
| { |
| if (!domain->mmu) |
| return; |
| |
| /* |
| * Disable the context. Flush the TLB as required when modifying the |
| * context registers. |
| * |
| * TODO: Is TLB flush really needed ? |
| */ |
| ipmmu_ctx_write(domain, IMCTR, IMCTR_FLUSH); |
| ipmmu_tlb_sync(domain); |
| ipmmu_domain_free_context(domain->mmu, domain->context_id); |
| } |
| |
| /* ----------------------------------------------------------------------------- |
| * Fault Handling |
| */ |
| |
| static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain) |
| { |
| const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF; |
| struct ipmmu_vmsa_device *mmu = domain->mmu; |
| u32 status; |
| u32 iova; |
| |
| status = ipmmu_ctx_read(domain, IMSTR); |
| if (!(status & err_mask)) |
| return IRQ_NONE; |
| |
| iova = ipmmu_ctx_read(domain, IMEAR); |
| |
| /* |
| * Clear the error status flags. Unlike traditional interrupt flag |
| * registers that must be cleared by writing 1, this status register |
| * seems to require 0. The error address register must be read before, |
| * otherwise its value will be 0. |
| */ |
| ipmmu_ctx_write(domain, IMSTR, 0); |
| |
| /* Log fatal errors. */ |
| if (status & IMSTR_MHIT) |
| dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%08x\n", |
| iova); |
| if (status & IMSTR_ABORT) |
| dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%08x\n", |
| iova); |
| |
| if (!(status & (IMSTR_PF | IMSTR_TF))) |
| return IRQ_NONE; |
| |
| /* |
| * Try to handle page faults and translation faults. |
| * |
| * TODO: We need to look up the faulty device based on the I/O VA. Use |
| * the IOMMU device for now. |
| */ |
| if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0)) |
| return IRQ_HANDLED; |
| |
| dev_err_ratelimited(mmu->dev, |
| "Unhandled fault: status 0x%08x iova 0x%08x\n", |
| status, iova); |
| |
| return IRQ_HANDLED; |
| } |
| |
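/*
 * IRQ handler registered for the whole IPMMU instance: poll every
 * active context, as fault status is reported per context.
 */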
| static irqreturn_t ipmmu_irq(int irq, void *dev) |
| { |
| struct ipmmu_vmsa_device *mmu = dev; |
| irqreturn_t status = IRQ_NONE; |
| unsigned int i; |
| unsigned long flags; |
| |
| spin_lock_irqsave(&mmu->lock, flags); |
| |
| /* |
| * Check interrupts for all active contexts. |
| */ |
| for (i = 0; i < IPMMU_CTX_MAX; i++) { |
| if (!mmu->domains[i]) |
| continue; |
| if (ipmmu_domain_irq(mmu->domains[i]) == IRQ_HANDLED) |
| status = IRQ_HANDLED; |
| } |
| |
| spin_unlock_irqrestore(&mmu->lock, flags); |
| |
| return status; |
| } |
| |
| /* ----------------------------------------------------------------------------- |
| * IOMMU Operations |
| */ |
| |
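/* Common domain allocation, shared by the ARM and IOMMU_DMA paths. */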
| static struct iommu_domain *__ipmmu_domain_alloc(unsigned type) |
| { |
| struct ipmmu_vmsa_domain *domain; |
| |
| domain = kzalloc(sizeof(*domain), GFP_KERNEL); |
| if (!domain) |
| return NULL; |
| |
| mutex_init(&domain->mutex); |
| |
| return &domain->io_domain; |
| } |
| |
| static void ipmmu_domain_free(struct iommu_domain *io_domain) |
| { |
| struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); |
| |
| /* |
| * Free the domain resources. We assume that all devices have already |
| * been detached. |
| */ |
| ipmmu_domain_destroy_context(domain); |
| free_io_pgtable_ops(domain->iop); |
| kfree(domain); |
| } |
| |
| static int ipmmu_attach_device(struct iommu_domain *io_domain, |
| struct device *dev) |
| { |
	struct ipmmu_vmsa_iommu_priv *priv = to_priv(dev);
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct ipmmu_vmsa_device *mmu;
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
	unsigned int i;
	int ret = 0;

	/* Check priv before dereferencing it, as to_priv() can return NULL. */
	if (!priv || !priv->mmu) {
		dev_err(dev, "Cannot attach to IPMMU\n");
		return -ENXIO;
	}

	mmu = priv->mmu;
| |
| mutex_lock(&domain->mutex); |
| |
| if (!domain->mmu) { |
| /* The domain hasn't been used yet, initialize it. */ |
| domain->mmu = mmu; |
| ret = ipmmu_domain_init_context(domain); |
| } else if (domain->mmu != mmu) { |
| /* |
| * Something is wrong, we can't attach two devices using |
| * different IOMMUs to the same domain. |
| */ |
| dev_err(dev, "Can't attach IPMMU %s to domain on IPMMU %s\n", |
| dev_name(mmu->dev), dev_name(domain->mmu->dev)); |
| ret = -EINVAL; |
| } else |
| dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id); |
| |
| mutex_unlock(&domain->mutex); |
| |
| if (ret < 0) |
| return ret; |
| |
| for (i = 0; i < fwspec->num_ids; ++i) |
| ipmmu_utlb_enable(domain, fwspec->ids[i]); |
| |
| return 0; |
| } |
| |
| static void ipmmu_detach_device(struct iommu_domain *io_domain, |
| struct device *dev) |
| { |
| struct iommu_fwspec *fwspec = dev->iommu_fwspec; |
| struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); |
| unsigned int i; |
| |
| for (i = 0; i < fwspec->num_ids; ++i) |
| ipmmu_utlb_disable(domain, fwspec->ids[i]); |
| |
| /* |
| * TODO: Optimize by disabling the context when no device is attached. |
| */ |
| } |
| |
| static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova, |
| phys_addr_t paddr, size_t size, int prot) |
| { |
| struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); |
| |
| if (!domain) |
| return -ENODEV; |
| |
| return domain->iop->map(domain->iop, iova, paddr, size, prot); |
| } |
| |
| static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova, |
| size_t size) |
| { |
| struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); |
| |
| return domain->iop->unmap(domain->iop, iova, size); |
| } |
| |
| static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain, |
| dma_addr_t iova) |
| { |
| struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); |
| |
| /* TODO: Is locking needed ? */ |
| |
| return domain->iop->iova_to_phys(domain->iop, iova); |
| } |
| |
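/*
 * Resolve the IPMMU instance referenced by the "iommus" phandle and
 * hang the per-device private data off the fwspec.
 */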
| static int ipmmu_init_platform_device(struct device *dev, |
| struct of_phandle_args *args) |
| { |
| struct platform_device *ipmmu_pdev; |
| struct ipmmu_vmsa_iommu_priv *priv; |
| |
| ipmmu_pdev = of_find_device_by_node(args->np); |
| if (!ipmmu_pdev) |
| return -ENODEV; |
| |
| priv = kzalloc(sizeof(*priv), GFP_KERNEL); |
| if (!priv) |
| return -ENOMEM; |
| |
| priv->mmu = platform_get_drvdata(ipmmu_pdev); |
| priv->dev = dev; |
| dev->iommu_fwspec->iommu_priv = priv; |
| return 0; |
| } |
| |
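/*
 * An illustrative device tree fragment (node names, addresses and the
 * micro-TLB number below are hypothetical, not taken from a real board):
 *
 *	ipmmu_mx: mmu@fe951000 {
 *		compatible = "renesas,ipmmu-vmsa";
 *		reg = <0xfe951000 0x1000>;
 *		interrupts = <0 222 4>;
 *		#iommu-cells = <1>;
 *	};
 *
 *	&some_master {
 *		iommus = <&ipmmu_mx 13>;	// micro-TLB 13
 *	};
 *
 * of_xlate() is invoked once per "iommus" entry; the single specifier
 * cell (the micro-TLB number) arrives in spec->args[0].
 */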
| static int ipmmu_of_xlate(struct device *dev, |
| struct of_phandle_args *spec) |
| { |
| iommu_fwspec_add_ids(dev, spec->args, 1); |
| |
	/* Initialize once - xlate() may be called multiple times */
| if (to_priv(dev)) |
| return 0; |
| |
| return ipmmu_init_platform_device(dev, spec); |
| } |
| |
| #if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA) |
| |
| static struct iommu_domain *ipmmu_domain_alloc(unsigned type) |
| { |
| if (type != IOMMU_DOMAIN_UNMANAGED) |
| return NULL; |
| |
| return __ipmmu_domain_alloc(type); |
| } |
| |
| static int ipmmu_add_device(struct device *dev) |
| { |
| struct ipmmu_vmsa_device *mmu = NULL; |
| struct iommu_group *group; |
| int ret; |
| |
| /* |
| * Only let through devices that have been verified in xlate() |
| */ |
| if (!to_priv(dev)) |
| return -ENODEV; |
| |
| /* Create a device group and add the device to it. */ |
| group = iommu_group_alloc(); |
| if (IS_ERR(group)) { |
| dev_err(dev, "Failed to allocate IOMMU group\n"); |
| ret = PTR_ERR(group); |
| goto error; |
| } |
| |
| ret = iommu_group_add_device(group, dev); |
| iommu_group_put(group); |
| |
| if (ret < 0) { |
| dev_err(dev, "Failed to add device to IPMMU group\n"); |
| group = NULL; |
| goto error; |
| } |
| |
| /* |
| * Create the ARM mapping, used by the ARM DMA mapping core to allocate |
| * VAs. This will allocate a corresponding IOMMU domain. |
| * |
| * TODO: |
| * - Create one mapping per context (TLB). |
| * - Make the mapping size configurable ? We currently use a 2GB mapping |
| * at a 1GB offset to ensure that NULL VAs will fault. |
| */ |
| mmu = to_priv(dev)->mmu; |
| if (!mmu->mapping) { |
| struct dma_iommu_mapping *mapping; |
| |
| mapping = arm_iommu_create_mapping(&platform_bus_type, |
| SZ_1G, SZ_2G); |
| if (IS_ERR(mapping)) { |
| dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n"); |
| ret = PTR_ERR(mapping); |
| goto error; |
| } |
| |
| mmu->mapping = mapping; |
| } |
| |
| /* Attach the ARM VA mapping to the device. */ |
| ret = arm_iommu_attach_device(dev, mmu->mapping); |
| if (ret < 0) { |
| dev_err(dev, "Failed to attach device to VA mapping\n"); |
| goto error; |
| } |
| |
| return 0; |
| |
| error: |
| if (mmu) |
| arm_iommu_release_mapping(mmu->mapping); |
| |
| if (!IS_ERR_OR_NULL(group)) |
| iommu_group_remove_device(dev); |
| |
| return ret; |
| } |
| |
| static void ipmmu_remove_device(struct device *dev) |
| { |
| arm_iommu_detach_device(dev); |
| iommu_group_remove_device(dev); |
| } |
| |
| static const struct iommu_ops ipmmu_ops = { |
| .domain_alloc = ipmmu_domain_alloc, |
| .domain_free = ipmmu_domain_free, |
| .attach_dev = ipmmu_attach_device, |
| .detach_dev = ipmmu_detach_device, |
| .map = ipmmu_map, |
| .unmap = ipmmu_unmap, |
| .map_sg = default_iommu_map_sg, |
| .iova_to_phys = ipmmu_iova_to_phys, |
| .add_device = ipmmu_add_device, |
| .remove_device = ipmmu_remove_device, |
| .pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K, |
| .of_xlate = ipmmu_of_xlate, |
| }; |
| |
#endif /* CONFIG_ARM && !CONFIG_IOMMU_DMA */
| |
| #ifdef CONFIG_IOMMU_DMA |
| |
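/*
 * List of slave devices known to this driver, used to place devices
 * behind the same IPMMU instance in a common IOMMU group.
 */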
| static DEFINE_SPINLOCK(ipmmu_slave_devices_lock); |
| static LIST_HEAD(ipmmu_slave_devices); |
| |
| static struct iommu_domain *ipmmu_domain_alloc_dma(unsigned type) |
| { |
| struct iommu_domain *io_domain = NULL; |
| |
| switch (type) { |
| case IOMMU_DOMAIN_UNMANAGED: |
| io_domain = __ipmmu_domain_alloc(type); |
| break; |
| |
| case IOMMU_DOMAIN_DMA: |
| io_domain = __ipmmu_domain_alloc(type); |
| if (io_domain) |
| iommu_get_dma_cookie(io_domain); |
| break; |
| } |
| |
| return io_domain; |
| } |
| |
| static void ipmmu_domain_free_dma(struct iommu_domain *io_domain) |
| { |
| switch (io_domain->type) { |
| case IOMMU_DOMAIN_DMA: |
| iommu_put_dma_cookie(io_domain); |
| /* fall-through */ |
| default: |
| ipmmu_domain_free(io_domain); |
| break; |
| } |
| } |
| |
| static int ipmmu_add_device_dma(struct device *dev) |
| { |
| struct iommu_group *group; |
| |
| /* |
| * Only let through devices that have been verified in xlate() |
| */ |
| if (!to_priv(dev)) |
| return -ENODEV; |
| |
| group = iommu_group_get_for_dev(dev); |
| if (IS_ERR(group)) |
| return PTR_ERR(group); |
| |
| spin_lock(&ipmmu_slave_devices_lock); |
| list_add(&to_priv(dev)->list, &ipmmu_slave_devices); |
| spin_unlock(&ipmmu_slave_devices_lock); |
| return 0; |
| } |
| |
| static void ipmmu_remove_device_dma(struct device *dev) |
| { |
| struct ipmmu_vmsa_iommu_priv *priv = to_priv(dev); |
| |
| spin_lock(&ipmmu_slave_devices_lock); |
| list_del(&priv->list); |
| spin_unlock(&ipmmu_slave_devices_lock); |
| |
| iommu_group_remove_device(dev); |
| } |
| |
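/*
 * Look for another slave device attached to the same IPMMU instance, so
 * that both can be placed in the same IOMMU group.
 */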
| static struct device *ipmmu_find_sibling_device(struct device *dev) |
| { |
| struct ipmmu_vmsa_iommu_priv *priv = to_priv(dev); |
| struct ipmmu_vmsa_iommu_priv *sibling_priv = NULL; |
| bool found = false; |
| |
| spin_lock(&ipmmu_slave_devices_lock); |
| |
| list_for_each_entry(sibling_priv, &ipmmu_slave_devices, list) { |
| if (priv == sibling_priv) |
| continue; |
| if (sibling_priv->mmu == priv->mmu) { |
| found = true; |
| break; |
| } |
| } |
| |
| spin_unlock(&ipmmu_slave_devices_lock); |
| |
| return found ? sibling_priv->dev : NULL; |
| } |
| |
| static struct iommu_group *ipmmu_find_group_dma(struct device *dev) |
| { |
	struct device *sibling;

	sibling = ipmmu_find_sibling_device(dev);
	if (sibling)
		return iommu_group_get(sibling);

	return generic_device_group(dev);
| } |
| |
| static const struct iommu_ops ipmmu_ops = { |
| .domain_alloc = ipmmu_domain_alloc_dma, |
| .domain_free = ipmmu_domain_free_dma, |
| .attach_dev = ipmmu_attach_device, |
| .detach_dev = ipmmu_detach_device, |
| .map = ipmmu_map, |
| .unmap = ipmmu_unmap, |
| .map_sg = default_iommu_map_sg, |
| .iova_to_phys = ipmmu_iova_to_phys, |
| .add_device = ipmmu_add_device_dma, |
| .remove_device = ipmmu_remove_device_dma, |
| .device_group = ipmmu_find_group_dma, |
| .pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K, |
| .of_xlate = ipmmu_of_xlate, |
| }; |
| |
| #endif /* CONFIG_IOMMU_DMA */ |
| |
| /* ----------------------------------------------------------------------------- |
| * Probe/remove and init |
| */ |
| |
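/*
 * Put the instance in a known quiescent state: all translation contexts
 * disabled until a domain is attached.
 */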
| static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu) |
| { |
| unsigned int i; |
| |
| /* Disable all contexts. */ |
| for (i = 0; i < 4; ++i) |
| ipmmu_write(mmu, i * IM_CTX_SIZE + IMCTR, 0); |
| } |
| |
| static int ipmmu_probe(struct platform_device *pdev) |
| { |
| struct ipmmu_vmsa_device *mmu; |
| struct resource *res; |
| int irq; |
| int ret; |
| |
| mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL); |
| if (!mmu) { |
| dev_err(&pdev->dev, "cannot allocate device data\n"); |
| return -ENOMEM; |
| } |
| |
| mmu->dev = &pdev->dev; |
| mmu->num_utlbs = 32; |
| spin_lock_init(&mmu->lock); |
| bitmap_zero(mmu->ctx, IPMMU_CTX_MAX); |
| |
| /* Map I/O memory and request IRQ. */ |
| res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| mmu->base = devm_ioremap_resource(&pdev->dev, res); |
| if (IS_ERR(mmu->base)) |
| return PTR_ERR(mmu->base); |
| |
| /* |
| * The IPMMU has two register banks, for secure and non-secure modes. |
| * The bank mapped at the beginning of the IPMMU address space |
| * corresponds to the running mode of the CPU. When running in secure |
| * mode the non-secure register bank is also available at an offset. |
| * |
| * Secure mode operation isn't clearly documented and is thus currently |
| * not implemented in the driver. Furthermore, preliminary tests of |
| * non-secure operation with the main register bank were not successful. |
| * Offset the registers base unconditionally to point to the non-secure |
| * alias space for now. |
| */ |
| mmu->base += IM_NS_ALIAS_OFFSET; |
| |
| irq = platform_get_irq(pdev, 0); |
| if (irq < 0) { |
| dev_err(&pdev->dev, "no IRQ found\n"); |
| return irq; |
| } |
| |
| ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0, |
| dev_name(&pdev->dev), mmu); |
| if (ret < 0) { |
| dev_err(&pdev->dev, "failed to request IRQ %d\n", irq); |
| return ret; |
| } |
| |
| ipmmu_device_reset(mmu); |
| |
| ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL, |
| dev_name(&pdev->dev)); |
| if (ret) |
| return ret; |
| |
| iommu_device_set_ops(&mmu->iommu, &ipmmu_ops); |
| iommu_device_set_fwnode(&mmu->iommu, &pdev->dev.of_node->fwnode); |
| |
| ret = iommu_device_register(&mmu->iommu); |
| if (ret) |
| return ret; |
| |
| /* |
| * We can't create the ARM mapping here as it requires the bus to have |
| * an IOMMU, which only happens when bus_set_iommu() is called in |
| * ipmmu_init() after the probe function returns. |
| */ |
| |
| platform_set_drvdata(pdev, mmu); |
| |
| return 0; |
| } |
| |
| static int ipmmu_remove(struct platform_device *pdev) |
| { |
| struct ipmmu_vmsa_device *mmu = platform_get_drvdata(pdev); |
| |
| iommu_device_sysfs_remove(&mmu->iommu); |
| iommu_device_unregister(&mmu->iommu); |
| |
| #if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA) |
| arm_iommu_release_mapping(mmu->mapping); |
| #endif |
| |
| ipmmu_device_reset(mmu); |
| |
| return 0; |
| } |
| |
| static const struct of_device_id ipmmu_of_ids[] = { |
| { .compatible = "renesas,ipmmu-vmsa", }, |
| { } |
| }; |
| |
| static struct platform_driver ipmmu_driver = { |
| .driver = { |
| .name = "ipmmu-vmsa", |
| .of_match_table = of_match_ptr(ipmmu_of_ids), |
| }, |
| .probe = ipmmu_probe, |
| .remove = ipmmu_remove, |
| }; |
| |
| static int __init ipmmu_init(void) |
| { |
| int ret; |
| |
| ret = platform_driver_register(&ipmmu_driver); |
| if (ret < 0) |
| return ret; |
| |
| if (!iommu_present(&platform_bus_type)) |
| bus_set_iommu(&platform_bus_type, &ipmmu_ops); |
| |
| return 0; |
| } |
| |
| static void __exit ipmmu_exit(void) |
| { |
	platform_driver_unregister(&ipmmu_driver);
| } |
| |
| subsys_initcall(ipmmu_init); |
| module_exit(ipmmu_exit); |
| |
| MODULE_DESCRIPTION("IOMMU API for Renesas VMSA-compatible IPMMU"); |
| MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>"); |
| MODULE_LICENSE("GPL v2"); |