Merge branch 'pci/host-vmd' into next

* pci/host-vmd:
  x86/PCI: Add driver for Intel Volume Management Device (VMD)
  PCI/AER: Use 32 bit PCI domain numbers
  x86/PCI: Allow DMA ops specific to a PCI domain
  irqdomain: Export irq_domain_set_info() for module use
  genirq/MSI: Relax msi_domain_alloc() to support parentless MSI irqdomains
diff --git a/MAINTAINERS b/MAINTAINERS
index 9ea7817..94544db 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -8216,6 +8216,12 @@
 F:	Documentation/devicetree/bindings/pci/host-generic-pci.txt
 F:	drivers/pci/host/pci-host-generic.c
 
+PCI DRIVER FOR INTEL VOLUME MANAGEMENT DEVICE (VMD)
+M:	Keith Busch <keith.busch@intel.com>
+L:	linux-pci@vger.kernel.org
+S:	Supported
+F:	arch/x86/pci/vmd.c
+
 PCIE DRIVER FOR ST SPEAR13XX
 M:	Pratyush Anand <pratyush.anand@gmail.com>
 L:	linux-pci@vger.kernel.org
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index db3622f..3e6aca8 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2665,6 +2665,19 @@
 	def_bool y
         depends on PCI
 
+config VMD
+	depends on PCI_MSI
+	tristate "Volume Management Device Driver"
+	default n
+	---help---
+	  Adds support for the Intel Volume Management Device (VMD). VMD is a
+	  secondary PCI host bridge that allows PCI Express root ports,
+	  and devices attached to them, to be removed from the default
+	  PCI domain and placed within the VMD domain. This provides
+	  more bus resources than are otherwise possible with a
+	  single domain. If you know your system provides one of these and
+	  has devices attached to it, say Y; if you are not sure, say N.
+
 source "net/Kconfig"
 
 source "drivers/Kconfig"
diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h
index 03dd729..684ed6c 100644
--- a/arch/x86/include/asm/device.h
+++ b/arch/x86/include/asm/device.h
@@ -10,6 +10,16 @@
 #endif
 };
 
+#if defined(CONFIG_X86_DEV_DMA_OPS) && defined(CONFIG_PCI_DOMAINS)
+struct dma_domain {
+	struct list_head node;
+	struct dma_map_ops *dma_ops;
+	int domain_nr;
+};
+void add_dma_domain(struct dma_domain *domain);
+void del_dma_domain(struct dma_domain *domain);
+#endif
+
 struct pdev_archdata {
 };
 
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index 1e3408e..1815b73 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -130,6 +130,11 @@
 			char		*uv_name;
 		};
 #endif
+#if IS_ENABLED(CONFIG_VMD)
+		struct {
+			struct msi_desc *desc;
+		};
+#endif
 	};
 };
 
diff --git a/arch/x86/pci/Makefile b/arch/x86/pci/Makefile
index 5c6fc35..97062a6 100644
--- a/arch/x86/pci/Makefile
+++ b/arch/x86/pci/Makefile
@@ -23,6 +23,8 @@
 obj-$(CONFIG_AMD_NB)		+= amd_bus.o
 obj-$(CONFIG_PCI_CNB20LE_QUIRK)	+= broadcom_bus.o
 
+obj-$(CONFIG_VMD) += vmd.o
+
 ifeq ($(CONFIG_PCI_DEBUG),y)
 EXTRA_CFLAGS += -DDEBUG
 endif
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index eccd4d9..2879efc 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -641,6 +641,43 @@
 	return (pci_probe & PCI_ASSIGN_ALL_BUSSES) ? 1 : 0;
 }
 
+#if defined(CONFIG_X86_DEV_DMA_OPS) && defined(CONFIG_PCI_DOMAINS)
+static LIST_HEAD(dma_domain_list);
+static DEFINE_SPINLOCK(dma_domain_list_lock);
+
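+/*
+ * A host bridge driver such as VMD can register a dma_domain so that every
+ * device enumerated in the matching PCI domain picks up the bridge's DMA
+ * operations instead of the default ones.
+ */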
+void add_dma_domain(struct dma_domain *domain)
+{
+	spin_lock(&dma_domain_list_lock);
+	list_add(&domain->node, &dma_domain_list);
+	spin_unlock(&dma_domain_list_lock);
+}
+EXPORT_SYMBOL_GPL(add_dma_domain);
+
+void del_dma_domain(struct dma_domain *domain)
+{
+	spin_lock(&dma_domain_list_lock);
+	list_del(&domain->node);
+	spin_unlock(&dma_domain_list_lock);
+}
+EXPORT_SYMBOL_GPL(del_dma_domain);
+
+static void set_dma_domain_ops(struct pci_dev *pdev)
+{
+	struct dma_domain *domain;
+
+	spin_lock(&dma_domain_list_lock);
+	list_for_each_entry(domain, &dma_domain_list, node) {
+		if (pci_domain_nr(pdev->bus) == domain->domain_nr) {
+			pdev->dev.archdata.dma_ops = domain->dma_ops;
+			break;
+		}
+	}
+	spin_unlock(&dma_domain_list_lock);
+}
+#else
+static void set_dma_domain_ops(struct pci_dev *pdev) {}
+#endif
+
 int pcibios_add_device(struct pci_dev *dev)
 {
 	struct setup_data *data;
@@ -670,6 +707,7 @@
 		pa_data = data->next;
 		iounmap(data);
 	}
+	set_dma_domain_ops(dev);
 	return 0;
 }
 
diff --git a/arch/x86/pci/vmd.c b/arch/x86/pci/vmd.c
new file mode 100644
index 0000000..d57e480
--- /dev/null
+++ b/arch/x86/pci/vmd.c
@@ -0,0 +1,723 @@
+/*
+ * Volume Management Device driver
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/pci.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+
+#include <asm/irqdomain.h>
+#include <asm/device.h>
+#include <asm/msi.h>
+#include <asm/msidef.h>
+
+#define VMD_CFGBAR	0
+#define VMD_MEMBAR1	2
+#define VMD_MEMBAR2	4
+
+/*
+ * Lock for manipulating VMD IRQ lists.
+ */
+static DEFINE_RAW_SPINLOCK(list_lock);
+
+/**
+ * struct vmd_irq - private data to map driver IRQ to the VMD shared vector
+ * @node:	list item for parent traversal.
+ * @rcu:	RCU callback item for freeing.
+ * @irq:	back pointer to parent.
+ * @virq:	the virtual IRQ value provided to the requesting driver.
+ *
+ * Every MSI/MSI-X IRQ requested for a device in a VMD domain will be mapped to
+ * a VMD IRQ using this structure.
+ */
+struct vmd_irq {
+	struct list_head	node;
+	struct rcu_head		rcu;
+	struct vmd_irq_list	*irq;
+	unsigned int		virq;
+};
+
+/**
+ * struct vmd_irq_list - list of driver requested IRQs mapping to a VMD vector
+ * @irq_list:	the list of child IRQs the VMD vector demuxes to.
+ * @vmd:	back pointer to the owning VMD device.
+ * @vmd_vector:	the h/w IRQ assigned to the VMD.
+ * @index:	index into the VMD MSI-X table; used for message routing.
+ * @count:	number of child IRQs assigned to this vector; used to track
+ *		sharing.
+ */
+struct vmd_irq_list {
+	struct list_head	irq_list;
+	struct vmd_dev		*vmd;
+	unsigned int		vmd_vector;
+	unsigned int		index;
+	unsigned int		count;
+};
+
+struct vmd_dev {
+	struct pci_dev		*dev;
+
+	spinlock_t		cfg_lock;
+	char __iomem		*cfgbar;
+
+	int			msix_count;
+	struct msix_entry	*msix_entries;
+	struct vmd_irq_list	*irqs;
+
+	struct pci_sysdata	sysdata;
+	struct resource		resources[3];
+	struct irq_domain	*irq_domain;
+	struct pci_bus		*bus;
+
+#ifdef CONFIG_X86_DEV_DMA_OPS
+	struct dma_map_ops	dma_ops;
+	struct dma_domain	dma_domain;
+#endif
+};
+
+static inline struct vmd_dev *vmd_from_bus(struct pci_bus *bus)
+{
+	return container_of(bus->sysdata, struct vmd_dev, sysdata);
+}
+
+/*
+ * Drivers managing a device in a VMD domain allocate their own IRQs as before,
+ * but the MSI entry for the hardware they drive will be programmed with a
+ * destination ID for the VMD MSI-X table.  The VMD muxes interrupts from
+ * devices in its domain into one of its own vectors, and the VMD driver
+ * demuxes these to the handlers sharing that VMD IRQ.  The vmd irq_domain
+ * provides the operations and irq_chip to set this up.
+ */
+static void vmd_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+	struct vmd_irq *vmdirq = data->chip_data;
+	struct vmd_irq_list *irq = vmdirq->irq;
+
+	msg->address_hi = MSI_ADDR_BASE_HI;
+	msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_DEST_ID(irq->index);
+	msg->data = 0;
+}
+
+/*
+ * We rely on MSI_FLAG_USE_DEF_CHIP_OPS to set the IRQ mask/unmask ops.
+ */
+static void vmd_irq_enable(struct irq_data *data)
+{
+	struct vmd_irq *vmdirq = data->chip_data;
+
+	raw_spin_lock(&list_lock);
+	list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list);
+	raw_spin_unlock(&list_lock);
+
+	data->chip->irq_unmask(data);
+}
+
+static void vmd_irq_disable(struct irq_data *data)
+{
+	struct vmd_irq *vmdirq = data->chip_data;
+
+	data->chip->irq_mask(data);
+
+	raw_spin_lock(&list_lock);
+	list_del_rcu(&vmdirq->node);
+	raw_spin_unlock(&list_lock);
+}
+
+/*
+ * XXX: Stubbed until we develop an acceptable way to avoid conflicts with
+ * other devices sharing the same vector.
+ */
+static int vmd_irq_set_affinity(struct irq_data *data,
+				const struct cpumask *dest, bool force)
+{
+	return -EINVAL;
+}
+
+static struct irq_chip vmd_msi_controller = {
+	.name			= "VMD-MSI",
+	.irq_enable		= vmd_irq_enable,
+	.irq_disable		= vmd_irq_disable,
+	.irq_compose_msi_msg	= vmd_compose_msi_msg,
+	.irq_set_affinity	= vmd_irq_set_affinity,
+};
+
+static irq_hw_number_t vmd_get_hwirq(struct msi_domain_info *info,
+				     msi_alloc_info_t *arg)
+{
+	return 0;
+}
+
+/*
+ * XXX: We can be even smarter selecting the best IRQ once we solve the
+ * affinity problem.
+ */
+static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd)
+{
+	int i, best = 0;
+
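+	/* Pick the VMD vector currently shared by the fewest child IRQs */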
+	raw_spin_lock(&list_lock);
+	for (i = 1; i < vmd->msix_count; i++)
+		if (vmd->irqs[i].count < vmd->irqs[best].count)
+			best = i;
+	vmd->irqs[best].count++;
+	raw_spin_unlock(&list_lock);
+
+	return &vmd->irqs[best];
+}
+
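+/*
+ * Attach a newly allocated child IRQ to the least-used VMD vector.  The child
+ * uses handle_simple_irq since the shared VMD vector does the real flow
+ * handling in vmd_irq().
+ */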
+static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info,
+			unsigned int virq, irq_hw_number_t hwirq,
+			msi_alloc_info_t *arg)
+{
+	struct vmd_dev *vmd = vmd_from_bus(msi_desc_to_pci_dev(arg->desc)->bus);
+	struct vmd_irq *vmdirq = kzalloc(sizeof(*vmdirq), GFP_KERNEL);
+
+	if (!vmdirq)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&vmdirq->node);
+	vmdirq->irq = vmd_next_irq(vmd);
+	vmdirq->virq = virq;
+
+	irq_domain_set_info(domain, virq, vmdirq->irq->vmd_vector, info->chip,
+			    vmdirq, handle_simple_irq, vmd, NULL);
+	return 0;
+}
+
+static void vmd_msi_free(struct irq_domain *domain,
+			struct msi_domain_info *info, unsigned int virq)
+{
+	struct vmd_irq *vmdirq = irq_get_chip_data(virq);
+
+	/* XXX: Potential optimization to rebalance */
+	raw_spin_lock(&list_lock);
+	vmdirq->irq->count--;
+	raw_spin_unlock(&list_lock);
+
+	kfree_rcu(vmdirq, rcu);
+}
+
+static int vmd_msi_prepare(struct irq_domain *domain, struct device *dev,
+			   int nvec, msi_alloc_info_t *arg)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct vmd_dev *vmd = vmd_from_bus(pdev->bus);
+
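+	/* A positive return value caps the request at the VMD's vector count */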
+	if (nvec > vmd->msix_count)
+		return vmd->msix_count;
+
+	memset(arg, 0, sizeof(*arg));
+	return 0;
+}
+
+static void vmd_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
+{
+	arg->desc = desc;
+}
+
+static struct msi_domain_ops vmd_msi_domain_ops = {
+	.get_hwirq	= vmd_get_hwirq,
+	.msi_init	= vmd_msi_init,
+	.msi_free	= vmd_msi_free,
+	.msi_prepare	= vmd_msi_prepare,
+	.set_desc	= vmd_set_desc,
+};
+
+static struct msi_domain_info vmd_msi_domain_info = {
+	.flags		= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+			  MSI_FLAG_PCI_MSIX,
+	.ops		= &vmd_msi_domain_ops,
+	.chip		= &vmd_msi_controller,
+};
+
+#ifdef CONFIG_X86_DEV_DMA_OPS
+/*
+ * VMD replaces the requester ID with its own.  DMA mappings for devices in a
+ * VMD domain must therefore be created against the VMD device itself, not the
+ * child device requesting the mapping.
+ */
+static struct device *to_vmd_dev(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct vmd_dev *vmd = vmd_from_bus(pdev->bus);
+
+	return &vmd->dev->dev;
+}
+
+static struct dma_map_ops *vmd_dma_ops(struct device *dev)
+{
+	return to_vmd_dev(dev)->archdata.dma_ops;
+}
+
+static void *vmd_alloc(struct device *dev, size_t size, dma_addr_t *addr,
+		       gfp_t flag, struct dma_attrs *attrs)
+{
+	return vmd_dma_ops(dev)->alloc(to_vmd_dev(dev), size, addr, flag,
+				       attrs);
+}
+
+static void vmd_free(struct device *dev, size_t size, void *vaddr,
+		     dma_addr_t addr, struct dma_attrs *attrs)
+{
+	return vmd_dma_ops(dev)->free(to_vmd_dev(dev), size, vaddr, addr,
+				      attrs);
+}
+
+static int vmd_mmap(struct device *dev, struct vm_area_struct *vma,
+		    void *cpu_addr, dma_addr_t addr, size_t size,
+		    struct dma_attrs *attrs)
+{
+	return vmd_dma_ops(dev)->mmap(to_vmd_dev(dev), vma, cpu_addr, addr,
+				      size, attrs);
+}
+
+static int vmd_get_sgtable(struct device *dev, struct sg_table *sgt,
+			   void *cpu_addr, dma_addr_t addr, size_t size,
+			   struct dma_attrs *attrs)
+{
+	return vmd_dma_ops(dev)->get_sgtable(to_vmd_dev(dev), sgt, cpu_addr,
+					     addr, size, attrs);
+}
+
+static dma_addr_t vmd_map_page(struct device *dev, struct page *page,
+			       unsigned long offset, size_t size,
+			       enum dma_data_direction dir,
+			       struct dma_attrs *attrs)
+{
+	return vmd_dma_ops(dev)->map_page(to_vmd_dev(dev), page, offset, size,
+					  dir, attrs);
+}
+
+static void vmd_unmap_page(struct device *dev, dma_addr_t addr, size_t size,
+			   enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+	vmd_dma_ops(dev)->unmap_page(to_vmd_dev(dev), addr, size, dir, attrs);
+}
+
+static int vmd_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+		      enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+	return vmd_dma_ops(dev)->map_sg(to_vmd_dev(dev), sg, nents, dir, attrs);
+}
+
+static void vmd_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+			 enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+	vmd_dma_ops(dev)->unmap_sg(to_vmd_dev(dev), sg, nents, dir, attrs);
+}
+
+static void vmd_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
+				    size_t size, enum dma_data_direction dir)
+{
+	vmd_dma_ops(dev)->sync_single_for_cpu(to_vmd_dev(dev), addr, size, dir);
+}
+
+static void vmd_sync_single_for_device(struct device *dev, dma_addr_t addr,
+				       size_t size, enum dma_data_direction dir)
+{
+	vmd_dma_ops(dev)->sync_single_for_device(to_vmd_dev(dev), addr, size,
+						 dir);
+}
+
+static void vmd_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+				int nents, enum dma_data_direction dir)
+{
+	vmd_dma_ops(dev)->sync_sg_for_cpu(to_vmd_dev(dev), sg, nents, dir);
+}
+
+static void vmd_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+				   int nents, enum dma_data_direction dir)
+{
+	vmd_dma_ops(dev)->sync_sg_for_device(to_vmd_dev(dev), sg, nents, dir);
+}
+
+static int vmd_mapping_error(struct device *dev, dma_addr_t addr)
+{
+	return vmd_dma_ops(dev)->mapping_error(to_vmd_dev(dev), addr);
+}
+
+static int vmd_dma_supported(struct device *dev, u64 mask)
+{
+	return vmd_dma_ops(dev)->dma_supported(to_vmd_dev(dev), mask);
+}
+
+#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
+static u64 vmd_get_required_mask(struct device *dev)
+{
+	return vmd_dma_ops(dev)->get_required_mask(to_vmd_dev(dev));
+}
+#endif
+
+static void vmd_teardown_dma_ops(struct vmd_dev *vmd)
+{
+	struct dma_domain *domain = &vmd->dma_domain;
+
+	if (vmd->dev->dev.archdata.dma_ops)
+		del_dma_domain(domain);
+}
+
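+/* Forward only the DMA ops that the underlying implementation provides */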
+#define ASSIGN_VMD_DMA_OPS(source, dest, fn)	\
+	do {					\
+		if (source->fn)			\
+			dest->fn = vmd_##fn;	\
+	} while (0)
+
+static void vmd_setup_dma_ops(struct vmd_dev *vmd)
+{
+	const struct dma_map_ops *source = vmd->dev->dev.archdata.dma_ops;
+	struct dma_map_ops *dest = &vmd->dma_ops;
+	struct dma_domain *domain = &vmd->dma_domain;
+
+	domain->domain_nr = vmd->sysdata.domain;
+	domain->dma_ops = dest;
+
+	if (!source)
+		return;
+	ASSIGN_VMD_DMA_OPS(source, dest, alloc);
+	ASSIGN_VMD_DMA_OPS(source, dest, free);
+	ASSIGN_VMD_DMA_OPS(source, dest, mmap);
+	ASSIGN_VMD_DMA_OPS(source, dest, get_sgtable);
+	ASSIGN_VMD_DMA_OPS(source, dest, map_page);
+	ASSIGN_VMD_DMA_OPS(source, dest, unmap_page);
+	ASSIGN_VMD_DMA_OPS(source, dest, map_sg);
+	ASSIGN_VMD_DMA_OPS(source, dest, unmap_sg);
+	ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_cpu);
+	ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_device);
+	ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_cpu);
+	ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_device);
+	ASSIGN_VMD_DMA_OPS(source, dest, mapping_error);
+	ASSIGN_VMD_DMA_OPS(source, dest, dma_supported);
+#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
+	ASSIGN_VMD_DMA_OPS(source, dest, get_required_mask);
+#endif
+	add_dma_domain(domain);
+}
+#undef ASSIGN_VMD_DMA_OPS
+#else
+static void vmd_teardown_dma_ops(struct vmd_dev *vmd) {}
+static void vmd_setup_dma_ops(struct vmd_dev *vmd) {}
+#endif
+
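+/*
+ * Child config space is memory-mapped within the VMD CFGBAR using the
+ * standard ECAM layout: 1MB per bus and 4KB per function.
+ */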
+static char __iomem *vmd_cfg_addr(struct vmd_dev *vmd, struct pci_bus *bus,
+				  unsigned int devfn, int reg, int len)
+{
+	char __iomem *addr = vmd->cfgbar +
+			     (bus->number << 20) + (devfn << 12) + reg;
+
+	if ((addr - vmd->cfgbar) + len >=
+	    resource_size(&vmd->dev->resource[VMD_CFGBAR]))
+		return NULL;
+
+	return addr;
+}
+
+/*
+ * The CPU may deadlock on some versions of this hardware if config space
+ * accesses are not serialized, so all config space access is done under a
+ * spinlock.
+ */
+static int vmd_pci_read(struct pci_bus *bus, unsigned int devfn, int reg,
+			int len, u32 *value)
+{
+	struct vmd_dev *vmd = vmd_from_bus(bus);
+	char __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len);
+	unsigned long flags;
+	int ret = 0;
+
+	if (!addr)
+		return -EFAULT;
+
+	spin_lock_irqsave(&vmd->cfg_lock, flags);
+	switch (len) {
+	case 1:
+		*value = readb(addr);
+		break;
+	case 2:
+		*value = readw(addr);
+		break;
+	case 4:
+		*value = readl(addr);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+	spin_unlock_irqrestore(&vmd->cfg_lock, flags);
+	return ret;
+}
+
+/*
+ * VMD h/w converts non-posted config writes to posted memory writes.  The
+ * read-back in this function forces the write to complete before returning,
+ * preserving the expected non-posted semantics.
+ */
+static int vmd_pci_write(struct pci_bus *bus, unsigned int devfn, int reg,
+			 int len, u32 value)
+{
+	struct vmd_dev *vmd = vmd_from_bus(bus);
+	char __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len);
+	unsigned long flags;
+	int ret = 0;
+
+	if (!addr)
+		return -EFAULT;
+
+	spin_lock_irqsave(&vmd->cfg_lock, flags);
+	switch (len) {
+	case 1:
+		writeb(value, addr);
+		readb(addr);
+		break;
+	case 2:
+		writew(value, addr);
+		readw(addr);
+		break;
+	case 4:
+		writel(value, addr);
+		readl(addr);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+	spin_unlock_irqrestore(&vmd->cfg_lock, flags);
+	return ret;
+}
+
+static struct pci_ops vmd_ops = {
+	.read		= vmd_pci_read,
+	.write		= vmd_pci_write,
+};
+
+/*
+ * VMD domains start at 0x10000 to not clash with ACPI _SEG domains.
+ */
+static int vmd_find_free_domain(void)
+{
+	int domain = 0xffff;
+	struct pci_bus *bus = NULL;
+
+	while ((bus = pci_find_next_bus(bus)) != NULL)
+		domain = max_t(int, domain, pci_domain_nr(bus));
+	return domain + 1;
+}
+
+static int vmd_enable_domain(struct vmd_dev *vmd)
+{
+	struct pci_sysdata *sd = &vmd->sysdata;
+	struct resource *res;
+	u32 upper_bits;
+	unsigned long flags;
+	LIST_HEAD(resources);
+
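+	/*
+	 * With the ECAM layout, each 1MB of CFGBAR space maps the config
+	 * space of one child bus, so the BAR size determines the bus range.
+	 */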
+	res = &vmd->dev->resource[VMD_CFGBAR];
+	vmd->resources[0] = (struct resource) {
+		.name  = "VMD CFGBAR",
+		.start = res->start,
+		.end   = (resource_size(res) >> 20) - 1,
+		.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED,
+	};
+
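+	/*
+	 * If the window is below 4GB, clear IORESOURCE_MEM_64 so we can
+	 * put 32-bit resources in it.
+	 */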
+	res = &vmd->dev->resource[VMD_MEMBAR1];
+	upper_bits = upper_32_bits(res->end);
+	flags = res->flags & ~IORESOURCE_SIZEALIGN;
+	if (!upper_bits)
+		flags &= ~IORESOURCE_MEM_64;
+	vmd->resources[1] = (struct resource) {
+		.name  = "VMD MEMBAR1",
+		.start = res->start,
+		.end   = res->end,
+		.flags = flags,
+	};
+
+	res = &vmd->dev->resource[VMD_MEMBAR2];
+	upper_bits = upper_32_bits(res->end);
+	flags = res->flags & ~IORESOURCE_SIZEALIGN;
+	if (!upper_bits)
+		flags &= ~IORESOURCE_MEM_64;
+	vmd->resources[2] = (struct resource) {
+		.name  = "VMD MEMBAR2",
+		.start = res->start + 0x2000,
+		.end   = res->end,
+		.flags = flags,
+	};
+
+	sd->domain = vmd_find_free_domain();
+	if (sd->domain < 0)
+		return sd->domain;
+
+	sd->node = pcibus_to_node(vmd->dev->bus);
+
+	vmd->irq_domain = pci_msi_create_irq_domain(NULL, &vmd_msi_domain_info,
+						    NULL);
+	if (!vmd->irq_domain)
+		return -ENODEV;
+
+	pci_add_resource(&resources, &vmd->resources[0]);
+	pci_add_resource(&resources, &vmd->resources[1]);
+	pci_add_resource(&resources, &vmd->resources[2]);
+	vmd->bus = pci_create_root_bus(&vmd->dev->dev, 0, &vmd_ops, sd,
+				       &resources);
+	if (!vmd->bus) {
+		pci_free_resource_list(&resources);
+		irq_domain_remove(vmd->irq_domain);
+		return -ENODEV;
+	}
+
+	vmd_setup_dma_ops(vmd);
+	dev_set_msi_domain(&vmd->bus->dev, vmd->irq_domain);
+	pci_rescan_bus(vmd->bus);
+
+	WARN(sysfs_create_link(&vmd->dev->dev.kobj, &vmd->bus->dev.kobj,
+			       "domain"), "Can't create symlink to domain\n");
+	return 0;
+}
+
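+/*
+ * Demultiplex the VMD hardware vector to every child IRQ currently attached
+ * to its list.
+ */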
+static irqreturn_t vmd_irq(int irq, void *data)
+{
+	struct vmd_irq_list *irqs = data;
+	struct vmd_irq *vmdirq;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(vmdirq, &irqs->irq_list, node)
+		generic_handle_irq(vmdirq->virq);
+	rcu_read_unlock();
+
+	return IRQ_HANDLED;
+}
+
+static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+	struct vmd_dev *vmd;
+	int i, err;
+
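+	/* The CFGBAR must cover at least one bus worth (1MB) of ECAM space */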
+	if (resource_size(&dev->resource[VMD_CFGBAR]) < (1 << 20))
+		return -ENOMEM;
+
+	vmd = devm_kzalloc(&dev->dev, sizeof(*vmd), GFP_KERNEL);
+	if (!vmd)
+		return -ENOMEM;
+
+	vmd->dev = dev;
+	err = pcim_enable_device(dev);
+	if (err < 0)
+		return err;
+
+	vmd->cfgbar = pcim_iomap(dev, VMD_CFGBAR, 0);
+	if (!vmd->cfgbar)
+		return -ENOMEM;
+
+	pci_set_master(dev);
+	if (dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(64)) &&
+	    dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32)))
+		return -ENODEV;
+
+	vmd->msix_count = pci_msix_vec_count(dev);
+	if (vmd->msix_count < 0)
+		return -ENODEV;
+
+	vmd->irqs = devm_kcalloc(&dev->dev, vmd->msix_count, sizeof(*vmd->irqs),
+				 GFP_KERNEL);
+	if (!vmd->irqs)
+		return -ENOMEM;
+
+	vmd->msix_entries = devm_kcalloc(&dev->dev, vmd->msix_count,
+					 sizeof(*vmd->msix_entries),
+					 GFP_KERNEL);
+	if (!vmd->msix_entries)
+		return -ENOMEM;
+	for (i = 0; i < vmd->msix_count; i++)
+		vmd->msix_entries[i].entry = i;
+
+	vmd->msix_count = pci_enable_msix_range(vmd->dev, vmd->msix_entries, 1,
+						vmd->msix_count);
+	if (vmd->msix_count < 0)
+		return vmd->msix_count;
+
+	for (i = 0; i < vmd->msix_count; i++) {
+		INIT_LIST_HEAD(&vmd->irqs[i].irq_list);
+		vmd->irqs[i].vmd_vector = vmd->msix_entries[i].vector;
+		vmd->irqs[i].index = i;
+
+		err = devm_request_irq(&dev->dev, vmd->irqs[i].vmd_vector,
+				       vmd_irq, 0, "vmd", &vmd->irqs[i]);
+		if (err)
+			return err;
+	}
+
+	spin_lock_init(&vmd->cfg_lock);
+	pci_set_drvdata(dev, vmd);
+	err = vmd_enable_domain(vmd);
+	if (err)
+		return err;
+
+	dev_info(&vmd->dev->dev, "Bound to PCI domain %04x\n",
+		 vmd->sysdata.domain);
+	return 0;
+}
+
+static void vmd_remove(struct pci_dev *dev)
+{
+	struct vmd_dev *vmd = pci_get_drvdata(dev);
+
+	pci_set_drvdata(dev, NULL);
+	sysfs_remove_link(&vmd->dev->dev.kobj, "domain");
+	pci_stop_root_bus(vmd->bus);
+	pci_remove_root_bus(vmd->bus);
+	vmd_teardown_dma_ops(vmd);
+	irq_domain_remove(vmd->irq_domain);
+}
+
+#ifdef CONFIG_PM
+static int vmd_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+
+	pci_save_state(pdev);
+	return 0;
+}
+
+static int vmd_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+
+	pci_restore_state(pdev);
+	return 0;
+}
+#endif
+static SIMPLE_DEV_PM_OPS(vmd_dev_pm_ops, vmd_suspend, vmd_resume);
+
+static const struct pci_device_id vmd_ids[] = {
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x201d),},
+	{0,}
+};
+MODULE_DEVICE_TABLE(pci, vmd_ids);
+
+static struct pci_driver vmd_drv = {
+	.name		= "vmd",
+	.id_table	= vmd_ids,
+	.probe		= vmd_probe,
+	.remove		= vmd_remove,
+	.driver		= {
+		.pm	= &vmd_dev_pm_ops,
+	},
+};
+module_pci_driver(vmd_drv);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("0.6");
diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c
index 182224a..20db790 100644
--- a/drivers/pci/pcie/aer/aer_inject.c
+++ b/drivers/pci/pcie/aer/aer_inject.c
@@ -41,12 +41,12 @@
 	u32 header_log1;
 	u32 header_log2;
 	u32 header_log3;
-	u16 domain;
+	u32 domain;
 };
 
 struct aer_error {
 	struct list_head list;
-	u16 domain;
+	u32 domain;
 	unsigned int bus;
 	unsigned int devfn;
 	int pos_cap_err;
@@ -74,7 +74,7 @@
 /* Protect einjected and pci_bus_ops_list */
 static DEFINE_SPINLOCK(inject_lock);
 
-static void aer_error_init(struct aer_error *err, u16 domain,
+static void aer_error_init(struct aer_error *err, u32 domain,
 			   unsigned int bus, unsigned int devfn,
 			   int pos_cap_err)
 {
@@ -86,7 +86,7 @@
 }
 
 /* inject_lock must be held before calling */
-static struct aer_error *__find_aer_error(u16 domain, unsigned int bus,
+static struct aer_error *__find_aer_error(u32 domain, unsigned int bus,
 					  unsigned int devfn)
 {
 	struct aer_error *err;
@@ -106,7 +106,7 @@
 	int domain = pci_domain_nr(dev->bus);
 	if (domain < 0)
 		return NULL;
-	return __find_aer_error((u16)domain, dev->bus->number, dev->devfn);
+	return __find_aer_error(domain, dev->bus->number, dev->devfn);
 }
 
 /* inject_lock must be held before calling */
@@ -196,7 +196,7 @@
 	domain = pci_domain_nr(bus);
 	if (domain < 0)
 		goto out;
-	err = __find_aer_error((u16)domain, bus->number, devfn);
+	err = __find_aer_error(domain, bus->number, devfn);
 	if (!err)
 		goto out;
 
@@ -228,7 +228,7 @@
 	domain = pci_domain_nr(bus);
 	if (domain < 0)
 		goto out;
-	err = __find_aer_error((u16)domain, bus->number, devfn);
+	err = __find_aer_error(domain, bus->number, devfn);
 	if (!err)
 		goto out;
 
@@ -329,7 +329,7 @@
 	u32 sever, cor_mask, uncor_mask, cor_mask_orig = 0, uncor_mask_orig = 0;
 	int ret = 0;
 
-	dev = pci_get_domain_bus_and_slot((int)einj->domain, einj->bus, devfn);
+	dev = pci_get_domain_bus_and_slot(einj->domain, einj->bus, devfn);
 	if (!dev)
 		return -ENODEV;
 	rpdev = pcie_find_root_port(dev);
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 22aa961..ca05cc8 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -1058,6 +1058,7 @@
 	__irq_set_handler(virq, handler, 0, handler_name);
 	irq_set_handler_data(virq, handler_data);
 }
+EXPORT_SYMBOL(irq_domain_set_info);
 
 /**
  * irq_domain_reset_irq_data - Clear hwirq, chip and chip_data in @irq_data
diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
index 6b0c0b7..5e15cb4 100644
--- a/kernel/irq/msi.c
+++ b/kernel/irq/msi.c
@@ -109,9 +109,11 @@
 	if (irq_find_mapping(domain, hwirq) > 0)
 		return -EEXIST;
 
-	ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
-	if (ret < 0)
-		return ret;
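+	/*
+	 * A parentless MSI irqdomain (such as VMD's) terminates the hierarchy
+	 * here, so only allocate from the parent when one exists.
+	 */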
+	if (domain->parent) {
+		ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
+		if (ret < 0)
+			return ret;
+	}
 
 	for (i = 0; i < nr_irqs; i++) {
 		ret = ops->msi_init(domain, info, virq + i, hwirq + i, arg);