author	Yinghai Lu <yinghai@kernel.org>	2012-09-17 22:23:26 -0700
committer	Yinghai Lu <yinghai@kernel.org>	2012-09-17 22:23:26 -0700
commit	4f865bbc0c66649aa0766fde9043aa5728d77e72 (patch)
tree	e05cf32145e652217b786bbdb58078f92476f662
parent	cc4bca91e848d9d864d8e1bc7895089df01debf8 (diff)
IOMMU: Add init_dmar_one()
Will need this for hot-added Intel IOMMUs.

Signed-off-by: Yinghai Lu <yinghai@kernel.org>
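The new helper is not wired up to a caller here. As a rough sketch of how a DMAR
hot-add path might use it (the handler name intel_iommu_hot_add_drhd(), and the
assumptions that alloc_iommu() has already populated drhd->iommu and that
g_iommus has room for the new unit, are illustrative only, not part of this patch):

static int intel_iommu_hot_add_drhd(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd =
		(struct acpi_dmar_hardware_unit *)dmaru->hdr;
	int ret;

	/* Resolve the PCI devices in the DRHD's device scope; this is
	 * why dmar_parse_dev_scope() loses its __init annotation below. */
	ret = dmar_parse_dev_scope((void *)(drhd + 1),
				   ((void *)drhd) + drhd->header.length,
				   &dmaru->devices_cnt, &dmaru->devices,
				   drhd->segment);
	if (ret)
		return ret;

	/* Per-unit bring-up: root/context tables, invalidation mode,
	 * fault reporting, and translation enable for this IOMMU only,
	 * instead of re-running init_dmars() over every unit. */
	return init_dmar_one(dmaru);
}

The ATSR changes below follow the same pattern: __dmar_parse_one_atsr() hands the
newly added unit back through *patsru, so a hot-add path could pass it straight to
the now-exported atsr_parse_dev().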
-rw-r--r--	drivers/iommu/dmar.c	4
-rw-r--r--	drivers/iommu/intel-iommu.c	123
2 files changed, 115 insertions, 12 deletions
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 37409dde683a6..4d12dbae78912 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -62,7 +62,7 @@ static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
list_add(&drhd->list, &dmar_drhd_units);
}
-static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
+static int dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
struct pci_dev **dev, u16 segment)
{
struct pci_bus *bus;
@@ -113,7 +113,7 @@ static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
return 0;
}
-int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
+int dmar_parse_dev_scope(void *start, void *end, int *cnt,
struct pci_dev ***devices, u16 segment)
{
struct acpi_dmar_device_scope *scope;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index a869cfc99782f..363248d24c3ac 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2417,19 +2417,12 @@ static int __init init_dmars(void)
* initialize and program root entry to not present
* endfor
*/
- for_each_drhd_unit(drhd) {
/*
* lock not needed as this is only incremented in the single
* threaded kernel __init code path all other access are read
* only
*/
- if (g_num_of_iommus < IOMMU_UNITS_SUPPORTED) {
- g_num_of_iommus++;
- continue;
- }
- printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
- IOMMU_UNITS_SUPPORTED);
- }
+ g_num_of_iommus = IOMMU_UNITS_SUPPORTED;
g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
GFP_KERNEL);
@@ -2630,6 +2623,109 @@ error:
return ret;
}
+int init_dmar_one(struct dmar_drhd_unit *drhd)
+{
+ struct intel_iommu *iommu;
+ int ret;
+
+ /*
+ * for each drhd
+ * allocate root
+ * initialize and program root entry to not present
+ * endfor
+ */
+
+ if (drhd->ignored)
+ return 0;
+
+ iommu = drhd->iommu;
+ g_iommus[iommu->seq_id] = iommu;
+
+ ret = iommu_init_domains(iommu);
+ if (ret)
+ goto error;
+
+ /*
+ * TBD:
+ * we could share the same root & context tables
+ * among all IOMMU's. Need to Split it later.
+ */
+ ret = iommu_alloc_root_entry(iommu);
+ if (ret) {
+ printk(KERN_ERR "IOMMU: allocate root entry failed\n");
+ goto error;
+ }
+
+ /*
+ * Start from the sane iommu hardware state.
+ */
+ /*
+ * If the queued invalidation is already initialized by us
+ * (for example, while enabling interrupt-remapping) then
+ * we got the things already rolling from a sane state.
+ */
+ if (!iommu->qi) {
+ /*
+ * Clear any previous faults.
+ */
+ dmar_fault(-1, iommu);
+ /*
+ * Disable queued invalidation if supported and already enabled
+ * before OS handover.
+ */
+ dmar_disable_qi(iommu);
+ }
+
+ if (dmar_enable_qi(iommu)) {
+ /*
+ * Queued Invalidate not enabled, use Register Based
+ * Invalidate
+ */
+ iommu->flush.flush_context = __iommu_flush_context;
+ iommu->flush.flush_iotlb = __iommu_flush_iotlb;
+ printk(KERN_INFO
+ "IOMMU %d 0x%Lx: using Register based invalidation\n",
+ iommu->seq_id, (unsigned long long)drhd->reg_base_addr);
+ } else {
+ iommu->flush.flush_context = qi_flush_context;
+ iommu->flush.flush_iotlb = qi_flush_iotlb;
+ printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued invalidation\n",
+ iommu->seq_id, (unsigned long long)drhd->reg_base_addr);
+ }
+
+ /*
+ * for each drhd
+ * enable fault log
+ * global invalidate context cache
+ * global invalidate iotlb
+ * enable translation
+ */
+ iommu_flush_write_buffer(iommu);
+
+ ret = dmar_set_interrupt(iommu);
+ if (ret)
+ goto error;
+
+ iommu_set_root_entry(iommu);
+
+ iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
+ iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
+
+ ret = iommu_enable_translation(iommu);
+ if (ret)
+ goto error;
+
+ iommu_disable_protect_mem_regions(iommu);
+
+ return 0;
+error:
+ free_dmar_iommu(iommu);
+ free_iommu(iommu);
+ drhd->iommu = NULL;
+ return ret;
+}
+
+
/* This takes a number of _MM_ pages, not VTD pages */
static struct iova *intel_alloc_iova(struct device *dev,
struct dmar_domain *domain,
@@ -3485,7 +3581,8 @@ rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
LIST_HEAD(dmar_atsr_units);
-int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
+int __dmar_parse_one_atsr(struct acpi_dmar_header *hdr,
+ struct dmar_atsr_unit **patsru)
{
struct acpi_dmar_atsr *atsr;
struct dmar_atsr_unit *atsru;
@@ -3500,11 +3597,17 @@ int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
atsru->segment = atsr->segment;
list_add(&atsru->list, &dmar_atsr_units);
+ if (patsru)
+ *patsru = atsru;
return 0;
}
+int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
+{
+ return __dmar_parse_one_atsr(hdr, NULL);
+}
-static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
+int atsr_parse_dev(struct dmar_atsr_unit *atsru)
{
int rc;
struct acpi_dmar_atsr *atsr;