diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index ae42a9e43997..769e850b9767 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -310,7 +310,6 @@ static int iommu_skip_te_disable;
 #define IDENTMAP_GFX		2
 #define IDENTMAP_AZALIA		4
 
-static DEFINE_SPINLOCK(device_domain_lock);
 const struct iommu_ops intel_iommu_ops;
 
 static bool translation_pre_enabled(struct intel_iommu *iommu)
@@ -535,7 +534,7 @@ static int domain_update_device_node(struct dmar_domain *domain)
 	struct device_domain_info *info;
 	int nid = NUMA_NO_NODE;
 
-	spin_lock(&device_domain_lock);
+	spin_lock(&domain->lock);
 	list_for_each_entry(info, &domain->devices, link) {
 		/*
 		 * There could possibly be multiple device numa nodes as devices
@@ -547,7 +546,7 @@ static int domain_update_device_node(struct dmar_domain *domain)
 		if (nid != NUMA_NO_NODE)
 			break;
 	}
-	spin_unlock(&device_domain_lock);
+	spin_unlock(&domain->lock);
 
 	return nid;
 }
@@ -1378,15 +1377,15 @@ iommu_support_dev_iotlb(struct dmar_domain *domain, struct intel_iommu *iommu,
 	if (!iommu->qi)
 		return NULL;
 
-	spin_lock(&device_domain_lock);
+	spin_lock(&domain->lock);
 	list_for_each_entry(info, &domain->devices, link) {
 		if (info->iommu == iommu && info->bus == bus &&
 		    info->devfn == devfn) {
-			spin_unlock(&device_domain_lock);
+			spin_unlock(&domain->lock);
 			return info->ats_supported ? info : NULL;
 		}
 	}
-	spin_unlock(&device_domain_lock);
+	spin_unlock(&domain->lock);
 
 	return NULL;
 }
@@ -1396,7 +1395,7 @@ static void domain_update_iotlb(struct dmar_domain *domain)
 	struct device_domain_info *info;
 	bool has_iotlb_device = false;
 
-	spin_lock(&device_domain_lock);
+	spin_lock(&domain->lock);
 	list_for_each_entry(info, &domain->devices, link) {
 		if (info->ats_enabled) {
 			has_iotlb_device = true;
@@ -1404,7 +1403,7 @@ static void domain_update_iotlb(struct dmar_domain *domain)
 		}
 	}
 	domain->has_iotlb_device = has_iotlb_device;
-	spin_unlock(&device_domain_lock);
+	spin_unlock(&domain->lock);
 }
 
 static void iommu_enable_dev_iotlb(struct device_domain_info *info)
@@ -1500,10 +1499,10 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
 	if (!domain->has_iotlb_device)
 		return;
 
-	spin_lock(&device_domain_lock);
+	spin_lock(&domain->lock);
 	list_for_each_entry(info, &domain->devices, link)
 		__iommu_flush_dev_iotlb(info, addr, mask);
-	spin_unlock(&device_domain_lock);
+	spin_unlock(&domain->lock);
 }
 
 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
@@ -1763,6 +1762,7 @@ static struct dmar_domain *alloc_domain(unsigned int type)
 		domain->flags |= DOMAIN_FLAG_USE_FIRST_LEVEL;
 	domain->has_iotlb_device = false;
 	INIT_LIST_HEAD(&domain->devices);
+	spin_lock_init(&domain->lock);
 
 	return domain;
 }
@@ -2446,9 +2446,9 @@ static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
 	if (ret)
 		return ret;
 	info->domain = domain;
-	spin_lock(&device_domain_lock);
+	spin_lock(&domain->lock);
 	list_add(&info->link, &domain->devices);
-	spin_unlock(&device_domain_lock);
+	spin_unlock(&domain->lock);
 
 	/* PASID table is mandatory for a PCI device in scalable mode. */
 	if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
@@ -4123,6 +4123,7 @@ static void domain_context_clear(struct device_domain_info *info)
 static void dmar_remove_one_dev_info(struct device *dev)
 {
 	struct device_domain_info *info = dev_iommu_priv_get(dev);
+	struct dmar_domain *domain = info->domain;
 	struct intel_iommu *iommu = info->iommu;
 
 	if (!dev_is_real_dma_subdevice(info->dev)) {
@@ -4135,11 +4136,11 @@ static void dmar_remove_one_dev_info(struct device *dev)
 		intel_pasid_free_table(info->dev);
 	}
 
-	spin_lock(&device_domain_lock);
+	spin_lock(&domain->lock);
 	list_del(&info->link);
-	spin_unlock(&device_domain_lock);
+	spin_unlock(&domain->lock);
 
-	domain_detach_iommu(info->domain, iommu);
+	domain_detach_iommu(domain, iommu);
 	info->domain = NULL;
 }
 
@@ -4422,7 +4423,7 @@ static bool domain_support_force_snooping(struct dmar_domain *domain)
 	struct device_domain_info *info;
 	bool support = true;
 
-	assert_spin_locked(&device_domain_lock);
+	assert_spin_locked(&domain->lock);
 	list_for_each_entry(info, &domain->devices, link) {
 		if (!ecap_sc_support(info->iommu->ecap)) {
 			support = false;
@@ -4437,8 +4438,7 @@ static void domain_set_force_snooping(struct dmar_domain *domain)
 {
 	struct device_domain_info *info;
 
-	assert_spin_locked(&device_domain_lock);
-
+	assert_spin_locked(&domain->lock);
 	/*
 	 * Second level page table supports per-PTE snoop control. The
 	 * iommu_map() interface will handle this by setting SNP bit.
@@ -4460,15 +4460,15 @@ static bool intel_iommu_enforce_cache_coherency(struct iommu_domain *domain)
 	if (dmar_domain->force_snooping)
 		return true;
 
-	spin_lock(&device_domain_lock);
+	spin_lock(&dmar_domain->lock);
 	if (!domain_support_force_snooping(dmar_domain)) {
-		spin_unlock(&device_domain_lock);
+		spin_unlock(&dmar_domain->lock);
 		return false;
 	}
 
 	domain_set_force_snooping(dmar_domain);
 	dmar_domain->force_snooping = true;
-	spin_unlock(&device_domain_lock);
+	spin_unlock(&dmar_domain->lock);
 
 	return true;
 }
diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
index 198c6c822ef4..df64d3d9c49a 100644
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -541,6 +541,7 @@ struct dmar_domain {
 	u8 force_snooping : 1;		/* Create IOPTEs with snoop control */
 	u8 set_pte_snp:1;
 
+	spinlock_t lock;		/* Protect device tracking lists */
 	struct list_head devices;	/* all devices' list */
 
 	struct dma_pte *pgd;		/* virtual address */
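Note: the hunks above replace the single global device_domain_lock with a spinlock embedded in each dmar_domain, so every walk of domain->devices takes the lock owned by that domain. What follows is a minimal user-space sketch of that locking-granularity pattern, not kernel code; all names (demo_domain, demo_device, demo_domain_add, demo_domain_for_each, print_dev) are hypothetical and only the per-object-lock idea is taken from the patch.

/*
 * Illustrative model of the per-domain lock pattern: each "domain" owns
 * the mutex that protects its own device list, so operations on different
 * domains do not serialize on one global lock.
 * Build with: cc demo.c -lpthread
 */
#include <pthread.h>
#include <stdio.h>

struct demo_device {
	int id;
	struct demo_device *next;
};

struct demo_domain {
	pthread_mutex_t lock;        /* protects 'devices', like dmar_domain::lock */
	struct demo_device *devices; /* singly linked device list */
};

static void demo_domain_init(struct demo_domain *d)
{
	pthread_mutex_init(&d->lock, NULL);
	d->devices = NULL;
}

/* Rough analogue of domain_add_dev_info(): link a device under the domain's lock. */
static void demo_domain_add(struct demo_domain *d, struct demo_device *dev)
{
	pthread_mutex_lock(&d->lock);
	dev->next = d->devices;
	d->devices = dev;
	pthread_mutex_unlock(&d->lock);
}

/* Rough analogue of iommu_flush_dev_iotlb(): walk the list under the domain's lock. */
static void demo_domain_for_each(struct demo_domain *d,
				 void (*fn)(struct demo_device *))
{
	struct demo_device *dev;

	pthread_mutex_lock(&d->lock);
	for (dev = d->devices; dev; dev = dev->next)
		fn(dev);
	pthread_mutex_unlock(&d->lock);
}

static void print_dev(struct demo_device *dev)
{
	printf("device %d\n", dev->id);
}

int main(void)
{
	struct demo_domain dom;
	struct demo_device d0 = { .id = 0 }, d1 = { .id = 1 };

	demo_domain_init(&dom);
	demo_domain_add(&dom, &d0);
	demo_domain_add(&dom, &d1);
	demo_domain_for_each(&dom, print_dev);
	return 0;
}

Because each domain serializes only its own list, attach/detach and flush paths for different domains no longer contend on a single lock, which appears to be the motivation for the change above.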