
iommu/vt-d: Make use of iova deferred flushing
Remove the deferred flushing implementation in the Intel
VT-d driver and use the one from the common iova code
instead.

Signed-off-by: Joerg Roedel <jroedel@suse.de>
joergroedel committed Aug 15, 2017
1 parent c8acb28 commit 13cf017
Showing 1 changed file with 38 additions and 159 deletions.
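
The change drops the driver-private per-CPU flush tables and timer and plugs the domain's iova allocator into the flush queue provided by the common iova code. As a quick orientation before the diff, here is a condensed sketch of the new flow, assembled only from the calls visible in the hunks below (illustrative only, not a buildable excerpt; the surrounding driver context, locking and error handling are omitted):

    /* 1. Domain setup: create the iova domain and attach a flush queue
     *    with two callbacks owned by the driver. */
    init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN, DMA_32BIT_PFN);
    err = init_iova_flush_queue(&domain->iovad, iommu_flush_iova, iova_entry_free);
    if (err)
            return err;

    /* 2. Unmap fast path: instead of flushing the IOTLB synchronously,
     *    hand the IOVA range and the page-table freelist to the queue. */
    queue_iova(&domain->iovad, iova_pfn, nrpages, (unsigned long)freelist);

    /* 3. When the common code drains the queue it calls back into the driver:
     *    iommu_flush_iova() does a domain-selective IOTLB flush on every
     *    IOMMU of the domain, and iova_entry_free() releases each queued
     *    page-table freelist via dma_free_pagelist(). */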
197 changes: 38 additions & 159 deletions drivers/iommu/intel-iommu.c
@@ -458,31 +458,6 @@ static LIST_HEAD(dmar_rmrr_units);
#define for_each_rmrr_units(rmrr) \
list_for_each_entry(rmrr, &dmar_rmrr_units, list)

static void flush_unmaps_timeout(unsigned long data);

struct deferred_flush_entry {
unsigned long iova_pfn;
unsigned long nrpages;
struct dmar_domain *domain;
struct page *freelist;
};

#define HIGH_WATER_MARK 250
struct deferred_flush_table {
int next;
struct deferred_flush_entry entries[HIGH_WATER_MARK];
};

struct deferred_flush_data {
spinlock_t lock;
int timer_on;
struct timer_list timer;
long size;
struct deferred_flush_table *tables;
};

static DEFINE_PER_CPU(struct deferred_flush_data, deferred_flush);

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

@@ -1309,6 +1284,13 @@ static void dma_free_pagelist(struct page *freelist)
}
}

static void iova_entry_free(unsigned long data)
{
struct page *freelist = (struct page *)data;

dma_free_pagelist(freelist);
}

/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
@@ -1622,6 +1604,25 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
addr, mask);
}

static void iommu_flush_iova(struct iova_domain *iovad)
{
struct dmar_domain *domain;
int idx;

domain = container_of(iovad, struct dmar_domain, iovad);

for_each_domain_iommu(idx, domain) {
struct intel_iommu *iommu = g_iommus[idx];
u16 did = domain->iommu_did[iommu->seq_id];

iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);

if (!cap_caching_mode(iommu->cap))
iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
0, MAX_AGAW_PFN_WIDTH);
}
}

static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
u32 pmen;
@@ -1932,9 +1933,16 @@ static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
{
int adjust_width, agaw;
unsigned long sagaw;
int err;

init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
DMA_32BIT_PFN);

err = init_iova_flush_queue(&domain->iovad,
iommu_flush_iova, iova_entry_free);
if (err)
return err;

domain_reserve_special_ranges(domain);

/* calculate AGAW */
@@ -1986,14 +1994,6 @@ static void domain_exit(struct dmar_domain *domain)
if (!domain)
return;

/* Flush any lazy unmaps that may reference this domain */
if (!intel_iommu_strict) {
int cpu;

for_each_possible_cpu(cpu)
flush_unmaps_timeout(cpu);
}

/* Remove associated devices and clear attached or cached domains */
rcu_read_lock();
domain_remove_dev_info(domain);
@@ -3206,7 +3206,7 @@ static int __init init_dmars(void)
bool copied_tables = false;
struct device *dev;
struct intel_iommu *iommu;
int i, ret, cpu;
int i, ret;

/*
* for each drhd
@@ -3239,22 +3239,6 @@
goto error;
}

for_each_possible_cpu(cpu) {
struct deferred_flush_data *dfd = per_cpu_ptr(&deferred_flush,
cpu);

dfd->tables = kzalloc(g_num_of_iommus *
sizeof(struct deferred_flush_table),
GFP_KERNEL);
if (!dfd->tables) {
ret = -ENOMEM;
goto free_g_iommus;
}

spin_lock_init(&dfd->lock);
setup_timer(&dfd->timer, flush_unmaps_timeout, cpu);
}

for_each_active_iommu(iommu, drhd) {
g_iommus[iommu->seq_id] = iommu;

@@ -3437,10 +3421,9 @@ static int __init init_dmars(void)
disable_dmar_iommu(iommu);
free_dmar_iommu(iommu);
}
free_g_iommus:
for_each_possible_cpu(cpu)
kfree(per_cpu_ptr(&deferred_flush, cpu)->tables);

kfree(g_iommus);

error:
return ret;
}
@@ -3645,110 +3628,6 @@ static dma_addr_t intel_map_page(struct device *dev, struct page *page,
dir, *dev->dma_mask);
}

static void flush_unmaps(struct deferred_flush_data *flush_data)
{
int i, j;

flush_data->timer_on = 0;

/* just flush them all */
for (i = 0; i < g_num_of_iommus; i++) {
struct intel_iommu *iommu = g_iommus[i];
struct deferred_flush_table *flush_table =
&flush_data->tables[i];
if (!iommu)
continue;

if (!flush_table->next)
continue;

/* In caching mode, global flushes turn emulation expensive */
if (!cap_caching_mode(iommu->cap))
iommu->flush.flush_iotlb(iommu, 0, 0, 0,
DMA_TLB_GLOBAL_FLUSH);
for (j = 0; j < flush_table->next; j++) {
unsigned long mask;
struct deferred_flush_entry *entry =
&flush_table->entries[j];
unsigned long iova_pfn = entry->iova_pfn;
unsigned long nrpages = entry->nrpages;
struct dmar_domain *domain = entry->domain;
struct page *freelist = entry->freelist;

/* On real hardware multiple invalidations are expensive */
if (cap_caching_mode(iommu->cap))
iommu_flush_iotlb_psi(iommu, domain,
mm_to_dma_pfn(iova_pfn),
nrpages, !freelist, 0);
else {
mask = ilog2(nrpages);
iommu_flush_dev_iotlb(domain,
(uint64_t)iova_pfn << PAGE_SHIFT, mask);
}
free_iova_fast(&domain->iovad, iova_pfn, nrpages);
if (freelist)
dma_free_pagelist(freelist);
}
flush_table->next = 0;
}

flush_data->size = 0;
}

static void flush_unmaps_timeout(unsigned long cpuid)
{
struct deferred_flush_data *flush_data = per_cpu_ptr(&deferred_flush, cpuid);
unsigned long flags;

spin_lock_irqsave(&flush_data->lock, flags);
flush_unmaps(flush_data);
spin_unlock_irqrestore(&flush_data->lock, flags);
}

static void add_unmap(struct dmar_domain *dom, unsigned long iova_pfn,
unsigned long nrpages, struct page *freelist)
{
unsigned long flags;
int entry_id, iommu_id;
struct intel_iommu *iommu;
struct deferred_flush_entry *entry;
struct deferred_flush_data *flush_data;

flush_data = raw_cpu_ptr(&deferred_flush);

/* Flush all CPUs' entries to avoid deferring too much. If
* this becomes a bottleneck, can just flush us, and rely on
* flush timer for the rest.
*/
if (flush_data->size == HIGH_WATER_MARK) {
int cpu;

for_each_online_cpu(cpu)
flush_unmaps_timeout(cpu);
}

spin_lock_irqsave(&flush_data->lock, flags);

iommu = domain_get_iommu(dom);
iommu_id = iommu->seq_id;

entry_id = flush_data->tables[iommu_id].next;
++(flush_data->tables[iommu_id].next);

entry = &flush_data->tables[iommu_id].entries[entry_id];
entry->domain = dom;
entry->iova_pfn = iova_pfn;
entry->nrpages = nrpages;
entry->freelist = freelist;

if (!flush_data->timer_on) {
mod_timer(&flush_data->timer, jiffies + msecs_to_jiffies(10));
flush_data->timer_on = 1;
}
flush_data->size++;
spin_unlock_irqrestore(&flush_data->lock, flags);
}

static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
{
struct dmar_domain *domain;
@@ -3784,7 +3663,8 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages));
dma_free_pagelist(freelist);
} else {
add_unmap(domain, iova_pfn, nrpages, freelist);
queue_iova(&domain->iovad, iova_pfn, nrpages,
(unsigned long)freelist);
/*
* queue up the release of the unmap to save the 1/6th of the
* cpu used up by the iotlb flush operation...
@@ -4721,7 +4601,6 @@ static void free_all_cpu_cached_iovas(unsigned int cpu)
static int intel_iommu_cpu_dead(unsigned int cpu)
{
free_all_cpu_cached_iovas(cpu);
flush_unmaps_timeout(cpu);
return 0;
}

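For context on the intel_unmap() hunk above: strict mode (intel_iommu_strict) keeps the old synchronous behaviour, while the default path now defers to the common flush queue. A simplified view of that branch, again using only calls that appear in this file (variable setup and the per-IOMMU flush loop are omitted):

    if (intel_iommu_strict) {
            /* Synchronous: invalidate the IOTLB and release everything now. */
            iommu_flush_iotlb_psi(iommu, domain, mm_to_dma_pfn(iova_pfn),
                                  nrpages, !freelist, 0);
            free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages));
            dma_free_pagelist(freelist);
    } else {
            /* Deferred: queue the range; the common code batches the IOTLB
             * invalidation, saving CPU on the unmap fast path. */
            queue_iova(&domain->iovad, iova_pfn, nrpages, (unsigned long)freelist);
    }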
