xtensa: disable preemption around cache alias management calls
Although cache alias management calls set up and tear down TLB entries,
and fast_second_level_miss is able to restore a TLB entry should it be
evicted, they absolutely cannot preempt each other because they use the
same TLBTEMP area for different purposes.
Disable preemption around all cache alias management calls to enforce
that.

Cc: stable@vger.kernel.org
Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
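
For illustration only (not part of the commit), the pattern applied at each call site below looks roughly like the following sketch; the helper name example_flush_user_alias() is made up, while TLBTEMP_BASE_1, DCACHE_ALIAS_MASK, __flush_invalidate_dcache_page_alias(), preempt_disable() and preempt_enable() are the symbols actually used in arch/xtensa/mm/cache.c:

/*
 * Illustrative sketch, not taken from the patch: each cache alias
 * management helper maps the page at a temporary alias inside the shared
 * TLBTEMP window, so the whole map-and-flush sequence must not be
 * preempted by another task that would reuse the same window.
 */
static void example_flush_user_alias(unsigned long phys, unsigned long vaddr)
{
	unsigned long virt;

	preempt_disable();	/* keep exclusive use of the TLBTEMP window */
	virt = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
	__flush_invalidate_dcache_page_alias(virt, phys);
	preempt_enable();
}

Disabling preemption (rather than taking a lock) should be enough here because the temporary mapping lives in the local CPU's TLB, so only another task scheduled on the same CPU could reuse the TLBTEMP window underneath it.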
jcmvbkbc committed Nov 16, 2020
1 parent 481535c commit 3a860d1
Showing 1 changed file with 14 additions and 0 deletions.
arch/xtensa/mm/cache.c
@@ -70,8 +70,10 @@ static inline void kmap_invalidate_coherent(struct page *page,
 			kvaddr = TLBTEMP_BASE_1 +
 				(page_to_phys(page) & DCACHE_ALIAS_MASK);
 
+			preempt_disable();
 			__invalidate_dcache_page_alias(kvaddr,
 						       page_to_phys(page));
+			preempt_enable();
 		}
 	}
 }
@@ -156,6 +158,7 @@ void flush_dcache_page(struct page *page)
 		if (!alias && !mapping)
 			return;
 
+		preempt_disable();
 		virt = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
 		__flush_invalidate_dcache_page_alias(virt, phys);
 
@@ -166,6 +169,7 @@ void flush_dcache_page(struct page *page)
 
 		if (mapping)
 			__invalidate_icache_page_alias(virt, phys);
+		preempt_enable();
 	}
 
 	/* There shouldn't be an entry in the cache for this page anymore. */
@@ -199,8 +203,10 @@ void local_flush_cache_page(struct vm_area_struct *vma, unsigned long address,
 	unsigned long phys = page_to_phys(pfn_to_page(pfn));
 	unsigned long virt = TLBTEMP_BASE_1 + (address & DCACHE_ALIAS_MASK);
 
+	preempt_disable();
 	__flush_invalidate_dcache_page_alias(virt, phys);
 	__invalidate_icache_page_alias(virt, phys);
+	preempt_enable();
 }
 EXPORT_SYMBOL(local_flush_cache_page);
 
@@ -227,11 +233,13 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
 		unsigned long phys = page_to_phys(page);
 		unsigned long tmp;
 
+		preempt_disable();
 		tmp = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
 		__flush_invalidate_dcache_page_alias(tmp, phys);
 		tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
 		__flush_invalidate_dcache_page_alias(tmp, phys);
 		__invalidate_icache_page_alias(tmp, phys);
+		preempt_enable();
 
 		clear_bit(PG_arch_1, &page->flags);
 	}
@@ -265,7 +273,9 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 
 	if (alias) {
 		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
+		preempt_disable();
 		__flush_invalidate_dcache_page_alias(t, phys);
+		preempt_enable();
 	}
 
 	/* Copy data */
@@ -280,9 +290,11 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 	if (alias) {
 		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
 
+		preempt_disable();
 		__flush_invalidate_dcache_range((unsigned long) dst, len);
 		if ((vma->vm_flags & VM_EXEC) != 0)
 			__invalidate_icache_page_alias(t, phys);
+		preempt_enable();
 
 	} else if ((vma->vm_flags & VM_EXEC) != 0) {
 		__flush_dcache_range((unsigned long)dst,len);
@@ -304,7 +316,9 @@ extern void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
 
 	if (alias) {
 		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
+		preempt_disable();
 		__flush_invalidate_dcache_page_alias(t, phys);
+		preempt_enable();
 	}
 
 	memcpy(dst, src, len);
