
Commit ef6e06b

weiny2 authored and akpm00 committed
highmem: fix kmap_to_page() for kmap_local_page() addresses
kmap_to_page() is used to get the page for a virtual address which may be kmap'ed. Unfortunately, kmap_local_page() stores mappings in a thread-local array separate from kmap(). These mappings were not checked by the call.

Check the kmap_local_page() mappings and return the page if found.

Because kmap_to_page() is intended to be removed, add a WARN_ON_ONCE() to the kmap checks to flag potential issues early.

NOTE: Due to 32bit x86's use of kmap local in iomap atomic, KMAP_LOCAL does not require HIGHMEM to be set. Therefore the support calls required a new KMAP_LOCAL section to fix 0day build errors.

[akpm@linux-foundation.org: fix warning]
Link: https://lkml.kernel.org/r/20221006040555.1502679-1-ira.weiny@intel.com
Signed-off-by: Ira Weiny <ira.weiny@intel.com>
Reported-by: Al Viro <viro@zeniv.linux.org.uk>
Reported-by: kernel test robot <lkp@intel.com>
Cc: "Fabio M. De Francesco" <fmdefrancesco@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
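For context, the failure mode the patch addresses can be sketched as follows. This is an illustrative example, not code from the kernel tree: the helper name hypothetical_demo() is made up, but kmap_local_page(), kunmap_local() and kmap_to_page() are the real APIs involved. Before this change, kmap_to_page() only recognized kmap() (PKMAP) addresses, so a kmap_local_page() address fell through to virt_to_page() and could yield the wrong struct page on HIGHMEM systems.

#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/string.h>

/* Hypothetical demo: map a highmem page locally, then look it up again. */
static struct page *hypothetical_demo(void)
{
	struct page *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
	struct page *found;
	void *vaddr;

	if (!page)
		return NULL;

	vaddr = kmap_local_page(page);	/* thread-local mapping */
	memset(vaddr, 0, PAGE_SIZE);

	/*
	 * Before this commit the lookup missed the thread-local array and
	 * returned virt_to_page(vaddr); with the fix it resolves to 'page'.
	 */
	found = kmap_to_page(vaddr);

	kunmap_local(vaddr);
	__free_page(page);
	return found;
}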
1 parent 15cd900 · commit ef6e06b

File tree

1 file changed (+31, -12 lines)

mm/highmem.c

+31 -12
@@ -30,6 +30,17 @@
 #include <asm/tlbflush.h>
 #include <linux/vmalloc.h>
 
+#ifdef CONFIG_KMAP_LOCAL
+static inline int kmap_local_calc_idx(int idx)
+{
+	return idx + KM_MAX_IDX * smp_processor_id();
+}
+
+#ifndef arch_kmap_local_map_idx
+#define arch_kmap_local_map_idx(idx, pfn)	kmap_local_calc_idx(idx)
+#endif
+#endif /* CONFIG_KMAP_LOCAL */
+
 /*
  * Virtual_count is not a pure "count".
  *  0 means that it is not mapped, and has not been mapped
@@ -142,12 +153,29 @@ pte_t *pkmap_page_table;
 
 struct page *__kmap_to_page(void *vaddr)
 {
+	unsigned long base = (unsigned long) vaddr & PAGE_MASK;
+	struct kmap_ctrl *kctrl = &current->kmap_ctrl;
 	unsigned long addr = (unsigned long)vaddr;
+	int i;
+
+	/* kmap() mappings */
+	if (WARN_ON_ONCE(addr >= PKMAP_ADDR(0) &&
+			 addr < PKMAP_ADDR(LAST_PKMAP)))
+		return pte_page(pkmap_page_table[PKMAP_NR(addr)]);
 
-	if (addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP)) {
-		int i = PKMAP_NR(addr);
+	/* kmap_local_page() mappings */
+	if (WARN_ON_ONCE(base >= __fix_to_virt(FIX_KMAP_END) &&
+			 base < __fix_to_virt(FIX_KMAP_BEGIN))) {
+		for (i = 0; i < kctrl->idx; i++) {
+			unsigned long base_addr;
+			int idx;
 
-		return pte_page(pkmap_page_table[i]);
+			idx = arch_kmap_local_map_idx(i, pte_pfn(pteval));
+			base_addr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+
+			if (base_addr == base)
+				return pte_page(kctrl->pteval[i]);
+		}
 	}
 
 	return virt_to_page(vaddr);
@@ -462,10 +490,6 @@ static inline void kmap_local_idx_pop(void)
 # define arch_kmap_local_post_unmap(vaddr)	do { } while (0)
 #endif
 
-#ifndef arch_kmap_local_map_idx
-#define arch_kmap_local_map_idx(idx, pfn)	kmap_local_calc_idx(idx)
-#endif
-
 #ifndef arch_kmap_local_unmap_idx
 #define arch_kmap_local_unmap_idx(idx, vaddr)	kmap_local_calc_idx(idx)
 #endif
@@ -494,11 +518,6 @@ static inline bool kmap_high_unmap_local(unsigned long vaddr)
 	return false;
 }
 
-static inline int kmap_local_calc_idx(int idx)
-{
-	return idx + KM_MAX_IDX * smp_processor_id();
-}
-
 static pte_t *__kmap_pte;
 
 static pte_t *kmap_get_pte(unsigned long vaddr, int idx)
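As a side note on how the new lookup identifies the right entry: with the default kmap_local_calc_idx(), nesting level i of kmap_local_page() on CPU c occupies fixmap slot FIX_KMAP_BEGIN + (i + KM_MAX_IDX * c), and __fix_to_virt() of that slot gives the page-aligned base address the loop compares against. Fixmap addresses grow downwards as the index grows, which is why the range check reads base >= __fix_to_virt(FIX_KMAP_END) && base < __fix_to_virt(FIX_KMAP_BEGIN). Below is a minimal user-space model of that arithmetic; the constants are made-up stand-ins for the kernel's PAGE_SIZE, KM_MAX_IDX, FIXADDR_TOP and FIX_KMAP_BEGIN, not real values.

#include <assert.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel constants. */
#define PAGE_SIZE      4096UL
#define KM_MAX_IDX     16
#define FIXADDR_TOP    0xfffff000UL
#define FIX_KMAP_BEGIN 8

/* Mirrors __fix_to_virt(): fixmap slots grow downwards from FIXADDR_TOP. */
static unsigned long fix_to_virt(unsigned long idx)
{
	return FIXADDR_TOP - (idx << 12);
}

/* Default kmap_local_calc_idx(): a per-CPU block of KM_MAX_IDX slots. */
static int kmap_local_calc_idx(int i, int cpu)
{
	return i + KM_MAX_IDX * cpu;
}

int main(void)
{
	int cpu = 2, i = 3;	/* third nested kmap_local_page() on CPU 2 */
	unsigned long idx = FIX_KMAP_BEGIN + kmap_local_calc_idx(i, cpu);
	unsigned long base = fix_to_virt(idx);
	void *vaddr = (void *)(base + 123);	/* some offset into the page */

	/* __kmap_to_page() masks the address and compares it to the slot base. */
	assert(((unsigned long)vaddr & ~(PAGE_SIZE - 1)) == base);
	printf("slot %lu -> base %#lx\n", idx, base);
	return 0;
}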
