 #include <asm/tlbflush.h>
 #include <linux/vmalloc.h>
 
+#ifdef CONFIG_KMAP_LOCAL
+static inline int kmap_local_calc_idx(int idx)
+{
+	return idx + KM_MAX_IDX * smp_processor_id();
+}
+
+#ifndef arch_kmap_local_map_idx
+#define arch_kmap_local_map_idx(idx, pfn)	kmap_local_calc_idx(idx)
+#endif
+#endif /* CONFIG_KMAP_LOCAL */
+
 /*
  * Virtual_count is not a pure "count".
  * 0 means that it is not mapped, and has not been mapped
@@ -142,12 +153,29 @@ pte_t *pkmap_page_table;
 
 struct page *__kmap_to_page(void *vaddr)
 {
+	unsigned long base = (unsigned long) vaddr & PAGE_MASK;
+	struct kmap_ctrl *kctrl = &current->kmap_ctrl;
 	unsigned long addr = (unsigned long)vaddr;
+	int i;
+
+	/* kmap() mappings */
+	if (WARN_ON_ONCE(addr >= PKMAP_ADDR(0) &&
+			 addr < PKMAP_ADDR(LAST_PKMAP)))
+		return pte_page(pkmap_page_table[PKMAP_NR(addr)]);
 
-	if (addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP)) {
-		int i = PKMAP_NR(addr);
+	/* kmap_local_page() mappings */
+	if (WARN_ON_ONCE(base >= __fix_to_virt(FIX_KMAP_END) &&
+			 base < __fix_to_virt(FIX_KMAP_BEGIN))) {
+		for (i = 0; i < kctrl->idx; i++) {
+			unsigned long base_addr;
+			int idx;
 
-		return pte_page(pkmap_page_table[i]);
+			idx = arch_kmap_local_map_idx(i, pte_pfn(kctrl->pteval[i]));
+			base_addr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+
+			if (base_addr == base)
+				return pte_page(kctrl->pteval[i]);
+		}
 	}
 
 	return virt_to_page(vaddr);
@@ -462,10 +490,6 @@ static inline void kmap_local_idx_pop(void)
 # define arch_kmap_local_post_unmap(vaddr)	do { } while (0)
 #endif
 
-#ifndef arch_kmap_local_map_idx
-#define arch_kmap_local_map_idx(idx, pfn)	kmap_local_calc_idx(idx)
-#endif
-
 #ifndef arch_kmap_local_unmap_idx
 #define arch_kmap_local_unmap_idx(idx, vaddr)	kmap_local_calc_idx(idx)
 #endif
@@ -494,11 +518,6 @@ static inline bool kmap_high_unmap_local(unsigned long vaddr)
 	return false;
 }
 
-static inline int kmap_local_calc_idx(int idx)
-{
-	return idx + KM_MAX_IDX * smp_processor_id();
-}
-
 static pte_t *__kmap_pte;
 
 static pte_t *kmap_get_pte(unsigned long vaddr, int idx)
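
For reference, a minimal caller-side sketch of what the change above enables (illustration only, not part of the commit): after this patch kmap_to_page() can resolve addresses returned by kmap_local_page(), not just kmap() and lowmem addresses. The helper name below is made up; it assumes a CONFIG_HIGHMEM build and uses only existing kernel APIs (alloc_page(), kmap_local_page(), kmap_to_page(), kunmap_local(), __free_page()).

#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/mm.h>

/* Illustration only: round-trip a page through a kmap_local_page() mapping. */
static void kmap_local_roundtrip_demo(void)
{
	/* Prefer a highmem page so the new local-mapping lookup path is exercised. */
	struct page *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
	void *vaddr;

	if (!page)
		return;

	vaddr = kmap_local_page(page);
	/* With this patch, kmap_to_page() finds the backing page even for
	 * a kmap_local_page() mapping of a highmem page. */
	WARN_ON(kmap_to_page(vaddr) != page);
	kunmap_local(vaddr);

	__free_page(page);
}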