
Commit 336e213

ARC: mm: preps ahead of HIGHMEM support
Before we plug in highmem support, some of the code needs to be ready for it:

- copy_user_highpage() needs to be using the kmap_atomic API
- mk_pte() can't assume page_address()
- do_page_fault() can't assume VMALLOC_END is the end of the kernel vaddr space

Signed-off-by: Alexey Brodkin <abrodkin@synopsys.com>
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
1 parent d408464 commit 336e213

3 files changed, 22 insertions(+), 16 deletions(-)
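For context, a minimal sketch (not part of the commit) of the access pattern these preps move toward: once CONFIG_HIGHMEM is enabled, a page above the direct-mapped region has no permanent kernel mapping, so page_address() can return NULL and the page has to be mapped temporarily via kmap_atomic()/kunmap_atomic(). The helper touch_page_data() below is a hypothetical illustration only, not anything added by this patch.

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/types.h>

/* Hypothetical helper: write one byte into an arbitrary page.
 * For a lowmem page, kmap_atomic() boils down to page_address();
 * for a highmem page it sets up a short-lived per-CPU mapping.
 */
static void touch_page_data(struct page *page, unsigned int offset, u8 val)
{
        u8 *vaddr = kmap_atomic(page);  /* valid even when page_address() is NULL */

        vaddr[offset] = val;

        kunmap_atomic(vaddr);           /* tear down the temporary mapping */
}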

arch/arc/include/asm/pgtable.h

Lines changed: 1 addition & 8 deletions
@@ -270,13 +270,7 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
                (unsigned long)(((pte_val(x) - CONFIG_LINUX_LINK_BASE) >> \
                                 PAGE_SHIFT)))
 
-#define mk_pte(page, pgprot)                                           \
-({                                                                     \
-       pte_t pte;                                                      \
-       pte_val(pte) = __pa(page_address(page)) + pgprot_val(pgprot);   \
-       pte;                                                            \
-})
-
+#define mk_pte(page, prot)     pfn_pte(page_to_pfn(page), prot)
 #define pte_pfn(pte)           (pte_val(pte) >> PAGE_SHIFT)
 #define pfn_pte(pfn, prot)     (__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
 #define __pte_index(addr)      (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
@@ -360,7 +354,6 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 #define pgd_offset_fast(mm, addr)      pgd_offset(mm, addr)
 #endif
 
-extern void paging_init(void);
 extern pgd_t swapper_pg_dir[] __aligned(PAGE_SIZE);
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
                       pte_t *ptep);
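The mk_pte() rework matters because __pa(page_address(page)) only works while the page has a kernel virtual address, which a highmem page need not have; page_to_pfn() is valid for every page. A rough illustration of what the new one-liner amounts to (make_pte_for() is a hypothetical name, and the pfn_pte() encoding shown is the ARC variant from this same header):

#include <linux/mm.h>
#include <asm/pgtable.h>

/* Sketch: build a PTE for @page without touching page_address(). */
static pte_t make_pte_for(struct page *page, pgprot_t prot)
{
        /* The pfn comes from the page's position in mem_map (or its
         * memory section), so it is meaningful even for highmem pages
         * that have no kernel mapping at this point.
         */
        unsigned long pfn = page_to_pfn(page);

        return __pte((pfn << PAGE_SHIFT) | pgprot_val(prot));
}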

arch/arc/mm/cache.c

Lines changed: 11 additions & 5 deletions
@@ -806,8 +806,8 @@ void flush_anon_page(struct vm_area_struct *vma, struct page *page,
 void copy_user_highpage(struct page *to, struct page *from,
                        unsigned long u_vaddr, struct vm_area_struct *vma)
 {
-       unsigned long kfrom = (unsigned long)page_address(from);
-       unsigned long kto = (unsigned long)page_address(to);
+       void *kfrom = kmap_atomic(from);
+       void *kto = kmap_atomic(to);
        int clean_src_k_mappings = 0;
 
        /*
@@ -817,13 +817,16 @@ void copy_user_highpage(struct page *to, struct page *from,
         *
         * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
         * equally valid for SRC page as well
+        *
+        * For !VIPT cache, all of this gets compiled out as
+        * addr_not_cache_congruent() is 0
         */
        if (page_mapped(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
-               __flush_dcache_page(kfrom, u_vaddr);
+               __flush_dcache_page((unsigned long)kfrom, u_vaddr);
                clean_src_k_mappings = 1;
        }
 
-       copy_page((void *)kto, (void *)kfrom);
+       copy_page(kto, kfrom);
 
        /*
         * Mark DST page K-mapping as dirty for a later finalization by
@@ -840,11 +843,14 @@ void copy_user_highpage(struct page *to, struct page *from,
         * sync the kernel mapping back to physical page
         */
        if (clean_src_k_mappings) {
-               __flush_dcache_page(kfrom, kfrom);
+               __flush_dcache_page((unsigned long)kfrom, (unsigned long)kfrom);
                set_bit(PG_dc_clean, &from->flags);
        } else {
                clear_bit(PG_dc_clean, &from->flags);
        }
+
+       kunmap_atomic(kto);
+       kunmap_atomic(kfrom);
 }
 
 void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
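Stripped of the ARC-specific aliasing D-cache bookkeeping, the core of the copy_user_highpage() change is the kmap_atomic()/kunmap_atomic() bracket around the copy. A minimal sketch of just that pattern (copy_highpage_sketch() is an illustrative name, not a kernel API):

#include <linux/highmem.h>
#include <linux/mm.h>

/* Sketch: copy one (possibly highmem) page into another. */
static void copy_highpage_sketch(struct page *to, struct page *from)
{
        void *kfrom = kmap_atomic(from);
        void *kto = kmap_atomic(to);

        copy_page(kto, kfrom);

        /*
         * Unmap in reverse order of mapping: atomic kmaps behave like a
         * small per-CPU stack of fixmap slots, and preemption stays
         * disabled while they are held.
         */
        kunmap_atomic(kto);
        kunmap_atomic(kfrom);
}

The reverse-order kunmap_atomic() calls at the end of the real function above follow the same rule.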

arch/arc/mm/fault.c

Lines changed: 10 additions & 3 deletions
@@ -18,7 +18,14 @@
 #include <asm/pgalloc.h>
 #include <asm/mmu.h>
 
-static int handle_vmalloc_fault(unsigned long address)
+/*
+ * kernel virtual address is required to implement vmalloc/pkmap/fixmap
+ * Refer to asm/processor.h for System Memory Map
+ *
+ * It simply copies the PMD entry (pointer to 2nd level page table or hugepage)
+ * from swapper pgdir to task pgdir. The 2nd level table/page is thus shared
+ */
+noinline static int handle_kernel_vaddr_fault(unsigned long address)
 {
        /*
         * Synchronize this task's top level page-table
@@ -72,8 +79,8 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
         * only copy the information from the master page table,
         * nothing more.
         */
-       if (address >= VMALLOC_START && address <= VMALLOC_END) {
-               ret = handle_vmalloc_fault(address);
+       if (address >= VMALLOC_START) {
+               ret = handle_kernel_vaddr_fault(address);
                if (unlikely(ret))
                        goto bad_area_nosemaphore;
                else
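The check against VMALLOC_END is dropped because, with highmem, the pkmap and fixmap regions sit above VMALLOC_END yet are still kernel virtual addresses whose page tables live only in swapper_pg_dir. The body of handle_kernel_vaddr_fault() is not part of this hunk; per its comment it copies the reference PMD entry into the faulting task's pgdir, which typically looks something like the sketch below (an illustrative reconstruction, not the committed code, and the ARC variant may differ in details such as pgd_offset_fast()):

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/pgtable.h>

/* Sketch: sync one kernel-vaddr PMD entry from the reference page
 * tables (swapper_pg_dir) into the current task's page tables, so the
 * 2nd level table is shared rather than duplicated per process.
 */
static noinline int kernel_vaddr_fault_sketch(unsigned long address)
{
        pgd_t *pgd = pgd_offset(current->active_mm, address);
        pgd_t *pgd_k = pgd_offset_k(address);
        pud_t *pud, *pud_k;
        pmd_t *pmd, *pmd_k;

        if (!pgd_present(*pgd_k))
                return 1;

        pud = pud_offset(pgd, address);
        pud_k = pud_offset(pgd_k, address);
        if (!pud_present(*pud_k))
                return 1;

        pmd = pmd_offset(pud, address);
        pmd_k = pmd_offset(pud_k, address);
        if (!pmd_present(*pmd_k))
                return 1;

        set_pmd(pmd, *pmd_k);   /* share swapper's 2nd level table */
        return 0;
}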
