drivers: rkflash: Using kmap_atomic to build page address mapping
The virtual address of pages allocated with __GFP_HIGHMEM is not
permanently mapped on large-memory aarch32 systems. Use kmap_atomic
to build the mapping before accessing them, or the access will fail
like:

    Unable to handle kernel NULL pointer dereference at virtual address 00000000
    pgd = 50ec00c8
    [00000000] *pgd=00000000
    Internal error: Oops: 817 [#1] PREEMPT SMP ARM
    Modules linked in:
    CPU: 3 PID: 67 Comm: rkflash Not tainted 4.19.111 #67
    Hardware name: Generic DT based system
    PC is at memcpy+0x50/0x330
    LR is at 0x61640a68

Change-Id: I0cde9012d29e49d9ba751cb019ccfa784c01b7c7
Signed-off-by: Jon Lin <jon.lin@rock-chips.com>
Jon Lin authored and rkhuangtao committed Oct 13, 2022
1 parent b46ed13 commit 24fe52a
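For context: on 32-bit ARM with more RAM than the kernel can permanently map, a page allocated with __GFP_HIGHMEM has no fixed kernel virtual address. page_address() returns NULL for such a page, and the subsequent memcpy() dereferences that NULL, which is exactly the oops quoted above. kmap_atomic() instead creates a short-lived per-CPU mapping. A minimal sketch of the copy pattern the patch switches to, with an illustrative helper name that is not part of the driver:

    #include <linux/blkdev.h>
    #include <linux/highmem.h>
    #include <linux/string.h>

    /*
     * Copy every segment of @req into the linear bounce buffer @dst.
     * Highmem pages have no permanent kernel mapping, so each page is
     * mapped with kmap_atomic() only for the duration of the memcpy().
     */
    static void rkflash_copy_req_to_buf(struct request *req, char *dst)
    {
    	struct req_iterator iter;
    	struct bio_vec bvec;
    	void *page_buf;

    	rq_for_each_segment(bvec, req, iter) {
    		page_buf = kmap_atomic(bvec.bv_page); /* never NULL, even for highmem */
    		memcpy(dst, page_buf + bvec.bv_offset, bvec.bv_len);
    		kunmap_atomic(page_buf);
    		dst += bvec.bv_len;
    	}
    }

Unlike kmap(), kmap_atomic() never sleeps, so it is safe even while rq->queue_lock is held; the trade-off is that nothing between kmap_atomic() and kunmap_atomic() may sleep either, which is why the patch maps and unmaps around each memcpy() rather than around the whole transfer.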
Showing 1 changed file with 36 additions and 39 deletions.

drivers/rkflash/rkflash_blk.c
@@ -245,20 +245,26 @@ static int rkflash_blk_xfer(struct flash_blk_dev *dev,
 static int rkflash_blk_check_buffer_align(struct request *req, char **pbuf)
 {
 	int nr_vec = 0;
-	struct bio_vec bvec;
+	struct bio_vec bv;
 	struct req_iterator iter;
 	char *buffer;
 	void *firstbuf = 0;
 	char *nextbuffer = 0;
 
-	rq_for_each_segment(bvec, req, iter) {
-		buffer = page_address(bvec.bv_page) + bvec.bv_offset;
+	rq_for_each_segment(bv, req, iter) {
+		/* high mem return 0 and using kernel buffer */
+		if (PageHighMem(bv.bv_page))
+			return 0;
+
+		buffer = page_address(bv.bv_page) + bv.bv_offset;
 		if (!buffer)
 			return 0;
 		if (!firstbuf)
 			firstbuf = buffer;
 		nr_vec++;
 		if (nextbuffer && nextbuffer != buffer)
 			return 0;
-		nextbuffer = buffer + bvec.bv_len;
+		nextbuffer = buffer + bv.bv_len;
 	}
 	*pbuf = firstbuf;
 	return 1;
@@ -269,12 +275,11 @@ static int rkflash_blktrans_thread(void *arg)
 	struct flash_blk_ops *blk_ops = arg;
 	struct request_queue *rq = blk_ops->rq;
 	struct request *req = NULL;
-	char *buf;
+	char *buf, *page_buf;
 	struct req_iterator rq_iter;
 	struct bio_vec bvec;
 	unsigned long long sector_index = ULLONG_MAX;
 	unsigned long totle_nsect;
-	unsigned long rq_len = 0;
 	int rw_flag = 0;
 
 	spin_lock_irq(rq->queue_lock);
@@ -305,7 +310,6 @@ static int rkflash_blktrans_thread(void *arg)
 		dev = req->rq_disk->private_data;
 		totle_nsect = (req->__data_len) >> 9;
 		sector_index = blk_rq_pos(req);
-		rq_len = 0;
 		buf = 0;
 		res = 0;
 		rw_flag = req_op(req);
@@ -322,7 +326,7 @@ static int rkflash_blktrans_thread(void *arg)
 			if (!__blk_end_request_cur(req, res))
 				req = NULL;
 			continue;
-		} else if (rw_flag == REQ_OP_READ && mtd_read_temp_buffer) {
+		} else if (rw_flag == REQ_OP_READ) {
 			buf = mtd_read_temp_buffer;
 			rkflash_blk_check_buffer_align(req, &buf);
 			spin_unlock_irq(rq->queue_lock);
@@ -337,46 +341,39 @@ static int rkflash_blktrans_thread(void *arg)
 				char *p = buf;
 
 				rq_for_each_segment(bvec, req, rq_iter) {
-					memcpy(page_address(bvec.bv_page) +
+					page_buf = kmap_atomic(bvec.bv_page);
+					memcpy(page_buf +
 					       bvec.bv_offset,
 					       p,
 					       bvec.bv_len);
 					p += bvec.bv_len;
+					kunmap_atomic(page_buf);
 				}
 			}
 		} else if (rw_flag == REQ_OP_WRITE){
-			rq_for_each_segment(bvec, req, rq_iter) {
-				if ((page_address(bvec.bv_page)
-				    + bvec.bv_offset)
-				    == (buf + rq_len)) {
-					rq_len += bvec.bv_len;
-				} else {
-					if (rq_len) {
-						spin_unlock_irq(rq->queue_lock);
-						res = rkflash_blk_xfer(dev,
-							sector_index,
-							rq_len >> 9,
-							buf,
-							rw_flag,
-							totle_nsect);
-						spin_lock_irq(rq->queue_lock);
-					}
-					sector_index += rq_len >> 9;
-					buf = (page_address(bvec.bv_page) +
-					       bvec.bv_offset);
-					rq_len = bvec.bv_len;
+			buf = mtd_read_temp_buffer;
+			rkflash_blk_check_buffer_align(req, &buf);
+			if (buf == mtd_read_temp_buffer) {
+				char *p = buf;
+
+				rq_for_each_segment(bvec, req, rq_iter) {
+					page_buf = kmap_atomic(bvec.bv_page);
+					memcpy(p,
+					       page_buf +
+					       bvec.bv_offset,
+					       bvec.bv_len);
+					p += bvec.bv_len;
+					kunmap_atomic(page_buf);
 				}
 			}
-			if (rq_len) {
-				spin_unlock_irq(rq->queue_lock);
-				res = rkflash_blk_xfer(dev,
-						       sector_index,
-						       rq_len >> 9,
-						       buf,
-						       rw_flag,
-						       totle_nsect);
-				spin_lock_irq(rq->queue_lock);
-			}
+			spin_unlock_irq(rq->queue_lock);
+			res = rkflash_blk_xfer(dev,
+					       sector_index,
+					       totle_nsect,
+					       buf,
+					       rw_flag,
+					       totle_nsect);
+			spin_lock_irq(rq->queue_lock);
 		} else {
 			pr_err("%s error req flag\n", __func__);
 		}

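The reworked rkflash_blk_check_buffer_align() above encodes one decision: the hardware may use a request's pages in place only if every segment lives in lowmem and the segments form one contiguous run; a single highmem segment forces the bounce-buffer path, because such a page is only addressable while a kmap_atomic() mapping is held. A standalone sketch of that check, using a hypothetical helper name rather than the driver's:

    #include <linux/blkdev.h>
    #include <linux/highmem.h>

    /*
     * Return true only if every segment of @req lives in lowmem and the
     * segments are virtually contiguous, so @start can be handed to the
     * hardware directly. Any highmem page means the caller must bounce
     * the data through a kernel buffer instead.
     */
    static bool req_directly_addressable(struct request *req, char **start)
    {
    	struct req_iterator iter;
    	struct bio_vec bvec;
    	char *next = NULL;	/* expected address of the next segment */

    	rq_for_each_segment(bvec, req, iter) {
    		char *cur;

    		if (PageHighMem(bvec.bv_page))
    			return false;	/* no permanent mapping to hand out */
    		cur = page_address(bvec.bv_page) + bvec.bv_offset;
    		if (!next)
    			*start = cur;	/* first segment */
    		else if (cur != next)
    			return false;	/* gap between segments */
    		next = cur + bvec.bv_len;
    	}
    	return true;
    }

When the check fails, the driver falls back to mtd_read_temp_buffer and the per-segment kmap_atomic() copies shown in the last hunk.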